text
stringlengths
1
22.8M
```javascript // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Inspector protocol test: verifies that Debugger.setBlackboxedRanges causes
// Debugger.stepInto to skip over the blackboxed position ranges of test.js,
// so paused call stacks never land inside a blackboxed span.
let {session, contextGroup, Protocol} = InspectorTest.start('Checks framework debugging with blackboxed ranges.');

// The script under test, injected at line offset 7, column offset 26.
// NOTE(review): the multi-line source of this template literal appears to have
// been collapsed onto a single line in this copy; the (line, column) positions
// used by the test cases below assume the original multi-line layout — verify
// against the upstream test before editing the literal.
contextGroup.addScript( ` function foo() { return boo(); } function boo() { return 42; } function testFunction() { foo(); } //# sourceURL=test.js`, 7, 26);

session.setupScriptMap();

// On every pause: log the call stack, then immediately step into the next
// statement. Blackboxed ranges must never appear as a pause location.
Protocol.Debugger.onPaused(message => {
  session.logCallFrames(message.params.callFrames);
  InspectorTest.log('');
  Protocol.Debugger.stepInto();
});

// Capture the scriptId of test.js so testPositions() can target it with
// Debugger.setBlackboxedRanges.
var scriptId;
Protocol.Debugger.onScriptParsed(message => {
  if (message.params.url === 'test.js') {
    scriptId = message.params.scriptId;
  }
});

// Blackbox the evaluation wrapper (expr.js) by pattern so only frames from
// test.js show up in the logged call stacks.
Protocol.Debugger.enable()
    .then(() => Protocol.Debugger.setBlackboxPatterns({patterns: ['expr\.js']}))
    .then(() => InspectorTest.runTestSuite(testSuite));

var testSuite = [
  // A single range starting at (0, 0) blackboxes the entire script.
  function testEntireScript(next) {
    testPositions([position(0, 0)]).then(next);
  },
  // Blackboxing starts at line 11, so foo (before it) stays visible.
  function testFooNotBlackboxed(next) {
    testPositions([position(11, 0)]).then(next);
  },
  // foo's body falls inside the (8,0)-(10,3) blackboxed span.
  function testFooBlackboxed(next) {
    testPositions([position(8, 0), position(10, 3)]).then(next);
  },
  function testBooPartiallyBlackboxed1(next) {
    // first line is not blackboxed, second and third - blackboxed.
    testPositions([position(12, 0)]).then(next);
  },
  function testBooPartiallyBlackboxed2(next) {
    // first line is blackboxed, second - not, third - blackboxed.
    testPositions([ position(11, 0), position(12, 0), position(13, 0) ]).then(next);
  },
  function testBooPartiallyBlackboxed3(next) {
    // first line is blackboxed, second and third - not.
    testPositions([ position(11, 0), position(12, 0), position(14, 0) ]).then(next);
  }
];

// Applies `positions` as the blackboxed ranges of test.js (logging the
// protocol response), then schedules a pause and runs testFunction() so the
// onPaused handler above logs the resulting non-blackboxed pause locations.
function testPositions(positions) {
  contextGroup.schedulePauseOnNextStatement('', '');
  return Protocol.Debugger
      .setBlackboxedRanges({scriptId: scriptId, positions: positions})
      .then(InspectorTest.logMessage)
      .then(
          () => Protocol.Runtime.evaluate(
              {expression: 'testFunction()//# sourceURL=expr.js'}));
}

// Builds a protocol ScriptPosition object.
function position(line, column) {
  return {lineNumber: line, columnNumber: column};
}
```
```c++ #pragma once

#include "core_configuration/core_configuration.hpp"
#include "json_utility.hpp"
#include "manipulator/manipulator_factory.hpp"
#include "manipulator/manipulator_manager.hpp"
#include "manipulator/manipulators/basic/basic.hpp"

namespace krbn {
namespace grabber {
namespace device_grabber_details {
// Owns the manipulator_manager that implements the "fn function keys"
// behavior: it rebuilds the full set of basic manipulators whenever the
// profile or the relevant system preference changes.
class fn_function_keys_manipulator_manager final {
public:
  fn_function_keys_manipulator_manager(void) {
    manipulator_manager_ = std::make_shared<manipulator::manipulator_manager>();
  }

  // Accessor for the managed manipulator_manager (shared with the caller).
  std::shared_ptr<manipulator::manipulator_manager> get_manipulator_manager(void) const {
    return manipulator_manager_;
  }

  // Rebuilds all fn-function-key manipulators from scratch for the given
  // profile and the "use F1, F2, etc. as standard function keys" preference.
  void update(const core_configuration::details::profile& profile,
              const pqrs::osx::system_preferences::properties& system_preferences_properties) {
    // Drop the previously generated manipulators before regenerating.
    manipulator_manager_->invalidate_manipulators();

    auto from_mandatory_modifiers = nlohmann::json::array();
    auto from_optional_modifiers = nlohmann::json::array();
    from_optional_modifiers.push_back("any");
    auto to_modifiers = nlohmann::json::array();

    if (system_preferences_properties.get_use_fkeys_as_standard_function_keys()) {
      // f1 -> f1
      // fn+f1 -> display_brightness_decrement
      from_mandatory_modifiers.push_back("fn");
      // consumer_key_code::dictation does not work with modifier_flag::fn.
      // So, we send the plain media keys.
      // (e.g., display_brightness_decrement, not fn+display_brightness_decrement)
    } else {
      // f1 -> display_brightness_decrement
      // fn+f1 -> f1

      // fn+f1 ... fn+f12 -> f1 .. f12
      for (int i = 1; i <= 12; ++i) {
        auto from_json = nlohmann::json::object({
            {"key_code", fmt::format("f{0}", i)},
            {"modifiers", nlohmann::json::object({
                              {"mandatory", nlohmann::json::array({"fn"})},
                              {"optional", nlohmann::json::array({"any"})},
                          })},
        });

        // Keep fn held in the output so fn+f1 still behaves as a plain f1.
        auto to_json = nlohmann::json::object({
            {"key_code", fmt::format("f{0}", i)},
            {"modifiers", nlohmann::json::array({"fn"})},
        });

        std::vector<manipulator::to_event_definition> to_event_definitions;
        to_event_definitions.emplace_back(to_json);

        try {
          auto manipulator = std::make_shared<manipulator::manipulators::basic::basic>(manipulator::manipulators::basic::from_event_definition(from_json),
                                                                                       to_event_definitions);
          manipulator_manager_->push_back_manipulator(std::shared_ptr<manipulator::manipulators::base>(manipulator));
        } catch (const pqrs::json::unmarshal_error& e) {
          logger::get_logger()->error(fmt::format("karabiner.json error: {0}", e.what()));
        } catch (const std::exception& e) {
          logger::get_logger()->error(e.what());
        }
      }
    }

    // from_modifiers+f1 -> display_brightness_decrement ...
    //
    // Device-specific fn_function_keys entries first; each manipulator is
    // scoped to its device via make_device_if_condition.
    for (const auto& device : profile.get_devices()) {
      for (const auto& pair : device->get_fn_function_keys()->get_pairs()) {
        try {
          if (auto m = make_manipulator(pair, from_mandatory_modifiers, from_optional_modifiers, to_modifiers)) {
            m->push_back_condition(manipulator::manipulator_factory::make_event_changed_if_condition(false));
            m->push_back_condition(manipulator::manipulator_factory::make_device_unless_touch_bar_condition());
            auto c = manipulator::manipulator_factory::make_device_if_condition(*device);
            m->push_back_condition(c);
            manipulator_manager_->push_back_manipulator(m);
          }
        } catch (const pqrs::json::unmarshal_error& e) {
          logger::get_logger()->error(fmt::format("karabiner.json error: {0}", e.what()));
        } catch (const std::exception& e) {
          logger::get_logger()->error(e.what());
        }
      }
    }

    // Profile-wide fn_function_keys entries (no device condition).
    // make_manipulator handles its own errors and returns nullptr on failure.
    for (const auto& pair : profile.get_fn_function_keys()->get_pairs()) {
      if (auto m = make_manipulator(pair, from_mandatory_modifiers, from_optional_modifiers, to_modifiers)) {
        m->push_back_condition(manipulator::manipulator_factory::make_event_changed_if_condition(false));
        m->push_back_condition(manipulator::manipulator_factory::make_device_unless_touch_bar_condition());
        manipulator_manager_->push_back_manipulator(m);
      }
    }

    // fn+return_or_enter -> keypad_enter ...
    //
    // Fixed fn+key remappings; each entry maps fn+<from.key_code> to
    // <to.key_code> while keeping fn held in the output.
    {
      nlohmann::json data = nlohmann::json::array();

      data.push_back(nlohmann::json::object({
          {"from", nlohmann::json::object({{"key_code", "return_or_enter"}})},
          {"to", nlohmann::json::object({{"key_code", "keypad_enter"}})},
      }));

      data.push_back(nlohmann::json::object({
          {"from", nlohmann::json::object({{"key_code", "delete_or_backspace"}})},
          {"to", nlohmann::json::object({{"key_code", "delete_forward"}})},
      }));

      data.push_back(nlohmann::json::object({
          {"from", nlohmann::json::object({{"key_code", "right_arrow"}})},
          {"to", nlohmann::json::object({{"key_code", "end"}})},
      }));

      data.push_back(nlohmann::json::object({
          {"from", nlohmann::json::object({{"key_code", "left_arrow"}})},
          {"to", nlohmann::json::object({{"key_code", "home"}})},
      }));

      data.push_back(nlohmann::json::object({
          {"from", nlohmann::json::object({{"key_code", "down_arrow"}})},
          {"to", nlohmann::json::object({{"key_code", "page_down"}})},
      }));

      data.push_back(nlohmann::json::object({
          {"from", nlohmann::json::object({{"key_code", "up_arrow"}})},
          {"to", nlohmann::json::object({{"key_code", "page_up"}})},
      }));

      for (const auto& d : data) {
        auto from_json = d["from"];
        from_json["modifiers"]["mandatory"] = nlohmann::json::array({"fn"});
        from_json["modifiers"]["optional"] = nlohmann::json::array({"any"});

        auto to_json = d["to"];
        to_json["modifiers"] = nlohmann::json::array({"fn"});

        std::vector<manipulator::to_event_definition> to_event_definitions;
        to_event_definitions.emplace_back(to_json);

        try {
          auto manipulator = std::make_shared<manipulator::manipulators::basic::basic>(manipulator::manipulators::basic::from_event_definition(from_json),
                                                                                       to_event_definitions);
          manipulator_manager_->push_back_manipulator(std::shared_ptr<manipulator::manipulators::base>(manipulator));
        } catch (const pqrs::json::unmarshal_error& e) {
          logger::get_logger()->error(fmt::format("karabiner.json error: {0}", e.what()));
        } catch (const std::exception& e) {
          logger::get_logger()->error(e.what());
        }
      }
    }

    //
    // Change keys which macOS will ignore.
    //

    // Touch ID
    {
      try {
        for (const auto& from_json : {
                 nlohmann::json::object({
                     // Touch ID
                     {"consumer_key_code", "menu"},
                 }),
                 nlohmann::json::object({
                     // Lock key on Magic Keyboard without Touch ID
                     {"consumer_key_code", "al_terminal_lock_or_screensaver"},
                 }),
             }) {
          std::vector<manipulator::to_event_definition> to_event_definitions;
          to_event_definitions.emplace_back(nlohmann::json::object({
              {"consumer_key_code", "al_terminal_lock_or_screensaver"},
          }));

          auto manipulator = std::make_shared<manipulator::manipulators::basic::basic>(manipulator::manipulators::basic::from_event_definition(from_json),
                                                                                       to_event_definitions);
          manipulator_manager_->push_back_manipulator(std::shared_ptr<manipulator::manipulators::base>(manipulator));
        }
      } catch (const std::exception& e) {
        logger::get_logger()->error(e.what());
      }
    }

    // Application launch buttons
    //
    // Map AL (application launch) consumer key codes to shell commands that
    // open the corresponding macOS application.
    {
      nlohmann::json data = nlohmann::json::array();

      // Typically al_consumer_control_configuration is used as the key to open the music player.
      // path_to_url
      data.push_back(nlohmann::json::object({
          {"from", nlohmann::json::object({{"consumer_key_code", "al_consumer_control_configuration"}})},
          {"to", nlohmann::json::object({{"shell_command", "open -a 'Music.app'"}})},
      }));

      data.push_back(nlohmann::json::object({
          {"from", nlohmann::json::object({{"consumer_key_code", "al_word_processor"}})},
          {"to", nlohmann::json::object({{"shell_command", "open -a 'Pages.app'"}})},
      }));

      data.push_back(nlohmann::json::object({
          {"from", nlohmann::json::object({{"consumer_key_code", "al_text_editor"}})},
          {"to", nlohmann::json::object({{"shell_command", "open -a 'TextEdit.app'"}})},
      }));

      data.push_back(nlohmann::json::object({
          {"from", nlohmann::json::object({{"consumer_key_code", "al_spreadsheet"}})},
          {"to", nlohmann::json::object({{"shell_command", "open -a 'Numbers.app'"}})},
      }));

      data.push_back(nlohmann::json::object({
          {"from", nlohmann::json::object({{"consumer_key_code", "al_presentation_app"}})},
          {"to", nlohmann::json::object({{"shell_command", "open -a 'Keynote.app'"}})},
      }));

      data.push_back(nlohmann::json::object({
          {"from", nlohmann::json::object({{"consumer_key_code", "al_email_reader"}})},
          {"to", nlohmann::json::object({{"shell_command", "open -a 'Mail.app'"}})},
      }));

      data.push_back(nlohmann::json::object({
          {"from", nlohmann::json::object({{"consumer_key_code", "al_calculator"}})},
          {"to", nlohmann::json::object({{"shell_command", "open -a 'Calculator.app'"}})},
      }));

      data.push_back(nlohmann::json::object({
          {"from", nlohmann::json::object({{"consumer_key_code", "al_local_machine_browser"}})},
          {"to", nlohmann::json::object({{"shell_command", "open -a 'Finder.app'"}})},
      }));

      data.push_back(nlohmann::json::object({
          {"from", nlohmann::json::object({{"consumer_key_code", "al_internet_browser"}})},
          {"to", nlohmann::json::object({{"shell_command", "open -a 'Safari.app'"}})},
      }));

      data.push_back(nlohmann::json::object({
          {"from", nlohmann::json::object({{"consumer_key_code", "al_dictionary"}})},
          {"to", nlohmann::json::object({{"shell_command", "open -a 'Dictionary.app'"}})},
      }));

      for (const auto& d : data) {
        auto from_json = d["from"];
        from_json["modifiers"]["mandatory"] = nlohmann::json::array({"any"});

        auto to_json = d["to"];

        std::vector<manipulator::to_event_definition> to_event_definitions;
        to_event_definitions.emplace_back(to_json);

        try {
          auto manipulator = std::make_shared<manipulator::manipulators::basic::basic>(manipulator::manipulators::basic::from_event_definition(from_json),
                                                                                       to_event_definitions);
          manipulator_manager_->push_back_manipulator(std::shared_ptr<manipulator::manipulators::base>(manipulator));
        } catch (const pqrs::json::unmarshal_error& e) {
          logger::get_logger()->error(fmt::format("karabiner.json error: {0}", e.what()));
        } catch (const std::exception& e) {
          logger::get_logger()->error(e.what());
        }
      }
    }
  }

private:
  // Builds a basic manipulator from a (from-json-string, to-json-string)
  // pair, merging the given modifier arrays into the parsed definitions.
  // Returns nullptr if either side is empty or parsing/construction fails
  // (errors are logged, not propagated).
  std::shared_ptr<manipulator::manipulators::base> make_manipulator(const std::pair<std::string, std::string>& pair,
                                                                    const nlohmann::json& from_mandatory_modifiers,
                                                                    const nlohmann::json& from_optional_modifiers,
                                                                    const nlohmann::json& to_modifiers) const {
    try {
      auto from_json = json_utility::parse_jsonc(pair.first);
      if (from_json.empty()) {
        return nullptr;
      }
      from_json["modifiers"]["mandatory"] = from_mandatory_modifiers;
      from_json["modifiers"]["optional"] = from_optional_modifiers;

      auto to_json = json_utility::parse_jsonc(pair.second);
      if (to_json.empty()) {
        return nullptr;
      }

      std::vector<manipulator::to_event_definition> to_event_definitions;
      for (auto&& j : to_json) {
        j["modifiers"] = to_modifiers;
        to_event_definitions.emplace_back(j);
      }

      return std::make_shared<manipulator::manipulators::basic::basic>(manipulator::manipulators::basic::from_event_definition(from_json),
                                                                       to_event_definitions);
    } catch (const pqrs::json::unmarshal_error& e) {
      logger::get_logger()->error(fmt::format("karabiner.json error: {0}", e.what()));
    } catch (const std::exception& e) {
      logger::get_logger()->error(e.what());
    }

    return nullptr;
  }

  std::shared_ptr<manipulator::manipulator_manager> manipulator_manager_;
};
} // namespace device_grabber_details
} // namespace grabber
} // namespace krbn ```
No 81 Squadron was a squadron of the Royal Air Force. It flew fighter aircraft during the Second World War, and reconnaissance aircraft in the Far East after the war and was disbanded in 1970. History First World War No. 81 Squadron Royal Flying Corps was formed on 7 January 1917 at Gosport as a training unit, but unlike many other training squadrons during the First World War, it was not mobilised for active service and was disbanded on 4 July 1918. The squadron reformed on 25 November 1918 with all Canadian personnel and was officially known as No. 1 Squadron, Canadian Air Force until disbanding again on 1 February 1920. Second World War The rebirth of No. 81 Squadron seemingly can be traced to the Air Component Field Force Communication Squadron RAF formed in August 1939, probably associated with the British Expeditionary Force Air Component under Air Vice-Marshal Charles Blount (see British Air Forces in France). On 1 December 1939 the Communications Squadron at Mountjoie, France (see :fr:Montjoi), operating de Havilland Tiger Moths, was redesignated No. 81. It was disbanded on 15 June 1940, when the advancing German forces forced its withdrawal to the United Kingdom. Following the German Invasion of the Soviet Union, it was decided to send a wing of Hawker Hurricane fighters to assist the Soviet war effort, and No. 81 Squadron reformed at RAF Leconfield on 29 July 1941 as part of No. 151 Wing RAF. In September it flew its Hurricanes off an aircraft carrier, deploying to an airfield near Murmansk. It flew both defensive sorties and escort missions for Soviet bombers, while carrying out its principal role of training Soviet pilots on the Hurricane. After a few weeks of operations the Hurricanes were handed over to the Soviets and the Squadron left to return to the UK at the end of November. When it arrived back at the UK, it was re-equipped with Supermarine Spitfires at RAF Turnhouse, Edinburgh, being declared operational on 1 February 1942.
It moved to RAF Hornchurch near London in May, flying its first operation, escorting Hurricanes bombing Bruges on 1 June. At the end of October the Squadron moved to Gibraltar and on 8 November, 19 Spitfires moved to the newly captured airfield at Maison Blanche, Algiers. Following the German surrender in North Africa, it moved to Malta in preparation for the Invasion of Sicily. It then moved to Italy in September but was withdrawn to Egypt in November to prepare for deployment to the Far East. During operations in the Mediterranean, they found their most frequent opponents were Jagdgeschwader 53, which had an ace of spades motif on their aircraft. As the squadron commander considered that they had 'bested' their enemy, they took the motif and started applying it to their aircraft. It arrived at Alipore, India in December 1943, equipped with more modern Spitfire VIIIs, starting operations in January, flying fighter and ground attack missions in support of the Second Battle of Arakan and the Battle of Imphal as part of the RAF Third Tactical Air Force. It was withdrawn to Ceylon in August and disbanded on 20 June 1945. On the same day 123 Squadron was renumbered 81 Squadron but its Thunderbolts did not become operational before the war ended. In October, the squadron was sent to Java during the Indonesian War of Independence, flying tactical reconnaissance duties and covering Allied road convoys, while attacking nationalist held airfields and ammunition dumps. On 30 June 1946, the squadron was again disbanded. Postwar reconnaissance operations On 1 September 1946, No. 684 Squadron, the Far East photo-reconnaissance squadron flying de Havilland Mosquito PR.34s and Spitfire PR.19s, was renumbered as No. 81. It added fighter-reconnaissance Spitfires in August 1947, when it became involved in the Malayan Emergency.
Conversion to Meteor PR.10s began in September 1953, with the Squadron flying the RAF's last operational Spitfire mission on 1 April 1954 and the last operational RAF Mosquito mission on 15 December 1955. It received a few Percival Pembrokes for survey operations in 1956 and began converting to the English Electric Canberra in 1958, flying its last Meteor mission on 7 July 1961, retaining the Canberra until the Squadron was disbanded as part of Britain's withdrawal from bases East of Suez on 16 January 1970. The Squadron was based at RAF Seletar and RAF Tengah in Singapore from 1947 to 1970 with a small detachment at RAF Kai Tak in Hong Kong from 1947 to 1954. Squadron Insignia The squadron insignia consists of a sword on a red star. According to the memoirs of Alan McGregor Peart DFC, who served with the squadron in the Second World War, the squadron had no formal insignia until 1943, when the members of the squadron decided to devise one. The red star was used as a symbol of the squadron's previous deployment to the Soviet Union, and they borrowed the rampant sword of the First Army in North Africa to represent the squadron's deployment in that theatre. The squadron's motto, Non Solum Nobis, is Latin for "not for ourselves alone". Footnotes References External links 81 Squadron RAF Air of Authority - No 81-85 Squadron Histories 081 081 Fighter squadrons of the Royal Air Force in World War II 1917 establishments in the United Kingdom Military units and formations of Ceylon in World War II Military units and formations established in 1917 Military units and formations disestablished in 1970
```python import functools
import math
from enum import IntEnum

import sympy

import torch

from . import ir
from .utils import get_dtype_size, sympy_product
from .virtualized import V


class NCCL_COLL(IntEnum):
    # Collective kinds whose runtime this module can estimate.
    ALL_REDUCE = 0
    ALL_GATHER = 1
    REDUCE_SCATTER = 2


class NVIDIA_GPU_TYPE(IntEnum):
    # GPU generations used to index the llMaxBws table below.
    VOLTA = 0
    AMPERE = 1
    HOPPER = 2


@functools.lru_cache
def get_gpu_type() -> NVIDIA_GPU_TYPE:
    """Detect the local GPU generation from collect_env output.

    Cached for the process lifetime. Falls back to Ampere when the GPU
    model is not one of V100/A100/H100 (or when no GPU info is available).
    """
    gpu_info = torch.utils.collect_env.get_gpu_info(torch.utils.collect_env.run) or ""
    if "V100" in gpu_info:
        return NVIDIA_GPU_TYPE.VOLTA
    elif "A100" in gpu_info:
        return NVIDIA_GPU_TYPE.AMPERE
    elif "H100" in gpu_info:
        return NVIDIA_GPU_TYPE.HOPPER
    else:
        # for other gpu types, assume Ampere
        return NVIDIA_GPU_TYPE.AMPERE


def get_collective_type(node: ir.IRNode) -> NCCL_COLL:
    """Classify a collective kernel node by its python_kernel_name.

    Raises ValueError for non-collective nodes or unsupported collectives.
    """
    if not isinstance(node, ir._CollectiveKernel):
        raise ValueError(f"node is not a collective kernel: {node}")

    kernel_name = node.python_kernel_name
    assert kernel_name is not None
    if "all_reduce" in kernel_name:
        return NCCL_COLL.ALL_REDUCE
    elif "all_gather" in kernel_name:
        return NCCL_COLL.ALL_GATHER
    elif "reduce_scatter" in kernel_name:
        return NCCL_COLL.REDUCE_SCATTER
    else:
        raise ValueError(f"Unsupported collective kernel: {kernel_name}")


def get_collective_input_size_bytes(node: ir.IRNode) -> int:
    """Sum the byte sizes of all inputs of a collective node.

    Symbolic sizes are resolved via the graph's sizevars with a fallback
    hint of 0, so unbacked symbols contribute nothing to the estimate.
    """
    sz_bytes = 0
    for inp in node.inputs:  # type: ignore[attr-defined]
        numel = sympy_product(inp.layout.size)
        if isinstance(numel, sympy.Integer):
            # For ease of testing
            numel = int(numel)
        else:
            numel = V.graph.sizevars.size_hint(numel, fallback=0)
        sz_bytes += numel * get_dtype_size(inp.layout.dtype)
    return sz_bytes


def get_collective_group_size(node: ir.IRNode) -> int:
    """Return the number of ranks in the node's process group.

    The group name is stored as the last constant argument of the
    collective kernel; raises TypeError for other node types.
    """
    if type(node) == ir._CollectiveKernel:
        from torch.distributed.distributed_c10d import _get_group_size_by_name

        return _get_group_size_by_name(node.constant_args[-1])
    else:
        raise TypeError(f"Unsupported collective type: {node}")


####################################################################################################################
# The following code and
# constants are adapted from path_to_url                                                                           #
####################################################################################################################


class NCCL_HW(IntEnum):
    # Interconnect hardware tiers, ordered fastest to slowest.
    NVLINK = 0
    PCI = 1
    NET = 2


class NCCL_ALGO(IntEnum):
    TREE = 0
    RING = 1


class NCCL_PROTO(IntEnum):
    # The ordering and enum values here matches original in
    # path_to_url#L28
    # For difference between these protocols, see path_to_url#issuecomment-571816990
    LL = 0  # Low-latency
    # LL128 = 1   # Low-latency 128-byte
    # SIMPLE = 2


# Latencies in us
# len(NCCL_ALGO) x len(NCCL_PROTO)
# NOTE: use array instead of tensor to prevent incompatibility with fake mode
baseLat = [
    # Tree
    [
        6.8,  # LL
    ],
    # Ring
    [
        6.6,  # LL
    ],
]

# Latencies in us
# len(NCCL_HW) x len(NCCL_ALGO) x len(NCCL_PROTO)
hwLat = [
    # NVLINK
    [
        [0.6],  # Tree (LL)
        [0.6],  # Ring (LL)
    ],
    # PCI
    [
        [1.0],  # Tree (LL)
        [1.0],  # Ring (LL)
    ],
    # NET
    [
        [5.0],  # Tree (LL)
        [2.7],  # Ring (LL)
    ],
]


# LL128 max BW per channel
llMaxBws = [
    # Volta-N1/Intel-N2/Intel-N4
    [
        39.0,
        39.0,
        20.4,
    ],
    # Ampere-N1/AMD-N2/AMD-N4
    [
        87.7,
        22.5,  # avg of ring & tree
        19.0,
    ],
    # Hopper-N1/AMD-N2/AMD-N4
    [
        87.7,
        22.5,  # avg of ring & tree
        19.0,
    ],
]


def estimate_nccl_collective_runtime(node: ir.IRNode) -> float:
    """
    Returns estimated NCCL collective runtime in nanoseconds (ns).

    The following heuristics are copied from path_to_url
    We aim to estimate the runtime as accurately as possible.

    Assumptions:
    - only ring algorithm (NCCL_ALGO_RING) is used
    - only Low-Latency protocol (NCCL_PROTO_LL) is used, i.e. Simple or LL128 is not used
    - 8 gpus per node  # TODO: Need to find a way to get accurate "gpus per node" and "# nodes" info.
    - collective is one of: allreduce, reducescatter, allgather
    """
    tensor_storage_size_bytes = get_collective_input_size_bytes(node)
    # Convert bytes to GB
    tensor_storage_size_GB = tensor_storage_size_bytes / 1024 / 1024 / 1024

    # Currently assumes each node has 8 gpus. And when >1 node is used, assumes each node uses all 8 gpus.
    # TODO: Need to find a way to get accurate "gpus per node" and "# nodes" info.
    num_gpus_per_node = 8
    group_size = get_collective_group_size(node)
    nNodes = math.ceil(group_size / num_gpus_per_node)
    nRanks = group_size  # this is total # of gpus globally that participate in this collective op

    # A single-rank collective is a no-op: no transfer, no latency.
    if nRanks <= 1:
        return 0

    # Assumes ring algorithm
    nccl_algo = NCCL_ALGO.RING
    nccl_proto = NCCL_PROTO.LL
    coll = get_collective_type(node)

    # =============== bandwidth computation ===============
    # First compute bandwidth in GB/s; then at the end, convert it to GB/ns

    bwIntra = torch._inductor.config.intra_node_bw
    bwInter = torch._inductor.config.inter_node_bw

    compCapIndex = get_gpu_type()
    index2 = nNodes - 1 if nNodes <= 2 else 2
    # LL: for single node, we look at GPU type; for multi-node, we look at CPU type
    index1 = compCapIndex if nNodes == 1 else 0
    llMaxBw = llMaxBws[index1][index2]

    # NOTE: each step of ring algorithm is synchronized,
    # and is bottlenecked by the slowest link which is the inter-node interconnect.
    # hence when nNodes >= 2, bw is inter-node bandwidth.
    # NOTE: the original code in path_to_url
    # have this as `if nNodes <= 2` which seems wrong. Corrected it here.
    bw = bwIntra if nNodes == 1 else bwInter
    nChannels = 2  # Assume # channels is 2
    busBw = nChannels * bw

    # Various model refinements
    busBw = min(
        llMaxBw,
        busBw * (1.0 / 4.0 if (nNodes > 1 or coll == NCCL_COLL.ALL_REDUCE) else 1.0 / 3.0),
    )

    # Number of ring steps for each supported collective; get_collective_type
    # raises for anything else, so nsteps is always bound below.
    if coll == NCCL_COLL.ALL_REDUCE:
        nsteps = 2 * (nRanks - 1)
    elif coll in (NCCL_COLL.REDUCE_SCATTER, NCCL_COLL.ALL_GATHER):
        nsteps = nRanks - 1

    # Convert bus BW to algorithm BW (tensor bytes / algoBW = actual execution time)
    ratio = (1.0 * nRanks) / nsteps  # type: ignore[possibly-undefined]
    bandwidth = busBw * ratio
    # Convert GB/s to GB/ns
    bandwidth_GB_per_ns = bandwidth / 1e9

    # =============== latency computation ===============
    intraHw = NCCL_HW.NVLINK

    # Steps that cross the inter-node network (the remainder are intra-node).
    if coll == NCCL_COLL.ALL_REDUCE:
        if nNodes > 1:
            nInterSteps = 2 * nNodes
        else:
            nInterSteps = 0
    elif coll in (NCCL_COLL.REDUCE_SCATTER, NCCL_COLL.ALL_GATHER):
        nInterSteps = nNodes - 1

    # First compute latency in us; then at the end, convert it to ns
    latency = baseLat[nccl_algo][nccl_proto]
    intraLat = hwLat[intraHw][nccl_algo][nccl_proto]
    interLat = hwLat[NCCL_HW.NET][nccl_algo][nccl_proto]

    # Inter-node rings still have to launch nsteps * net overhead.
    netOverhead = 0.0
    if nNodes > 1:
        netOverhead = 1.0  # getNetOverhead(comm);
    intraLat = max(intraLat, netOverhead)
    latency += (nsteps - nInterSteps) * intraLat + nInterSteps * interLat  # type: ignore[possibly-undefined]
    # Convert us to ns
    latency_ns = latency * 1e3

    # =============== final result ===============
    transport_ns = tensor_storage_size_GB / bandwidth_GB_per_ns
    return transport_ns + latency_ns


################################################################################################################
# The above code and constants are adapted from path_to_url                                                    #
################################################################################################################
```
```go /*
path_to_url

Unless required by applicable law or agreed to in writing, software
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/

package presetcontroller

import (
	"context"
	"fmt"

	"go.uber.org/zap"

	kubermaticv1 "k8c.io/kubermatic/v2/pkg/apis/kubermatic/v1"
	"k8c.io/kubermatic/v2/pkg/util/workerlabel"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
	"k8s.io/client-go/tools/record"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

const (
	// ControllerName is the name used for the controller, its event
	// recorder, and its log context.
	ControllerName = "kkp-preset-controller"
)

// reconciler reacts to Preset changes in the seed cluster; its main job is
// flagging clusters that reference a Preset once that Preset is deleted.
type reconciler struct {
	log                     *zap.SugaredLogger
	workerNameLabelSelector labels.Selector
	workerName              string
	recorder                record.EventRecorder
	seedClient              ctrlruntimeclient.Client
}

// Add registers the preset controller with the given manager, watching
// Preset objects with numWorkers concurrent reconciles.
func Add(
	mgr manager.Manager,
	log *zap.SugaredLogger,
	workerName string,
	numWorkers int,
) error {
	workerSelector, err := workerlabel.LabelSelector(workerName)
	if err != nil {
		return fmt.Errorf("failed to build worker-name selector: %w", err)
	}

	reconciler := &reconciler{
		log:                     log.Named(ControllerName),
		workerNameLabelSelector: workerSelector,
		workerName:              workerName,
		recorder:                mgr.GetEventRecorderFor(ControllerName),
		seedClient:              mgr.GetClient(),
	}

	_, err = builder.ControllerManagedBy(mgr).
		Named(ControllerName).
		WithOptions(controller.Options{
			MaxConcurrentReconciles: numWorkers,
		}).
		For(&kubermaticv1.Preset{}).
		Build(reconciler)

	return err
}

// Reconcile loads the Preset named in the request and reconciles it in the
// seed cluster. (The previous comment mentioned cluster template instances;
// that appears to have been copied from another controller.)
func (r *reconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { log := r.log.With("request", request) log.Debug("Reconciling") preset := &kubermaticv1.Preset{} if err := r.seedClient.Get(ctx, request.NamespacedName, preset); err != nil { if apierrors.IsNotFound(err) { return reconcile.Result{}, nil } return reconcile.Result{}, fmt.Errorf("failed to get preset %s: %w", request.NamespacedName, err) } err := r.reconcile(ctx, preset, log) if err != nil { r.recorder.Event(preset, corev1.EventTypeWarning, "ReconcilingError", err.Error()) } return reconcile.Result{}, err } func (r *reconciler) reconcile(ctx context.Context, preset *kubermaticv1.Preset, log *zap.SugaredLogger) error { // handle deletion to change all cluster annotation if !preset.DeletionTimestamp.IsZero() { log.Debug("The preset was deleted") workerNameLabelSelectorRequirements, _ := r.workerNameLabelSelector.Requirements() presetLabelRequirement, err := labels.NewRequirement(kubermaticv1.IsCredentialPresetLabelKey, selection.Equals, []string{"true"}) if err != nil { return fmt.Errorf("failed to construct label requirement for credential preset: %w", err) } listOpts := &ctrlruntimeclient.ListOptions{ LabelSelector: labels.NewSelector().Add(append(workerNameLabelSelectorRequirements, *presetLabelRequirement)...), } clusters := &kubermaticv1.ClusterList{} if err := r.seedClient.List(ctx, clusters, listOpts); err != nil { return fmt.Errorf("failed to get clusters %w", err) } log.Debug("Update clusters after preset deletion") for _, cluster := range clusters.Items { if cluster.Annotations != nil && cluster.Annotations[kubermaticv1.PresetNameAnnotation] == preset.Name { log.Debugw("Update cluster", "cluster", cluster.Name) copyCluster := cluster.DeepCopy() copyCluster.Annotations[kubermaticv1.PresetInvalidatedAnnotation] = string(kubermaticv1.PresetDeleted) if err := r.seedClient.Update(ctx, copyCluster); err != nil { return err } } } } return nil } ```
```xml // See LICENSE in the project root for license information. import * as path from 'path'; import type { Asset, Chunk, ChunkGraph, Compilation, Compiler, LoaderContext, Module, runtime, WebpackError, WebpackPluginInstance } from 'webpack'; import { getPseudolocalizer, type ILocalizationFile, parseResJson } from '@rushstack/localization-utilities'; import * as Constants from './utilities/Constants'; import type { ILocalizationPluginOptions, ILocalizationStats, ILocaleFileData, ILocaleFileObject, IResolvedMissingTranslations } from './interfaces'; import type { IAssetPathOptions } from './webpackInterfaces'; import { markEntity, getMark } from './utilities/EntityMarker'; import { processLocalizedAsset, processNonLocalizedAsset } from './AssetProcessor'; import { getHashFunction, type HashFn, updateAssetHashes } from './trueHashes'; import { chunkIsJs } from './utilities/chunkUtilities'; /** * @public */ export interface IStringPlaceholder { /** * The literal string that will be injected for later replacement. */ value: string; /** * The identifier for this particular placeholder, for lookup. */ suffix: string; /** * The values of this string in each output locale. */ valuesByLocale: Map<string, string>; /** * The key used to identify the source file containing the string. */ locFilePath: string; /** * The identifier of the string within its original source file. */ stringName: string; } const PLUGIN_NAME: 'localization' = 'localization'; const pluginForCompiler: WeakMap<Compiler, LocalizationPlugin> = new WeakMap(); /** * Gets the instance of the LocalizationPlugin bound to the specified webpack compiler. * Used by loaders. 
*/ export function getPluginInstance(compiler: Compiler | undefined): LocalizationPlugin { const instance: LocalizationPlugin | undefined = compiler && pluginForCompiler.get(compiler); if (!instance) { throw new Error(`Could not find a LocalizationPlugin instance for the current webpack compiler!`); } return instance; } /** * This plugin facilitates localization in webpack. * * @public */ export class LocalizationPlugin implements WebpackPluginInstance { public readonly stringKeys: Map<string, IStringPlaceholder> = new Map(); /** * @internal */ public readonly _options: ILocalizationPluginOptions; private readonly _resolvedTranslatedStringsFromOptions: Map< string, Map<string, ILocaleFileObject | string | ReadonlyMap<string, string>> > = new Map(); private _stringPlaceholderCounter: number = 0; private readonly _stringPlaceholderMap: Map<string, IStringPlaceholder> = new Map(); private _passthroughLocaleName!: string; private _defaultLocale!: string; private _noStringsLocaleName!: string; private _fillMissingTranslationStrings!: boolean; private _formatLocaleForFilename!: (loc: string) => string; private readonly _pseudolocalizers: Map<string, (str: string) => string> = new Map(); /** * The outermost map's keys are the locale names. * The middle map's keys are the resolved, file names. * The innermost map's keys are the string identifiers and its values are the string values. */ private _resolvedLocalizedStrings: Map<string, Map<string, Map<string, string>>> = new Map(); public constructor(options: ILocalizationPluginOptions) { this._options = options; } /** * Apply this plugin to the specified webpack compiler. 
*/ public apply(compiler: Compiler): void { pluginForCompiler.set(compiler, this); // path_to_url#diff-15fb51940da53816af13330d8ce69b4eR66 const isWebpackDevServer: boolean = process.env.WEBPACK_DEV_SERVER === 'true'; const { errors, warnings } = this._initializeAndValidateOptions(compiler, isWebpackDevServer); if (errors.length > 0 || warnings.length > 0) { compiler.hooks.compilation.tap(PLUGIN_NAME, (compilation: Compilation) => { compilation.errors.push(...errors); compilation.warnings.push(...warnings); }); if (errors.length > 0) { // If there are any errors, just pass through the resources in source and don't do any // additional configuration return; } } const { webpack: thisWebpack } = compiler; const { WebpackError, runtime: { GetChunkFilenameRuntimeModule } } = thisWebpack; // Side-channel for async chunk URL generator chunk, since the actual chunk is completely inaccessible // from the assetPath hook below when invoked to build the async URL generator let chunkWithAsyncURLGenerator: Chunk | undefined; const originalGenerate: typeof GetChunkFilenameRuntimeModule.prototype.generate = GetChunkFilenameRuntimeModule.prototype.generate; GetChunkFilenameRuntimeModule.prototype.generate = function ( this: runtime.GetChunkFilenameRuntimeModule ) { // `originalGenerate` will invoke `getAssetPath` to produce the async URL generator // Need to know the identity of the containing chunk to correctly produce the asset path expression chunkWithAsyncURLGenerator = this.chunk; const result: string = originalGenerate.call(this); // Unset after the call finishes because we are no longer generating async URL generators chunkWithAsyncURLGenerator = undefined; return result; }; const asyncGeneratorTest: RegExp = /^\" \+/; const { runtimeLocaleExpression } = this._options; compiler.hooks.thisCompilation.tap(PLUGIN_NAME, (compilation: Compilation) => { let hashFn: HashFn | undefined; if (this._options.realContentHash) { if (runtimeLocaleExpression) { compilation.errors.push( new 
WebpackError( `The "realContentHash" option cannot be used in conjunction with "runtimeLocaleExpression".` ) ); } else { hashFn = getHashFunction({ thisWebpack, compilation }); } } else if (compiler.options.optimization?.realContentHash) { compilation.errors.push( new thisWebpack.WebpackError( `The \`optimization.realContentHash\` option is set and the ${LocalizationPlugin.name}'s ` + '`realContentHash` option is not set. This will likely produce invalid results. Consider setting the ' + `\`realContentHash\` option in the ${LocalizationPlugin.name} plugin.` ) ); } compilation.hooks.assetPath.tap( PLUGIN_NAME, (assetPath: string, options: IAssetPathOptions): string => { const { chunkGraph } = compilation; if ( options.contentHashType === 'javascript' && assetPath.match(Constants.LOCALE_FILENAME_TOKEN_REGEX) ) { // Does this look like an async chunk URL generator? if (typeof options.chunk?.id === 'string' && options.chunk.id.match(asyncGeneratorTest)) { const chunkIdsWithStrings: Set<number | string> = new Set<number | string>(); const chunkIdsWithoutStrings: Set<number | string> = new Set<number | string>(); if (!chunkWithAsyncURLGenerator) { compilation.errors.push( new WebpackError(`No active chunk while constructing async chunk URL generator!`) ); return assetPath; } const asyncChunks: Set<Chunk> = chunkWithAsyncURLGenerator!.getAllAsyncChunks(); for (const asyncChunk of asyncChunks) { const chunkId: number | string | null = asyncChunk.id; if (chunkId === null || chunkId === undefined) { throw new Error(`Chunk "${asyncChunk.name}"'s ID is null or undefined.`); } if (_chunkHasLocalizedModules(chunkGraph, asyncChunk, runtimeLocaleExpression)) { chunkIdsWithStrings.add(chunkId); } else { chunkIdsWithoutStrings.add(chunkId); } } return assetPath.replace(Constants.LOCALE_FILENAME_TOKEN_REGEX, () => { // Use a replacer function so that we don't need to escape anything in the return value // If the runtime chunk is itself localized, forcibly match the locale of the 
runtime chunk // Otherwise prefer the runtime expression if specified const localeExpression: string = (!_chunkHasLocalizedModules( chunkGraph, chunkWithAsyncURLGenerator!, runtimeLocaleExpression ) && runtimeLocaleExpression) || Constants.JSONP_PLACEHOLDER; if (chunkIdsWithStrings.size === 0) { return this._formatLocaleForFilename(this._noStringsLocaleName); } else if (chunkIdsWithoutStrings.size === 0) { return `" + ${localeExpression} + "`; } else { // Generate an object that is used to select between <locale> and <nostrings locale> for each chunk ID // Method: pick the smaller set of (localized, non-localized) and map that to 1 (a truthy value) // All other IDs map to `undefined` (a falsy value), so we then use the ternary operator to select // the appropriate token // // This can be improved in the future. We can maybe sort the chunks such that the chunks below a certain ID // number are localized and the those above are not. const chunkMapping: { [chunkId: string]: 1 } = {}; // Use the map with the fewest values to shorten the expression const isLocalizedSmaller: boolean = chunkIdsWithStrings.size <= chunkIdsWithoutStrings.size; // These are the ids for which the expression should evaluate to a truthy value const smallerSet: Set<number | string> = isLocalizedSmaller ? chunkIdsWithStrings : chunkIdsWithoutStrings; for (const id of smallerSet) { chunkMapping[id] = 1; } const noLocaleExpression: string = JSON.stringify( this._formatLocaleForFilename(this._noStringsLocaleName) ); return `" + (${JSON.stringify(chunkMapping)}[chunkId]?${ isLocalizedSmaller ? localeExpression : noLocaleExpression }:${isLocalizedSmaller ? noLocaleExpression : localeExpression}) + "`; } }); } else { let locale: string | undefined = options.locale; if (!locale) { const isLocalized: boolean = _chunkHasLocalizedModules( chunkGraph, options.chunk as Chunk, runtimeLocaleExpression ); // Ensure that the initial name maps to a file that should exist in the final output locale = isLocalized ? 
this._defaultLocale : this._noStringsLocaleName; } return assetPath.replace( Constants.LOCALE_FILENAME_TOKEN_REGEX, this._formatLocaleForFilename(locale) ); } } else { return assetPath; } } ); const { outputOptions } = compilation; // For compatibility with minifiers, need to generate the additional assets after the optimize process runs compilation.hooks.processAssets.tapPromise( { name: PLUGIN_NAME, // Generating derived assets, but explicitly want to create them *after* asset optimization stage: compiler.webpack.Compilation.PROCESS_ASSETS_STAGE_DEV_TOOLING - 1 }, async (): Promise<void> => { const locales: Set<string> = new Set(this._resolvedLocalizedStrings.keys()); const { chunkGraph, chunks } = compilation; const { localizationStats: statsOptions } = this._options; const filesByChunkName: Map<string, Record<string, string>> | undefined = statsOptions ? new Map() : undefined; const localizedEntryPointNames: string[] = []; const localizedChunkNames: string[] = []; for (const chunk of chunks) { if (!chunkIsJs(chunk, chunkGraph)) { continue; } const isLocalized: boolean = _chunkHasLocalizedModules( chunkGraph, chunk, runtimeLocaleExpression ); const template: Parameters<typeof Compilation.prototype.getAssetPath>[0] = chunk.filenameTemplate || (chunk.hasRuntime() ? 
outputOptions.filename : outputOptions.chunkFilename)!; const defaultAssetName: string = compilation.getPath(template, { chunk, contentHashType: 'javascript' // Without locale this should return the name of the default asset }); const asset: Asset | undefined = compilation.getAsset(defaultAssetName); if (!asset) { compilation.errors.push(new WebpackError(`Missing expected chunk asset ${defaultAssetName}`)); continue; } if (isLocalized) { const localizedAssets: Record<string, string> = processLocalizedAsset({ // Global values plugin: this, compilation, locales, defaultLocale: this._defaultLocale, fillMissingTranslationStrings: this._fillMissingTranslationStrings, formatLocaleForFilenameFn: this._formatLocaleForFilename, // Chunk-specific values chunk, asset, filenameTemplate: template }); if (filesByChunkName && chunk.name) { filesByChunkName.set(chunk.name, localizedAssets); (chunk.hasRuntime() ? localizedEntryPointNames : localizedChunkNames).push(chunk.name); } } else { processNonLocalizedAsset({ // Global values plugin: this, compilation, noStringsLocaleName: this._noStringsLocaleName, formatLocaleForFilenameFn: this._formatLocaleForFilename, // Chunk-specific values chunk, asset, fileName: defaultAssetName }); } } if (hashFn) { updateAssetHashes({ thisWebpack, compilation, hashFn, filesByChunkName }); } // Since the stats generation doesn't depend on content, do it immediately if (statsOptions && filesByChunkName) { const localizationStats: ILocalizationStats = { entrypoints: {}, namedChunkGroups: {} }; // Sort in lexicographic order to ensure stable output localizedChunkNames.sort(); for (const chunkName of localizedChunkNames) { localizationStats.namedChunkGroups[chunkName] = { localizedAssets: filesByChunkName.get(chunkName)! 
}; } // Sort in lexicographic order to ensure stable output localizedEntryPointNames.sort(); for (const chunkName of localizedEntryPointNames) { localizationStats.entrypoints[chunkName] = { localizedAssets: filesByChunkName.get(chunkName)! }; } const { dropPath, callback } = statsOptions; if (dropPath) { compilation.emitAsset( dropPath, new compiler.webpack.sources.RawSource(JSON.stringify(localizationStats)) ); } if (callback) { try { callback(localizationStats, compilation); } catch (e) { /* swallow errors from the callback */ } } } } ); }); } /** * @public * * @returns An object mapping the string keys to placeholders */ public async addDefaultLocFileAsync( context: LoaderContext<{}>, localizedFileKey: string, localizedResourceData: ILocalizationFile ): Promise<Record<string, string>> { const locFileData: ReadonlyMap<string, string> = convertLocalizationFileToLocData(localizedResourceData); const resultObject: Record<string, string> = this._addLocFileAndGetPlaceholders( this._defaultLocale, localizedFileKey, locFileData ); const missingLocales: string[] = []; for (const [translatedLocaleName, translatedStrings] of this._resolvedTranslatedStringsFromOptions) { const translatedLocFileFromOptions: ILocaleFileData | undefined = translatedStrings.get(localizedFileKey); if (!translatedLocFileFromOptions) { missingLocales.push(translatedLocaleName); } else { const translatedLocFileData: ReadonlyMap<string, string> = await normalizeLocalizedData( context, translatedLocFileFromOptions ); this._addTranslations(translatedLocaleName, localizedFileKey, translatedLocFileData); } } const { resolveMissingTranslatedStrings } = this._options.localizedData; if (missingLocales.length > 0 && resolveMissingTranslatedStrings) { let resolvedTranslatedData: IResolvedMissingTranslations | undefined = undefined; try { resolvedTranslatedData = await resolveMissingTranslatedStrings( missingLocales, localizedFileKey, context ); } catch (e) { context.emitError(e); } if 
(resolvedTranslatedData) { const iterable: Iterable<[string, ILocaleFileData]> = resolvedTranslatedData instanceof Map ? resolvedTranslatedData.entries() : Object.entries(resolvedTranslatedData); for (const [resolvedLocaleName, resolvedLocaleData] of iterable) { if (resolvedLocaleData) { const translatedLocFileData: ReadonlyMap<string, string> = await normalizeLocalizedData( context, resolvedLocaleData ); this._addTranslations(resolvedLocaleName, localizedFileKey, translatedLocFileData); } } } } for (const [pseudolocaleName, pseudolocalizer] of this._pseudolocalizers) { const pseudolocFileData: Map<string, string> = new Map(); for (const [stringName, stringValue] of locFileData) { pseudolocFileData.set(stringName, pseudolocalizer(stringValue)); } this._addTranslations(pseudolocaleName, localizedFileKey, pseudolocFileData); } markEntity(context._module!, true); return resultObject; } /** * @public */ public getPlaceholder(localizedFileKey: string, stringName: string): IStringPlaceholder | undefined { const stringKey: string = `${localizedFileKey}?${stringName}`; return this.stringKeys.get(stringKey); } /** * @internal */ public getDataForSerialNumber(serialNumber: string): IStringPlaceholder | undefined { return this._stringPlaceholderMap.get(serialNumber); } private _addLocFileAndGetPlaceholders( localeName: string, localizedFileKey: string, localizedFileData: ReadonlyMap<string, string> ): Record<string, string> { const filesMap: Map<string, ReadonlyMap<string, string>> = this._resolvedLocalizedStrings.get( localeName )!; filesMap.set(localizedFileKey, localizedFileData); const resultObject: Record<string, string> = {}; for (const [stringName, stringValue] of localizedFileData) { const stringKey: string = `${localizedFileKey}?${stringName}`; let placeholder: IStringPlaceholder | undefined = this.stringKeys.get(stringKey); if (!placeholder) { // TODO: This may need to be a deterministic identifier to support watch / incremental compilation const suffix: string = 
`${this._stringPlaceholderCounter++}`; const values: Map<string, string> = new Map(); values.set(this._passthroughLocaleName, stringName); placeholder = { value: `${Constants.STRING_PLACEHOLDER_PREFIX}_\\_${Constants.STRING_PLACEHOLDER_LABEL}_${suffix}`, suffix, valuesByLocale: values, locFilePath: localizedFileKey, stringName }; this.stringKeys.set(stringKey, placeholder); this._stringPlaceholderMap.set(suffix, placeholder); } resultObject[stringName] = placeholder.value; placeholder.valuesByLocale.set(localeName, stringValue); } return resultObject; } private _addTranslations( localeName: string, localizedFileKey: string, localizedFileData: ReadonlyMap<string, string> ): void { for (const [stringName, stringValue] of localizedFileData) { const stringKey: string = `${localizedFileKey}?${stringName}`; const placeholder: IStringPlaceholder | undefined = this.stringKeys.get(stringKey); if (placeholder) { placeholder.valuesByLocale.set(localeName, stringValue); } } } private _initializeAndValidateOptions( compiler: Compiler, isWebpackDevServer: boolean ): { errors: WebpackError[]; warnings: WebpackError[] } { const errors: WebpackError[] = []; const warnings: WebpackError[] = []; const { WebpackError } = compiler.webpack; const { options: configuration } = compiler; const LOCALE_NAME_REGEX: RegExp = /[a-z-]/i; function ensureValidLocaleName(localeName: string): boolean { if (!localeName.match(LOCALE_NAME_REGEX)) { errors.push( new WebpackError( `Invalid locale name: ${localeName}. 
Locale names may only contain letters and hyphens.` ) ); return false; } else { return true; } } // START configuration if ( !configuration.output || !configuration.output.filename || typeof configuration.output.filename !== 'string' || configuration.output.filename.indexOf(Constants.LOCALE_FILENAME_TOKEN) === -1 ) { errors.push( new WebpackError( 'The configuration.output.filename property must be provided, must be a string, and must include ' + `the ${Constants.LOCALE_FILENAME_TOKEN} placeholder` ) ); } // END configuration // START misc options // START options.localizedData const { localizedData } = this._options; if (localizedData) { // START options.localizedData.passthroughLocale const { passthroughLocale } = localizedData; if (passthroughLocale) { const { usePassthroughLocale, passthroughLocaleName = 'passthrough' } = passthroughLocale; if (usePassthroughLocale) { this._passthroughLocaleName = passthroughLocaleName; this._resolvedLocalizedStrings.set(passthroughLocaleName, new Map()); } } // END options.localizedData.passthroughLocale // START options.localizedData.translatedStrings const resolveRelativeToContext: (relative: string) => string = ( configuration.context?.startsWith('/') ? path.posix.resolve : path.resolve ).bind(0, configuration.context!); const { translatedStrings } = localizedData; this._resolvedTranslatedStringsFromOptions.clear(); if (translatedStrings) { for (const [localeName, locale] of Object.entries(translatedStrings)) { if (this._resolvedLocalizedStrings.has(localeName)) { errors.push( new WebpackError( `The locale "${localeName}" appears multiple times. ` + 'There may be multiple instances with different casing.' 
) ); return { errors, warnings }; } if (!ensureValidLocaleName(localeName)) { return { errors, warnings }; } this._resolvedLocalizedStrings.set(localeName, new Map()); const resolvedFromOptionsForLocale: Map<string, ILocaleFileData> = new Map(); this._resolvedTranslatedStringsFromOptions.set(localeName, resolvedFromOptionsForLocale); for (const [locFilePath, locFileDataFromOptions] of Object.entries(locale)) { const normalizedLocFilePath: string = resolveRelativeToContext(locFilePath); if (resolvedFromOptionsForLocale.has(normalizedLocFilePath)) { errors.push( new WebpackError( `The localization file path "${locFilePath}" appears multiple times in locale ${localeName}. ` + 'There may be multiple instances with different casing.' ) ); return { errors, warnings }; } const normalizedLocFileDataFromOptions: ILocaleFileData = typeof locFileDataFromOptions === 'string' ? resolveRelativeToContext(locFileDataFromOptions) : locFileDataFromOptions; resolvedFromOptionsForLocale.set(normalizedLocFilePath, normalizedLocFileDataFromOptions); } } } // END options.localizedData.translatedStrings // START options.localizedData.defaultLocale const { defaultLocale } = localizedData; if (defaultLocale) { const { localeName, fillMissingTranslationStrings } = defaultLocale; if (localeName) { if (this._resolvedLocalizedStrings.has(localeName)) { errors.push(new WebpackError('The default locale is also specified in the translated strings.')); return { errors, warnings }; } else if (!ensureValidLocaleName(localeName)) { return { errors, warnings }; } this._resolvedLocalizedStrings.set(localeName, new Map()); this._defaultLocale = localeName; this._fillMissingTranslationStrings = !!fillMissingTranslationStrings; } else { errors.push(new WebpackError('Missing default locale name')); return { errors, warnings }; } } else { errors.push(new WebpackError('Missing default locale options.')); return { errors, warnings }; } // END options.localizedData.defaultLocale // START 
options.localizedData.pseudoLocales const { pseudolocales } = localizedData; if (pseudolocales) { for (const [pseudolocaleName, pseudoLocaleOpts] of Object.entries(pseudolocales)) { if (this._defaultLocale === pseudolocaleName) { errors.push( new WebpackError(`A pseudolocale (${pseudolocaleName}) name is also the default locale name.`) ); return { errors, warnings }; } if (this._resolvedLocalizedStrings.has(pseudolocaleName)) { errors.push( new WebpackError( `A pseudolocale (${pseudolocaleName}) name is also specified in the translated strings.` ) ); return { errors, warnings }; } this._pseudolocalizers.set(pseudolocaleName, getPseudolocalizer(pseudoLocaleOpts)); this._resolvedLocalizedStrings.set(pseudolocaleName, new Map<string, Map<string, string>>()); } } // END options.localizedData.pseudoLocales } else if (!isWebpackDevServer) { throw new Error('Localized data must be provided unless webpack dev server is running.'); } // END options.localizedData // START options.noStringsLocaleName const { noStringsLocaleName } = this._options; if ( noStringsLocaleName === undefined || noStringsLocaleName === null || !ensureValidLocaleName(noStringsLocaleName) ) { this._noStringsLocaleName = 'none'; } else { this._noStringsLocaleName = noStringsLocaleName; } // END options.noStringsLocaleName // START options.formatLocaleForFilename const { formatLocaleForFilename = (localeName: string) => localeName } = this._options; this._formatLocaleForFilename = formatLocaleForFilename; // END options.formatLocaleForFilename return { errors, warnings }; } } function _chunkHasLocalizedModules( chunkGraph: ChunkGraph, chunk: Chunk, runtimeLocaleExpression: string | undefined ): boolean { let chunkHasAnyLocModules: boolean | undefined = getMark(chunk); if (chunkHasAnyLocModules === undefined) { chunkHasAnyLocModules = false; const candidateModules: Iterable<Module> | undefined = chunkGraph.getChunkModulesIterableBySourceType( chunk, 'javascript' ); if (candidateModules) { outer: for 
(const module of candidateModules) { const moduleMark: boolean | undefined = getMark(module); if (moduleMark) { chunkHasAnyLocModules = true; break; } else if (moduleMark === false) { continue; } // Is this a concatenated module? const { _modules: modules } = module as { _modules?: Iterable<Module> }; if (modules) { for (const nestedModule of modules) { if (getMark(nestedModule)) { markEntity(module, true); chunkHasAnyLocModules = true; break outer; } } markEntity(module, false); } } } // If this chunk doesn't directly contain any localized resources, it still // needs to be localized if it's an entrypoint chunk (i.e. - it has a runtime) // and it loads localized async chunks. // In that case, the generated chunk URL generation code needs to contain // the locale name. if (!chunkHasAnyLocModules && !runtimeLocaleExpression && chunk.hasRuntime()) { for (const asyncChunk of chunk.getAllAsyncChunks()) { if (_chunkHasLocalizedModules(chunkGraph, asyncChunk, runtimeLocaleExpression)) { chunkHasAnyLocModules = true; break; } } } markEntity(chunk, chunkHasAnyLocModules); } return chunkHasAnyLocModules; } function convertLocalizationFileToLocData(locFile: ILocalizationFile): ReadonlyMap<string, string> { const locFileData: Map<string, string> = new Map(); for (const [stringName, locFileEntry] of Object.entries(locFile)) { locFileData.set(stringName, locFileEntry.value); } return locFileData; } async function normalizeLocalizedData( context: LoaderContext<{}>, localizedData: ILocaleFileData ): Promise<ReadonlyMap<string, string>> { if (typeof localizedData === 'string') { // The value is the path to a file. 
Add it as a file dependency context.addDependency(localizedData); const content: string = await new Promise((resolve, reject) => { // Use context.fs so that the plugin is compatible with overriding compiler.inputFileSystem context.fs.readFile(localizedData, (err, data) => { if (err) { return reject(err); } else if (!data) { return reject(new Error(`No data in ${localizedData}`)); } resolve(data.toString()); }); }); const localizationFile: ILocalizationFile = parseResJson({ filePath: localizedData, content }); return convertLocalizationFileToLocData(localizationFile); } else { return localizedData instanceof Map ? localizedData : new Map(Object.entries(localizedData)); } } ```
```go // This file is part of go-ethereum. // // go-ethereum is free software: you can redistribute it and/or modify // (at your option) any later version. // // go-ethereum is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // // along with go-ethereum. If not, see <path_to_url // Package utils contains internal helper functions for go-ethereum commands. package utils import ( "compress/gzip" "fmt" "io" "os" "os/signal" "runtime" "strings" "syscall" "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/rlp" "gopkg.in/urfave/cli.v1" ) const ( importBatchSize = 2500 ) // Fatalf formats a message to standard error and exits the program. // The message is also printed to standard output if standard error // is redirected to a different file. func Fatalf(format string, args ...interface{}) { w := io.MultiWriter(os.Stdout, os.Stderr) if runtime.GOOS == "windows" { // The SameFile check below doesn't work on Windows. // stdout is unlikely to get redirected though, so just print there. w = os.Stdout } else { outf, _ := os.Stdout.Stat() errf, _ := os.Stderr.Stat() if outf != nil && errf != nil && os.SameFile(outf, errf) { w = os.Stderr } } fmt.Fprintf(w, "Fatal: "+format+"\n", args...) 
os.Exit(1) } func StartNode(ctx *cli.Context, stack *node.Node) { if err := stack.Start(); err != nil { Fatalf("Error starting protocol stack: %v", err) } go func() { sigc := make(chan os.Signal, 1) signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM) defer signal.Stop(sigc) minFreeDiskSpace := ethconfig.Defaults.TrieDirtyCache if ctx.GlobalIsSet(MinFreeDiskSpaceFlag.Name) { minFreeDiskSpace = ctx.GlobalInt(MinFreeDiskSpaceFlag.Name) } else if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) { minFreeDiskSpace = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100 } if minFreeDiskSpace > 0 { go monitorFreeDiskSpace(sigc, stack.InstanceDir(), uint64(minFreeDiskSpace)*1024*1024) } <-sigc log.Info("Got interrupt, shutting down...") go stack.Close() for i := 10; i > 0; i-- { <-sigc if i > 1 { log.Warn("Already shutting down, interrupt more to panic.", "times", i-1) } } debug.Exit() // ensure trace and CPU profile data is flushed. debug.LoudPanic("boom") }() } func monitorFreeDiskSpace(sigc chan os.Signal, path string, freeDiskSpaceCritical uint64) { for { freeSpace, err := getFreeDiskSpace(path) if err != nil { log.Warn("Failed to get free disk space", "path", path, "err", err) break } if freeSpace < freeDiskSpaceCritical { log.Error("Low disk space. Gracefully shutting down Geth to prevent database corruption.", "available", common.StorageSize(freeSpace)) sigc <- syscall.SIGTERM break } else if freeSpace < 2*freeDiskSpaceCritical { log.Warn("Disk space is running low. Geth will shutdown if disk space runs below critical level.", "available", common.StorageSize(freeSpace), "critical_level", common.StorageSize(freeDiskSpaceCritical)) } time.Sleep(60 * time.Second) } } func ImportChain(chain *core.BlockChain, fn string) error { // Watch for Ctrl-C while the import is running. // If a signal is received, the import will stop at the next batch. 
interrupt := make(chan os.Signal, 1) stop := make(chan struct{}) signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM) defer signal.Stop(interrupt) defer close(interrupt) go func() { if _, ok := <-interrupt; ok { log.Info("Interrupted during import, stopping at next batch") } close(stop) }() checkInterrupt := func() bool { select { case <-stop: return true default: return false } } log.Info("Importing blockchain", "file", fn) // Open the file handle and potentially unwrap the gzip stream fh, err := os.Open(fn) if err != nil { return err } defer fh.Close() var reader io.Reader = fh if strings.HasSuffix(fn, ".gz") { if reader, err = gzip.NewReader(reader); err != nil { return err } } stream := rlp.NewStream(reader, 0) // Run actual the import. blocks := make(types.Blocks, importBatchSize) n := 0 for batch := 0; ; batch++ { // Load a batch of RLP blocks. if checkInterrupt() { return fmt.Errorf("interrupted") } i := 0 for ; i < importBatchSize; i++ { var b types.Block if err := stream.Decode(&b); err == io.EOF { break } else if err != nil { return fmt.Errorf("at block %d: %v", n, err) } // don't import first block if b.NumberU64() == 0 { i-- continue } blocks[i] = &b n++ } if i == 0 { break } // Import the batch. 
if checkInterrupt() { return fmt.Errorf("interrupted") } missing := missingBlocks(chain, blocks[:i]) if len(missing) == 0 { log.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash()) continue } if _, err := chain.InsertChain(missing); err != nil { return fmt.Errorf("invalid block %d: %v", n, err) } } return nil } func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block { head := chain.CurrentBlock() for i, block := range blocks { // If we're behind the chain head, only check block, state is available at head if head.NumberU64() > block.NumberU64() { if !chain.HasBlock(block.Hash(), block.NumberU64()) { return blocks[i:] } continue } // If we're above the chain head, state availability is a must if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) { return blocks[i:] } } return nil } // ExportChain exports a blockchain into the specified file, truncating any data // already present in the file. func ExportChain(blockchain *core.BlockChain, fn string) error { log.Info("Exporting blockchain", "file", fn) // Open the file handle and potentially wrap with a gzip stream fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) if err != nil { return err } defer fh.Close() var writer io.Writer = fh if strings.HasSuffix(fn, ".gz") { writer = gzip.NewWriter(writer) defer writer.(*gzip.Writer).Close() } // Iterate over the blocks and export them if err := blockchain.Export(writer); err != nil { return err } log.Info("Exported blockchain", "file", fn) return nil } // ExportAppendChain exports a blockchain into the specified file, appending to // the file if data already exists in it. 
func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error { log.Info("Exporting blockchain", "file", fn) // Open the file handle and potentially wrap with a gzip stream fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm) if err != nil { return err } defer fh.Close() var writer io.Writer = fh if strings.HasSuffix(fn, ".gz") { writer = gzip.NewWriter(writer) defer writer.(*gzip.Writer).Close() } // Iterate over the blocks and export them if err := blockchain.ExportN(writer, first, last); err != nil { return err } log.Info("Exported blockchain to", "file", fn) return nil } // ImportPreimages imports a batch of exported hash preimages into the database. func ImportPreimages(db ethdb.Database, fn string) error { log.Info("Importing preimages", "file", fn) // Open the file handle and potentially unwrap the gzip stream fh, err := os.Open(fn) if err != nil { return err } defer fh.Close() var reader io.Reader = fh if strings.HasSuffix(fn, ".gz") { if reader, err = gzip.NewReader(reader); err != nil { return err } } stream := rlp.NewStream(reader, 0) // Import the preimages in batches to prevent disk trashing preimages := make(map[common.Hash][]byte) for { // Read the next entry and ensure it's not junk var blob []byte if err := stream.Decode(&blob); err != nil { if err == io.EOF { break } return err } // Accumulate the preimages and flush when enough ws gathered preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob) if len(preimages) > 1024 { rawdb.WritePreimages(db, preimages) preimages = make(map[common.Hash][]byte) } } // Flush the last batch preimage data if len(preimages) > 0 { rawdb.WritePreimages(db, preimages) } return nil } // ExportPreimages exports all known hash preimages into the specified file, // truncating any data already present in the file. 
func ExportPreimages(db ethdb.Database, fn string) error { log.Info("Exporting preimages", "file", fn) // Open the file handle and potentially wrap with a gzip stream fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) if err != nil { return err } defer fh.Close() var writer io.Writer = fh if strings.HasSuffix(fn, ".gz") { writer = gzip.NewWriter(writer) defer writer.(*gzip.Writer).Close() } // Iterate over the preimages and export them it := db.NewIterator([]byte("secure-key-"), nil) defer it.Release() for it.Next() { if err := rlp.Encode(writer, it.Value()); err != nil { return err } } log.Info("Exported preimages", "file", fn) return nil } ```
```java
package com.ctrip.xpipe.redis.checker.healthcheck.actions.keeper;

import com.ctrip.xpipe.redis.checker.healthcheck.KeeperHealthCheckInstance;
import com.ctrip.xpipe.redis.checker.healthcheck.actions.redisstats.AbstractInstanceStatsCheckAction;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.ScheduledExecutorService;

/**
 * Base class for stats-check actions that target a keeper instance.
 *
 * It only specializes {@link AbstractInstanceStatsCheckAction}'s instance type
 * parameter to {@link KeeperHealthCheckInstance}; all behavior lives in the
 * parent class.
 *
 * @param <T> first stats type parameter forwarded to the parent action
 * @param <K> second stats type parameter forwarded to the parent action
 */
public abstract class KeeperStatsCheckAction<T, K> extends AbstractInstanceStatsCheckAction<T, K, KeeperHealthCheckInstance> {

    /**
     * @param scheduled scheduler forwarded to the parent action
     * @param instance  the keeper health-check instance this action inspects
     * @param executors executor pool forwarded to the parent action
     */
    public KeeperStatsCheckAction(ScheduledExecutorService scheduled, KeeperHealthCheckInstance instance, ExecutorService executors) {
        super(scheduled, instance, executors);
    }
}
```
Robertson Quay is a wharf near the source of the Singapore River. It is the largest and most upstream of the three wharfs (the other two being Boat Quay and Clarke Quay) on the river and is named after a municipal counsellor, Dr J Murray Robertson. It now has al fresco dining as well as arts and culture. History The upper reaches of the Singapore River were originally mud flats and swamps. As the population and commerce of Singapore increased, the area was reclaimed in the mid nineteenth century. In the 19th century, the swamps were reclaimed and warehouses and boatyards were constructed in the 1880s in both European and Chinese styles. Children would jump into the waters to cool down in the afternoons. The Quay is named after the municipal counsellor Dr J Murray Robertson. In 1977, then Prime Minister Lee Kuan Yew called for a large $170 million initiative, lasting a decade, to clean up the Singapore River. This involved clearing away the rubbish as well as dredging the river. The lighterage industry eventually relocated to Pasir Panjang. In the 1990s, the Urban Redevelopment Authority rezoned the area for new development of residential, hotel, and commercial use. It is now popular among joggers as well as families. It now has al fresco dining as well as arts and culture. On 16 May 2020, during the COVID-19 pandemic in Singapore, Robertson Quay was the site of a significant breach of rules designed to curb the spread of COVID-19 by numerous parties. On 2 June 2020, seven people were charged for breaching rules. They were ultimately fined between S$8,000 and S$9,000 each, and six of them had their work passes revoked, permanently debarring them from working in Singapore in the future. On 16 June, two British citizens who were also permanent residents of Singapore were charged in relation to the same gathering. 
The result of their case was not published, but one was still posting photos on Instagram from Singapore after the case was completed, suggesting he had not been ejected from Singapore. Geography Robertson Quay occupies 51 hectares of the Singapore River planning area. It starts roughly midway, or about 1.5 km from the river mouth, and terminates at the river source near the Kim Seng Bridge. Gallery Notes Singapore River Wharves Geography of Singapore
```javascript
import Vue from 'vue';
import { addClass, removeClass } from 'element-ui/src/utils/dom';

// Singleton manager for popup/dialog instances: tracks registered popups,
// hands out z-index values, and maintains a single shared modal backdrop DOM
// node plus a stack of the modals currently open.

let hasModal = false;        // whether the backdrop div already existed when last fetched
let hasInitZIndex = false;   // whether the lazy zIndex getter has run once
let zIndex;                  // current z-index counter (see defineProperty below)

// Lazily create (or return) the single shared backdrop <div>. Touch scrolling
// on it is suppressed, and clicks are forwarded to doOnModalClick.
const getModal = function() {
  if (Vue.prototype.$isServer) return;
  let modalDom = PopupManager.modalDom;
  if (modalDom) {
    hasModal = true;
  } else {
    hasModal = false;
    modalDom = document.createElement('div');
    PopupManager.modalDom = modalDom;

    modalDom.addEventListener('touchmove', function(event) {
      event.preventDefault();
      event.stopPropagation();
    });

    modalDom.addEventListener('click', function() {
      PopupManager.doOnModalClick && PopupManager.doOnModalClick();
    });
  }

  return modalDom;
};

// Registered popup instances, keyed by id.
const instances = {};

const PopupManager = {
  modalFade: true,

  getInstance: function(id) {
    return instances[id];
  },

  register: function(id, instance) {
    if (id && instance) {
      instances[id] = instance;
    }
  },

  deregister: function(id) {
    if (id) {
      instances[id] = null;
      delete instances[id];
    }
  },

  // Post-increment: returns the current z-index, then bumps the counter.
  nextZIndex: function() {
    return PopupManager.zIndex++;
  },

  // Stack of currently-open modals ({ id, zIndex, modalClass }), topmost last.
  modalStack: [],

  // Backdrop click: close the topmost modal if it allows click-to-close.
  doOnModalClick: function() {
    const topItem = PopupManager.modalStack[PopupManager.modalStack.length - 1];
    if (!topItem) return;

    const instance = PopupManager.getInstance(topItem.id);
    if (instance && instance.closeOnClickModal) {
      instance.close();
    }
  },

  // Show the backdrop for popup `id` at `zIndex`, attaching it next to `dom`
  // when possible (falling back to document.body), and push onto the stack.
  openModal: function(id, zIndex, dom, modalClass, modalFade) {
    if (Vue.prototype.$isServer) return;
    if (!id || zIndex === undefined) return;
    this.modalFade = modalFade;

    const modalStack = this.modalStack;

    // Already open for this id: nothing to do.
    for (let i = 0, j = modalStack.length; i < j; i++) {
      const item = modalStack[i];
      if (item.id === id) {
        return;
      }
    }

    const modalDom = getModal();

    addClass(modalDom, 'v-modal');
    // Only play the enter animation when the backdrop is freshly created.
    if (this.modalFade && !hasModal) {
      addClass(modalDom, 'v-modal-enter');
    }
    if (modalClass) {
      let classArr = modalClass.trim().split(/\s+/);
      classArr.forEach(item => addClass(modalDom, item));
    }
    setTimeout(() => {
      removeClass(modalDom, 'v-modal-enter');
    }, 200);

    // nodeType 11 is a DocumentFragment — the backdrop can't live there.
    if (dom && dom.parentNode && dom.parentNode.nodeType !== 11) {
      dom.parentNode.appendChild(modalDom);
    } else {
      document.body.appendChild(modalDom);
    }

    if (zIndex) {
      modalDom.style.zIndex = zIndex;
    }
    modalDom.tabIndex = 0;
    modalDom.style.display = '';

    this.modalStack.push({ id: id, zIndex: zIndex, modalClass: modalClass });
  },

  // Pop popup `id` off the stack; when the stack empties, fade out and remove
  // the backdrop, otherwise drop the backdrop to the new topmost z-index.
  closeModal: function(id) {
    const modalStack = this.modalStack;
    const modalDom = getModal();

    if (modalStack.length > 0) {
      const topItem = modalStack[modalStack.length - 1];
      if (topItem.id === id) {
        if (topItem.modalClass) {
          let classArr = topItem.modalClass.trim().split(/\s+/);
          classArr.forEach(item => removeClass(modalDom, item));
        }

        modalStack.pop();
        if (modalStack.length > 0) {
          modalDom.style.zIndex = modalStack[modalStack.length - 1].zIndex;
        }
      } else {
        // Closing a modal that is not on top: just remove its stack entry.
        for (let i = modalStack.length - 1; i >= 0; i--) {
          if (modalStack[i].id === id) {
            modalStack.splice(i, 1);
            break;
          }
        }
      }
    }

    if (modalStack.length === 0) {
      if (this.modalFade) {
        addClass(modalDom, 'v-modal-leave');
      }
      setTimeout(() => {
        // Re-check: another modal may have opened during the 200ms fade.
        if (modalStack.length === 0) {
          if (modalDom.parentNode) modalDom.parentNode.removeChild(modalDom);
          modalDom.style.display = 'none';
          PopupManager.modalDom = undefined;
        }
        removeClass(modalDom, 'v-modal-leave');
      }, 200);
    }
  }
};

// Lazy z-index seed: first read picks up the app-level override
// (Vue.prototype.$ELEMENT.zIndex) or defaults to 2000.
Object.defineProperty(PopupManager, 'zIndex', {
  configurable: true,
  get() {
    if (!hasInitZIndex) {
      zIndex = zIndex || (Vue.prototype.$ELEMENT || {}).zIndex || 2000;
      hasInitZIndex = true;
    }
    return zIndex;
  },
  set(value) {
    zIndex = value;
  }
});

// Returns the popup instance on top of the modal stack, or undefined.
const getTopPopup = function() {
  if (Vue.prototype.$isServer) return;
  if (PopupManager.modalStack.length > 0) {
    const topPopup = PopupManager.modalStack[PopupManager.modalStack.length - 1];
    if (!topPopup) return;
    const instance = PopupManager.getInstance(topPopup.id);

    return instance;
  }
};

if (!Vue.prototype.$isServer) {
  // handle `esc` key when the popup is shown
  window.addEventListener('keydown', function(event) {
    if (event.keyCode === 27) {
      const topPopup = getTopPopup();

      if (topPopup && topPopup.closeOnPressEscape) {
        // Prefer handleClose, then handleAction('cancel'), then plain close.
        topPopup.handleClose
          ? topPopup.handleClose()
          : (topPopup.handleAction ? topPopup.handleAction('cancel') : topPopup.close());
      }
    }
  });
}

export default PopupManager;
```
```javascript
/** * For commercial licenses see path_to_url * * Version: 5.4.1 (2020-07-08) */
/* NOTE(review): minified vendor bundle — the TinyMCE "emoticons" plugin.
   It registers an "emoticons" toolbar button, menu item and a ":"-triggered
   autocompleter, and loads the emoji database through tinymce.Resource.
   Do not hand-edit; change the unminified plugin source instead. */
!function(m){"use strict";var n,t,e,u,o=tinymce.util.Tools.resolve("tinymce.PluginManager"),i=function(n){return function(){return n}},a=i(!1),c=i(!0),r=function(){return l},l=(n=function(n){return n.isNone()},{fold:function(n,t){return n()},is:a,isSome:a,isNone:c,getOr:e=function(n){return n},getOrThunk:t=function(n){return n()},getOrDie:function(n){throw new Error(n||"error: getOrDie called on none.")},getOrNull:i(null),getOrUndefined:i(undefined),or:e,orThunk:t,map:r,each:function(){},bind:r,exists:a,forall:c,filter:r,equals:n,equals_:n,toArray:function(){return[]},toString:i("none()")}),s=function(e){var n=i(e),t=function(){return r},o=function(n){return n(e)},r={fold:function(n,t){return t(e)},is:function(n){return e===n},isSome:c,isNone:a,getOr:n,getOrThunk:n,getOrDie:n,getOrNull:n,getOrUndefined:n,or:t,orThunk:t,map:function(n){return s(n(e))},each:function(n){n(e)},bind:o,exists:o,forall:o,filter:function(n){return n(e)?r:l},toArray:function(){return[e]},toString:function(){return"some("+e+")"},equals:function(n){return n.is(e)},equals_:function(n,t){return n.fold(a,function(n){return t(e,n)})}};return r},g={some:s,none:r,from:function(n){return null===n||n===undefined?l:s(n)}},f=function(n,t){return-1!==n.indexOf(t)},d=function(n,t){return f(n.title.toLowerCase(),t)||function(n,t){for(var e=0,o=n.length;e<o;e++){if(t(n[e],e))return!0}return!1}(n.keywords,function(n){return f(n.toLowerCase(),t)})},y=function(n,t,e){for(var o=[],r=t.toLowerCase(),i=e.fold(function(){return a},function(t){return function(n){return t<=n}}),u=0;u<n.length&&(0!==t.length&&!d(n[u],r)||(o.push({value:n[u]["char"],text:n[u].title,icon:n[u]["char"]}),!i(o.length)));u++);return o},h=function(n){var t=n;return{get:function(){return t},set:function(n){t=n}}},v=function(){return(v=Object.assign||function(n){for(var t,e=1,o=arguments.length;e<o;e++)for(var r in
t=arguments[e])Object.prototype.hasOwnProperty.call(t,r)&&(n[r]=t[r]);return n}).apply(this,arguments)},p=Object.prototype.hasOwnProperty,b=(u=function(n,t){return t},function(){for(var n=new Array(arguments.length),t=0;t<n.length;t++)n[t]=arguments[t];if(0===n.length)throw new Error("Can't merge zero objects");for(var e={},o=0;o<n.length;o++){var r=n[o];for(var i in r)p.call(r,i)&&(e[i]=u(e[i],r[i]))}return e}),w=Object.keys,O=Object.hasOwnProperty,C=function(n,t){for(var e=w(n),o=0,r=e.length;o<r;o++){var i=e[o];t(n[i],i)}},k=function(n,o){var r={};return C(n,function(n,t){var e=o(n,t);r[e.k]=e.v}),r},j=tinymce.util.Tools.resolve("tinymce.Resource"),A=tinymce.util.Tools.resolve("tinymce.util.Delay"),T=tinymce.util.Tools.resolve("tinymce.util.Promise"),_="All",D={symbols:"Symbols",people:"People",animals_and_nature:"Animals and Nature",food_and_drink:"Food and Drink",activity:"Activity",travel_and_places:"Travel and Places",objects:"Objects",flags:"Flags",user:"User Defined"},P=function(n,t){return e=n,o=t,O.call(e,o)?n[t]:t;var e,o},x=function(n){var e,t=n.getParam("emoticons_append",{},"object");return e=function(n){return v({keywords:[],category:"user"},n)},k(t,function(n,t){return{k:t,v:e(n,t)}})},L=function(o,r,n){var u=h(g.none()),a=h(g.none());o.on("init",function(){j.load(n,r).then(function(n){var t,r,i,e=x(o);t=b(n,e),r={},i=[],C(t,function(n,t){var e={title:t,keywords:n.keywords,"char":n["char"],category:P(D,n.category)},o=r[e.category]!==undefined?r[e.category]:[];r[e.category]=o.concat([e]),i.push(e)}),u.set(g.some(r)),a.set(g.some(i))},function(n){m.console.log("Failed to load emoticons: "+n),u.set(g.some({})),a.set(g.some([]))})});var e=function(){return a.get().getOr([])},i=function(){return u.get().isSome()&&a.get().isSome()};return{listCategories:function(){return[_].concat(w(u.get().getOr({})))},hasLoaded:i,waitForLoad:function(){return i()?T.resolve(!0):new T(function(n,t){var
e=15,o=A.setInterval(function(){i()?(A.clearInterval(o),n(!0)):--e<0&&(m.console.log("Could not load emojis from url: "+r),A.clearInterval(o),t(!1))},100)})},listAll:e,listCategory:function(t){return t===_?e():u.get().bind(function(n){return g.from(n[t])}).getOr([])}}},S="pattern",N=function(r,u){var e,o,i,n={pattern:"",results:y(u.listAll(),"",g.some(300))},a=h(_),c=(e=function(n){var t,e,o,r,i;e=(t=n).getData(),o=a.get(),r=u.listCategory(o),i=y(r,e[S],o===_?g.some(300):g.none()),t.setData({results:i})},o=200,i=null,{cancel:function(){null!==i&&(m.clearTimeout(i),i=null)},throttle:function(){for(var n=[],t=0;t<arguments.length;t++)n[t]=arguments[t];null!==i&&m.clearTimeout(i),i=m.setTimeout(function(){e.apply(null,n),i=null},o)}}),t={label:"Search",type:"input",name:S},l={type:"collection",name:"results"},s=function(){return{title:"Emoticons",size:"normal",body:{type:"tabpanel",tabs:function(n,t){for(var e=n.length,o=new Array(e),r=0;r<e;r++){var i=n[r];o[r]=t(i,r)}return o}(u.listCategories(),function(n){return{title:n,name:n,items:[t,l]}})},initialData:n,onTabChange:function(n,t){a.set(t.newTabName),c.throttle(n)},onChange:c.throttle,onAction:function(n,t){var e,o;"results"===t.name&&(e=r,o=t.value,e.insertContent(o),n.close())},buttons:[{type:"cancel",text:"Close",primary:!0}]}},f=r.windowManager.open(s());f.focus(S),u.hasLoaded()||(f.block("Loading emoticons..."),u.waitForLoad().then(function(){f.redial(s()),c.throttle(f),f.focus(S),f.unblock()})["catch"](function(n){f.redial({title:"Emoticons",body:{type:"panel",items:[{type:"alertbanner",level:"error",icon:"warning",text:"<p>Could not load emoticons</p>"}]},buttons:[{type:"cancel",text:"Close",primary:!0}],initialData:{pattern:"",results:[]}}),f.focus(S),f.unblock()}))};!function E(){o.add("emoticons",function(n,t){var
e,o,r,i,u,a,c,l=(o=t,(e=n).getParam("emoticons_database_url",o+"/js/emojis"+e.suffix+".js")),s=n.getParam("emoticons_database_id","tinymce.plugins.emoticons","string"),f=L(n,l,s);i=f,u=function(){return N(r,i)},(r=n).ui.registry.addButton("emoticons",{tooltip:"Emoticons",icon:"emoji",onAction:u}),r.ui.registry.addMenuItem("emoticons",{text:"Emoticons...",icon:"emoji",onAction:u}),c=f,(a=n).ui.registry.addAutocompleter("emoticons",{ch:":",columns:"auto",minChars:2,fetch:function(t,e){return c.waitForLoad().then(function(){var n=c.listAll();return y(n,t,g.some(e))})},onAction:function(n,t,e){a.selection.setRng(t),a.insertContent(e),n.hide()}})})}()}(window);
```
Geroa Socialverdes (), officially Geroa Socialverdes de Navarra en Europa/Nafarroako Sozialberdeak Europan (, GSB/GSV) is a regional party based in Navarre founded in September 2020 by former president of Navarre Uxue Barkos and integrated within the Geroa Bai coalition. References Geroa Bai 2020 establishments in Spain Political parties established in 2020 Political parties in Navarre
Konstantin Ognjanović (Serbian Cyrillic: Константин Огњановић; born 5 May 1973) is a Serbian former football player. During his career he played for FK Budućnost Podgorica, Red Star Belgrade, FK Zemun, FK Vojvodina, FK Milicionar, OFK Beograd, mostly in First League of FR Yugoslavia, and since January 2000, in Germany, with Greuther Fürth, playing in the 2. Bundesliga, and 1. FC Union Berlin in the Regionalliga Nord. External links Profile at Greuther Fuerth site. Konstantin Ognjanovic at immerunioner.de 1973 births Living people Footballers from Belgrade Serbian men's footballers Serbian expatriate men's footballers FK Budućnost Podgorica players Red Star Belgrade footballers FK Zemun players FK Vojvodina players FK Milicionar players OFK Beograd players SpVgg Greuther Fürth players 1. FC Union Berlin players Expatriate men's footballers in Germany Men's association football forwards
Côn Đảo Prison (), also Côn Sơn Prison, is a prison on Côn Sơn Island (also known as Côn Lôn), the largest island of the Côn Đảo archipelago in southern Vietnam (today it is in Bà Rịa–Vũng Tàu province). The prison was built in 1861 by the French colonists to jail those considered especially dangerous to the colonial government. Many of the high-ranking leaders of Vietnam were detained here. It is ranked a special historical relic of national importance by the government of Vietnam. The most famous sites in this prison are the "tiger cages" (). The French tiger cages cover an area of 5.475 m², within which each cell occupies 1.408 m², solariums occupy 1.873 m², and other spaces occupy 2.194 m². The prison includes 120 cells. The prison was closed after the end of the Vietnam War and opened for visitors soon after. History French era In 1861, the French colonial government established a prison on the island to house prisoners who had committed especially severe crimes. After the turn of the century, the prison held an increasingly large population of political prisoners. In 1954, it was turned over to the South Vietnamese government, who continued to use it for the same purpose. Notable prisoners held at Côn Sơn included Phan Châu Trinh from 1908-1911, Tôn Đức Thắng, Phạm Văn Đồng and Lê Đức Thọ in the 1930s, and Nguyễn An Ninh who died in the prison on 14 August 1943, possibly killed by his jailers for fear that he might be used politically by the Japanese. Võ Thị Sáu was executed at the prison in 1952 (though she was imprisoned at the police post outside of the prison). Not far from the prison is Hàng Dương Cemetery, where some of the prisoners who died between 1941 and 1975 were buried. Vietnam War During the Vietnam War, prisoners who had been held at the prison in the 1960s and 70s were abused and tortured. In July 1970, two U.S. Congressional representatives, Augustus Hawkins and William Anderson, visited the prison. 
They were accompanied by Tom Harkin (then an aide), translator Don Luce, and USAID Office of Public Safety Director Frank Walton. When the delegation arrived at the prison, they departed from the planned tour, guided by a map drawn by a former detainee. The map led to the door of a building, which was opened from the inside by a guard when he heard the people outside the door talking. Inside, they found prisoners being shackled within cramped "tiger cages". Prisoners began crying out for water when the delegation walked in. They had sores and bruises, and some were mutilated. Harkin took photos of the scene. The photos were published in Life magazine on 17 July 1970. Tiger Cage Tiger Cage is the name of the prison constructed by the French. The Republic of Vietnam took control of the cage in order to detain Viet Minh political prisoners, the Liberation Army of South Vietnam and people who fought against France and America during the war era. French-made Tiger Cage Construction year: 1940 Total area: 5.475 m² Each cell's area: 1.408 m² "Sunbathe" cell's area: 1.873 m² Empty space: 2.194 m² This cage consists of: 120 solitary confinement rooms (divided into 2 regions, each region having 60 cells) Basic characteristics: The top of each cell is fitted with an iron grid, and a walkway for wardens runs along the border between the two regions. In addition, 60 cells have no ceiling; these are called "sunbathe" cells, which were used to torture prisoners by making them "sunbathe" themselves. 
Notes Citations References used External links Trang tin tức Côn Đảo - Du lịch Côn Đảo - Lịch sử Côn Đảo Giới thiệu Côn Đảo Di tích lịch sử - Nhà tù Côn Đảo Khái quát địa lý nhân văn Côn Đảo Kì 1 : Bất khuất Kì 2 : Một người đổ máu, trăm người rơi nước mắt Kì 3 : Những cuộc đào thoát Kì 4 : Ngày giải phóng Kì 5 : Ở lại với Côn Đảo The Tiger Cages of Con Son Buildings and structures in Bà Rịa-Vũng Tàu province Defunct prisons in Vietnam War crimes in Vietnam Vietnam War prisoner-of-war camps Vietnam War sites Torture in Vietnam
```typescript
// tsup bundler configuration for this package: extends the repository-wide
// base config and declares the two bundle entry points (public index and
// the internal module).
import { defineConfig } from 'tsup';

import baseConfig from '../../tsup.base.config';

export default defineConfig({
  ...baseConfig,
  entry: {
    'botframework-webchat-component': './src/index.ts',
    'botframework-webchat-component.internal': './src/internal.ts'
  }
});
```
```python
#!/usr/bin/env python3
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# Generates mjsunit test files exercising every interesting combination of
# try/catch/finally, return/throw placement and deoptimization inside an
# inlined callee. Output goes to stdout or to sharded files (see __main__).

from collections import namedtuple
import textwrap
import sys

SHARD_FILENAME_TEMPLATE = "test/mjsunit/compiler/inline-exception-{shard}.js"
# Generates 2 files. Found by trial and error.
SHARD_SIZE = 97

# JavaScript emitted at the top of every generated file: harness helpers plus
# the callees (return / throw, optimized / %NeverOptimizeFunction'd,
# constructor-based and accessor-based) that the generated tests inline.
# NOTE(review): line breaks inside this string were reconstructed; they affect
# only the layout of the generated JS, which is written out via write().
PREAMBLE = """
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Flags: --allow-natives-syntax --no-always-opt

// This test file was generated by tools/gen-inlining-tests.py .

// Global variables
var deopt = undefined; // either true or false
var counter = 0;

function resetState() {
  counter = 0;
}

function warmUp(f) {
  try {
    f();
  } catch (ex) {
    // ok
  }
  try {
    f();
  } catch (ex) {
    // ok
  }
}

function resetOptAndAssertResultEquals(expected, f) {
  warmUp(f);
  resetState();
  // %DebugPrint(f);
  eval("'dont optimize this function itself please, but do optimize f'");
  %OptimizeFunctionOnNextCall(f);
  assertEquals(expected, f());
}

function resetOptAndAssertThrowsWith(expected, f) {
  warmUp(f);
  resetState();
  // %DebugPrint(f);
  eval("'dont optimize this function itself please, but do optimize f'");
  %OptimizeFunctionOnNextCall(f);
  try {
    var result = f();
    fail("resetOptAndAssertThrowsWith",
        "exception: " + expected,
        "result: " + result);
  } catch (ex) {
    assertEquals(expected, ex);
  }
}

function increaseAndReturn15() {
  if (deopt) %DeoptimizeFunction(f);
  counter++;
  return 15;
}

function increaseAndThrow42() {
  if (deopt) %DeoptimizeFunction(f);
  counter++;
  throw 42;
}

function increaseAndReturn15_noopt_inner() {
  if (deopt) %DeoptimizeFunction(f);
  counter++;
  return 15;
}

%NeverOptimizeFunction(increaseAndReturn15_noopt_inner);

function increaseAndThrow42_noopt_inner() {
  if (deopt) %DeoptimizeFunction(f);
  counter++;
  throw 42;
}

%NeverOptimizeFunction(increaseAndThrow42_noopt_inner);

// Alternative 1

function returnOrThrow(doReturn) {
  if (doReturn) {
    return increaseAndReturn15();
  } else {
    return increaseAndThrow42();
  }
}

// Alternative 2

function increaseAndReturn15_calls_noopt() {
  return increaseAndReturn15_noopt_inner();
}

function increaseAndThrow42_calls_noopt() {
  return increaseAndThrow42_noopt_inner();
}

// Alternative 3.
// When passed either {increaseAndReturn15} or {increaseAndThrow42}, it acts
// as the other one.

function invertFunctionCall(f) {
  var result;
  try {
    result = f();
  } catch (ex) {
    return ex - 27;
  }
  throw result + 27;
}

// Alternative 4: constructor

function increaseAndStore15Constructor() {
  if (deopt) %DeoptimizeFunction(f);
  ++counter;
  this.x = 15;
}

function increaseAndThrow42Constructor() {
  if (deopt) %DeoptimizeFunction(f);
  ++counter;
  this.x = 42;
  throw this.x;
}

// Alternative 5: property

var magic = {};
Object.defineProperty(magic, 'prop', {
  get: function () {
    if (deopt) %DeoptimizeFunction(f);
    return 15 + 0 * ++counter;
  },

  set: function(x) {
    // argument should be 37
    if (deopt) %DeoptimizeFunction(f);
    counter -= 36 - x; // increments counter
    throw 42;
  }
})

// Generate type feedback.

assertEquals(15, increaseAndReturn15_calls_noopt());
assertThrowsEquals(function() { return increaseAndThrow42_noopt_inner() }, 42);

assertEquals(15, (new increaseAndStore15Constructor()).x);
assertThrowsEquals(function() {
      return (new increaseAndThrow42Constructor()).x;
    },
    42);

function runThisShard() {
""".strip()


def booltuples(n):
  """booltuples(2) yields 4 tuples: (False, False), (False, True),
  (True, False), (True, True)."""
  assert isinstance(n, int)
  if n <= 0:
    yield ()
  else:
    for initial in booltuples(n-1):
      yield initial + (False,)
      yield initial + (True,)


def fnname(flags):
  """Encode a flag tuple as a unique JS function name, one letter per flag."""
  assert len(FLAGLETTERS) == len(flags)

  return "f_" + ''.join(
      FLAGLETTERS[i] if b else '_'
      for (i, b) in enumerate(flags))

NUM_TESTS_PRINTED = 0
NUM_TESTS_IN_SHARD = 0


def printtest(flags):
  """Print a test case. Takes a couple of boolean flags, on which the
  printed Javascript code depends."""
  assert all(isinstance(flag, bool) for flag in flags)

  # The alternative flags are in reverse order so that if we take all possible
  # tuples, ordered lexicographically from false to true, we get first the
  # default, then alternative 1, then 2, etc.
  (
    alternativeFn5,       # use alternative #5 for returning/throwing:
                          # return/throw using property
    alternativeFn4,       # use alternative #4 for returning/throwing:
                          # return/throw using constructor
    alternativeFn3,       # use alternative #3 for returning/throwing:
                          # return/throw indirectly, based on function argument
    alternativeFn2,       # use alternative #2 for returning/throwing:
                          # return/throw indirectly in unoptimized code,
                          # no branching
    alternativeFn1,       # use alternative #1 for returning/throwing:
                          # return/throw indirectly, based on boolean arg
    tryThrows,            # in try block, call throwing function
    tryReturns,           # in try block, call returning function
    tryFirstReturns,      # in try block, returning goes before throwing
    tryResultToLocal,     # in try block, result goes to local variable
    doCatch,              # include catch block
    catchReturns,         # in catch block, return
    catchWithLocal,       # in catch block, modify or return the local variable
    catchThrows,          # in catch block, throw
    doFinally,            # include finally block
    finallyReturns,       # in finally block, return local variable
    finallyThrows,        # in finally block, throw
    endReturnLocal,       # at very end, return variable local
    deopt,                # deopt inside inlined function
  ) = flags

  # BASIC RULES

  # Only one alternative can be applied at any time.
  if (alternativeFn1 + alternativeFn2 + alternativeFn3 + alternativeFn4
      + alternativeFn5 > 1):
    return

  # In try, return or throw, or both.
  if not (tryReturns or tryThrows): return

  # Either doCatch or doFinally.
  if not doCatch and not doFinally: return

  # Catch flags only make sense when catching
  if not doCatch and (catchReturns or catchWithLocal or catchThrows):
    return

  # Finally flags only make sense when finallying
  if not doFinally and (finallyReturns or finallyThrows):
    return

  # tryFirstReturns is only relevant when both tryReturns and tryThrows are
  # true.
  if tryFirstReturns and not (tryReturns and tryThrows): return

  # From the try and finally block, we can return or throw, but not both.
  if catchReturns and catchThrows: return
  if finallyReturns and finallyThrows: return

  # If at the end we return the local, we need to have touched it.
  if endReturnLocal and not (tryResultToLocal or catchWithLocal): return

  # PRUNING

  anyAlternative = any([alternativeFn1, alternativeFn2, alternativeFn3,
      alternativeFn4, alternativeFn5])
  specificAlternative = any([alternativeFn2, alternativeFn3])
  rareAlternative = not specificAlternative

  # If try returns and throws, then don't catchWithLocal, endReturnLocal, or
  # deopt, or do any alternative.
  if (tryReturns and tryThrows
      and (catchWithLocal or endReturnLocal or deopt or anyAlternative)):
    return
  # We don't do any alternative if we do a finally.
  if doFinally and anyAlternative: return
  # We only use the local variable if we do alternative #2 or #3.
  if ((tryResultToLocal or catchWithLocal or endReturnLocal)
      and not specificAlternative):
    return
  # We don't need to test deopting into a finally.
  if doFinally and deopt: return

  # We're only interested in alternative #2 if we have endReturnLocal, no
  # catchReturns, and no catchThrows, and deopt.
  if (alternativeFn2
      and (not endReturnLocal or catchReturns or catchThrows or not deopt)):
    return

  # Flag check succeeded.

  trueFlagNames = [name for (name, value) in flags._asdict().items() if value]
  flagsMsgLine = "  // Variant flags: [{}]".format(', '.join(trueFlagNames))
  write(textwrap.fill(flagsMsgLine, subsequent_indent='  //   '))
  write("")

  # Select the JS call fragments for "return 15" / "throw 42" according to the
  # chosen alternative.
  if not anyAlternative:
    fragments = {
      'increaseAndReturn15': 'increaseAndReturn15()',
      'increaseAndThrow42': 'increaseAndThrow42()',
    }
  elif alternativeFn1:
    fragments = {
      'increaseAndReturn15': 'returnOrThrow(true)',
      'increaseAndThrow42': 'returnOrThrow(false)',
    }
  elif alternativeFn2:
    fragments = {
      'increaseAndReturn15': 'increaseAndReturn15_calls_noopt()',
      'increaseAndThrow42': 'increaseAndThrow42_calls_noopt()',
    }
  elif alternativeFn3:
    fragments = {
      'increaseAndReturn15': 'invertFunctionCall(increaseAndThrow42)',
      'increaseAndThrow42': 'invertFunctionCall(increaseAndReturn15)',
    }
  elif alternativeFn4:
    fragments = {
      'increaseAndReturn15': '(new increaseAndStore15Constructor()).x',
      'increaseAndThrow42': '(new increaseAndThrow42Constructor()).x',
    }
  else:
    assert alternativeFn5
    fragments = {
      'increaseAndReturn15': 'magic.prop /* returns 15 */',
      'increaseAndThrow42': '(magic.prop = 37 /* throws 42 */)',
    }

  # As we print code, we also maintain what the result should be. Variable
  # {result} can be one of three things:
  #
  # - None, indicating returning JS null
  # - ("return", n) with n an integer
  # - ("throw", n), with n an integer
  result = None
  # We also maintain what the counter should be at the end.
  # The counter is reset just before f is called.
  counter = 0

  write(    "  f = function {} () {{".format(fnname(flags)))
  write(    "    var local = 888;")
  write(    "    deopt = {};".format("true" if deopt else "false"))
  local = 888
  write(    "    try {")
  write(    "      counter++;")
  counter += 1
  resultTo = "local +=" if tryResultToLocal else "return"
  if tryReturns and not (tryThrows and not tryFirstReturns):
    write(  "      {} 4 + {increaseAndReturn15};".format(resultTo, **fragments))
    if result == None:
      counter += 1
      if tryResultToLocal:
        local += 19
      else:
        result = ("return", 19)
  if tryThrows:
    write(  "      {} 4 + {increaseAndThrow42};".format(resultTo, **fragments))
    if result == None:
      counter += 1
      result = ("throw", 42)
  if tryReturns and tryThrows and not tryFirstReturns:
    write(  "      {} 4 + {increaseAndReturn15};".format(resultTo, **fragments))
    if result == None:
      counter += 1
      if tryResultToLocal:
        local += 19
      else:
        result = ("return", 19)
  write(    "      counter++;")
  if result == None:
    counter += 1

  if doCatch:
    write(  "    } catch (ex) {")
    write(  "      counter++;")
    if isinstance(result, tuple) and result[0] == 'throw':
      counter += 1
    if catchThrows:
      write("      throw 2 + ex;")
      if isinstance(result, tuple) and result[0] == "throw":
        result = ('throw', 2 + result[1])
    elif catchReturns and catchWithLocal:
      write("      return 2 + local;")
      if isinstance(result, tuple) and result[0] == "throw":
        result = ('return', 2 + local)
    elif catchReturns and not catchWithLocal:
      write("      return 2 + ex;");
      if isinstance(result, tuple) and result[0] == "throw":
        result = ('return', 2 + result[1])
    elif catchWithLocal:
      write("      local += ex;");
      if isinstance(result, tuple) and result[0] == "throw":
        local += result[1]
        result = None
        counter += 1
    else:
      if isinstance(result, tuple) and result[0] == "throw":
        result = None
        counter += 1
    write(  "      counter++;")

  if doFinally:
    write(  "    } finally {")
    write(  "      counter++;")
    counter += 1
    if finallyThrows:
      write("      throw 25;")
      result = ('throw', 25)
    elif finallyReturns:
      write("      return 3 + local;")
      result = ('return', 3 + local)
    elif not finallyReturns and not finallyThrows:
      write("      local += 2;")
      local += 2
      counter += 1
    else:
      assert False # unreachable
    write(  "      counter++;")

  write(    "    }")
  write(    "    counter++;")
  if result == None:
    counter += 1
  if endReturnLocal:
    write(  "    return 5 + local;")
    if result == None:
      result = ('return', 5 + local)
  write(    "  }")

  # Emit the assertion matching the result/counter bookkeeping above.
  if result == None:
    write(  "  resetOptAndAssertResultEquals(undefined, f);")
  else:
    tag, value = result
    if tag == "return":
      write("  resetOptAndAssertResultEquals({}, f);".format(value))
    else:
      assert tag == "throw"
      write("  resetOptAndAssertThrowsWith({}, f);".format(value))

  write(    "  assertEquals({}, counter);".format(counter))
  write(    "")

  global NUM_TESTS_PRINTED, NUM_TESTS_IN_SHARD
  NUM_TESTS_PRINTED += 1
  NUM_TESTS_IN_SHARD += 1

FILE = None # to be initialised to an open file
SHARD_NUM = 1


def write(*args):
  # print(file=None) writes to sys.stdout, so this works in stdout mode too.
  return print(*args, file=FILE)


def rotateshard():
  """In shard mode, close the current shard once it is full and open the
  next output file; a no-op in stdout mode."""
  global FILE, NUM_TESTS_IN_SHARD, SHARD_SIZE
  if MODE != 'shard':
    return
  if FILE != None and NUM_TESTS_IN_SHARD < SHARD_SIZE:
    return
  if FILE != None:
    finishshard()
    assert FILE == None
  FILE = open(SHARD_FILENAME_TEMPLATE.format(shard=SHARD_NUM), 'w')
  write_shard_header()
  NUM_TESTS_IN_SHARD = 0


def finishshard():
  """Write the shard footer and, in shard mode, close the file and advance
  the shard counter."""
  global FILE, SHARD_NUM, MODE
  assert FILE
  write_shard_footer()
  if MODE == 'shard':
    print("Wrote shard {}.".format(SHARD_NUM))
    FILE.close()
    FILE = None
    SHARD_NUM += 1


def write_shard_header():
  if MODE == 'shard':
    write("// Shard {}.".format(SHARD_NUM))
    write("")
  write(PREAMBLE)
  write("")


def write_shard_footer():
  # Closes the runThisShard() function opened at the end of PREAMBLE.
  write("}")
  write("%NeverOptimizeFunction(runThisShard);")
  write("")
  write("// {} tests in this shard.".format(NUM_TESTS_IN_SHARD))
  write("// {} tests up to here.".format(NUM_TESTS_PRINTED))
  write("")
  write("runThisShard();")

# One letter per flag, in flagtuple order; used by fnname().
FLAGLETTERS="54321trflcrltfrtld"

flagtuple = namedtuple('flagtuple', (
  "alternativeFn5",
  "alternativeFn4",
  "alternativeFn3",
  "alternativeFn2",
  "alternativeFn1",
  "tryThrows",
  "tryReturns",
  "tryFirstReturns",
  "tryResultToLocal",
  "doCatch",
  "catchReturns",
  "catchWithLocal",
  "catchThrows",
  "doFinally",
  "finallyReturns",
  "finallyThrows",
  "endReturnLocal",
  "deopt"
  ))

emptyflags = flagtuple(*((False,) * len(flagtuple._fields)))
f1 = emptyflags._replace(tryReturns=True, doCatch=True)

# You can test function printtest with f1.

allFlagCombinations = [
    flagtuple(*bools)
    for bools in booltuples(len(flagtuple._fields))
]

if __name__ == '__main__':
  global MODE
  if sys.argv[1:] == []:
    MODE = 'stdout'
    print("// Printing all shards together to stdout.")
    print("")
    write_shard_header()
    FILE = sys.stdout
  elif sys.argv[1:] == ['--shard-and-overwrite']:
    MODE = 'shard'
  else:
    print("Usage:")
    print("")
    print("  python {}".format(sys.argv[0]))
    print("      print all tests to standard output")
    print("  python {} --shard-and-overwrite".format(sys.argv[0]))
    print("      print all tests to {}".format(SHARD_FILENAME_TEMPLATE))
    print("")
    print(sys.argv[1:])
    print("")
    sys.exit(1)

  rotateshard()

  for flags in allFlagCombinations:
    printtest(flags)
    rotateshard()

  finishshard()

  if MODE == 'shard':
    print("Total: {} tests.".format(NUM_TESTS_PRINTED))
```
```javascript Http Server in **Node** Handle `JSON.parse` error in Node.js `process.nextTick()` callback pattern Clustering `exports` vs. `module.exports` in **Node** ```
```python
from streamlink.plugins.vtvgo import VTVgo
from tests.plugins import PluginCanHandleUrl


class TestPluginCanHandleUrlVTVgo(PluginCanHandleUrl):
    """URL-matching tests for the VTVgo plugin.

    The ``PluginCanHandleUrl`` base class generates the actual test cases
    from the ``should_match`` / ``should_not_match`` lists below; this class
    only declares the data.
    """

    # Plugin class under test.
    __plugin__ = VTVgo

    # URLs the plugin's matcher regex must accept.
    should_match = [
        "path_to_url",
        "path_to_url",
        "path_to_url",
        "path_to_url",
        "path_to_url",
        "path_to_url",
        "path_to_url",
        "path_to_url",
        "path_to_url",
        "path_to_url",
        "path_to_url",
    ]

    # URLs the plugin's matcher regex must reject.
    should_not_match = [
        # POST request will error with www.
        "path_to_url",
    ]
```
```go
/*
path_to_url

Unless required by applicable law or agreed to in writing, software
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/

package fake

import (
	"context"
	"github.com/stretchr/testify/assert"
	"testing"

	v1 "k8s.io/api/core/v1"
	policy "k8s.io/api/policy/v1"
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

// TestNewSimpleClientset exercises the legacy fake clientset: it creates two
// pods, evicts one via the policy/v1 Eviction subresource, and then lists the
// remaining pods to verify the eviction did not corrupt the object tracker.
func TestNewSimpleClientset(t *testing.T) {
	client := NewSimpleClientset()
	client.CoreV1().Pods("default").Create(context.Background(), &v1.Pod{
		ObjectMeta: meta_v1.ObjectMeta{
			Name:      "pod-1",
			Namespace: "default",
		},
	}, meta_v1.CreateOptions{})
	client.CoreV1().Pods("default").Create(context.Background(), &v1.Pod{
		ObjectMeta: meta_v1.ObjectMeta{
			Name:      "pod-2",
			Namespace: "default",
		},
	}, meta_v1.CreateOptions{})

	// Evict pod-2; the fake tracker should delete it rather than store the
	// Eviction object itself in the pod collection.
	err := client.CoreV1().Pods("default").EvictV1(context.Background(), &policy.Eviction{
		ObjectMeta: meta_v1.ObjectMeta{
			Name: "pod-2",
		},
	})
	if err != nil {
		t.Errorf("TestNewSimpleClientset() res = %v", err.Error())
	}

	pods, err := client.CoreV1().Pods("default").List(context.Background(), meta_v1.ListOptions{})
	// err: item[0]: can't assign or convert v1beta1.Eviction into v1.Pod
	if err != nil {
		t.Errorf("TestNewSimpleClientset() res = %v", err.Error())
	} else {
		t.Logf("TestNewSimpleClientset() res = %v", pods)
	}
}

// TestManagedFieldClientset exercises server-side-apply field management in
// the managed-fields-aware fake clientset (NewClientset). The steps build on
// one another: each Apply asserts the merged Data of every manager so far, so
// the order of operations below is significant.
func TestManagedFieldClientset(t *testing.T) {
	client := NewClientset()
	name := "pod-1"
	namespace := "default"

	// Create with test-manager-0 owning key k0.
	cm, err := client.CoreV1().ConfigMaps("default").Create(context.Background(),
		&v1.ConfigMap{
			ObjectMeta: meta_v1.ObjectMeta{Name: name, Namespace: namespace},
			Data:       map[string]string{"k0": "v0"},
		}, meta_v1.CreateOptions{FieldManager: "test-manager-0"})
	if err != nil {
		t.Errorf("Failed to create pod: %v", err)
	}
	assert.Equal(t, map[string]string{"k0": "v0"}, cm.Data)

	// Apply with test-manager-1
	// Expect data to be shared with initial create
	cm, err = client.CoreV1().ConfigMaps("default").Apply(context.Background(),
		v1ac.ConfigMap(name, namespace).WithData(map[string]string{"k1": "v1"}),
		meta_v1.ApplyOptions{FieldManager: "test-manager-1"})
	if err != nil {
		t.Errorf("Failed to create pod: %v", err)
	}
	assert.Equal(t, map[string]string{"k0": "v0", "k1": "v1"}, cm.Data)

	// Apply conflicting with test-manager-2, expect apply to fail
	_, err = client.CoreV1().ConfigMaps("default").Apply(context.Background(),
		v1ac.ConfigMap(name, namespace).WithData(map[string]string{"k1": "xyz"}),
		meta_v1.ApplyOptions{FieldManager: "test-manager-2"})
	if assert.Error(t, err) {
		assert.Equal(t, "Apply failed with 1 conflict: conflict with \"test-manager-1\": .data.k1", err.Error())
	}

	// Apply with test-manager-2
	// Expect data to be shared with initial create and test-manager-1
	cm, err = client.CoreV1().ConfigMaps("default").Apply(context.Background(),
		v1ac.ConfigMap(name, namespace).WithData(map[string]string{"k2": "v2"}),
		meta_v1.ApplyOptions{FieldManager: "test-manager-2"})
	if err != nil {
		t.Errorf("Failed to create pod: %v", err)
	}
	assert.Equal(t, map[string]string{"k0": "v0", "k1": "v1", "k2": "v2"}, cm.Data)

	// Apply with test-manager-1
	// Expect owned data to be updated
	cm, err = client.CoreV1().ConfigMaps("default").Apply(context.Background(),
		v1ac.ConfigMap(name, namespace).WithData(map[string]string{"k1": "v101"}),
		meta_v1.ApplyOptions{FieldManager: "test-manager-1"})
	if err != nil {
		t.Errorf("Failed to create pod: %v", err)
	}
	assert.Equal(t, map[string]string{"k0": "v0", "k1": "v101", "k2": "v2"}, cm.Data)

	// Force apply with test-manager-2
	// Expect data owned by test-manager-1 to be updated, expect data already owned but not in apply configuration to be removed
	cm, err = client.CoreV1().ConfigMaps("default").Apply(context.Background(),
		v1ac.ConfigMap(name, namespace).WithData(map[string]string{"k1": "v202"}),
		meta_v1.ApplyOptions{FieldManager: "test-manager-2", Force: true})
	if err != nil {
		t.Errorf("Failed to create pod: %v", err)
	}
	assert.Equal(t, map[string]string{"k0": "v0", "k1": "v202"}, cm.Data)

	// Update with test-manager-0 replaces the entire resource: an Update (as
	// opposed to Apply) claims every field for its manager, so all previously
	// applied keys are dropped in favor of the new Data map.
	cm, err = client.CoreV1().ConfigMaps("default").Update(context.Background(),
		&v1.ConfigMap{
			TypeMeta: meta_v1.TypeMeta{
				APIVersion: "v1",
				Kind:       "ConfigMap",
			},
			ObjectMeta: meta_v1.ObjectMeta{
				Name:      name,
				Namespace: namespace,
			},
			Data: map[string]string{
				"k99": "v99",
			},
		}, meta_v1.UpdateOptions{FieldManager: "test-manager-0"})
	if err != nil {
		t.Errorf("Failed to update pod: %v", err)
	}
	assert.Equal(t, map[string]string{"k99": "v99"}, cm.Data)
}
```
Julianne Ankley is a singer/songwriter and visual artist from southeast Michigan. She has won a total of fifteen Detroit Music Awards, including Outstanding Country Vocalist and Outstanding Country Recording at the 2021 Detroit Music Awards. Julianne's music includes Motown, Country and Roots music influences. Ankley has performed the national anthem for the Detroit Red Wings, the Detroit Tigers, the Toledo Mud Hens, and for nine seasons was the "Voice of the Icehawks" for the Port Huron IHL Icehawks.

Milestones
In 2010, Ankley was a finalist in the Merlefest Songwriter Competition for her song: "It Ain't Over"
In 2011, Ankley was named one of the Best Country Music Artists in Detroit.
Songs and Stories: Detroit PBS Spotlights Julianne Ankley & The Rogues. Biography, interview and performance video. Julianne's music and the Detroit music scene.
Ankley performed her original song, "Christmas in Your Heart," at the "2016 America's Thanksgiving Day Parade" in Detroit, MI.
Port Huron singer and painter brings both talents to Highland exhibit. Julianne discussed her music, art and what drives her.
Ankley was named Blue Water Woman Musician of the Year in 2020 for musical contributions. Singer/songwriter and visual artist, Julianne Ankley has combined her talents for audiences.
Releasing an album during a pandemic: Artist Spotlight - Julianne Ankley
During the pandemic of 2020-2021, Ankley performed free live concerts online to help boost morale and entertain. She also participated in online benefit concert(s) during this time. She continues to tour nationally and has played smaller venues to large music festivals.

Discography
It Ain't Over (EP) 2009
Vivid (LP) 2011
"He's Still My Boy" (Single) 2012
"Christmas in Your Heart" (Single) 2012
Don't Let Go (LP) 2015
"Raining for You" (Single) 2017
"Why" (Single) 2019
With Love from Lake Huron (LP) 2020

References

American women singer-songwriters
Musicians from Detroit
```m4sugar
# $OpenBSD: regress.m4,v 1.1 2016/07/30 13:55:54 tb Exp $
# $FreeBSD: head/usr.bin/tests/regress.m4 263227 2014-03-16 08:04:06Z jmmv $

dnl Originally /usr/src/usr.bin/tests/regress.m4 on FreeBSD
dnl Merged into jot tests for OpenBSD by attila@stalphonsos.com

dnl A library of routines for doing regression tests for userland utilities.
dnl The macros below expand to plain Bourne-shell fragments; the expanded
dnl script emits TAP-style "ok"/"not ok" lines, one per test.

dnl Start up. We initialise the exit status to 0 (no failure) and change
dnl into the directory specified by our first argument, which is the
dnl directory to run the tests inside.

dnl We need backticks and square brackets, use [[ ]] for m4 quoting
changequote([[,]])

dnl Set some things up before we start running tests
dnl $1 - directory containing the tests (defaults to ".")
define([[REGRESSION_START]],
TESTDIR=$1
if [ -z "$TESTDIR" ]; then
	TESTDIR=.
fi
cd $TESTDIR

TOTAL=0
NFAILED=0
FAILED=""
STATUS=0
)

dnl Check $? to see if we passed or failed. The first parameter is the test
dnl which passed or failed. It may be nil.
dnl On failure: record the name, bump the failure count, and (when VERBOSE
dnl is set to a non-zero value) show a unified diff of expected vs. actual.
define([[REGRESSION_PASSFAIL]],
if [ $? -eq 0 ]; then
	echo "ok - $1 # Test detected no regression. (in $TESTDIR)"
else
	STATUS=$?
	NFAILED=`expr 1 + ${NFAILED}`
	[ -n "${FAILED}" ] && FAILED="${FAILED} "
	FAILED="${FAILED}$1"
	SEE_ABOVE=""
	if [ ${VERBOSE-0} != 0 ]; then
		diff -u ${SRCDIR:-.}/regress.$1.out ./test.$1.out
		SEE_ABOVE="See above. "
	fi
	echo "not ok - $1 # Test failed: regression detected. ${SEE_ABOVE}(in $TESTDIR)"
fi)

dnl An actual test. The first parameter is the test name. The second is the
dnl command/commands to execute for the actual test. Their exit status is
dnl checked. It is assumed that the test will output to stdout, and that the
dnl output to be used to check for regression will be in regress.TESTNAME.out.
define([[REGRESSION_TEST]],
TOTAL=`expr 1 + ${TOTAL}`
$2 >test.$1.out
diff -q ${SRCDIR:-.}/regress.$1.out ./test.$1.out >/dev/null
REGRESSION_PASSFAIL($1))

dnl Cleanup. Exit with the status code of the last failure. Should probably
dnl be the number of failed tests, but hey presto, this is what it does. This
dnl could also clean up potential droppings, if some forms of regression tests
dnl end up using mktemp(1) or such.
define([[REGRESSION_END]],
if [ ${NFAILED} -ne 0 ]; then
	echo "FAILED ${NFAILED} tests out of ${TOTAL}: ${FAILED}"
else
	echo "PASSED ${TOTAL} tests"
fi
exit $STATUS)
```
```c
/* { dg-do run } */

#include <omp.h>

extern void abort (void);

/* Fill IPOINTS consecutive elements of X, starting at index ISTART,
   with the sentinel value 123.456 (stored as float).  Each thread
   calls this on a disjoint slice, so no synchronization is needed.  */
void
subdomain (float *x, int istart, int ipoints)
{
  int i;

  for (i = 0; i < ipoints; i++)
    x[istart + i] = 123.456;
}

/* Partition the NPOINTS-element array X evenly across the threads of
   an OpenMP parallel region and have each thread initialize its own
   contiguous slice.  */
void
sub (float *x, int npoints)
{
  int iam, nt, ipoints, istart;

#pragma omp parallel default(shared) private(iam,nt,ipoints,istart)
  {
    iam = omp_get_thread_num ();
    nt = omp_get_num_threads ();
    ipoints = npoints / nt;     /* size of partition */
    istart = iam * ipoints;     /* starting array index */
    if (iam == nt - 1)          /* last thread may do more */
      ipoints = npoints - istart;
    subdomain (x, istart, ipoints);
  }
}

/* Driver: initialize the whole array in parallel, then verify every
   element is (approximately) 123.456 — the tolerance window accounts
   for the double->float narrowing of the literal.  Abort on failure,
   which the dg harness reports as a test failure.  */
int
main ()
{
  int i;
  float array[10000];

  sub (array, 10000);

  for (i = 0; i < 10000; i++)
    if (array[i] < 123.45 || array[i] > 123.46)
      abort ();

  return 0;
}
```
Salah Al-Din () is a 2001 historical Arabic television series directed by Hatem Ali which deals with the political events in the sixth century AH in the region of the Shaam and Egypt, in the scene of the Crusades. The series focuses on the biography of Salah ad-Din and highlights his courage and good creation and wisdom as he tells how he managed to unite the Muslims and crush the crusaders in the battle of Hattin and restore Jerusalem after being taken away by the Crusaders for almost a hundred years. The series presents the historical narrative from an Islamic point of view and distanced itself from the thorny areas between Sunnis and Shiites, as it refrains from addressing the Fatimid Ismaili Shiite orientation and the difference between it and the Zengid Sultanate of the Sunni Abbasid Caliphate, whether in faith or practice. The series consists of thirty episodes starting from the birth of the protagonist and continues to display events in chronological order with exposure to other threads complementary to the story and related to the historical period. The series conjuncts with the series Searching for Salah al-Din by Najdat Anzour on the impact of the Al-Aqsa restore, which began in autumn 2000. Cast Jamal Suleiman: Saladin. Suzan Najm Aldeen: Ismat ad-Din Khatun. Bassem Yakhour : Nur ad-Din Zangi Najah Safkouni: Najm ad-Din Ayyub. Wael Ramadan: Raynald of Châtillon. Mohammed Miftah: Asad al-Din Shirkuh. Arab characters: Bassel Khayat - Qais Sheikh Najib - Taim Hasan - Mahmoud Khalili - Ahmed Mansour - Nasser Wardiani - Osama Sayed Yusuf - Ali Madarati - Fadi Sobeeh - Mohammed Khair Jarrah - Kifah Khoos - Nizar Abu Hajar - Ghassan Jbai Rami Hanna : Usama ibn Munqidh Abdul Rahman Al-Rashi: Imad ad-Din Zengi . Alaeddin Kokch : Mu'in ad-Din Unur . Hassan Awiti : Shawar of Egypt. Jalal Shamout : Taqi al-Din Omar . From Sudan Yasser Abdel Latif : Custodian of the Caliphate Jawhar. From Jordan Nadera Imran : Saqaut Al Malik Jamrud. 
Guests of honor Rafiq Subaie: Sahab al Hadas Murad (Sheikh). Rafik Ali Ahmed: Rashid ad-Din Sinan Ghassan Massoud: Judge Al-Fadil. The characters Franjieh Faiq Erqsusi - Nidal Segher - Ramez Atallah - Mohammed Al Rashi - Hima Ismail - Mustafa El Khani - Sulafa Memar - Radwan Jamous - Hossam Al Shah - Mufid Abu Hamda . Salma Al Masri : Queen of Jerusalem. Sala Faukharjy: Hadirna Qandisat Trabalas. Maher Salibi : Amalric of Jerusalem. Samer Omran : Louis VII of France. Saif al-Din Subaie : King Baldwin III. Mohamed El Tayeb : Conrad III of Germany From Lebanon Urd Al-Khal: Constance of Antioch Walid Al-Allaily - Aida Al-Nazer. Guests of honor: Khaled Taja : Byzantine emperor Manuel I Jihad Abdo : Raymond III the Prince of Tripoli . From Lebanon Carmen Lebbos: Princess Agnes of Courtenay. With new faces Saad Lustan : Izz al-Din Gerdik. Amer Ali : Baldwin De Abelin. Nadia Hamza : Queen of France Eleanora. Kenaz Salem :Palian de Abelin. Aman Al-Arnad: Salah al-Din as a youth. Ahmed Al-Ahmad : Humphrey I de Bohun. Adnan Abdel Jalil : Egyptian Coptic. Bassam Daoud : Patriarch Heraclius. See also List of Islamic films Al-Taghreba al-Falastenya Selahaddin Eyyubi (2024) References External links All episodes in official YouTube page Arabic-language television shows 2001 television films 2001 films 2001 Syrian television series debuts 2001 Syrian television series endings 2001 television specials Television series about Islam Cultural depictions of Saladin Television series about the Crusades
```go // // // path_to_url // // Unless required by applicable law or agreed to in writing, software // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. package controller import ( "crypto/tls" "fmt" api "github.com/coreos/etcd-operator/pkg/apis/etcd/v1beta2" "github.com/coreos/etcd-operator/pkg/util/etcdutil" "github.com/coreos/etcd-operator/pkg/util/k8sutil" "k8s.io/client-go/kubernetes" ) func generateTLSConfig(kubecli kubernetes.Interface, clientTLSSecret, namespace string) (*tls.Config, error) { var tlsConfig *tls.Config if len(clientTLSSecret) != 0 { d, err := k8sutil.GetTLSDataFromSecret(kubecli, namespace, clientTLSSecret) if err != nil { return nil, fmt.Errorf("failed to get TLS data from secret (%v): %v", clientTLSSecret, err) } tlsConfig, err = etcdutil.NewTLSConfig(d.CertData, d.KeyData, d.CAData) if err != nil { return nil, fmt.Errorf("failed to constructs tls config: %v", err) } } return tlsConfig, nil } func isPeriodicBackup(ebSpec *api.BackupSpec) bool { if ebSpec.BackupPolicy != nil { return ebSpec.BackupPolicy.BackupIntervalInSecond != 0 } return false } func containsString(slice []string, s string) bool { for _, item := range slice { if item == s { return true } } return false } ```
```go // Code generated by smithy-go-codegen DO NOT EDIT. package elasticloadbalancingv2 import ( "context" "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) // Deregisters the specified targets from the specified target group. After the // targets are deregistered, they no longer receive traffic from the load balancer. // // The load balancer stops sending requests to targets that are deregistering, but // uses connection draining to ensure that in-flight traffic completes on the // existing connections. This deregistration delay is configured by default but can // be updated for each target group. // // For more information, see the following: // // [Deregistration delay] // - in the Application Load Balancers User Guide // // [Deregistration delay] // - in the Network Load Balancers User Guide // // [Deregistration delay] // - in the Gateway Load Balancers User Guide // // Note: If the specified target does not exist, the action returns successfully. // // [Deregistration delay]: path_to_url#deregistration-delay func (c *Client) DeregisterTargets(ctx context.Context, params *DeregisterTargetsInput, optFns ...func(*Options)) (*DeregisterTargetsOutput, error) { if params == nil { params = &DeregisterTargetsInput{} } result, metadata, err := c.invokeOperation(ctx, "DeregisterTargets", params, optFns, c.addOperationDeregisterTargetsMiddlewares) if err != nil { return nil, err } out := result.(*DeregisterTargetsOutput) out.ResultMetadata = metadata return out, nil } type DeregisterTargetsInput struct { // The Amazon Resource Name (ARN) of the target group. // // This member is required. TargetGroupArn *string // The targets. If you specified a port override when you registered a target, you // must specify both the target ID and the port when you deregister it. // // This member is required. 
Targets []types.TargetDescription noSmithyDocumentSerde } type DeregisterTargetsOutput struct { // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata noSmithyDocumentSerde } func (c *Client) addOperationDeregisterTargetsMiddlewares(stack *middleware.Stack, options Options) (err error) { if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { return err } err = stack.Serialize.Add(&awsAwsquery_serializeOpDeregisterTargets{}, middleware.After) if err != nil { return err } err = stack.Deserialize.Add(&awsAwsquery_deserializeOpDeregisterTargets{}, middleware.After) if err != nil { return err } if err := addProtocolFinalizerMiddlewares(stack, options, "DeregisterTargets"); err != nil { return fmt.Errorf("add protocol finalizers: %v", err) } if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } if err = addClientRequestID(stack); err != nil { return err } if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } if err = addComputePayloadSHA256(stack); err != nil { return err } if err = addRetry(stack, options); err != nil { return err } if err = addRawResponseToMetadata(stack); err != nil { return err } if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { return err } if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addTimeOffsetBuild(stack, c); err != nil { return err } if err = addUserAgentRetryMode(stack, options); err != nil { return err } if err = addOpDeregisterTargetsValidationMiddleware(stack); err != nil { return 
err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeregisterTargets(options.Region), middleware.Before); err != nil { return err } if err = addRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } if err = addResponseErrorMiddleware(stack); err != nil { return err } if err = addRequestResponseLogging(stack, options); err != nil { return err } if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil } func newServiceMetadataMiddleware_opDeregisterTargets(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, OperationName: "DeregisterTargets", } } ```
Thamarai is a 1994 Tamil language drama film directed and produced by K. K. Rajsirpy. The film stars Napoleon, Rupini and Rohini. It was released on 19 August 1994. Plot According to the prophecy, Kali temple has to be shifted in Madurai Veeran temple's place. This prophecy creates troubles between the two communities of the village. In the meantime, Thamarai (Napoleon) comes back from jail to help the "Madurai Veeran" community against the vicious village chief Subbarayan (Rajesh). Five years back, a brute Thamarai came to Subbarayan's village. Being short-tempered, he clashed against the ruthless Subbarayan many times. Thamarai and Poogodai fell in love with each other but the poor palm wine seller Sarasu (Rupini) developed a soft corner for Thamarai. Poogodai later got married with a rich groom and Thamarai went to jail for cutting the hand of the groom's father. The next day, the groom committed suicide. During the Madurai Veeran temple festival, Subbarayan plans to demolish the Madurai Veeran temple but Thamarai prevents it. In the confrontation, Sarasu died by saving Thamarai. What transpires next forms the rest of the story. Cast Napoleon as Thamarai Rupini as Sarasu, an alcohol seller who loves Thamarai Rohini as Poongodai, Thamarai’s love interest Rajesh as Subbarayan, Poongodai’s father S. S. Chandran as Poosari, Maniarasu’s father and Subbarayan’s sidekick R. Sundarrajan as Veerappan Supergood Kannan as Maniarasu, Poosari’s son and Elanji’s love interest Poorani as Elanji, Maniarasu’s love interest Kavitha Vijaya Chandrika A. K. Veerasamy as Sangili Kumarimuthu as Manickam Thideer Kannaiah Suryakanth Krishnamoorthy as Semalai Nalinikanth Bayilvan Ranganathan Vellai Subbaiah Durai Ramachandran A. Madhur Swamy Nadesh Alex as Parisal Production Actor Napoleon, who played villains and character roles until then, signed up to play for the first time the hero role. But the film that first got released in which he had played the hero role was Seevalaperi Pandi. 
Soundtrack The film score and the soundtrack were composed by Deva. The soundtrack, released in 1994, features 6 tracks with lyrics written by Vairamuthu and Deva. References 1994 films 1990s Tamil-language films Films scored by Deva (composer)
Gach-e Sofla (, also Romanized as Gach-e Soflá and Gach Soflá; also known as Gach-e Pā’īn) is a village in Horjand Rural District, Kuhsaran District, Ravar County, Kerman Province, Iran. At the 2006 census, its population was 86, in 23 families. References Populated places in Ravar County
Fox Crossing is a village in Winnebago County, Wisconsin, United States. It was incorporated from the former town of Menasha in 2016. The population as of the 2020 census was 18,974. Fox Crossing is located in the Fox Cities region and the Appleton-Oshkosh-Neenah, WI CSA, the third largest metropolitan area in Wisconsin. History The Town of Menasha was organized on April 3, 1855, in part from land formerly belonging to the Town of Neenah. The original land area of the town was reduced over several decades due to annexations from the cities of Menasha and Appleton. After town residents west of Little Lake Butte des Morts approved a referendum to incorporate, the Village was incorporated on April 20, 2016. On August 17, 2016, an agreement was reached between the Village of Fox Crossing and the remaining Town of Menasha to annex all remaining town lands to Fox Crossing. This agreement effectively ended the existence of the Town of Menasha. On September 22, 2016, the remaining parts of the town of Menasha became part of the Village of Fox Crossing. Officials from Fox Crossing reportedly told officials from nearby municipalities that the sole motivation for incorporation was to avoid being annexed by a larger community. A prominent feature of the village's early history was border struggles; Fox Crossing was sued by the town of Clayton and town of Neenah over separate disputes. Clayton sued Fox Crossing in December 2017 over a 72-acre annexation the previous September, and Fox Crossing sued Clayton in 2018 over a wastewater collection settlement with the Department of Natural Resources. Clayton had previously rebuffed attempts by Fox Crossing to annex three more high-value acres of land in exchange for wastewater service. In 2020, the town and village agreed to a wastewater collection plan. Clayton paid Fox Crossing a one-time sum of $11.5 million and Fox Crossing agreed to a decade of inaction annexing land in Clayton. 
In June 2020, Fox Crossing annexed over a hundred acres of land from the town of Neenah at the site of a future Neenah High School. It was later revealed that Fox Crossing and Neenah had a border agreement in place from 2016 through 2019; the town blamed Fox Crossing for failing to find a long-term border solution. Geography Fox Crossing is located at the north end of Lake Winnebago along the Fox River which forms Little Lake Butte des Morts through the Village. The Village partially surrounds the City of Menasha. Other adjacent communities include: Harrison to the east, the City of Neenah and Town of Neenah to the south, Clayton to the west, and the Outagamie County communities of Greenville and Grand Chute to the north and Appleton to the northeast. According to the United States Census Bureau, the Village has a total area of , of which is land and , or 12.50%, is water. Demographics Education Public education is provided by the Neenah Joint School District, west of Little Lake Butte des Morts, and the Menasha Joint School District, east of the lake. St. Mary Catholic High School and St. Mary Catholic Middle School are located in Fox Crossing. Points of Interest Fox River Friendship State Trail Lake Winnebago Little Lake Butte des Morts Tri-County Arena Transportation Major transportation routes in Fox Crossing include: Appleton International Airport is located adjacent to Fox Crossing in Greenville. Valley Transit provides bus services. References External links Villages in Winnebago County, Wisconsin Villages in Wisconsin Appleton–Fox Cities metropolitan area 2016 establishments in Wisconsin
```ruby
# frozen_string_literal: true

# Handles an incoming ActivityPub Follow activity addressed to a local
# account. The checks below run in a deliberate order: dedup/fast-forward of
# known requests first, then block/availability checks, then creation of a
# fresh follow request (auto-accepted unless the target is locked or the
# source is silenced).
class ActivityPub::Activity::Follow < ActivityPub::Activity
  include Payloadable

  def perform
    target_account = account_from_uri(object_uri)

    # Ignore follows of unknown or remote accounts, and follows whose Undo
    # was already processed before this activity arrived.
    return if target_account.nil? || !target_account.local? || delete_arrived_first?(@json['id'])

    # Update id of already-existing follow requests
    existing_follow_request = ::FollowRequest.find_by(account: @account, target_account: target_account)
    unless existing_follow_request.nil?
      existing_follow_request.update!(uri: @json['id'])
      return
    end

    # Refuse the follow outright when the target blocks the actor (directly
    # or by domain), has moved away, or is the instance actor.
    if target_account.blocking?(@account) || target_account.domain_blocking?(@account.domain) || target_account.moved? || target_account.instance_actor?
      reject_follow_request!(target_account)
      return
    end

    # Fast-forward repeat follow requests
    existing_follow = ::Follow.find_by(account: @account, target_account: target_account)
    unless existing_follow.nil?
      existing_follow.update!(uri: @json['id'])
      AuthorizeFollowService.new.call(@account, target_account, skip_follow_request: true, follow_request_uri: @json['id'])
      return
    end

    follow_request = FollowRequest.create!(account: @account, target_account: target_account, uri: @json['id'])

    if target_account.locked? || @account.silenced?
      # Manual approval needed: notify the target about the pending request.
      LocalNotificationWorker.perform_async(target_account.id, follow_request.id, 'FollowRequest', 'follow_request')
    else
      # Auto-accept: authorize immediately (this converts the request into a
      # Follow), then notify about the resulting follow.
      AuthorizeFollowService.new.call(@account, target_account)
      LocalNotificationWorker.perform_async(target_account.id, ::Follow.find_by(account: @account, target_account: target_account).id, 'Follow', 'follow')
    end
  end

  # Delivers a Reject(Follow) payload back to the actor's inbox. The
  # FollowRequest here is only instantiated for serialization — it is never
  # persisted.
  def reject_follow_request!(target_account)
    json = Oj.dump(serialize_payload(FollowRequest.new(account: @account, target_account: target_account, uri: @json['id']), ActivityPub::RejectFollowSerializer))
    ActivityPub::DeliveryWorker.perform_async(json, target_account.id, @account.inbox_url)
  end
end
```
```perl
#! /usr/bin/env perl
#
# in the file LICENSE in the source distribution or at
# path_to_url

# perlasm generator for 32-bit RC5 on x86: emits RC5_32_encrypt,
# RC5_32_decrypt and (via cbc.pl) RC5_32_cbc_encrypt in the assembler
# flavour selected by the first command-line argument.

$0 =~ m/(.*[\/\\])[^\/\\]+$/;
$dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";
require "cbc.pl";

# Last argument is the output file; everything emitted goes there.
$output = pop;
open STDOUT,">$output";

&asm_init($ARGV[0],"rc5-586.pl");

$RC5_MAX_ROUNDS=16;
$RC5_32_OFF=($RC5_MAX_ROUNDS+2)*4;
# Register roles for the generated code (A/B are the two data words,
# S points at the key schedule, r holds the round count).
$A="edi";
$B="esi";
$S="ebp";
$tmp1="eax";
$r="ebx";
$tmpc="ecx";
$tmp4="edx";

&RC5_32_encrypt("RC5_32_encrypt",1);
&RC5_32_encrypt("RC5_32_decrypt",0);
&cbc("RC5_32_cbc_encrypt","RC5_32_encrypt","RC5_32_decrypt",0,4,5,3,-1,-1);
&asm_finish();

close STDOUT;

# Emit one direction of the RC5 block transform. $enc==1 emits the
# encryption body, $enc==0 the decryption body; both share the same
# prologue/epilogue. The round count is read at run time from S[0], so a
# single routine serves 8-, 12- and 16-round keys.
sub RC5_32_encrypt
	{
	local($name,$enc)=@_;

	&function_begin_B($name,"");
	&comment("");

	&push("ebp");
	&push("esi");
	&push("edi");
	&mov($tmp4,&wparam(0));
	&mov($S,&wparam(1));

	&comment("Load the 2 words");
	&mov($A,&DWP(0,$tmp4,"",0));
	&mov($B,&DWP(4,$tmp4,"",0));

	&push($r);
	&mov($r, &DWP(0,$S,"",0));

	# encrypting part
	if ($enc)
		{
		# Pre-whitening: A += S[1], B += S[2] (offsets 4 and 8 bytes).
		&add($A, &DWP(4+0,$S,"",0));
		&add($B, &DWP(4+4,$S,"",0));

		for ($i=0; $i<$RC5_MAX_ROUNDS; $i++)
			{
			# Half-round: A = ((A ^ B) <<< B) + S[2i+3]; rotate count
			# must live in CL, hence the move through ecx.
			&xor($A, $B);
			&mov($tmp1, &DWP(12+$i*8,$S,"",0));
			&mov($tmpc, $B);
			&rotl($A, &LB("ecx"));
			&add($A, $tmp1);

			&xor($B, $A);
			&mov($tmp1, &DWP(16+$i*8,$S,"",0));
			&mov($tmpc, $A);
			&rotl($B, &LB("ecx"));
			&add($B, $tmp1);
			# After rounds 8 and 12, bail out early for keys that use
			# fewer than the maximum 16 rounds.
			if (($i == 7) || ($i == 11))
				{
				&cmp($r, $i+1);
				&je(&label("rc5_exit"));
				}
			}
		}
	else
		{
		# Decryption runs the rounds in reverse; jump into the unrolled
		# loop at the entry point matching the key's round count.
		&cmp($r, 12);
		&je(&label("rc5_dec_12"));
		&cmp($r, 8);
		&je(&label("rc5_dec_8"));
		for ($i=$RC5_MAX_ROUNDS; $i > 0; $i--)
			{
			&set_label("rc5_dec_$i") if ($i == 12) || ($i == 8);
			# Inverse half-round: B = ((B - S[2i+2]) >>> A) ^ A.
			&mov($tmp1, &DWP($i*8+8,$S,"",0));
			&sub($B, $tmp1);
			&mov($tmpc, $A);
			&rotr($B, &LB("ecx"));
			&xor($B, $A);

			&mov($tmp1, &DWP($i*8+4,$S,"",0));
			&sub($A, $tmp1);
			&mov($tmpc, $B);
			&rotr($A, &LB("ecx"));
			&xor($A, $B);
			}
		# Undo the pre-whitening.
		&sub($B, &DWP(4+4,$S,"",0));
		&sub($A, &DWP(4+0,$S,"",0));
		}

	&set_label("rc5_exit");
	# Store the two output words back into the caller's buffer.
	&mov(&DWP(0,$tmp4,"",0),$A);
	&mov(&DWP(4,$tmp4,"",0),$B);

	&pop("ebx");
	&pop("edi");
	&pop("esi");
	&pop("ebp");
	&ret();
	&function_end_B($name);
	}
```
The date of birth of Jesus is not stated in the gospels or in any historical sources, but most biblical scholars generally accept a date of birth between 6 BC and 4 BC, the year in which King Herod died. The historical evidence is too incomplete to allow a definitive dating, but the year is estimated through three different approaches: analysing references to known historical events mentioned in the nativity accounts in the Gospels of Luke and Matthew, working backward from the estimation of the start of the ministry of Jesus, and astrological or astronomical alignments. The common Christian traditional dating of the birthdate of Jesus was 25 December, a date first asserted officially by Pope Julius I in 350 AD, although this claim is dubious or otherwise unfounded. The day or season has been estimated by various methods, including the description of shepherds watching over their sheep. Year of birth Nativity accounts The nativity accounts in the New Testament gospels of Matthew and Luke do not mention a date or time of year for the birth of Jesus. Karl Rahner states that the authors of the gospels generally focused on theological elements rather than historical chronologies. Both Luke and Matthew associate Jesus' birth with the time of Herod the Great. Matthew 2:1 states that "Jesus was born in Bethlehem of Judaea in the days of Herod the king". He also implies that Jesus could have been as much as two years old at the time of the visit of the Magi, because Herod ordered the murder of all boys up to the age of two years (Massacre of the Innocents), "in accordance with the time he had learned from the Magi" Matthew 2:16. In addition, if the phrase "about 30" in Luke 3:23 is interpreted to mean 32 years old, this could fit a date of birth just within the reign of Herod, who died in 4 BC according to most scholars. Luke 1:5 mentions the reign of Herod shortly before the birth of Jesus. This Herod died in 4 BC. 
Luke 2:1-2 also places the birth during a census decreed by Caesar Augustus, when Quirinius was governing Judah. Some interpreters of Luke determine that this was the Census of Quirinius, which the Jewish historian Josephus described as taking place circa AD 6 in his book Antiquities of the Jews (written c. AD 93), by indicating that Cyrenius/Quirinius began to be the governor of Syria in AD 6 and a census took place during his tenure sometime between AD 6–7. Since Herod died a decade before this census, most scholars regard the account in Luke 2:1-2 as mistaken and generally accept a date of birth between 6 and 4 BC. On the other hand, a census was not a unique event in the Roman Empire. For example, Tertullian argued that a number of censuses were performed throughout the Roman world under Sentius Saturninus at the same time. Some biblical scholars and commentators believe the two accounts can be harmonized, arguing that the text in Luke can be read as "registration before (πρώτη) Quirinius was governor of Syria", i.e., that Luke was actually referring to a completely different census, though this understanding of the Greek word has been rejected by scholars.

Other gospel evidence

Another approach to estimating the year of birth is based on an attempt to work backwards from the point when Jesus began preaching, using the statement in Luke 3:23 that he was "about 30 years of age" at that time. Jesus began to preach after being baptized by John the Baptist, and based on Luke's gospel John only began baptizing people in "the fifteenth year of the reign of Tiberius Caesar" (Luke 3:1–2), which scholars estimate would place the year at about AD 28–29. By working backwards from this, it would appear that Jesus was probably born no later than 1 BC. Another theory is that Herod's death was as late as after the January eclipse of 1 BC or even AD 1 after the eclipse that occurred in December of 1 BC.
Luke's date is independently confirmed by John's reference in John 2:20 to the Temple being in its 46th year of construction when Jesus began his ministry during Passover, which corresponds to around 27–29 AD according to scholarly estimates.

Theories based on the Star of Bethlehem

Most scholars regard the Star of Bethlehem account to be a pious fiction, of literary and theological value, rather than historical. Nonetheless, attempts have been made to interpret it as an astronomical event, which might then help date Jesus' birth through the use of ancient astronomical records, or modern astronomical calculations. The first such attempt was made by Johannes Kepler who interpreted the account to describe a Great Conjunction. Other astronomical events have been considered, including a close planetary conjunction between Venus and Jupiter in 2 BC.

Date of Herod's death

Most scholarship concerning the date of Herod's death follows Emil Schürer's calculations published in 1896, which revised a traditional death date of 1 BC to 4 BC. Two of Herod's sons, Archelaus and Philip the Tetrarch, dated their rule from 4 BC, though Archelaus apparently held royal authority during Herod's lifetime. Philip's reign would last for 37 years, until his death in the traditionally accepted 20th year of Tiberius (AD 34), which implies his accession as 4 BC. In 1998, Beyer published that the oldest manuscripts of Josephus's Antiquities have the death of Philip in the 22nd year of Tiberius (and not the 20th year, as shown in later editions of the Antiquities). In the British Library, there is not a single manuscript prior to AD 1544 that has the traditionally accepted 20th year of Tiberius for the death of Philip. This evidence removes the main obstacle for a later date of 1 BC for the death of Herod.
Some other scholars also support the traditional date of 1 BC for Herod's death, and argue that his heirs backdated their reigns to 4 or 3 BC to assert an overlapping with Herod's rule and bolster their own legitimacy, something that had already been done by a few rulers before them. According to Dionysius Exiguus: the Anno Domini system The Anno Domini dating system was devised in 525 by Dionysius Exiguus to enumerate the years in his Easter table. His system was to replace the Diocletian era that had been used in older Easter tables, as he did not wish to continue the memory of a tyrant who persecuted Christians. The last year of the old table, Diocletian Anno Martyrium 247, was immediately followed by the first year of his table, Anno Domini 532. When Dionysius devised his table, Julian calendar years were identified by naming the consuls who held office that year — Dionysius himself stated that the "present year" was "the consulship of Probus Junior", which was 525 years "since the incarnation of our Lord Jesus Christ". Thus, Dionysius implied that Jesus' incarnation occurred 525 years earlier, without stating the specific year during which his birth or conception occurred. "However, nowhere in his exposition of his table does Dionysius relate his epoch to any other dating system, whether consulate, Olympiad, year of the world, or regnal year of Augustus; much less does he explain or justify the underlying date." Bonnie J. Blackburn and Leofranc Holford-Strevens briefly present arguments for 2 BC, 1 BC, or AD 1 as the year Dionysius intended for the Nativity or Incarnation. Among the sources of confusion are: In modern times, Incarnation is synonymous with the conception, but some ancient writers, such as Bede, considered incarnation to be synonymous with the Nativity. The civil or consular year began on 1 January, but the Diocletian year began on 29 August (30 August in the year before a Julian leap year). There were inaccuracies in the lists of consuls. 
There were confused summations of emperors' regnal years. It is not known how Dionysius established the year of Jesus's birth. Two major theories are that Dionysius based his calculation on the Gospel of Luke, which states that Jesus was "about thirty years old" shortly after "the fifteenth year of the reign of Tiberius Caesar" (AD 28), and hence subtracted thirty years from that date, or that Dionysius counted back 532 years from the first year of his new table. Another possibility is that Dionysius tried to follow the works of ancient historians such as Tertullian, Eusebius or Epiphanius, all of whom agree that Jesus was born in 2 BC. It has also been speculated by Georges Declercq that Dionysius' desire to replace Diocletian years with a calendar based on the incarnation of Christ was intended to prevent people from believing the imminent end of the world. At the time, it was believed by some that the resurrection of the dead and end of the world would occur 500 years after the birth of Jesus. The old Anno Mundi calendar theoretically commenced with the creation of the world based on information in the Old Testament. It was believed that, based on the Anno Mundi calendar, Jesus was born in the year 5500 (5500 years after the world was created) with the year 6000 of the Anno Mundi calendar marking the end of the world. Anno Mundi 6000 (approximately AD 500) was thus equated with the end of the world but this date had already passed in the time of Dionysius. The "Historia Brittonum" attributed to Nennius written in the 9th century makes extensive use of the Anno Passionis (AP) dating system which was in common use as well as the newer AD dating system. The AP dating system took its start from 'The Year of The Passion'. It is generally accepted by experts there is a 27-year difference between AP and AD reference. Pope Benedictus XVI states that Dionysius Exiguus committed an error. 
According to Jewish sources Yeshu in Jewish scholarly sources is speculated by researchers as a reference to Jesus as in Hebrew the word "Yeshu" is used to refer to Jesus and also there are similarities between Talmud Yeshu and Christian Jesus. However this fact is disputed, as Yeshu also can mean "may his name and memory be blotted out", probably used as a Damnatio memoriae to censor certain names. Talmud claims that Yeshu lived around the reign of Alexander Jannaeus who lived from 100 BC to 76 BC and since Sanhedrin 107b and Sotah 47a depicts Yeshu taking refuge in Egypt during 88-76 BC persecution of Pharisees, it can be assumed the Talmudic Yeshu was born before 88 BC and after 100 BC. Chagigah 2:2 also depicts Yeshu in same position however claims that Yeshu became an apostate during the refuge in Egypt. This Talmudic Jewish claim that Yeshu was born before 88 BC and after 100 BC during life of Alexander Jannaeus of Hasmonean dynasty (conflicting with the account that he lived during era of Pontius Pilate, which is sourced from traditional Christian, Josephus and Tacitus) is also repeated in Jewish 11th century medieval tract Toledot Yeshu which implies that this belief was alive among at least a number of Jews during these times. Baring-Gould (page 71) notes that, although the Wagenseil version named the Queen as Helene, she is also expressly described as the widow of Alexander Jannaeus, who died BC 76, and whose widow was named Salome Alexandra and she died in BC 67. Yeshu in Toledot Yeshu is Jesus himself and there is no possibility that he is another person named Yeshu because the tract is formed as a response to the claims of gospels. It was widely circulated in Europe and the Middle East in the medieval period as a Jewish response to Christian account. Yemenite edition of this tract, which is named "Episode of Jesus", repeats the same claim about the date when Yeshu lived. 
However, scholarly consensus generally sees the Toledot Yeshu as an unreliable source for the historical Jesus.

Day and season

Despite the modern celebration of Christmas in December, neither the Gospel of Luke nor Gospel of Matthew mention a season for Jesus' birth. Scholarly arguments have been made regarding whether shepherds would have been grazing their flock during the winter, with some scholars challenging a winter birth for Jesus, and some defending the idea by citing the mildness of winters in Judea and rabbinic rules regarding sheep near Bethlehem before February.

Adam C. English, professor of religion at Campbell University, argues for the veracity of December 25 as Jesus's date of birth. English assumes that Zechariah's ministry in the Temple, as described in Luke 1, took place on Yom Kippur the year before Jesus's birth; he then traces Luke's narrative through the Annunciation and the birth of John the Baptist to conclude that the Nativity occurred on December 25.

Alexander Murray of History Today argues that the celebration of Christmas as the birth day of Jesus is based on a date of a pagan feast rather than historical analysis. Saturnalia, the Roman feast for Saturn, was associated with the winter solstice. But Saturnalia was held on 17 December of the Julian calendar and later expanded with festivities only up through 23 December. The holiday was celebrated with a sacrifice at the Temple of Saturn and in the Roman Forum, as well as a public banquet, followed by private gift-giving, continual partying, and a carnival atmosphere that overturned Roman social norms. The Roman festival of Natalis Solis Invicti has also been suggested, since it was celebrated on 25 December and was associated with some prominent emperors. It is likely that such a Christian feast was chosen for Christ's marked contrast and triumph over paganism; indeed, new converts who attempted to introduce pagan elements into the Christian celebrations were sharply rebuked.
Alternatively, 25 December may have been selected owing to its proximity to the winter solstice because of its symbolic theological significance. After the solstice, the days begin to lengthen with longer hours of sunlight, which Christians see as representing the Light of Christ entering the world. This symbolism applies equally to the celebration of the Nativity of Saint John the Baptist on 24 June, near the summer solstice, based on John's remark about Jesus that "He must increase; I must decrease." (). In the 1st and 2nd centuries, the Lord's Day (Sunday) was the earliest Christian celebration and included a number of theological themes. In the 2nd century, the Resurrection of Jesus became a separate feast (now called Easter) and in the same century Epiphany began to be celebrated in the Eastern Churches on 6 January. The festival of the Nativity which later turned into Christmas was a 4th-century feast in the Western Church notably in Rome and North Africa, although it is uncertain exactly where and when it was first celebrated. The earliest source stating 25 December as the date of birth of Jesus is likely a book by Hippolytus of Rome, written in the early 3rd century. He based his view on the assumption that the conception of Jesus took place at the Spring equinox which Hippolytus placed on 25 March, and then added nine months to calculate the date of birth. That date was then used for the Christmas celebration. 25 March would also roughly be the date of his crucifixion, which ancient Christians would have seen as confirming the date of his birth, since many people of that era held the belief that the great prophets were conceived into the afterlife on the same date they were conceived into the world. Ignacio L. Götz suggests that Jesus could have been born "in the late spring of the year because pregnancies began in the fall after the harvests were in and there was enough money for a wedding feast." 
John Chrysostom argued for a 25 December date in the late 4th century, basing his argument on the assumption that the offering of incense mentioned in refers to the offering of incense by a high priest on Yom Kippur (early October), and, as above, counting fifteen months forward. However, this was very likely a retrospective justification of a choice already made rather than a genuine attempt to derive the correct birth date. John Chrysostom also writes in his homily on the Nativity of our Lord Jesus Christ (Εἰς τὸ γενέθλιον τοῦ Σωτῆρος ἡµῶν Ἰησοῦ Χριστοῦ) that the date of 25 December was well known from the beginning among Westerners. Other sources stating 25 December as the date of Jesus are: Evodius in an epistle reported in part by Nikephoros Kallistos Xanthopoulos in his Ecclesiastical History II, 3 Saint Jerome described a commentary by Victorinus of Pettau on papers by Alexander of Jerusalem: We have found, among the papers of Alexander, who was Bishop in Jerusalem, what he transcribed in his own hand from apostolic documents: on the eighth day before the calends of January Our Lord Jesus Christ was born, during the consulate of Sulpicius and Camerinus Theophilus, bishop of Caesarea, as reported in Historia Ecclesiae Christi (or Centuriae Magdeburgenses, cent. II. chapter VI Lastly, 25 December might be a reference to the date of the Feast of Dedication, which occurs on 25 Kislev of the Jewish calendar. This would require that early Christians simply translated Kislev directly to December. Research done by members of the Church of Jesus Christ of Latter-day Saints generally places the birth of Jesus at some point in early to mid April. This research is motivated by a revelation from LDS founder Joseph Smith, which can be read to suggest that 6 April is the birth date of Jesus. September or late March have been suggested by theologian, biblical scholar and author Ian Paul. 
Islamic view In the hadith compilation Tuhaf al-Uqul, the sixth imam, Jafar As Sadiq says the following when approached about the birth of Christ during Christmas: "They have lied. Rather, it was in the middle of June. The day and night become even [equal] in the middle of March". This statement of his does not literally mean it was on 15 June but it is in reference to a day near the Spring Equinox. See also Adoration of the shepherds Anno Domini Ante Christum Natum Baptism of Jesus Christ myth theory Chronology of Jesus Common Era Detailed Christian timeline Dionysius Exiguus Gospel harmony Historical Jesus Historicity of Jesus Jesus in Christianity Life of Jesus in the New Testament Timeline of the Bible Venerable Bede Talmud's claim that Jesus was born before 88 BCE References Notes Citations Sources Corrected reprinting of original 1999 edition. repr. in Further reading External links Catholic Encyclopedia (1910): Chronology of the Life of Jesus Christ Chronology Birth of Jesus Historiography of Jesus 1st-century BC Christianity Nativity of Jesus
```elixir
defmodule Mix.Tasks.Firmware.Gen.Gdb do
  @shortdoc "Generates a helper shell script for using gdb to analyze core dumps"

  @moduledoc """
  Generates a helper shell script for using gdb to analyze core dumps

  This script may be used on its own or used as a base for more complicated
  debugging. It saves the script to gdb.sh.
  """
  use Mix.Task
  import Mix.Nerves.Utils
  alias Mix.Nerves.Preflight

  @script_name "gdb.sh"

  @impl Mix.Task
  def run(_args) do
    Preflight.check!()

    nerves_system = check_nerves_system_is_set!()
    _ = check_nerves_toolchain_is_set!()

    script = render_gdb_script(nerves_system)

    confirm_overwrite!()

    Mix.shell().info("""
    Writing #{@script_name}...
    """)

    File.write!(@script_name, script)
    File.chmod!(@script_name, 0o755)
  end

  # Renders the run-gdb EEx template bundled with :nerves for the given
  # Nerves system path.
  defp render_gdb_script(nerves_system) do
    template = Application.app_dir(:nerves, "priv/templates/script.run-gdb.sh.eex")
    EEx.eval_file(template, assigns: [nerves_system: nerves_system])
  end

  # Raises (aborting the task) unless the user agrees to overwrite an
  # existing gdb.sh; a no-op when the file does not exist yet.
  defp confirm_overwrite! do
    if File.exists?(@script_name) do
      Mix.shell().yes?("OK to overwrite #{@script_name}?") || Mix.raise("Aborted")
    end
  end
end
```
Tutta Rolf (born Solveig Jenny Berntzen; 7 October 1907 – 26 October 1994) was a Norwegian-Swedish film and theatre actress and singer. Born in Oslo, she appeared in 14 films between 1932 and 1939.

She was married three times, firstly to Swedish actor and singer Ernst Rolf (1930–1932) and then to American director Jack Donohue (1936–1950) and finally to Swedish director/actor Hasse Ekman (1953–1972). She was the mother of Academy Award-winning film editor Tom Rolf and actress Jill Donahue.

Selected filmography

Paramount on Parade (1930) with husband Ernst Rolf in Scandinavian version
Servant's Entrance (1932)
Lucky Devils (1932)
Love and Deficit (1932)
Dear Relatives (1933)
Fasters millioner (1934)
En stille flirt (1933)
En stilla flirt (1934)
Under False Flag (1935)
Dressed to Thrill (1935)
Swedenhielms (1935)
Adventure (1936)
Sara Learns Manners (1937)
The Great Love (1938)
Dollar (1938)
Variety Is the Spice of Life (1939)
Whalers (1939)

Further reading

External links

1907 births
1994 deaths
Swedish film actresses
Norwegian film actresses
20th-century Swedish actresses
20th-century Norwegian women
20th-century Norwegian people
```c++ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: #ident "$Id$" /* COPYING CONDITIONS NOTICE: This program is free software; you can redistribute it and/or modify published by the Free Software Foundation, and provided that the following conditions are met: * Redistributions of source code must retain this COPYING CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the PATENT MARKING NOTICE (below), and the PATENT RIGHTS GRANT (below). * Redistributions in binary form must reproduce this COPYING CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the PATENT MARKING NOTICE (below), and the PATENT RIGHTS GRANT (below) in the documentation and/or other materials provided with the distribution. along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. COPYRIGHT NOTICE: TokuFT, Tokutek Fractal Tree Indexing Library. DISCLAIMER: This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU UNIVERSITY PATENT NOTICE: The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it. PATENT MARKING NOTICE: This software is covered by US Patent No. 8,185,551. This software is covered by US Patent No. 8,489,638. PATENT RIGHTS GRANT: "THIS IMPLEMENTATION" means the copyrightable works distributed by Tokutek as part of the Fractal Tree project. 
"PATENT CLAIMS" means the claims of patents that are owned or licensable by Tokutek, both currently or in the future; and that in the absence of this license would be infringed by THIS IMPLEMENTATION or by using or running THIS IMPLEMENTATION. "PATENT CHALLENGE" shall mean a challenge to the validity, patentability, enforceability and/or non-infringement of any of the PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. Tokutek hereby grants to you, for the term and geographical scope of the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, transfer, and otherwise run, modify, and propagate the contents of THIS IMPLEMENTATION, where such license applies only to the PATENT CLAIMS. This grant does not include claims that would be infringed only as a consequence of further modifications of THIS IMPLEMENTATION. If you or your agent or licensee institute or order or agree to the institution of patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that THIS IMPLEMENTATION constitutes direct or contributory patent infringement, or inducement of patent infringement, then any rights such litigation is filed. If you or your agent or exclusive licensee institute or order or agree to the institution of a PATENT CHALLENGE, then Tokutek may terminate any rights granted to you */ #include <portability/toku_config.h> #include <toku_portability.h> #include <stdlib.h> #if defined(HAVE_MALLOC_H) # include <malloc.h> #elif defined(HAVE_SYS_MALLOC_H) # include <sys/malloc.h> #endif #include <dlfcn.h> #include <string.h> // #define this to use a version of os_malloc that helps to debug certain features. 
// This version uses the real malloc (so that valgrind should still work) but it forces things to be slightly // misaligned (in particular, avoiding 512-byte alignment if possible, to find situations where O_DIRECT will fail. // #define USE_DEBUGGING_MALLOCS #ifdef USE_DEBUGGING_MALLOCS #include <pthread.h> // Make things misaligned on 512-byte boundaries static size_t malloced_now_count=0, malloced_now_size=0; struct malloc_pair { void *returned_pointer; void *true_pointer; size_t requested_size = 0; }; static struct malloc_pair *malloced_now; static pthread_mutex_t malloc_mutex = PTHREAD_MUTEX_INITIALIZER; static void malloc_lock(void) { int r = pthread_mutex_lock(&malloc_mutex); assert(r==0); } static void malloc_unlock(void) { int r = pthread_mutex_unlock(&malloc_mutex); assert(r==0); } static void push_to_malloced_memory(void *returned_pointer, void *true_pointer, size_t requested_size) { malloc_lock(); if (malloced_now_count == malloced_now_size) { malloced_now_size = 2*malloced_now_size + 1; malloced_now = (struct malloc_pair *)realloc(malloced_now, malloced_now_size * sizeof(*malloced_now)); } malloced_now[malloced_now_count].returned_pointer = returned_pointer; malloced_now[malloced_now_count].true_pointer = true_pointer; malloced_now[malloced_now_count].requested_size = requested_size; malloced_now_count++; malloc_unlock(); } static struct malloc_pair *find_malloced_pair(const void *p) // Requires: Lock must be held before calling. 
{ for (size_t i=0; i<malloced_now_count; i++) { if (malloced_now[i].returned_pointer==p) return &malloced_now[i]; } return 0; } void *os_malloc(size_t size) { void *raw_ptr = malloc(size+16); // allocate 16 extra bytes size_t raw_ptr_i = (size_t) raw_ptr; if (raw_ptr_i%512==0) { push_to_malloced_memory(16+(char*)raw_ptr, raw_ptr, size); return 16+(char*)raw_ptr; } else { push_to_malloced_memory(raw_ptr, raw_ptr, size); return raw_ptr; } } void *os_malloc_aligned(size_t alignment, size_t size) // Effect: Perform a malloc(size) with the additional property that the returned pointer is a multiple of ALIGNMENT. // Requires: alignment is a power of two. { void *p; int r = posix_memalign(&p, alignment, size); if (r != 0) { errno = r; p = nullptr; } return p; if (alignment%512==0) { void *raw_ptr; int r = posix_memalign(&raw_ptr, alignment, size); if (r != 0) { errno = r; return nullptr; } push_to_malloced_memory(raw_ptr, raw_ptr, size); return raw_ptr; } else { // Make sure it isn't 512-byte aligned void *raw_ptr; int r = posix_memalign(&raw_ptr, alignment, size+alignment); if (r != 0) { errno = r; return nullptr; } size_t raw_ptr_i = (size_t) raw_ptr; if (raw_ptr_i%512==0) { push_to_malloced_memory(alignment+(char*)raw_ptr, raw_ptr, size); return alignment+(char*)raw_ptr; } else { push_to_malloced_memory(raw_ptr, raw_ptr, size); return raw_ptr; } } } static size_t min(size_t a, size_t b) { if (a<b) return a; else return b; } void *os_realloc(void *p, size_t size) { size_t alignment; if (size<4) { alignment = 1; } else if (size<8) { alignment = 4; } else if (size<16) { alignment = 8; } else { alignment = 16; } return os_realloc_aligned(alignment, p, size); } void * os_realloc_aligned(size_t alignment, void *p, size_t size) // Effect: Perform a realloc(p, size) with the additional property that the returned pointer is a multiple of ALIGNMENT. // Requires: alignment is a power of two. 
{ if (p==NULL) { return os_malloc_aligned(alignment, size); } else { void *result = os_malloc_aligned(alignment, size); malloc_lock(); struct malloc_pair *mp = find_malloced_pair(p); assert(mp); // now copy all the good stuff from p to result memcpy(result, p, min(size, mp->requested_size)); malloc_unlock(); os_free(p); return result; } } void os_free(void* p) { malloc_lock(); struct malloc_pair *mp = find_malloced_pair(p); assert(mp); free(mp->true_pointer); *mp = malloced_now[--malloced_now_count]; malloc_unlock(); } size_t os_malloc_usable_size(const void *p) { malloc_lock(); struct malloc_pair *mp = find_malloced_pair(p); assert(mp); size_t size = mp->requested_size; malloc_unlock(); return size; } #else void * os_malloc(size_t size) { return malloc(size); } void *os_malloc_aligned(size_t alignment, size_t size) // Effect: Perform a malloc(size) with the additional property that the returned pointer is a multiple of ALIGNMENT. // Requires: alignment is a power of two. { void *p; int r = posix_memalign(&p, alignment, size); if (r != 0) { errno = r; p = nullptr; } return p; } void * os_realloc(void *p, size_t size) { return realloc(p, size); } void * os_realloc_aligned(size_t alignment, void *p, size_t size) // Effect: Perform a realloc(p, size) with the additional property that the returned pointer is a multiple of ALIGNMENT. // Requires: alignment is a power of two. { #if 1 if (p==NULL) { return os_malloc_aligned(alignment, size); } else { void *newp = realloc(p, size); if (0!=((long long)newp%alignment)) { // it's not aligned, so align it ourselves. void *newp2 = os_malloc_aligned(alignment, size); memcpy(newp2, newp, size); free(newp); newp = newp2; } return newp; } #else // THIS STUFF SEEMS TO FAIL VALGRIND if (p==NULL) { return os_malloc_aligned(alignment, size); } else { size_t ignore; int r = rallocm(&p, // returned pointer &ignore, // actual size of returned object. 
size, // the size we want 0, // extra bytes to "try" to allocate at the end ALLOCM_ALIGN(alignment)); if (r!=0) return NULL; else return p; } #endif } void os_free(void* p) { free(p); } typedef size_t (*malloc_usable_size_fun_t)(const void *); static malloc_usable_size_fun_t malloc_usable_size_f = NULL; size_t os_malloc_usable_size(const void *p) { if (p==NULL) return 0; if (!malloc_usable_size_f) { malloc_usable_size_f = (malloc_usable_size_fun_t) dlsym(RTLD_DEFAULT, "malloc_usable_size"); if (!malloc_usable_size_f) { malloc_usable_size_f = (malloc_usable_size_fun_t) dlsym(RTLD_DEFAULT, "malloc_size"); // darwin if (!malloc_usable_size_f) { abort(); // couldn't find a malloc size function } } } return malloc_usable_size_f(p); } #endif ```
```yaml
# XSOAR automation (deprecated): extracts MD5/SHA1/SHA256 hashes from text.
commonfields:
  id: ExtractHash
  version: -1
name: ExtractHash
# JavaScript body executed by the platform engine.  md5Regex/sha1Regex/
# sha256Regex, getCSVListAsArray, outputPaths, entryTypes and formats are
# globals supplied by the platform's common-script context, and args holds
# the invocation arguments declared below.
script: >-
  var text = args.text;

  if (typeof text !== 'string') {
      text = JSON.stringify(args.text).replace(/\\n/g,' '); // need to replace \n
  }

  var whitelist = getCSVListAsArray('Indicators Whitelist');

  var matches = {}, found;

  while (found = md5Regex.exec(text)) {
      if (whitelist.indexOf(found[0]) < 0) {
          matches[found[0]] = true;
      }
  }

  while (found = sha1Regex.exec(text)) {
      if (whitelist.indexOf(found[0]) < 0) {
          matches[found[0]] = true;
      }
  }

  while (found = sha256Regex.exec(text)) {
      if (whitelist.indexOf(found[0]) < 0) {
          matches[found[0]] = true;
      }
  }

  var uniqueMatches = Object.keys(matches);

  var ec = {};

  ec[outputPaths.file] = [];

  var md = '### Extract hash\n';

  for (var i=0; i < uniqueMatches.length; i++) {
      var f = {};
      var hashType = uniqueMatches[i].length === 32 ? 'MD5' : uniqueMatches[i].length === 40 ? 'SHA1' : 'SHA256';
      f[hashType] = uniqueMatches[i];
      ec[outputPaths.file].push(f);
      md += '- ' + uniqueMatches[i] + '\n';
  }

  return {Type: entryTypes.note, Contents: ec[outputPaths.file], ContentsFormat: formats.json, HumanReadable: md, EntryContext: ec};
type: javascript
tags:
- Utility
comment: Deprecated. We recommend using extractIndicators command instead. Extract md5, sha1, sha256 from the given text and place them both as output and in the context of a playbook
system: true
enabled: true
# Script arguments as presented in the platform UI.
args:
- name: text
  required: true
  default: true
  description: The text to extract hashes from. If object will convert to JSON.
# Context paths populated by the script's EntryContext return value.
outputs:
- contextPath: File.MD5
  description: Extracted MD5
- contextPath: File.SHA1
  description: Extracted SHA1
- contextPath: File.SHA256
  description: Extracted SHA256
scripttarget: 0
deprecated: true
fromversion: 5.0.0
```
```javascript 'use strict'; module.exports = function generate_validate(it, $keyword, $ruleType) { var out = ''; var $async = it.schema.$async === true, $refKeywords = it.util.schemaHasRulesExcept(it.schema, it.RULES.all, '$ref'), $id = it.self._getId(it.schema); if (it.isTop) { if ($async) { it.async = true; var $es7 = it.opts.async == 'es7'; it.yieldAwait = $es7 ? 'await' : 'yield'; } out += ' var validate = '; if ($async) { if ($es7) { out += ' (async function '; } else { if (it.opts.async != '*') { out += 'co.wrap'; } out += '(function* '; } } else { out += ' (function '; } out += ' (data, dataPath, parentData, parentDataProperty, rootData) { \'use strict\'; '; if ($id && (it.opts.sourceCode || it.opts.processCode)) { out += ' ' + ('/\*# sourceURL=' + $id + ' */') + ' '; } } if (typeof it.schema == 'boolean' || !($refKeywords || it.schema.$ref)) { var $keyword = 'false schema'; var $lvl = it.level; var $dataLvl = it.dataLevel; var $schema = it.schema[$keyword]; var $schemaPath = it.schemaPath + it.util.getProperty($keyword); var $errSchemaPath = it.errSchemaPath + '/' + $keyword; var $breakOnError = !it.opts.allErrors; var $errorKeyword; var $data = 'data' + ($dataLvl || ''); var $valid = 'valid' + $lvl; if (it.schema === false) { if (it.isTop) { $breakOnError = true; } else { out += ' var ' + ($valid) + ' = false; '; } var $$outStack = $$outStack || []; $$outStack.push(out); out = ''; /* istanbul ignore else */ if (it.createErrors !== false) { out += ' { keyword: \'' + ($errorKeyword || 'false schema') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: {} '; if (it.opts.messages !== false) { out += ' , message: \'boolean schema is false\' '; } if (it.opts.verbose) { out += ' , schema: false , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; } out += ' } '; } else { out += ' {} '; } var __err = out; out = $$outStack.pop(); if (!it.compositeRule && 
$breakOnError) { /* istanbul ignore if */ if (it.async) { out += ' throw new ValidationError([' + (__err) + ']); '; } else { out += ' validate.errors = [' + (__err) + ']; return false; '; } } else { out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; } } else { if (it.isTop) { if ($async) { out += ' return data; '; } else { out += ' validate.errors = null; return true; '; } } else { out += ' var ' + ($valid) + ' = true; '; } } if (it.isTop) { out += ' }); return validate; '; } return out; } if (it.isTop) { var $top = it.isTop, $lvl = it.level = 0, $dataLvl = it.dataLevel = 0, $data = 'data'; it.rootId = it.resolve.fullPath(it.self._getId(it.root.schema)); it.baseId = it.baseId || it.rootId; delete it.isTop; it.dataPathArr = [undefined]; out += ' var vErrors = null; '; out += ' var errors = 0; '; out += ' if (rootData === undefined) rootData = data; '; } else { var $lvl = it.level, $dataLvl = it.dataLevel, $data = 'data' + ($dataLvl || ''); if ($id) it.baseId = it.resolve.url(it.baseId, $id); if ($async && !it.async) throw new Error('async schema in sync schema'); out += ' var errs_' + ($lvl) + ' = errors;'; } var $valid = 'valid' + $lvl, $breakOnError = !it.opts.allErrors, $closingBraces1 = '', $closingBraces2 = ''; var $errorKeyword; var $typeSchema = it.schema.type, $typeIsArray = Array.isArray($typeSchema); if ($typeIsArray && $typeSchema.length == 1) { $typeSchema = $typeSchema[0]; $typeIsArray = false; } if (it.schema.$ref && $refKeywords) { if (it.opts.extendRefs == 'fail') { throw new Error('$ref: validation keywords used in schema at path "' + it.errSchemaPath + '" (see option extendRefs)'); } else if (it.opts.extendRefs !== true) { $refKeywords = false; it.logger.warn('$ref: keywords ignored in schema at path "' + it.errSchemaPath + '"'); } } if ($typeSchema) { if (it.opts.coerceTypes) { var $coerceToTypes = it.util.coerceToTypes(it.opts.coerceTypes, $typeSchema); } var $rulesGroup = 
it.RULES.types[$typeSchema]; if ($coerceToTypes || $typeIsArray || $rulesGroup === true || ($rulesGroup && !$shouldUseGroup($rulesGroup))) { var $schemaPath = it.schemaPath + '.type', $errSchemaPath = it.errSchemaPath + '/type'; var $schemaPath = it.schemaPath + '.type', $errSchemaPath = it.errSchemaPath + '/type', $method = $typeIsArray ? 'checkDataTypes' : 'checkDataType'; out += ' if (' + (it.util[$method]($typeSchema, $data, true)) + ') { '; if ($coerceToTypes) { var $dataType = 'dataType' + $lvl, $coerced = 'coerced' + $lvl; out += ' var ' + ($dataType) + ' = typeof ' + ($data) + '; '; if (it.opts.coerceTypes == 'array') { out += ' if (' + ($dataType) + ' == \'object\' && Array.isArray(' + ($data) + ')) ' + ($dataType) + ' = \'array\'; '; } out += ' var ' + ($coerced) + ' = undefined; '; var $bracesCoercion = ''; var arr1 = $coerceToTypes; if (arr1) { var $type, $i = -1, l1 = arr1.length - 1; while ($i < l1) { $type = arr1[$i += 1]; if ($i) { out += ' if (' + ($coerced) + ' === undefined) { '; $bracesCoercion += '}'; } if (it.opts.coerceTypes == 'array' && $type != 'array') { out += ' if (' + ($dataType) + ' == \'array\' && ' + ($data) + '.length == 1) { ' + ($coerced) + ' = ' + ($data) + ' = ' + ($data) + '[0]; ' + ($dataType) + ' = typeof ' + ($data) + '; } '; } if ($type == 'string') { out += ' if (' + ($dataType) + ' == \'number\' || ' + ($dataType) + ' == \'boolean\') ' + ($coerced) + ' = \'\' + ' + ($data) + '; else if (' + ($data) + ' === null) ' + ($coerced) + ' = \'\'; '; } else if ($type == 'number' || $type == 'integer') { out += ' if (' + ($dataType) + ' == \'boolean\' || ' + ($data) + ' === null || (' + ($dataType) + ' == \'string\' && ' + ($data) + ' && ' + ($data) + ' == +' + ($data) + ' '; if ($type == 'integer') { out += ' && !(' + ($data) + ' % 1)'; } out += ')) ' + ($coerced) + ' = +' + ($data) + '; '; } else if ($type == 'boolean') { out += ' if (' + ($data) + ' === \'false\' || ' + ($data) + ' === 0 || ' + ($data) + ' === null) ' + 
($coerced) + ' = false; else if (' + ($data) + ' === \'true\' || ' + ($data) + ' === 1) ' + ($coerced) + ' = true; '; } else if ($type == 'null') { out += ' if (' + ($data) + ' === \'\' || ' + ($data) + ' === 0 || ' + ($data) + ' === false) ' + ($coerced) + ' = null; '; } else if (it.opts.coerceTypes == 'array' && $type == 'array') { out += ' if (' + ($dataType) + ' == \'string\' || ' + ($dataType) + ' == \'number\' || ' + ($dataType) + ' == \'boolean\' || ' + ($data) + ' == null) ' + ($coerced) + ' = [' + ($data) + ']; '; } } } out += ' ' + ($bracesCoercion) + ' if (' + ($coerced) + ' === undefined) { '; var $$outStack = $$outStack || []; $$outStack.push(out); out = ''; /* istanbul ignore else */ if (it.createErrors !== false) { out += ' { keyword: \'' + ($errorKeyword || 'type') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { type: \''; if ($typeIsArray) { out += '' + ($typeSchema.join(",")); } else { out += '' + ($typeSchema); } out += '\' } '; if (it.opts.messages !== false) { out += ' , message: \'should be '; if ($typeIsArray) { out += '' + ($typeSchema.join(",")); } else { out += '' + ($typeSchema); } out += '\' '; } if (it.opts.verbose) { out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; } out += ' } '; } else { out += ' {} '; } var __err = out; out = $$outStack.pop(); if (!it.compositeRule && $breakOnError) { /* istanbul ignore if */ if (it.async) { out += ' throw new ValidationError([' + (__err) + ']); '; } else { out += ' validate.errors = [' + (__err) + ']; return false; '; } } else { out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; } out += ' } else { '; var $parentData = $dataLvl ? 'data' + (($dataLvl - 1) || '') : 'parentData', $parentDataProperty = $dataLvl ? 
it.dataPathArr[$dataLvl] : 'parentDataProperty'; out += ' ' + ($data) + ' = ' + ($coerced) + '; '; if (!$dataLvl) { out += 'if (' + ($parentData) + ' !== undefined)'; } out += ' ' + ($parentData) + '[' + ($parentDataProperty) + '] = ' + ($coerced) + '; } '; } else { var $$outStack = $$outStack || []; $$outStack.push(out); out = ''; /* istanbul ignore else */ if (it.createErrors !== false) { out += ' { keyword: \'' + ($errorKeyword || 'type') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { type: \''; if ($typeIsArray) { out += '' + ($typeSchema.join(",")); } else { out += '' + ($typeSchema); } out += '\' } '; if (it.opts.messages !== false) { out += ' , message: \'should be '; if ($typeIsArray) { out += '' + ($typeSchema.join(",")); } else { out += '' + ($typeSchema); } out += '\' '; } if (it.opts.verbose) { out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; } out += ' } '; } else { out += ' {} '; } var __err = out; out = $$outStack.pop(); if (!it.compositeRule && $breakOnError) { /* istanbul ignore if */ if (it.async) { out += ' throw new ValidationError([' + (__err) + ']); '; } else { out += ' validate.errors = [' + (__err) + ']; return false; '; } } else { out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; } } out += ' } '; } } if (it.schema.$ref && !$refKeywords) { out += ' ' + (it.RULES.all.$ref.code(it, '$ref')) + ' '; if ($breakOnError) { out += ' } if (errors === '; if ($top) { out += '0'; } else { out += 'errs_' + ($lvl); } out += ') { '; $closingBraces2 += '}'; } } else { if (it.opts.v5 && it.schema.patternGroups) { it.logger.warn('keyword "patternGroups" is deprecated and disabled. 
Use option patternGroups: true to enable.'); } var arr2 = it.RULES; if (arr2) { var $rulesGroup, i2 = -1, l2 = arr2.length - 1; while (i2 < l2) { $rulesGroup = arr2[i2 += 1]; if ($shouldUseGroup($rulesGroup)) { if ($rulesGroup.type) { out += ' if (' + (it.util.checkDataType($rulesGroup.type, $data)) + ') { '; } if (it.opts.useDefaults && !it.compositeRule) { if ($rulesGroup.type == 'object' && it.schema.properties) { var $schema = it.schema.properties, $schemaKeys = Object.keys($schema); var arr3 = $schemaKeys; if (arr3) { var $propertyKey, i3 = -1, l3 = arr3.length - 1; while (i3 < l3) { $propertyKey = arr3[i3 += 1]; var $sch = $schema[$propertyKey]; if ($sch.default !== undefined) { var $passData = $data + it.util.getProperty($propertyKey); out += ' if (' + ($passData) + ' === undefined) ' + ($passData) + ' = '; if (it.opts.useDefaults == 'shared') { out += ' ' + (it.useDefault($sch.default)) + ' '; } else { out += ' ' + (JSON.stringify($sch.default)) + ' '; } out += '; '; } } } } else if ($rulesGroup.type == 'array' && Array.isArray(it.schema.items)) { var arr4 = it.schema.items; if (arr4) { var $sch, $i = -1, l4 = arr4.length - 1; while ($i < l4) { $sch = arr4[$i += 1]; if ($sch.default !== undefined) { var $passData = $data + '[' + $i + ']'; out += ' if (' + ($passData) + ' === undefined) ' + ($passData) + ' = '; if (it.opts.useDefaults == 'shared') { out += ' ' + (it.useDefault($sch.default)) + ' '; } else { out += ' ' + (JSON.stringify($sch.default)) + ' '; } out += '; '; } } } } } var arr5 = $rulesGroup.rules; if (arr5) { var $rule, i5 = -1, l5 = arr5.length - 1; while (i5 < l5) { $rule = arr5[i5 += 1]; if ($shouldUseRule($rule)) { var $code = $rule.code(it, $rule.keyword, $rulesGroup.type); if ($code) { out += ' ' + ($code) + ' '; if ($breakOnError) { $closingBraces1 += '}'; } } } } } if ($breakOnError) { out += ' ' + ($closingBraces1) + ' '; $closingBraces1 = ''; } if ($rulesGroup.type) { out += ' } '; if ($typeSchema && $typeSchema === $rulesGroup.type 
&& !$coerceToTypes) { out += ' else { '; var $schemaPath = it.schemaPath + '.type', $errSchemaPath = it.errSchemaPath + '/type'; var $$outStack = $$outStack || []; $$outStack.push(out); out = ''; /* istanbul ignore else */ if (it.createErrors !== false) { out += ' { keyword: \'' + ($errorKeyword || 'type') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { type: \''; if ($typeIsArray) { out += '' + ($typeSchema.join(",")); } else { out += '' + ($typeSchema); } out += '\' } '; if (it.opts.messages !== false) { out += ' , message: \'should be '; if ($typeIsArray) { out += '' + ($typeSchema.join(",")); } else { out += '' + ($typeSchema); } out += '\' '; } if (it.opts.verbose) { out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; } out += ' } '; } else { out += ' {} '; } var __err = out; out = $$outStack.pop(); if (!it.compositeRule && $breakOnError) { /* istanbul ignore if */ if (it.async) { out += ' throw new ValidationError([' + (__err) + ']); '; } else { out += ' validate.errors = [' + (__err) + ']; return false; '; } } else { out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; } out += ' } '; } } if ($breakOnError) { out += ' if (errors === '; if ($top) { out += '0'; } else { out += 'errs_' + ($lvl); } out += ') { '; $closingBraces2 += '}'; } } } } } if ($breakOnError) { out += ' ' + ($closingBraces2) + ' '; } if ($top) { if ($async) { out += ' if (errors === 0) return data; '; out += ' else throw new ValidationError(vErrors); '; } else { out += ' validate.errors = vErrors; '; out += ' return errors === 0; '; } out += ' }); return validate;'; } else { out += ' var ' + ($valid) + ' = errors === errs_' + ($lvl) + ';'; } out = it.util.cleanUpCode(out); if ($top) { out = it.util.finalCleanUpCode(out, $async); } function 
$shouldUseGroup($rulesGroup) { var rules = $rulesGroup.rules; for (var i = 0; i < rules.length; i++) if ($shouldUseRule(rules[i])) return true; } function $shouldUseRule($rule) { return it.schema[$rule.keyword] !== undefined || ($rule.implements && $ruleImplementsSomeKeyword($rule)); } function $ruleImplementsSomeKeyword($rule) { var impl = $rule.implements; for (var i = 0; i < impl.length; i++) if (it.schema[impl[i]] !== undefined) return true; } return out; } ```
```go // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
package remote import ( "bytes" "context" "encoding/json" "errors" "fmt" "net/http" "strings" "sync" "time" "github.com/golang/protobuf/proto" "github.com/golang/snappy" "github.com/prometheus/prometheus/model/labels" promql "github.com/prometheus/prometheus/promql/parser" "github.com/uber-go/tally" "go.uber.org/zap" comparator "github.com/m3db/m3/src/cmd/services/m3comparator/main/parser" "github.com/m3db/m3/src/query/api/v1/handler/prometheus" "github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions" "github.com/m3db/m3/src/query/api/v1/options" "github.com/m3db/m3/src/query/api/v1/route" "github.com/m3db/m3/src/query/block" "github.com/m3db/m3/src/query/executor" "github.com/m3db/m3/src/query/generated/proto/prompb" "github.com/m3db/m3/src/query/models" xpromql "github.com/m3db/m3/src/query/parser/promql" "github.com/m3db/m3/src/query/storage" "github.com/m3db/m3/src/query/ts" "github.com/m3db/m3/src/query/util" "github.com/m3db/m3/src/query/util/logging" xerrors "github.com/m3db/m3/src/x/errors" xhttp "github.com/m3db/m3/src/x/net/http" xtime "github.com/m3db/m3/src/x/time" ) const ( // PromReadURL is the url for remote prom read handler PromReadURL = route.Prefix + "/prom/remote/read" ) // PromReadHTTPMethods are the HTTP methods used with this resource. var PromReadHTTPMethods = []string{http.MethodPost, http.MethodGet} // promReadHandler is a handler for the prometheus remote read endpoint. type promReadHandler struct { promReadMetrics promReadMetrics opts options.HandlerOptions } // NewPromReadHandler returns a new instance of handler. func NewPromReadHandler(opts options.HandlerOptions) http.Handler { taggedScope := opts.InstrumentOpts().MetricsScope(). 
Tagged(map[string]string{"handler": "remote-read"}) return &promReadHandler{ promReadMetrics: newPromReadMetrics(taggedScope), opts: opts, } } type promReadMetrics struct { fetchSuccess tally.Counter fetchErrorsServer tally.Counter fetchErrorsClient tally.Counter fetchTimerSuccess tally.Timer } func newPromReadMetrics(scope tally.Scope) promReadMetrics { return promReadMetrics{ fetchSuccess: scope. Counter("fetch.success"), fetchErrorsServer: scope.Tagged(map[string]string{"code": "5XX"}). Counter("fetch.errors"), fetchErrorsClient: scope.Tagged(map[string]string{"code": "4XX"}). Counter("fetch.errors"), fetchTimerSuccess: scope.Timer("fetch.success.latency"), } } func (m *promReadMetrics) incError(err error) { if xhttp.IsClientError(err) { m.fetchErrorsClient.Inc(1) } else { m.fetchErrorsServer.Inc(1) } } func (h *promReadHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { timer := h.promReadMetrics.fetchTimerSuccess.Start() defer timer.Stop() logger := logging.WithContext(r.Context(), h.opts.InstrumentOpts()) ctx, req, fetchOpts, rErr := ParseRequest(r.Context(), r, h.opts) if rErr != nil { h.promReadMetrics.incError(rErr) logger.Error("remote read query parse error", zap.Error(rErr), zap.Any("req", req), zap.Any("fetchOpts", fetchOpts)) xhttp.WriteError(w, rErr) return } readResult, err := Read(ctx, req, fetchOpts, h.opts) if err != nil { h.promReadMetrics.incError(err) logger.Error("remote read query error", zap.Error(err), zap.Any("req", req), zap.Any("fetchOpts", fetchOpts)) xhttp.WriteError(w, err) return } // Write headers before response. 
err = handleroptions.AddDBResultResponseHeaders(w, readResult.Meta, fetchOpts) if err != nil { h.promReadMetrics.incError(err) logger.Error("remote read query write response header error", zap.Error(err), zap.Any("req", req), zap.Any("fetchOpts", fetchOpts)) xhttp.WriteError(w, err) return } // NB: if this errors, all relevant headers and information should already // be sent to the writer; so it is not necessary to do anything here other // than increment success/failure metrics. switch r.FormValue("format") { case "json": result := readResultsJSON{ Queries: make([]queryResultsJSON, 0, len(req.Queries)), } for i, q := range req.Queries { start := storage.PromTimestampToTime(q.StartTimestampMs) end := storage.PromTimestampToTime(q.EndTimestampMs) all := readResult.Result[i].Timeseries timeseries := make([]comparator.Series, 0, len(all)) for _, s := range all { datapoints := storage.PromSamplesToM3Datapoints(s.Samples) tags := storage.PromLabelsToM3Tags(s.Labels, h.opts.TagOptions()) series := toSeries(datapoints, tags) series.Start = start series.End = end timeseries = append(timeseries, series) } matchers := make([]labelMatcherJSON, 0, len(q.Matchers)) for _, m := range q.Matchers { matcher := labelMatcherJSON{ Type: m.Type.String(), Name: string(m.Name), Value: string(m.Value), } matchers = append(matchers, matcher) } result.Queries = append(result.Queries, queryResultsJSON{ Query: queryJSON{ Matchers: matchers, }, Start: start, End: end, Series: timeseries, }) } w.Header().Set(xhttp.HeaderContentType, xhttp.ContentTypeJSON) err = json.NewEncoder(w).Encode(result) default: err = WriteSnappyCompressed(w, readResult, logger) } if err != nil { h.promReadMetrics.incError(err) } else { h.promReadMetrics.fetchSuccess.Inc(1) } } type readResultsJSON struct { Queries []queryResultsJSON `json:"queries"` } type queryResultsJSON struct { Query queryJSON `json:"query"` Start time.Time `json:"start"` End time.Time `json:"end"` Series []comparator.Series `json:"series"` } type 
queryJSON struct { Matchers []labelMatcherJSON `json:"matchers"` } type labelMatcherJSON struct { Type string `json:"type"` Name string `json:"name"` Value string `json:"value"` } // WriteSnappyCompressed writes snappy compressed results to the given writer. func WriteSnappyCompressed( w http.ResponseWriter, readResult ReadResult, logger *zap.Logger, ) error { resp := &prompb.ReadResponse{ Results: readResult.Result, } data, err := proto.Marshal(resp) if err != nil { logger.Error("unable to marshal read results to protobuf", zap.Error(err)) xhttp.WriteError(w, err) return err } w.Header().Set(xhttp.HeaderContentType, xhttp.ContentTypeProtobuf) w.Header().Set("Content-Encoding", "snappy") compressed := snappy.Encode(nil, data) if _, err := w.Write(compressed); err != nil { logger.Error("unable to encode read results to snappy", zap.Error(err)) xhttp.WriteError(w, err) } return err } func parseCompressedRequest( r *http.Request, ) (*prompb.ReadRequest, error) { result, err := prometheus.ParsePromCompressedRequest(r) if err != nil { return nil, err } var req prompb.ReadRequest if err := proto.Unmarshal(result.UncompressedBody, &req); err != nil { return nil, xerrors.NewInvalidParamsError(err) } return &req, nil } // ReadResult is a read result. type ReadResult struct { Meta block.ResultMetadata Result []*prompb.QueryResult } // ParseExpr parses a prometheus request expression into the constituent // fetches, rather than the full query application. func ParseExpr( r *http.Request, opts xpromql.ParseOptions, ) (*prompb.ReadRequest, error) { expr, err := parseExpr(r, opts) if err != nil { // Always invalid request if parsing fails params. 
return nil, xerrors.NewInvalidParamsError(err) } return expr, nil } func parseExpr( r *http.Request, opts xpromql.ParseOptions, ) (*prompb.ReadRequest, error) { var req *prompb.ReadRequest exprParam := strings.TrimSpace(r.FormValue("query")) if len(exprParam) == 0 { return nil, fmt.Errorf("cannot parse params: no expr") } queryStart, err := util.ParseTimeString(r.FormValue("start")) if err != nil { return nil, err } queryEnd, err := util.ParseTimeString(r.FormValue("end")) if err != nil { return nil, err } fn := opts.ParseFn() req = &prompb.ReadRequest{} expr, err := fn(exprParam) if err != nil { return nil, err } var vectorsInspected []*promql.VectorSelector promql.Inspect(expr, func(node promql.Node, path []promql.Node) error { var ( start = xtime.ToUnixNano(queryStart) end = xtime.ToUnixNano(queryEnd) offset time.Duration labelMatchers []*labels.Matcher ) if n, ok := node.(*promql.MatrixSelector); ok { if n.Range > 0 { start = start.Add(-1 * n.Range) } vectorSelector := n.VectorSelector.(*promql.VectorSelector) // Check already inspected (matrix can be walked further into // child vector selector). for _, existing := range vectorsInspected { if existing == vectorSelector { return nil // Already inspected. } } vectorsInspected = append(vectorsInspected, vectorSelector) offset = vectorSelector.OriginalOffset labelMatchers = vectorSelector.LabelMatchers } else if n, ok := node.(*promql.VectorSelector); ok { // Check already inspected (matrix can be walked further into // child vector selector). for _, existing := range vectorsInspected { if existing == n { return nil // Already inspected. 
} } vectorsInspected = append(vectorsInspected, n) offset = n.OriginalOffset labelMatchers = n.LabelMatchers } else { return nil } if offset > 0 { start = start.Add(-1 * offset) end = end.Add(-1 * offset) } matchers, err := toLabelMatchers(labelMatchers) if err != nil { return err } query := &prompb.Query{ StartTimestampMs: storage.TimeToPromTimestamp(start), EndTimestampMs: storage.TimeToPromTimestamp(end), Matchers: matchers, } req.Queries = append(req.Queries, query) return nil }) return req, nil } // ParseRequest parses the compressed request func ParseRequest( ctx context.Context, r *http.Request, opts options.HandlerOptions, ) (context.Context, *prompb.ReadRequest, *storage.FetchOptions, error) { ctx, req, fetchOpts, err := parseRequest(ctx, r, opts) if err != nil { // Always invalid request if parsing fails params. return nil, nil, nil, xerrors.NewInvalidParamsError(err) } return ctx, req, fetchOpts, nil } func parseRequest( ctx context.Context, r *http.Request, opts options.HandlerOptions, ) (context.Context, *prompb.ReadRequest, *storage.FetchOptions, error) { var ( req *prompb.ReadRequest err error ) switch { case r.Method == http.MethodGet && strings.TrimSpace(r.FormValue("query")) != "": req, err = ParseExpr(r, opts.Engine().Options().ParseOptions()) default: req, err = parseCompressedRequest(r) } if err != nil { return nil, nil, nil, err } ctx, fetchOpts, rErr := opts.FetchOptionsBuilder().NewFetchOptions(ctx, r) if rErr != nil { return nil, nil, nil, rErr } return ctx, req, fetchOpts, nil } // Read performs a remote read on the given engine. 
func Read( ctx context.Context, r *prompb.ReadRequest, fetchOpts *storage.FetchOptions, opts options.HandlerOptions, ) (ReadResult, error) { var ( queryCount = len(r.Queries) cancelFuncs = make([]context.CancelFunc, queryCount) queryResults = make([]*prompb.QueryResult, queryCount) meta = block.NewResultMetadata() queryOpts = &executor.QueryOptions{ QueryContextOptions: models.QueryContextOptions{ LimitMaxTimeseries: fetchOpts.SeriesLimit, LimitMaxDocs: fetchOpts.DocsLimit, LimitMaxReturnedSeries: fetchOpts.ReturnedSeriesLimit, LimitMaxReturnedDatapoints: fetchOpts.ReturnedDatapointsLimit, LimitMaxReturnedSeriesMetadata: fetchOpts.ReturnedSeriesMetadataLimit, }, } engine = opts.Engine() wg sync.WaitGroup mu sync.Mutex multiErr xerrors.MultiError ) wg.Add(queryCount) for i, promQuery := range r.Queries { i, promQuery := i, promQuery // Capture vars for lambda. go func() { ctx, cancel := context.WithTimeout(ctx, fetchOpts.Timeout) defer func() { wg.Done() cancel() }() cancelFuncs[i] = cancel query, err := storage.PromReadQueryToM3(promQuery) if err != nil { mu.Lock() multiErr = multiErr.Add(err) mu.Unlock() return } result, err := engine.ExecuteProm(ctx, query, queryOpts, fetchOpts) if err != nil { mu.Lock() multiErr = multiErr.Add(err) mu.Unlock() return } result.PromResult.Timeseries = filterResults( result.PromResult.GetTimeseries(), fetchOpts) mu.Lock() queryResults[i] = result.PromResult meta = meta.CombineMetadata(result.Metadata) mu.Unlock() }() } wg.Wait() for _, cancel := range cancelFuncs { cancel() } if err := multiErr.FinalError(); err != nil { return ReadResult{Result: nil, Meta: meta}, err } return ReadResult{Result: queryResults, Meta: meta}, nil } // filterResults removes series tags based on options. 
func filterResults( series []*prompb.TimeSeries, opts *storage.FetchOptions, ) []*prompb.TimeSeries { if opts == nil { return series } keys := opts.RestrictQueryOptions.GetRestrictByTag().GetFilterByNames() if len(keys) == 0 { return series } for i, s := range series { series[i].Labels = filterLabels(s.Labels, keys) } return series } func filterLabels( labels []prompb.Label, filtering [][]byte, ) []prompb.Label { if len(filtering) == 0 { return labels } filtered := labels[:0] for _, l := range labels { skip := false for _, f := range filtering { if bytes.Equal(l.GetName(), f) { skip = true break } } if skip { continue } filtered = append(filtered, l) } return filtered } func tagsConvert(ts models.Tags) comparator.Tags { tags := make(comparator.Tags, 0, ts.Len()) for _, t := range ts.Tags { tags = append(tags, comparator.NewTag(string(t.Name), string(t.Value))) } return tags } func datapointsConvert(dps ts.Datapoints) comparator.Datapoints { datapoints := make(comparator.Datapoints, 0, dps.Len()) for _, dp := range dps.Datapoints() { val := comparator.Datapoint{ Value: comparator.Value(dp.Value), Timestamp: dp.Timestamp.ToTime(), } datapoints = append(datapoints, val) } return datapoints } func toSeries(dps ts.Datapoints, tags models.Tags) comparator.Series { return comparator.Series{ Tags: tagsConvert(tags), Datapoints: datapointsConvert(dps), } } func toLabelMatchers(matchers []*labels.Matcher) ([]*prompb.LabelMatcher, error) { pbMatchers := make([]*prompb.LabelMatcher, 0, len(matchers)) for _, m := range matchers { var mType prompb.LabelMatcher_Type switch m.Type { case labels.MatchEqual: mType = prompb.LabelMatcher_EQ case labels.MatchNotEqual: mType = prompb.LabelMatcher_NEQ case labels.MatchRegexp: mType = prompb.LabelMatcher_RE case labels.MatchNotRegexp: mType = prompb.LabelMatcher_NRE default: return nil, errors.New("invalid matcher type") } pbMatchers = append(pbMatchers, &prompb.LabelMatcher{ Type: mType, Name: []byte(m.Name), Value: []byte(m.Value), }) } 
return pbMatchers, nil } ```
```java /* * * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials provided * with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors may be used to * endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ package com.oracle.truffle.llvm.runtime.nodes.intrinsics.interop; import com.oracle.truffle.api.CompilerDirectives.TruffleBoundary; import com.oracle.truffle.api.dsl.Cached; import com.oracle.truffle.api.dsl.NodeChild; import com.oracle.truffle.api.dsl.Specialization; import com.oracle.truffle.api.frame.VirtualFrame; import com.oracle.truffle.llvm.runtime.except.LLVMPolyglotException; import com.oracle.truffle.llvm.runtime.nodes.api.LLVMExpressionNode; import com.oracle.truffle.llvm.runtime.nodes.api.LLVMNode; import com.oracle.truffle.llvm.runtime.pointer.LLVMPointer; import java.nio.ByteBuffer; import java.nio.charset.Charset; @NodeChild(type = LLVMExpressionNode.class) public abstract class LLVMReadCharsetNode extends LLVMNode { @Child LLVMReadStringNode readString = LLVMReadStringNodeGen.create(); public abstract Object execute(VirtualFrame frame); /** * @param pointer @NodeChild * @see LLVMReadCharsetNode */ @Specialization(guards = "cachedPointer.isSame(pointer)") protected LLVMCharset doCachedPointer(LLVMPointer pointer, @Cached("pointer") @SuppressWarnings("unused") LLVMPointer cachedPointer, @Cached("doGeneric(cachedPointer)") LLVMCharset cachedCharset) { return cachedCharset; } /** * @param address @NodeChild * @see LLVMReadCharsetNode */ @Specialization(guards = "address == cachedAddress") protected LLVMCharset doCachedOther(Object address, @Cached("address") @SuppressWarnings("unused") Object cachedAddress, @Cached("doGeneric(cachedAddress)") LLVMCharset cachedCharset) { return cachedCharset; } @Specialization(replaces = {"doCachedPointer", "doCachedOther"}) protected LLVMCharset doGeneric(Object strPtr) { String string = readString.executeWithTarget(strPtr); return lookup(string); } @TruffleBoundary private LLVMCharset lookup(String str) { try { return new LLVMCharset(Charset.forName(str)); } catch (Exception e) { throw new LLVMPolyglotException(this, "Invalid charset '%s'.", str); } } public static final class LLVMCharset { private final Charset 
charset; public final int zeroTerminatorLen; private LLVMCharset(Charset charset) { this.charset = charset; this.zeroTerminatorLen = charset.encode("\0").limit(); } @TruffleBoundary public ByteBuffer encode(String str) { return charset.encode(str); } @TruffleBoundary public String decode(byte[] b) { return charset.decode(ByteBuffer.wrap(b)).toString(); } } } ```
Stara Ruda is a village in the administrative district of Gmina Radzyń Chełmiński, within Grudziądz County, Kuyavian-Pomeranian Voivodeship, in north-central Poland. During the German occupation of Poland (World War II), in 1939, the Germans carried out a massacre of several Poles from Stara Ruda, Radzyń Chełmiński and other nearby settlements in the forest of Stara Ruda, as part of the Intelligenzaktion. References Stara Ruda Massacres of Poles Nazi war crimes in Poland
Scott Lucas (born 30 December 1977) is a former Australian rules footballer for the Essendon Football Club in the Australian Football League, and he is noted as being the other major forward for the Bombers along with Matthew Lloyd during Essedon's turn-of-the-century domination. Together, Lloyd and Lucas were affectionately dubbed the "twin towers" due to their height in the Bomber forward line. Football career Lucas is known for his strong marking and being a powerful, accurate left-foot kick, although his apparent inability to kick with his right foot is almost as notable. Lucas once joked on The Sunday Footy Show in 2006 that the last time he kicked with his right foot was to Gary Moorcroft when he took the famous 2001 Mark of the Year, considered by many to be the greatest AFL mark of all time. Lucas mainly played across half-forward or centre half-forward, but he also played at centre half-back and full-forward. Lucas regularly had shots on goal from outside 60 metres with his booming left foot, and he rarely handballed, averaging 3.09 handballs a game (although it was actually slightly more than Lloyd's career average of 3.01). Nevertheless, Lucas's kick-to-handball ratio was 3.88, whereas Lloyd's was 3.34. In Round 19, 2005, Lucas played his 200th AFL game and kicked his 300th AFL goal against at Telstra Dome, but the milestones would be remembered for all the wrong reasons, as the Bombers lost by 20 points despite leading at every change. In 2006, Lucas had an outstanding year in an underachieving Essendon side, playing at full-forward and being their main target and goalkicker in the absence of captain Matthew Lloyd. Lucas finished the season with a career-best 67 goals, runner-up in the Coleman Medal behind Brendan Fevola, and polled a joint team-high seven votes from just 36 votes received by Essendon players in the 2006 Brownlow Medal. Lucas was one of just three players who played every match for season 2006. 
He capped off this season with his second Crichton Medal, winning with 239 votes from Jobe Watson's 221. Lucas had previously won the club trophy in 2003, when the honours were shared with club legend James Hird. Against West Coast in Round 11, 2007, Lucas booted his 400th career goal. A vintage display also came against West Coast in Round 22 when, despite being close to 50 points down, Lucas rallied in the last quarter in a remarkable solo effort, booting 7 goals (and just missing for a record-equalling eighth), reducing the margin to just 2 points. In the end, though, a late goal to the Eagles saw a gallant Essendon fall 8 points short of one of the greatest-ever AFL comebacks. This was also the last game of James Hird and coach of 27 years, Kevin Sheedy. During a match against the Western Bulldogs in the 2008 NAB Cup, Lucas managed to kick a record 3 Super Goals, which took his career tally to 7, making him the leading super-goal-kicker in the AFL. Lucas suffered a knee injury (specifically, a torn cartilage) in the first round of the 2008 season against North Melbourne and was out of action for a number of weeks. Along with teammate Dustin Fletcher, Lucas signed a one-year contract in mid-2008. He bounced back to play in the second half of the season and kicked an amazing midair soccer goal against Richmond late in the season. On 18 August 2009, Lucas announced his immediate retirement from football. In 2013, Lucas was inducted into the Essendon Football Club Hall of Fame. Playing statistics |- style="background-color: #EAEAEA" ! scope="row" style="text-align:center" | 1996 |style="text-align:center;"| | 25 || 14 || 11 || 11 || 84 || 49 || 133 || 44 || 19 || 0.8 || 0.8 || 6.0 || 3.5 || 9.5 || 3.1 || 1.4 |- ! scope="row" style="text-align:center" | 1997 |style="text-align:center;"| | 25 || 22 || 23 || 11 || 241 || 85 || 326 || 123 || 24 || 1.0 || 0.5 || 11.0 || 3.9 || 14.8 || 5.6 || 1.1 |- style="background:#eaeaea;" ! 
scope="row" style="text-align:center" | 1998 |style="text-align:center;"| | 25 || 23 || 49 || 31 || 287 || 51 || 338 || 123 || 30 || 2.1 || 1.3 || 12.5 || 2.2 || 14.7 || 5.3 || 1.3 |- ! scope="row" style="text-align:center" | 1999 |style="text-align:center;"| | 25 || 10 || 8 || 9 || 106 || 27 || 133 || 45 || 12 || 0.8 || 0.9 || 10.6 || 2.7 || 13.3 || 4.5 || 1.2 |- style="background:#eaeaea;" ! scope="row" style="text-align:center" | 2000 |style="text-align:center;"| | 25 || 23 || 57 || 42 || 265 || 84 || 349 || 132 || 38 || 2.5 || 1.8 || 11.5 || 3.7 || 15.2 || 5.7 || 1.7 |- ! scope="row" style="text-align:center" | 2001 |style="text-align:center;"| | 25 || 25 || 35 || 34 || 297 || 76 || 373 || 135 || 42 || 1.4 || 1.4 || 11.9 || 3.0 || 14.9 || 5.4 || 1.7 |- style="background:#eaeaea;" ! scope="row" style="text-align:center" | 2002 |style="text-align:center;"| | 25 || 19 || 25 || 15 || 235 || 60 || 295 || 108 || 33 || 1.3 || 0.8 || 12.4 || 3.2 || 15.5 || 5.7 || 1.7 |- ! scope="row" style="text-align:center" | 2003 |style="text-align:center;"| | 25 || 24 || 19 || 16 || 334 || 83 || 417 || 130 || 61 || 0.8 || 0.7 || 13.9 || 3.5 || 17.4 || 5.4 || 2.5 |- style="background:#eaeaea;" ! scope="row" style="text-align:center" | 2004 |style="text-align:center;"| | 25 || 21 || 25 || 21 || 275 || 69 || 344 || 108 || 53 || 1.2 || 1.0 || 13.1 || 3.3 || 16.4 || 5.1 || 2.5 |- ! scope="row" style="text-align:center" | 2005 |style="text-align:center;"| | 25 || 22 || 51 || 24 || 289 || 48 || 337 || 134 || 49 || 2.3 || 1.1 || 13.1 || 2.2 || 15.3 || 6.1 || 2.2 |- style="background:#eaeaea;" ! scope="row" style="text-align:center" | 2006 |style="text-align:center;"| | 25 || 22 || 67 || 44 || 310 || 58 || 368 || 183 || 33 || 3.0 || 2.0 || 14.1 || 2.6 || 16.7 || 8.3 || 1.5 |- ! scope="row" style="text-align:center" | 2007 |style="text-align:center;"| | 25 || 22 || 61 || 28 || 292 || 66 || 358 || 159 || 31 || 2.8 || 1.3 || 13.3 || 3.0 || 16.3 || 7.2 || 1.4 |- style="background:#eaeaea;" ! 
scope="row" style="text-align:center" | 2008 |style="text-align:center;"| | 25 || 9 || 18 || 9 || 53 || 17 || 70 || 36 || 12 || 2.0 || 1.0 || 5.9 || 1.9 || 7.8 || 4.0 || 1.3 |- ! scope="row" style="text-align:center" | 2009 |style="text-align:center;"| | 25 || 14 || 22 || 14 || 133 || 62 || 195 || 90 || 33 || 1.6 || 1.0 || 9.5 || 4.4 || 13.9 || 6.4 || 2.4 |- class="sortbottom" ! colspan=3| Career ! 270 ! 471 ! 309 ! 3201 ! 835 ! 4036 ! 1550 ! 470 ! 1.7 ! 1.1 ! 11.9 ! 3.1 ! 14.9 ! 5.7 ! 1.7 |} Personal life On 4 January 2002, Lucas married Georgina Short. They have 3 daughters: Mia (born 12 December 2002), Hannah (born 12 July 2004), and Meg (born 26 May 2006). References External links Essendon Football Club players Essendon Football Club premiership players Crichton Medal winners 1977 births Living people Australian rules footballers from Victoria (state) Geelong Falcons players Greenvale Football Club players Victorian State of Origin players VFL/AFL premiership players
Tomorrow and Tomorrow is a 1997 science fiction novel by Charles Sheffield. The book starts in approximately the year 2020 and follows the protracted adventures of Drake Merlin, in his quest to save his wife from a terminal brain disease, over the course of eons. Similar premises are presented in the 2006 film The Fountain, as well as the Isaac Asimov story "The Last Question". Plot summary Originally, Drake is a professional musician, with minor celebrity. When his wife Ana is diagnosed with an unspecified incurable brain disorder, Drake exhausts every option attempting to cure her. Only then does he decide to have her body cryogenically stored, in the hopes future generations will discover an effective treatment. However, Drake is extremely cautious, and in case the future culture doesn't care about her plight, he has himself frozen as well. Furthermore, he devotes all his energies for a decade before his freezing to becoming an expert primary source on the musically notable people of his era. He correctly assumes that if you become the world's foremost expert in any subject, eventually someone will want to write a book on that exact subject. At that time the hypothetical future writer will want to awaken Drake, and he can in turn awaken his wife, if treatment is available. He is awakened in the year 2512. Although society is vastly different, no cure for Ana yet exists. He spends six years apprenticed to a musical historian to pay for his reviving costs and to gain a foothold in this new world. Drake is continually laid dormant and revived, progressively later into the future, all the way until the time of the Big Crunch. Human civilization alters radically over the eons, but Ana's mangled brain proves an extremely difficult problem. Despite the incomprehensible changes surrounding him in each successive awakening, Drake never loses sight of his mission. 
Eventually, in the extremely remote posthuman future a few billion years later, Drake's original biological body has disintegrated, despite the cryogenic treatment, and he has become an uploaded consciousness, though still in stasis. At this point the descendants of humanity have colonized the entire Milky Way galaxy, yet an inexplicable threat is wiping out their colonies in a widening arc. The leaders of this civilization have exhausted every answer they can conceive of and have no information as to even the cause of the threat. Their last hope is Drake, an ancient holdover, who may have ideas new to them—namely, war. The main problem is that the beings have no idea what is happening because the planets wiped out seem exactly the same, but they do not respond to signals, and outside communication is impossible. All probes sent do not return, nor do they reply once they reach the surface of the planet. Drake becomes the commander of the residents of the galaxy, in designing weapons and defenses, ideas that have long vanished from the minds of these beings. At this time, their technology allows for extremely powerful and deep manipulation of matter at a fundamental scale. An experimental technology called the caesura is used as the plot device to carry the novel. It is a means of instantaneous teleportation using exotic physics, which has by now developed to a stage where it will have no meaning to the causal being. This caesura is not guaranteed teleportation but has a low chance of succeeding. Billions upon billions of copies of Drake are thus sent out to the planets on the border of the invasion, and by means of the caesura they are teleported back to the base to collect information about the threat. Eventually it is discovered that the threat is an exotic interplanetary type of plant life with spores that migrate between systems. These plants do not intentionally destroy the living beings on the planet, but as a result of their growth they do so. 
After the cause of the problem is found, the posthumans decide that the militant Drake is no longer needed or deemed a positive influence—he is seen as too warlike. They tell him to merge with all the returning Drake copies, of which there are billions. This he agrees to, and over billions of years, he collates those copies—forming a collective mind of copies of himself. In one subplot, a version of himself was randomly teleported by the caesura to a distant galaxy, and he manages to return over a few billion years. Finally, the collective version of Drake resolves to use the Omega Point to gain complete knowledge of everything and to restore Ana. The story ends on an ambiguous note as Ana is potentially revived, and they seek to create a new universe by means of the caesura to live in. References 1997 American novels American science fiction novels 1997 science fiction novels Fiction about suspended animation Bantam Spectra books
```c /* * */ #include "sdkconfig.h" #include "esp_efuse.h" #include <assert.h> #include "esp_efuse_table.h" // md5_digest_table e0674ff40a1e124670c6eecf33410e76 // This file was generated from the file esp_efuse_table.csv. DO NOT CHANGE THIS FILE MANUALLY. // If you want to change some fields, you need to change esp_efuse_table.csv file // then run `efuse_common_table` or `efuse_custom_table` command it will generate this file. // To show efuse_table run the command 'show_efuse_table'. static const esp_efuse_desc_t WR_DIS[] = { {EFUSE_BLK0, 0, 32}, // [] Disable programming of individual eFuses, }; static const esp_efuse_desc_t WR_DIS_RD_DIS[] = { {EFUSE_BLK0, 0, 1}, // [] wr_dis of RD_DIS, }; static const esp_efuse_desc_t WR_DIS_DIS_ICACHE[] = { {EFUSE_BLK0, 2, 1}, // [] wr_dis of DIS_ICACHE, }; static const esp_efuse_desc_t WR_DIS_DIS_DCACHE[] = { {EFUSE_BLK0, 2, 1}, // [] wr_dis of DIS_DCACHE, }; static const esp_efuse_desc_t WR_DIS_DIS_DOWNLOAD_ICACHE[] = { {EFUSE_BLK0, 2, 1}, // [] wr_dis of DIS_DOWNLOAD_ICACHE, }; static const esp_efuse_desc_t WR_DIS_DIS_DOWNLOAD_DCACHE[] = { {EFUSE_BLK0, 2, 1}, // [] wr_dis of DIS_DOWNLOAD_DCACHE, }; static const esp_efuse_desc_t WR_DIS_DIS_FORCE_DOWNLOAD[] = { {EFUSE_BLK0, 2, 1}, // [] wr_dis of DIS_FORCE_DOWNLOAD, }; static const esp_efuse_desc_t WR_DIS_DIS_USB_OTG[] = { {EFUSE_BLK0, 2, 1}, // [WR_DIS.DIS_USB] wr_dis of DIS_USB_OTG, }; static const esp_efuse_desc_t WR_DIS_DIS_TWAI[] = { {EFUSE_BLK0, 2, 1}, // [WR_DIS.DIS_CAN] wr_dis of DIS_TWAI, }; static const esp_efuse_desc_t WR_DIS_DIS_APP_CPU[] = { {EFUSE_BLK0, 2, 1}, // [] wr_dis of DIS_APP_CPU, }; static const esp_efuse_desc_t WR_DIS_DIS_PAD_JTAG[] = { {EFUSE_BLK0, 2, 1}, // [WR_DIS.HARD_DIS_JTAG] wr_dis of DIS_PAD_JTAG, }; static const esp_efuse_desc_t WR_DIS_DIS_DOWNLOAD_MANUAL_ENCRYPT[] = { {EFUSE_BLK0, 2, 1}, // [] wr_dis of DIS_DOWNLOAD_MANUAL_ENCRYPT, }; static const esp_efuse_desc_t WR_DIS_DIS_USB_JTAG[] = { {EFUSE_BLK0, 2, 1}, // [] wr_dis of DIS_USB_JTAG, }; 
static const esp_efuse_desc_t WR_DIS_DIS_USB_SERIAL_JTAG[] = { {EFUSE_BLK0, 2, 1}, // [WR_DIS.DIS_USB_DEVICE] wr_dis of DIS_USB_SERIAL_JTAG, }; static const esp_efuse_desc_t WR_DIS_STRAP_JTAG_SEL[] = { {EFUSE_BLK0, 2, 1}, // [] wr_dis of STRAP_JTAG_SEL, }; static const esp_efuse_desc_t WR_DIS_USB_PHY_SEL[] = { {EFUSE_BLK0, 2, 1}, // [] wr_dis of USB_PHY_SEL, }; static const esp_efuse_desc_t WR_DIS_VDD_SPI_XPD[] = { {EFUSE_BLK0, 3, 1}, // [] wr_dis of VDD_SPI_XPD, }; static const esp_efuse_desc_t WR_DIS_VDD_SPI_TIEH[] = { {EFUSE_BLK0, 3, 1}, // [] wr_dis of VDD_SPI_TIEH, }; static const esp_efuse_desc_t WR_DIS_VDD_SPI_FORCE[] = { {EFUSE_BLK0, 3, 1}, // [] wr_dis of VDD_SPI_FORCE, }; static const esp_efuse_desc_t WR_DIS_WDT_DELAY_SEL[] = { {EFUSE_BLK0, 3, 1}, // [] wr_dis of WDT_DELAY_SEL, }; static const esp_efuse_desc_t WR_DIS_SPI_BOOT_CRYPT_CNT[] = { {EFUSE_BLK0, 4, 1}, // [] wr_dis of SPI_BOOT_CRYPT_CNT, }; static const esp_efuse_desc_t WR_DIS_SECURE_BOOT_KEY_REVOKE0[] = { {EFUSE_BLK0, 5, 1}, // [] wr_dis of SECURE_BOOT_KEY_REVOKE0, }; static const esp_efuse_desc_t WR_DIS_SECURE_BOOT_KEY_REVOKE1[] = { {EFUSE_BLK0, 6, 1}, // [] wr_dis of SECURE_BOOT_KEY_REVOKE1, }; static const esp_efuse_desc_t WR_DIS_SECURE_BOOT_KEY_REVOKE2[] = { {EFUSE_BLK0, 7, 1}, // [] wr_dis of SECURE_BOOT_KEY_REVOKE2, }; static const esp_efuse_desc_t WR_DIS_KEY_PURPOSE_0[] = { {EFUSE_BLK0, 8, 1}, // [WR_DIS.KEY0_PURPOSE] wr_dis of KEY_PURPOSE_0, }; static const esp_efuse_desc_t WR_DIS_KEY_PURPOSE_1[] = { {EFUSE_BLK0, 9, 1}, // [WR_DIS.KEY1_PURPOSE] wr_dis of KEY_PURPOSE_1, }; static const esp_efuse_desc_t WR_DIS_KEY_PURPOSE_2[] = { {EFUSE_BLK0, 10, 1}, // [WR_DIS.KEY2_PURPOSE] wr_dis of KEY_PURPOSE_2, }; static const esp_efuse_desc_t WR_DIS_KEY_PURPOSE_3[] = { {EFUSE_BLK0, 11, 1}, // [WR_DIS.KEY3_PURPOSE] wr_dis of KEY_PURPOSE_3, }; static const esp_efuse_desc_t WR_DIS_KEY_PURPOSE_4[] = { {EFUSE_BLK0, 12, 1}, // [WR_DIS.KEY4_PURPOSE] wr_dis of KEY_PURPOSE_4, }; static const esp_efuse_desc_t 
WR_DIS_KEY_PURPOSE_5[] = { {EFUSE_BLK0, 13, 1}, // [WR_DIS.KEY5_PURPOSE] wr_dis of KEY_PURPOSE_5, }; static const esp_efuse_desc_t WR_DIS_SECURE_BOOT_EN[] = { {EFUSE_BLK0, 15, 1}, // [] wr_dis of SECURE_BOOT_EN, }; static const esp_efuse_desc_t WR_DIS_SECURE_BOOT_AGGRESSIVE_REVOKE[] = { {EFUSE_BLK0, 16, 1}, // [] wr_dis of SECURE_BOOT_AGGRESSIVE_REVOKE, }; static const esp_efuse_desc_t WR_DIS_FLASH_TPUW[] = { {EFUSE_BLK0, 18, 1}, // [] wr_dis of FLASH_TPUW, }; static const esp_efuse_desc_t WR_DIS_DIS_DOWNLOAD_MODE[] = { {EFUSE_BLK0, 18, 1}, // [] wr_dis of DIS_DOWNLOAD_MODE, }; static const esp_efuse_desc_t WR_DIS_DIS_DIRECT_BOOT[] = { {EFUSE_BLK0, 18, 1}, // [WR_DIS.DIS_LEGACY_SPI_BOOT] wr_dis of DIS_DIRECT_BOOT, }; static const esp_efuse_desc_t WR_DIS_DIS_USB_SERIAL_JTAG_ROM_PRINT[] = { {EFUSE_BLK0, 18, 1}, // [WR_DIS.UART_PRINT_CHANNEL] wr_dis of DIS_USB_SERIAL_JTAG_ROM_PRINT, }; static const esp_efuse_desc_t WR_DIS_FLASH_ECC_MODE[] = { {EFUSE_BLK0, 18, 1}, // [] wr_dis of FLASH_ECC_MODE, }; static const esp_efuse_desc_t WR_DIS_DIS_USB_SERIAL_JTAG_DOWNLOAD_MODE[] = { {EFUSE_BLK0, 18, 1}, // [WR_DIS.DIS_USB_DOWNLOAD_MODE] wr_dis of DIS_USB_SERIAL_JTAG_DOWNLOAD_MODE, }; static const esp_efuse_desc_t WR_DIS_ENABLE_SECURITY_DOWNLOAD[] = { {EFUSE_BLK0, 18, 1}, // [] wr_dis of ENABLE_SECURITY_DOWNLOAD, }; static const esp_efuse_desc_t WR_DIS_UART_PRINT_CONTROL[] = { {EFUSE_BLK0, 18, 1}, // [] wr_dis of UART_PRINT_CONTROL, }; static const esp_efuse_desc_t WR_DIS_PIN_POWER_SELECTION[] = { {EFUSE_BLK0, 18, 1}, // [] wr_dis of PIN_POWER_SELECTION, }; static const esp_efuse_desc_t WR_DIS_FLASH_TYPE[] = { {EFUSE_BLK0, 18, 1}, // [] wr_dis of FLASH_TYPE, }; static const esp_efuse_desc_t WR_DIS_FLASH_PAGE_SIZE[] = { {EFUSE_BLK0, 18, 1}, // [] wr_dis of FLASH_PAGE_SIZE, }; static const esp_efuse_desc_t WR_DIS_FLASH_ECC_EN[] = { {EFUSE_BLK0, 18, 1}, // [] wr_dis of FLASH_ECC_EN, }; static const esp_efuse_desc_t WR_DIS_FORCE_SEND_RESUME[] = { {EFUSE_BLK0, 18, 1}, // [] wr_dis of 
FORCE_SEND_RESUME, }; static const esp_efuse_desc_t WR_DIS_SECURE_VERSION[] = { {EFUSE_BLK0, 18, 1}, // [] wr_dis of SECURE_VERSION, }; static const esp_efuse_desc_t WR_DIS_DIS_USB_OTG_DOWNLOAD_MODE[] = { {EFUSE_BLK0, 19, 1}, // [] wr_dis of DIS_USB_OTG_DOWNLOAD_MODE, }; static const esp_efuse_desc_t WR_DIS_DISABLE_WAFER_VERSION_MAJOR[] = { {EFUSE_BLK0, 19, 1}, // [] wr_dis of DISABLE_WAFER_VERSION_MAJOR, }; static const esp_efuse_desc_t WR_DIS_DISABLE_BLK_VERSION_MAJOR[] = { {EFUSE_BLK0, 19, 1}, // [] wr_dis of DISABLE_BLK_VERSION_MAJOR, }; static const esp_efuse_desc_t WR_DIS_BLK1[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of BLOCK1, }; static const esp_efuse_desc_t WR_DIS_MAC[] = { {EFUSE_BLK0, 20, 1}, // [WR_DIS.MAC_FACTORY] wr_dis of MAC, }; static const esp_efuse_desc_t WR_DIS_SPI_PAD_CONFIG_CLK[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of SPI_PAD_CONFIG_CLK, }; static const esp_efuse_desc_t WR_DIS_SPI_PAD_CONFIG_Q[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of SPI_PAD_CONFIG_Q, }; static const esp_efuse_desc_t WR_DIS_SPI_PAD_CONFIG_D[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of SPI_PAD_CONFIG_D, }; static const esp_efuse_desc_t WR_DIS_SPI_PAD_CONFIG_CS[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of SPI_PAD_CONFIG_CS, }; static const esp_efuse_desc_t WR_DIS_SPI_PAD_CONFIG_HD[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of SPI_PAD_CONFIG_HD, }; static const esp_efuse_desc_t WR_DIS_SPI_PAD_CONFIG_WP[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of SPI_PAD_CONFIG_WP, }; static const esp_efuse_desc_t WR_DIS_SPI_PAD_CONFIG_DQS[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of SPI_PAD_CONFIG_DQS, }; static const esp_efuse_desc_t WR_DIS_SPI_PAD_CONFIG_D4[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of SPI_PAD_CONFIG_D4, }; static const esp_efuse_desc_t WR_DIS_SPI_PAD_CONFIG_D5[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of SPI_PAD_CONFIG_D5, }; static const esp_efuse_desc_t WR_DIS_SPI_PAD_CONFIG_D6[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of SPI_PAD_CONFIG_D6, }; static const esp_efuse_desc_t 
WR_DIS_SPI_PAD_CONFIG_D7[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of SPI_PAD_CONFIG_D7, }; static const esp_efuse_desc_t WR_DIS_WAFER_VERSION_MINOR_LO[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of WAFER_VERSION_MINOR_LO, }; static const esp_efuse_desc_t WR_DIS_PKG_VERSION[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of PKG_VERSION, }; static const esp_efuse_desc_t WR_DIS_BLK_VERSION_MINOR[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of BLK_VERSION_MINOR, }; static const esp_efuse_desc_t WR_DIS_FLASH_CAP[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of FLASH_CAP, }; static const esp_efuse_desc_t WR_DIS_FLASH_TEMP[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of FLASH_TEMP, }; static const esp_efuse_desc_t WR_DIS_FLASH_VENDOR[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of FLASH_VENDOR, }; static const esp_efuse_desc_t WR_DIS_PSRAM_CAP[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of PSRAM_CAP, }; static const esp_efuse_desc_t WR_DIS_PSRAM_TEMP[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of PSRAM_TEMP, }; static const esp_efuse_desc_t WR_DIS_PSRAM_VENDOR[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of PSRAM_VENDOR, }; static const esp_efuse_desc_t WR_DIS_K_RTC_LDO[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of K_RTC_LDO, }; static const esp_efuse_desc_t WR_DIS_K_DIG_LDO[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of K_DIG_LDO, }; static const esp_efuse_desc_t WR_DIS_V_RTC_DBIAS20[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of V_RTC_DBIAS20, }; static const esp_efuse_desc_t WR_DIS_V_DIG_DBIAS20[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of V_DIG_DBIAS20, }; static const esp_efuse_desc_t WR_DIS_DIG_DBIAS_HVT[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of DIG_DBIAS_HVT, }; static const esp_efuse_desc_t WR_DIS_WAFER_VERSION_MINOR_HI[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of WAFER_VERSION_MINOR_HI, }; static const esp_efuse_desc_t WR_DIS_WAFER_VERSION_MAJOR[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of WAFER_VERSION_MAJOR, }; static const esp_efuse_desc_t WR_DIS_ADC2_CAL_VOL_ATTEN3[] = { {EFUSE_BLK0, 20, 1}, // [] wr_dis of 
ADC2_CAL_VOL_ATTEN3, }; static const esp_efuse_desc_t WR_DIS_SYS_DATA_PART1[] = { {EFUSE_BLK0, 21, 1}, // [] wr_dis of BLOCK2, }; static const esp_efuse_desc_t WR_DIS_OPTIONAL_UNIQUE_ID[] = { {EFUSE_BLK0, 21, 1}, // [] wr_dis of OPTIONAL_UNIQUE_ID, }; static const esp_efuse_desc_t WR_DIS_BLK_VERSION_MAJOR[] = { {EFUSE_BLK0, 21, 1}, // [] wr_dis of BLK_VERSION_MAJOR, }; static const esp_efuse_desc_t WR_DIS_TEMP_CALIB[] = { {EFUSE_BLK0, 21, 1}, // [] wr_dis of TEMP_CALIB, }; static const esp_efuse_desc_t WR_DIS_OCODE[] = { {EFUSE_BLK0, 21, 1}, // [] wr_dis of OCODE, }; static const esp_efuse_desc_t WR_DIS_ADC1_INIT_CODE_ATTEN0[] = { {EFUSE_BLK0, 21, 1}, // [] wr_dis of ADC1_INIT_CODE_ATTEN0, }; static const esp_efuse_desc_t WR_DIS_ADC1_INIT_CODE_ATTEN1[] = { {EFUSE_BLK0, 21, 1}, // [] wr_dis of ADC1_INIT_CODE_ATTEN1, }; static const esp_efuse_desc_t WR_DIS_ADC1_INIT_CODE_ATTEN2[] = { {EFUSE_BLK0, 21, 1}, // [] wr_dis of ADC1_INIT_CODE_ATTEN2, }; static const esp_efuse_desc_t WR_DIS_ADC1_INIT_CODE_ATTEN3[] = { {EFUSE_BLK0, 21, 1}, // [] wr_dis of ADC1_INIT_CODE_ATTEN3, }; static const esp_efuse_desc_t WR_DIS_ADC2_INIT_CODE_ATTEN0[] = { {EFUSE_BLK0, 21, 1}, // [] wr_dis of ADC2_INIT_CODE_ATTEN0, }; static const esp_efuse_desc_t WR_DIS_ADC2_INIT_CODE_ATTEN1[] = { {EFUSE_BLK0, 21, 1}, // [] wr_dis of ADC2_INIT_CODE_ATTEN1, }; static const esp_efuse_desc_t WR_DIS_ADC2_INIT_CODE_ATTEN2[] = { {EFUSE_BLK0, 21, 1}, // [] wr_dis of ADC2_INIT_CODE_ATTEN2, }; static const esp_efuse_desc_t WR_DIS_ADC2_INIT_CODE_ATTEN3[] = { {EFUSE_BLK0, 21, 1}, // [] wr_dis of ADC2_INIT_CODE_ATTEN3, }; static const esp_efuse_desc_t WR_DIS_ADC1_CAL_VOL_ATTEN0[] = { {EFUSE_BLK0, 21, 1}, // [] wr_dis of ADC1_CAL_VOL_ATTEN0, }; static const esp_efuse_desc_t WR_DIS_ADC1_CAL_VOL_ATTEN1[] = { {EFUSE_BLK0, 21, 1}, // [] wr_dis of ADC1_CAL_VOL_ATTEN1, }; static const esp_efuse_desc_t WR_DIS_ADC1_CAL_VOL_ATTEN2[] = { {EFUSE_BLK0, 21, 1}, // [] wr_dis of ADC1_CAL_VOL_ATTEN2, }; static const esp_efuse_desc_t 
WR_DIS_ADC1_CAL_VOL_ATTEN3[] = { {EFUSE_BLK0, 21, 1}, // [] wr_dis of ADC1_CAL_VOL_ATTEN3, }; static const esp_efuse_desc_t WR_DIS_ADC2_CAL_VOL_ATTEN0[] = { {EFUSE_BLK0, 21, 1}, // [] wr_dis of ADC2_CAL_VOL_ATTEN0, }; static const esp_efuse_desc_t WR_DIS_ADC2_CAL_VOL_ATTEN1[] = { {EFUSE_BLK0, 21, 1}, // [] wr_dis of ADC2_CAL_VOL_ATTEN1, }; static const esp_efuse_desc_t WR_DIS_ADC2_CAL_VOL_ATTEN2[] = { {EFUSE_BLK0, 21, 1}, // [] wr_dis of ADC2_CAL_VOL_ATTEN2, }; static const esp_efuse_desc_t WR_DIS_BLOCK_USR_DATA[] = { {EFUSE_BLK0, 22, 1}, // [WR_DIS.USER_DATA] wr_dis of BLOCK_USR_DATA, }; static const esp_efuse_desc_t WR_DIS_CUSTOM_MAC[] = { {EFUSE_BLK0, 22, 1}, // [WR_DIS.MAC_CUSTOM WR_DIS.USER_DATA_MAC_CUSTOM] wr_dis of CUSTOM_MAC, }; static const esp_efuse_desc_t WR_DIS_BLOCK_KEY0[] = { {EFUSE_BLK0, 23, 1}, // [WR_DIS.KEY0] wr_dis of BLOCK_KEY0, }; static const esp_efuse_desc_t WR_DIS_BLOCK_KEY1[] = { {EFUSE_BLK0, 24, 1}, // [WR_DIS.KEY1] wr_dis of BLOCK_KEY1, }; static const esp_efuse_desc_t WR_DIS_BLOCK_KEY2[] = { {EFUSE_BLK0, 25, 1}, // [WR_DIS.KEY2] wr_dis of BLOCK_KEY2, }; static const esp_efuse_desc_t WR_DIS_BLOCK_KEY3[] = { {EFUSE_BLK0, 26, 1}, // [WR_DIS.KEY3] wr_dis of BLOCK_KEY3, }; static const esp_efuse_desc_t WR_DIS_BLOCK_KEY4[] = { {EFUSE_BLK0, 27, 1}, // [WR_DIS.KEY4] wr_dis of BLOCK_KEY4, }; static const esp_efuse_desc_t WR_DIS_BLOCK_KEY5[] = { {EFUSE_BLK0, 28, 1}, // [WR_DIS.KEY5] wr_dis of BLOCK_KEY5, }; static const esp_efuse_desc_t WR_DIS_BLOCK_SYS_DATA2[] = { {EFUSE_BLK0, 29, 1}, // [WR_DIS.SYS_DATA_PART2] wr_dis of BLOCK_SYS_DATA2, }; static const esp_efuse_desc_t WR_DIS_USB_EXCHG_PINS[] = { {EFUSE_BLK0, 30, 1}, // [] wr_dis of USB_EXCHG_PINS, }; static const esp_efuse_desc_t WR_DIS_USB_EXT_PHY_ENABLE[] = { {EFUSE_BLK0, 30, 1}, // [WR_DIS.EXT_PHY_ENABLE] wr_dis of USB_EXT_PHY_ENABLE, }; static const esp_efuse_desc_t WR_DIS_SOFT_DIS_JTAG[] = { {EFUSE_BLK0, 31, 1}, // [] wr_dis of SOFT_DIS_JTAG, }; static const esp_efuse_desc_t RD_DIS[] = { 
{EFUSE_BLK0, 32, 7}, // [] Disable reading from BlOCK4-10, }; static const esp_efuse_desc_t RD_DIS_BLOCK_KEY0[] = { {EFUSE_BLK0, 32, 1}, // [RD_DIS.KEY0] rd_dis of BLOCK_KEY0, }; static const esp_efuse_desc_t RD_DIS_BLOCK_KEY1[] = { {EFUSE_BLK0, 33, 1}, // [RD_DIS.KEY1] rd_dis of BLOCK_KEY1, }; static const esp_efuse_desc_t RD_DIS_BLOCK_KEY2[] = { {EFUSE_BLK0, 34, 1}, // [RD_DIS.KEY2] rd_dis of BLOCK_KEY2, }; static const esp_efuse_desc_t RD_DIS_BLOCK_KEY3[] = { {EFUSE_BLK0, 35, 1}, // [RD_DIS.KEY3] rd_dis of BLOCK_KEY3, }; static const esp_efuse_desc_t RD_DIS_BLOCK_KEY4[] = { {EFUSE_BLK0, 36, 1}, // [RD_DIS.KEY4] rd_dis of BLOCK_KEY4, }; static const esp_efuse_desc_t RD_DIS_BLOCK_KEY5[] = { {EFUSE_BLK0, 37, 1}, // [RD_DIS.KEY5] rd_dis of BLOCK_KEY5, }; static const esp_efuse_desc_t RD_DIS_BLOCK_SYS_DATA2[] = { {EFUSE_BLK0, 38, 1}, // [RD_DIS.SYS_DATA_PART2] rd_dis of BLOCK_SYS_DATA2, }; static const esp_efuse_desc_t DIS_ICACHE[] = { {EFUSE_BLK0, 40, 1}, // [] Set this bit to disable Icache, }; static const esp_efuse_desc_t DIS_DCACHE[] = { {EFUSE_BLK0, 41, 1}, // [] Set this bit to disable Dcache, }; static const esp_efuse_desc_t DIS_DOWNLOAD_ICACHE[] = { {EFUSE_BLK0, 42, 1}, // [] Set this bit to disable Icache in download mode (boot_mode[3:0] is 0; 1; 2; 3; 6; 7), }; static const esp_efuse_desc_t DIS_DOWNLOAD_DCACHE[] = { {EFUSE_BLK0, 43, 1}, // [] Set this bit to disable Dcache in download mode ( boot_mode[3:0] is 0; 1; 2; 3; 6; 7), }; static const esp_efuse_desc_t DIS_FORCE_DOWNLOAD[] = { {EFUSE_BLK0, 44, 1}, // [] Set this bit to disable the function that forces chip into download mode, }; static const esp_efuse_desc_t DIS_USB_OTG[] = { {EFUSE_BLK0, 45, 1}, // [DIS_USB] Set this bit to disable USB function, }; static const esp_efuse_desc_t DIS_TWAI[] = { {EFUSE_BLK0, 46, 1}, // [DIS_CAN] Set this bit to disable CAN function, }; static const esp_efuse_desc_t DIS_APP_CPU[] = { {EFUSE_BLK0, 47, 1}, // [] Disable app cpu, }; static const esp_efuse_desc_t 
SOFT_DIS_JTAG[] = { {EFUSE_BLK0, 48, 3}, // [] Set these bits to disable JTAG in the soft way (odd number 1 means disable ). JTAG can be enabled in HMAC module, }; static const esp_efuse_desc_t DIS_PAD_JTAG[] = { {EFUSE_BLK0, 51, 1}, // [HARD_DIS_JTAG] Set this bit to disable JTAG in the hard way. JTAG is disabled permanently, }; static const esp_efuse_desc_t DIS_DOWNLOAD_MANUAL_ENCRYPT[] = { {EFUSE_BLK0, 52, 1}, // [] Set this bit to disable flash encryption when in download boot modes, }; static const esp_efuse_desc_t USB_EXCHG_PINS[] = { {EFUSE_BLK0, 57, 1}, // [] Set this bit to exchange USB D+ and D- pins, }; static const esp_efuse_desc_t USB_EXT_PHY_ENABLE[] = { {EFUSE_BLK0, 58, 1}, // [EXT_PHY_ENABLE] Set this bit to enable external PHY, }; static const esp_efuse_desc_t VDD_SPI_XPD[] = { {EFUSE_BLK0, 68, 1}, // [] SPI regulator power up signal, }; static const esp_efuse_desc_t VDD_SPI_TIEH[] = { {EFUSE_BLK0, 69, 1}, // [] If VDD_SPI_FORCE is 1; determines VDD_SPI voltage {0: "VDD_SPI connects to 1.8 V LDO"; 1: "VDD_SPI connects to VDD3P3_RTC_IO"}, }; static const esp_efuse_desc_t VDD_SPI_FORCE[] = { {EFUSE_BLK0, 70, 1}, // [] Set this bit and force to use the configuration of eFuse to configure VDD_SPI, }; static const esp_efuse_desc_t WDT_DELAY_SEL[] = { {EFUSE_BLK0, 80, 2}, // [] RTC watchdog timeout threshold; in unit of slow clock cycle {0: "40000"; 1: "80000"; 2: "160000"; 3: "320000"}, }; static const esp_efuse_desc_t SPI_BOOT_CRYPT_CNT[] = { {EFUSE_BLK0, 82, 3}, // [] Enables flash encryption when 1 or 3 bits are set and disabled otherwise {0: "Disable"; 1: "Enable"; 3: "Disable"; 7: "Enable"}, }; static const esp_efuse_desc_t SECURE_BOOT_KEY_REVOKE0[] = { {EFUSE_BLK0, 85, 1}, // [] Revoke 1st secure boot key, }; static const esp_efuse_desc_t SECURE_BOOT_KEY_REVOKE1[] = { {EFUSE_BLK0, 86, 1}, // [] Revoke 2nd secure boot key, }; static const esp_efuse_desc_t SECURE_BOOT_KEY_REVOKE2[] = { {EFUSE_BLK0, 87, 1}, // [] Revoke 3rd secure boot key, }; static 
const esp_efuse_desc_t KEY_PURPOSE_0[] = { {EFUSE_BLK0, 88, 4}, // [KEY0_PURPOSE] Purpose of Key0, }; static const esp_efuse_desc_t KEY_PURPOSE_1[] = { {EFUSE_BLK0, 92, 4}, // [KEY1_PURPOSE] Purpose of Key1, }; static const esp_efuse_desc_t KEY_PURPOSE_2[] = { {EFUSE_BLK0, 96, 4}, // [KEY2_PURPOSE] Purpose of Key2, }; static const esp_efuse_desc_t KEY_PURPOSE_3[] = { {EFUSE_BLK0, 100, 4}, // [KEY3_PURPOSE] Purpose of Key3, }; static const esp_efuse_desc_t KEY_PURPOSE_4[] = { {EFUSE_BLK0, 104, 4}, // [KEY4_PURPOSE] Purpose of Key4, }; static const esp_efuse_desc_t KEY_PURPOSE_5[] = { {EFUSE_BLK0, 108, 4}, // [KEY5_PURPOSE] Purpose of Key5, }; static const esp_efuse_desc_t SECURE_BOOT_EN[] = { {EFUSE_BLK0, 116, 1}, // [] Set this bit to enable secure boot, }; static const esp_efuse_desc_t SECURE_BOOT_AGGRESSIVE_REVOKE[] = { {EFUSE_BLK0, 117, 1}, // [] Set this bit to enable revoking aggressive secure boot, }; static const esp_efuse_desc_t DIS_USB_JTAG[] = { {EFUSE_BLK0, 118, 1}, // [] Set this bit to disable function of usb switch to jtag in module of usb device, }; static const esp_efuse_desc_t DIS_USB_SERIAL_JTAG[] = { {EFUSE_BLK0, 119, 1}, // [DIS_USB_DEVICE] Set this bit to disable usb device, }; static const esp_efuse_desc_t STRAP_JTAG_SEL[] = { {EFUSE_BLK0, 120, 1}, // [] Set this bit to enable selection between usb_to_jtag and pad_to_jtag through strapping gpio10 when both reg_dis_usb_jtag and reg_dis_pad_jtag are equal to 0, }; static const esp_efuse_desc_t USB_PHY_SEL[] = { {EFUSE_BLK0, 121, 1}, // [] This bit is used to switch internal PHY and external PHY for USB OTG and USB Device {0: "internal PHY is assigned to USB Device while external PHY is assigned to USB OTG"; 1: "internal PHY is assigned to USB OTG while external PHY is assigned to USB Device"}, }; static const esp_efuse_desc_t FLASH_TPUW[] = { {EFUSE_BLK0, 124, 4}, // [] Configures flash waiting time after power-up; in unit of ms. 
If the value is less than 15; the waiting time is the configurable value. Otherwise; the waiting time is twice the configurable value, }; static const esp_efuse_desc_t DIS_DOWNLOAD_MODE[] = { {EFUSE_BLK0, 128, 1}, // [] Set this bit to disable download mode (boot_mode[3:0] = 0; 1; 2; 3; 6; 7), }; static const esp_efuse_desc_t DIS_DIRECT_BOOT[] = { {EFUSE_BLK0, 129, 1}, // [DIS_LEGACY_SPI_BOOT] Disable direct boot mode, }; static const esp_efuse_desc_t DIS_USB_SERIAL_JTAG_ROM_PRINT[] = { {EFUSE_BLK0, 130, 1}, // [UART_PRINT_CHANNEL] USB printing {0: "Enable"; 1: "Disable"}, }; static const esp_efuse_desc_t FLASH_ECC_MODE[] = { {EFUSE_BLK0, 131, 1}, // [] Flash ECC mode in ROM {0: "16to18 byte"; 1: "16to17 byte"}, }; static const esp_efuse_desc_t DIS_USB_SERIAL_JTAG_DOWNLOAD_MODE[] = { {EFUSE_BLK0, 132, 1}, // [DIS_USB_DOWNLOAD_MODE] Set this bit to disable UART download mode through USB, }; static const esp_efuse_desc_t ENABLE_SECURITY_DOWNLOAD[] = { {EFUSE_BLK0, 133, 1}, // [] Set this bit to enable secure UART download mode, }; static const esp_efuse_desc_t UART_PRINT_CONTROL[] = { {EFUSE_BLK0, 134, 2}, // [] Set the default UART boot message output mode {0: "Enable"; 1: "Enable when GPIO46 is low at reset"; 2: "Enable when GPIO46 is high at reset"; 3: "Disable"}, }; static const esp_efuse_desc_t PIN_POWER_SELECTION[] = { {EFUSE_BLK0, 136, 1}, // [] Set default power supply for GPIO33-GPIO37; set when SPI flash is initialized {0: "VDD3P3_CPU"; 1: "VDD_SPI"}, }; static const esp_efuse_desc_t FLASH_TYPE[] = { {EFUSE_BLK0, 137, 1}, // [] SPI flash type {0: "4 data lines"; 1: "8 data lines"}, }; static const esp_efuse_desc_t FLASH_PAGE_SIZE[] = { {EFUSE_BLK0, 138, 2}, // [] Set Flash page size, }; static const esp_efuse_desc_t FLASH_ECC_EN[] = { {EFUSE_BLK0, 140, 1}, // [] Set 1 to enable ECC for flash boot, }; static const esp_efuse_desc_t FORCE_SEND_RESUME[] = { {EFUSE_BLK0, 141, 1}, // [] Set this bit to force ROM code to send a resume command during SPI boot, }; 
static const esp_efuse_desc_t SECURE_VERSION[] = {
    {EFUSE_BLK0, 142, 16},   // [] Secure version (used by ESP-IDF anti-rollback feature),
};

static const esp_efuse_desc_t DIS_USB_OTG_DOWNLOAD_MODE[] = {
    {EFUSE_BLK0, 159, 1},    // [] Set this bit to disable download through USB-OTG,
};

static const esp_efuse_desc_t DISABLE_WAFER_VERSION_MAJOR[] = {
    {EFUSE_BLK0, 160, 1},    // [] Disables check of wafer version major,
};

static const esp_efuse_desc_t DISABLE_BLK_VERSION_MAJOR[] = {
    {EFUSE_BLK0, 161, 1},    // [] Disables check of blk version major,
};

static const esp_efuse_desc_t MAC[] = {
    {EFUSE_BLK1, 40, 8},     // [MAC_FACTORY] MAC address,
    {EFUSE_BLK1, 32, 8},     // [MAC_FACTORY] MAC address,
    {EFUSE_BLK1, 24, 8},     // [MAC_FACTORY] MAC address,
    {EFUSE_BLK1, 16, 8},     // [MAC_FACTORY] MAC address,
    {EFUSE_BLK1, 8, 8},      // [MAC_FACTORY] MAC address,
    {EFUSE_BLK1, 0, 8},      // [MAC_FACTORY] MAC address,
};

static const esp_efuse_desc_t SPI_PAD_CONFIG_CLK[] = {
    {EFUSE_BLK1, 48, 6},     // [] SPI_PAD_configure CLK,
};

static const esp_efuse_desc_t SPI_PAD_CONFIG_Q[] = {
    {EFUSE_BLK1, 54, 6},     // [] SPI_PAD_configure Q(D1),
};

static const esp_efuse_desc_t SPI_PAD_CONFIG_D[] = {
    {EFUSE_BLK1, 60, 6},     // [] SPI_PAD_configure D(D0),
};

static const esp_efuse_desc_t SPI_PAD_CONFIG_CS[] = {
    {EFUSE_BLK1, 66, 6},     // [] SPI_PAD_configure CS,
};

static const esp_efuse_desc_t SPI_PAD_CONFIG_HD[] = {
    {EFUSE_BLK1, 72, 6},     // [] SPI_PAD_configure HD(D3),
};

static const esp_efuse_desc_t SPI_PAD_CONFIG_WP[] = {
    {EFUSE_BLK1, 78, 6},     // [] SPI_PAD_configure WP(D2),
};

static const esp_efuse_desc_t SPI_PAD_CONFIG_DQS[] = {
    {EFUSE_BLK1, 84, 6},     // [] SPI_PAD_configure DQS,
};

static const esp_efuse_desc_t SPI_PAD_CONFIG_D4[] = {
    {EFUSE_BLK1, 90, 6},     // [] SPI_PAD_configure D4,
};

static const esp_efuse_desc_t SPI_PAD_CONFIG_D5[] = {
    {EFUSE_BLK1, 96, 6},     // [] SPI_PAD_configure D5,
};

static const esp_efuse_desc_t SPI_PAD_CONFIG_D6[] = {
    {EFUSE_BLK1, 102, 6},    // [] SPI_PAD_configure D6,
};

static const esp_efuse_desc_t SPI_PAD_CONFIG_D7[] = {
    {EFUSE_BLK1, 108, 6},    // [] SPI_PAD_configure D7,
};

static const esp_efuse_desc_t WAFER_VERSION_MINOR_LO[] = {
    {EFUSE_BLK1, 114, 3},    // [] WAFER_VERSION_MINOR least significant bits,
};

static const esp_efuse_desc_t PKG_VERSION[] = {
    {EFUSE_BLK1, 117, 3},    // [] Package version,
};

static const esp_efuse_desc_t BLK_VERSION_MINOR[] = {
    {EFUSE_BLK1, 120, 3},    // [] BLK_VERSION_MINOR,
};

static const esp_efuse_desc_t FLASH_CAP[] = {
    {EFUSE_BLK1, 123, 3},    // [] Flash capacity {0: "None"; 1: "8M"; 2: "4M"},
};

static const esp_efuse_desc_t FLASH_TEMP[] = {
    {EFUSE_BLK1, 126, 2},    // [] Flash temperature {0: "None"; 1: "105C"; 2: "85C"},
};

static const esp_efuse_desc_t FLASH_VENDOR[] = {
    {EFUSE_BLK1, 128, 3},    // [] Flash vendor {0: "None"; 1: "XMC"; 2: "GD"; 3: "FM"; 4: "TT"; 5: "BY"},
};

static const esp_efuse_desc_t PSRAM_CAP[] = {
    {EFUSE_BLK1, 131, 2},    // [] PSRAM capacity {0: "None"; 1: "8M"; 2: "2M"},
};

static const esp_efuse_desc_t PSRAM_TEMP[] = {
    {EFUSE_BLK1, 133, 2},    // [] PSRAM temperature {0: "None"; 1: "105C"; 2: "85C"},
};

static const esp_efuse_desc_t PSRAM_VENDOR[] = {
    {EFUSE_BLK1, 135, 2},    // [] PSRAM vendor {0: "None"; 1: "AP_3v3"; 2: "AP_1v8"},
};

static const esp_efuse_desc_t K_RTC_LDO[] = {
    {EFUSE_BLK1, 141, 7},    // [] BLOCK1 K_RTC_LDO,
};

static const esp_efuse_desc_t K_DIG_LDO[] = {
    {EFUSE_BLK1, 148, 7},    // [] BLOCK1 K_DIG_LDO,
};

static const esp_efuse_desc_t V_RTC_DBIAS20[] = {
    {EFUSE_BLK1, 155, 8},    // [] BLOCK1 voltage of rtc dbias20,
};

static const esp_efuse_desc_t V_DIG_DBIAS20[] = {
    {EFUSE_BLK1, 163, 8},    // [] BLOCK1 voltage of digital dbias20,
};

static const esp_efuse_desc_t DIG_DBIAS_HVT[] = {
    {EFUSE_BLK1, 171, 5},    // [] BLOCK1 digital dbias when hvt,
};

static const esp_efuse_desc_t WAFER_VERSION_MINOR_HI[] = {
    {EFUSE_BLK1, 183, 1},    // [] WAFER_VERSION_MINOR most significant bit,
};

static const esp_efuse_desc_t WAFER_VERSION_MAJOR[] = {
    {EFUSE_BLK1, 184, 2},    // [] WAFER_VERSION_MAJOR,
};

static const esp_efuse_desc_t ADC2_CAL_VOL_ATTEN3[] = {
    {EFUSE_BLK1, 186, 6},    // [] ADC2 calibration voltage at atten3,
};

static const esp_efuse_desc_t OPTIONAL_UNIQUE_ID[] = {
    {EFUSE_BLK2, 0, 128},    // [] Optional unique 128-bit ID,
};

static const esp_efuse_desc_t BLK_VERSION_MAJOR[] = {
    {EFUSE_BLK2, 128, 2},    // [] BLK_VERSION_MAJOR of BLOCK2 {0: "No calib"; 1: "ADC calib V1"},
};

static const esp_efuse_desc_t TEMP_CALIB[] = {
    {EFUSE_BLK2, 132, 9},    // [] Temperature calibration data,
};

static const esp_efuse_desc_t OCODE[] = {
    {EFUSE_BLK2, 141, 8},    // [] ADC OCode,
};

static const esp_efuse_desc_t ADC1_INIT_CODE_ATTEN0[] = {
    {EFUSE_BLK2, 149, 8},    // [] ADC1 init code at atten0,
};

static const esp_efuse_desc_t ADC1_INIT_CODE_ATTEN1[] = {
    {EFUSE_BLK2, 157, 6},    // [] ADC1 init code at atten1,
};

static const esp_efuse_desc_t ADC1_INIT_CODE_ATTEN2[] = {
    {EFUSE_BLK2, 163, 6},    // [] ADC1 init code at atten2,
};

static const esp_efuse_desc_t ADC1_INIT_CODE_ATTEN3[] = {
    {EFUSE_BLK2, 169, 6},    // [] ADC1 init code at atten3,
};

static const esp_efuse_desc_t ADC2_INIT_CODE_ATTEN0[] = {
    {EFUSE_BLK2, 175, 8},    // [] ADC2 init code at atten0,
};

static const esp_efuse_desc_t ADC2_INIT_CODE_ATTEN1[] = {
    {EFUSE_BLK2, 183, 6},    // [] ADC2 init code at atten1,
};

static const esp_efuse_desc_t ADC2_INIT_CODE_ATTEN2[] = {
    {EFUSE_BLK2, 189, 6},    // [] ADC2 init code at atten2,
};

static const esp_efuse_desc_t ADC2_INIT_CODE_ATTEN3[] = {
    {EFUSE_BLK2, 195, 6},    // [] ADC2 init code at atten3,
};

static const esp_efuse_desc_t ADC1_CAL_VOL_ATTEN0[] = {
    {EFUSE_BLK2, 201, 8},    // [] ADC1 calibration voltage at atten0,
};

static const esp_efuse_desc_t ADC1_CAL_VOL_ATTEN1[] = {
    {EFUSE_BLK2, 209, 8},    // [] ADC1 calibration voltage at atten1,
};

static const esp_efuse_desc_t ADC1_CAL_VOL_ATTEN2[] = {
    {EFUSE_BLK2, 217, 8},    // [] ADC1 calibration voltage at atten2,
};

static const esp_efuse_desc_t ADC1_CAL_VOL_ATTEN3[] = {
    {EFUSE_BLK2, 225, 8},    // [] ADC1 calibration voltage at atten3,
};

static const esp_efuse_desc_t ADC2_CAL_VOL_ATTEN0[] = {
    {EFUSE_BLK2, 233, 8},    // [] ADC2 calibration voltage at atten0,
};

static const esp_efuse_desc_t ADC2_CAL_VOL_ATTEN1[] = {
    {EFUSE_BLK2, 241, 7},    // [] ADC2 calibration voltage at atten1,
};

static const esp_efuse_desc_t ADC2_CAL_VOL_ATTEN2[] = {
    {EFUSE_BLK2, 248, 7},    // [] ADC2 calibration voltage at atten2,
};

static const esp_efuse_desc_t USER_DATA[] = {
    {EFUSE_BLK3, 0, 256},    // [BLOCK_USR_DATA] User data,
};

static const esp_efuse_desc_t USER_DATA_MAC_CUSTOM[] = {
    {EFUSE_BLK3, 200, 48},   // [MAC_CUSTOM CUSTOM_MAC] Custom MAC,
};

static const esp_efuse_desc_t KEY0[] = {
    {EFUSE_BLK4, 0, 256},    // [BLOCK_KEY0] Key0 or user data,
};

static const esp_efuse_desc_t KEY1[] = {
    {EFUSE_BLK5, 0, 256},    // [BLOCK_KEY1] Key1 or user data,
};

static const esp_efuse_desc_t KEY2[] = {
    {EFUSE_BLK6, 0, 256},    // [BLOCK_KEY2] Key2 or user data,
};

static const esp_efuse_desc_t KEY3[] = {
    {EFUSE_BLK7, 0, 256},    // [BLOCK_KEY3] Key3 or user data,
};

static const esp_efuse_desc_t KEY4[] = {
    {EFUSE_BLK8, 0, 256},    // [BLOCK_KEY4] Key4 or user data,
};

static const esp_efuse_desc_t KEY5[] = {
    {EFUSE_BLK9, 0, 256},    // [BLOCK_KEY5] Key5 or user data,
};

static const esp_efuse_desc_t SYS_DATA_PART2[] = {
    {EFUSE_BLK10, 0, 256},   // [BLOCK_SYS_DATA2] System data part 2 (reserved),
};

const esp_efuse_desc_t* ESP_EFUSE_WR_DIS[] = {
    &WR_DIS[0],              // [] Disable programming of individual eFuses
    NULL
};

const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_RD_DIS[] = {
    &WR_DIS_RD_DIS[0],       // [] wr_dis of RD_DIS
    NULL
};

const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_DIS_ICACHE[] = {
    &WR_DIS_DIS_ICACHE[0],   // [] wr_dis of DIS_ICACHE
    NULL
};

const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_DIS_DCACHE[] = {
    &WR_DIS_DIS_DCACHE[0],   // [] wr_dis of DIS_DCACHE
    NULL
};

const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_DIS_DOWNLOAD_ICACHE[] = {
    &WR_DIS_DIS_DOWNLOAD_ICACHE[0],    // [] wr_dis of DIS_DOWNLOAD_ICACHE
    NULL
};

const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_DIS_DOWNLOAD_DCACHE[] = {
&WR_DIS_DIS_DOWNLOAD_DCACHE[0], // [] wr_dis of DIS_DOWNLOAD_DCACHE NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_DIS_FORCE_DOWNLOAD[] = { &WR_DIS_DIS_FORCE_DOWNLOAD[0], // [] wr_dis of DIS_FORCE_DOWNLOAD NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_DIS_USB_OTG[] = { &WR_DIS_DIS_USB_OTG[0], // [WR_DIS.DIS_USB] wr_dis of DIS_USB_OTG NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_DIS_TWAI[] = { &WR_DIS_DIS_TWAI[0], // [WR_DIS.DIS_CAN] wr_dis of DIS_TWAI NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_DIS_APP_CPU[] = { &WR_DIS_DIS_APP_CPU[0], // [] wr_dis of DIS_APP_CPU NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_DIS_PAD_JTAG[] = { &WR_DIS_DIS_PAD_JTAG[0], // [WR_DIS.HARD_DIS_JTAG] wr_dis of DIS_PAD_JTAG NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_DIS_DOWNLOAD_MANUAL_ENCRYPT[] = { &WR_DIS_DIS_DOWNLOAD_MANUAL_ENCRYPT[0], // [] wr_dis of DIS_DOWNLOAD_MANUAL_ENCRYPT NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_DIS_USB_JTAG[] = { &WR_DIS_DIS_USB_JTAG[0], // [] wr_dis of DIS_USB_JTAG NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_DIS_USB_SERIAL_JTAG[] = { &WR_DIS_DIS_USB_SERIAL_JTAG[0], // [WR_DIS.DIS_USB_DEVICE] wr_dis of DIS_USB_SERIAL_JTAG NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_STRAP_JTAG_SEL[] = { &WR_DIS_STRAP_JTAG_SEL[0], // [] wr_dis of STRAP_JTAG_SEL NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_USB_PHY_SEL[] = { &WR_DIS_USB_PHY_SEL[0], // [] wr_dis of USB_PHY_SEL NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_VDD_SPI_XPD[] = { &WR_DIS_VDD_SPI_XPD[0], // [] wr_dis of VDD_SPI_XPD NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_VDD_SPI_TIEH[] = { &WR_DIS_VDD_SPI_TIEH[0], // [] wr_dis of VDD_SPI_TIEH NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_VDD_SPI_FORCE[] = { &WR_DIS_VDD_SPI_FORCE[0], // [] wr_dis of VDD_SPI_FORCE NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_WDT_DELAY_SEL[] = { &WR_DIS_WDT_DELAY_SEL[0], // [] wr_dis of WDT_DELAY_SEL NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_SPI_BOOT_CRYPT_CNT[] = { 
&WR_DIS_SPI_BOOT_CRYPT_CNT[0], // [] wr_dis of SPI_BOOT_CRYPT_CNT NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_SECURE_BOOT_KEY_REVOKE0[] = { &WR_DIS_SECURE_BOOT_KEY_REVOKE0[0], // [] wr_dis of SECURE_BOOT_KEY_REVOKE0 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_SECURE_BOOT_KEY_REVOKE1[] = { &WR_DIS_SECURE_BOOT_KEY_REVOKE1[0], // [] wr_dis of SECURE_BOOT_KEY_REVOKE1 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_SECURE_BOOT_KEY_REVOKE2[] = { &WR_DIS_SECURE_BOOT_KEY_REVOKE2[0], // [] wr_dis of SECURE_BOOT_KEY_REVOKE2 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_KEY_PURPOSE_0[] = { &WR_DIS_KEY_PURPOSE_0[0], // [WR_DIS.KEY0_PURPOSE] wr_dis of KEY_PURPOSE_0 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_KEY_PURPOSE_1[] = { &WR_DIS_KEY_PURPOSE_1[0], // [WR_DIS.KEY1_PURPOSE] wr_dis of KEY_PURPOSE_1 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_KEY_PURPOSE_2[] = { &WR_DIS_KEY_PURPOSE_2[0], // [WR_DIS.KEY2_PURPOSE] wr_dis of KEY_PURPOSE_2 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_KEY_PURPOSE_3[] = { &WR_DIS_KEY_PURPOSE_3[0], // [WR_DIS.KEY3_PURPOSE] wr_dis of KEY_PURPOSE_3 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_KEY_PURPOSE_4[] = { &WR_DIS_KEY_PURPOSE_4[0], // [WR_DIS.KEY4_PURPOSE] wr_dis of KEY_PURPOSE_4 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_KEY_PURPOSE_5[] = { &WR_DIS_KEY_PURPOSE_5[0], // [WR_DIS.KEY5_PURPOSE] wr_dis of KEY_PURPOSE_5 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_SECURE_BOOT_EN[] = { &WR_DIS_SECURE_BOOT_EN[0], // [] wr_dis of SECURE_BOOT_EN NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_SECURE_BOOT_AGGRESSIVE_REVOKE[] = { &WR_DIS_SECURE_BOOT_AGGRESSIVE_REVOKE[0], // [] wr_dis of SECURE_BOOT_AGGRESSIVE_REVOKE NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_FLASH_TPUW[] = { &WR_DIS_FLASH_TPUW[0], // [] wr_dis of FLASH_TPUW NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_DIS_DOWNLOAD_MODE[] = { &WR_DIS_DIS_DOWNLOAD_MODE[0], // [] wr_dis of DIS_DOWNLOAD_MODE NULL }; const esp_efuse_desc_t* 
ESP_EFUSE_WR_DIS_DIS_DIRECT_BOOT[] = { &WR_DIS_DIS_DIRECT_BOOT[0], // [WR_DIS.DIS_LEGACY_SPI_BOOT] wr_dis of DIS_DIRECT_BOOT NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_DIS_USB_SERIAL_JTAG_ROM_PRINT[] = { &WR_DIS_DIS_USB_SERIAL_JTAG_ROM_PRINT[0], // [WR_DIS.UART_PRINT_CHANNEL] wr_dis of DIS_USB_SERIAL_JTAG_ROM_PRINT NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_FLASH_ECC_MODE[] = { &WR_DIS_FLASH_ECC_MODE[0], // [] wr_dis of FLASH_ECC_MODE NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_DIS_USB_SERIAL_JTAG_DOWNLOAD_MODE[] = { &WR_DIS_DIS_USB_SERIAL_JTAG_DOWNLOAD_MODE[0], // [WR_DIS.DIS_USB_DOWNLOAD_MODE] wr_dis of DIS_USB_SERIAL_JTAG_DOWNLOAD_MODE NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_ENABLE_SECURITY_DOWNLOAD[] = { &WR_DIS_ENABLE_SECURITY_DOWNLOAD[0], // [] wr_dis of ENABLE_SECURITY_DOWNLOAD NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_UART_PRINT_CONTROL[] = { &WR_DIS_UART_PRINT_CONTROL[0], // [] wr_dis of UART_PRINT_CONTROL NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_PIN_POWER_SELECTION[] = { &WR_DIS_PIN_POWER_SELECTION[0], // [] wr_dis of PIN_POWER_SELECTION NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_FLASH_TYPE[] = { &WR_DIS_FLASH_TYPE[0], // [] wr_dis of FLASH_TYPE NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_FLASH_PAGE_SIZE[] = { &WR_DIS_FLASH_PAGE_SIZE[0], // [] wr_dis of FLASH_PAGE_SIZE NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_FLASH_ECC_EN[] = { &WR_DIS_FLASH_ECC_EN[0], // [] wr_dis of FLASH_ECC_EN NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_FORCE_SEND_RESUME[] = { &WR_DIS_FORCE_SEND_RESUME[0], // [] wr_dis of FORCE_SEND_RESUME NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_SECURE_VERSION[] = { &WR_DIS_SECURE_VERSION[0], // [] wr_dis of SECURE_VERSION NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_DIS_USB_OTG_DOWNLOAD_MODE[] = { &WR_DIS_DIS_USB_OTG_DOWNLOAD_MODE[0], // [] wr_dis of DIS_USB_OTG_DOWNLOAD_MODE NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_DISABLE_WAFER_VERSION_MAJOR[] = { 
&WR_DIS_DISABLE_WAFER_VERSION_MAJOR[0], // [] wr_dis of DISABLE_WAFER_VERSION_MAJOR NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_DISABLE_BLK_VERSION_MAJOR[] = { &WR_DIS_DISABLE_BLK_VERSION_MAJOR[0], // [] wr_dis of DISABLE_BLK_VERSION_MAJOR NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_BLK1[] = { &WR_DIS_BLK1[0], // [] wr_dis of BLOCK1 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_MAC[] = { &WR_DIS_MAC[0], // [WR_DIS.MAC_FACTORY] wr_dis of MAC NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_SPI_PAD_CONFIG_CLK[] = { &WR_DIS_SPI_PAD_CONFIG_CLK[0], // [] wr_dis of SPI_PAD_CONFIG_CLK NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_SPI_PAD_CONFIG_Q[] = { &WR_DIS_SPI_PAD_CONFIG_Q[0], // [] wr_dis of SPI_PAD_CONFIG_Q NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_SPI_PAD_CONFIG_D[] = { &WR_DIS_SPI_PAD_CONFIG_D[0], // [] wr_dis of SPI_PAD_CONFIG_D NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_SPI_PAD_CONFIG_CS[] = { &WR_DIS_SPI_PAD_CONFIG_CS[0], // [] wr_dis of SPI_PAD_CONFIG_CS NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_SPI_PAD_CONFIG_HD[] = { &WR_DIS_SPI_PAD_CONFIG_HD[0], // [] wr_dis of SPI_PAD_CONFIG_HD NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_SPI_PAD_CONFIG_WP[] = { &WR_DIS_SPI_PAD_CONFIG_WP[0], // [] wr_dis of SPI_PAD_CONFIG_WP NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_SPI_PAD_CONFIG_DQS[] = { &WR_DIS_SPI_PAD_CONFIG_DQS[0], // [] wr_dis of SPI_PAD_CONFIG_DQS NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_SPI_PAD_CONFIG_D4[] = { &WR_DIS_SPI_PAD_CONFIG_D4[0], // [] wr_dis of SPI_PAD_CONFIG_D4 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_SPI_PAD_CONFIG_D5[] = { &WR_DIS_SPI_PAD_CONFIG_D5[0], // [] wr_dis of SPI_PAD_CONFIG_D5 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_SPI_PAD_CONFIG_D6[] = { &WR_DIS_SPI_PAD_CONFIG_D6[0], // [] wr_dis of SPI_PAD_CONFIG_D6 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_SPI_PAD_CONFIG_D7[] = { &WR_DIS_SPI_PAD_CONFIG_D7[0], // [] wr_dis of SPI_PAD_CONFIG_D7 NULL }; const esp_efuse_desc_t* 
ESP_EFUSE_WR_DIS_WAFER_VERSION_MINOR_LO[] = { &WR_DIS_WAFER_VERSION_MINOR_LO[0], // [] wr_dis of WAFER_VERSION_MINOR_LO NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_PKG_VERSION[] = { &WR_DIS_PKG_VERSION[0], // [] wr_dis of PKG_VERSION NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_BLK_VERSION_MINOR[] = { &WR_DIS_BLK_VERSION_MINOR[0], // [] wr_dis of BLK_VERSION_MINOR NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_FLASH_CAP[] = { &WR_DIS_FLASH_CAP[0], // [] wr_dis of FLASH_CAP NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_FLASH_TEMP[] = { &WR_DIS_FLASH_TEMP[0], // [] wr_dis of FLASH_TEMP NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_FLASH_VENDOR[] = { &WR_DIS_FLASH_VENDOR[0], // [] wr_dis of FLASH_VENDOR NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_PSRAM_CAP[] = { &WR_DIS_PSRAM_CAP[0], // [] wr_dis of PSRAM_CAP NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_PSRAM_TEMP[] = { &WR_DIS_PSRAM_TEMP[0], // [] wr_dis of PSRAM_TEMP NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_PSRAM_VENDOR[] = { &WR_DIS_PSRAM_VENDOR[0], // [] wr_dis of PSRAM_VENDOR NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_K_RTC_LDO[] = { &WR_DIS_K_RTC_LDO[0], // [] wr_dis of K_RTC_LDO NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_K_DIG_LDO[] = { &WR_DIS_K_DIG_LDO[0], // [] wr_dis of K_DIG_LDO NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_V_RTC_DBIAS20[] = { &WR_DIS_V_RTC_DBIAS20[0], // [] wr_dis of V_RTC_DBIAS20 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_V_DIG_DBIAS20[] = { &WR_DIS_V_DIG_DBIAS20[0], // [] wr_dis of V_DIG_DBIAS20 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_DIG_DBIAS_HVT[] = { &WR_DIS_DIG_DBIAS_HVT[0], // [] wr_dis of DIG_DBIAS_HVT NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_WAFER_VERSION_MINOR_HI[] = { &WR_DIS_WAFER_VERSION_MINOR_HI[0], // [] wr_dis of WAFER_VERSION_MINOR_HI NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_WAFER_VERSION_MAJOR[] = { &WR_DIS_WAFER_VERSION_MAJOR[0], // [] wr_dis of WAFER_VERSION_MAJOR NULL }; const 
esp_efuse_desc_t* ESP_EFUSE_WR_DIS_ADC2_CAL_VOL_ATTEN3[] = { &WR_DIS_ADC2_CAL_VOL_ATTEN3[0], // [] wr_dis of ADC2_CAL_VOL_ATTEN3 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_SYS_DATA_PART1[] = { &WR_DIS_SYS_DATA_PART1[0], // [] wr_dis of BLOCK2 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_OPTIONAL_UNIQUE_ID[] = { &WR_DIS_OPTIONAL_UNIQUE_ID[0], // [] wr_dis of OPTIONAL_UNIQUE_ID NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_BLK_VERSION_MAJOR[] = { &WR_DIS_BLK_VERSION_MAJOR[0], // [] wr_dis of BLK_VERSION_MAJOR NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_TEMP_CALIB[] = { &WR_DIS_TEMP_CALIB[0], // [] wr_dis of TEMP_CALIB NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_OCODE[] = { &WR_DIS_OCODE[0], // [] wr_dis of OCODE NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_ADC1_INIT_CODE_ATTEN0[] = { &WR_DIS_ADC1_INIT_CODE_ATTEN0[0], // [] wr_dis of ADC1_INIT_CODE_ATTEN0 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_ADC1_INIT_CODE_ATTEN1[] = { &WR_DIS_ADC1_INIT_CODE_ATTEN1[0], // [] wr_dis of ADC1_INIT_CODE_ATTEN1 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_ADC1_INIT_CODE_ATTEN2[] = { &WR_DIS_ADC1_INIT_CODE_ATTEN2[0], // [] wr_dis of ADC1_INIT_CODE_ATTEN2 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_ADC1_INIT_CODE_ATTEN3[] = { &WR_DIS_ADC1_INIT_CODE_ATTEN3[0], // [] wr_dis of ADC1_INIT_CODE_ATTEN3 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_ADC2_INIT_CODE_ATTEN0[] = { &WR_DIS_ADC2_INIT_CODE_ATTEN0[0], // [] wr_dis of ADC2_INIT_CODE_ATTEN0 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_ADC2_INIT_CODE_ATTEN1[] = { &WR_DIS_ADC2_INIT_CODE_ATTEN1[0], // [] wr_dis of ADC2_INIT_CODE_ATTEN1 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_ADC2_INIT_CODE_ATTEN2[] = { &WR_DIS_ADC2_INIT_CODE_ATTEN2[0], // [] wr_dis of ADC2_INIT_CODE_ATTEN2 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_ADC2_INIT_CODE_ATTEN3[] = { &WR_DIS_ADC2_INIT_CODE_ATTEN3[0], // [] wr_dis of ADC2_INIT_CODE_ATTEN3 NULL }; const esp_efuse_desc_t* 
ESP_EFUSE_WR_DIS_ADC1_CAL_VOL_ATTEN0[] = { &WR_DIS_ADC1_CAL_VOL_ATTEN0[0], // [] wr_dis of ADC1_CAL_VOL_ATTEN0 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_ADC1_CAL_VOL_ATTEN1[] = { &WR_DIS_ADC1_CAL_VOL_ATTEN1[0], // [] wr_dis of ADC1_CAL_VOL_ATTEN1 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_ADC1_CAL_VOL_ATTEN2[] = { &WR_DIS_ADC1_CAL_VOL_ATTEN2[0], // [] wr_dis of ADC1_CAL_VOL_ATTEN2 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_ADC1_CAL_VOL_ATTEN3[] = { &WR_DIS_ADC1_CAL_VOL_ATTEN3[0], // [] wr_dis of ADC1_CAL_VOL_ATTEN3 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_ADC2_CAL_VOL_ATTEN0[] = { &WR_DIS_ADC2_CAL_VOL_ATTEN0[0], // [] wr_dis of ADC2_CAL_VOL_ATTEN0 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_ADC2_CAL_VOL_ATTEN1[] = { &WR_DIS_ADC2_CAL_VOL_ATTEN1[0], // [] wr_dis of ADC2_CAL_VOL_ATTEN1 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_ADC2_CAL_VOL_ATTEN2[] = { &WR_DIS_ADC2_CAL_VOL_ATTEN2[0], // [] wr_dis of ADC2_CAL_VOL_ATTEN2 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_BLOCK_USR_DATA[] = { &WR_DIS_BLOCK_USR_DATA[0], // [WR_DIS.USER_DATA] wr_dis of BLOCK_USR_DATA NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_CUSTOM_MAC[] = { &WR_DIS_CUSTOM_MAC[0], // [WR_DIS.MAC_CUSTOM WR_DIS.USER_DATA_MAC_CUSTOM] wr_dis of CUSTOM_MAC NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_BLOCK_KEY0[] = { &WR_DIS_BLOCK_KEY0[0], // [WR_DIS.KEY0] wr_dis of BLOCK_KEY0 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_BLOCK_KEY1[] = { &WR_DIS_BLOCK_KEY1[0], // [WR_DIS.KEY1] wr_dis of BLOCK_KEY1 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_BLOCK_KEY2[] = { &WR_DIS_BLOCK_KEY2[0], // [WR_DIS.KEY2] wr_dis of BLOCK_KEY2 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_BLOCK_KEY3[] = { &WR_DIS_BLOCK_KEY3[0], // [WR_DIS.KEY3] wr_dis of BLOCK_KEY3 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_BLOCK_KEY4[] = { &WR_DIS_BLOCK_KEY4[0], // [WR_DIS.KEY4] wr_dis of BLOCK_KEY4 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_BLOCK_KEY5[] = { &WR_DIS_BLOCK_KEY5[0], 
// [WR_DIS.KEY5] wr_dis of BLOCK_KEY5 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_BLOCK_SYS_DATA2[] = { &WR_DIS_BLOCK_SYS_DATA2[0], // [WR_DIS.SYS_DATA_PART2] wr_dis of BLOCK_SYS_DATA2 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_USB_EXCHG_PINS[] = { &WR_DIS_USB_EXCHG_PINS[0], // [] wr_dis of USB_EXCHG_PINS NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_USB_EXT_PHY_ENABLE[] = { &WR_DIS_USB_EXT_PHY_ENABLE[0], // [WR_DIS.EXT_PHY_ENABLE] wr_dis of USB_EXT_PHY_ENABLE NULL }; const esp_efuse_desc_t* ESP_EFUSE_WR_DIS_SOFT_DIS_JTAG[] = { &WR_DIS_SOFT_DIS_JTAG[0], // [] wr_dis of SOFT_DIS_JTAG NULL }; const esp_efuse_desc_t* ESP_EFUSE_RD_DIS[] = { &RD_DIS[0], // [] Disable reading from BlOCK4-10 NULL }; const esp_efuse_desc_t* ESP_EFUSE_RD_DIS_BLOCK_KEY0[] = { &RD_DIS_BLOCK_KEY0[0], // [RD_DIS.KEY0] rd_dis of BLOCK_KEY0 NULL }; const esp_efuse_desc_t* ESP_EFUSE_RD_DIS_BLOCK_KEY1[] = { &RD_DIS_BLOCK_KEY1[0], // [RD_DIS.KEY1] rd_dis of BLOCK_KEY1 NULL }; const esp_efuse_desc_t* ESP_EFUSE_RD_DIS_BLOCK_KEY2[] = { &RD_DIS_BLOCK_KEY2[0], // [RD_DIS.KEY2] rd_dis of BLOCK_KEY2 NULL }; const esp_efuse_desc_t* ESP_EFUSE_RD_DIS_BLOCK_KEY3[] = { &RD_DIS_BLOCK_KEY3[0], // [RD_DIS.KEY3] rd_dis of BLOCK_KEY3 NULL }; const esp_efuse_desc_t* ESP_EFUSE_RD_DIS_BLOCK_KEY4[] = { &RD_DIS_BLOCK_KEY4[0], // [RD_DIS.KEY4] rd_dis of BLOCK_KEY4 NULL }; const esp_efuse_desc_t* ESP_EFUSE_RD_DIS_BLOCK_KEY5[] = { &RD_DIS_BLOCK_KEY5[0], // [RD_DIS.KEY5] rd_dis of BLOCK_KEY5 NULL }; const esp_efuse_desc_t* ESP_EFUSE_RD_DIS_BLOCK_SYS_DATA2[] = { &RD_DIS_BLOCK_SYS_DATA2[0], // [RD_DIS.SYS_DATA_PART2] rd_dis of BLOCK_SYS_DATA2 NULL }; const esp_efuse_desc_t* ESP_EFUSE_DIS_ICACHE[] = { &DIS_ICACHE[0], // [] Set this bit to disable Icache NULL }; const esp_efuse_desc_t* ESP_EFUSE_DIS_DCACHE[] = { &DIS_DCACHE[0], // [] Set this bit to disable Dcache NULL }; const esp_efuse_desc_t* ESP_EFUSE_DIS_DOWNLOAD_ICACHE[] = { &DIS_DOWNLOAD_ICACHE[0], // [] Set this bit to disable Icache in download mode 
(boot_mode[3:0] is 0; 1; 2; 3; 6; 7) NULL }; const esp_efuse_desc_t* ESP_EFUSE_DIS_DOWNLOAD_DCACHE[] = { &DIS_DOWNLOAD_DCACHE[0], // [] Set this bit to disable Dcache in download mode ( boot_mode[3:0] is 0; 1; 2; 3; 6; 7) NULL }; const esp_efuse_desc_t* ESP_EFUSE_DIS_FORCE_DOWNLOAD[] = { &DIS_FORCE_DOWNLOAD[0], // [] Set this bit to disable the function that forces chip into download mode NULL }; const esp_efuse_desc_t* ESP_EFUSE_DIS_USB_OTG[] = { &DIS_USB_OTG[0], // [DIS_USB] Set this bit to disable USB function NULL }; const esp_efuse_desc_t* ESP_EFUSE_DIS_TWAI[] = { &DIS_TWAI[0], // [DIS_CAN] Set this bit to disable CAN function NULL }; const esp_efuse_desc_t* ESP_EFUSE_DIS_APP_CPU[] = { &DIS_APP_CPU[0], // [] Disable app cpu NULL }; const esp_efuse_desc_t* ESP_EFUSE_SOFT_DIS_JTAG[] = { &SOFT_DIS_JTAG[0], // [] Set these bits to disable JTAG in the soft way (odd number 1 means disable ). JTAG can be enabled in HMAC module NULL }; const esp_efuse_desc_t* ESP_EFUSE_DIS_PAD_JTAG[] = { &DIS_PAD_JTAG[0], // [HARD_DIS_JTAG] Set this bit to disable JTAG in the hard way. 
JTAG is disabled permanently NULL }; const esp_efuse_desc_t* ESP_EFUSE_DIS_DOWNLOAD_MANUAL_ENCRYPT[] = { &DIS_DOWNLOAD_MANUAL_ENCRYPT[0], // [] Set this bit to disable flash encryption when in download boot modes NULL }; const esp_efuse_desc_t* ESP_EFUSE_USB_EXCHG_PINS[] = { &USB_EXCHG_PINS[0], // [] Set this bit to exchange USB D+ and D- pins NULL }; const esp_efuse_desc_t* ESP_EFUSE_USB_EXT_PHY_ENABLE[] = { &USB_EXT_PHY_ENABLE[0], // [EXT_PHY_ENABLE] Set this bit to enable external PHY NULL }; const esp_efuse_desc_t* ESP_EFUSE_VDD_SPI_XPD[] = { &VDD_SPI_XPD[0], // [] SPI regulator power up signal NULL }; const esp_efuse_desc_t* ESP_EFUSE_VDD_SPI_TIEH[] = { &VDD_SPI_TIEH[0], // [] If VDD_SPI_FORCE is 1; determines VDD_SPI voltage {0: "VDD_SPI connects to 1.8 V LDO"; 1: "VDD_SPI connects to VDD3P3_RTC_IO"} NULL }; const esp_efuse_desc_t* ESP_EFUSE_VDD_SPI_FORCE[] = { &VDD_SPI_FORCE[0], // [] Set this bit and force to use the configuration of eFuse to configure VDD_SPI NULL }; const esp_efuse_desc_t* ESP_EFUSE_WDT_DELAY_SEL[] = { &WDT_DELAY_SEL[0], // [] RTC watchdog timeout threshold; in unit of slow clock cycle {0: "40000"; 1: "80000"; 2: "160000"; 3: "320000"} NULL }; const esp_efuse_desc_t* ESP_EFUSE_SPI_BOOT_CRYPT_CNT[] = { &SPI_BOOT_CRYPT_CNT[0], // [] Enables flash encryption when 1 or 3 bits are set and disabled otherwise {0: "Disable"; 1: "Enable"; 3: "Disable"; 7: "Enable"} NULL }; const esp_efuse_desc_t* ESP_EFUSE_SECURE_BOOT_KEY_REVOKE0[] = { &SECURE_BOOT_KEY_REVOKE0[0], // [] Revoke 1st secure boot key NULL }; const esp_efuse_desc_t* ESP_EFUSE_SECURE_BOOT_KEY_REVOKE1[] = { &SECURE_BOOT_KEY_REVOKE1[0], // [] Revoke 2nd secure boot key NULL }; const esp_efuse_desc_t* ESP_EFUSE_SECURE_BOOT_KEY_REVOKE2[] = { &SECURE_BOOT_KEY_REVOKE2[0], // [] Revoke 3rd secure boot key NULL }; const esp_efuse_desc_t* ESP_EFUSE_KEY_PURPOSE_0[] = { &KEY_PURPOSE_0[0], // [KEY0_PURPOSE] Purpose of Key0 NULL }; const esp_efuse_desc_t* ESP_EFUSE_KEY_PURPOSE_1[] = { 
&KEY_PURPOSE_1[0], // [KEY1_PURPOSE] Purpose of Key1 NULL }; const esp_efuse_desc_t* ESP_EFUSE_KEY_PURPOSE_2[] = { &KEY_PURPOSE_2[0], // [KEY2_PURPOSE] Purpose of Key2 NULL }; const esp_efuse_desc_t* ESP_EFUSE_KEY_PURPOSE_3[] = { &KEY_PURPOSE_3[0], // [KEY3_PURPOSE] Purpose of Key3 NULL }; const esp_efuse_desc_t* ESP_EFUSE_KEY_PURPOSE_4[] = { &KEY_PURPOSE_4[0], // [KEY4_PURPOSE] Purpose of Key4 NULL }; const esp_efuse_desc_t* ESP_EFUSE_KEY_PURPOSE_5[] = { &KEY_PURPOSE_5[0], // [KEY5_PURPOSE] Purpose of Key5 NULL }; const esp_efuse_desc_t* ESP_EFUSE_SECURE_BOOT_EN[] = { &SECURE_BOOT_EN[0], // [] Set this bit to enable secure boot NULL }; const esp_efuse_desc_t* ESP_EFUSE_SECURE_BOOT_AGGRESSIVE_REVOKE[] = { &SECURE_BOOT_AGGRESSIVE_REVOKE[0], // [] Set this bit to enable revoking aggressive secure boot NULL }; const esp_efuse_desc_t* ESP_EFUSE_DIS_USB_JTAG[] = { &DIS_USB_JTAG[0], // [] Set this bit to disable function of usb switch to jtag in module of usb device NULL }; const esp_efuse_desc_t* ESP_EFUSE_DIS_USB_SERIAL_JTAG[] = { &DIS_USB_SERIAL_JTAG[0], // [DIS_USB_DEVICE] Set this bit to disable usb device NULL }; const esp_efuse_desc_t* ESP_EFUSE_STRAP_JTAG_SEL[] = { &STRAP_JTAG_SEL[0], // [] Set this bit to enable selection between usb_to_jtag and pad_to_jtag through strapping gpio10 when both reg_dis_usb_jtag and reg_dis_pad_jtag are equal to 0 NULL }; const esp_efuse_desc_t* ESP_EFUSE_USB_PHY_SEL[] = { &USB_PHY_SEL[0], // [] This bit is used to switch internal PHY and external PHY for USB OTG and USB Device {0: "internal PHY is assigned to USB Device while external PHY is assigned to USB OTG"; 1: "internal PHY is assigned to USB OTG while external PHY is assigned to USB Device"} NULL }; const esp_efuse_desc_t* ESP_EFUSE_FLASH_TPUW[] = { &FLASH_TPUW[0], // [] Configures flash waiting time after power-up; in unit of ms. If the value is less than 15; the waiting time is the configurable value. 
Otherwise; the waiting time is twice the configurable value NULL }; const esp_efuse_desc_t* ESP_EFUSE_DIS_DOWNLOAD_MODE[] = { &DIS_DOWNLOAD_MODE[0], // [] Set this bit to disable download mode (boot_mode[3:0] = 0; 1; 2; 3; 6; 7) NULL }; const esp_efuse_desc_t* ESP_EFUSE_DIS_DIRECT_BOOT[] = { &DIS_DIRECT_BOOT[0], // [DIS_LEGACY_SPI_BOOT] Disable direct boot mode NULL }; const esp_efuse_desc_t* ESP_EFUSE_DIS_USB_SERIAL_JTAG_ROM_PRINT[] = { &DIS_USB_SERIAL_JTAG_ROM_PRINT[0], // [UART_PRINT_CHANNEL] USB printing {0: "Enable"; 1: "Disable"} NULL }; const esp_efuse_desc_t* ESP_EFUSE_FLASH_ECC_MODE[] = { &FLASH_ECC_MODE[0], // [] Flash ECC mode in ROM {0: "16to18 byte"; 1: "16to17 byte"} NULL }; const esp_efuse_desc_t* ESP_EFUSE_DIS_USB_SERIAL_JTAG_DOWNLOAD_MODE[] = { &DIS_USB_SERIAL_JTAG_DOWNLOAD_MODE[0], // [DIS_USB_DOWNLOAD_MODE] Set this bit to disable UART download mode through USB NULL }; const esp_efuse_desc_t* ESP_EFUSE_ENABLE_SECURITY_DOWNLOAD[] = { &ENABLE_SECURITY_DOWNLOAD[0], // [] Set this bit to enable secure UART download mode NULL }; const esp_efuse_desc_t* ESP_EFUSE_UART_PRINT_CONTROL[] = { &UART_PRINT_CONTROL[0], // [] Set the default UART boot message output mode {0: "Enable"; 1: "Enable when GPIO46 is low at reset"; 2: "Enable when GPIO46 is high at reset"; 3: "Disable"} NULL }; const esp_efuse_desc_t* ESP_EFUSE_PIN_POWER_SELECTION[] = { &PIN_POWER_SELECTION[0], // [] Set default power supply for GPIO33-GPIO37; set when SPI flash is initialized {0: "VDD3P3_CPU"; 1: "VDD_SPI"} NULL }; const esp_efuse_desc_t* ESP_EFUSE_FLASH_TYPE[] = { &FLASH_TYPE[0], // [] SPI flash type {0: "4 data lines"; 1: "8 data lines"} NULL }; const esp_efuse_desc_t* ESP_EFUSE_FLASH_PAGE_SIZE[] = { &FLASH_PAGE_SIZE[0], // [] Set Flash page size NULL }; const esp_efuse_desc_t* ESP_EFUSE_FLASH_ECC_EN[] = { &FLASH_ECC_EN[0], // [] Set 1 to enable ECC for flash boot NULL }; const esp_efuse_desc_t* ESP_EFUSE_FORCE_SEND_RESUME[] = { &FORCE_SEND_RESUME[0], // [] Set this bit to force 
ROM code to send a resume command during SPI boot NULL }; const esp_efuse_desc_t* ESP_EFUSE_SECURE_VERSION[] = { &SECURE_VERSION[0], // [] Secure version (used by ESP-IDF anti-rollback feature) NULL }; const esp_efuse_desc_t* ESP_EFUSE_DIS_USB_OTG_DOWNLOAD_MODE[] = { &DIS_USB_OTG_DOWNLOAD_MODE[0], // [] Set this bit to disable download through USB-OTG NULL }; const esp_efuse_desc_t* ESP_EFUSE_DISABLE_WAFER_VERSION_MAJOR[] = { &DISABLE_WAFER_VERSION_MAJOR[0], // [] Disables check of wafer version major NULL }; const esp_efuse_desc_t* ESP_EFUSE_DISABLE_BLK_VERSION_MAJOR[] = { &DISABLE_BLK_VERSION_MAJOR[0], // [] Disables check of blk version major NULL }; const esp_efuse_desc_t* ESP_EFUSE_MAC[] = { &MAC[0], // [MAC_FACTORY] MAC address &MAC[1], // [MAC_FACTORY] MAC address &MAC[2], // [MAC_FACTORY] MAC address &MAC[3], // [MAC_FACTORY] MAC address &MAC[4], // [MAC_FACTORY] MAC address &MAC[5], // [MAC_FACTORY] MAC address NULL }; const esp_efuse_desc_t* ESP_EFUSE_SPI_PAD_CONFIG_CLK[] = { &SPI_PAD_CONFIG_CLK[0], // [] SPI_PAD_configure CLK NULL }; const esp_efuse_desc_t* ESP_EFUSE_SPI_PAD_CONFIG_Q[] = { &SPI_PAD_CONFIG_Q[0], // [] SPI_PAD_configure Q(D1) NULL }; const esp_efuse_desc_t* ESP_EFUSE_SPI_PAD_CONFIG_D[] = { &SPI_PAD_CONFIG_D[0], // [] SPI_PAD_configure D(D0) NULL }; const esp_efuse_desc_t* ESP_EFUSE_SPI_PAD_CONFIG_CS[] = { &SPI_PAD_CONFIG_CS[0], // [] SPI_PAD_configure CS NULL }; const esp_efuse_desc_t* ESP_EFUSE_SPI_PAD_CONFIG_HD[] = { &SPI_PAD_CONFIG_HD[0], // [] SPI_PAD_configure HD(D3) NULL }; const esp_efuse_desc_t* ESP_EFUSE_SPI_PAD_CONFIG_WP[] = { &SPI_PAD_CONFIG_WP[0], // [] SPI_PAD_configure WP(D2) NULL }; const esp_efuse_desc_t* ESP_EFUSE_SPI_PAD_CONFIG_DQS[] = { &SPI_PAD_CONFIG_DQS[0], // [] SPI_PAD_configure DQS NULL }; const esp_efuse_desc_t* ESP_EFUSE_SPI_PAD_CONFIG_D4[] = { &SPI_PAD_CONFIG_D4[0], // [] SPI_PAD_configure D4 NULL }; const esp_efuse_desc_t* ESP_EFUSE_SPI_PAD_CONFIG_D5[] = { &SPI_PAD_CONFIG_D5[0], // [] SPI_PAD_configure D5 NULL 
}; const esp_efuse_desc_t* ESP_EFUSE_SPI_PAD_CONFIG_D6[] = { &SPI_PAD_CONFIG_D6[0], // [] SPI_PAD_configure D6 NULL }; const esp_efuse_desc_t* ESP_EFUSE_SPI_PAD_CONFIG_D7[] = { &SPI_PAD_CONFIG_D7[0], // [] SPI_PAD_configure D7 NULL }; const esp_efuse_desc_t* ESP_EFUSE_WAFER_VERSION_MINOR_LO[] = { &WAFER_VERSION_MINOR_LO[0], // [] WAFER_VERSION_MINOR least significant bits NULL }; const esp_efuse_desc_t* ESP_EFUSE_PKG_VERSION[] = { &PKG_VERSION[0], // [] Package version NULL }; const esp_efuse_desc_t* ESP_EFUSE_BLK_VERSION_MINOR[] = { &BLK_VERSION_MINOR[0], // [] BLK_VERSION_MINOR NULL }; const esp_efuse_desc_t* ESP_EFUSE_FLASH_CAP[] = { &FLASH_CAP[0], // [] Flash capacity {0: "None"; 1: "8M"; 2: "4M"} NULL }; const esp_efuse_desc_t* ESP_EFUSE_FLASH_TEMP[] = { &FLASH_TEMP[0], // [] Flash temperature {0: "None"; 1: "105C"; 2: "85C"} NULL }; const esp_efuse_desc_t* ESP_EFUSE_FLASH_VENDOR[] = { &FLASH_VENDOR[0], // [] Flash vendor {0: "None"; 1: "XMC"; 2: "GD"; 3: "FM"; 4: "TT"; 5: "BY"} NULL }; const esp_efuse_desc_t* ESP_EFUSE_PSRAM_CAP[] = { &PSRAM_CAP[0], // [] PSRAM capacity {0: "None"; 1: "8M"; 2: "2M"} NULL }; const esp_efuse_desc_t* ESP_EFUSE_PSRAM_TEMP[] = { &PSRAM_TEMP[0], // [] PSRAM temperature {0: "None"; 1: "105C"; 2: "85C"} NULL }; const esp_efuse_desc_t* ESP_EFUSE_PSRAM_VENDOR[] = { &PSRAM_VENDOR[0], // [] PSRAM vendor {0: "None"; 1: "AP_3v3"; 2: "AP_1v8"} NULL }; const esp_efuse_desc_t* ESP_EFUSE_K_RTC_LDO[] = { &K_RTC_LDO[0], // [] BLOCK1 K_RTC_LDO NULL }; const esp_efuse_desc_t* ESP_EFUSE_K_DIG_LDO[] = { &K_DIG_LDO[0], // [] BLOCK1 K_DIG_LDO NULL }; const esp_efuse_desc_t* ESP_EFUSE_V_RTC_DBIAS20[] = { &V_RTC_DBIAS20[0], // [] BLOCK1 voltage of rtc dbias20 NULL }; const esp_efuse_desc_t* ESP_EFUSE_V_DIG_DBIAS20[] = { &V_DIG_DBIAS20[0], // [] BLOCK1 voltage of digital dbias20 NULL }; const esp_efuse_desc_t* ESP_EFUSE_DIG_DBIAS_HVT[] = { &DIG_DBIAS_HVT[0], // [] BLOCK1 digital dbias when hvt NULL }; const esp_efuse_desc_t* 
ESP_EFUSE_WAFER_VERSION_MINOR_HI[] = { &WAFER_VERSION_MINOR_HI[0], // [] WAFER_VERSION_MINOR most significant bit NULL }; const esp_efuse_desc_t* ESP_EFUSE_WAFER_VERSION_MAJOR[] = { &WAFER_VERSION_MAJOR[0], // [] WAFER_VERSION_MAJOR NULL }; const esp_efuse_desc_t* ESP_EFUSE_ADC2_CAL_VOL_ATTEN3[] = { &ADC2_CAL_VOL_ATTEN3[0], // [] ADC2 calibration voltage at atten3 NULL }; const esp_efuse_desc_t* ESP_EFUSE_OPTIONAL_UNIQUE_ID[] = { &OPTIONAL_UNIQUE_ID[0], // [] Optional unique 128-bit ID NULL }; const esp_efuse_desc_t* ESP_EFUSE_BLK_VERSION_MAJOR[] = { &BLK_VERSION_MAJOR[0], // [] BLK_VERSION_MAJOR of BLOCK2 {0: "No calib"; 1: "ADC calib V1"} NULL }; const esp_efuse_desc_t* ESP_EFUSE_TEMP_CALIB[] = { &TEMP_CALIB[0], // [] Temperature calibration data NULL }; const esp_efuse_desc_t* ESP_EFUSE_OCODE[] = { &OCODE[0], // [] ADC OCode NULL }; const esp_efuse_desc_t* ESP_EFUSE_ADC1_INIT_CODE_ATTEN0[] = { &ADC1_INIT_CODE_ATTEN0[0], // [] ADC1 init code at atten0 NULL }; const esp_efuse_desc_t* ESP_EFUSE_ADC1_INIT_CODE_ATTEN1[] = { &ADC1_INIT_CODE_ATTEN1[0], // [] ADC1 init code at atten1 NULL }; const esp_efuse_desc_t* ESP_EFUSE_ADC1_INIT_CODE_ATTEN2[] = { &ADC1_INIT_CODE_ATTEN2[0], // [] ADC1 init code at atten2 NULL }; const esp_efuse_desc_t* ESP_EFUSE_ADC1_INIT_CODE_ATTEN3[] = { &ADC1_INIT_CODE_ATTEN3[0], // [] ADC1 init code at atten3 NULL }; const esp_efuse_desc_t* ESP_EFUSE_ADC2_INIT_CODE_ATTEN0[] = { &ADC2_INIT_CODE_ATTEN0[0], // [] ADC2 init code at atten0 NULL }; const esp_efuse_desc_t* ESP_EFUSE_ADC2_INIT_CODE_ATTEN1[] = { &ADC2_INIT_CODE_ATTEN1[0], // [] ADC2 init code at atten1 NULL }; const esp_efuse_desc_t* ESP_EFUSE_ADC2_INIT_CODE_ATTEN2[] = { &ADC2_INIT_CODE_ATTEN2[0], // [] ADC2 init code at atten2 NULL }; const esp_efuse_desc_t* ESP_EFUSE_ADC2_INIT_CODE_ATTEN3[] = { &ADC2_INIT_CODE_ATTEN3[0], // [] ADC2 init code at atten3 NULL }; const esp_efuse_desc_t* ESP_EFUSE_ADC1_CAL_VOL_ATTEN0[] = { &ADC1_CAL_VOL_ATTEN0[0], // [] ADC1 calibration voltage at atten0 
NULL }; const esp_efuse_desc_t* ESP_EFUSE_ADC1_CAL_VOL_ATTEN1[] = { &ADC1_CAL_VOL_ATTEN1[0], // [] ADC1 calibration voltage at atten1 NULL }; const esp_efuse_desc_t* ESP_EFUSE_ADC1_CAL_VOL_ATTEN2[] = { &ADC1_CAL_VOL_ATTEN2[0], // [] ADC1 calibration voltage at atten2 NULL }; const esp_efuse_desc_t* ESP_EFUSE_ADC1_CAL_VOL_ATTEN3[] = { &ADC1_CAL_VOL_ATTEN3[0], // [] ADC1 calibration voltage at atten3 NULL }; const esp_efuse_desc_t* ESP_EFUSE_ADC2_CAL_VOL_ATTEN0[] = { &ADC2_CAL_VOL_ATTEN0[0], // [] ADC2 calibration voltage at atten0 NULL }; const esp_efuse_desc_t* ESP_EFUSE_ADC2_CAL_VOL_ATTEN1[] = { &ADC2_CAL_VOL_ATTEN1[0], // [] ADC2 calibration voltage at atten1 NULL }; const esp_efuse_desc_t* ESP_EFUSE_ADC2_CAL_VOL_ATTEN2[] = { &ADC2_CAL_VOL_ATTEN2[0], // [] ADC2 calibration voltage at atten2 NULL }; const esp_efuse_desc_t* ESP_EFUSE_USER_DATA[] = { &USER_DATA[0], // [BLOCK_USR_DATA] User data NULL }; const esp_efuse_desc_t* ESP_EFUSE_USER_DATA_MAC_CUSTOM[] = { &USER_DATA_MAC_CUSTOM[0], // [MAC_CUSTOM CUSTOM_MAC] Custom MAC NULL }; const esp_efuse_desc_t* ESP_EFUSE_KEY0[] = { &KEY0[0], // [BLOCK_KEY0] Key0 or user data NULL }; const esp_efuse_desc_t* ESP_EFUSE_KEY1[] = { &KEY1[0], // [BLOCK_KEY1] Key1 or user data NULL }; const esp_efuse_desc_t* ESP_EFUSE_KEY2[] = { &KEY2[0], // [BLOCK_KEY2] Key2 or user data NULL }; const esp_efuse_desc_t* ESP_EFUSE_KEY3[] = { &KEY3[0], // [BLOCK_KEY3] Key3 or user data NULL }; const esp_efuse_desc_t* ESP_EFUSE_KEY4[] = { &KEY4[0], // [BLOCK_KEY4] Key4 or user data NULL }; const esp_efuse_desc_t* ESP_EFUSE_KEY5[] = { &KEY5[0], // [BLOCK_KEY5] Key5 or user data NULL }; const esp_efuse_desc_t* ESP_EFUSE_SYS_DATA_PART2[] = { &SYS_DATA_PART2[0], // [BLOCK_SYS_DATA2] System data part 2 (reserved) NULL }; ```
```objective-c /* Do not modify this file. */ /* It is created automatically by the ASN.1 to Wireshark dissector compiler */ /* packet-acse.h */ /* ../../tools/asn2wrs.py -b -e -C -p acse -c ./acse.cnf -s ./packet-acse-template -D . acse.asn */ /* Input file: packet-acse-template.h */ #line 1 "packet-acse-template.h" /* packet-acse.h * Routines for ACSE packet dissection * Ronnie Sahlberg 2005 * * $Id: packet-acse.h 31322 2009-12-19 14:27:17Z stig $ * * Wireshark - Network traffic analyzer * By Gerald Combs <gerald@wireshark.org> * * This program is free software; you can redistribute it and/or * as published by the Free Software Foundation; either version 2 * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ #ifndef PACKET_ACSE_H #define PACKET_ACSE_H /*--- Included file: packet-acse-exp.h ---*/ #line 1 "packet-acse-exp.h" extern const value_string acse_AP_title_vals[]; extern const value_string acse_ASO_qualifier_vals[]; extern const value_string acse_AE_title_vals[]; int dissect_acse_EXTERNALt(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_); int dissect_acse_AP_title(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_); int dissect_acse_AE_qualifier(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_); int dissect_acse_ASO_qualifier(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_); int dissect_acse_AE_title(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_); int dissect_acse_AE_invocation_identifier(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_); int dissect_acse_AP_invocation_identifier(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_); /*--- End of included file: packet-acse-exp.h ---*/ #line 30 "packet-acse-template.h" #endif /* PACKET_ACSE_H */ ```
```objective-c
//
//  YPLogger.m
//  Wuxianda
//
//  Created by MichaelPPP on 16/1/21.
//

#import "YPLogger.h"
#import "SystemInfo.h"

@implementation YPLogger

// Prints a one-time startup banner to stderr in DEBUG builds only.
// +load runs when the class is added to the runtime, before main(),
// so the banner appears ahead of any other app output.
// NOTE(review): several format strings look like non-ASCII label text was
// stripped by an encoding pass (e.g. " : %s ") — confirm against the
// original file before relying on the output wording.
+ (void)load
{
#ifdef DEBUG
    fprintf(stderr, "****************************************************************************************\n");
    fprintf(stderr, " \n");
    fprintf(stderr, " copyright (c) 2016, {MichaelHuyp} \n");
    fprintf(stderr, " path_to_url \n");
    fprintf(stderr, " \n");
// Device details are only meaningful on iOS / the simulator.
#if (TARGET_OS_IPHONE || TARGET_IPHONE_SIMULATOR)
    // Hardware model and OS version (e.g. "iPhone 6s 9.2").
    fprintf(stderr, " %s %s \n", [SystemInfo platformString].UTF8String, [SystemInfo osVersion].UTF8String);
    // fprintf( stderr, " ip: %s \n", [SystemInfo localIPAddress].UTF8String );
    // Cellular carrier name.
    fprintf(stderr, " : %s \n", [SystemInfo getCarrierName].UTF8String);
    // fprintf(stderr, " : %s \n", [SystemInfo uuidSolution].UTF8String);
    // App bundle path on the device / simulator.
    fprintf(stderr, " Home: %s \n", [NSBundle mainBundle].bundlePath.UTF8String);
    fprintf(stderr, " \n");
    fprintf(stderr, "****************************************************************************************\n");
#endif
#endif
}

// Standard dispatch_once singleton accessor; thread-safe.
+ (instancetype)sharedInstance
{
    static YPLogger* _instance;
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
        _instance = [[YPLogger alloc] init];
    });
    return _instance;
}

@end
```
```javascript export default function Page() { return <p>custom 404</p>; } export function getStaticProps() { return { props: { is404: true, }, }; } ```
In legal history, an animal trial was the criminal trial of a non-human animal. Such trials are recorded as having taken place in Europe from the thirteenth century until the eighteenth. In modern times, it is considered in most criminal justice systems that non-human animals lack moral agency and so cannot be held culpable for an act. Historical animal trials Animals, including insects, faced the possibility of criminal charges for several centuries across many parts of Europe. The earliest extant record of an animal trial is often assumed to be found in the execution of a pig in 1266 at Fontenay-aux-Roses. Newer research, however, suggests that this reading might be mistaken and no trial took place in that particular incident. Notwithstanding this controversy, such trials remained part of several legal systems until the 18th century. Animal defendants appeared before both church and secular courts, and the offences alleged against them ranged from murder to criminal damage. Human witnesses were often heard, and in ecclesiastical courts the animals were routinely provided with lawyers (this was not the case in secular courts, but for most of the period concerned, neither were human defendants). If convicted, it was usual for an animal to be executed or exiled. However, in 1750, a female donkey was acquitted of charges of bestiality due to witnesses to the animal's virtue and good behaviour while her co-accused human was sentenced to death. Translations of several of the most detailed records can be found in E. P. Evans' The Criminal Prosecution and Capital Punishment of Animals, published in 1906. The text alludes to research such as that carried out by Karl von Amira, who dealt with the matter from a jurisprudential approach to the work "Consilia" made by the lawyer Bartholomew Chassenée, defender of animals, more than once called to represent animals in the trials held. 
Thanks to Evans's research and analysis of the sources indicated, with special reference to Amira, a division can be made between Thierstrafen ("animal punishment"), and Thierprocesse ("animal trial"). Sadakat Kadri's The Trial: Four Thousand Years of Courtroom Drama (Random House, 2006) contains another detailed examination of the subject. Kadri shows that such trials of animals were part of a broader state of affairs, with prosecutions of corpses and inanimate objects, and argues that an echo of such rituals survives in modern attitudes towards the punishment of children and the mentally ill. Punishments of animals There were trials of animals accused of killing humans; the criminal procedure had some similarities with trials of humans: they had to be arrested and go through a trial hearing held by a secular court. If found guilty of homicide, the animal might suffer the death penalty. The animals that were most often punished by Thierstrafen were pigs. The work of Evans and Cohen has been used in jurisprudence about animal abuse that is currently debated in the Constitutional Court of Colombia, an institution that has cited this compilation of animal trials to debate animals' capacity and possibility to be subjects of law. In the same way, it is through the trials of pigs that not only the direct author of the crime is recognized, but there could also be "accomplices", as in the case of the village of Saint-Marcel-le-Jeussey in 1379, in which two herds of these animals were said to have rioted and expressed the approval of an infanticide committed by other pigs; although the pigs found guilty of homicide were sentenced to execution, thanks to the request of the owner of the two herds to the Duke of Burgundy, the animals accused of complicity were pardoned. In addition, there are also convictions of animals such as donkeys, horses, cows, bulls and mules. 
Types of animals put on trial Animals put on trial were almost invariably either domesticated ones (most often pigs, but also bulls, horses, and cows) or pests such as rats and weevils. Basel case According to Johannis Gross in Kurze Basler Chronik (1624), in 1474 a rooster was put on trial in the city of Basel for "the heinous and unnatural crime of laying an egg", which the townspeople were concerned was spawned by Satan and contained a cockatrice. Katya the Bear Katya the Bear is a female brown bear native to Kazakhstan who was imprisoned in 2004 after being found guilty of mauling two people in separate incidents. Katya was held in the Arkalyk Prison in Kostanay. The bear was released from imprisonment and allowed to congregate with other bears after serving a fifteen-year sentence. Handlers report Katya socializing well with other bears after her long imprisonment. Monkeys In September 2015, People for the Ethical Treatment of Animals sued David Slater on behalf of a monkey named Naruto. The judge dismissed the case, ruling that the monkey did not have legal standing. PETA later appealed the ruling, and the appeal was rejected on April 23, 2018. According to local folklore, a monkey was hanged in Hartlepool, England. During the Napoleonic Wars, a French ship was wrecked in a storm off the coast of Hartlepool. The only survivor from the ship was a monkey, allegedly dressed in a French army uniform to provide amusement for the crew. On finding the monkey on the beach, some locals decided to hold an impromptu trial; since the monkey was unable to answer their questions and because they had seen neither a monkey nor a Frenchman before, they concluded that the monkey must be a French spy. Being found guilty, the animal was sentenced to death and was hanged on the beach. The colloquial name for the resident people of Hartlepool is "monkey hanger". Ferron case Jacques Ferron was a Frenchman who was tried and hanged in 1750 for copulation with a jenny (female donkey). 
The trial took place in the commune of Vanves and Ferron was found guilty and sentenced to death by hanging. In cases such as these it was usual that the animal would also be sentenced to death, but in this case the she-ass was acquitted. The court decided that the animal was a victim and had not participated of her own free will. A document, dated 19 September 1750, was submitted to the court on behalf of the she-ass that attested to the virtuous nature of the animal. Signed by the parish priest and other principal residents of the commune it proclaimed that "they were willing to bear witness that she is in word and deed and in all her habits of life a most honest creature." Proceedings against animals In contrast to the ease of capturing an animal such as those indicated above, animal trials also sought to condemn pests for killing crops, in order to expel them. The ecclesiastical tribunal had to resort to other types of questions and techniques to judge them, so they requested the intervention of the church to begin with the pertinent metaphysical actions, such as exorcisms and incantations having as their main element the holy water. Evans collects several techniques of conjuration used against the plague: the author mentions a treatise by Kassianos Bassos, a Byzantine Bithynian who lived during the tenth century, in which he describes, step by step, a recipe to finish off the field mice, who are asked to leave the fields on pain of cutting them into seven pieces. It is found that the animals most judged through this kind of process were rats, locusts, mice, snails, weevils, flies, bumblebees, caterpillars and other kinds of insects or "vermin" that attacked crops or vineyards, according to the explanations of the church for "instigation of Satan". Evans' compilation covers trials from the 8th century until the early 20th century. 
He does not merely list them, but delves into the metaphysical, religious, legal and legislative issues that led humans to make judgments against animals. The insects' advocate When an animal was accused of committing a crime against a human being or against his property, he was notified and assigned a lawyer to defend him during the trial. The Israeli academic Esther Cohen remarked on the advocate role when an animal was called to trial, who constantly used procedural figures to exempt the possibility of continuing with the process, as an example of the objection for lack of jurisdiction, since the animals could not commit crimes as they were incapable before the law. Another option for the defense was to argue that the notification was not made in accordance with the law, since they were directed directly against locusts, rats or other insects, who did not have the will, much less the possibility of making use of reason to appear at a trial. The trials and arguments of the defense sometimes alluded to the role of animals in the world according to teleology, such is the case of Thomas Aquinas, who indicated that there should not be such judgments because the animals were creations of God and in this sense if an earthly judge accused them of committing crimes they were going against the divine will. In popular culture In Lewis Carroll's humorous 1876 poem The Hunting of the Snark, the Barrister dreams about the trial of a pig accused of deserting its sty. In the musical adaptation this features as the song The Pig Must Die. Julian Barnes describes a trial against a woodworm in his 1989 book A History of the World in 10½ Chapters. The 1993 film The Hour of the Pig, released as The Advocate in the United States, centers on the prosecution of a homicidal pig. 
Several episodes reflect historical events, and its scriptwriters evidently consulted actual trial transcripts, though the plot revolves around a historical conceit – Colin Firth plays the pig's defence lawyer, but there is no recorded instance of a lawyer representing an animal charged with murder. (There are several cases, by contrast, where lawyers appeared for creatures in ecclesiastical courts – and several rats and beetles, for example, won famous court victories as a result.) In Olga Tokarczuk's 2009 novel Drive Your Plow Over the Bones of the Dead, the main protagonist writes to police using historical examples of animal trials to justify her theory that animals are responsible for recent local murders. The 2013 visual novel adventure video game Phoenix Wright: Ace Attorney – Dual Destinies offers an additional court case as downloadable content, where the protagonist Phoenix Wright defends an orca accused of murder. See also Cadaver Synod Damnatio ad bestias Topsy (elephant) Mary (elephant) Tyke (elephant) Notes References . . External links The Criminal Prosecution and Capital Punishment of Animals (1906) at the Internet Archive  , Society and Animals, Volume 2, Number 1 (1994)   (2003) Nicholas Humphrey,  , Chapter 18 of The Mind Made Flesh, pp. 235–254, Oxford University Press (2002) Animals on Trial (MP3), BBC World Service documentary podcast, broadcast on 15 March 2011 Bugs and Beast Before the Law in The Public Domain Review Animal law History of criminal justice Types of trials
Clairvaux-d'Aveyron is a commune in the Aveyron department in southern France. Population See also Communes of the Aveyron department References Communes of Aveyron Aveyron communes articles needing translation from French Wikipedia
```c++ //===- llvm/Support/DiagnosticInfo.cpp - Diagnostic Definitions -*- C++ -*-===// // // See path_to_url for license information. // //===your_sha256_hash------===// // // This file defines a diagnostic printer relying on raw_ostream. // //===your_sha256_hash------===// #include "llvm/IR/DiagnosticPrinter.h" #include "llvm/ADT/Twine.h" #include "llvm/IR/Module.h" #include "llvm/IR/Value.h" #include "llvm/Support/SourceMgr.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(char C) { Stream << C; return *this; } DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(unsigned char C) { Stream << C; return *this; } DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(signed char C) { Stream << C; return *this; } DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(StringRef Str) { Stream << Str; return *this; } DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(const char *Str) { Stream << Str; return *this; } DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<( const std::string &Str) { Stream << Str; return *this; } DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(unsigned long N) { Stream << N; return *this; } DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(long N) { Stream << N; return *this; } DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<( unsigned long long N) { Stream << N; return *this; } DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(long long N) { Stream << N; return *this; } DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(const void *P) { Stream << P; return *this; } DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(unsigned int N) { Stream << N; return *this; } DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(int N) { Stream << N; return *this; } DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(double N) { Stream << N; return *this; } DiagnosticPrinter 
&DiagnosticPrinterRawOStream::operator<<(const Twine &Str) { Str.print(Stream); return *this; } // IR related types. DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(const Value &V) { Stream << V.getName(); return *this; } DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(const Module &M) { Stream << M.getModuleIdentifier(); return *this; } // Other types. DiagnosticPrinter &DiagnosticPrinterRawOStream:: operator<<(const SMDiagnostic &Diag) { // We don't have to print the SMDiagnostic kind, as the diagnostic severity // is printed by the diagnostic handler. Diag.print("", Stream, /*ShowColors=*/true, /*ShowKindLabel=*/false); return *this; } ```
```xml <vector xmlns:android="path_to_url" xmlns:aapt="path_to_url" android:width="48dp" android:height="48dp" android:viewportWidth="48" android:viewportHeight="48"> <path android:pathData="M18.5,20C18.224,20 18,20.224 18,20.5V22.5C18,22.776 18.224,23 18.5,23H20.5C20.776,23 21,22.776 21,22.5V20.5C21,20.224 20.776,20 20.5,20H18.5Z"> <aapt:attr name="android:fillColor"> <gradient android:startX="24" android:startY="6" android:endX="24" android:endY="42" android:type="linear"> <item android:offset="0" android:color="#FFFB6361"/> <item android:offset="1" android:color="#FFF23433"/> </gradient> </aapt:attr> </path> <path android:pathData="M11,6H37C38.657,6 40,7.343 40,9V39C40,40.657 38.657,42 37,42H11C9.343,42 8,40.657 8,39V9C8,7.343 9.343,6 11,6ZM20,7H19V19H18.5C17.672,19 17,19.672 17,20.5V21H9V22H17V22.5C17,23.328 17.672,24 18.5,24H19V41H20V24H20.5C21.328,24 22,23.328 22,22.5V22H39V21H22V20.5C22,19.672 21.328,19 20.5,19H20V7Z" android:fillType="evenOdd"> <aapt:attr name="android:fillColor"> <gradient android:startX="24" android:startY="6" android:endX="24" android:endY="42" android:type="linear"> <item android:offset="0" android:color="#FFFB6361"/> <item android:offset="1" android:color="#FFF23433"/> </gradient> </aapt:attr> </path> </vector> ```
```objective-c // // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
#import "DBManagedObjectTableViewCell.h"

// Reuse identifier for the inner title/value rows.
static NSString *const DBManagedObjectTableViewCellTitleValueCellIdentifier = @"TitleValueCell";

// Cell that displays one NSManagedObject: the entity name as a title and an
// embedded, non-scrolling table listing every attribute name/value pair.
@interface DBManagedObjectTableViewCell () <UITableViewDelegate, UITableViewDataSource>

@property (nonatomic, weak) IBOutlet UILabel *titleLabel;
@property (nonatomic, weak) IBOutlet UITableView *tableView;
// Pins the inner table's height so the outer cell sizes to its content.
@property (nonatomic, weak) IBOutlet NSLayoutConstraint *tableViewHeightConstraint;

@property (nonatomic, strong) NSManagedObject *managedObject;
@property (nonatomic, strong) NSArray <NSString *> *attributeNames;

@end

@implementation DBManagedObjectTableViewCell

- (void)awakeFromNib {
    [super awakeFromNib];
    self.tableView.delegate = self;
    self.tableView.dataSource = self;
}

// Populates the cell from the given managed object and resizes the inner
// table to fit all attribute rows.
- (void)configureWithManagedObject:(NSManagedObject *)managedObject {
    self.titleLabel.text = managedObject.entity.name;
    self.managedObject = managedObject;
    // NSDictionary.allKeys has no guaranteed order, which made row order
    // nondeterministic across reloads; sort for a stable display.
    self.attributeNames = [managedObject.entity.attributesByName.allKeys sortedArrayUsingSelector:@selector(compare:)];
    [self.tableView reloadData];
    // Force a layout pass so contentSize reflects the freshly reloaded rows
    // before we read it; reloadData alone defers layout.
    [self.tableView layoutIfNeeded];
    self.tableViewHeightConstraint.constant = self.tableView.contentSize.height;
}

#pragma mark - UITableViewDelegate

- (CGFloat)tableView:(UITableView *)tableView heightForRowAtIndexPath:(NSIndexPath *)indexPath {
    return 24;
}

#pragma mark - UITableViewDataSource

- (UITableViewCell *)tableView:(UITableView *)tableView cellForRowAtIndexPath:(NSIndexPath *)indexPath {
    UITableViewCell *cell = [tableView dequeueReusableCellWithIdentifier:DBManagedObjectTableViewCellTitleValueCellIdentifier];
    if (cell == nil) {
        cell = [[UITableViewCell alloc] initWithStyle:UITableViewCellStyleValue1 reuseIdentifier:DBManagedObjectTableViewCellTitleValueCellIdentifier];
        cell.textLabel.font = [cell.textLabel.font fontWithSize:12];
        cell.detailTextLabel.font = [cell.detailTextLabel.font fontWithSize:12];
    }
    NSString *attributeName = self.attributeNames[indexPath.row];
    cell.textLabel.text = attributeName;
    // -valueForKey: may return nil; %@ renders that as "(null)", which is
    // acceptable for this debug-style attribute browser.
    cell.detailTextLabel.text = [NSString stringWithFormat:@"%@", [self.managedObject valueForKey:attributeName]];
    return cell;
}

- (NSInteger)numberOfSectionsInTableView:(UITableView *)tableView {
    return 1;
}

- (NSInteger)tableView:(UITableView *)tableView numberOfRowsInSection:(NSInteger)section {
    return self.attributeNames.count;
}

@end
```
```python
#!/usr/bin/env python3
"""
Parser for U.S. Energy Information Administration, path_to_url .

Aggregates and standardizes data from most of the US ISOs,
and exposes them via a unified API.

Requires an API key, set in the EIA_KEY environment variable. Get one here:
path_to_url
"""

from datetime import datetime, timedelta
from logging import Logger, getLogger
from typing import Any

import arrow
from dateutil import parser, tz
from requests import Session

from electricitymap.contrib.lib.models.event_lists import (
    ExchangeList,
    ProductionBreakdownList,
    TotalConsumptionList,
)
from electricitymap.contrib.lib.models.events import (
    EventSourceType,
    ProductionMix,
    StorageMix,
)
from electricitymap.contrib.lib.types import ZoneKey
from parsers.lib.config import refetch_frequency
from parsers.lib.utils import get_token

# Reverse exchanges need to be multiplied by -1, since they are reported in the opposite direction
# Each entry is an electricitymap exchange key whose EIA-reported flow sign is
# opposite to the key's direction.
REVERSE_EXCHANGES = [
    # NOTE(review): "US-CA" looks like a legacy zone key — every other
    # California entry uses "US-CAL-CISO"; confirm this entry is still needed.
    "US-CA->MX-BC",
    "MX-BC->US-CAL-CISO",
    "CA-SK->US-CENT-SWPP",
    "CA-MB->US-MIDW-MISO",
    "CA-ON->US-MIDW-MISO",
    "CA-QC->US-NE-ISNE",
    "CA-NB->US-NE-ISNE",
    "CA-BC->US-NW-BPAT",
    "CA-AB->US-NW-NWMT",
    "CA-QC->US-NY-NYIS",
    "CA-ON->US-NY-NYIS",
    "MX-NE->US-TEX-ERCO",
    "MX-NO->US-TEX-ERCO",
    "US-SW-PNM->US-SW-SRP",  # For some reason EBA.SRP-PNM.ID.H exists in EIA, but PNM-SRP does not. Probably because it is unidirectional
]

# Those threshold correspond to the ranges where the negative values are most likely
# self consumption and should be set to 0 for production that is being injected into the grid.
# Keyed by production mode; values are MW. Readings below the threshold are
# treated as real errors rather than self-consumption.
NEGATIVE_PRODUCTION_THRESHOLDS_TYPE = {
    "default": -10,
    "coal": -50,
    "gas": -20,
    "solar": -100,
    "wind": -20,
    "unknown": -50,
}

# based on path_to_url
# or path_to_url
# List includes regions and Balancing Authorities.
REGIONS = { # Non-US regions - EIA "MX-BC": "CFE", "MX-NE": "CEN", "MX-NO": "CFE", "CA-SK": "SPC", "CA-MB": "MHEB", "CA-ON": "IESO", "CA-QC": "HQT", "CA-NB": "NBSO", "CA-BC": "BCHA", "CA-AB": "AESO", # New regions - EIA "US-CAL-BANC": "BANC", # Balancing Authority Of Northern California "US-CAL-CISO": "CISO", # California Independent System Operator "US-CAL-IID": "IID", # Imperial Irrigation District "US-CAL-LDWP": "LDWP", # Los Angeles Department Of Water And Power "US-CAL-TIDC": "TIDC", # Turlock Irrigation District "US-CAR-CPLE": "CPLE", # Duke Energy Progress East "US-CAR-CPLW": "CPLW", # Duke Energy Progress West "US-CAR-DUK": "DUK", # Duke Energy Carolinas "US-CAR-SC": "SC", # South Carolina Public Service Authority "US-CAR-SCEG": "SCEG", # South Carolina Electric & Gas Company "US-CAR-YAD": "YAD", # Alcoa Power Generating, Inc. - Yadkin Division "US-CENT-SPA": "SPA", # Southwestern Power Administration "US-CENT-SWPP": "SWPP", # Southwest Power Pool "US-FLA-FMPP": "FMPP", # Florida Municipal Power Pool "US-FLA-FPC": "FPC", # Duke Energy Florida Inc "US-FLA-FPL": "FPL", # Florida Power & Light Company "US-FLA-GVL": "GVL", # Gainesville Regional Utilities "US-FLA-HST": "HST", # City Of Homestead "US-FLA-JEA": "JEA", # Jea "US-FLA-NSB": "NSB", # Utilities Commission Of New Smyrna Beach, Decomissioned data is directly integrated in another balancing authority # Some solar plants within this zone are operated by Florida Power & Light, therefore on the map the zones got merged. "US-FLA-SEC": "SEC", # Seminole Electric Cooperative "US-FLA-TAL": "TAL", # City Of Tallahassee "US-FLA-TEC": "TEC", # Tampa Electric Company "US-MIDA-PJM": "PJM", # Pjm Interconnection, Llc "US-MIDW-AECI": "AECI", # Associated Electric Cooperative, Inc. 
# "US-MIDW-GLHB": "GLHB", # GridLiance US-MIDW-GLHB decommissioned no more powerplant "US-MIDW-LGEE": "LGEE", # Louisville Gas And Electric Company And Kentucky Utilities "US-MIDW-MISO": "MISO", # Midcontinent Independent Transmission System Operator, Inc.. "US-NE-ISNE": "ISNE", # Iso New England Inc. "US-NW-AVA": "AVA", # Avista Corporation "US-NW-AVRN": "AVRN", # Avangrid Renewables, LLC, integrated with US-NW-BPAT and US-NW-PACW "US-NW-BPAT": "BPAT", # Bonneville Power Administration "US-NW-CHPD": "CHPD", # Public Utility District No. 1 Of Chelan County "US-NW-DOPD": "DOPD", # Pud No. 1 Of Douglas County "US-NW-GCPD": "GCPD", # Public Utility District No. 2 Of Grant County, Washington "US-NW-GRID": "GRID", # Gridforce Energy Management, Llc "US-NW-GWA": "GWA", # Naturener Power Watch, Llc (Gwa), integrated with US-NW-NWMT "US-NW-IPCO": "IPCO", # Idaho Power Company "US-NW-NEVP": "NEVP", # Nevada Power Company "US-NW-NWMT": "NWMT", # Northwestern Energy (Nwmt) "US-NW-PACE": "PACE", # Pacificorp - East "US-NW-PACW": "PACW", # Pacificorp - West "US-NW-PGE": "PGE", # Portland General Electric Company "US-NW-PSCO": "PSCO", # Public Service Company Of Colorado "US-NW-PSEI": "PSEI", # Puget Sound Energy "US-NW-SCL": "SCL", # Seattle City Light "US-NW-TPWR": "TPWR", # City Of Tacoma, Department Of Public Utilities, Light Division "US-NW-WACM": "WACM", # Western Area Power Administration - Rocky Mountain Region "US-NW-WAUW": "WAUW", # Western Area Power Administration Ugp West "US-NW-WWA": "WWA", # Naturener Wind Watch, Llc, integrated with US-NW-NWMT "US-NY-NYIS": "NYIS", # New York Independent System Operator "US-SE-AEC": "AEC", # Powersouth Energy Cooperative, decomissioned merged with US-SE-SOCO # Though it is unclear which BA took over AEC. "US-SE-SEPA": "SEPA", # Southeastern Power Administration "US-SE-SOCO": "SOCO", # Southern Company Services, Inc. 
- Trans "US-SW-AZPS": "AZPS", # Arizona Public Service Company "US-SW-DEAA": "DEAA", # Arlington Valley, LLC, integrated with US-SW-SRP "US-SW-EPE": "EPE", # El Paso Electric Company "US-SW-GRIF": "GRIF", # Griffith Energy, Llc, integrated with US-SW-WALC "US-SW-GRMA": "GRMA", # Gila River Power, Llc Decommissioned, # The only gas power plant is owned by US-SW-SRP but there's a PPA with US-SW-AZPS, so it was merged with # US-SW-AZPS path_to_url "US-SW-HGMA": "HGMA", # New Harquahala Generating Company, Llc - Hgba, integrated with US-SW-SRP "US-SW-PNM": "PNM", # Public Service Company Of New Mexico "US-SW-SRP": "SRP", # Salt River Project "US-SW-TEPC": "TEPC", # Tucson Electric Power Company "US-SW-WALC": "WALC", # Western Area Power Administration - Desert Southwest Region "US-TEN-TVA": "TVA", # Tennessee Valley Authority "US-TEX-ERCO": "ERCO", # Electric Reliability Council Of Texas, Inc. } EXCHANGES = { # Exchanges to non-US BAs "MX-BC->US-CAL-CISO": "&facets[fromba][]=CISO&facets[toba][]=CFE", # Unable to verify if MX-BC is correct "CA-SK->US-CENT-SWPP": "&facets[fromba][]=SWPP&facets[toba][]=SPC", "CA-MB->US-MIDW-MISO": "&facets[fromba][]=MISO&facets[toba][]=MHEB", "CA-ON->US-MIDW-MISO": "&facets[fromba][]=MISO&facets[toba][]=IESO", "CA-QC->US-NE-ISNE": "&facets[fromba][]=ISNE&facets[toba][]=HQT", "CA-NB->US-NE-ISNE": "&facets[fromba][]=ISNE&facets[toba][]=NBSO", "CA-BC->US-NW-BPAT": "&facets[fromba][]=BPAT&facets[toba][]=BCHA", "CA-AB->US-NW-NWMT": "&facets[fromba][]=NWMT&facets[toba][]=AESO", "CA-QC->US-NY-NYIS": "&facets[fromba][]=NYIS&facets[toba][]=HQT", "CA-ON->US-NY-NYIS": "&facets[fromba][]=NYIS&facets[toba][]=IESO", "MX-NE->US-TEX-ERCO": "&facets[fromba][]=ERCO&facets[toba][]=CEN", # Unable to verify if MX-NE is correct "MX-NO->US-TEX-ERCO": "&facets[fromba][]=ERCO&facets[toba][]=CFE", # Unable to verify if MX-NO is correct # Exchanges to other US balancing authorities "US-CAL-BANC->US-NW-BPAT": "&facets[fromba][]=BANC&facets[toba][]=BPAT", 
"US-CAL-BANC->US-CAL-CISO": "&facets[fromba][]=BANC&facets[toba][]=CISO", "US-CAL-BANC->US-CAL-TIDC": "&facets[fromba][]=BANC&facets[toba][]=TIDC", "US-CAL-CISO->US-SW-AZPS": "&facets[fromba][]=CISO&facets[toba][]=AZPS", "US-CAL-CISO->US-NW-BPAT": "&facets[fromba][]=CISO&facets[toba][]=BPAT", "US-CAL-CISO->US-CAL-IID": "&facets[fromba][]=CISO&facets[toba][]=IID", "US-CAL-CISO->US-CAL-LDWP": "&facets[fromba][]=CISO&facets[toba][]=LDWP", "US-CAL-CISO->US-NW-NEVP": "&facets[fromba][]=CISO&facets[toba][]=NEVP", "US-CAL-CISO->US-NW-PACW": "&facets[fromba][]=CISO&facets[toba][]=PACW", "US-CAL-CISO->US-SW-SRP": "&facets[fromba][]=CISO&facets[toba][]=SRP", "US-CAL-CISO->US-CAL-TIDC": "&facets[fromba][]=CISO&facets[toba][]=TIDC", "US-CAL-CISO->US-SW-WALC": "&facets[fromba][]=CISO&facets[toba][]=WALC", "US-CAL-IID->US-SW-AZPS": "&facets[fromba][]=IID&facets[toba][]=AZPS", "US-CAL-IID->US-SW-WALC": "&facets[fromba][]=IID&facets[toba][]=WALC", "US-CAL-LDWP->US-SW-AZPS": "&facets[fromba][]=LDWP&facets[toba][]=AZPS", "US-CAL-LDWP->US-NW-BPAT": "&facets[fromba][]=LDWP&facets[toba][]=BPAT", "US-CAL-LDWP->US-NW-NEVP": "&facets[fromba][]=LDWP&facets[toba][]=NEVP", "US-CAL-LDWP->US-NW-PACE": "&facets[fromba][]=LDWP&facets[toba][]=PACE", "US-CAL-LDWP->US-SW-WALC": "&facets[fromba][]=LDWP&facets[toba][]=WALC", "US-CAR-CPLE->US-CAR-YAD": "&facets[fromba][]=CPLE&facets[toba][]=YAD", "US-CAR-CPLE->US-CAR-DUK": "&facets[fromba][]=CPLE&facets[toba][]=DUK", "US-CAR-CPLE->US-MIDA-PJM": "&facets[fromba][]=CPLE&facets[toba][]=PJM", "US-CAR-CPLE->US-CAR-SCEG": "&facets[fromba][]=CPLE&facets[toba][]=SCEG", "US-CAR-CPLE->US-CAR-SC": "&facets[fromba][]=CPLE&facets[toba][]=SC", "US-CAR-CPLW->US-CAR-DUK": "&facets[fromba][]=CPLW&facets[toba][]=DUK", "US-CAR-CPLW->US-MIDA-PJM": "&facets[fromba][]=CPLW&facets[toba][]=PJM", "US-CAR-CPLW->US-TEN-TVA": "&facets[fromba][]=CPLW&facets[toba][]=TVA", "US-CAR-DUK->US-CAR-YAD": "&facets[fromba][]=DUK&facets[toba][]=YAD", "US-CAR-DUK->US-MIDA-PJM": 
"&facets[fromba][]=DUK&facets[toba][]=PJM", "US-CAR-DUK->US-CAR-SCEG": "&facets[fromba][]=DUK&facets[toba][]=SCEG", "US-CAR-DUK->US-CAR-SC": "&facets[fromba][]=DUK&facets[toba][]=SC", "US-CAR-DUK->US-SE-SEPA": "&facets[fromba][]=DUK&facets[toba][]=SEPA", "US-CAR-DUK->US-SE-SOCO": "&facets[fromba][]=DUK&facets[toba][]=SOCO", "US-CAR-DUK->US-TEN-TVA": "&facets[fromba][]=DUK&facets[toba][]=TVA", "US-CAR-SC->US-CAR-SCEG": "&facets[fromba][]=SC&facets[toba][]=SCEG", "US-CAR-SC->US-SE-SEPA": "&facets[fromba][]=SC&facets[toba][]=SEPA", "US-CAR-SC->US-SE-SOCO": "&facets[fromba][]=SC&facets[toba][]=SOCO", "US-CAR-SCEG->US-SE-SEPA": "&facets[fromba][]=SCEG&facets[toba][]=SEPA", "US-CAR-SCEG->US-SE-SOCO": "&facets[fromba][]=SCEG&facets[toba][]=SOCO", "US-CENT-SPA->US-MIDW-AECI": "&facets[fromba][]=SPA&facets[toba][]=AECI", "US-CENT-SPA->US-MIDW-MISO": "&facets[fromba][]=SPA&facets[toba][]=MISO", "US-CENT-SPA->US-CENT-SWPP": "&facets[fromba][]=SPA&facets[toba][]=SWPP", "US-CENT-SWPP->US-MIDW-AECI": "&facets[fromba][]=SWPP&facets[toba][]=AECI", "US-CENT-SWPP->US-SW-EPE": "&facets[fromba][]=SWPP&facets[toba][]=EPE", "US-CENT-SWPP->US-TEX-ERCO": "&facets[fromba][]=SWPP&facets[toba][]=ERCO", "US-CENT-SWPP->US-MIDW-MISO": "&facets[fromba][]=SWPP&facets[toba][]=MISO", "US-CENT-SWPP->US-NW-PSCO": "&facets[fromba][]=SWPP&facets[toba][]=PSCO", "US-CENT-SWPP->US-SW-PNM": "&facets[fromba][]=SWPP&facets[toba][]=PNM", "US-CENT-SWPP->US-NW-WACM": "&facets[fromba][]=SWPP&facets[toba][]=WACM", "US-CENT-SWPP->US-NW-WAUW": "&facets[fromba][]=SWPP&facets[toba][]=WAUW", "US-FLA-FMPP->US-FLA-FPC": "&facets[fromba][]=FMPP&facets[toba][]=FPC", "US-FLA-FMPP->US-FLA-FPL": "&facets[fromba][]=FMPP&facets[toba][]=FPL", "US-FLA-FMPP->US-FLA-JEA": "&facets[fromba][]=FMPP&facets[toba][]=JEA", "US-FLA-FMPP->US-FLA-TEC": "&facets[fromba][]=FMPP&facets[toba][]=TEC", "US-FLA-FPC->US-FLA-TAL": "&facets[fromba][]=FPC&facets[toba][]=TAL", "US-FLA-FPC->US-FLA-FPL": "&facets[fromba][]=FPC&facets[toba][]=FPL", 
"US-FLA-FPC->US-FLA-GVL": "&facets[fromba][]=FPC&facets[toba][]=GVL", "US-FLA-FPC->US-FLA-SEC": "&facets[fromba][]=FPC&facets[toba][]=SEC", "US-FLA-FPC->US-SE-SOCO": "&facets[fromba][]=FPC&facets[toba][]=SOCO", "US-FLA-FPC->US-FLA-TEC": "&facets[fromba][]=FPC&facets[toba][]=TEC", "US-FLA-FPC->US-FLA-NSB": "&facets[fromba][]=FPC&facets[toba][]=NSB", # decomissioned NSB zone, merged with FPL, exchange transfered "US-FLA-FPL->US-FLA-HST": "&facets[fromba][]=FPL&facets[toba][]=HST", "US-FLA-FPL->US-FLA-GVL": "&facets[fromba][]=FPL&facets[toba][]=GVL", "US-FLA-FPL->US-FLA-JEA": "&facets[fromba][]=FPL&facets[toba][]=JEA", "US-FLA-FPL->US-FLA-SEC": "&facets[fromba][]=FPL&facets[toba][]=SEC", "US-FLA-FPL->US-SE-SOCO": "&facets[fromba][]=FPL&facets[toba][]=SOCO", "US-FLA-FPL->US-FLA-TEC": "&facets[fromba][]=FPL&facets[toba][]=TEC", # "US-FLA-FPL->US-FLA-NSB": "&facets[fromba][]=FPL&facets[toba][]=NSB", decomissioned NSB zone "US-FLA-JEA->US-FLA-SEC": "&facets[fromba][]=JEA&facets[toba][]=SEC", "US-FLA-SEC->US-FLA-TEC": "&facets[fromba][]=SEC&facets[toba][]=TEC", "US-FLA-TAL->US-SE-SOCO": "&facets[fromba][]=TAL&facets[toba][]=SOCO", "US-MIDA-PJM->US-MIDW-LGEE": "&facets[fromba][]=PJM&facets[toba][]=LGEE", "US-MIDA-PJM->US-MIDW-MISO": "&facets[fromba][]=PJM&facets[toba][]=MISO", "US-MIDA-PJM->US-NY-NYIS": "&facets[fromba][]=PJM&facets[toba][]=NYIS", "US-MIDA-PJM->US-TEN-TVA": "&facets[fromba][]=PJM&facets[toba][]=TVA", "US-MIDW-AECI->US-MIDW-MISO": "&facets[fromba][]=AECI&facets[toba][]=MISO", "US-MIDW-AECI->US-TEN-TVA": "&facets[fromba][]=AECI&facets[toba][]=TVA", # "US-MIDW-GLHB->US-MIDW-LGEE": "&facets[fromba][]=GLHB&facets[toba][]=LGEE", US-MIDW-GLHB decommissioned no more powerplant # "US-MIDW-GLHB->US-MIDW-MISO": "&facets[fromba][]=GLHB&facets[toba][]=MISO", US-MIDW-GLHB decommissioned no more powerplant # "US-MIDW-GLHB->US-TEN-TVA": "&facets[fromba][]=EEI&facets[toba][]=TVA", US-MIDW-GLHB decommissioned no more powerplant "US-MIDW-LGEE->US-MIDW-MISO": 
"&facets[fromba][]=LGEE&facets[toba][]=MISO", "US-MIDW-LGEE->US-TEN-TVA": "&facets[fromba][]=LGEE&facets[toba][]=TVA", "US-MIDW-MISO->US-SE-AEC": "&facets[fromba][]=MISO&facets[toba][]=AEC", # US-SE-AEC decommissioned, merged with US-SE-SOCO, exchange transfered "US-MIDW-MISO->US-SE-SOCO": "&facets[fromba][]=MISO&facets[toba][]=SOCO", "US-MIDW-MISO->US-TEN-TVA": "&facets[fromba][]=MISO&facets[toba][]=TVA", "US-NE-ISNE->US-NY-NYIS": "&facets[fromba][]=ISNE&facets[toba][]=NYIS", "US-NW-AVA->US-NW-BPAT": "&facets[fromba][]=AVA&facets[toba][]=BPAT", "US-NW-AVA->US-NW-IPCO": "&facets[fromba][]=AVA&facets[toba][]=IPCO", "US-NW-AVA->US-NW-NWMT": "&facets[fromba][]=AVA&facets[toba][]=NWMT", "US-NW-AVA->US-NW-PACW": "&facets[fromba][]=AVA&facets[toba][]=PACW", "US-NW-AVA->US-NW-CHPD": "&facets[fromba][]=AVA&facets[toba][]=CHPD", "US-NW-AVA->US-NW-GCPD": "&facets[fromba][]=AVA&facets[toba][]=GCPD", "US-NW-BPAT->US-NW-TPWR": "&facets[fromba][]=BPAT&facets[toba][]=TPWR", "US-NW-BPAT->US-NW-GRID": "&facets[fromba][]=BPAT&facets[toba][]=GRID", "US-NW-BPAT->US-NW-IPCO": "&facets[fromba][]=BPAT&facets[toba][]=IPCO", "US-NW-BPAT->US-NW-NEVP": "&facets[fromba][]=BPAT&facets[toba][]=NEVP", "US-NW-BPAT->US-NW-NWMT": "&facets[fromba][]=BPAT&facets[toba][]=NWMT", "US-NW-BPAT->US-NW-DOPD": "&facets[fromba][]=BPAT&facets[toba][]=DOPD", "US-NW-BPAT->US-NW-PACW": "&facets[fromba][]=BPAT&facets[toba][]=PACW", "US-NW-BPAT->US-NW-PGE": "&facets[fromba][]=BPAT&facets[toba][]=PGE", "US-NW-BPAT->US-NW-CHPD": "&facets[fromba][]=BPAT&facets[toba][]=CHPD", "US-NW-BPAT->US-NW-GCPD": "&facets[fromba][]=BPAT&facets[toba][]=GCPD", "US-NW-BPAT->US-NW-PSEI": "&facets[fromba][]=BPAT&facets[toba][]=PSEI", "US-NW-BPAT->US-NW-SCL": "&facets[fromba][]=BPAT&facets[toba][]=SCL", "US-NW-CHPD->US-NW-DOPD": "&facets[fromba][]=CHPD&facets[toba][]=DOPD", "US-NW-CHPD->US-NW-PSEI": "&facets[fromba][]=CHPD&facets[toba][]=PSEI", "US-NW-GCPD->US-NW-PACW": "&facets[fromba][]=GCPD&facets[toba][]=PACW", 
"US-NW-GCPD->US-NW-PSEI": "&facets[fromba][]=GCPD&facets[toba][]=PSEI", # "US-NW-GWA->US-NW-NWMT": "&facets[fromba][]=GWA&facets[toba][]=NWMT", integrated directly with US-NW-NWMT "US-NW-IPCO->US-NW-NEVP": "&facets[fromba][]=IPCO&facets[toba][]=NEVP", "US-NW-IPCO->US-NW-NWMT": "&facets[fromba][]=IPCO&facets[toba][]=NWMT", "US-NW-IPCO->US-NW-PACE": "&facets[fromba][]=IPCO&facets[toba][]=PACE", "US-NW-IPCO->US-NW-PACW": "&facets[fromba][]=IPCO&facets[toba][]=PACW", "US-NW-NEVP->US-NW-PACE": "&facets[fromba][]=NEVP&facets[toba][]=PACE", "US-NW-NEVP->US-SW-WALC": "&facets[fromba][]=NEVP&facets[toba][]=WALC", # "US-NW-NWMT->US-NW-WWA": "&facets[fromba][]=NWMT&facets[toba][]=WWA", intergrated directly with US-NW-NWMT "US-NW-NWMT->US-NW-PACE": "&facets[fromba][]=NWMT&facets[toba][]=PACE", "US-NW-NWMT->US-NW-WAUW": "&facets[fromba][]=NWMT&facets[toba][]=WAUW", "US-NW-PACE->US-SW-AZPS": "&facets[fromba][]=PACE&facets[toba][]=AZPS", "US-NW-PACE->US-NW-PACW": "&facets[fromba][]=PACE&facets[toba][]=PACW", "US-NW-PACE->US-NW-WACM": "&facets[fromba][]=PACE&facets[toba][]=WACM", "US-NW-PACW->US-NW-PGE": "&facets[fromba][]=PACW&facets[toba][]=PGE", "US-NW-PSCO->US-SW-PNM": "&facets[fromba][]=PSCO&facets[toba][]=PNM", "US-NW-PSCO->US-NW-WACM": "&facets[fromba][]=PSCO&facets[toba][]=WACM", "US-NW-PSEI->US-NW-TPWR": "&facets[fromba][]=PSEI&facets[toba][]=TPWR", "US-NW-PSEI->US-NW-SCL": "&facets[fromba][]=PSEI&facets[toba][]=SCL", "US-NW-WACM->US-SW-AZPS": "&facets[fromba][]=WACM&facets[toba][]=AZPS", "US-NW-WACM->US-SW-PNM": "&facets[fromba][]=WACM&facets[toba][]=PNM", "US-NW-WACM->US-SW-WALC": "&facets[fromba][]=WACM&facets[toba][]=WALC", "US-NW-WACM->US-NW-WAUW": "&facets[fromba][]=WACM&facets[toba][]=WAUW", # "US-SE-AEC->US-SE-SOCO": "&facets[fromba][]=AEC&facets[toba][]=SOCO", Decommisioned BA "US-SE-SEPA->US-SE-SOCO": "&facets[fromba][]=SEPA&facets[toba][]=SOCO", "US-SE-SOCO->US-TEN-TVA": "&facets[fromba][]=SOCO&facets[toba][]=TVA", # "US-SW-AZPS->US-SW-GRMA": 
"&facets[fromba][]=AZPS&facets[toba][]=GRMA", , directly integrated in US-SW-AZPS "US-SW-AZPS->US-SW-PNM": "&facets[fromba][]=AZPS&facets[toba][]=PNM", "US-SW-AZPS->US-SW-SRP": "&facets[fromba][]=AZPS&facets[toba][]=SRP", "US-SW-AZPS->US-SW-TEPC": "&facets[fromba][]=AZPS&facets[toba][]=TEPC", "US-SW-AZPS->US-SW-WALC": "&facets[fromba][]=AZPS&facets[toba][]=WALC", "US-SW-EPE->US-SW-PNM": "&facets[fromba][]=EPE&facets[toba][]=PNM", "US-SW-EPE->US-SW-TEPC": "&facets[fromba][]=EPE&facets[toba][]=TEPC", # "US-SW-GRIF->US-SW-WALC": "&facets[fromba][]=GRIF&facets[toba][]=WALC", directly integrated in US-WALC # "US-SW-HGMA->US-SW-SRP": "&facets[fromba][]=HGMA&facets[toba][]=SRP", directly integrated in US-SW-SRP "US-SW-PNM->US-SW-TEPC": "&facets[fromba][]=PNM&facets[toba][]=TEPC", "US-SW-PNM->US-SW-SRP": "&facets[fromba][]=SRP&facets[toba][]=PNM", "US-SW-SRP->US-SW-TEPC": "&facets[fromba][]=SRP&facets[toba][]=TEPC", "US-SW-SRP->US-SW-WALC": "&facets[fromba][]=SRP&facets[toba][]=WALC", "US-SW-TEPC->US-SW-WALC": "&facets[fromba][]=TEPC&facets[toba][]=WALC", } # Some zones transfer all or part of their productions to another zone. # To avoid having multiple small production zones with no consumption, # their production is directly integrated into supplied zones according # to the supplied percentage. 
SC_VIRGIL_OWNERSHIP = 0.3333333 PRODUCTION_ZONES_TRANSFERS = { # key receives production from the dict of keys "US-SW-SRP": {"all": {"US-SW-DEAA": 1.0, "US-SW-HGMA": 1.0}}, "US-NW-NWMT": {"all": {"US-NW-GWA": 1.0, "US-NW-WWA": 1.0}}, "US-SW-WALC": {"all": {"US-SW-GRIF": 1.0}}, "US-NW-PACW": {"gas": {"US-NW-AVRN": 1.0}}, "US-NW-BPAT": { "wind": {"US-NW-AVRN": 1.0}, }, "US-CAR-SC": {"nuclear": {"US-CAR-SCEG": SC_VIRGIL_OWNERSHIP}}, "US-SE-SOCO": {"all": {"US-SE-AEC": 1.0}}, "US-FLA-FPL": {"all": {"US-FLA-NSB": 1.0}}, "US-SW-AZPS": {"gas": {"US-SW-GRMA": 1.0}}, } EXCHANGE_TRANSFERS = { # key receives the exchange from the set of keys "US-FLA-FPC->US-FLA-FPL": {"US-FLA-FPC->US-FLA-NSB"}, "US-MIDW-MISO->US-SE-SOCO": {"US-MIDW-MISO->US-SE-AEC"}, } TYPES = { # 'biomass': 'BM', # not currently supported "coal": "COL", "gas": "NG", "hydro": "WAT", "nuclear": "NUC", "oil": "OIL", "unknown": "OTH", "solar": "SUN", "wind": "WND", } BASE_URL = "path_to_url" PRODUCTION = ( f"{BASE_URL}/region-data/data/" "?data[]=value&facets[respondent][]={}&facets[type][]=NG&frequency=hourly" ) CONSUMPTION = ( f"{BASE_URL}/region-data/data/" "?data[]=value&facets[respondent][]={}&facets[type][]=D&frequency=hourly" ) CONSUMPTION_FORECAST = ( f"{BASE_URL}/region-data/data/" "?data[]=value&facets[respondent][]={}&facets[type][]=DF&frequency=hourly" ) PRODUCTION_MIX = ( f"{BASE_URL}/fuel-type-data/data/" "?data[]=value&facets[respondent][]={}&facets[fueltype][]={}&frequency=hourly" ) EXCHANGE = f"{BASE_URL}/interchange-data/data/" "?data[]=value{}&frequency=hourly" FILTER_INCOMPLETE_DATA_BYPASSED_MODES = { "US-TEX-ERCO": ["biomass", "geothermal", "oil"], "US-NW-PGE": [ "biomass", "geothermal", "oil", "solar", ], # Solar is not reported by PGE. 
"US-NW-PACE": ["biomass", "geothermal", "oil"], "US-MIDW-MISO": ["biomass", "geothermal", "oil"], "US-TEN-TVA": ["biomass", "geothermal", "oil"], "US-SE-SOCO": ["biomass", "geothermal", "oil"], "US-FLA-FPL": ["biomass", "geothermal", "oil"], } @refetch_frequency(timedelta(days=1)) def fetch_production( zone_key: str, session: Session | None = None, target_datetime: datetime | None = None, logger: Logger = getLogger(__name__), ): return _fetch( zone_key, PRODUCTION.format(REGIONS[zone_key]), session=session, target_datetime=target_datetime, logger=logger, ) @refetch_frequency(timedelta(days=1)) def fetch_consumption( zone_key: ZoneKey, session: Session | None = None, target_datetime: datetime | None = None, logger: Logger = getLogger(__name__), ) -> list[dict[str, Any]]: consumption_list = TotalConsumptionList(logger) consumption = _fetch( zone_key, CONSUMPTION.format(REGIONS[zone_key]), session=session, target_datetime=target_datetime, logger=logger, ) for point in consumption: consumption_list.append( zoneKey=zone_key, datetime=point["datetime"], consumption=point["value"], source="eia.gov", ) return consumption_list.to_list() @refetch_frequency(timedelta(days=1)) def fetch_consumption_forecast( zone_key: ZoneKey, session: Session | None = None, target_datetime: datetime | None = None, logger: Logger = getLogger(__name__), ): consumptions = TotalConsumptionList(logger) consumption_forecasts = _fetch( zone_key, CONSUMPTION_FORECAST.format(REGIONS[zone_key]), session=session, target_datetime=target_datetime, logger=logger, ) for forecast in consumption_forecasts: consumptions.append( zoneKey=zone_key, datetime=forecast["datetime"], consumption=forecast["value"], source="eia.gov", sourceType=EventSourceType.forecasted, ) return consumptions.to_list() def create_production_storage( fuel_type: str, production_point: dict[str, float], negative_threshold: float ) -> tuple[ProductionMix | None, StorageMix | None]: """Create a production mix or a storage mix from a 
production point handling the special cases of hydro storage and self consumption""" production_value = production_point["value"] production_mix = ProductionMix() storage_mix = StorageMix() if production_value < 0 and fuel_type == "hydro": # Negative hydro is reported by some BAs, according to the EIA those are pumped storage. # path_to_url storage_mix.add_value("hydro", abs(production_value)) return None, storage_mix # production_value > negative_threshold, this is considered to be self consumption and should be reported as 0. # Lower values are set to None as they are most likely outliers. production_mix.add_value( fuel_type, production_value, production_value > negative_threshold ) return production_mix, None @refetch_frequency(timedelta(days=1)) def fetch_production_mix( zone_key: ZoneKey, session: Session | None = None, target_datetime: datetime | None = None, logger: Logger = getLogger(__name__), ): all_production_breakdowns: list[ProductionBreakdownList] = [] # TODO: We could be smarter in the future and only fetch the expected production types. for production_mode, code in TYPES.items(): negative_threshold = NEGATIVE_PRODUCTION_THRESHOLDS_TYPE.get( production_mode, NEGATIVE_PRODUCTION_THRESHOLDS_TYPE["default"] ) production_breakdown = ProductionBreakdownList(logger) url_prefix = PRODUCTION_MIX.format(REGIONS[zone_key], code) production_values = _fetch( zone_key, url_prefix, session=session, target_datetime=target_datetime, logger=logger, ) # TODO Currently manually filtering out datapoints with null values # As null values can cause problems in the estimation models if there's # only null values. # Integrate with data quality layer later. 
production_values = [ datapoint for datapoint in production_values if datapoint["value"] is not None ] # EIA does not currently split production from the Virgil Summer C # plant across the two owning/ utilizing BAs: # US-CAR-SCEG and US-CAR-SC, # but attributes it all to US-CAR-SCEG # Here we apply a temporary fix for that until EIA properly splits the production # This split can be found in the eGRID data, # path_to_url if zone_key == "US-CAR-SCEG" and production_mode == "nuclear": for point in production_values: point.update({"value": point["value"] * (1 - SC_VIRGIL_OWNERSHIP)}) for point in production_values: production_mix, storage_mix = create_production_storage( production_mode, point, negative_threshold ) production_breakdown.append( zoneKey=zone_key, datetime=point["datetime"], production=production_mix, storage=storage_mix, source="eia.gov", ) all_production_breakdowns.append(production_breakdown) # Integrate the supplier zones in the zones they supply supplying_zones = PRODUCTION_ZONES_TRANSFERS.get(zone_key, {}) zones_to_integrate = { **supplying_zones.get("all", {}), **supplying_zones.get(production_mode, {}), } for zone, percentage in zones_to_integrate.items(): url_prefix = PRODUCTION_MIX.format(REGIONS[zone], code) additional_breakdown = ProductionBreakdownList(logger) additional_production = _fetch( zone, url_prefix, session=session, target_datetime=target_datetime, logger=logger, ) # TODO Currently manually filtering out datapoints with null values # As null values can cause problems in the estimation models if there's # only null values. # Integrate with data quality layer later. 
additional_production = [ datapoint for datapoint in additional_production if datapoint["value"] is not None ] for point in additional_production: point.update({"value": point["value"] * percentage}) production_mix, storage_mix = create_production_storage( production_mode, point, negative_threshold ) additional_breakdown.append( zoneKey=zone_key, datetime=point["datetime"], production=production_mix, storage=storage_mix, source="eia.gov", ) all_production_breakdowns.append(additional_breakdown) all_production_breakdowns = list( filter(lambda x: len(x.events) > 0, all_production_breakdowns) ) if len(all_production_breakdowns) == 0: logger.warning(f"No production mix data found for {zone_key}") return ProductionBreakdownList(logger).to_list() # Some of the returned mixes could be for older timeframes. # Fx the latest oil data could be 6 months old. # In this case we want to discard the old data as we won't be able to merge it timeframes = [ sorted(x.datetime for x in breakdowns.events) for breakdowns in all_production_breakdowns if len(breakdowns.events) > 0 ] latest_timeframe = max(timeframes, key=lambda x: x[-1]) for production_list in all_production_breakdowns: correct_mix = [] for production_mix in production_list.events: if production_mix.datetime in latest_timeframe: correct_mix.append(production_mix) production_list.events = correct_mix events = ProductionBreakdownList.merge_production_breakdowns( all_production_breakdowns, logger ) if zone_key in FILTER_INCOMPLETE_DATA_BYPASSED_MODES: events = ProductionBreakdownList.filter_expected_modes( events, by_passed_modes=FILTER_INCOMPLETE_DATA_BYPASSED_MODES[zone_key] ) return events.to_list() @refetch_frequency(timedelta(days=1)) def fetch_exchange( zone_key1: ZoneKey, zone_key2: ZoneKey, session: Session | None = None, target_datetime: datetime | None = None, logger: Logger = getLogger(__name__), ) -> list[dict[str, Any]]: sortedcodes = "->".join(sorted([zone_key1, zone_key2])) exchange_list = ExchangeList(logger) 
exchange = _fetch( sortedcodes, url_prefix=EXCHANGE.format(EXCHANGES[sortedcodes]), session=session, target_datetime=target_datetime, logger=logger, ) for point in exchange: exchange_list.append( zoneKey=ZoneKey(point["zoneKey"]), datetime=point["datetime"], netFlow=-point["value"] if sortedcodes in REVERSE_EXCHANGES else point["value"], source="eia.gov", ) # Integrate remapped exchanges remapped_exchanges = EXCHANGE_TRANSFERS.get(sortedcodes, {}) remapped_exchange_list = ExchangeList(logger) for remapped_exchange in remapped_exchanges: exchange = _fetch( remapped_exchange, url_prefix=EXCHANGE.format(EXCHANGES[remapped_exchange]), session=session, target_datetime=target_datetime, logger=logger, ) for point in exchange: remapped_exchange_list.append( zoneKey=ZoneKey(sortedcodes), datetime=point["datetime"], netFlow=-point["value"] if remapped_exchange in REVERSE_EXCHANGES else point["value"], source="eia.gov", ) exchange_list = ExchangeList.merge_exchanges( [exchange_list, remapped_exchange_list], logger ) return exchange_list.to_list() def _fetch( zone_key: str, url_prefix: str, session: Session | None = None, target_datetime: datetime | None = None, logger: Logger = getLogger(__name__), ): # get EIA API key API_KEY = get_token("EIA_KEY") if target_datetime: try: target_datetime = arrow.get(target_datetime).datetime except arrow.parser.ParserError as e: raise ValueError( f"target_datetime must be a valid datetime - received {target_datetime}" ) from e utc = tz.gettz("UTC") eia_ts_format = "%Y-%m-%dT%H" end = target_datetime.astimezone(utc) + timedelta(hours=1) start = end - timedelta(days=1) url = f"{url_prefix}&api_key={API_KEY}&start={start.strftime(eia_ts_format)}&end={end.strftime(eia_ts_format)}" else: url = f"{url_prefix}&api_key={API_KEY}&sort[0][column]=period&sort[0][direction]=desc&length=24" s = session or Session() req = s.get(url) raw_data = req.json() if raw_data.get("response", {}).get("data", None) is None: return [] return [ { "zoneKey": zone_key, 
"datetime": _get_utc_datetime_from_datapoint( parser.parse(datapoint["period"]) ), "value": float(datapoint["value"]) if datapoint["value"] else None, "source": "eia.gov", } for datapoint in raw_data["response"]["data"] ] def _conform_timestamp_convention(dt: datetime): # The timestamp given by EIA represents the end of the time interval. # ElectricityMap using another convention, # where the timestamp represents the beginning of the interval. # So we need shift the datetime 1 hour back. return dt - timedelta(hours=1) def _get_utc_datetime_from_datapoint(dt: datetime): """update to beginning hour convention and timezone to utc""" dt_beginning_hour = _conform_timestamp_convention(dt) dt_utc = arrow.get(dt_beginning_hour).to("utc") return dt_utc.datetime if __name__ == "__main__": from pprint import pprint # pprint(fetch_production_mix("US-NW-NEVP")) # pprint(fetch_consumption_forecast('US-CAL-CISO')) pprint( fetch_exchange( zone_key1="US-CENT-SWPP", zone_key2="CA-SK", target_datetime=datetime(2022, 3, 1), ) ) ```
```gas ;******************** (C) COPYRIGHT 2011 STMicroelectronics ******************** ;* File Name : startup_stm32f10x_md_vl.s ;* Author : MCD Application Team ;* Version : V3.5.0 ;* Date : 11-March-2011 ;* Description : STM32F10x Medium Density Value Line Devices vector table ;* for EWARM toolchain. ;* This module performs: ;* - Set the initial SP ;* - Configure the clock system ;* - Set the initial PC == __iar_program_start, ;* - Set the vector table entries with the exceptions ISR ;* address. ;* After Reset the Cortex-M3 processor is in Thread mode, ;* priority is Privileged, and the Stack is set to Main. ;******************************************************************************** ;* THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS ;* WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE TIME. ;* AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT, ;* INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE ;* CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING ;* INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS. ;******************************************************************************* ; ; ; The modules in this file are included in the libraries, and may be replaced ; by any user-defined modules that define the PUBLIC symbol _program_start or ; a user defined start symbol. ; To override the cstartup defined in the library, simply add your modified ; version to the workbench project. ; ; The vector table is normally located at address 0. ; When debugging in RAM, it can be located in RAM, aligned to at least 2^6. ; The name "__vector_table" has special meaning for C-SPY: ; it is where the SP start value is found, and the NVIC vector ; table register (VTOR) is initialized to this address if != 0. ; ; Cortex-M version ; MODULE ?cstartup ;; Forward declaration of sections. 
SECTION CSTACK:DATA:NOROOT(3) SECTION .intvec:CODE:NOROOT(2) EXTERN __iar_program_start EXTERN SystemInit PUBLIC __vector_table DATA __vector_table DCD sfe(CSTACK) DCD Reset_Handler ; Reset Handler DCD NMI_Handler ; NMI Handler DCD HardFault_Handler ; Hard Fault Handler DCD MemManage_Handler ; MPU Fault Handler DCD BusFault_Handler ; Bus Fault Handler DCD UsageFault_Handler ; Usage Fault Handler DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD SVC_Handler ; SVCall Handler DCD DebugMon_Handler ; Debug Monitor Handler DCD 0 ; Reserved DCD PendSV_Handler ; PendSV Handler DCD SysTick_Handler ; SysTick Handler ; External Interrupts DCD WWDG_IRQHandler ; Window Watchdog DCD PVD_IRQHandler ; PVD through EXTI Line detect DCD TAMPER_IRQHandler ; Tamper DCD RTC_IRQHandler ; RTC DCD FLASH_IRQHandler ; Flash DCD RCC_IRQHandler ; RCC DCD EXTI0_IRQHandler ; EXTI Line 0 DCD EXTI1_IRQHandler ; EXTI Line 1 DCD EXTI2_IRQHandler ; EXTI Line 2 DCD EXTI3_IRQHandler ; EXTI Line 3 DCD EXTI4_IRQHandler ; EXTI Line 4 DCD DMA1_Channel1_IRQHandler ; DMA1 Channel 1 DCD DMA1_Channel2_IRQHandler ; DMA1 Channel 2 DCD DMA1_Channel3_IRQHandler ; DMA1 Channel 3 DCD DMA1_Channel4_IRQHandler ; DMA1 Channel 4 DCD DMA1_Channel5_IRQHandler ; DMA1 Channel 5 DCD DMA1_Channel6_IRQHandler ; DMA1 Channel 6 DCD DMA1_Channel7_IRQHandler ; DMA1 Channel 7 DCD ADC1_IRQHandler ; ADC1 DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD EXTI9_5_IRQHandler ; EXTI Line 9..5 DCD TIM1_BRK_TIM15_IRQHandler ; TIM1 Break and TIM15 DCD TIM1_UP_TIM16_IRQHandler ; TIM1 Update and TIM16 DCD TIM1_TRG_COM_TIM17_IRQHandler ; TIM1 Trigger and Commutation and TIM17 DCD TIM1_CC_IRQHandler ; TIM1 Capture Compare DCD TIM2_IRQHandler ; TIM2 DCD TIM3_IRQHandler ; TIM3 DCD TIM4_IRQHandler ; TIM4 DCD I2C1_EV_IRQHandler ; I2C1 Event DCD I2C1_ER_IRQHandler ; I2C1 Error DCD I2C2_EV_IRQHandler ; I2C2 Event DCD I2C2_ER_IRQHandler ; I2C2 Error DCD SPI1_IRQHandler ; SPI1 DCD SPI2_IRQHandler ; SPI2 DCD 
USART1_IRQHandler ; USART1 DCD USART2_IRQHandler ; USART2 DCD USART3_IRQHandler ; USART3 DCD EXTI15_10_IRQHandler ; EXTI Line 15..10 DCD RTCAlarm_IRQHandler ; RTC Alarm through EXTI Line DCD CEC_IRQHandler ; HDMI-CEC DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD TIM6_DAC_IRQHandler ; TIM6 and DAC underrun DCD TIM7_IRQHandler ; TIM7 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; Default interrupt handlers. ;; THUMB PUBWEAK Reset_Handler SECTION .text:CODE:REORDER(2) Reset_Handler LDR R0, =SystemInit BLX R0 LDR R0, =__iar_program_start BX R0 PUBWEAK NMI_Handler SECTION .text:CODE:REORDER(1) NMI_Handler B NMI_Handler PUBWEAK HardFault_Handler SECTION .text:CODE:REORDER(1) HardFault_Handler B HardFault_Handler PUBWEAK MemManage_Handler SECTION .text:CODE:REORDER(1) MemManage_Handler B MemManage_Handler PUBWEAK BusFault_Handler SECTION .text:CODE:REORDER(1) BusFault_Handler B BusFault_Handler PUBWEAK UsageFault_Handler SECTION .text:CODE:REORDER(1) UsageFault_Handler B UsageFault_Handler PUBWEAK SVC_Handler SECTION .text:CODE:REORDER(1) SVC_Handler B SVC_Handler PUBWEAK DebugMon_Handler SECTION .text:CODE:REORDER(1) DebugMon_Handler B DebugMon_Handler PUBWEAK PendSV_Handler SECTION .text:CODE:REORDER(1) PendSV_Handler B PendSV_Handler PUBWEAK SysTick_Handler SECTION .text:CODE:REORDER(1) SysTick_Handler B SysTick_Handler PUBWEAK WWDG_IRQHandler SECTION .text:CODE:REORDER(1) WWDG_IRQHandler B WWDG_IRQHandler PUBWEAK PVD_IRQHandler SECTION .text:CODE:REORDER(1) PVD_IRQHandler B PVD_IRQHandler PUBWEAK TAMPER_IRQHandler SECTION .text:CODE:REORDER(1) TAMPER_IRQHandler B TAMPER_IRQHandler PUBWEAK RTC_IRQHandler SECTION .text:CODE:REORDER(1) RTC_IRQHandler B RTC_IRQHandler PUBWEAK FLASH_IRQHandler SECTION .text:CODE:REORDER(1) FLASH_IRQHandler B FLASH_IRQHandler PUBWEAK RCC_IRQHandler SECTION .text:CODE:REORDER(1) 
RCC_IRQHandler B RCC_IRQHandler PUBWEAK EXTI0_IRQHandler SECTION .text:CODE:REORDER(1) EXTI0_IRQHandler B EXTI0_IRQHandler PUBWEAK EXTI1_IRQHandler SECTION .text:CODE:REORDER(1) EXTI1_IRQHandler B EXTI1_IRQHandler PUBWEAK EXTI2_IRQHandler SECTION .text:CODE:REORDER(1) EXTI2_IRQHandler B EXTI2_IRQHandler PUBWEAK EXTI3_IRQHandler SECTION .text:CODE:REORDER(1) EXTI3_IRQHandler B EXTI3_IRQHandler PUBWEAK EXTI4_IRQHandler SECTION .text:CODE:REORDER(1) EXTI4_IRQHandler B EXTI4_IRQHandler PUBWEAK DMA1_Channel1_IRQHandler SECTION .text:CODE:REORDER(1) DMA1_Channel1_IRQHandler B DMA1_Channel1_IRQHandler PUBWEAK DMA1_Channel2_IRQHandler SECTION .text:CODE:REORDER(1) DMA1_Channel2_IRQHandler B DMA1_Channel2_IRQHandler PUBWEAK DMA1_Channel3_IRQHandler SECTION .text:CODE:REORDER(1) DMA1_Channel3_IRQHandler B DMA1_Channel3_IRQHandler PUBWEAK DMA1_Channel4_IRQHandler SECTION .text:CODE:REORDER(1) DMA1_Channel4_IRQHandler B DMA1_Channel4_IRQHandler PUBWEAK DMA1_Channel5_IRQHandler SECTION .text:CODE:REORDER(1) DMA1_Channel5_IRQHandler B DMA1_Channel5_IRQHandler PUBWEAK DMA1_Channel6_IRQHandler SECTION .text:CODE:REORDER(1) DMA1_Channel6_IRQHandler B DMA1_Channel6_IRQHandler PUBWEAK DMA1_Channel7_IRQHandler SECTION .text:CODE:REORDER(1) DMA1_Channel7_IRQHandler B DMA1_Channel7_IRQHandler PUBWEAK ADC1_IRQHandler SECTION .text:CODE:REORDER(1) ADC1_IRQHandler B ADC1_IRQHandler PUBWEAK EXTI9_5_IRQHandler SECTION .text:CODE:REORDER(1) EXTI9_5_IRQHandler B EXTI9_5_IRQHandler PUBWEAK TIM1_BRK_TIM15_IRQHandler SECTION .text:CODE:REORDER(1) TIM1_BRK_TIM15_IRQHandler B TIM1_BRK_TIM15_IRQHandler PUBWEAK TIM1_UP_TIM16_IRQHandler SECTION .text:CODE:REORDER(1) TIM1_UP_TIM16_IRQHandler B TIM1_UP_TIM16_IRQHandler PUBWEAK TIM1_TRG_COM_TIM17_IRQHandler SECTION .text:CODE:REORDER(1) TIM1_TRG_COM_TIM17_IRQHandler B TIM1_TRG_COM_TIM17_IRQHandler PUBWEAK TIM1_CC_IRQHandler SECTION .text:CODE:REORDER(1) TIM1_CC_IRQHandler B TIM1_CC_IRQHandler PUBWEAK TIM2_IRQHandler SECTION .text:CODE:REORDER(1) 
TIM2_IRQHandler B TIM2_IRQHandler PUBWEAK TIM3_IRQHandler SECTION .text:CODE:REORDER(1) TIM3_IRQHandler B TIM3_IRQHandler PUBWEAK TIM4_IRQHandler SECTION .text:CODE:REORDER(1) TIM4_IRQHandler B TIM4_IRQHandler PUBWEAK I2C1_EV_IRQHandler SECTION .text:CODE:REORDER(1) I2C1_EV_IRQHandler B I2C1_EV_IRQHandler PUBWEAK I2C1_ER_IRQHandler SECTION .text:CODE:REORDER(1) I2C1_ER_IRQHandler B I2C1_ER_IRQHandler PUBWEAK I2C2_EV_IRQHandler SECTION .text:CODE:REORDER(1) I2C2_EV_IRQHandler B I2C2_EV_IRQHandler PUBWEAK I2C2_ER_IRQHandler SECTION .text:CODE:REORDER(1) I2C2_ER_IRQHandler B I2C2_ER_IRQHandler PUBWEAK SPI1_IRQHandler SECTION .text:CODE:REORDER(1) SPI1_IRQHandler B SPI1_IRQHandler PUBWEAK SPI2_IRQHandler SECTION .text:CODE:REORDER(1) SPI2_IRQHandler B SPI2_IRQHandler PUBWEAK USART1_IRQHandler SECTION .text:CODE:REORDER(1) USART1_IRQHandler B USART1_IRQHandler PUBWEAK USART2_IRQHandler SECTION .text:CODE:REORDER(1) USART2_IRQHandler B USART2_IRQHandler PUBWEAK USART3_IRQHandler SECTION .text:CODE:REORDER(1) USART3_IRQHandler B USART3_IRQHandler PUBWEAK EXTI15_10_IRQHandler SECTION .text:CODE:REORDER(1) EXTI15_10_IRQHandler B EXTI15_10_IRQHandler PUBWEAK RTCAlarm_IRQHandler SECTION .text:CODE:REORDER(1) RTCAlarm_IRQHandler B RTCAlarm_IRQHandler PUBWEAK CEC_IRQHandler SECTION .text:CODE:REORDER(1) CEC_IRQHandler B CEC_IRQHandler PUBWEAK TIM6_DAC_IRQHandler SECTION .text:CODE:REORDER(1) TIM6_DAC_IRQHandler B TIM6_DAC_IRQHandler PUBWEAK TIM7_IRQHandler SECTION .text:CODE:REORDER(1) TIM7_IRQHandler B TIM7_IRQHandler END /******************* (C) COPYRIGHT 2011 STMicroelectronics *****END OF FILE****/ ```
```objective-c // // stdlib.h // // // The C Standard Library <stdlib.h> header. // #pragma once #define _INC_STDLIB #include <corecrt.h> #include <corecrt_malloc.h> #include <corecrt_search.h> #include <corecrt_wstdlib.h> #include <limits.h> _CRT_BEGIN_C_HEADER #ifndef _countof #define _countof __crt_countof #endif // Minimum and maximum macros #define __max(a,b) (((a) > (b)) ? (a) : (b)) #define __min(a,b) (((a) < (b)) ? (a) : (b)) _ACRTIMP void __cdecl _swab( _Inout_updates_(_SizeInBytes) _Post_readable_size_(_SizeInBytes) char* _Buf1, _Inout_updates_(_SizeInBytes) _Post_readable_size_(_SizeInBytes) char* _Buf2, _In_ int _SizeInBytes ); //-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // // Exit and Abort // //-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // Argument values for exit() #define EXIT_SUCCESS 0 #define EXIT_FAILURE 1 #if _CRT_FUNCTIONS_REQUIRED _ACRTIMP __declspec(noreturn) void __cdecl exit(_In_ int _Code); _ACRTIMP __declspec(noreturn) void __cdecl _exit(_In_ int _Code); _ACRTIMP __declspec(noreturn) void __cdecl _Exit(_In_ int _Code); _ACRTIMP __declspec(noreturn) void __cdecl quick_exit(_In_ int _Code); _ACRTIMP __declspec(noreturn) void __cdecl abort(void); #endif // _CRT_FUNCTIONS_REQUIRED // Argument values for _set_abort_behavior(). #define _WRITE_ABORT_MSG 0x1 // debug only, has no effect in release #define _CALL_REPORTFAULT 0x2 _CRT_DEPRECATE_TEXT("VC-LTL don't support _set_abort_behavior function. 
If you call abort function will silently quit the program.") _ACRTIMP unsigned int __cdecl _set_abort_behavior( _In_ unsigned int _Flags, _In_ unsigned int _Mask ); #ifndef _CRT_ONEXIT_T_DEFINED #define _CRT_ONEXIT_T_DEFINED typedef int (__CRTDECL* _onexit_t)(void); #ifdef _M_CEE typedef int (__clrcall* _onexit_m_t)(void); #endif #endif #if _CRT_INTERNAL_NONSTDC_NAMES // Non-ANSI name for compatibility #define onexit_t _onexit_t #endif #ifdef _M_CEE #pragma warning (push) #pragma warning (disable: 4985) _Check_return_ int __clrcall _atexit_m_appdomain(_In_opt_ void (__clrcall* _Function)(void)); _onexit_m_t __clrcall _onexit_m_appdomain(_onexit_m_t _Function); #ifdef _M_CEE_MIXED #ifdef __cplusplus [System::Security::SecurityCritical] #endif _Check_return_ int __clrcall _atexit_m(_In_opt_ void (__clrcall* _Function)(void)); _onexit_m_t __clrcall _onexit_m(_onexit_m_t _Function); #else #ifdef __cplusplus [System::Security::SecurityCritical] #endif _Check_return_ inline int __clrcall _atexit_m(_In_opt_ void (__clrcall* _Function)(void)) { return _atexit_m_appdomain(_Function); } inline _onexit_m_t __clrcall _onexit_m(_onexit_t _Function) { return _onexit_m_appdomain(_Function); } #endif #pragma warning (pop) #endif #ifdef _M_CEE_PURE // In pure mode, atexit is the same as atexit_m_appdomain extern "C++" { #ifdef __cplusplus [System::Security::SecurityCritical] #endif inline int __clrcall atexit(void (__clrcall* _Function)(void)) { return _atexit_m_appdomain(_Function); } inline _onexit_t __clrcall _onexit(_onexit_t _Function) { return _onexit_m_appdomain(_Function); } } // extern "C++" #else int __cdecl atexit(void (__cdecl*)(void)); _onexit_t __cdecl _onexit(_In_opt_ _onexit_t _Func); #endif int __cdecl at_quick_exit(void (__cdecl*)(void)); //-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // // Global State (errno, global handlers, etc.) 
// //-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ #ifndef _M_CEE_PURE // a purecall handler procedure. Never returns normally typedef void (__cdecl* _purecall_handler)(void); // Invalid parameter handler function pointer type typedef void (__cdecl* _invalid_parameter_handler)( wchar_t const*, wchar_t const*, wchar_t const*, unsigned int, uintptr_t ); // Establishes a purecall handler _VCRTIMP _purecall_handler __cdecl _set_purecall_handler( _In_opt_ _purecall_handler _Handler ); _VCRTIMP _purecall_handler __cdecl _get_purecall_handler(void); // Establishes an invalid parameter handler _ACRTIMP _invalid_parameter_handler __cdecl _set_invalid_parameter_handler( _In_opt_ _invalid_parameter_handler _Handler ); _ACRTIMP _invalid_parameter_handler __cdecl _get_invalid_parameter_handler(void); _ACRTIMP _invalid_parameter_handler __cdecl _set_thread_local_invalid_parameter_handler( _In_opt_ _invalid_parameter_handler _Handler ); _ACRTIMP _invalid_parameter_handler __cdecl _get_thread_local_invalid_parameter_handler(void); #endif #if defined __cplusplus && defined _M_CEE_PURE extern "C++" { typedef void (__clrcall* _purecall_handler)(void); typedef _purecall_handler _purecall_handler_m; _MRTIMP _purecall_handler __cdecl _set_purecall_handler( _In_opt_ _purecall_handler _Handler ); } // extern "C++" #endif // Argument values for _set_error_mode(). 
#define _OUT_TO_DEFAULT 0 #define _OUT_TO_STDERR 1 #define _OUT_TO_MSGBOX 2 #define _REPORT_ERRMODE 3 _Check_return_opt_ _ACRTIMP int __cdecl _set_error_mode(_In_ int _Mode); #if _CRT_FUNCTIONS_REQUIRED _ACRTIMP int* __cdecl _errno(void); #define errno (*_errno()) _ACRTIMP errno_t __cdecl _set_errno(_In_ int _Value); _ACRTIMP errno_t __cdecl _get_errno(_Out_ int* _Value); _ACRTIMP unsigned long* __cdecl __doserrno(void); #define _doserrno (*__doserrno()) _ACRTIMP errno_t __cdecl _set_doserrno(_In_ unsigned long _Value); _ACRTIMP errno_t __cdecl _get_doserrno(_Out_ unsigned long * _Value); // This is non-const for backwards compatibility; do not modify it. _ACRTIMP _CRT_INSECURE_DEPRECATE(strerror) char** __cdecl __sys_errlist(void); #define _sys_errlist (__sys_errlist()) _ACRTIMP _CRT_INSECURE_DEPRECATE(strerror) int * __cdecl __sys_nerr(void); #define _sys_nerr (*__sys_nerr()) _ACRTIMP void __cdecl perror(_In_opt_z_ char const* _ErrMsg); #endif // _CRT_FUNCTIONS_REQUIRED // These point to the executable module name. 
_CRT_INSECURE_DEPRECATE_GLOBALS(_get_pgmptr ) _ACRTIMP char** __cdecl __p__pgmptr (void); _CRT_INSECURE_DEPRECATE_GLOBALS(_get_wpgmptr) _ACRTIMP wchar_t** __cdecl __p__wpgmptr(void); _CRT_INSECURE_DEPRECATE_GLOBALS(_get_fmode ) _ACRTIMP int* __cdecl __p__fmode (void); #ifdef _CRT_DECLARE_GLOBAL_VARIABLES_DIRECTLY _CRT_INSECURE_DEPRECATE_GLOBALS(_get_pgmptr ) __declspec(dllimport) extern char* _pgmptr; _CRT_INSECURE_DEPRECATE_GLOBALS(_get_wpgmptr) __declspec(dllimport) extern wchar_t* _wpgmptr; #ifndef _CORECRT_BUILD _CRT_INSECURE_DEPRECATE_GLOBALS(_get_fmode ) __declspec(dllimport) extern int _fmode; #endif #else #define _pgmptr (*__p__pgmptr ()) #define _wpgmptr (*__p__wpgmptr()) #define _fmode (*__p__fmode ()) #endif _Success_(return == 0) _ACRTIMP errno_t __cdecl _get_pgmptr (_Outptr_result_z_ char** _Value); _Success_(return == 0) _ACRTIMP errno_t __cdecl _get_wpgmptr(_Outptr_result_z_ wchar_t** _Value); _ACRTIMP errno_t __cdecl _set_fmode (_In_ int _Mode ); _ACRTIMP errno_t __cdecl _get_fmode (_Out_ int* _PMode); //-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // // Math // //-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ typedef struct _div_t { int quot; int rem; } div_t; typedef struct _ldiv_t { long quot; long rem; } ldiv_t; typedef struct _lldiv_t { long long quot; long long rem; } lldiv_t; _Check_return_ int __cdecl abs (_In_ int _Number); _Check_return_ long __cdecl labs (_In_ long _Number); _Check_return_ long long __cdecl llabs (_In_ long long _Number); _Check_return_ __int64 __cdecl _abs64(_In_ __int64 _Number); _Check_return_ unsigned short __cdecl _byteswap_ushort(_In_ unsigned short _Number); _Check_return_ unsigned long __cdecl _byteswap_ulong (_In_ unsigned long _Number); _Check_return_ unsigned __int64 __cdecl _byteswap_uint64(_In_ unsigned __int64 _Number); _Check_return_ _ACRTIMP div_t __cdecl div (_In_ int _Numerator, _In_ int _Denominator); _Check_return_ _ACRTIMP ldiv_t 
__cdecl ldiv (_In_ long _Numerator, _In_ long _Denominator); _Check_return_ _ACRTIMP lldiv_t __cdecl lldiv(_In_ long long _Numerator, _In_ long long _Denominator); // These functions have declspecs in their declarations in the Windows headers, // which cause PREfast to fire 6540. #pragma warning (push) #pragma warning (disable:6540) unsigned int __cdecl _rotl( _In_ unsigned int _Value, _In_ int _Shift ); _Check_return_ unsigned long __cdecl _lrotl( _In_ unsigned long _Value, _In_ int _Shift ); unsigned __int64 __cdecl _rotl64( _In_ unsigned __int64 _Value, _In_ int _Shift ); unsigned int __cdecl _rotr( _In_ unsigned int _Value, _In_ int _Shift ); _Check_return_ unsigned long __cdecl _lrotr( _In_ unsigned long _Value, _In_ int _Shift ); unsigned __int64 __cdecl _rotr64( _In_ unsigned __int64 _Value, _In_ int _Shift ); #pragma warning (pop) // Maximum value that can be returned by the rand function: #define RAND_MAX 0x7fff _ACRTIMP void __cdecl srand(_In_ unsigned int _Seed); _Check_return_ _ACRTIMP int __cdecl rand(void); #if defined _CRT_RAND_S || defined _CRTBLD _ACRTIMP errno_t __cdecl rand_s(_Out_ unsigned int* _RandomValue); #endif #ifdef __cplusplus extern "C++" { inline long abs(long const _X) throw() { return labs(_X); } inline long long abs(long long const _X) throw() { return llabs(_X); } inline ldiv_t div(long const _A1, long const _A2) throw() { return ldiv(_A1, _A2); } inline lldiv_t div(long long const _A1, long long const _A2) throw() { return lldiv(_A1, _A2); } } #endif // __cplusplus // Structs used to fool the compiler into not generating floating point // instructions when copying and pushing [long] double values #define _CRT_DOUBLE_DEC #ifndef _LDSUPPORT #pragma pack(push, 4) typedef struct { unsigned char ld[10]; } _LDOUBLE; #pragma pack(pop) #define _PTR_LD(x) ((unsigned char*)(&(x)->ld)) #else // _LDSUPPORT // push and pop long, which is #defined as __int64 by a spec2k test #pragma push_macro("long") #undef long typedef long double _LDOUBLE; 
#pragma pop_macro("long") #define _PTR_LD(x) ((unsigned char *)(x)) #endif // _LDSUPPORT typedef struct { double x; } _CRT_DOUBLE; typedef struct { float f; } _CRT_FLOAT; // push and pop long, which is #defined as __int64 by a spec2k test #pragma push_macro("long") #undef long typedef struct { long double x; } _LONGDOUBLE; #pragma pop_macro("long") #pragma pack(push, 4) typedef struct { unsigned char ld12[12]; } _LDBL12; #pragma pack(pop) //-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // // Narrow String to Number Conversions // //-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ _Check_return_ _ACRTIMP double __cdecl atof (_In_z_ char const* _String); _Check_return_ _CRT_JIT_INTRINSIC _ACRTIMP int __cdecl atoi (_In_z_ char const* _String); _Check_return_ _ACRTIMP long __cdecl atol (_In_z_ char const* _String); _Check_return_ _ACRTIMP long long __cdecl atoll (_In_z_ char const* _String); _Check_return_ _ACRTIMP __int64 __cdecl _atoi64(_In_z_ char const* _String); _Check_return_ _ACRTIMP double __cdecl _atof_l (_In_z_ char const* _String, _In_opt_ _locale_t _Locale); _Check_return_ _ACRTIMP int __cdecl _atoi_l (_In_z_ char const* _String, _In_opt_ _locale_t _Locale); _Check_return_ _ACRTIMP long __cdecl _atol_l (_In_z_ char const* _String, _In_opt_ _locale_t _Locale); _Check_return_ _ACRTIMP long long __cdecl _atoll_l (_In_z_ char const* _String, _In_opt_ _locale_t _Locale); _Check_return_ _ACRTIMP __int64 __cdecl _atoi64_l(_In_z_ char const* _String, _In_opt_ _locale_t _Locale); _Check_return_ _ACRTIMP int __cdecl _atoflt (_Out_ _CRT_FLOAT* _Result, _In_z_ char const* _String); _Check_return_ _ACRTIMP int __cdecl _atodbl (_Out_ _CRT_DOUBLE* _Result, _In_z_ char* _String); _Check_return_ _ACRTIMP int __cdecl _atoldbl(_Out_ _LDOUBLE* _Result, _In_z_ char* _String); _Check_return_ _ACRTIMP int __cdecl _atoflt_l( _Out_ _CRT_FLOAT* _Result, _In_z_ char const* _String, _In_opt_ _locale_t _Locale ); 
_Check_return_ _ACRTIMP int __cdecl _atodbl_l( _Out_ _CRT_DOUBLE* _Result, _In_z_ char* _String, _In_opt_ _locale_t _Locale ); _Check_return_ _ACRTIMP int __cdecl _atoldbl_l( _Out_ _LDOUBLE* _Result, _In_z_ char* _String, _In_opt_ _locale_t _Locale ); _Check_return_ _ACRTIMP float __cdecl strtof( _In_z_ char const* _String, _Out_opt_ _Deref_post_z_ char** _EndPtr ); _Check_return_ _ACRTIMP float __cdecl _strtof_l( _In_z_ char const* _String, _Out_opt_ _Deref_post_z_ char** _EndPtr, _In_opt_ _locale_t _Locale ); _Check_return_ _ACRTIMP double __cdecl strtod( _In_z_ char const* _String, _Out_opt_ _Deref_post_z_ char** _EndPtr ); _Check_return_ _ACRTIMP double __cdecl _strtod_l( _In_z_ char const* _String, _Out_opt_ _Deref_post_z_ char** _EndPtr, _In_opt_ _locale_t _Locale ); _Check_return_ _ACRTIMP long double __cdecl strtold( _In_z_ char const* _String, _Out_opt_ _Deref_post_z_ char** _EndPtr ); _Check_return_ _ACRTIMP long double __cdecl _strtold_l( _In_z_ char const* _String, _Out_opt_ _Deref_post_z_ char** _EndPtr, _In_opt_ _locale_t _Locale ); _Check_return_ _ACRTIMP long __cdecl strtol( _In_z_ char const* _String, _Out_opt_ _Deref_post_z_ char** _EndPtr, _In_ int _Radix ); _Check_return_ _ACRTIMP long __cdecl _strtol_l( _In_z_ char const* _String, _Out_opt_ _Deref_post_z_ char** _EndPtr, _In_ int _Radix, _In_opt_ _locale_t _Locale ); _Check_return_ _ACRTIMP long long __cdecl strtoll( _In_z_ char const* _String, _Out_opt_ _Deref_post_z_ char** _EndPtr, _In_ int _Radix ); _Check_return_ _ACRTIMP long long __cdecl _strtoll_l( _In_z_ char const* _String, _Out_opt_ _Deref_post_z_ char** _EndPtr, _In_ int _Radix, _In_opt_ _locale_t _Locale ); _Check_return_ _ACRTIMP unsigned long __cdecl strtoul( _In_z_ char const* _String, _Out_opt_ _Deref_post_z_ char** _EndPtr, _In_ int _Radix ); _Check_return_ _ACRTIMP unsigned long __cdecl _strtoul_l( _In_z_ char const* _String, _Out_opt_ _Deref_post_z_ char** _EndPtr, _In_ int _Radix, _In_opt_ _locale_t _Locale ); 
_Check_return_ _ACRTIMP unsigned long long __cdecl strtoull( _In_z_ char const* _String, _Out_opt_ _Deref_post_z_ char** _EndPtr, _In_ int _Radix ); _Check_return_ _ACRTIMP unsigned long long __cdecl _strtoull_l( _In_z_ char const* _String, _Out_opt_ _Deref_post_z_ char** _EndPtr, _In_ int _Radix, _In_opt_ _locale_t _Locale ); _Check_return_ _ACRTIMP __int64 __cdecl _strtoi64( _In_z_ char const* _String, _Out_opt_ _Deref_post_z_ char** _EndPtr, _In_ int _Radix ); _Check_return_ _ACRTIMP __int64 __cdecl _strtoi64_l( _In_z_ char const* _String, _Out_opt_ _Deref_post_z_ char** _EndPtr, _In_ int _Radix, _In_opt_ _locale_t _Locale ); _Check_return_ _ACRTIMP unsigned __int64 __cdecl _strtoui64( _In_z_ char const* _String, _Out_opt_ _Deref_post_z_ char** _EndPtr, _In_ int _Radix ); _Check_return_ _ACRTIMP unsigned __int64 __cdecl _strtoui64_l( _In_z_ char const* _String, _Out_opt_ _Deref_post_z_ char** _EndPtr, _In_ int _Radix, _In_opt_ _locale_t _Locale ); //-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // // Number to Narrow String Conversions // //-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ _Success_(return == 0) _Check_return_opt_ _ACRTIMP errno_t __cdecl _itoa_s( _In_ int _Value, _Out_writes_z_(_BufferCount) char* _Buffer, _In_ size_t _BufferCount, _In_ int _Radix ); __DEFINE_CPP_OVERLOAD_SECURE_FUNC_1_1( _Success_(return == 0) errno_t, _itoa_s, _In_ int, _Value, char, _Buffer, _In_ int, _Radix ) #pragma warning(push) #pragma warning(disable: 28719) // __WARNING_BANNED_API_USAGE #pragma warning(disable: 28726) // __WARNING_BANNED_API_USAGEL2 __DEFINE_CPP_OVERLOAD_STANDARD_FUNC_1_1( char*, __RETURN_POLICY_DST, _ACRTIMP, _itoa, _In_ int, _Value, _Pre_notnull_ _Post_z_, char, _Buffer, _In_ int, _Radix ) #pragma warning(pop) _Success_(return == 0) _Check_return_opt_ _ACRTIMP errno_t __cdecl _ltoa_s( _In_ long _Value, _Out_writes_z_(_BufferCount) char* _Buffer, _In_ size_t _BufferCount, _In_ int _Radix ); 
__DEFINE_CPP_OVERLOAD_SECURE_FUNC_1_1( errno_t, _ltoa_s, _In_ long, _Value, char, _Buffer, _In_ int, _Radix ) __DEFINE_CPP_OVERLOAD_STANDARD_FUNC_1_1( char*, __RETURN_POLICY_DST, _ACRTIMP, _ltoa, _In_ long, _Value, _Pre_notnull_ _Post_z_, char, _Buffer, _In_ int, _Radix ) _Success_(return == 0) _Check_return_opt_ _ACRTIMP errno_t __cdecl _ultoa_s( _In_ unsigned long _Value, _Out_writes_z_(_BufferCount) char* _Buffer, _In_ size_t _BufferCount, _In_ int _Radix ); __DEFINE_CPP_OVERLOAD_SECURE_FUNC_1_1( errno_t, _ultoa_s, _In_ unsigned long, _Value, char, _Buffer, _In_ int, _Radix ) #pragma warning(push) #pragma warning(disable: 28726) // __WARNING_BANNED_API_USAGEL2 __DEFINE_CPP_OVERLOAD_STANDARD_FUNC_1_1( char*, __RETURN_POLICY_DST, _ACRTIMP, _ultoa, _In_ unsigned long, _Value, _Pre_notnull_ _Post_z_, char, _Buffer, _In_ int, _Radix ) #pragma warning(pop) _Success_(return == 0) _Check_return_opt_ _ACRTIMP errno_t __cdecl _i64toa_s( _In_ __int64 _Value, _Out_writes_z_(_BufferCount) char* _Buffer, _In_ size_t _BufferCount, _In_ int _Radix ); _Success_(return == 0) _CRT_INSECURE_DEPRECATE(_i64toa_s) _ACRTIMP char* __cdecl _i64toa( _In_ __int64 _Value, _Pre_notnull_ _Post_z_ char* _Buffer, _In_ int _Radix ); _Success_(return == 0) _Check_return_opt_ _ACRTIMP errno_t __cdecl _ui64toa_s( _In_ unsigned __int64 _Value, _Out_writes_z_(_BufferCount) char* _Buffer, _In_ size_t _BufferCount, _In_ int _Radix ); _CRT_INSECURE_DEPRECATE(_ui64toa_s) _ACRTIMP char* __cdecl _ui64toa( _In_ unsigned __int64 _Value, _Pre_notnull_ _Post_z_ char* _Buffer, _In_ int _Radix ); // _CVTBUFSIZE is the maximum size for the per-thread conversion buffer. It // should be at least as long as the number of digits in the largest double // precision value (?.?e308 in IEEE arithmetic). We will use the same size // buffer as is used in the printf support routines. 
// // (This value actually allows 40 additional decimal places; even though there // are only 16 digits of accuracy in a double precision IEEE number, the user may // ask for more to effect zero padding.) #define _CVTBUFSIZE (309 + 40) // # of digits in max. dp value + slop _Success_(return == 0) _Check_return_wat_ _ACRTIMP errno_t __cdecl _ecvt_s( _Out_writes_z_(_BufferCount) char* _Buffer, _In_ size_t _BufferCount, _In_ double _Value, _In_ int _DigitCount, _Out_ int* _PtDec, _Out_ int* _PtSign ); __DEFINE_CPP_OVERLOAD_SECURE_FUNC_0_4( errno_t, _ecvt_s, char, _Buffer, _In_ double, _Value, _In_ int, _DigitCount, _Out_ int*, _PtDec, _Out_ int*, _PtSign ) _Check_return_ _CRT_INSECURE_DEPRECATE(_ecvt_s) _ACRTIMP char* __cdecl _ecvt( _In_ double _Value, _In_ int _DigitCount, _Out_ int* _PtDec, _Out_ int* _PtSign ); _Success_(return == 0) _Check_return_wat_ _ACRTIMP errno_t __cdecl _fcvt_s( _Out_writes_z_(_BufferCount) char* _Buffer, _In_ size_t _BufferCount, _In_ double _Value, _In_ int _FractionalDigitCount, _Out_ int* _PtDec, _Out_ int* _PtSign ); __DEFINE_CPP_OVERLOAD_SECURE_FUNC_0_4( _Success_(return == 0) errno_t, _fcvt_s, char, _Buffer, _In_ double, _Value, _In_ int, _FractionalDigitCount, _Out_ int*, _PtDec, _Out_ int*, _PtSign ) _Success_(return == 0) _Check_return_ _CRT_INSECURE_DEPRECATE(_fcvt_s) _ACRTIMP char* __cdecl _fcvt( _In_ double _Value, _In_ int _FractionalDigitCount, _Out_ int* _PtDec, _Out_ int* _PtSign ); _Success_(return == 0) _ACRTIMP errno_t __cdecl _gcvt_s( _Out_writes_z_(_BufferCount) char* _Buffer, _In_ size_t _BufferCount, _In_ double _Value, _In_ int _DigitCount ); __DEFINE_CPP_OVERLOAD_SECURE_FUNC_0_2( _Success_(return == 0) errno_t, _gcvt_s, char, _Buffer, _In_ double, _Value, _In_ int, _DigitCount ) _CRT_INSECURE_DEPRECATE(_gcvt_s) _ACRTIMP char* __cdecl _gcvt( _In_ double _Value, _In_ int _DigitCount, _Pre_notnull_ _Post_z_ char* _Buffer ); //-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // // Multibyte 
String Operations and Conversions // //-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // Maximum number of bytes in multi-byte character in the current locale // (also defined in ctype.h). #ifndef MB_CUR_MAX #if defined _CRT_DISABLE_PERFCRIT_LOCKS && !defined _DLL #define MB_CUR_MAX __mb_cur_max #else #define MB_CUR_MAX ___mb_cur_max_func() #endif #ifdef _CRT_DECLARE_GLOBAL_VARIABLES_DIRECTLY __declspec(dllimport) extern int __mb_cur_max; #else #define __mb_cur_max (___mb_cur_max_func()) #endif _Post_satisfies_(return > 0 && return < MB_LEN_MAX) _ACRTIMP int __cdecl ___mb_cur_max_func(void); _Post_satisfies_(return > 0 && return < MB_LEN_MAX) _ACRTIMP int __cdecl ___mb_cur_max_l_func(_locale_t _Locale); #endif _Check_return_ _ACRTIMP int __cdecl mblen( _In_reads_bytes_opt_(_MaxCount) _Pre_opt_z_ char const* _Ch, _In_ size_t _MaxCount ); _Check_return_ _ACRTIMP int __cdecl _mblen_l( _In_reads_bytes_opt_(_MaxCount) _Pre_opt_z_ char const* _Ch, _In_ size_t _MaxCount, _In_opt_ _locale_t _Locale ); _Check_return_ _Post_satisfies_(return <= _String_length_(_String)) _ACRTIMP size_t __cdecl _mbstrlen( _In_z_ char const* _String ); _Check_return_ _Post_satisfies_(return <= _String_length_(_String) || return == (size_t)-1) _ACRTIMP size_t __cdecl _mbstrlen_l( _In_z_ char const* _String, _In_opt_ _locale_t _Locale ); _Check_return_ _Post_satisfies_((return <= _String_length_(_String) && return <= _MaxCount) || return == (size_t)-1) _ACRTIMP size_t __cdecl _mbstrnlen( _In_z_ char const* _String, _In_ size_t _MaxCount ); _Post_satisfies_((return <= _String_length_(_String) && return <= _MaxCount) || return == (size_t)-1) _Check_return_ _ACRTIMP size_t __cdecl _mbstrnlen_l( _In_z_ char const* _String, _In_ size_t _MaxCount, _In_opt_ _locale_t _Locale ); _Success_(return != -1) _ACRTIMP int __cdecl mbtowc( _Pre_notnull_ _Post_z_ wchar_t* _DstCh, _In_reads_or_z_opt_(_SrcSizeInBytes) char const* _SrcCh, _In_ size_t _SrcSizeInBytes ); 
_Success_(return != -1) _ACRTIMP int __cdecl _mbtowc_l( _Pre_notnull_ _Post_z_ wchar_t* _DstCh, _In_reads_or_z_opt_(_SrcSizeInBytes) char const* _SrcCh, _In_ size_t _SrcSizeInBytes, _In_opt_ _locale_t _Locale ); _Check_return_opt_ _ACRTIMP errno_t __cdecl mbstowcs_s( _Out_opt_ size_t* _PtNumOfCharConverted, _Out_writes_to_opt_(_SizeInWords, *_PtNumOfCharConverted) wchar_t* _DstBuf, _In_ size_t _SizeInWords, _In_reads_or_z_(_MaxCount) char const* _SrcBuf, _In_ size_t _MaxCount ); __DEFINE_CPP_OVERLOAD_SECURE_FUNC_1_2( errno_t, mbstowcs_s, _Out_opt_ size_t*, _PtNumOfCharConverted, _Post_z_ wchar_t, _Dest, _In_z_ char const*, _Source, _In_ size_t, _MaxCount ) __DEFINE_CPP_OVERLOAD_STANDARD_NFUNC_0_2_SIZE( _ACRTIMP, mbstowcs, _Out_writes_opt_z_(_MaxCount), wchar_t, _Dest, _In_z_ char const*, _Source, _In_ size_t, _MaxCount ) _Check_return_opt_ _ACRTIMP errno_t __cdecl _mbstowcs_s_l( _Out_opt_ size_t* _PtNumOfCharConverted, _Out_writes_to_opt_(_SizeInWords, *_PtNumOfCharConverted) wchar_t* _DstBuf, _In_ size_t _SizeInWords, _In_reads_or_z_(_MaxCount) char const* _SrcBuf, _In_ size_t _MaxCount, _In_opt_ _locale_t _Locale ); __DEFINE_CPP_OVERLOAD_SECURE_FUNC_1_3( errno_t, _mbstowcs_s_l, _Out_opt_ size_t*, _PtNumOfCharConverted, _Post_z_ wchar_t, _Dest, _In_z_ char const*, _Source, _In_ size_t, _MaxCount, _In_opt_ _locale_t, _Locale ) __DEFINE_CPP_OVERLOAD_STANDARD_NFUNC_0_3_SIZE_EX( _ACRTIMP, _mbstowcs_l, _mbstowcs_s_l, _Out_writes_opt_z_(_Size) wchar_t, _Out_writes_z_(_MaxCount), wchar_t, _Dest, _In_z_ char const*, _Source, _In_ size_t, _MaxCount, _In_opt_ _locale_t, _Locale ) _CRT_INSECURE_DEPRECATE(wctomb_s) _ACRTIMP int __cdecl wctomb( _Out_writes_opt_z_(MB_LEN_MAX) char* _MbCh, _In_ wchar_t _WCh ); _CRT_INSECURE_DEPRECATE(_wctomb_s_l) _ACRTIMP int __cdecl _wctomb_l( _Pre_maybenull_ _Post_z_ char* _MbCh, _In_ wchar_t _WCh, _In_opt_ _locale_t _Locale ); #if __STDC_WANT_SECURE_LIB__ _Check_return_wat_ _ACRTIMP errno_t __cdecl wctomb_s( _Out_opt_ int* _SizeConverted, 
_Out_writes_bytes_to_opt_(_SizeInBytes, *_SizeConverted) char* _MbCh, _In_ rsize_t _SizeInBytes, _In_ wchar_t _WCh ); #endif // __STDC_WANT_SECURE_LIB__ _Check_return_wat_ _ACRTIMP errno_t __cdecl _wctomb_s_l( _Out_opt_ int* _SizeConverted, _Out_writes_opt_z_(_SizeInBytes) char* _MbCh, _In_ size_t _SizeInBytes, _In_ wchar_t _WCh, _In_opt_ _locale_t _Locale); _Check_return_wat_ _ACRTIMP errno_t __cdecl wcstombs_s( _Out_opt_ size_t* _PtNumOfCharConverted, _Out_writes_bytes_to_opt_(_DstSizeInBytes, *_PtNumOfCharConverted) char* _Dst, _In_ size_t _DstSizeInBytes, _In_z_ wchar_t const* _Src, _In_ size_t _MaxCountInBytes ); __DEFINE_CPP_OVERLOAD_SECURE_FUNC_1_2( errno_t, wcstombs_s, _Out_opt_ size_t*, _PtNumOfCharConverted, _Out_writes_bytes_opt_(_Size) char, _Dest, _In_z_ wchar_t const*, _Source, _In_ size_t, _MaxCount ) __DEFINE_CPP_OVERLOAD_STANDARD_NFUNC_0_2_SIZE( _ACRTIMP, wcstombs, _Out_writes_opt_(_MaxCount), char, _Dest, _In_z_ wchar_t const*, _Source, _In_ size_t, _MaxCount ) _Check_return_wat_ _ACRTIMP errno_t __cdecl _wcstombs_s_l( _Out_opt_ size_t* _PtNumOfCharConverted, _Out_writes_bytes_to_opt_(_DstSizeInBytes, *_PtNumOfCharConverted) char* _Dst, _In_ size_t _DstSizeInBytes, _In_z_ wchar_t const* _Src, _In_ size_t _MaxCountInBytes, _In_opt_ _locale_t _Locale ); __DEFINE_CPP_OVERLOAD_SECURE_FUNC_1_3( errno_t, _wcstombs_s_l, _Out_opt_ size_t*, _PtNumOfCharConverted, _Out_writes_opt_(_Size) char, _Dest, _In_z_ wchar_t const*, _Source, _In_ size_t, _MaxCount, _In_opt_ _locale_t, _Locale ) __DEFINE_CPP_OVERLOAD_STANDARD_NFUNC_0_3_SIZE_EX( _ACRTIMP, _wcstombs_l, _wcstombs_s_l, _Out_writes_opt_z_(_Size) char, _Out_writes_(_MaxCount), char, _Dest, _In_z_ wchar_t const*, _Source, _In_ size_t, _MaxCount, _In_opt_ _locale_t, _Locale ) //-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // // Path Manipulation // //-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // Sizes for buffers used by the _makepath() and 
_splitpath() functions. // note that the sizes include space for 0-terminator #define _MAX_PATH 260 // max. length of full pathname #define _MAX_DRIVE 3 // max. length of drive component #define _MAX_DIR 256 // max. length of path component #define _MAX_FNAME 256 // max. length of file name component #define _MAX_EXT 256 // max. length of extension component #pragma push_macro("_fullpath") #undef _fullpath _Success_(return != 0) _Check_return_ _ACRTIMP _CRTALLOCATOR char* __cdecl _fullpath( _Out_writes_opt_z_(_BufferCount) char* _Buffer, _In_z_ char const* _Path, _In_ size_t _BufferCount ); #pragma pop_macro("_fullpath") _Check_return_wat_ _ACRTIMP errno_t __cdecl _makepath_s( _Out_writes_z_(_BufferCount) char* _Buffer, _In_ size_t _BufferCount, _In_opt_z_ char const* _Drive, _In_opt_z_ char const* _Dir, _In_opt_z_ char const* _Filename, _In_opt_z_ char const* _Ext ); __DEFINE_CPP_OVERLOAD_SECURE_FUNC_0_4( errno_t, _makepath_s, char, _Buffer, _In_opt_z_ char const*, _Drive, _In_opt_z_ char const*, _Dir, _In_opt_z_ char const*, _Filename, _In_opt_z_ char const*, _Ext ) #pragma warning(push) #pragma warning(disable: 28719) // __WARNING_BANNED_API_USAGE #pragma warning(disable: 28726) // __WARNING_BANNED_API_USAGEL2 __DEFINE_CPP_OVERLOAD_STANDARD_FUNC_0_4( void, __RETURN_POLICY_VOID, _ACRTIMP, _makepath, _Pre_notnull_ _Post_z_, char, _Buffer, _In_opt_z_ char const*, _Drive, _In_opt_z_ char const*, _Dir, _In_opt_z_ char const*, _Filename, _In_opt_z_ char const*, _Ext ) #pragma warning(pop) _CRT_INSECURE_DEPRECATE(_splitpath_s) _ACRTIMP void __cdecl _splitpath( _In_z_ char const* _FullPath, _Pre_maybenull_ _Post_z_ char* _Drive, _Pre_maybenull_ _Post_z_ char* _Dir, _Pre_maybenull_ _Post_z_ char* _Filename, _Pre_maybenull_ _Post_z_ char* _Ext ); _Check_return_wat_ _ACRTIMP errno_t __cdecl _splitpath_s( _In_z_ char const* _FullPath, _Out_writes_opt_z_(_DriveCount) char* _Drive, _In_ size_t _DriveCount, _Out_writes_opt_z_(_DirCount) char* _Dir, _In_ size_t _DirCount, 
_Out_writes_opt_z_(_FilenameCount) char* _Filename, _In_ size_t _FilenameCount, _Out_writes_opt_z_(_ExtCount) char* _Ext, _In_ size_t _ExtCount ); __DEFINE_CPP_OVERLOAD_SECURE_FUNC_SPLITPATH(errno_t, _splitpath_s, char, _Dest) //-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // // APIs Only Available in Desktop Apps // //-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ #ifdef _CRT_USE_WINAPI_FAMILY_DESKTOP_APP #if __STDC_WANT_SECURE_LIB__ _Check_return_opt_ _Success_(return == 0) _DCRTIMP errno_t __cdecl getenv_s( _Out_ size_t* _RequiredCount, _Out_writes_opt_z_(_BufferCount) char* _Buffer, _In_ rsize_t _BufferCount, _In_z_ char const* _VarName ); #endif // __STDC_WANT_SECURE_LIB__ _ACRTIMP int* __cdecl __p___argc (void); _ACRTIMP char*** __cdecl __p___argv (void); _ACRTIMP wchar_t*** __cdecl __p___wargv(void); #ifdef _CRT_DECLARE_GLOBAL_VARIABLES_DIRECTLY __declspec(dllimport) extern int __argc; __declspec(dllimport) extern char** __argv; __declspec(dllimport) extern wchar_t** __wargv; #else #define __argc (*__p___argc()) // Pointer to number of command line arguments #define __argv (*__p___argv()) // Pointer to table of narrow command line arguments #define __wargv (*__p___wargv()) // Pointer to table of wide command line arguments #endif _DCRTIMP char*** __cdecl __p__environ (void); _DCRTIMP wchar_t*** __cdecl __p__wenviron(void); #ifndef _CRT_BEST_PRACTICES_USAGE #define _CRT_V12_LEGACY_FUNCTIONALITY #endif #ifndef _CRT_V12_LEGACY_FUNCTIONALITY __declspec(dllimport) extern char ** _environ; __declspec(dllimport) extern wchar_t ** _wenviron; #else #define _environ (*__p__environ()) // Pointer to narrow environment table #define _wenviron (*__p__wenviron()) // Pointer to wide environment table #endif // Sizes for buffers used by the getenv/putenv family of functions. 
#define _MAX_ENV 32767 #if _CRT_FUNCTIONS_REQUIRED _Check_return_ _CRT_INSECURE_DEPRECATE(_dupenv_s) _DCRTIMP char* __cdecl getenv( _In_z_ char const* _VarName ); __DEFINE_CPP_OVERLOAD_SECURE_FUNC_1_1( errno_t, getenv_s, _Out_ size_t*, _RequiredCount, char, _Buffer, _In_z_ char const*, _VarName ) #if defined (_DEBUG) && defined (_CRTDBG_MAP_ALLOC) #pragma push_macro("_dupenv_s") #undef _dupenv_s #endif _Check_return_opt_ _DCRTIMP errno_t __cdecl _dupenv_s( _Outptr_result_buffer_maybenull_(*_BufferCount) _Outptr_result_maybenull_z_ char** _Buffer, _Out_opt_ size_t* _BufferCount, _In_z_ char const* _VarName ); #if defined (_DEBUG) && defined (_CRTDBG_MAP_ALLOC) #pragma pop_macro("_dupenv_s") #endif _DCRTIMP int __cdecl system( _In_opt_z_ char const* _Command ); // The functions below have declspecs in their declarations in the Windows // headers, causing PREfast to fire 6540 here #pragma warning (push) #pragma warning (disable:6540) _Check_return_ _DCRTIMP int __cdecl _putenv( _In_z_ char const* _EnvString ); _Check_return_wat_ _DCRTIMP errno_t __cdecl _putenv_s( _In_z_ char const* _Name, _In_z_ char const* _Value ); #pragma warning (pop) _DCRTIMP errno_t __cdecl _searchenv_s( _In_z_ char const* _Filename, _In_z_ char const* _VarName, _Out_writes_z_(_BufferCount) char* _Buffer, _In_ size_t _BufferCount ); __DEFINE_CPP_OVERLOAD_SECURE_FUNC_2_0( errno_t, _searchenv_s, _In_z_ char const*, _Filename, _In_z_ char const*, _VarName, char, _Buffer ) __DEFINE_CPP_OVERLOAD_STANDARD_FUNC_2_0( void, __RETURN_POLICY_VOID, _DCRTIMP, _searchenv, _In_z_ char const*, _Filename, _In_z_ char const*, _VarName, _Pre_notnull_ _Post_z_, char, _Buffer ) // The Win32 API SetErrorMode, Beep and Sleep should be used instead. 
_CRT_OBSOLETE(SetErrorMode) _DCRTIMP void __cdecl _seterrormode( _In_ int _Mode ); _CRT_OBSOLETE(Beep) _DCRTIMP void __cdecl _beep( _In_ unsigned _Frequency, _In_ unsigned _Duration ); _CRT_OBSOLETE(Sleep) _DCRTIMP void __cdecl _sleep( _In_ unsigned long _Duration ); #endif // _CRT_FUNCTIONS_REQUIRED #endif // _CRT_USE_WINAPI_FAMILY_DESKTOP_APP //-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // // Non-ANSI Names for Compatibility // //-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ #if _CRT_INTERNAL_NONSTDC_NAMES #ifndef __cplusplus #define max(a,b) (((a) > (b)) ? (a) : (b)) #define min(a,b) (((a) < (b)) ? (a) : (b)) #endif #define sys_errlist _sys_errlist #define sys_nerr _sys_nerr #pragma warning(push) #pragma warning(disable: 4141) // Using deprecated twice _Check_return_ _CRT_NONSTDC_DEPRECATE(_ecvt) _CRT_INSECURE_DEPRECATE(_ecvt_s) _ACRTIMP char* __cdecl ecvt( _In_ double _Value, _In_ int _DigitCount, _Out_ int* _PtDec, _Out_ int* _PtSign ); _Check_return_ _CRT_NONSTDC_DEPRECATE(_fcvt) _CRT_INSECURE_DEPRECATE(_fcvt_s) _ACRTIMP char* __cdecl fcvt( _In_ double _Value, _In_ int _FractionalDigitCount, _Out_ int* _PtDec, _Out_ int* _PtSign ); _CRT_NONSTDC_DEPRECATE(_gcvt) _CRT_INSECURE_DEPRECATE(_fcvt_s) _ACRTIMP char* __cdecl gcvt( _In_ double _Value, _In_ int _DigitCount, _Pre_notnull_ _Post_z_ char* _DstBuf ); _CRT_NONSTDC_DEPRECATE(_itoa) _CRT_INSECURE_DEPRECATE(_itoa_s) _ACRTIMP char* __cdecl itoa( _In_ int _Value, _Pre_notnull_ _Post_z_ char* _Buffer, _In_ int _Radix ); _CRT_NONSTDC_DEPRECATE(_ltoa) _CRT_INSECURE_DEPRECATE(_ltoa_s) _ACRTIMP char* __cdecl ltoa( _In_ long _Value, _Pre_notnull_ _Post_z_ char* _Buffer, _In_ int _Radix ); _CRT_NONSTDC_DEPRECATE(_swab) _ACRTIMP void __cdecl swab( _Inout_updates_z_(_SizeInBytes) char* _Buf1, _Inout_updates_z_(_SizeInBytes) char* _Buf2, _In_ int _SizeInBytes ); _CRT_NONSTDC_DEPRECATE(_ultoa) _CRT_INSECURE_DEPRECATE(_ultoa_s) _ACRTIMP char* __cdecl ultoa( 
_In_ unsigned long _Value, _Pre_notnull_ _Post_z_ char* _Buffer, _In_ int _Radix ); #ifdef _CRT_USE_WINAPI_FAMILY_DESKTOP_APP #define environ _environ _Check_return_ _CRT_NONSTDC_DEPRECATE(_putenv) _DCRTIMP int __cdecl putenv( _In_z_ char const* _EnvString ); #endif // _CRT_USE_WINAPI_FAMILY_DESKTOP_APP #pragma warning(pop) onexit_t __cdecl onexit(_In_opt_ onexit_t _Func); #endif // _CRT_INTERNAL_NONSTDC_NAMES _CRT_END_C_HEADER ```
```go // +build !ignore_autogenerated /* path_to_url Unless required by applicable law or agreed to in writing, software WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ // This file was autogenerated by conversion-gen. Do not edit it manually! package v1beta1 import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" api "k8s.io/client-go/pkg/api" api_v1 "k8s.io/client-go/pkg/api/v1" extensions "k8s.io/client-go/pkg/apis/extensions" unsafe "unsafe" ) func init() { SchemeBuilder.Register(RegisterConversions) } // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. func RegisterConversions(scheme *runtime.Scheme) error { return scheme.AddGeneratedConversionFuncs( Convert_v1beta1_APIVersion_To_extensions_APIVersion, Convert_extensions_APIVersion_To_v1beta1_APIVersion, your_sha256_hashtricCurrentStatus, your_sha256_hashtricCurrentStatus, your_sha256_hashomMetricCurrentStatusList, your_sha256_hashomMetricCurrentStatusList, your_sha256_hashget, your_sha256_hashget, your_sha256_hashcTargetList, your_sha256_hashcTargetList, Convert_v1beta1_DaemonSet_To_extensions_DaemonSet, Convert_extensions_DaemonSet_To_v1beta1_DaemonSet, Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList, Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList, Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec, Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec, Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus, Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus, your_sha256_hashpdateStrategy, your_sha256_hashpdateStrategy, Convert_v1beta1_Deployment_To_extensions_Deployment, Convert_extensions_Deployment_To_v1beta1_Deployment, your_sha256_hashition, your_sha256_hashition, Convert_v1beta1_DeploymentList_To_extensions_DeploymentList, 
Convert_extensions_DeploymentList_To_v1beta1_DeploymentList, your_sha256_hashack, your_sha256_hashack, Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec, Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec, Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus, Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus, your_sha256_hashegy, your_sha256_hashegy, your_sha256_hashtegyOptions, your_sha256_hashtegyOptions, Convert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath, Convert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath, your_sha256_hashleValue, your_sha256_hashleValue, Convert_v1beta1_HostPortRange_To_extensions_HostPortRange, Convert_extensions_HostPortRange_To_v1beta1_HostPortRange, Convert_v1beta1_Ingress_To_extensions_Ingress, Convert_extensions_Ingress_To_v1beta1_Ingress, Convert_v1beta1_IngressBackend_To_extensions_IngressBackend, Convert_extensions_IngressBackend_To_v1beta1_IngressBackend, Convert_v1beta1_IngressList_To_extensions_IngressList, Convert_extensions_IngressList_To_v1beta1_IngressList, Convert_v1beta1_IngressRule_To_extensions_IngressRule, Convert_extensions_IngressRule_To_v1beta1_IngressRule, Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue, Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue, Convert_v1beta1_IngressSpec_To_extensions_IngressSpec, Convert_extensions_IngressSpec_To_v1beta1_IngressSpec, Convert_v1beta1_IngressStatus_To_extensions_IngressStatus, Convert_extensions_IngressStatus_To_v1beta1_IngressStatus, Convert_v1beta1_IngressTLS_To_extensions_IngressTLS, Convert_extensions_IngressTLS_To_v1beta1_IngressTLS, Convert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy, Convert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy, your_sha256_hashlicyIngressRule, your_sha256_hashlicyIngressRule, your_sha256_hasht, your_sha256_hasht, your_sha256_hashr, your_sha256_hashr, your_sha256_hasht, your_sha256_hasht, your_sha256_hashc, your_sha256_hashc, 
your_sha256_hashy, your_sha256_hashy, your_sha256_hasholicyList, your_sha256_hasholicyList, your_sha256_hasholicySpec, your_sha256_hasholicySpec, Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet, Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet, your_sha256_hashition, your_sha256_hashition, Convert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList, Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList, Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec, Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec, Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus, Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus, your_sha256_hashtionControllerDummy, your_sha256_hashtionControllerDummy, Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig, Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig, your_sha256_hashteDaemonSet, your_sha256_hashteDaemonSet, your_sha256_hashateDeployment, your_sha256_hashateDeployment, your_sha256_hashStrategyOptions, your_sha256_hashStrategyOptions, your_sha256_hashtegyOptions, your_sha256_hashtegyOptions, Convert_v1beta1_Scale_To_extensions_Scale, Convert_extensions_Scale_To_v1beta1_Scale, Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec, Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec, Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, your_sha256_hashSupplementalGroupsStrategyOptions, your_sha256_hashSupplementalGroupsStrategyOptions, your_sha256_hashrce, your_sha256_hashrce, your_sha256_hashesourceData, your_sha256_hashesourceData, your_sha256_hashrtyResourceDataList, your_sha256_hashrtyResourceDataList, your_sha256_hashesourceList, your_sha256_hashesourceList, ) } func autoConvert_v1beta1_APIVersion_To_extensions_APIVersion(in *APIVersion, out *extensions.APIVersion, s conversion.Scope) error { out.Name = in.Name return nil } // Convert_v1beta1_APIVersion_To_extensions_APIVersion is an autogenerated conversion 
function. func Convert_v1beta1_APIVersion_To_extensions_APIVersion(in *APIVersion, out *extensions.APIVersion, s conversion.Scope) error { return autoConvert_v1beta1_APIVersion_To_extensions_APIVersion(in, out, s) } func autoConvert_extensions_APIVersion_To_v1beta1_APIVersion(in *extensions.APIVersion, out *APIVersion, s conversion.Scope) error { out.Name = in.Name return nil } // Convert_extensions_APIVersion_To_v1beta1_APIVersion is an autogenerated conversion function. func Convert_extensions_APIVersion_To_v1beta1_APIVersion(in *extensions.APIVersion, out *APIVersion, s conversion.Scope) error { return autoConvert_extensions_APIVersion_To_v1beta1_APIVersion(in, out, s) } func your_sha256_hashomMetricCurrentStatus(in *CustomMetricCurrentStatus, out *extensions.CustomMetricCurrentStatus, s conversion.Scope) error { out.Name = in.Name out.CurrentValue = in.CurrentValue return nil } // your_sha256_hashtricCurrentStatus is an autogenerated conversion function. func your_sha256_hashtricCurrentStatus(in *CustomMetricCurrentStatus, out *extensions.CustomMetricCurrentStatus, s conversion.Scope) error { return your_sha256_hashomMetricCurrentStatus(in, out, s) } func your_sha256_hashomMetricCurrentStatus(in *extensions.CustomMetricCurrentStatus, out *CustomMetricCurrentStatus, s conversion.Scope) error { out.Name = in.Name out.CurrentValue = in.CurrentValue return nil } // your_sha256_hashtricCurrentStatus is an autogenerated conversion function. 
func your_sha256_hashtricCurrentStatus(in *extensions.CustomMetricCurrentStatus, out *CustomMetricCurrentStatus, s conversion.Scope) error { return your_sha256_hashomMetricCurrentStatus(in, out, s) } func your_sha256_hashCustomMetricCurrentStatusList(in *CustomMetricCurrentStatusList, out *extensions.CustomMetricCurrentStatusList, s conversion.Scope) error { out.Items = *(*[]extensions.CustomMetricCurrentStatus)(unsafe.Pointer(&in.Items)) return nil } // your_sha256_hashomMetricCurrentStatusList is an autogenerated conversion function. func your_sha256_hashomMetricCurrentStatusList(in *CustomMetricCurrentStatusList, out *extensions.CustomMetricCurrentStatusList, s conversion.Scope) error { return your_sha256_hashCustomMetricCurrentStatusList(in, out, s) } func your_sha256_hashCustomMetricCurrentStatusList(in *extensions.CustomMetricCurrentStatusList, out *CustomMetricCurrentStatusList, s conversion.Scope) error { if in.Items == nil { out.Items = make([]CustomMetricCurrentStatus, 0) } else { out.Items = *(*[]CustomMetricCurrentStatus)(unsafe.Pointer(&in.Items)) } return nil } // your_sha256_hashomMetricCurrentStatusList is an autogenerated conversion function. func your_sha256_hashomMetricCurrentStatusList(in *extensions.CustomMetricCurrentStatusList, out *CustomMetricCurrentStatusList, s conversion.Scope) error { return your_sha256_hashCustomMetricCurrentStatusList(in, out, s) } func your_sha256_hashcTarget(in *CustomMetricTarget, out *extensions.CustomMetricTarget, s conversion.Scope) error { out.Name = in.Name out.TargetValue = in.TargetValue return nil } // your_sha256_hashget is an autogenerated conversion function. 
func your_sha256_hashget(in *CustomMetricTarget, out *extensions.CustomMetricTarget, s conversion.Scope) error { return your_sha256_hashcTarget(in, out, s) } func your_sha256_hashcTarget(in *extensions.CustomMetricTarget, out *CustomMetricTarget, s conversion.Scope) error { out.Name = in.Name out.TargetValue = in.TargetValue return nil } // your_sha256_hashget is an autogenerated conversion function. func your_sha256_hashget(in *extensions.CustomMetricTarget, out *CustomMetricTarget, s conversion.Scope) error { return your_sha256_hashcTarget(in, out, s) } func your_sha256_hashetricTargetList(in *CustomMetricTargetList, out *extensions.CustomMetricTargetList, s conversion.Scope) error { out.Items = *(*[]extensions.CustomMetricTarget)(unsafe.Pointer(&in.Items)) return nil } // your_sha256_hashcTargetList is an autogenerated conversion function. func your_sha256_hashcTargetList(in *CustomMetricTargetList, out *extensions.CustomMetricTargetList, s conversion.Scope) error { return your_sha256_hashetricTargetList(in, out, s) } func your_sha256_hashetricTargetList(in *extensions.CustomMetricTargetList, out *CustomMetricTargetList, s conversion.Scope) error { if in.Items == nil { out.Items = make([]CustomMetricTarget, 0) } else { out.Items = *(*[]CustomMetricTarget)(unsafe.Pointer(&in.Items)) } return nil } // your_sha256_hashcTargetList is an autogenerated conversion function. 
// Wrapper delegating to the non-exported autoConvert helper for
// extensions.CustomMetricTargetList -> v1beta1 CustomMetricTargetList.
// NOTE(review): the identifier was mangled by an anonymization pass
// ("your_sha256_hash…" prefix); the original conversion-gen name cannot be
// recovered from this chunk alone and must be restored before use.
func your_sha256_hashcTargetList(in *extensions.CustomMetricTargetList, out *CustomMetricTargetList, s conversion.Scope) error {
	return your_sha256_hashetricTargetList(in, out, s)
}

// Converts a versioned (v1beta1) DaemonSet into the internal extensions
// DaemonSet. ObjectMeta is shared (same meta type, copied by value); Spec and
// Status are converted field-by-field via the generated helpers, returning
// the first conversion error encountered.
func autoConvert_v1beta1_DaemonSet_To_extensions_DaemonSet(in *DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1beta1_DaemonSet_To_extensions_DaemonSet is an autogenerated conversion function.
func Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(in *DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error {
	return autoConvert_v1beta1_DaemonSet_To_extensions_DaemonSet(in, out, s)
}

// Converts an internal extensions DaemonSet into the versioned (v1beta1)
// DaemonSet — the exact mirror of the function above.
func autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet(in *extensions.DaemonSet, out *DaemonSet, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_extensions_DaemonSet_To_v1beta1_DaemonSet is an autogenerated conversion
func Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(in *extensions.DaemonSet, out *DaemonSet, s conversion.Scope) error {
	return autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet(in, out, s)
}

// Converts a versioned DaemonSetList to the internal list type. Each element
// requires per-item conversion (DaemonSet has no identical memory layout
// across the two packages), so the slice is rebuilt with make + a loop rather
// than the unsafe.Pointer alias used for layout-identical types elsewhere in
// this file. A nil Items slice stays nil in this direction.
func autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	if in.Items != nil {
		// Shadowed in/out narrow the scope to the Items slices only.
		in, out := &in.Items, &out.Items
		*out = make([]extensions.DaemonSet, len(*in))
		for i := range *in {
			if err := Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList is an autogenerated conversion function.
func Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error {
	return autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in, out, s)
}

// Mirror conversion, internal -> versioned. Note the asymmetry with the
// function above: a nil internal Items becomes an empty (non-nil) slice here
// — presumably so the versioned object serializes Items as [] rather than
// null; TODO(review) confirm against the type's JSON/proto contract.
func autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extensions.DaemonSetList, out *DaemonSetList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]DaemonSet, len(*in))
		for i := range *in {
			if err := Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Items = make([]DaemonSet, 0)
	}
	return nil
}

// Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList is an autogenerated conversion function.
func Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extensions.DaemonSetList, out *DaemonSetList, s conversion.Scope) error {
	return autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in, out, s)
}

// Converts a versioned DaemonSetSpec to the internal spec.
//   - Selector and RevisionHistoryLimit are aliased through unsafe.Pointer:
//     conversion-gen emits this zero-copy cast when it has determined the two
//     pointed-to types are memory-layout identical, so the output shares the
//     input's backing memory.
//   - Template and UpdateStrategy need real field-by-field conversion; the
//     UpdateStrategy helper's name was mangled by anonymization
//     ("your_sha256_hash…") and must be restored before this compiles.
func autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error {
	out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector))
	if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
		return err
	}
	if err := your_sha256_hashpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil {
		return err
	}
	out.MinReadySeconds = in.MinReadySeconds
	out.TemplateGeneration = in.TemplateGeneration
	out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
	return nil
}

// Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec is an autogenerated conversion function.
func Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error {
	return autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in, out, s)
}

// Mirror conversion, internal -> versioned; same aliasing and the same
// mangled UpdateStrategy helper name as above.
func autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *DaemonSetSpec, s conversion.Scope) error {
	out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector))
	if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
		return err
	}
	if err := your_sha256_hashpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil {
		return err
	}
	out.MinReadySeconds = in.MinReadySeconds
	out.TemplateGeneration = in.TemplateGeneration
	out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
	return nil
}

// Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec is an autogenerated conversion function.
func Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *DaemonSetSpec, s conversion.Scope) error {
	return autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in, out, s)
}

// NOTE(review): BOTH DaemonSetStatus autoConvert helpers in this span were
// anonymized to the same identifier `your_sha256_hashs`. Go does not permit
// two package-level functions with one name, so this chunk cannot compile
// as-is; the original conversion-gen names
// (autoConvert_{v1beta1,extensions}_DaemonSetStatus_To_…) must be restored.
//
// Converts a versioned DaemonSetStatus to the internal status: a straight
// field-by-field copy; CollisionCount is aliased via unsafe.Pointer
// (layout-identical pointer-to-int64 on both sides).
func your_sha256_hashs(in *DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error {
	out.CurrentNumberScheduled = in.CurrentNumberScheduled
	out.NumberMisscheduled = in.NumberMisscheduled
	out.DesiredNumberScheduled = in.DesiredNumberScheduled
	out.NumberReady = in.NumberReady
	out.ObservedGeneration = in.ObservedGeneration
	out.UpdatedNumberScheduled = in.UpdatedNumberScheduled
	out.NumberAvailable = in.NumberAvailable
	out.NumberUnavailable = in.NumberUnavailable
	out.CollisionCount = (*int64)(unsafe.Pointer(in.CollisionCount))
	return nil
}

// Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus is an autogenerated conversion function.
func Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error {
	return your_sha256_hashs(in, out, s)
}

// Mirror conversion, internal -> versioned (same mangled-name collision as
// documented above).
func your_sha256_hashs(in *extensions.DaemonSetStatus, out *DaemonSetStatus, s conversion.Scope) error {
	out.CurrentNumberScheduled = in.CurrentNumberScheduled
	out.NumberMisscheduled = in.NumberMisscheduled
	out.DesiredNumberScheduled = in.DesiredNumberScheduled
	out.NumberReady = in.NumberReady
	out.ObservedGeneration = in.ObservedGeneration
	out.UpdatedNumberScheduled = in.UpdatedNumberScheduled
	out.NumberAvailable = in.NumberAvailable
	out.NumberUnavailable = in.NumberUnavailable
	out.CollisionCount = (*int64)(unsafe.Pointer(in.CollisionCount))
	return nil
}

// Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus is an autogenerated conversion function.
func Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *DaemonSetStatus, s conversion.Scope) error { return your_sha256_hashs(in, out, s) } func your_sha256_hashSetUpdateStrategy(in *DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { out.Type = extensions.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate *out = new(extensions.RollingUpdateDaemonSet) if err := your_sha256_hashteDaemonSet(*in, *out, s); err != nil { return err } } else { out.RollingUpdate = nil } return nil } // your_sha256_hashpdateStrategy is an autogenerated conversion function. func your_sha256_hashpdateStrategy(in *DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { return your_sha256_hashSetUpdateStrategy(in, out, s) } func your_sha256_hashSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *DaemonSetUpdateStrategy, s conversion.Scope) error { out.Type = DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate *out = new(RollingUpdateDaemonSet) if err := your_sha256_hashteDaemonSet(*in, *out, s); err != nil { return err } } else { out.RollingUpdate = nil } return nil } // your_sha256_hashpdateStrategy is an autogenerated conversion function. 
func your_sha256_hashpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *DaemonSetUpdateStrategy, s conversion.Scope) error { return your_sha256_hashSetUpdateStrategy(in, out, s) } func autoConvert_v1beta1_Deployment_To_extensions_Deployment(in *Deployment, out *extensions.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } // Convert_v1beta1_Deployment_To_extensions_Deployment is an autogenerated conversion function. func Convert_v1beta1_Deployment_To_extensions_Deployment(in *Deployment, out *extensions.Deployment, s conversion.Scope) error { return autoConvert_v1beta1_Deployment_To_extensions_Deployment(in, out, s) } func autoConvert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } // Convert_extensions_Deployment_To_v1beta1_Deployment is an autogenerated conversion function. 
func Convert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *Deployment, s conversion.Scope) error { return autoConvert_extensions_Deployment_To_v1beta1_Deployment(in, out, s) } func your_sha256_hashCondition(in *DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { out.Type = extensions.DeploymentConditionType(in.Type) out.Status = api.ConditionStatus(in.Status) out.LastUpdateTime = in.LastUpdateTime out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason out.Message = in.Message return nil } // your_sha256_hashition is an autogenerated conversion function. func your_sha256_hashition(in *DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { return your_sha256_hashCondition(in, out, s) } func your_sha256_hashCondition(in *extensions.DeploymentCondition, out *DeploymentCondition, s conversion.Scope) error { out.Type = DeploymentConditionType(in.Type) out.Status = api_v1.ConditionStatus(in.Status) out.LastUpdateTime = in.LastUpdateTime out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason out.Message = in.Message return nil } // your_sha256_hashition is an autogenerated conversion function. func your_sha256_hashition(in *extensions.DeploymentCondition, out *DeploymentCondition, s conversion.Scope) error { return your_sha256_hashCondition(in, out, s) } func autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]extensions.Deployment, len(*in)) for i := range *in { if err := Convert_v1beta1_Deployment_To_extensions_Deployment(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { out.Items = nil } return nil } // Convert_v1beta1_DeploymentList_To_extensions_DeploymentList is an autogenerated conversion function. 
func Convert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { return autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in, out, s) } func autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *DeploymentList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Deployment, len(*in)) for i := range *in { if err := Convert_extensions_Deployment_To_v1beta1_Deployment(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { out.Items = make([]Deployment, 0) } return nil } // Convert_extensions_DeploymentList_To_v1beta1_DeploymentList is an autogenerated conversion function. func Convert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *DeploymentList, s conversion.Scope) error { return autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in, out, s) } func your_sha256_hashollback(in *DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error { out.Name = in.Name out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations)) if err := Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { return err } return nil } // your_sha256_hashack is an autogenerated conversion function. 
func your_sha256_hashack(in *DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error { return your_sha256_hashollback(in, out, s) } func your_sha256_hashollback(in *extensions.DeploymentRollback, out *DeploymentRollback, s conversion.Scope) error { out.Name = in.Name out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations)) if err := Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { return err } return nil } // your_sha256_hashack is an autogenerated conversion function. func your_sha256_hashack(in *extensions.DeploymentRollback, out *DeploymentRollback, s conversion.Scope) error { return your_sha256_hashollback(in, out, s) } func autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if err := your_sha256_hashegy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit)) out.Paused = in.Paused out.RollbackTo = (*extensions.RollbackConfig)(unsafe.Pointer(in.RollbackTo)) out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds)) return nil } func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error { if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, 
&out.Template, s); err != nil { return err } if err := your_sha256_hashegy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit)) out.Paused = in.Paused out.RollbackTo = (*RollbackConfig)(unsafe.Pointer(in.RollbackTo)) out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds)) return nil } func your_sha256_hashtus(in *DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { out.ObservedGeneration = in.ObservedGeneration out.Replicas = in.Replicas out.UpdatedReplicas = in.UpdatedReplicas out.ReadyReplicas = in.ReadyReplicas out.AvailableReplicas = in.AvailableReplicas out.UnavailableReplicas = in.UnavailableReplicas out.Conditions = *(*[]extensions.DeploymentCondition)(unsafe.Pointer(&in.Conditions)) out.CollisionCount = (*int64)(unsafe.Pointer(in.CollisionCount)) return nil } // Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus is an autogenerated conversion function. func Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { return your_sha256_hashtus(in, out, s) } func your_sha256_hashtus(in *extensions.DeploymentStatus, out *DeploymentStatus, s conversion.Scope) error { out.ObservedGeneration = in.ObservedGeneration out.Replicas = in.Replicas out.UpdatedReplicas = in.UpdatedReplicas out.ReadyReplicas = in.ReadyReplicas out.AvailableReplicas = in.AvailableReplicas out.UnavailableReplicas = in.UnavailableReplicas out.Conditions = *(*[]DeploymentCondition)(unsafe.Pointer(&in.Conditions)) out.CollisionCount = (*int64)(unsafe.Pointer(in.CollisionCount)) return nil } // Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus is an autogenerated conversion function. 
func Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *DeploymentStatus, s conversion.Scope) error { return your_sha256_hashtus(in, out, s) } func your_sha256_hashtrategy(in *DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { out.Type = extensions.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate *out = new(extensions.RollingUpdateDeployment) if err := your_sha256_hashateDeployment(*in, *out, s); err != nil { return err } } else { out.RollingUpdate = nil } return nil } func your_sha256_hashtrategy(in *extensions.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error { out.Type = DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate *out = new(RollingUpdateDeployment) if err := your_sha256_hashateDeployment(*in, *out, s); err != nil { return err } } else { out.RollingUpdate = nil } return nil } func your_sha256_hashStrategyOptions(in *FSGroupStrategyOptions, out *extensions.FSGroupStrategyOptions, s conversion.Scope) error { out.Rule = extensions.FSGroupStrategyType(in.Rule) out.Ranges = *(*[]extensions.GroupIDRange)(unsafe.Pointer(&in.Ranges)) return nil } // your_sha256_hashtegyOptions is an autogenerated conversion function. func your_sha256_hashtegyOptions(in *FSGroupStrategyOptions, out *extensions.FSGroupStrategyOptions, s conversion.Scope) error { return your_sha256_hashStrategyOptions(in, out, s) } func your_sha256_hashStrategyOptions(in *extensions.FSGroupStrategyOptions, out *FSGroupStrategyOptions, s conversion.Scope) error { out.Rule = FSGroupStrategyType(in.Rule) out.Ranges = *(*[]IDRange)(unsafe.Pointer(&in.Ranges)) return nil } // your_sha256_hashtegyOptions is an autogenerated conversion function. 
// NOTE(review): autogenerated conversions for FSGroupStrategyOptions (wrapper), HTTPIngressPath,
// HTTPIngressRuleValue and HostPortRange. Identifiers starting with "your_sha256_hash" appear
// redacted — regenerate rather than hand-edit; TODO confirm names against upstream.
// HTTPIngressPath delegates the Backend field to the IngressBackend converters; HTTPIngressRuleValue
// reinterprets Paths via unsafe.Pointer in the v1beta1->extensions direction, and in the reverse
// direction replaces a nil Paths with an allocated empty slice.
func your_sha256_hashtegyOptions(in *extensions.FSGroupStrategyOptions, out *FSGroupStrategyOptions, s conversion.Scope) error { return your_sha256_hashStrategyOptions(in, out, s) } func your_sha256_hashh(in *HTTPIngressPath, out *extensions.HTTPIngressPath, s conversion.Scope) error { out.Path = in.Path if err := Convert_v1beta1_IngressBackend_To_extensions_IngressBackend(&in.Backend, &out.Backend, s); err != nil { return err } return nil } // Convert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath is an autogenerated conversion function. func Convert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(in *HTTPIngressPath, out *extensions.HTTPIngressPath, s conversion.Scope) error { return your_sha256_hashh(in, out, s) } func your_sha256_hashh(in *extensions.HTTPIngressPath, out *HTTPIngressPath, s conversion.Scope) error { out.Path = in.Path if err := Convert_extensions_IngressBackend_To_v1beta1_IngressBackend(&in.Backend, &out.Backend, s); err != nil { return err } return nil } // Convert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath is an autogenerated conversion function. func Convert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in *extensions.HTTPIngressPath, out *HTTPIngressPath, s conversion.Scope) error { return your_sha256_hashh(in, out, s) } func your_sha256_hashssRuleValue(in *HTTPIngressRuleValue, out *extensions.HTTPIngressRuleValue, s conversion.Scope) error { out.Paths = *(*[]extensions.HTTPIngressPath)(unsafe.Pointer(&in.Paths)) return nil } // your_sha256_hashleValue is an autogenerated conversion function.
// HostPortRange: Min/Max are int32 in v1beta1 and int internally, so each direction converts
// the two fields explicitly instead of using an unsafe cast.
func your_sha256_hashleValue(in *HTTPIngressRuleValue, out *extensions.HTTPIngressRuleValue, s conversion.Scope) error { return your_sha256_hashssRuleValue(in, out, s) } func your_sha256_hashssRuleValue(in *extensions.HTTPIngressRuleValue, out *HTTPIngressRuleValue, s conversion.Scope) error { if in.Paths == nil { out.Paths = make([]HTTPIngressPath, 0) } else { out.Paths = *(*[]HTTPIngressPath)(unsafe.Pointer(&in.Paths)) } return nil } // your_sha256_hashleValue is an autogenerated conversion function. func your_sha256_hashleValue(in *extensions.HTTPIngressRuleValue, out *HTTPIngressRuleValue, s conversion.Scope) error { return your_sha256_hashssRuleValue(in, out, s) } func autoConvert_v1beta1_HostPortRange_To_extensions_HostPortRange(in *HostPortRange, out *extensions.HostPortRange, s conversion.Scope) error { out.Min = int(in.Min) out.Max = int(in.Max) return nil } // Convert_v1beta1_HostPortRange_To_extensions_HostPortRange is an autogenerated conversion function. func Convert_v1beta1_HostPortRange_To_extensions_HostPortRange(in *HostPortRange, out *extensions.HostPortRange, s conversion.Scope) error { return autoConvert_v1beta1_HostPortRange_To_extensions_HostPortRange(in, out, s) } func autoConvert_extensions_HostPortRange_To_v1beta1_HostPortRange(in *extensions.HostPortRange, out *HostPortRange, s conversion.Scope) error { out.Min = int32(in.Min) out.Max = int32(in.Max) return nil } // Convert_extensions_HostPortRange_To_v1beta1_HostPortRange is an autogenerated conversion function.
// Autogenerated conversions for Ingress, IngressBackend and IngressList between the v1beta1 and
// internal extensions API groups. Ingress copies ObjectMeta and delegates Spec/Status to the
// dedicated converters; IngressBackend is a plain field-by-field copy; IngressList reinterprets
// Items via unsafe.Pointer, and on the extensions->v1beta1 path replaces a nil Items with an
// allocated empty slice.
func Convert_extensions_HostPortRange_To_v1beta1_HostPortRange(in *extensions.HostPortRange, out *HostPortRange, s conversion.Scope) error { return autoConvert_extensions_HostPortRange_To_v1beta1_HostPortRange(in, out, s) } func autoConvert_v1beta1_Ingress_To_extensions_Ingress(in *Ingress, out *extensions.Ingress, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_IngressSpec_To_extensions_IngressSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := Convert_v1beta1_IngressStatus_To_extensions_IngressStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } // Convert_v1beta1_Ingress_To_extensions_Ingress is an autogenerated conversion function. func Convert_v1beta1_Ingress_To_extensions_Ingress(in *Ingress, out *extensions.Ingress, s conversion.Scope) error { return autoConvert_v1beta1_Ingress_To_extensions_Ingress(in, out, s) } func autoConvert_extensions_Ingress_To_v1beta1_Ingress(in *extensions.Ingress, out *Ingress, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_extensions_IngressSpec_To_v1beta1_IngressSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := Convert_extensions_IngressStatus_To_v1beta1_IngressStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } // Convert_extensions_Ingress_To_v1beta1_Ingress is an autogenerated conversion function. func Convert_extensions_Ingress_To_v1beta1_Ingress(in *extensions.Ingress, out *Ingress, s conversion.Scope) error { return autoConvert_extensions_Ingress_To_v1beta1_Ingress(in, out, s) } func autoConvert_v1beta1_IngressBackend_To_extensions_IngressBackend(in *IngressBackend, out *extensions.IngressBackend, s conversion.Scope) error { out.ServiceName = in.ServiceName out.ServicePort = in.ServicePort return nil } // Convert_v1beta1_IngressBackend_To_extensions_IngressBackend is an autogenerated conversion function.
func Convert_v1beta1_IngressBackend_To_extensions_IngressBackend(in *IngressBackend, out *extensions.IngressBackend, s conversion.Scope) error { return autoConvert_v1beta1_IngressBackend_To_extensions_IngressBackend(in, out, s) } func autoConvert_extensions_IngressBackend_To_v1beta1_IngressBackend(in *extensions.IngressBackend, out *IngressBackend, s conversion.Scope) error { out.ServiceName = in.ServiceName out.ServicePort = in.ServicePort return nil } // Convert_extensions_IngressBackend_To_v1beta1_IngressBackend is an autogenerated conversion function. func Convert_extensions_IngressBackend_To_v1beta1_IngressBackend(in *extensions.IngressBackend, out *IngressBackend, s conversion.Scope) error { return autoConvert_extensions_IngressBackend_To_v1beta1_IngressBackend(in, out, s) } func autoConvert_v1beta1_IngressList_To_extensions_IngressList(in *IngressList, out *extensions.IngressList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]extensions.Ingress)(unsafe.Pointer(&in.Items)) return nil } // Convert_v1beta1_IngressList_To_extensions_IngressList is an autogenerated conversion function. func Convert_v1beta1_IngressList_To_extensions_IngressList(in *IngressList, out *extensions.IngressList, s conversion.Scope) error { return autoConvert_v1beta1_IngressList_To_extensions_IngressList(in, out, s) } func autoConvert_extensions_IngressList_To_v1beta1_IngressList(in *extensions.IngressList, out *IngressList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items == nil { out.Items = make([]Ingress, 0) } else { out.Items = *(*[]Ingress)(unsafe.Pointer(&in.Items)) } return nil } // Convert_extensions_IngressList_To_v1beta1_IngressList is an autogenerated conversion function.
// Autogenerated conversions for IngressRule, IngressRuleValue and IngressSpec. IngressRule copies
// Host and delegates the embedded IngressRuleValue; IngressRuleValue reinterprets the HTTP pointer
// via unsafe.Pointer; IngressSpec reinterprets Backend/TLS/Rules in place the same way.
// NOTE(review): the "your_sha256_hash..." helper names look redacted (and collide across
// directions) — regenerate this file; TODO confirm names against upstream.
func Convert_extensions_IngressList_To_v1beta1_IngressList(in *extensions.IngressList, out *IngressList, s conversion.Scope) error { return autoConvert_extensions_IngressList_To_v1beta1_IngressList(in, out, s) } func autoConvert_v1beta1_IngressRule_To_extensions_IngressRule(in *IngressRule, out *extensions.IngressRule, s conversion.Scope) error { out.Host = in.Host if err := Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, s); err != nil { return err } return nil } // Convert_v1beta1_IngressRule_To_extensions_IngressRule is an autogenerated conversion function. func Convert_v1beta1_IngressRule_To_extensions_IngressRule(in *IngressRule, out *extensions.IngressRule, s conversion.Scope) error { return autoConvert_v1beta1_IngressRule_To_extensions_IngressRule(in, out, s) } func autoConvert_extensions_IngressRule_To_v1beta1_IngressRule(in *extensions.IngressRule, out *IngressRule, s conversion.Scope) error { out.Host = in.Host if err := Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, s); err != nil { return err } return nil } // Convert_extensions_IngressRule_To_v1beta1_IngressRule is an autogenerated conversion function. func Convert_extensions_IngressRule_To_v1beta1_IngressRule(in *extensions.IngressRule, out *IngressRule, s conversion.Scope) error { return autoConvert_extensions_IngressRule_To_v1beta1_IngressRule(in, out, s) } func your_sha256_hashlue(in *IngressRuleValue, out *extensions.IngressRuleValue, s conversion.Scope) error { out.HTTP = (*extensions.HTTPIngressRuleValue)(unsafe.Pointer(in.HTTP)) return nil } // Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue is an autogenerated conversion function.
func Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(in *IngressRuleValue, out *extensions.IngressRuleValue, s conversion.Scope) error { return your_sha256_hashlue(in, out, s) } func your_sha256_hashlue(in *extensions.IngressRuleValue, out *IngressRuleValue, s conversion.Scope) error { out.HTTP = (*HTTPIngressRuleValue)(unsafe.Pointer(in.HTTP)) return nil } // Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue is an autogenerated conversion function. func Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(in *extensions.IngressRuleValue, out *IngressRuleValue, s conversion.Scope) error { return your_sha256_hashlue(in, out, s) } func autoConvert_v1beta1_IngressSpec_To_extensions_IngressSpec(in *IngressSpec, out *extensions.IngressSpec, s conversion.Scope) error { out.Backend = (*extensions.IngressBackend)(unsafe.Pointer(in.Backend)) out.TLS = *(*[]extensions.IngressTLS)(unsafe.Pointer(&in.TLS)) out.Rules = *(*[]extensions.IngressRule)(unsafe.Pointer(&in.Rules)) return nil } // Convert_v1beta1_IngressSpec_To_extensions_IngressSpec is an autogenerated conversion function. func Convert_v1beta1_IngressSpec_To_extensions_IngressSpec(in *IngressSpec, out *extensions.IngressSpec, s conversion.Scope) error { return autoConvert_v1beta1_IngressSpec_To_extensions_IngressSpec(in, out, s) } func autoConvert_extensions_IngressSpec_To_v1beta1_IngressSpec(in *extensions.IngressSpec, out *IngressSpec, s conversion.Scope) error { out.Backend = (*IngressBackend)(unsafe.Pointer(in.Backend)) out.TLS = *(*[]IngressTLS)(unsafe.Pointer(&in.TLS)) out.Rules = *(*[]IngressRule)(unsafe.Pointer(&in.Rules)) return nil } // Convert_extensions_IngressSpec_To_v1beta1_IngressSpec is an autogenerated conversion function.
// Autogenerated conversions for IngressStatus, IngressTLS and NetworkPolicy. IngressStatus cannot
// use an unsafe cast for LoadBalancer, so it goes through the generic (reflection-based)
// conversion.Scope s.Convert — hence the generator's own "Inefficient conversion" TODO below.
// IngressTLS reinterprets Hosts via unsafe.Pointer; NetworkPolicy copies ObjectMeta and delegates
// Spec to the (redacted-name) NetworkPolicySpec converters.
func Convert_extensions_IngressSpec_To_v1beta1_IngressSpec(in *extensions.IngressSpec, out *IngressSpec, s conversion.Scope) error { return autoConvert_extensions_IngressSpec_To_v1beta1_IngressSpec(in, out, s) } func autoConvert_v1beta1_IngressStatus_To_extensions_IngressStatus(in *IngressStatus, out *extensions.IngressStatus, s conversion.Scope) error { // TODO: Inefficient conversion - can we improve it? if err := s.Convert(&in.LoadBalancer, &out.LoadBalancer, 0); err != nil { return err } return nil } // Convert_v1beta1_IngressStatus_To_extensions_IngressStatus is an autogenerated conversion function. func Convert_v1beta1_IngressStatus_To_extensions_IngressStatus(in *IngressStatus, out *extensions.IngressStatus, s conversion.Scope) error { return autoConvert_v1beta1_IngressStatus_To_extensions_IngressStatus(in, out, s) } func autoConvert_extensions_IngressStatus_To_v1beta1_IngressStatus(in *extensions.IngressStatus, out *IngressStatus, s conversion.Scope) error { // TODO: Inefficient conversion - can we improve it? if err := s.Convert(&in.LoadBalancer, &out.LoadBalancer, 0); err != nil { return err } return nil } // Convert_extensions_IngressStatus_To_v1beta1_IngressStatus is an autogenerated conversion function. func Convert_extensions_IngressStatus_To_v1beta1_IngressStatus(in *extensions.IngressStatus, out *IngressStatus, s conversion.Scope) error { return autoConvert_extensions_IngressStatus_To_v1beta1_IngressStatus(in, out, s) } func autoConvert_v1beta1_IngressTLS_To_extensions_IngressTLS(in *IngressTLS, out *extensions.IngressTLS, s conversion.Scope) error { out.Hosts = *(*[]string)(unsafe.Pointer(&in.Hosts)) out.SecretName = in.SecretName return nil } // Convert_v1beta1_IngressTLS_To_extensions_IngressTLS is an autogenerated conversion function.
func Convert_v1beta1_IngressTLS_To_extensions_IngressTLS(in *IngressTLS, out *extensions.IngressTLS, s conversion.Scope) error { return autoConvert_v1beta1_IngressTLS_To_extensions_IngressTLS(in, out, s) } func autoConvert_extensions_IngressTLS_To_v1beta1_IngressTLS(in *extensions.IngressTLS, out *IngressTLS, s conversion.Scope) error { out.Hosts = *(*[]string)(unsafe.Pointer(&in.Hosts)) out.SecretName = in.SecretName return nil } // Convert_extensions_IngressTLS_To_v1beta1_IngressTLS is an autogenerated conversion function. func Convert_extensions_IngressTLS_To_v1beta1_IngressTLS(in *extensions.IngressTLS, out *IngressTLS, s conversion.Scope) error { return autoConvert_extensions_IngressTLS_To_v1beta1_IngressTLS(in, out, s) } func autoConvert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in *NetworkPolicy, out *extensions.NetworkPolicy, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := your_sha256_hashc(&in.Spec, &out.Spec, s); err != nil { return err } return nil } // Convert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy is an autogenerated conversion function. func Convert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in *NetworkPolicy, out *extensions.NetworkPolicy, s conversion.Scope) error { return autoConvert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in, out, s) } func autoConvert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(in *extensions.NetworkPolicy, out *NetworkPolicy, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := your_sha256_hashc(&in.Spec, &out.Spec, s); err != nil { return err } return nil } // Convert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy is an autogenerated conversion function.
// Autogenerated conversions for NetworkPolicyIngressRule, NetworkPolicyList, NetworkPolicyPeer
// and NetworkPolicyPort. Slice fields (Ports/From/Items) are reinterpreted via unsafe.Pointer;
// the extensions->v1beta1 list path replaces nil Items with an allocated empty slice; Peer casts
// the two *v1.LabelSelector pointers; Port casts the Protocol and Port pointers.
// NOTE(review): "your_sha256_hash..." names are redacted placeholders with colliding declarations
// (e.g. your_sha256_hasht appears for both list and port wrappers) — regenerate this file.
func Convert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(in *extensions.NetworkPolicy, out *NetworkPolicy, s conversion.Scope) error { return autoConvert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(in, out, s) } func your_sha256_hashrkPolicyIngressRule(in *NetworkPolicyIngressRule, out *extensions.NetworkPolicyIngressRule, s conversion.Scope) error { out.Ports = *(*[]extensions.NetworkPolicyPort)(unsafe.Pointer(&in.Ports)) out.From = *(*[]extensions.NetworkPolicyPeer)(unsafe.Pointer(&in.From)) return nil } // your_sha256_hashlicyIngressRule is an autogenerated conversion function. func your_sha256_hashlicyIngressRule(in *NetworkPolicyIngressRule, out *extensions.NetworkPolicyIngressRule, s conversion.Scope) error { return your_sha256_hashrkPolicyIngressRule(in, out, s) } func your_sha256_hashrkPolicyIngressRule(in *extensions.NetworkPolicyIngressRule, out *NetworkPolicyIngressRule, s conversion.Scope) error { out.Ports = *(*[]NetworkPolicyPort)(unsafe.Pointer(&in.Ports)) out.From = *(*[]NetworkPolicyPeer)(unsafe.Pointer(&in.From)) return nil } // your_sha256_hashlicyIngressRule is an autogenerated conversion function. func your_sha256_hashlicyIngressRule(in *extensions.NetworkPolicyIngressRule, out *NetworkPolicyIngressRule, s conversion.Scope) error { return your_sha256_hashrkPolicyIngressRule(in, out, s) } func your_sha256_hashyList(in *NetworkPolicyList, out *extensions.NetworkPolicyList, s conversion.Scope) error { out.ListMeta = in.ListMeta out.Items = *(*[]extensions.NetworkPolicy)(unsafe.Pointer(&in.Items)) return nil } // your_sha256_hasht is an autogenerated conversion function.
func your_sha256_hasht(in *NetworkPolicyList, out *extensions.NetworkPolicyList, s conversion.Scope) error { return your_sha256_hashyList(in, out, s) } func your_sha256_hashyList(in *extensions.NetworkPolicyList, out *NetworkPolicyList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items == nil { out.Items = make([]NetworkPolicy, 0) } else { out.Items = *(*[]NetworkPolicy)(unsafe.Pointer(&in.Items)) } return nil } // your_sha256_hasht is an autogenerated conversion function. func your_sha256_hasht(in *extensions.NetworkPolicyList, out *NetworkPolicyList, s conversion.Scope) error { return your_sha256_hashyList(in, out, s) } func your_sha256_hashyPeer(in *NetworkPolicyPeer, out *extensions.NetworkPolicyPeer, s conversion.Scope) error { out.PodSelector = (*v1.LabelSelector)(unsafe.Pointer(in.PodSelector)) out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) return nil } // your_sha256_hashr is an autogenerated conversion function. func your_sha256_hashr(in *NetworkPolicyPeer, out *extensions.NetworkPolicyPeer, s conversion.Scope) error { return your_sha256_hashyPeer(in, out, s) } func your_sha256_hashyPeer(in *extensions.NetworkPolicyPeer, out *NetworkPolicyPeer, s conversion.Scope) error { out.PodSelector = (*v1.LabelSelector)(unsafe.Pointer(in.PodSelector)) out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) return nil } // your_sha256_hashr is an autogenerated conversion function. func your_sha256_hashr(in *extensions.NetworkPolicyPeer, out *NetworkPolicyPeer, s conversion.Scope) error { return your_sha256_hashyPeer(in, out, s) } func your_sha256_hashyPort(in *NetworkPolicyPort, out *extensions.NetworkPolicyPort, s conversion.Scope) error { out.Protocol = (*api.Protocol)(unsafe.Pointer(in.Protocol)) out.Port = (*intstr.IntOrString)(unsafe.Pointer(in.Port)) return nil } // your_sha256_hasht is an autogenerated conversion function.
// Autogenerated conversions for NetworkPolicyPort (remaining wrappers), NetworkPolicySpec,
// PodSecurityPolicy and PodSecurityPolicyList. NetworkPolicyPort casts Protocol between
// api.Protocol and api_v1.Protocol pointers via unsafe.Pointer; NetworkPolicySpec copies
// PodSelector and reinterprets the Ingress slice; PodSecurityPolicy delegates Spec;
// PodSecurityPolicyList converts Items element-by-element (no unsafe cast — Spec needs real
// conversion), and the extensions->v1beta1 path turns nil Items into an allocated empty slice.
// NOTE(review): "your_sha256_hash..." names are redacted placeholders — regenerate this file.
func your_sha256_hasht(in *NetworkPolicyPort, out *extensions.NetworkPolicyPort, s conversion.Scope) error { return your_sha256_hashyPort(in, out, s) } func your_sha256_hashyPort(in *extensions.NetworkPolicyPort, out *NetworkPolicyPort, s conversion.Scope) error { out.Protocol = (*api_v1.Protocol)(unsafe.Pointer(in.Protocol)) out.Port = (*intstr.IntOrString)(unsafe.Pointer(in.Port)) return nil } // your_sha256_hasht is an autogenerated conversion function. func your_sha256_hasht(in *extensions.NetworkPolicyPort, out *NetworkPolicyPort, s conversion.Scope) error { return your_sha256_hashyPort(in, out, s) } func your_sha256_hashySpec(in *NetworkPolicySpec, out *extensions.NetworkPolicySpec, s conversion.Scope) error { out.PodSelector = in.PodSelector out.Ingress = *(*[]extensions.NetworkPolicyIngressRule)(unsafe.Pointer(&in.Ingress)) return nil } // your_sha256_hashc is an autogenerated conversion function. func your_sha256_hashc(in *NetworkPolicySpec, out *extensions.NetworkPolicySpec, s conversion.Scope) error { return your_sha256_hashySpec(in, out, s) } func your_sha256_hashySpec(in *extensions.NetworkPolicySpec, out *NetworkPolicySpec, s conversion.Scope) error { out.PodSelector = in.PodSelector out.Ingress = *(*[]NetworkPolicyIngressRule)(unsafe.Pointer(&in.Ingress)) return nil } // your_sha256_hashc is an autogenerated conversion function. func your_sha256_hashc(in *extensions.NetworkPolicySpec, out *NetworkPolicySpec, s conversion.Scope) error { return your_sha256_hashySpec(in, out, s) } func your_sha256_hasholicy(in *PodSecurityPolicy, out *extensions.PodSecurityPolicy, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := your_sha256_hasholicySpec(&in.Spec, &out.Spec, s); err != nil { return err } return nil } // your_sha256_hashy is an autogenerated conversion function.
func your_sha256_hashy(in *PodSecurityPolicy, out *extensions.PodSecurityPolicy, s conversion.Scope) error { return your_sha256_hasholicy(in, out, s) } func your_sha256_hasholicy(in *extensions.PodSecurityPolicy, out *PodSecurityPolicy, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := your_sha256_hasholicySpec(&in.Spec, &out.Spec, s); err != nil { return err } return nil } // your_sha256_hashy is an autogenerated conversion function. func your_sha256_hashy(in *extensions.PodSecurityPolicy, out *PodSecurityPolicy, s conversion.Scope) error { return your_sha256_hasholicy(in, out, s) } func your_sha256_hashityPolicyList(in *PodSecurityPolicyList, out *extensions.PodSecurityPolicyList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]extensions.PodSecurityPolicy, len(*in)) for i := range *in { if err := your_sha256_hashy(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { out.Items = nil } return nil } // your_sha256_hasholicyList is an autogenerated conversion function. func your_sha256_hasholicyList(in *PodSecurityPolicyList, out *extensions.PodSecurityPolicyList, s conversion.Scope) error { return your_sha256_hashityPolicyList(in, out, s) } func your_sha256_hashityPolicyList(in *extensions.PodSecurityPolicyList, out *PodSecurityPolicyList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodSecurityPolicy, len(*in)) for i := range *in { if err := your_sha256_hashy(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { out.Items = make([]PodSecurityPolicy, 0) } return nil } // your_sha256_hasholicyList is an autogenerated conversion function.
// Autogenerated conversions for PodSecurityPolicySpec, both directions. Capability slices and the
// Volumes slice are reinterpreted via unsafe.Pointer; HostPorts is converted element-by-element
// because HostPortRange uses int32 externally and int internally; the four strategy-options
// sub-structs (SELinux, RunAsUser, SupplementalGroups, FSGroup) are delegated to their dedicated
// converters. Remaining scalar fields are plain copies.
// NOTE(review): "your_sha256_hash..." names are redacted placeholders — regenerate this file.
func your_sha256_hasholicyList(in *extensions.PodSecurityPolicyList, out *PodSecurityPolicyList, s conversion.Scope) error { return your_sha256_hashityPolicyList(in, out, s) } func your_sha256_hashityPolicySpec(in *PodSecurityPolicySpec, out *extensions.PodSecurityPolicySpec, s conversion.Scope) error { out.Privileged = in.Privileged out.DefaultAddCapabilities = *(*[]api.Capability)(unsafe.Pointer(&in.DefaultAddCapabilities)) out.RequiredDropCapabilities = *(*[]api.Capability)(unsafe.Pointer(&in.RequiredDropCapabilities)) out.AllowedCapabilities = *(*[]api.Capability)(unsafe.Pointer(&in.AllowedCapabilities)) out.Volumes = *(*[]extensions.FSType)(unsafe.Pointer(&in.Volumes)) out.HostNetwork = in.HostNetwork if in.HostPorts != nil { in, out := &in.HostPorts, &out.HostPorts *out = make([]extensions.HostPortRange, len(*in)) for i := range *in { if err := Convert_v1beta1_HostPortRange_To_extensions_HostPortRange(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { out.HostPorts = nil } out.HostPID = in.HostPID out.HostIPC = in.HostIPC if err := your_sha256_hashtegyOptions(&in.SELinux, &out.SELinux, s); err != nil { return err } if err := your_sha256_hashStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil { return err } if err := your_sha256_hashSupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, s); err != nil { return err } if err := your_sha256_hashtegyOptions(&in.FSGroup, &out.FSGroup, s); err != nil { return err } out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem return nil } // your_sha256_hasholicySpec is an autogenerated conversion function.
func your_sha256_hasholicySpec(in *PodSecurityPolicySpec, out *extensions.PodSecurityPolicySpec, s conversion.Scope) error { return your_sha256_hashityPolicySpec(in, out, s) } func your_sha256_hashityPolicySpec(in *extensions.PodSecurityPolicySpec, out *PodSecurityPolicySpec, s conversion.Scope) error { out.Privileged = in.Privileged out.DefaultAddCapabilities = *(*[]api_v1.Capability)(unsafe.Pointer(&in.DefaultAddCapabilities)) out.RequiredDropCapabilities = *(*[]api_v1.Capability)(unsafe.Pointer(&in.RequiredDropCapabilities)) out.AllowedCapabilities = *(*[]api_v1.Capability)(unsafe.Pointer(&in.AllowedCapabilities)) out.Volumes = *(*[]FSType)(unsafe.Pointer(&in.Volumes)) out.HostNetwork = in.HostNetwork if in.HostPorts != nil { in, out := &in.HostPorts, &out.HostPorts *out = make([]HostPortRange, len(*in)) for i := range *in { if err := Convert_extensions_HostPortRange_To_v1beta1_HostPortRange(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { out.HostPorts = nil } out.HostPID = in.HostPID out.HostIPC = in.HostIPC if err := your_sha256_hashtegyOptions(&in.SELinux, &out.SELinux, s); err != nil { return err } if err := your_sha256_hashStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil { return err } if err := your_sha256_hashSupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, s); err != nil { return err } if err := your_sha256_hashtegyOptions(&in.FSGroup, &out.FSGroup, s); err != nil { return err } out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem return nil } // your_sha256_hasholicySpec is an autogenerated conversion function.
// Autogenerated conversions for ReplicaSet, ReplicaSetCondition and ReplicaSetList
// (v1beta1->extensions direction). ReplicaSet copies ObjectMeta and delegates Spec/Status;
// ReplicaSetCondition copies fields with type casts for Type/Status; ReplicaSetList converts
// Items element-by-element (Spec needs real conversion, so no unsafe cast is possible here).
func your_sha256_hasholicySpec(in *extensions.PodSecurityPolicySpec, out *PodSecurityPolicySpec, s conversion.Scope) error { return your_sha256_hashityPolicySpec(in, out, s) } func autoConvert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in *ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } // Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet is an autogenerated conversion function. func Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in *ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { return autoConvert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in, out, s) } func autoConvert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in *extensions.ReplicaSet, out *ReplicaSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } // Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet is an autogenerated conversion function.
func Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in *extensions.ReplicaSet, out *ReplicaSet, s conversion.Scope) error { return autoConvert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in, out, s) } func your_sha256_hashCondition(in *ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error { out.Type = extensions.ReplicaSetConditionType(in.Type) out.Status = api.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason out.Message = in.Message return nil } // your_sha256_hashition is an autogenerated conversion function. func your_sha256_hashition(in *ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error { return your_sha256_hashCondition(in, out, s) } func your_sha256_hashCondition(in *extensions.ReplicaSetCondition, out *ReplicaSetCondition, s conversion.Scope) error { out.Type = ReplicaSetConditionType(in.Type) out.Status = api_v1.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason out.Message = in.Message return nil } // your_sha256_hashition is an autogenerated conversion function. func your_sha256_hashition(in *extensions.ReplicaSetCondition, out *ReplicaSetCondition, s conversion.Scope) error { return your_sha256_hashCondition(in, out, s) } func autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in *ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]extensions.ReplicaSet, len(*in)) for i := range *in { if err := Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { out.Items = nil } return nil } // Convert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList is an autogenerated conversion function.
// Autogenerated conversions for ReplicaSetList (extensions->v1beta1, nil Items becomes an
// allocated empty slice), ReplicaSetSpec (Replicas converted between *int32 and int32 via the
// v1 helper converters, Template delegated to the api_v1 PodTemplateSpec converters),
// ReplicaSetStatus (field copies plus an unsafe reinterpret of Conditions), and the empty
// ReplicationControllerDummy conversion.
func Convert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in *ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { return autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in, out, s) } func autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions.ReplicaSetList, out *ReplicaSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicaSet, len(*in)) for i := range *in { if err := Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { out.Items = make([]ReplicaSet, 0) } return nil } // Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList is an autogenerated conversion function. func Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions.ReplicaSetList, out *ReplicaSetList, s conversion.Scope) error { return autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in, out, s) } func autoConvert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil } func autoConvert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *ReplicaSetSpec, s conversion.Scope) error { if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return
err } return nil } func your_sha256_hashtus(in *ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.FullyLabeledReplicas = in.FullyLabeledReplicas out.ReadyReplicas = in.ReadyReplicas out.AvailableReplicas = in.AvailableReplicas out.ObservedGeneration = in.ObservedGeneration out.Conditions = *(*[]extensions.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions)) return nil } // Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus is an autogenerated conversion function. func Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { return your_sha256_hashtus(in, out, s) } func your_sha256_hashtus(in *extensions.ReplicaSetStatus, out *ReplicaSetStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.FullyLabeledReplicas = in.FullyLabeledReplicas out.ReadyReplicas = in.ReadyReplicas out.AvailableReplicas = in.AvailableReplicas out.ObservedGeneration = in.ObservedGeneration out.Conditions = *(*[]ReplicaSetCondition)(unsafe.Pointer(&in.Conditions)) return nil } // Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus is an autogenerated conversion function. func Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *ReplicaSetStatus, s conversion.Scope) error { return your_sha256_hashtus(in, out, s) } func your_sha256_hashlicationControllerDummy(in *ReplicationControllerDummy, out *extensions.ReplicationControllerDummy, s conversion.Scope) error { return nil } // your_sha256_hashtionControllerDummy is an autogenerated conversion function.
// Autogenerated conversions for ReplicationControllerDummy (no fields — both directions are
// no-ops) and RollbackConfig (single Revision field copied in each direction).
func your_sha256_hashtionControllerDummy(in *ReplicationControllerDummy, out *extensions.ReplicationControllerDummy, s conversion.Scope) error { return your_sha256_hashlicationControllerDummy(in, out, s) } func your_sha256_hashlicationControllerDummy(in *extensions.ReplicationControllerDummy, out *ReplicationControllerDummy, s conversion.Scope) error { return nil } // your_sha256_hashtionControllerDummy is an autogenerated conversion function. func your_sha256_hashtionControllerDummy(in *extensions.ReplicationControllerDummy, out *ReplicationControllerDummy, s conversion.Scope) error { return your_sha256_hashlicationControllerDummy(in, out, s) } func autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error { out.Revision = in.Revision return nil } // Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig is an autogenerated conversion function. func Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error { return autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in, out, s) } func autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *RollbackConfig, s conversion.Scope) error { out.Revision = in.Revision return nil } // Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig is an autogenerated conversion function.
func Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *RollbackConfig, s conversion.Scope) error {
	return autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in, out, s)
}

// MaxUnavailable is deliberately NOT copied here: the peer types differ in
// pointer-ness, so conversion-gen leaves it to a hand-written Convert_* wrapper.
func your_sha256_hashUpdateDaemonSet(in *RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error {
	// WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString)
	return nil
}

func your_sha256_hashUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *RollingUpdateDaemonSet, s conversion.Scope) error {
	// WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString)
	return nil
}

// Both MaxUnavailable and MaxSurge need manual conversion for deployments.
func your_sha256_hashgUpdateDeployment(in *RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error {
	// WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString)
	// WARNING: in.MaxSurge requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString)
	return nil
}

func your_sha256_hashgUpdateDeployment(in *extensions.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error {
	// WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString)
	// WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString)
	return nil
}

// Rule is a simple string-kind cast; Ranges is reinterpreted via unsafe.Pointer
// (v1beta1 IDRange and internal UserIDRange must be layout-identical).
func your_sha256_hashUserStrategyOptions(in *RunAsUserStrategyOptions, out *extensions.RunAsUserStrategyOptions, s conversion.Scope) error {
	out.Rule = extensions.RunAsUserStrategy(in.Rule)
	out.Ranges = *(*[]extensions.UserIDRange)(unsafe.Pointer(&in.Ranges))
	return nil
}

// your_sha256_hashStrategyOptions is an autogenerated conversion function.
func your_sha256_hashStrategyOptions(in *RunAsUserStrategyOptions, out *extensions.RunAsUserStrategyOptions, s conversion.Scope) error {
	return your_sha256_hashUserStrategyOptions(in, out, s)
}

func your_sha256_hashUserStrategyOptions(in *extensions.RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, s conversion.Scope) error {
	out.Rule = RunAsUserStrategy(in.Rule)
	out.Ranges = *(*[]IDRange)(unsafe.Pointer(&in.Ranges))
	return nil
}

// your_sha256_hashStrategyOptions is an autogenerated conversion function.
func your_sha256_hashStrategyOptions(in *extensions.RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, s conversion.Scope) error {
	return your_sha256_hashUserStrategyOptions(in, out, s)
}

// The SELinuxOptions pointer is reinterpreted directly — assumes the api and
// api_v1 SELinuxOptions structs are layout-identical.
func your_sha256_hashStrategyOptions(in *SELinuxStrategyOptions, out *extensions.SELinuxStrategyOptions, s conversion.Scope) error {
	out.Rule = extensions.SELinuxStrategy(in.Rule)
	out.SELinuxOptions = (*api.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions))
	return nil
}

// your_sha256_hashtegyOptions is an autogenerated conversion function.
func your_sha256_hashtegyOptions(in *SELinuxStrategyOptions, out *extensions.SELinuxStrategyOptions, s conversion.Scope) error {
	return your_sha256_hashStrategyOptions(in, out, s)
}

func your_sha256_hashStrategyOptions(in *extensions.SELinuxStrategyOptions, out *SELinuxStrategyOptions, s conversion.Scope) error {
	out.Rule = SELinuxStrategy(in.Rule)
	out.SELinuxOptions = (*api_v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions))
	return nil
}

// your_sha256_hashtegyOptions is an autogenerated conversion function.
func your_sha256_hashtegyOptions(in *extensions.SELinuxStrategyOptions, out *SELinuxStrategyOptions, s conversion.Scope) error {
	return your_sha256_hashStrategyOptions(in, out, s)
}

// Scale conversion delegates Spec/Status to their own Convert_* helpers and
// returns the first error encountered.
func autoConvert_v1beta1_Scale_To_extensions_Scale(in *Scale, out *extensions.Scale, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1beta1_Scale_To_extensions_Scale is an autogenerated conversion function.
func Convert_v1beta1_Scale_To_extensions_Scale(in *Scale, out *extensions.Scale, s conversion.Scope) error {
	return autoConvert_v1beta1_Scale_To_extensions_Scale(in, out, s)
}

func autoConvert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *Scale, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_extensions_Scale_To_v1beta1_Scale is an autogenerated conversion function.
func Convert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *Scale, s conversion.Scope) error {
	return autoConvert_extensions_Scale_To_v1beta1_Scale(in, out, s)
}

func autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in *ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error {
	out.Replicas = in.Replicas
	return nil
}

// Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec is an autogenerated conversion function.
func Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in *ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error {
	return autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in, out, s)
}

func autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, out *ScaleSpec, s conversion.Scope) error {
	out.Replicas = in.Replicas
	return nil
}

// Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec is an autogenerated conversion function.
func Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, out *ScaleSpec, s conversion.Scope) error {
	return autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s)
}

// Selector/TargetSelector are left to a hand-written wrapper (see WARNINGs);
// note there is no public Convert_* wrapper for ScaleStatus in this file.
func autoConvert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error {
	out.Replicas = in.Replicas
	// WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector)
	// WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type
	return nil
}

func autoConvert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out *ScaleStatus, s conversion.Scope) error {
	out.Replicas = in.Replicas
	// WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string)
	return nil
}

// Rule cast plus unsafe reinterpretation of the Ranges slice (IDRange vs
// GroupIDRange assumed layout-identical).
func your_sha256_hashons_SupplementalGroupsStrategyOptions(in *SupplementalGroupsStrategyOptions, out *extensions.SupplementalGroupsStrategyOptions, s conversion.Scope) error {
	out.Rule = extensions.SupplementalGroupsStrategyType(in.Rule)
	out.Ranges = *(*[]extensions.GroupIDRange)(unsafe.Pointer(&in.Ranges))
	return nil
}

// your_sha256_hashSupplementalGroupsStrategyOptions is an autogenerated conversion function.
func your_sha256_hashSupplementalGroupsStrategyOptions(in *SupplementalGroupsStrategyOptions, out *extensions.SupplementalGroupsStrategyOptions, s conversion.Scope) error {
	return your_sha256_hashons_SupplementalGroupsStrategyOptions(in, out, s)
}

func your_sha256_hashta1_SupplementalGroupsStrategyOptions(in *extensions.SupplementalGroupsStrategyOptions, out *SupplementalGroupsStrategyOptions, s conversion.Scope) error {
	out.Rule = SupplementalGroupsStrategyType(in.Rule)
	out.Ranges = *(*[]IDRange)(unsafe.Pointer(&in.Ranges))
	return nil
}

// your_sha256_hashSupplementalGroupsStrategyOptions is an autogenerated conversion function.
func your_sha256_hashSupplementalGroupsStrategyOptions(in *extensions.SupplementalGroupsStrategyOptions, out *SupplementalGroupsStrategyOptions, s conversion.Scope) error {
	return your_sha256_hashta1_SupplementalGroupsStrategyOptions(in, out, s)
}

// ThirdPartyResource: ObjectMeta and Description are direct copies; the
// Versions slice is reinterpreted via unsafe.Pointer.
func your_sha256_hashesource(in *ThirdPartyResource, out *extensions.ThirdPartyResource, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	out.Description = in.Description
	out.Versions = *(*[]extensions.APIVersion)(unsafe.Pointer(&in.Versions))
	return nil
}

// your_sha256_hashrce is an autogenerated conversion function.
func your_sha256_hashrce(in *ThirdPartyResource, out *extensions.ThirdPartyResource, s conversion.Scope) error {
	return your_sha256_hashesource(in, out, s)
}

func your_sha256_hashesource(in *extensions.ThirdPartyResource, out *ThirdPartyResource, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	out.Description = in.Description
	out.Versions = *(*[]APIVersion)(unsafe.Pointer(&in.Versions))
	return nil
}

// your_sha256_hashrce is an autogenerated conversion function.
func your_sha256_hashrce(in *extensions.ThirdPartyResource, out *ThirdPartyResource, s conversion.Scope) error {
	return your_sha256_hashesource(in, out, s)
}

func your_sha256_hashrtyResourceData(in *ThirdPartyResourceData, out *extensions.ThirdPartyResourceData, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	out.Data = *(*[]byte)(unsafe.Pointer(&in.Data))
	return nil
}

// your_sha256_hashesourceData is an autogenerated conversion function.
func your_sha256_hashesourceData(in *ThirdPartyResourceData, out *extensions.ThirdPartyResourceData, s conversion.Scope) error {
	return your_sha256_hashrtyResourceData(in, out, s)
}

func your_sha256_hashrtyResourceData(in *extensions.ThirdPartyResourceData, out *ThirdPartyResourceData, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	out.Data = *(*[]byte)(unsafe.Pointer(&in.Data))
	return nil
}

// your_sha256_hashesourceData is an autogenerated conversion function.
func your_sha256_hashesourceData(in *extensions.ThirdPartyResourceData, out *ThirdPartyResourceData, s conversion.Scope) error {
	return your_sha256_hashrtyResourceData(in, out, s)
}

func your_sha256_hashrdPartyResourceDataList(in *ThirdPartyResourceDataList, out *extensions.ThirdPartyResourceDataList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	out.Items = *(*[]extensions.ThirdPartyResourceData)(unsafe.Pointer(&in.Items))
	return nil
}

// your_sha256_hashrtyResourceDataList is an autogenerated conversion function.
func your_sha256_hashrtyResourceDataList(in *ThirdPartyResourceDataList, out *extensions.ThirdPartyResourceDataList, s conversion.Scope) error {
	return your_sha256_hashrdPartyResourceDataList(in, out, s)
}

// On the internal -> v1beta1 path a nil Items slice is normalized to an empty
// (non-nil) slice so the serialized list always carries "items": [].
func your_sha256_hashrdPartyResourceDataList(in *extensions.ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	if in.Items == nil {
		out.Items = make([]ThirdPartyResourceData, 0)
	} else {
		out.Items = *(*[]ThirdPartyResourceData)(unsafe.Pointer(&in.Items))
	}
	return nil
}

// your_sha256_hashrtyResourceDataList is an autogenerated conversion function.
func your_sha256_hashrtyResourceDataList(in *extensions.ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, s conversion.Scope) error {
	return your_sha256_hashrdPartyResourceDataList(in, out, s)
}

func your_sha256_hashrtyResourceList(in *ThirdPartyResourceList, out *extensions.ThirdPartyResourceList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	out.Items = *(*[]extensions.ThirdPartyResource)(unsafe.Pointer(&in.Items))
	return nil
}

// your_sha256_hashesourceList is an autogenerated conversion function.
func your_sha256_hashesourceList(in *ThirdPartyResourceList, out *extensions.ThirdPartyResourceList, s conversion.Scope) error {
	return your_sha256_hashrtyResourceList(in, out, s)
}

// Same nil-normalization on the internal -> v1beta1 list path as above.
func your_sha256_hashrtyResourceList(in *extensions.ThirdPartyResourceList, out *ThirdPartyResourceList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	if in.Items == nil {
		out.Items = make([]ThirdPartyResource, 0)
	} else {
		out.Items = *(*[]ThirdPartyResource)(unsafe.Pointer(&in.Items))
	}
	return nil
}

// your_sha256_hashesourceList is an autogenerated conversion function.
func your_sha256_hashesourceList(in *extensions.ThirdPartyResourceList, out *ThirdPartyResourceList, s conversion.Scope) error {
	return your_sha256_hashrtyResourceList(in, out, s)
}
```
The Changhe Freedom M70 is an MPV manufactured by Changhe, a sub-brand of BAIC. Overview The Changhe Freedom M70 was revealed in 2016 with prices ranging from 54,900 yuan to 64,900 yuan, while the Changhe Freedom M70 was available on the market from March 2017. Specifications The Changhe Freedom M70 is powered by a 1.5-litre engine mated to a 5-speed manual transmission. It is available with 5-, 7- and 8-seater configurations. References External links Cars of China Minivans 2010s cars BAIC Group vehicles
Glyptotendipes is a genus of non-biting midges in the subfamily Chironominae of the bloodworm family Chironomidae. Species G. aequalis (Kieffer, 1922) G. amplus Townes, 1945 G. barbipes (Stæger, 1839) G. caulicola (Kieffer, 1913) G. cauliginellus (Kieffer, 1913) G. dreisbachi Townes, 1945 G. foliicola Contreras-Lichtenberg, 1997 G. glaucus (Meigen, 1818) G. gripekoveni (Kieffer, 1913) G. imbecilis (Walker, 1856) G. lejcher (Hatfull, 2014) G. lobiferus (Say, 1823) G. meridionalis Dendy and Sublette, 1959 G. ospeli Contreras-Lichtenberg & Kiknadze, 1999 G. pallens (Meigen, 1804) G. paripes (Edwards, 1929) G. salinus Michailova, 1987 G. scirpi (Kieffer, 1915) G. seminole Townes, 1945 G. senilis (Johannsen, 1937) G. signatus (Kieffer, 1909) G. testaceus (Townes, 1945) G. unacus Townes, 1945 G. viridis (Macquart, 1834) References Chironomidae Nematoceran flies of Europe Culicomorpha genera
"I Can See It" is a song by English synth-pop duo Blancmange, released in April 1986 as a non-album single. The song is a re-recorded version of "Why Don't They Leave Things Alone?", which appeared on the duo's third studio album Believe You Me (1985). It was written by Neil Arthur and Stephen Luscombe, and produced by Greg Walsh. "I Can See It" reached number 71 in the UK Singles Chart and remains the duo's last appearance in the chart. Shortly after the single's release, Blancmange decided to disband. "I Can See It" was recorded at Eel Pie Studios in London, while the B-side, "Scream Down the House", was recorded at the Strongroom, London. The song's music video was directed by Gerald Casale of new wave band Devo. Critical reception On its release, Ian Cranna of Smash Hits described "Why Don't They Leave Things Alone?" as the "best song on the disappointing Believe You Me" and said that as "I Can See It", the song had been "reworked into an absolute cracker". He added, "Blancmange are fairly hit-and-miss, mostly due to their healthy madness being channelled into rhythms rather than tunes - but this has the best of both worlds." Betty Page of Record Mirror commented, "Gently persuasive, but needs a few concentrated listens before its charms become apparent. The Neil Arthur here is nothing like the Neil Arthur we used to know and lurve from 'Living on the Ceiling' days." Dave Ling of Number One felt the song was "stunningly average" and "hardly an obvious hit for Blancmange". He added, "This newie is quite an anonymous offering by comparison [to "Lose Your Love"]. In fact, without knowing who the artist was beforehand you'd be hard pushed to guess correctly." John Lee of the Huddersfield Daily Examiner described it as "fairly meritorious" but added that it "lacks the bite to change things for the better for this downward spiralling duo". 
Paul Benbow of the Reading Evening Post noted Walsh's "lavish production" but added that the "tedious vocals add up to hot air only used well to get a natty break from a trumpet". In a review of the 2017 deluxe edition of Believe You Me (1985), Paul Scott-Bates of Louder Than War described the original version as a "medium paced tune about the state of the World". He praised the single version as "arguably the band's finest single" and added that the extended version was "nothing short of superb". The Electricity Club commented that the song was one of the album's "finer moments", adding that "the use of cello and flute lends the finished piece a quiet quality". In a retrospective review of Believe You Me, Bill Cassel of AllMusic described it as the "loveliest, saddest ballad Blancmange ever recorded". Track listing 7" single "I Can See It" – 4:07 "Scream Down the House" – 4:08 12" single "I Can See It (Extended)" – 7:58 "Scream Down the House" – 4:08 12" single (UK promo) "I Can See It (Bonus Beats)" – 10:15 "Scream Down the House" – 4:08 Chart performance Personnel Blancmange Neil Arthur – lead vocals, arranger on "I Can See It", producer of "Scream Down the House" Stephen Luscombe – keyboards, synthesizers Additional personnel Greg Walsh – producer and arranger of "I Can See It" Brian Evans – engineer on "I Can See It" Phil Bodger – engineer on "Scream Down the House" Tony Bridge – mastering on "I Can See It" Other Stylorouge – design Mick Brownfield – illustration References External links 1985 songs 1986 singles Blancmange (band) songs London Records singles Songs written by Neil Arthur Songs written by Stephen Luscombe
"Microphone Master" is a song by American hip hop group Das EFX. It is the second single from their third studio album Hold It Down (1995). The song was produced by Easy Mo Bee. The official remix of the song features hip hop group Mobb Deep and was also released in 1995. Charts References 1995 songs 1996 singles Das EFX songs East West Records singles Song recordings produced by Easy Mo Bee Songs written by Easy Mo Bee
```xml
// Barrel module: forwards the default export of ./events unchanged
// (live re-export binding; no local evaluation of the module's value).
export { default } from "./events";
```
```php
<?php

/*
 * Generic admin-panel language lines (English locale, Voyager-style).
 *
 * NOTE(review): a large number of values below are empty strings ('') or a
 * single space (' ') — e.g. 'action', 'add', 'cancel', 'yes'. This looks like
 * stripped/corrupted translation text rather than intent; restore from the
 * upstream English locale file before shipping, as empty labels render blank
 * in the UI. Also note 'is_rtl' holds the *string* 'false', not a boolean —
 * consumers must compare against the string.
 */
return [ 'is_rtl' => 'false', 'action' => '', 'actions' => '', 'add' => '', 'add_folder' => '', 'add_new' => '', 'all_done' => '', 'are_you_sure' => '', 'are_you_sure_delete' => ' ', 'ascending' => 'Ascending', 'auto_increment' => '', 'bread' => 'BREAD', 'browse' => '', 'builder' => '', 'bulk_delete' => ' ', 'bulk_delete_confirm' => ' ', 'bulk_delete_nothing' => ' ', 'cancel' => '', 'choose_type' => ' ', 'click_here' => '', 'close' => 'Close', 'compass' => 'Compass', 'created_at' => 'Created at', 'custom' => 'Custom', 'dashboard' => 'Dashboard', 'database' => 'Database', 'default' => 'Default', 'delete' => 'Delete', 'delete_confirm' => ' ', 'delete_question' => ' ', 'delete_this_confirm' => ' ', 'descending' => 'Descending', 'deselect_all' => 'Deselect All', 'download' => 'Download', 'drag_drop_info' => 'Drag and drop the Items below to re-arrange them.', 'edit' => 'Edit', 'email' => 'E-mail', 'error_deleting' => 'Sorry it appears there was a problem deleting this', 'error_restoring' => 'Sorry it appears there was a problem restoring this', 'exception' => 'Exception', 'featured' => 'Featured', 'field_does_not_exist' => 'Field does not exist', 'find_by_place' => 'Find by Place', 'home' => 'Home', 'how_to_use' => 'How To Use', 'index' => 'Index', 'internal_error' => 'Internal error', 'items' => 'item(s)', 'keep_sidebar_open' => 'Yarr! Drop the anchors! 
(and keep the sidebar open)', 'key' => 'Key', 'last_modified' => 'Last modified', 'latitude' => 'Latitude', 'length' => 'Length', 'locale' => 'Locale', 'login' => 'Login', 'logout' => 'Logout', 'longitude' => 'Longitude', 'media' => 'Media', 'menu_builder' => 'Menu Builder', 'mimetype_not_allowed' => 'This mimetype is not allowed', 'move' => 'Move', 'name' => 'Name', 'new' => 'New', 'no' => 'No', 'no_thanks' => 'No Thanks', 'none' => 'None', 'not_null' => 'Not Null', 'no_results' => 'No results', 'open' => 'Open', 'options' => 'Options', 'password' => 'Password', 'permissions' => 'Permissions', 'profile' => 'Profile', 'public_url' => 'Public URL', 'read' => 'Read', 'rename' => 'Rename', 'remember_me' => 'Remember me', 'required' => 'Required', 'return_to_list' => 'Return to List', 'route' => 'Route', 'save' => 'Save', 'search' => 'Search', 'select_all' => 'Select All', 'select_group' => 'Select Existing Group or Add New', 'settings' => 'Settings', 'showing_entries' => 'Showing :from to :to of :all entry|Showing :from to :to of :all entries', 'submit' => 'Submit', 'successfully_added_new' => 'Successfully Added New', 'successfully_deleted' => 'Successfully Deleted', 'successfully_restored' => 'Successfully Restored', 'successfully_updated' => 'Successfully Updated', 'successfully_created' => 'Successfully created', 'sweet_success' => 'Sweet Success!', 'timestamp' => 'Timestamp', 'title' => 'Title', 'type' => 'Type', 'restore' => 'Restore', 'unsigned' => '', 'unstick_sidebar' => ' ', 'update' => 'Update', 'update_failed' => ' ', 'updated_order' => ' ', 'upload' => '', 'url' => 'URL', 'view' => 'View', 'viewing' => '', 'whoopsie' => 'Whoopsie!', 'yes' => '', 'yes_please' => '', ]; ```
Lizzobangers is the debut studio album by American rapper and singer Lizzo. It was released on Totally Gross National Product on October 15, 2013. In 2014, it was re-released on Virgin Records. Production Lizzobangers is produced by Lazerbeak and Ryan Olson. Some beats on the album are taken from Lazerbeak's 2012 album Lava Bangers. Release The album was released on Totally Gross National Product on October 15, 2013. In 2014, it was re-released on Virgin Records. In 2019, the album was removed from all streaming services and digital retailers, to aid in Lizzo's campaign for Best New Artist at the 62nd Annual Grammy Awards. A month after the ceremony on February 21, 2020, the album returned to streaming services. Music videos Music videos were created for "Batches & Cookies", "Faded", "Bus Passes and Happy Meals", and "Paris". Impose included the video for "Batches & Cookies" on the "Best Videos of 2013" list. Critical reception At Metacritic, which assigns a weighted average score out of 100 to reviews from mainstream critics, the album received an average score of 85, based on 5 reviews, indicating "universal acclaim". Dylan Kilby of MusicOMH gave the album four stars out of five, describing it as "a triumphant album by an extraordinary artist and woman, whose girl-empowering lyricism and social consciousness puts her at the top of the underground and alternative hip-hop community." Killian Fox of The Guardian gave the album four stars out of five, saying: "At times joyfully nonsensical, Lizzo's stream-of-consciousness rhymes can also be lethally pointed." Star Tribune placed the album at number 1 on the "Twin Cities Critics Tally 2013" list. Track listing Personnel Credits adapted from the 2014 vinyl edition's liner notes. 
Lizzo – vocals, flute Cliff Rhymes – vocals Sophia Eris – vocals (on "Batches & Cookies") Lazerbeak – production Ryan Olson – production Plain Ole Bill – turntables Jake Hansen – guitar Jim Anton – bass guitar James Buckley – bass guitar Erica Burton – viola Nelson Devereaux – saxophone Joey Van Phillips – vibraphone, percussion BJ Burton – mixing Huntley Miller – mastering Garrett Born – photography Jeffrey Barr – logo Paper Tiger – layout Drew Christopherson – layout References External links 2013 debut albums Lizzo albums Albums produced by Lazerbeak Totally Gross National Product albums Alternative hip hop albums by American artists
Kharal (Punjabi: ) is a Punjabi tribe predominantly found in the Sandal Bar region of Punjab, Pakistan, with significant numbers also present in Mandi Bahauddin, Pind Dadan Khan, Muzaffargarh, Multan and further south to Sindh. History and description The Kharals predominantly inhabit the western plains of Punjab that lie below the Salt Range and its surrounding areas. The Kharals seem to be most concentrated in the Ravi River Valley between Lahore and the former Montgomery District, this corresponds well to Ain-i-Akbari (1595 CE) listing of Kharal Zamindaris in different Parganas. Additionally, Kharals were designated as a Martial race being known for their bravery and fierceness. A journal by Government College University lists Kharals along with other tribes as Jats. Modern Indian and Pakistani census reports mention Kharals as Jats who claim Kshatriya(Rajput) origin. The Kharals have numerous subdivisions and clans some of which include Upera, Lakhera, Begeke, Randhaira, Lalhaira, Rubera, Sahi, Lodike[y], Dehar, Churiara, Khar, Bhandra, Daulke[y], Sherke[y] and Gogera; the Kharals use many titles including Rai, Raja, Chaudhry and Malik but Rai is mostly used. Rai Ahmad Khan Kharal is a historical personality, who was the chieftain of the Kharal tribe, who revolted against the British in the 1857 revolt, where he consequently took leadership of the many local tribes in the region. He was killed while offering afternoon prayers by the British on September 21, 1857. The Kharals are also famous in the Indian Subcontinent due to one of the greatest and most tragic Punjabi romances, called Mirza Sahiban. According to the story of Mirza Sahiban as narrated by the Punjabi writer Pilu, the story's protagonist is Mirza, son of the chief of the Kharals of Danabad, who falls in love with his cousin Sahiban, of the Sial tribe. To date there have been many film adaptations in both Pakistan and India of the Story of Mirza and Sahiban.
References External links Kharal and Berkley (19th century British India) on Dawn (newspaper) Surnames Jat clans of Punjab Rajput clans of Punjab Punjabi tribes Punjabi-language surnames
```objective-c // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #ifndef PX_PHYSICS_NX_HEIGHTFIELD_GEOMETRY #define PX_PHYSICS_NX_HEIGHTFIELD_GEOMETRY /** \addtogroup geomutils @{ */ #include "geometry/PxTriangleMeshGeometry.h" #include "common/PxCoreUtilityTypes.h" #if !PX_DOXYGEN namespace physx { #endif #define PX_MIN_HEIGHTFIELD_XZ_SCALE 1e-8f #define PX_MIN_HEIGHTFIELD_Y_SCALE (0.0001f / PxReal(0xFFFF)) class PxHeightField; /** \brief Height field geometry class. This class allows to create a scaled height field geometry instance. 
There is a minimum allowed value for Y and XZ scaling - PX_MIN_HEIGHTFIELD_XZ_SCALE, heightfield creation will fail if XZ value is below this value. */ class PxHeightFieldGeometry : public PxGeometry { public: PX_INLINE PxHeightFieldGeometry() : PxGeometry (PxGeometryType::eHEIGHTFIELD), heightField (NULL), heightScale (1.0f), rowScale (1.0f), columnScale (1.0f), heightFieldFlags(0) {} PX_INLINE PxHeightFieldGeometry(PxHeightField* hf, PxMeshGeometryFlags flags, PxReal heightScale_, PxReal rowScale_, PxReal columnScale_) : PxGeometry (PxGeometryType::eHEIGHTFIELD), heightField (hf) , heightScale (heightScale_), rowScale (rowScale_), columnScale (columnScale_), heightFieldFlags (flags) { } /** \brief Returns true if the geometry is valid. \return True if the current settings are valid \note A valid height field has a positive scale value in each direction (heightScale > 0, rowScale > 0, columnScale > 0). It is illegal to call PxRigidActor::createShape and PxPhysics::createShape with a height field that has zero extents in any direction. @see PxRigidActor::createShape, PxPhysics::createShape */ PX_INLINE bool isValid() const; public: /** \brief The height field data. */ PxHeightField* heightField; /** \brief The scaling factor for the height field in vertical direction (y direction in local space). */ PxReal heightScale; /** \brief The scaling factor for the height field in the row direction (x direction in local space). */ PxReal rowScale; /** \brief The scaling factor for the height field in the column direction (z direction in local space). */ PxReal columnScale; /** \brief Flags to specify some collision properties for the height field. */ PxMeshGeometryFlags heightFieldFlags; PxPadding<3> paddingFromFlags; //!< padding for mesh flags. 
}; PX_INLINE bool PxHeightFieldGeometry::isValid() const { if (mType != PxGeometryType::eHEIGHTFIELD) return false; if (!PxIsFinite(heightScale) || !PxIsFinite(rowScale) || !PxIsFinite(columnScale)) return false; if (rowScale < PX_MIN_HEIGHTFIELD_XZ_SCALE || columnScale < PX_MIN_HEIGHTFIELD_XZ_SCALE || heightScale < PX_MIN_HEIGHTFIELD_Y_SCALE) return false; if (!heightField) return false; return true; } #if !PX_DOXYGEN } // namespace physx #endif /** @} */ #endif ```
Więckowice is a village in the administrative district of Gmina Proszowice, within Proszowice County, Lesser Poland Voivodeship, in southern Poland. References Villages in Proszowice County
Steven W. Hawkins (born July 10, 1962) is an American social justice leader and litigator who currently serves as president and CEO of the US Cannabis Council. He previously served as executive director of the Marijuana Policy Project and as executive director of Amnesty International USA. Prior to these roles, he served as the Executive Vice President and Chief Program Officer of the National Association for the Advancement of Colored People (NAACP). He also held position as executive director of the National Coalition to Abolish the Death Penalty, as senior program manager at Justice, Equality, Human Dignity and Tolerance Foundation, and as program executive at Atlantic Philanthropies and as an attorney for the NAACP Legal Defense Fund. Hawkins is known for bringing litigation that led to the release of three teenagers wrongfully convicted and sentenced to death row in Tennessee. Early life and education Hawkins was born in Peekskill, New York and raised in Ossining, New York, which was home to Sing Sing Correctional Facility. In high school Hawkins attended a field trip to Sing Sing where he met with inmates who opened his eyes and inspired his lifelong commitment to social justice advocacy. Hawkins grew up with reminders of the injustices of a U.S. criminal justice system that disproportionately targets minorities and the economically disadvantaged. Many of the inmates were Black Panthers or inmates from Attica Correctional Facility who fought inhumane prison conditions. Hawkins graduated from Harvard College with a B.S. in economics in 1984. In 1985, Hawkins spent a year at the University of Zimbabwe during the turmoil, repression and massacre of civilians at the hands of rebels during the country's first post-independence election. He also attended New York University School of Law as a Root Tilden scholar. After graduating in 1988, he clerked for Judge A. Leon Higginbotham of the United States Court of Appeals for the Third Circuit. 
Career As an attorney with the NAACP Legal Defense Fund, Hawkins represented African American men facing the death penalty throughout the Deep South. He continued his work in social justice focused on abolishing the death penalty. He led a partnership of organizations as executive director of the National Coalition to Abolish the Death Penalty in Washington, D.C. that successfully campaigned to abolish the death penalty for juvenile crimes. Following his tenure at the National Coalition to Abolish the Death Penalty, Hawkins moved into philanthropy to advocate for human rights and social justice causes at the JEHT Foundation and later at Atlantic Philanthropies. After returning to the NAACP as Executive Vice President and Chief Program Officer, Hawkins often worked in coalition with Amnesty International USA on abolishing the death penalty and national security issues. Career at NAACP During his six years at the NAACP Legal Defense Fund, he represented African American men facing the death penalty throughout the Deep South. He investigated and brought litigation that saved the lives and led to the release of three black teens on death row wrongfully convicted in Tennessee. Career at Amnesty International USA In September 2013, he became executive director of Amnesty International USA. Hawkins’ vision for AIUSA - to “Bring Human Rights Home” - relies heavily on the use of innovative digital platforms to connect human rights activists across the globe. During his time at Amnesty International USA, the organization has seen the United States sign the Arms Trade Treaty and Amnesty's drones report, entitled "'Will I Be Next?' US Drone Strikes in Pakistan" receive substantial media coverage. Hawkins left his position at Amnesty International USA in December, 2015. Recognition In the summer of 1997 Hawkins was given the illustrious title "The Snoop Dogg of Science" by his fans. In 2003, Hawkins was the recipient of the Law School's Public Interest Service Award.
References 1962 births Living people 20th-century African-American people 21st-century African-American people African-American activists American community activists Amnesty International people Harvard College alumni People from Ossining, New York People from Peekskill, New York University of Zimbabwe alumni
```perl6
###################################################
# server boilerplate generator
# released under the GNU GPL
package Parse::Pidl::Samba4::NDR::Server;

use strict;
use Parse::Pidl::Util;

use vars qw($VERSION);
$VERSION = '0.01';

# Accumulator for the generated C source file; every helper appends to it.
my($res);

# Append a chunk of generated C text to the output buffer.
sub pidl($) { $res .= shift; }

#####################################################
# generate the switch statement for function dispatch
# One "case <opnum>:" per wire-callable function; each case casts the
# generic request pointer, calls the dcesrv_<name>() handler and notes
# whether the call will complete asynchronously.
sub gen_dispatch_switch($)
{
	my $interface = shift;

	foreach my $fn (@{$interface->{FUNCTIONS}}) {
		# Functions without an opnum are not callable over the wire.
		next if not defined($fn->{OPNUM});

		pidl "\tcase $fn->{OPNUM}: {\n";
		pidl "\t\tstruct $fn->{NAME} *r2 = (struct $fn->{NAME} *)r;\n";
		# Dump the IN arguments at high debug levels.
		pidl "\t\tif (DEBUGLEVEL >= 10) {\n";
		pidl "\t\t\tNDR_PRINT_FUNCTION_DEBUG($fn->{NAME}, NDR_IN, r2);\n";
		pidl "\t\t}\n";
		# Capture the handler's result only when the IDL declares one.
		if ($fn->{RETURN_TYPE} && $fn->{RETURN_TYPE} ne "void") {
			pidl "\t\tr2->out.result = dcesrv_$fn->{NAME}(dce_call, mem_ctx, r2);\n";
		} else {
			pidl "\t\tdcesrv_$fn->{NAME}(dce_call, mem_ctx, r2);\n";
		}
		pidl "\t\tif (dce_call->state_flags & DCESRV_CALL_STATE_FLAG_ASYNC) {\n";
		pidl "\t\t\tDEBUG(5,(\"function $fn->{NAME} will reply async\\n\"));\n";
		pidl "\t\t}\n";
		pidl "\t\tbreak;\n\t}\n";
	}
}

#####################################################
# generate the switch statement for function reply
# Mirrors gen_dispatch_switch(): one case per opnum, run when an async
# call finally replies, logging OUT arguments and any fault code.
sub gen_reply_switch($)
{
	my $interface = shift;

	foreach my $fn (@{$interface->{FUNCTIONS}}) {
		next if not defined($fn->{OPNUM});

		pidl "\tcase $fn->{OPNUM}: {\n";
		pidl "\t\tstruct $fn->{NAME} *r2 = (struct $fn->{NAME} *)r;\n";
		pidl "\t\tif (dce_call->state_flags & DCESRV_CALL_STATE_FLAG_ASYNC) {\n";
		pidl "\t\t\tDEBUG(5,(\"function $fn->{NAME} replied async\\n\"));\n";
		pidl "\t\t}\n";
		# Dump the OUT arguments only for successful calls.
		pidl "\t\tif (DEBUGLEVEL >= 10 && dce_call->fault_code == 0) {\n";
		pidl "\t\t\tNDR_PRINT_FUNCTION_DEBUG($fn->{NAME}, NDR_OUT | NDR_SET_VALUES, r2);\n";
		pidl "\t\t}\n";
		pidl "\t\tif (dce_call->fault_code != 0) {\n";
		pidl "\t\t\tDEBUG(2,(\"dcerpc_fault %s in $fn->{NAME}\\n\", dcerpc_errstr(mem_ctx, dce_call->fault_code)));\n";
		pidl "\t\t}\n";
		pidl "\t\tbreak;\n\t}\n";
	}
}
#####################################################################
# produce boilerplate code for a interface
# Emits the complete dcesrv_interface glue for one parsed interface:
# bind/unbind hooks, NDR pull/push, dispatch and reply handlers, and
# the dcesrv_<name>_interface table that ties them together.
sub Boilerplate_Iface($)
{
	my($interface) = shift;
	my $name = $interface->{NAME};
	my $uname = uc $name;
	my $uuid = lc($interface->{UUID});
	my $if_version = $interface->{VERSION};

	# Fixed template up to the opening of the dispatch switch; the
	# per-function cases are filled in by gen_dispatch_switch().
	pidl " static NTSTATUS $name\__op_bind(struct dcesrv_call_state *dce_call, const struct dcesrv_interface *iface, uint32_t if_version) { #ifdef DCESRV_INTERFACE_$uname\_BIND return DCESRV_INTERFACE_$uname\_BIND(dce_call,iface); #else return NT_STATUS_OK; #endif } static void $name\__op_unbind(struct dcesrv_connection_context *context, const struct dcesrv_interface *iface) { #ifdef DCESRV_INTERFACE_$uname\_UNBIND DCESRV_INTERFACE_$uname\_UNBIND(context, iface); #else return; #endif } static NTSTATUS $name\__op_ndr_pull(struct dcesrv_call_state *dce_call, TALLOC_CTX *mem_ctx, struct ndr_pull *pull, void **r) { enum ndr_err_code ndr_err; uint16_t opnum = dce_call->pkt.u.request.opnum; dce_call->fault_code = 0; if (opnum >= ndr_table_$name.num_calls) { dce_call->fault_code = DCERPC_FAULT_OP_RNG_ERROR; return NT_STATUS_NET_WRITE_FAULT; } *r = talloc_named(mem_ctx, ndr_table_$name.calls[opnum].struct_size, \"struct %s\", ndr_table_$name.calls[opnum].name); NT_STATUS_HAVE_NO_MEMORY(*r); /* unravel the NDR for the packet */ ndr_err = ndr_table_$name.calls[opnum].ndr_pull(pull, NDR_IN, *r); if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) { dcerpc_log_packet(dce_call->conn->packet_log_dir, &ndr_table_$name, opnum, NDR_IN, &dce_call->pkt.u.request.stub_and_verifier); dce_call->fault_code = DCERPC_FAULT_NDR; return NT_STATUS_NET_WRITE_FAULT; } return NT_STATUS_OK; } static NTSTATUS $name\__op_dispatch(struct dcesrv_call_state *dce_call, TALLOC_CTX *mem_ctx, void *r) { uint16_t opnum = dce_call->pkt.u.request.opnum; switch (opnum) { ";
	gen_dispatch_switch($interface);
	# Close the dispatch switch, then open the reply handler's switch,
	# whose cases come from gen_reply_switch().
	pidl " default: dce_call->fault_code = DCERPC_FAULT_OP_RNG_ERROR; break; } if (dce_call->fault_code != 0) { dcerpc_log_packet(dce_call->conn->packet_log_dir, &ndr_table_$name, opnum, NDR_IN, &dce_call->pkt.u.request.stub_and_verifier); return NT_STATUS_NET_WRITE_FAULT; } return NT_STATUS_OK; } static NTSTATUS $name\__op_reply(struct dcesrv_call_state *dce_call, TALLOC_CTX *mem_ctx, void *r) { uint16_t opnum = dce_call->pkt.u.request.opnum; switch (opnum) { ";
	gen_reply_switch($interface);
	# Trailer: close the reply switch, emit the NDR push handler and the
	# interface operations table referencing all the functions above.
	pidl " default: dce_call->fault_code = DCERPC_FAULT_OP_RNG_ERROR; break; } if (dce_call->fault_code != 0) { dcerpc_log_packet(dce_call->conn->packet_log_dir, &ndr_table_$name, opnum, NDR_IN, &dce_call->pkt.u.request.stub_and_verifier); return NT_STATUS_NET_WRITE_FAULT; } return NT_STATUS_OK; } static NTSTATUS $name\__op_ndr_push(struct dcesrv_call_state *dce_call, TALLOC_CTX *mem_ctx, struct ndr_push *push, const void *r) { enum ndr_err_code ndr_err; uint16_t opnum = dce_call->pkt.u.request.opnum; ndr_err = ndr_table_$name.calls[opnum].ndr_push(push, NDR_OUT, r); if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) { dce_call->fault_code = DCERPC_FAULT_NDR; return NT_STATUS_NET_WRITE_FAULT; } return NT_STATUS_OK; } const struct dcesrv_interface dcesrv\_$name\_interface = { .name = \"$name\", .syntax_id = {".print_uuid($uuid).",$if_version}, .bind = $name\__op_bind, .unbind = $name\__op_unbind, .ndr_pull = $name\__op_ndr_pull, .dispatch = $name\__op_dispatch, .reply = $name\__op_reply, .ndr_push = $name\__op_ndr_push }; ";
}

#####################################################################
# produce boilerplate code for an endpoint server
# Emits the dcesrv_endpoint_server registration glue: per-endpoint
# interface registration plus lookup by UUID and by name, and the
# public dcerpc_server_<name>_init() entry point.
sub Boilerplate_Ep_Server($)
{
	my($interface) = shift;
	my $name = $interface->{NAME};
	my $uname = uc $name;

	pidl " static NTSTATUS $name\__op_init_server(struct dcesrv_context *dce_ctx, const struct dcesrv_endpoint_server *ep_server) { int i; for (i=0;i<ndr_table_$name.endpoints->count;i++) { NTSTATUS ret; const char *name = ndr_table_$name.endpoints->names[i]; ret = dcesrv_interface_register(dce_ctx, name, &dcesrv_$name\_interface, NULL); if (!NT_STATUS_IS_OK(ret)) { DEBUG(1,(\"$name\_op_init_server: failed to register endpoint \'%s\'\\n\",name)); return ret; } } return NT_STATUS_OK; } static bool $name\__op_interface_by_uuid(struct dcesrv_interface *iface, const struct GUID *uuid, uint32_t if_version) { if (dcesrv_$name\_interface.syntax_id.if_version == if_version && GUID_equal(\&dcesrv\_$name\_interface.syntax_id.uuid, uuid)) { memcpy(iface,&dcesrv\_$name\_interface, sizeof(*iface)); return true; } return false; } static bool $name\__op_interface_by_name(struct dcesrv_interface *iface, const char *name) { if (strcmp(dcesrv_$name\_interface.name, name)==0) { memcpy(iface, &dcesrv_$name\_interface, sizeof(*iface)); return true; } return false; } NTSTATUS dcerpc_server_$name\_init(void) { NTSTATUS ret; struct dcesrv_endpoint_server ep_server; /* fill in our name */ ep_server.name = \"$name\"; /* fill in all the operations */ ep_server.init_server = $name\__op_init_server; ep_server.interface_by_uuid = $name\__op_interface_by_uuid; ep_server.interface_by_name = $name\__op_interface_by_name; /* register ourselves with the DCERPC subsystem. */ ret = dcerpc_register_ep_server(&ep_server); if (!NT_STATUS_IS_OK(ret)) { DEBUG(0,(\"Failed to register \'$name\' endpoint server!\\n\")); return ret; } return ret; } ";
}

#####################################################################
# dcerpc server boilerplate from a parsed IDL structure
# Appends the generated server code for one interface to $res and
# returns the buffer so far.
sub ParseInterface($)
{
	my($interface) = shift;
	my $count = 0;

	# Always emit the init prototype so callers can link against it.
	$res .= "NTSTATUS dcerpc_server_$interface->{NAME}\_init(void);\n";
	$res .= "\n";

	# An interface without a uuid cannot be registered; stop here.
	if (!defined $interface->{PROPERTIES}->{uuid}) {
		return $res;
	}

	if (!defined $interface->{PROPERTIES}->{version}) {
		$interface->{PROPERTIES}->{version} = "0.0";
	}

	# Count wire-callable functions; no opnum'd functions means no
	# boilerplate is worth generating.
	foreach my $fn (@{$interface->{FUNCTIONS}}) {
		if (defined($fn->{OPNUM})) { $count++; }
	}

	if ($count == 0) {
		return $res;
	}

	$res .= "/* $interface->{NAME} - dcerpc server boilerplate generated by pidl */\n\n";
	Boilerplate_Iface($interface);
	Boilerplate_Ep_Server($interface);
	return $res;
}

# Entry point: generate the complete server stub file for every
# non-object INTERFACE node in the parsed NDR tree.
sub Parse($$)
{
	my($ndr,$header) = @_;
	$res = "";
	$res .= "/* server functions auto-generated by pidl */\n";
	$res .= "#include \"$header\"\n";
	$res .= "#include <util/debug.h>\n";
	$res .= "\n";
	foreach my $x (@{$ndr}) {
		ParseInterface($x) if ($x->{TYPE} eq "INTERFACE" and not defined($x->{PROPERTIES}{object}));
	}
	return $res;
}

1;
```
Army Beach, Mazandaran ( – Plāzh-e Artash) is a village and military installation in Qareh Toghan Rural District, in the Central District of Neka County, Mazandaran Province, Iran. At the 2006 census, its population was 77, in 21 families. References Populated places in Neka County Military installations of Iran
```xml
import { bind } from 'decko';
import { saveAs } from 'file-saver';
import { IDisposable, ITerminalAddon, Terminal } from '@xterm/xterm';
import * as Zmodem from 'zmodem.js/src/zmodem_browser';
import { TrzszFilter } from 'trzsz';

// Configuration for the file-transfer addon: which protocols are enabled
// and the callbacks used to move bytes between terminal and server.
export interface ZmodeOptions {
    // enable zmodem (rz/sz) transfers
    zmodem: boolean;
    // enable trzsz transfers
    trzsz: boolean;
    // true when the remote shell is a Windows shell (passed to trzsz)
    windows: boolean;
    // timeout passed to trzsz for drag-and-drop initialization
    trzszDragInitTimeout: number;
    // invoked when a zmodem send (upload) session is detected
    onSend: () => void;
    // sends data to the server
    sender: (data: string | Uint8Array) => void;
    // writes data to the terminal display
    writer: (data: string | Uint8Array) => void;
}

// xterm.js addon that adds zmodem and/or trzsz file transfer support.
// Server output is routed through consume(); depending on options it is
// filtered by trzsz, by the zmodem sentry, or both chained together.
export class ZmodemAddon implements ITerminalAddon {
    private disposables: IDisposable[] = [];
    private terminal: Terminal;
    private sentry: Zmodem.Sentry;
    private session: Zmodem.Session;
    // callback that denies the pending zmodem detection (set in zmodemDetect)
    private denier: () => void;
    private trzszFilter: TrzszFilter;

    constructor(private options: ZmodeOptions) {}

    // ITerminalAddon hook: wire up whichever protocols are enabled.
    activate(terminal: Terminal) {
        this.terminal = terminal;
        if (this.options.zmodem) this.zmodemInit();
        if (this.options.trzsz) this.trzszInit();
    }

    // ITerminalAddon hook: release all registered listeners.
    dispose() {
        for (const d of this.disposables) {
            d.dispose();
        }
        this.disposables.length = 0;
    }

    // Entry point for raw server output. When trzsz is enabled it sees the
    // data first (and forwards non-transfer data via writeToTerminal);
    // otherwise the zmodem sentry consumes it directly. Any protocol error
    // resets the terminal to normal input mode.
    consume(data: ArrayBuffer) {
        try {
            if (this.options.trzsz) {
                this.trzszFilter.processServerOutput(data);
            } else {
                this.sentry.consume(data);
            }
        } catch (e) {
            console.error('[ttyd] zmodem consume: ', e);
            this.reset();
        }
    }

    // Re-enable keyboard input and refocus after a transfer ends or fails.
    @bind
    private reset() {
        this.terminal.options.disableStdin = false;
        this.terminal.focus();
    }

    // addEventListener + a disposable that removes it again on dispose().
    private addDisposableListener(target: EventTarget, type: string, listener: EventListener) {
        target.addEventListener(type, listener);
        this.disposables.push({ dispose: () => target.removeEventListener(type, listener) });
    }

    // Set up the trzsz filter, drag-and-drop upload handling on the
    // terminal element, and column tracking on resize.
    @bind
    private trzszInit() {
        const { terminal } = this;
        const { sender, writer, zmodem } = this.options;
        this.trzszFilter = new TrzszFilter({
            writeToTerminal: data => {
                // When no trzsz transfer is active and zmodem is also
                // enabled, chain the data into the zmodem sentry so both
                // protocols can be detected.
                if (!this.trzszFilter.isTransferringFiles() && zmodem) {
                    this.sentry.consume(data);
                } else {
                    writer(typeof data === 'string' ? data : new Uint8Array(data as ArrayBuffer));
                }
            },
            sendToServer: data => sender(data),
            terminalColumns: terminal.cols,
            isWindowsShell: this.options.windows,
            dragInitTimeout: this.options.trzszDragInitTimeout,
        });
        const element = terminal.element as EventTarget;
        this.addDisposableListener(element, 'dragover', event => event.preventDefault());
        this.addDisposableListener(element, 'drop', event => {
            event.preventDefault();
            this.trzszFilter
                .uploadFiles((event as DragEvent).dataTransfer?.items as DataTransferItemList)
                .then(() => console.log('[ttyd] upload success'))
                .catch(err => console.log('[ttyd] upload failed: ' + err));
        });
        this.disposables.push(terminal.onResize(size => this.trzszFilter.setTerminalColumns(size.cols)));
    }

    // Create the zmodem sentry and register Ctrl+C as a way to deny a
    // pending transfer detection.
    @bind
    private zmodemInit() {
        const { sender, writer } = this.options;
        const { terminal, reset, zmodemDetect } = this;
        this.session = null;
        this.sentry = new Zmodem.Sentry({
            to_terminal: octets => writer(new Uint8Array(octets)),
            sender: octets => sender(new Uint8Array(octets)),
            on_retract: () => reset(),
            on_detect: detection => zmodemDetect(detection),
        });
        this.disposables.push(
            terminal.onKey(e => {
                const event = e.domEvent;
                if (event.ctrlKey && event.key === 'c') {
                    if (this.denier) this.denier();
                }
            })
        );
    }

    // A zmodem transfer was detected: disable keyboard input, confirm the
    // session, and hand off to the send (upload) or receive path.
    @bind
    private zmodemDetect(detection: Zmodem.Detection): void {
        const { terminal, receiveFile } = this;
        terminal.options.disableStdin = true;
        this.denier = () => detection.deny();
        this.session = detection.confirm();
        this.session.on('session_end', () => this.reset());
        if (this.session.type === 'send') {
            this.options.onSend();
        } else {
            receiveFile();
        }
    }

    // Upload the chosen files over the current zmodem session, reporting
    // progress to the terminal.
    @bind
    public sendFile(files: FileList) {
        const { session, writeProgress } = this;
        Zmodem.Browser.send_files(session, files, {
            on_progress: (_, offer) => writeProgress(offer),
        })
            .then(() => session.close())
            .catch(() => this.reset());
    }

    // Accept each incoming zmodem offer and save the received payloads as
    // a browser download.
    @bind
    private receiveFile() {
        const { session, writeProgress } = this;
        session.on('offer', offer => {
            offer.on('input', () => writeProgress(offer));
            offer
                .accept()
                .then(payloads => {
                    const blob = new Blob(payloads, { type: 'application/octet-stream' });
                    saveAs(blob, offer.get_details().name);
                })
                .catch(() => this.reset());
        });
        session.start();
    }

    // Write a one-line "<name> <pct>% <done>/<total>" progress indicator.
    @bind
    private writeProgress(offer: Zmodem.Offer) {
        const { bytesHuman } = this;
        const file = offer.get_details();
        const name = file.name;
        const size = file.size;
        const offset = offer.get_offset();
        const percent = ((100 * offset) / size).toFixed(2);
        this.options.writer(`${name} ${percent}% ${bytesHuman(offset, 2)}/${bytesHuman(size, 2)}\r`);
    }

    // Format a byte count with a binary-unit suffix (KB, MB, ...).
    // NOTE(review): the validity regex below looks garbled (the alternation
    // after `?|` makes the test nearly always pass) — confirm the intended
    // numeric-validation pattern before relying on the '-' fallback.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    private bytesHuman(bytes: any, precision: number): string {
        if (!/^([-+])?|(\.\d+)(\d+(\.\d+)?|(\d+\.)|Infinity)$/.test(bytes)) {
            return '-';
        }
        if (bytes === 0) return '0';
        if (typeof precision === 'undefined') precision = 1;
        const units = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB'];
        const num = Math.floor(Math.log(bytes) / Math.log(1024));
        const value = (bytes / Math.pow(1024, Math.floor(num))).toFixed(precision);
        return `${value} ${units[num]}`;
    }
}
```
```go
package os

import (
	"fmt"
	"math"
	"os"
	"regexp"
	"strconv"

	"github.com/emc-advanced-dev/pkg/errors"
)

// DiskSize is a size that can be rendered in parted(8) command syntax and
// converted to a raw byte count.
type DiskSize interface {
	ToPartedFormat() string
	ToBytes() Bytes
}

// Bytes is a size expressed in bytes.
type Bytes int64

func (s Bytes) ToPartedFormat() string {
	return fmt.Sprintf("%dB", uint64(s))
}

func (s Bytes) ToBytes() Bytes {
	return s
}

// ToMegaBytes returns lowest whole number of size_MB so that size_MB >= (size_B / 1024^2)
func (s Bytes) ToMegaBytes() MegaBytes {
	return MegaBytes(int(math.Ceil(float64(s) / float64(MegaBytes(1).ToBytes()))))
}

// MegaBytes is a size expressed in mebibytes (1024^2 bytes).
type MegaBytes int64

func (s MegaBytes) ToPartedFormat() string {
	return fmt.Sprintf("%dMiB", uint64(s))
}

func (s MegaBytes) ToBytes() Bytes {
	// 1 MiB == 2^20 bytes.
	return Bytes(s << 20)
}

// GigaBytes is a size expressed in gibibytes (1024^3 bytes).
type GigaBytes int64

func (s GigaBytes) ToPartedFormat() string {
	return fmt.Sprintf("%dGiB", uint64(s))
}

func (s GigaBytes) ToBytes() Bytes {
	// 1 GiB == 2^30 bytes.
	return Bytes(s << 30)
}

// Sectors is a size expressed in 512-byte disk sectors.
type Sectors int64

// SectorSize is the assumed size of one disk sector, in bytes.
const SectorSize = 512

func (s Sectors) ToPartedFormat() string {
	return fmt.Sprintf("%ds", uint64(s))
}

func (s Sectors) ToBytes() Bytes {
	return Bytes(s * SectorSize)
}

// ToSectors converts any DiskSize to whole sectors, failing when the byte
// count is not an exact multiple of SectorSize.
func ToSectors(b DiskSize) (Sectors, error) {
	inBytes := b.ToBytes()
	if inBytes%SectorSize != 0 {
		return 0, errors.New("can't convert to sectors", nil)
	}
	return Sectors(inBytes / SectorSize), nil
}

// BlockDevice is the path/name of a block device.
type BlockDevice string

func (b BlockDevice) Name() string {
	return string(b)
}

// Partitioner creates a partition table and partitions on a device.
type Partitioner interface {
	MakeTable() error
	MakePart(partType string, start, size DiskSize) error
}

// Resource is something that can be acquired as a block device and later
// released.
type Resource interface {
	Acquire() (BlockDevice, error)
	Release() error
}

// Part is an acquired partition: a Resource with a known size and offset.
type Part interface {
	Resource
	Size() DiskSize
	Offset() DiskSize
	Get() BlockDevice
}

// IsExists reports whether path f exists (any Stat error other than
// "not exist" is treated as existing).
func IsExists(f string) bool {
	_, err := os.Stat(f)
	return !os.IsNotExist(err)
}

// ParseSize parses disk size string (e.g. "10GB" or "150MB") into MegaBytes
// If no unit string is provided, megabytes are assumed
func ParseSize(sizeStr string) (MegaBytes, error) {
	// Accept a bare integer with an optional m/mb/g/gb suffix in either case.
	r, _ := regexp.Compile("^([0-9]+)(m|mb|M|MB|g|gb|G|GB)?$")
	match := r.FindStringSubmatch(sizeStr)
	if len(match) != 3 {
		return -1, fmt.Errorf("%s: unrecognized size", sizeStr)
	}

	size, _ := strconv.ParseInt(match[1], 10, 64)
	unit := match[2]
	switch unit {
	case "g", "gb", "G", "GB":
		// Gigabyte inputs are normalized to megabytes.
		size *= 1024
	}

	if size == 0 {
		return -1, fmt.Errorf("%s: size must be larger than zero", sizeStr)
	}

	return MegaBytes(size), nil
}
```
```csharp
/*****************************************************************************
 *
 * ReoGrid - .NET spreadsheet component demo
 *
 * Demonstrates freezing columns at the left edge of a worksheet.
 *
 * Released under the MIT license; see the ReoGrid project for details.
 *
 ****************************************************************************/

using System;
using System.Windows.Forms;

namespace unvell.ReoGrid.Demo.WorksheetDemo.EdgeFreeze
{
	/// <summary>
	/// Demo control showing a worksheet with its first five columns frozen
	/// at the left edge.
	/// </summary>
	public partial class LeftFreezeDemo : UserControl
	{
		public LeftFreezeDemo()
		{
			InitializeComponent();
		}

		protected override void OnLoad(EventArgs e)
		{
			base.OnLoad(e);

			var sheet = this.reoGridControl.CurrentWorksheet;

			// freeze to left
			sheet.FreezeToCell(0, 5, FreezeArea.Left);

			// label the two sides of the freeze boundary
			sheet[5, 1] = "frozen region";
			sheet[5, 7] = "active region";
		}
	}
}
```
The Socialist Union was a British political party active from February 1886 to 1888. The group was formed by socialists around C. L. Fitzgerald who left the Social Democratic Federation (SDF) in protest at SDF leader H. M. Hyndman's acceptance of money which Maltman Barry had obtained from the Conservative Party, in order to campaign against the Liberal Party. The group published a newspaper, The Socialist. It succeeded in gaining the support of the Bristol Socialist Society, but elsewhere, membership was small. The group disbanded in 1887. James MacDonald, a prominent leader, rejoined the SDF, after Hyndman promised never again to accept money from bourgeois politicians. Other notable members of the group included future Prime Minister Ramsay MacDonald. References Social Democratic Federation breakaway groups 1880s in the United Kingdom 1886 establishments in the United Kingdom 1888 disestablishments in the United Kingdom Political parties established in 1886 Political parties disestablished in 1888 Defunct socialist parties in the United Kingdom
Rose City Antifa (RCA) is an antifascist group founded in 2007 in Portland, Oregon. A leftist group, it is the oldest known active antifa group in the United States. While anti-fascist activism in the United States dates back to the 1980s, Rose City Antifa is the first to adopt the abbreviated moniker antifa. Since 2016, Rose City Antifa has been one of the nine chapters of the Torch Network coalition. History Rose City Antifa was formed in 2007 to coordinate opposition to a music festival that was planned to be held near Portland by neo-Nazis associated with White Aryan Resistance. According to one of its leaders, the group concentrates on "outing" people whom they believe to be neo-Nazis. According to Alexander Reid Ross, the author of the book Against the Fascist Creep, Rose City Antifa grew out of the group Anti-Racist Action (ARA) which first appeared in 1987. Through Rose City Antifa, "the European and American models were sort of synthesized and the current model of Antifa in the US was developed". Between 2007 and 2013, Rose City Antifa was part of Anti-Racist Action. Since 2016, Rose City Antifa has been part of the Torch Network. In a 2020 interview, RCA activists described the group as having "a strong feminist and queer component", as opposed to a tendency toward toxic masculinity in ARA, and as pursuing tactics going beyond street confrontations with the far right. Rose City Antifa has campaigned against the white separatist organization Volksfront, the band Death in June, the Ku Klux Klan and the American Renaissance website. The group has organized opposition to Patriot Prayer rallies in Portland. Rose City Antifa clashed with law enforcement officials and supporters of the presidency of Donald Trump following the 2016 United States presidential election. Before a June 2017 rally, the group released a statement saying they would be "unapologetic" over the use of "physical militancy". 
In August 2020 RCA and Popular Mobilization organized a counter-protest against the Proud Boys in Portland. In September 2020, RCA published a photograph of Patriot Prayer founder Joey Gibson with Chester Doles, a former Imperial Wizard of the Ku Klux Klan. RCA argues that the Portland Police Bureau tolerates crimes by far-right groups while suppressing protests by left-wing groups. The group has sought to counter the argument that anti-fascist activism infringes on the freedom of speech of the far right. They have argued that the First Amendment to the United States Constitution "protect[s] citizens from state interference, not from criticism by the public ... we do not have a powerful state apparatus at our disposal ... therefore the concepts of 'censorship' and 'free speech rights' are not in any reasonable way applicable." Rose City Antifa also argues that anti-fascism does not target the speech of the far right, but rather targets its political organizing. See also Antifa (Germany) Anti-racism Post-World War II anti-fascism Iron Front Andy Ngo References Bibliography External links Interview with Rose City Antifa, February 24, 2020 2007 establishments in Oregon Anti-fascist organizations in the United States Left-wing militant groups in the United States Political violence in the United States Organizations based in Portland, Oregon
Nalšia or Nalšėnai (sometimes Nalsen, Nalse) was an ancient land in the early stages of the Grand Duchy of Lithuania. It is mentioned in written sources from 1229 to 1298. The references to it cease as it was fully incorporated into the Grand Duchy. While it is known that it was on the north-eastern border of Lithuania proper, the exact location is unknown and is debated among historians. It is believed that Nalšia was between Livonia and the Duchy of Lithuania and bordered Deltuva. Towns of Švenčionys and Utena are often identified as the most prominent settlements in the land. Several dukes of Nalšia are known. The most prominent of them was Daumantas of Pskov. Others were Lengvenis, nephew of Mindaugas, Suksė (Suxe), who defected to the Teutonic Knights, and Gerdenis, rival to Daumantas. References 13th century in Lithuania
```objective-c /* * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ #ifndef __DCN301_DCCG_H__ #define __DCN301_DCCG_H__ #include "dcn20/dcn20_dccg.h" #define DCCG_REG_LIST_DCN301() \ SR(DPPCLK_DTO_CTRL),\ DCCG_SRII(DTO_PARAM, DPPCLK, 0),\ DCCG_SRII(DTO_PARAM, DPPCLK, 1),\ DCCG_SRII(DTO_PARAM, DPPCLK, 2),\ DCCG_SRII(DTO_PARAM, DPPCLK, 3),\ SR(REFCLK_CNTL) #define DCCG_MASK_SH_LIST_DCN301(mask_sh) \ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 1, mask_sh),\ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 1, mask_sh),\ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 2, mask_sh),\ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 2, mask_sh),\ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 3, mask_sh),\ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 3, mask_sh),\ DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_PHASE, mask_sh),\ DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_MODULO, mask_sh),\ DCCG_SF(REFCLK_CNTL, REFCLK_CLOCK_EN, mask_sh),\ DCCG_SF(REFCLK_CNTL, REFCLK_SRC_SEL, mask_sh) struct dccg *dccg301_create( struct dc_context *ctx, const struct dccg_registers *regs, const struct dccg_shift *dccg_shift, const struct dccg_mask *dccg_mask); struct dccg *dccg301_create( struct dc_context *ctx, const struct dccg_registers *regs, const struct dccg_shift *dccg_shift, const struct dccg_mask *dccg_mask); #endif //__DCN301_DCCG_H__ ```
```c /* $OpenBSD: rget.c,v 1.8 2015/08/31 02:53:57 guenther Exp $ */ /*- * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Chris Torek. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include <stdio.h> #include "local.h" /* * Handle getc() when the buffer ran out: * Refill, then return the first character * in the newly-filled buffer. */ int __srget(FILE *fp) { _SET_ORIENTATION(fp, -1); if (__srefill(fp) == 0) { fp->_r--; return (*fp->_p++); } return (EOF); } DEF_STRONG(__srget); ```
```c++
#include "source/extensions/filters/http/oauth2/oauth_client.h"

#include <chrono>

#include "envoy/http/async_client.h"
#include "envoy/http/message.h"
#include "envoy/upstream/cluster_manager.h"

#include "source/common/common/base64.h"
#include "source/common/common/empty_string.h"
#include "source/common/common/fmt.h"
#include "source/common/common/logger.h"
#include "source/common/http/message_impl.h"
#include "source/common/http/utility.h"
#include "source/common/protobuf/message_validator_impl.h"
#include "source/common/protobuf/utility.h"
#include "source/extensions/filters/http/oauth2/oauth_response.pb.h"

using namespace std::chrono_literals;

namespace Envoy {
namespace Extensions {
namespace HttpFilters {
namespace Oauth2 {

namespace {

// Form-body templates for the token endpoint. The "WithCredentials"
// variants carry client_id/client_secret in the body (AuthType::
// UrlEncodedBody); the "Without" variants are paired with a Basic auth
// header (AuthType::BasicAuth).
constexpr const char* UrlBodyTemplateWithCredentialsForAuthCode =
    "grant_type=authorization_code&code={0}&client_id={1}&client_secret={2}&redirect_uri={3}";

constexpr const char* UrlBodyTemplateWithoutCredentialsForAuthCode =
    "grant_type=authorization_code&code={0}&redirect_uri={1}";

constexpr const char* UrlBodyTemplateWithCredentialsForRefreshToken =
    "grant_type=refresh_token&refresh_token={0}&client_id={1}&client_secret={2}";

constexpr const char* UrlBodyTemplateWithoutCredentialsForRefreshToken =
    "grant_type=refresh_token&refresh_token={0}";

} // namespace

// Exchange an authorization code for an access token (authorization_code
// grant). Builds the POST body per auth_type, moves the client to the
// PendingAccessToken state, and dispatches the request asynchronously;
// the outcome is delivered through onSuccess()/onFailure().
void OAuth2ClientImpl::asyncGetAccessToken(const std::string& auth_code,
                                           const std::string& client_id, const std::string& secret,
                                           const std::string& cb_url, AuthType auth_type) {
  ASSERT(state_ == OAuthState::Idle);
  state_ = OAuthState::PendingAccessToken;

  const auto encoded_cb_url = Http::Utility::PercentEncoding::encode(cb_url, ":/=&?");
  Http::RequestMessagePtr request = createPostRequest();
  std::string body;
  switch (auth_type) {
  case AuthType::UrlEncodedBody:
    // Credentials travel percent-encoded in the form body.
    body = fmt::format(UrlBodyTemplateWithCredentialsForAuthCode, auth_code,
                       Http::Utility::PercentEncoding::encode(client_id, ":/=&?"),
                       Http::Utility::PercentEncoding::encode(secret, ":/=&?"), encoded_cb_url);
    break;
  case AuthType::BasicAuth:
    // Credentials travel as "Authorization: Basic base64(id:secret)".
    const auto basic_auth_token = absl::StrCat(client_id, ":", secret);
    const auto encoded_token = Base64::encode(basic_auth_token.data(), basic_auth_token.size());
    const auto basic_auth_header_value = absl::StrCat("Basic ", encoded_token);
    request->headers().appendCopy(Http::CustomHeaders::get().Authorization,
                                  basic_auth_header_value);
    body = fmt::format(UrlBodyTemplateWithoutCredentialsForAuthCode, auth_code, encoded_cb_url);
    break;
  }

  request->body().add(body);
  request->headers().setContentLength(body.length());
  ENVOY_LOG(debug, "Dispatching OAuth request for access token.");
  dispatchRequest(std::move(request));
}

// Obtain a fresh access token from a refresh token (refresh_token grant).
// Same structure as asyncGetAccessToken(), but moves the client to the
// PendingAccessTokenByRefreshToken state so failures are reported via
// onRefreshAccessTokenFailure() instead of an unauthorized response.
void OAuth2ClientImpl::asyncRefreshAccessToken(const std::string& refresh_token,
                                               const std::string& client_id,
                                               const std::string& secret, AuthType auth_type) {
  ASSERT(state_ == OAuthState::Idle);
  state_ = OAuthState::PendingAccessTokenByRefreshToken;

  Http::RequestMessagePtr request = createPostRequest();
  std::string body;
  switch (auth_type) {
  case AuthType::UrlEncodedBody:
    body = fmt::format(UrlBodyTemplateWithCredentialsForRefreshToken,
                       Http::Utility::PercentEncoding::encode(refresh_token, ":/=&?"),
                       Http::Utility::PercentEncoding::encode(client_id, ":/=&?"),
                       Http::Utility::PercentEncoding::encode(secret, ":/=&?"));
    break;
  case AuthType::BasicAuth:
    const auto basic_auth_token = absl::StrCat(client_id, ":", secret);
    const auto encoded_token = Base64::encode(basic_auth_token.data(), basic_auth_token.size());
    const auto basic_auth_header_value = absl::StrCat("Basic ", encoded_token);
    request->headers().appendCopy(Http::CustomHeaders::get().Authorization,
                                  basic_auth_header_value);
    body = fmt::format(UrlBodyTemplateWithoutCredentialsForRefreshToken,
                       Http::Utility::PercentEncoding::encode(refresh_token));
    break;
  }

  request->body().add(body);
  request->headers().setContentLength(body.length());
  ENVOY_LOG(debug, "Dispatching OAuth request for update access token by refresh token.");
  dispatchRequest(std::move(request));
}

// Send the prepared token request on the configured auth cluster. If the
// cluster is unknown, fail immediately with an unauthorized response.
void OAuth2ClientImpl::dispatchRequest(Http::RequestMessagePtr&& msg) {
  const auto thread_local_cluster = cm_.getThreadLocalCluster(uri_.cluster());
  if (thread_local_cluster != nullptr) {
    in_flight_request_ = thread_local_cluster->httpAsyncClient().send(
        std::move(msg), *this,
        Http::AsyncClient::RequestOptions().setTimeout(
            std::chrono::milliseconds(PROTOBUF_GET_MS_REQUIRED(uri_, timeout))));
  } else {
    parent_->sendUnauthorizedResponse();
  }
}

// Async HTTP callback: parse and validate the token endpoint's response,
// then notify the parent filter. Which parent callback fires depends on
// the state captured before resetting to Idle (auth-code vs refresh flow).
void OAuth2ClientImpl::onSuccess(const Http::AsyncClient::Request&,
                                 Http::ResponseMessagePtr&& message) {
  in_flight_request_ = nullptr;

  ASSERT(state_ == OAuthState::PendingAccessToken ||
         state_ == OAuthState::PendingAccessTokenByRefreshToken);
  const OAuthState oldState = state_;
  state_ = OAuthState::Idle;

  // Check that the auth cluster returned a happy response.
  const auto response_code = message->headers().Status()->value().getStringView();
  if (response_code != "200") {
    ENVOY_LOG(debug, "Oauth response code: {}", response_code);
    ENVOY_LOG(debug, "Oauth response body: {}", message->bodyAsString());
    switch (oldState) {
    case OAuthState::PendingAccessToken:
      parent_->sendUnauthorizedResponse();
      break;
    case OAuthState::PendingAccessTokenByRefreshToken:
      parent_->onRefreshAccessTokenFailure();
      break;
    default:
      PANIC("Malformed oauth client state");
    }
    return;
  }

  const std::string response_body = message->bodyAsString();

  envoy::extensions::http_filters::oauth2::OAuthResponse response;
  // JSON that fails to parse as an OAuthResponse is treated as unauthorized.
  TRY_NEEDS_AUDIT {
    MessageUtil::loadFromJson(response_body, response, ProtobufMessage::getNullValidationVisitor());
  }
  END_TRY catch (EnvoyException& e) {
    ENVOY_LOG(debug, "Error parsing response body, received exception: {}", e.what());
    ENVOY_LOG(debug, "Response body: {}", response_body);
    parent_->sendUnauthorizedResponse();
    return;
  }

  // TODO(snowp): Should this be a pgv validation instead? A more readable log
  // message might be good enough reason to do this manually?
  if (!response.has_access_token()) {
    ENVOY_LOG(debug, "No access token after asyncGetAccessToken");
    parent_->sendUnauthorizedResponse();
    return;
  }

  const std::string access_token{PROTOBUF_GET_WRAPPED_REQUIRED(response, access_token)};
  const std::string id_token{PROTOBUF_GET_WRAPPED_OR_DEFAULT(response, id_token, EMPTY_STRING)};
  const std::string refresh_token{
      PROTOBUF_GET_WRAPPED_OR_DEFAULT(response, refresh_token, EMPTY_STRING)};

  // Prefer the server-supplied expiry; fall back to the configured default.
  std::chrono::seconds expires_in = default_expires_in_;
  if (response.has_expires_in()) {
    expires_in = std::chrono::seconds{response.expires_in().value()};
  }
  if (expires_in <= 0s) {
    ENVOY_LOG(debug, "No default or explicit access token expiration after asyncGetAccessToken");
    parent_->sendUnauthorizedResponse();
    return;
  }

  switch (oldState) {
  case OAuthState::PendingAccessToken:
    parent_->onGetAccessTokenSuccess(access_token, id_token, refresh_token, expires_in);
    break;
  case OAuthState::PendingAccessTokenByRefreshToken:
    parent_->onRefreshAccessTokenSuccess(access_token, id_token, refresh_token, expires_in);
    break;
  default:
    PANIC("Malformed oauth client state");
  }
}

// Async HTTP callback for transport-level failures: reset to Idle and
// route the failure to the callback matching the in-flight flow.
void OAuth2ClientImpl::onFailure(const Http::AsyncClient::Request&,
                                 Http::AsyncClient::FailureReason) {
  ENVOY_LOG(debug, "OAuth request failed.");
  in_flight_request_ = nullptr;
  const OAuthState oldState = state_;
  state_ = OAuthState::Idle;
  switch (oldState) {
  case OAuthState::PendingAccessToken:
    parent_->sendUnauthorizedResponse();
    break;
  case OAuthState::PendingAccessTokenByRefreshToken:
    parent_->onRefreshAccessTokenFailure();
    break;
  default:
    PANIC("Malformed oauth client state");
  }
}

} // namespace Oauth2
} // namespace HttpFilters
} // namespace Extensions
} // namespace Envoy
```
Murrumbidgee is the name of a river in New South Wales, Australia. It may refer to: Murrumbidgee River Murrumbidgee River Railway Bridge Murrumbidgee Irrigation Area Murrumbidgee Red Gums Important Bird Area Murrumbidgee Co-operative Milling, a defunct flour-milling company Murrumbidgee District, a historical district Murrumbidgee Shire, a local government area Murrumbidgee, a 1977 album by The Bushwackers Murrumbidgee electorate, a current Australian Capital Territory Legislative Assembly electorate Electoral district of Murrumbidgee, a former New South Wales Legislative Assembly district
```java /* * Bytecode Analysis Framework * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * * You should have received a copy of the GNU Lesser General Public * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ package edu.umd.cs.findbugs.ba; import java.io.PrintStream; import org.apache.bcel.generic.InstructionHandle; /** * CFGPrinter class which prints dataflow values at each basic block and * instruction. */ public class DataflowCFGPrinter<Fact, AnalysisType extends DataflowAnalysis<Fact>> extends CFGPrinter { private final Dataflow<Fact, AnalysisType> dataflow; /** * Constructor. * * @param dataflow * the Dataflow object whose values should be used to annotate * the printed CFG */ public DataflowCFGPrinter(Dataflow<Fact, AnalysisType> dataflow) { super(dataflow.getCFG()); this.dataflow = dataflow; setIsForwards(dataflow.getAnalysis().isForwards()); } /* * (non-Javadoc) * * @see * edu.umd.cs.findbugs.ba.CFGPrinter#edgeAnnotate(edu.umd.cs.findbugs.ba * .Edge) */ @Override public String edgeAnnotate(Edge edge) { String edgeAnnotation = ""; try { edgeAnnotation = " " + dataflow.getAnalysis().factToString(dataflow.getAnalysis().getFactOnEdge(edge)); } catch (Throwable e) { // ignore } return edgeAnnotation; } @Override public String blockStartAnnotate(BasicBlock bb) { boolean flip = isForwards() != dataflow.getAnalysis().isForwards(); Fact fact = flip ? dataflow.getResultFact(bb) : dataflow.getStartFact(bb); return " " + dataflow.getAnalysis().factToString(fact); } @Override public String blockAnnotate(BasicBlock bb) { boolean flip = isForwards() != dataflow.getAnalysis().isForwards(); Fact fact = flip ? 
dataflow.getStartFact(bb) : dataflow.getResultFact(bb); return " " + dataflow.getAnalysis().factToString(fact); } @Override public String instructionAnnotate(InstructionHandle handle, BasicBlock bb) { try { boolean flip = isForwards() != dataflow.getAnalysis().isForwards(); Location loc = new Location(handle, bb); Fact fact = flip ? dataflow.getAnalysis().getFactAfterLocation(loc) : dataflow.getAnalysis().getFactAtLocation(loc); return " " + dataflow.getAnalysis().factToString(fact); } catch (DataflowAnalysisException e) { throw new IllegalStateException("Caught exception: " + e.toString()); } } /** * Print CFG annotated with results from given dataflow analysis. * * @param <Fact> * Dataflow fact type * @param <AnalysisType> * Dataflow analysis type * @param dataflow * dataflow driver * @param out * PrintStream to use */ public static <Fact, AnalysisType extends BasicAbstractDataflowAnalysis<Fact>> void printCFG( Dataflow<Fact, AnalysisType> dataflow, PrintStream out) { DataflowCFGPrinter<Fact, AnalysisType> printer = new DataflowCFGPrinter<>(dataflow); printer.print(out); } } ```
The election for Resident Commissioner to the United States House of Representatives took place on November 4, 2008, the same day as the larger Puerto Rican general election and the United States elections, 2008. Background The incumbent one-term Resident Commissioner (same as non-voting territorial delegate) Luis Fortuño, of the New Progressive Party (NPP/R), was retiring from his House seat to run for Governor of Puerto Rico. Pedro Pierluisi (NPP), the former Puerto Rican Secretary of Justice under former Governor Pedro Rosselló, was the favorite to succeed Fortuño over economist Alfredo Salazar (PDP). Regardless of which of the two men won, the seat would switch from Republican to Democratic hands in January, as both candidates would caucus with the Democrats. However, this seat would not have affected which party controlled the chamber. Candidates for Resident Commissioner Jessica Martínez Birriel for the Puerto Rican Independence Party Pedro Pierluisi for the New Progressive Party Alfredo Salazar for the Popular Democratic Party Carlos Alberto Velázquez López for the Puerto Ricans for Puerto Rico Party Election results See also Puerto Rican general election, 2008 2. Pierluisi was leading Salazar 51% to 34% as of October 30, 2008. https://web.archive.org/web/20081102101723/http://www.vocero.com/noticia-5761-slido_fortuo.html References United States House of Representatives Puerto Rico 2008
```xml
<?xml version="1.0" encoding="utf-8"?>
<!-- Android view animation: slides the view up from the bottom of its parent
     while fading it in, both over 300 ms.
     NOTE(review): the namespace URI was scrubbed to "path_to_url" in this
     copy; the conventional value is http://schemas.android.com/apk/res/android. -->
<set xmlns:android="path_to_url">
    <!-- Translate from one full parent-height (100%p) below up to rest (0). -->
    <translate
        android:duration="300"
        android:fromYDelta="100%p"
        android:toYDelta="0"/>
    <!-- Fade from fully transparent to fully opaque. -->
    <alpha
        android:duration="300"
        android:fromAlpha="0.0"
        android:toAlpha="1.0"/>
</set>
```
Paulin Colonna d'Istria (27 July 1905 – 4 June 1982) was a French Gendarmerie officer, awarded the Compagnon de la Libération after playing a major part in the liberation of Corsica. Early life Colonna d'Istria was born on 27 July 1905 in Petreto-Bicchisano to a career soldier. He trained at the Collège d'Autun then the officer training academy in Saint-Maixent before fighting in the Rif War until 1926. Career In 1936, Colonna d'Istria was sent to north Africa and was still there when war broke out with Germany in September 1939. In 1940, learning of the Appeal of 18 June, he joined the Resistance and was sent by Henri Giraud to relieve two of the four first agents from Operation Pearl Harbour who returned to Algeria on 14 March aboard the Casabianca. After coordinating between resistance networks, organising arms drops from the Casabianca and gaining information on enemy positions, the Resistance members Toussaint Griffi and Laurent Preziosi were ordered to meet him in the office of Colonel de Villeneuve, head of the Deuxième Bureau, on rue Charras in Algiers. They provided him with everything needed to make his mission a success, knowing that the Resistance radio operator Pierre Griffi was still in place to help him. He was secretly landed on the east coast of Corsica by a British submarine on 4 April 1943 and travelled to Niolo, where he set up a temporary command post. Until the island's liberation was completed on 4 October that year, he coordinated and commanded the Resistance movements around the National Front with the aim of enabling landings without imposing a particular political viewpoint on the island's liberation. The National Front rose from 2000 to 12000 volunteers that September and was able to create more than fifty parachute landing zones for arms drops, despite repression by Italian occupying troops. From 9 June to 3 July he returned to Algiers, returning on the Casabianca with 12 tonnes of weapons and other materiel. 
He joined the National Front's departmental committee and organised the preparatory campaign for the landings. A Corsican uprising began on 8 September, the date of the Cassibile Armistice between the western Allies and the Italians, leading to total liberation three weeks later. In November 1943 Colonna d'Istria joined the staff of the Free French forces in the United Kingdom and entered Paris on 25 August 1944 as part of General Leclerc's 2nd Armoured Division. After the war, at the rank of lieutenant-colonel, he returned to the Gendarmerie. In 1951 he was elected parliamentary deputy for Algiers, but was soon dismissed from that role. He was made a général de brigade in 1956, taking command of the Gendarmerie in the French Occupied Zone of Germany. His active service ended in 1963. Personal life Colonna d'Istria died in Toulon on 4 June 1982 and is buried in Marseille. Honours and legacy The 116th class of the prestigious École des Officiers de la gendarmerie (EOGN) was named after him in 2011. Honours Grand officier de la Légion d'honneur Compagnon de la Libération – decree of 16 August 1944 Croix de guerre 1939-1945 (2 citations) Médaille coloniale (with "Maroc" clasp) Croix des services militaires volontaires 3ème classe Médaille d'or de l'éducation physique et des sports Peace of Morocco Medal (Spain) Distinguished Service Order (United Kingdom) References 1905 births 1982 deaths People from Corse-du-Sud Rally of the French People politicians Deputies of the 2nd National Assembly of the French Fourth Republic Free French military personnel of World War II Companions of the Liberation Grand Officers of the Legion of Honour Recipients of the Croix de Guerre 1939–1945 (France) Companions of the Distinguished Service Order
```vue
<template>
  <!-- Consumes slot-comp's scoped slot, destructuring { a, b } from the slot
       props. NOTE(review): `c` is not declared in the slot scope or the
       component below — presumably this file is a fixture exercising
       undefined-property detection in templates; confirm before "fixing". -->
  <slot-comp v-slot="{ a, b }">
    {{ a + b + c }}
  </slot-comp>
</template>
<script lang="ts">
import Vue from 'vue'
export default Vue.extend({
})
</script>
```
Colorado Student Space Weather Experiment (CSSWE) was the sixth National Science Foundation sponsored CubeSat mission. It was built by students at the University of Colorado at Boulder with advising from professionals at the Laboratory for Atmospheric and Space Physics. The CSSWE mission was a joint effort by the University of Colorado's Department of Aerospace Engineering Sciences and Laboratory for Atmospheric and Space Physics. The mission principal investigator was Prof. Xinlin Li, and the Co-PIs were Prof. Scott Palo and Dr. Shri Kanekal. The project manager was Dr. Lauren Blum, the system engineer was Dr. David Gerhardt, and the instrument scientist was Dr. Quintin Schiller. CSSWE launched on September 13, 2012, on an Atlas V rocket by the United Launch Alliance on ELaNa-VI as part of NASA's CubeSat Launch Initiative (CSLI). The CSSWE team released its science products to the public for download on NASA's Coordinated Data Analysis Web Site (CDAWeb). As of December 22, 2014, CSSWE showed severe battery degradation, likely due to pushing the battery thousands of cycles beyond the battery's design specs. As a result, CSSWE cannot retain enough power to receive or transmit data. Mission Objective CSSWE's mission objective is to study space weather from a near-Earth orbit (480 km x 780 km). Specifically, CSSWE works in conjunction with concurrent missions (such as the Van Allen Probes, BARREL, and SAMPEX) to address the following questions: 1) how solar flare location, magnitude, and frequency relate to the timing, duration, and energy spectrum of solar energetic particles (SEPs) reaching Earth, and 2) how the spectrum and dynamics of Earth's radiation belt electrons evolve. Science Instrument CSSWE's science instrument, the Relativistic Electron and Proton Telescope integrated little experiment (REPTile), is the only science instrument aboard and meets the mission objectives.
It is a scaled-down version of the Relativistic Electron and Proton Telescope (REPT) instrument, which is part of the Energetic Particle, Composition, and Thermal Plasma (ECT) Instrument Suite on board the Van Allen Probes. REPTile fulfills the mission objectives by measuring electrons from 0.58 to >3.8 Megaelectronvolts (MeV) and protons from 8 to 40 MeV. Also on the CubeSat is an onboard magnetometer to provide knowledge of spacecraft and instrument orientation with respect to Earth's magnetic field. Pre-Flight Testing CSSWE underwent the same rigorous testing that all space-based assets at LASP do. In addition to component and subsystem level testing, the spacecraft underwent numerous system level tests. It passed the thermal vacuum chamber test, in which 11 orbital cycles of the spacecraft were simulated in vacuum by increasing and decreasing the spacecraft temperature to reproduce thermal models which predict actual on-orbit temperatures. The first few hours of the mission were reproduced by simulating launch (in which the deployment switch is released, initiating automated commissioning phase) from a mesa nearby the LASP ground station. CSSWE passed this test by completing the initial commissioning phase, deploying its antenna, and establishing contact with the LASP ground station. Orbital attitude tests were performed as well, including Helmholtz cage and error ellipse tests. Launch CSSWE was originally scheduled to launch on August 2, 2012 aboard the National Reconnaissance Office Launch-36 (NROL-36). However, the launch was delayed three times to provide additional time for resolution of a range instrumentation issue, according to the United Launch Alliance official statement. The Atlas V 401 eventually launched on September 13, 2012 from Vandenberg AFB Space Launch Complex 3. The primary payload aboard NROL-36 was a classified NRO payload, so no spacecraft or orbit information was provided. 
However, there were 11 CubeSats on board the rocket as secondary payloads. The launch vehicle delivered the CubeSats into a 480x780 km orbit with an inclination of 65 degrees. The CubeSats were carried in eight PPOD dispensers attached to the end of the Centaur rocket via the Aft Bulkhead Carrier, which replaced an unnecessary Helium tank. Four of the CubeSats were launched as part of the NASA's Educational Launch of Nanosatellites (ELaNa) program - CSSWE (University of Colorado - Boulder), CINEMA 1 (University of California - Berkeley et al.), CXBN (Morehead State University), and CP5 (California Polytechnic University). The remaining seven were Aeneas (operated by the University of Southern California), two SMDC-ONE (US Army), STARE-A (Lawrence Livermore National Laboratory), and three AeroCube-4 (Aerospace Corporation). On Orbit Success The spacecraft uses a measuring tape as an antenna to communicate with ground stations. CSSWE was first heard beaconing telemetry packets by amateur radio operator call sign DK3WN almost exactly two hours after deployment from the PPOD, overcoming its first major hurdle. The spacecraft completed science commissioning and was commanded into full science mode 22 days later on October 5. Full mission success occurred on January 5, 2013 after three months of science data. The CSSWE mission ended in December 2014 due to battery degradation. The first science results and updated science results were presented, respectively, at the 2012 and 2013 Fall American Geophysical Union in San Francisco, CA. and published in peer-reviewed Journals such as Geophysical Review Letters, the Journal of Geophysical Research, and Science. CSSWE now has 24 associated peer-reviewed scientific or engineering journal publications, including a paper published in Nature on 13 December 2017. References CubeSats Student satellites Spacecraft launched in 2012
Leucanopsis dallipa is a moth of the family Erebidae. It was described by E. Dukinfield Jones in 1908. It is found in Brazil and Paraguay. References dallipa Moths described in 1908
```html <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "path_to_url"> <html xmlns="path_to_url"> <head> <meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/> <title>Index</title> <link href="tabs.css" rel="stylesheet" type="text/css"/> <link href="cmsis.css" rel="stylesheet" type="text/css" /> <link href="navtree.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="jquery.js"></script> <script type="text/javascript" src="resize.js"></script> <script type="text/javascript" src="navtree.js"></script> <script type="text/javascript"> $(document).ready(initResizable); </script> <link href="search/search.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="search/search.js"></script> <script type="text/javascript"> $(document).ready(function() { searchBox.OnSelectItem(0); }); </script> </head> <body> <div id="top"><!-- do not remove this div! --> <div id="titlearea"> <table cellspacing="0" cellpadding="0"> <tbody> <tr style="height: 46px;"> <td id="proglogo"><img alt="CMSIS Logo" src="CMSIS_Logo_Final.png"></td> <td style="padding-left: 0.5em;"> <div id="projectname">CMSIS-RTOS &#160;<span id="projectnumber">Version 1.00</span> </div> <div id="projectbrief">CMSIS-RTOS API: Generic RTOS interface for Cortex-M processor-based devices.</div> </td> </tr> </tbody> </table> </div> <div id="CMSISnav" class="tabs1"> <ul class="tablist"> <li><a href="../../General/html/index.html"><span>CMSIS</span></a></li> <li><a href="../../Core/html/index.html"><span>CORE</span></a></li> <li><a href="../../DSP/html/index.html"><span>DSP</span></a></li> <li class="current"><a href="../../RTOS/html/index.html"><span>RTOS API</span></a></li> <li><a href="../../SVD/html/index.html"><span>SVD</span></a></li> </ul> </div> <!-- Generated by Doxygen 1.7.5.1 --> <script type="text/javascript"> var searchBox = new SearchBox("searchBox", "search",false,'Search'); </script> <div id="navrow1" class="tabs"> <ul class="tablist"> 
<li><a href="index.html"><span>Main&#160;Page</span></a></li> <li><a href="pages.html"><span>Usage&#160;and&#160;Description</span></a></li> <li><a href="modules.html"><span>Reference</span></a></li> <li> <div id="MSearchBox" class="MSearchBoxInactive"> <span class="left"> <img id="MSearchSelect" src="search/mag_sel.png" onmouseover="return searchBox.OnSearchSelectShow()" onmouseout="return searchBox.OnSearchSelectHide()" alt=""/> <input type="text" id="MSearchField" value="Search" accesskey="S" onfocus="searchBox.OnSearchFieldFocus(true)" onblur="searchBox.OnSearchFieldFocus(false)" onkeyup="searchBox.OnSearchFieldChange(event)"/> </span><span class="right"> <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a> </span> </div> </li> </ul> </div> <div id="navrow3" class="tabs2"> <ul class="tablist"> <li><a href="globals.html"><span>All</span></a></li> <li class="current"><a href="globals_func.html"><span>Functions</span></a></li> <li><a href="globals_type.html"><span>Typedefs</span></a></li> <li><a href="globals_enum.html"><span>Enumerations</span></a></li> <li><a href="globals_eval.html"><span>Enumerator</span></a></li> <li><a href="globals_defs.html"><span>Defines</span></a></li> </ul> </div> <div id="navrow4" class="tabs3"> <ul class="tablist"> <li><a href="#index_o"><span>o</span></a></li> </ul> </div> </div> <div id="side-nav" class="ui-resizable side-nav-resizable"> <div id="nav-tree"> <div id="nav-tree-contents"> </div> </div> <div id="splitbar" style="-moz-user-select:none;" class="ui-resizable-handle"> </div> </div> <script type="text/javascript"> initNavTree('globals.html',''); </script> <div id="doc-content"> <div class="contents"> &#160; <h3><a class="anchor" id="index_o"></a>- o -</h3><ul> <li>osDelay() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___wait.html#ga02e19d5e723bfb06ba9324d625162255">cmsis_os.h</a> </li> <li>osKernelRunning() : <a class="el" 
href="group___c_m_s_i_s___r_t_o_s___kernel_ctrl.html#ga3b571de44cd3094c643247a7397f86b5">cmsis_os.h</a> </li> <li>osKernelStart() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___kernel_ctrl.html#ga2865f10e5030a67d93424e32134881c8">cmsis_os.h</a> </li> <li>osMailAlloc() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___mail.html#gadf5ce811bd6a56e617e902a1db6c2194">cmsis_os.h</a> </li> <li>osMailCAlloc() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___mail.html#ga8fde74f6fe5b9e88f75cc5eb8f2124fd">cmsis_os.h</a> </li> <li>osMailCreate() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___mail.html#gae5313fdeb9b8a0791e2affb602a182f0">cmsis_os.h</a> </li> <li>osMailFree() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___mail.html#ga27c1060cf21393f96b4fd1ed1c0167cc">cmsis_os.h</a> </li> <li>osMailGet() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___mail.html#gac6ad7e6e7d6c4a80e60da22c57a42ccd">cmsis_os.h</a> </li> <li>osMailPut() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___mail.html#ga485ef6f81854ebda8ffbce4832181e02">cmsis_os.h</a> </li> <li>osMessageCreate() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___message.html#gad2d08f3d9250d19b045c83762b7b599f">cmsis_os.h</a> </li> <li>osMessageGet() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___message.html#ga6c6892b8f2296cca6becd57ca2d7e1ae">cmsis_os.h</a> </li> <li>osMessagePut() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___message.html#gac0dcf462fc92de8ffaba6cc004514a6d">cmsis_os.h</a> </li> <li>osMutexCreate() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___mutex_mgmt.html#ga6356ddcf2e34ada892c57f13b284105e">cmsis_os.h</a> </li> <li>osMutexRelease() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___mutex_mgmt.html#ga006e4744d741e8e132c3d5bbc295afe1">cmsis_os.h</a> </li> <li>osMutexWait() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___mutex_mgmt.html#ga5e1752b73f573ee015dbd9ef1edaba13">cmsis_os.h</a> </li> <li>osPoolAlloc() : <a class="el" 
href="group___c_m_s_i_s___r_t_o_s___pool_mgmt.html#gaa0b2994f1a866c19e0d11e6e0d44f543">cmsis_os.h</a> </li> <li>osPoolCAlloc() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___pool_mgmt.html#ga9f129fcad4730fbd1048ad4fa262f36a">cmsis_os.h</a> </li> <li>osPoolCreate() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___pool_mgmt.html#ga2e265bdc4fcd4f001e34c9321be16d6f">cmsis_os.h</a> </li> <li>osPoolFree() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___pool_mgmt.html#ga4a861e9c469c9d0daf5721bf174f8e54">cmsis_os.h</a> </li> <li>osSemaphoreCreate() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___semaphore_mgmt.html#ga5ede9e5c5c3747cae928ff4f9d13e76d">cmsis_os.h</a> </li> <li>osSemaphoreRelease() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___semaphore_mgmt.html#gab108914997c49e14d8ff1ae0d1988ca0">cmsis_os.h</a> </li> <li>osSemaphoreWait() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___semaphore_mgmt.html#gacc15b0fc8ce1167fe43da33042e62098">cmsis_os.h</a> </li> <li>osSignalClear() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___signal_mgmt.html#gafcb3a9bd9a3c4c99f2f86d5d33faffd8">cmsis_os.h</a> </li> <li>osSignalGet() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___signal_mgmt.html#ga071c0d1c3bdcac9e75fad0b25a5cd8f1">cmsis_os.h</a> </li> <li>osSignalSet() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___signal_mgmt.html#ga029340f7007656c06fdb8eeeae7b056e">cmsis_os.h</a> </li> <li>osSignalWait() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___signal_mgmt.html#ga38860acda96df47da6923348d96fc4c9">cmsis_os.h</a> </li> <li>osThreadCreate() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___thread_mgmt.html#ga9e8ce62ab5f8c169d025af8c52e715db">cmsis_os.h</a> </li> <li>osThreadGetId() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___thread_mgmt.html#gab1df2a28925862ef8f9cf4e1c995c5a7">cmsis_os.h</a> </li> <li>osThreadGetPriority() : <a class="el" 
href="group___c_m_s_i_s___r_t_o_s___thread_mgmt.html#ga4299d838978bc2aae5e4350754e6a4e9">cmsis_os.h</a> </li> <li>osThreadSetPriority() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___thread_mgmt.html#ga0dfb90ccf1f6e4b54b9251b12d1cbc8b">cmsis_os.h</a> </li> <li>osThreadTerminate() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___thread_mgmt.html#gaea135bb90eb853eff39e0800b91bbeab">cmsis_os.h</a> </li> <li>osThreadYield() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___thread_mgmt.html#gaf13a667493c5d629a90c13e113b99233">cmsis_os.h</a> </li> <li>osTimerCreate() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___timer_mgmt.html#ga12cbe501cd7f1aec940cfeb4d8c73c89">cmsis_os.h</a> </li> <li>osTimerStart() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___timer_mgmt.html#ga27a797a401b068e2644d1125f22a07ca">cmsis_os.h</a> </li> <li>osTimerStop() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___timer_mgmt.html#ga58f36b121a812936435cacc6e1e0e091">cmsis_os.h</a> </li> <li>osWait() : <a class="el" href="group___c_m_s_i_s___r_t_o_s___wait.html#gaad5030efe48c1ae9902502e73873bc70">cmsis_os.h</a> </li> </ul> </div> </div> <div id="nav-path" class="navpath"> <ul> <!-- window showing the filter options --> <div id="MSearchSelectWindow" onmouseover="return searchBox.OnSearchSelectShow()" onmouseout="return searchBox.OnSearchSelectHide()" onkeydown="return searchBox.OnSearchSelectKey(event)"> <a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(0)"><span class="SelectionMark">&#160;</span>All</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(1)"><span class="SelectionMark">&#160;</span>Data Structures</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(2)"><span class="SelectionMark">&#160;</span>Files</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(3)"><span class="SelectionMark">&#160;</span>Functions</a><a class="SelectItem" 
href="javascript:void(0)" onclick="searchBox.OnSelectItem(4)"><span class="SelectionMark">&#160;</span>Variables</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(5)"><span class="SelectionMark">&#160;</span>Typedefs</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(6)"><span class="SelectionMark">&#160;</span>Enumerations</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(7)"><span class="SelectionMark">&#160;</span>Enumerator</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(8)"><span class="SelectionMark">&#160;</span>Defines</a></div> <!-- iframe showing the search results (closed by default) --> <div id="MSearchResultsWindow"> <iframe src="javascript:void(0)" frameborder="0" name="MSearchResults" id="MSearchResults"> </iframe> </div> <li class="footer">Generated on Wed Mar 28 2012 15:38:10 for CMSIS-RTOS by ARM Ltd. All rights reserved. <!-- <a href="path_to_url"> <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.7.5.1 </li> --> </li> </ul> </div> </body> </html> ```
XHEJU-FM is a community radio station on 95.3 FM in Ejutla de Crespo, Oaxaca. It is known as La Ejuteca Radio and owned by the civil association Colectivo Oaxaqueño para la Difusión de la Cultura y las Artes, A.C. History XHEJU began broadcasting in 2010 as a pirate on 98.1 MHz. The station received its concession in December 2016 and in March 2018, 15 months later, finally moved to its newly assigned frequency of 95.3. References Radio stations in Oaxaca Community radio stations in Mexico Former pirate radio stations Radio stations established in 2010
The 26th Dalmatia Division (Serbo-Croatian Latin: Dvadesetšesta dalmatinska divizija) was a Yugoslav Partisan division formed on 8 October 1943. It was formed from the 11th, 12th and 13th Dalmatia Brigades. In January 1944, the 13th Dalmatia Brigade left the division while the 1st Dalmatia Brigade joined it. The 3rd Overseas Brigade joined the division in March 1944. The division mostly operated in the Southern Dalmatia where it fought against parts of the 2nd Panzer Army, 118th Jäger Division, 7th SS Division, and 369th Infantry Division. References Divisions of the Yugoslav Partisans Military units and formations established in 1943
'use strict';

/**
 * Builds a ReadableStream that synchronously enqueues every entry of the
 * given array, in order, and then closes.
 *
 * @param {Array} array - the values to emit as stream chunks.
 * @returns {ReadableStream} a stream yielding the array's entries.
 */
function readableStreamFromArray(array) {
  const underlyingSource = {
    start(controller) {
      for (const entry of array) {
        controller.enqueue(entry);
      }
      controller.close();
    },
  };
  return new ReadableStream(underlyingSource);
}
```objective-c
/**
 * This file is part of ORB-SLAM2.
 *
 * For more information see <path_to_url
 *
 * ORB-SLAM2 is free software: you can redistribute it and/or modify
 * (at your option) any later version.
 *
 * ORB-SLAM2 is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *
 * along with ORB-SLAM2. If not, see <path_to_url
 */

#ifndef VIEWERAR_H
#define VIEWERAR_H

#include <mutex>
#include <opencv2/core/core.hpp>
#include <pangolin/pangolin.h>
#include <string>
#include"../../../include/System.h"

namespace ORB_SLAM2
{

// A 3D plane estimated from a set of map points, used as the anchor surface
// on which virtual AR content is drawn.
class Plane
{
public:
    // Fits a plane to the given map points; Tcw is the camera pose at the
    // time of observation (stored in mTcw, used to orient the normal).
    Plane(const std::vector<MapPoint*> &vMPs, const cv::Mat &Tcw);

    // Builds a plane directly from a normal (nx, ny, nz) and an origin
    // point (ox, oy, oz), bypassing estimation from map points.
    Plane(const float &nx, const float &ny, const float &nz,
          const float &ox, const float &oy, const float &oz);

    // Re-estimates the plane from mvMPs — presumably called after the map
    // points have been refined by the SLAM backend (confirm in the .cc).
    void Recompute();

    //normal
    cv::Mat n;
    //origin
    cv::Mat o;
    //arbitrary orientation along normal
    float rang;
    //transformation from world to the plane
    cv::Mat Tpw;
    // Tpw exposed as an OpenGL matrix for Pangolin rendering.
    pangolin::OpenGlMatrix glTpw;
    //MapPoints that define the plane
    std::vector<MapPoint*> mvMPs;
    //camera pose when the plane was first observed (to compute normal direction)
    cv::Mat mTcw, XC;
};

// Pangolin-based augmented-reality viewer. Receives the latest camera image,
// pose, and tracked features from the SLAM system (SetImagePose) and renders
// the image with virtual content overlaid in its own thread (Run).
class ViewerAR
{
public:
    ViewerAR();

    // Sets the expected frame rate; mT caches the frame period in ms.
    void SetFPS(const float fps){
        mFPS = fps;
        mT=1e3/fps;
    }

    // Wires in the SLAM system whose tracking output drives this viewer.
    void SetSLAM(ORB_SLAM2::System* pSystem){
        mpSystem = pSystem;
    }

    // Main thread function.
    void Run();

    // Pinhole intrinsics (focal lengths fx, fy and principal point cx, cy)
    // used when drawing AR content over the camera image.
    void SetCameraCalibration(const float &fx_, const float &fy_, const float &cx_, const float &cy_){
        fx = fx_; fy = fy_; cx = cx_; cy = cy_;
    }

    // Producer side: stores the latest image, camera pose, tracking status,
    // keypoints and map points. Presumably guarded by mMutexPoseImage in the
    // implementation — confirm in the .cc.
    void SetImagePose(const cv::Mat &im, const cv::Mat &Tcw, const int &status,
                      const std::vector<cv::KeyPoint> &vKeys, const std::vector<MapPoint*> &vMPs);

    // Consumer side: retrieves the data stored by SetImagePose.
    void GetImagePose(cv::Mat &im, cv::Mat &Tcw, int &status,
                      std::vector<cv::KeyPoint> &vKeys, std::vector<MapPoint*> &vMPs);

private:

    //SLAM
    ORB_SLAM2::System* mpSystem;

    // Draws a tracking-status overlay (status code + localization-mode flag)
    // onto the image.
    void PrintStatus(const int &status, const bool &bLocMode, cv::Mat &im);
    // Renders text `s` onto the image in color (r, g, b); defaults to black.
    void AddTextToImage(const std::string &s, cv::Mat &im, const int r=0, const int g=0, const int b=0);
    // Applies the given world-to-camera pose to the render state.
    void LoadCameraPose(const cv::Mat &Tcw);
    // Uploads the image into a GL texture and draws it as the background.
    void DrawImageTexture(pangolin::GlTexture &imageTexture, cv::Mat &im);
    // Draws a virtual cube of the given size at (x, y, z); defaults to origin.
    void DrawCube(const float &size, const float x=0, const float y=0, const float z=0);
    // Draws a grid of ndivs x ndivs cells, each ndivsize across.
    void DrawPlane(int ndivs, float ndivsize);
    // Same, but placed on the detected plane pPlane.
    void DrawPlane(Plane* pPlane, int ndivs, float ndivsize);
    // Marks the tracked keypoints (those with an associated MapPoint) on im.
    void DrawTrackedPoints(const std::vector<cv::KeyPoint> &vKeys, const std::vector<MapPoint*> &vMPs, cv::Mat &im);

    // Iteratively fits a Plane to the current map points (default 50
    // iterations); returns nullptr-style failure semantics — confirm in .cc.
    Plane* DetectPlane(const cv::Mat Tcw, const std::vector<MapPoint*> &vMPs, const int iterations=50);

    // frame rate
    float mFPS, mT;
    // Camera intrinsics (see SetCameraCalibration).
    float fx,fy,cx,cy;

    // Last processed image and computed pose by the SLAM
    std::mutex mMutexPoseImage;
    cv::Mat mTcw;
    cv::Mat mImage;
    int mStatus;
    std::vector<cv::KeyPoint> mvKeys;
    std::vector<MapPoint*> mvMPs;
};

}

#endif // VIEWERAR_H
```
Lake Emma Township is a township in Hubbard County, Minnesota, United States. The population was 900 at the 2000 census. Geography According to the United States Census Bureau, the township has a total area of , of which is land and (24.31%) is water. Demographics As of the census of 2000, there were 900 people, 379 households, and 300 families residing in the township. The population density was . There were 973 housing units at an average density of . The racial makeup of the township was 99.11% White, 0.22% Native American, 0.11% from other races, and 0.56% from two or more races. Hispanic or Latino of any race were 0.56% of the population. There were 379 households, out of which 24.0% had children under the age of 18 living with them, 72.8% were married couples living together, 3.7% had a female householder with no husband present, and 20.6% were non-families. 18.2% of all households were made up of individuals, and 6.9% had someone living alone who was 65 years of age or older. The average household size was 2.37 and the average family size was 2.66. In the township the population was spread out, with 19.8% under the age of 18, 4.8% from 18 to 24, 20.3% from 25 to 44, 34.8% from 45 to 64, and 20.3% who were 65 years of age or older. The median age was 48 years. For every 100 females, there were 99.6 males. For every 100 females age 18 and over, there were 96.2 males. The median income for a household in the township was $45,563, and the median income for a family was $49,375. Males had a median income of $35,000 versus $22,188 for females. The per capita income for the township was $25,380. About 4.3% of families and 5.3% of the population were below the poverty line, including 7.0% of those under age 18 and 2.2% of those age 65 or over. References Townships in Hubbard County, Minnesota Townships in Minnesota
"Welcome to My Life" is a 2004 song by Simple Plan. Welcome to My Life may also refer to: Welcome to My Life (Jonathan Fagerlund song), a 2009 song by Swedish singer Jonathan Fagerlund from his album Welcome to My World Welcome to My Life, a 2009 song by Sunrise Avenue "Welcome to My Life" (Empire of the Sun song), a 2016 song by the Australian electronic music duo Empire of the Sun Welcome to My Life (film), a 2017 documentary about Chris Brown Welcome to My Life, a 2015 pilot by Elizabeth Ito for Cartoon Network
```javascript module.exports = { trailingComma: "all", proseWrap: "always", overrides: [ { files: "CRYSTAL_FLOW.md", options: { printWidth: 120, }, }, { // Due to the following Prettier issue, it's unsafe to proseWrap MDX currently // path_to_url files: ["**/website/**/*.mdx", "**/website/**/*.md"], options: { proseWrap: "preserve", }, }, { files: "grafast/grafast/vendor/graphql-js/**", options: { singleQuote: true, }, }, ], }; ```
```shell
#!/bin/sh
# Container entrypoint.
#
# With "watch" as the first argument, periodically appends a timestamp to
# /data/filefromcontainer.txt as a user whose uid matches the owner of /data
# (so files created in the bind mount keep the host owner's uid).
# With any other arguments, simply execs them.

echo "$1"

# NOTE: '==' is a bashism; POSIX sh test uses '='.
if [ "$1" = "watch" ]; then
    data_uid=$(stat -c '%u' /data)
    run_with_user=www-data

    # Create www-data with the /data owner's uid unless that uid already
    # exists in /etc/passwd (anchored match so uid 33 does not match 133).
    if ! cut -d: -f3 /etc/passwd | grep -q "^${data_uid}\$"; then
        adduser -u "$data_uid" -D www-data
    else
        # Reuse the existing account that already owns this uid.
        # (Plain assignment: 'var=value', never '$var=value'.)
        run_with_user=$(awk -F: "/:${data_uid}:/{print \$1}" /etc/passwd)
    fi

    while true; do
        su "$run_with_user" -c 'date >> /data/filefromcontainer.txt'
        sleep 5
    done
fi

exec "$@"
```
```java // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package org.chromium.chrome.browser; import android.app.Activity; import org.chromium.chrome.browser.infobar.InfoBarIdentifier; import org.chromium.chrome.browser.infobar.SimpleConfirmInfoBarBuilder; import org.chromium.chrome.browser.tab.Tab; import org.chromium.ui.base.ActivityWindowAndroid; /** * The window that has access to the main activity and is able to create and receive intents, * and show error messages. */ public class ChromeWindow extends ActivityWindowAndroid { /** * Creates Chrome specific ActivityWindowAndroid. * @param activity The activity that owns the ChromeWindow. */ public ChromeWindow(ChromeActivity activity) { super(activity); } /** * Shows an infobar error message overriding the WindowAndroid implementation. */ @Override protected void showCallbackNonExistentError(String error) { Activity activity = getActivity().get(); // We can assume that activity is a ChromeActivity because we require one to be passed in // in the constructor. Tab tab = activity != null ? ((ChromeActivity) activity).getActivityTab() : null; if (tab != null) { SimpleConfirmInfoBarBuilder.create( tab, InfoBarIdentifier.CHROME_WINDOW_ERROR, error, false); } else { super.showCallbackNonExistentError(error); } } } ```
Principality of Kastrioti () was one of the Albanian principalities during the Late Middle Ages. It was formed by Pal Kastrioti, who ruled it until 1407, after which his son, Gjon Kastrioti, ruled until his death in 1437; it was then ruled by the national hero of Albania, Skanderbeg. Formation Gjon Kastrioti originally had only two small villages. In a short time, Gjon Kastrioti managed to expand his lands so as to become the undisputed lord of Central Albania. He married Voisava Tripalda, who bore 5 daughters, Mara, Jela, Angjelina, Vlajka, and Mamica, and 4 sons, Reposh, Stanisha, Kostandin and Gjergj Kastrioti (who would come to be known as Skanderbeg). Gjon Kastrioti was among those who opposed the early incursion of the Ottoman sultan Bayezid I; however, his resistance was ineffectual. The Sultan, having accepted his submission, obliged him to pay tribute to ensure the fealty of local rulers, and to send his sons, among them Gjergj Kastrioti, to the Sultan's court as hostages. After his conversion to Islam, the young Skanderbeg attended military school in Edirne and led many victorious battles for the Ottoman Empire. For his military victories, he received the title Arnavutlu İskender Bey, (Albanian: Skënderbe shqiptari, English: Lord Alexander, the Albanian) comparing Kastrioti's military brilliance to that of Alexander the Great. Rise of Skanderbeg Skanderbeg was distinguished as one of the best officers in several Ottoman campaigns both in Asia Minor and in Europe, and the Sultan appointed him General. He fought against Greeks, Serbs and Hungarians, and some sources say that he used to maintain secret links with Ragusa, Venice, Ladislaus V of Hungary, and Alfonso I of Naples. Sultan Murat II gave him the title Vali, which made him General Governor. On November 28, 1443, Skanderbeg saw his opportunity to rebel during the Battle of Niš, fought against the Hungarians led by John Hunyadi as part of the Crusade of Varna.
He switched sides along with 300 other Albanians serving in the Ottoman army. After a long trek to Albania he eventually captured Krujë by forging a letter from the Sultan to the Governor of Krujë, which granted him control of the territory. After capturing the castle, Skanderbeg abjured Islam and proclaimed himself the avenger of his family and country. He raised a flag showing a double-headed eagle, an ancient symbol used by various cultures of the Balkans (especially the Byzantine Empire), which later became the Albanian flag. The Governor was killed as he was returning to Edirne, unaware of Skanderbeg's intentions. Skanderbeg allied with George Arianite (born Gjergj Arianit Komneni) and married his daughter Donika (born Marina Donika Arianiti). League of Lezhë Following the capture of Krujë, Skanderbeg managed to bring together all the Albanian princes in the town of Lezhë. Historian Edward Gibbon writes that: With this support, Skanderbeg built fortresses and organized a mobile defense force that forced the Ottomans to disperse their troops, leaving them vulnerable to the hit-and-run tactics of the Albanians. He managed to create the League of Lezhë, a federation of all Albanian Principalities. The main members of the league were the Arianiti, Balšić, Dukagjini, Muzaka, Spani, Thopia and Crnojević noble families. For 25 years, from 1443–1468, Skanderbeg's 10,000-man army marched through Ottoman territory, winning against consistently larger and better supplied Ottoman forces. Threatened by Ottoman advances in their homeland, Hungary, and later Naples and Venice – their former enemies – provided the financial backbone and support for Skanderbeg's army. By 1450 it had certainly ceased to function as originally intended, and only the core of the alliance under Scanderbeg and Araniti Comino continued to fight on. The League of Lezhë first distinguished itself under Skanderbeg at the Battle of Torvioll, where he defeated the Ottoman forces.
Skanderbeg's victory was praised throughout the rest of Europe. The battle of Torvioll thus opened up the quarter-century war between Skanderbeg's Albania and the Ottoman Empire. On 14 May 1450, an Ottoman army, larger than any previous force encountered by Skanderbeg or his men, stormed and overwhelmed the castle of the city of Kruja, capital of the Principality of Kastrioti. This city was particularly symbolic to Skanderbeg because he had been appointed suba of Kruja in 1438 by the Ottomans. The fighting lasted four months and over one thousand Albanians lost their lives while over 20,000 Ottomans died in battle. Even so, the Ottoman forces were unable to capture the city and had no choice but to retreat before winter set in. In June 1446, Mehmed II, known as "the conqueror", led an army of 150,000 soldiers back to Kruja but failed to capture the castle. Skanderbeg's death in 1468 did not end the struggle for independence, and fighting continued until 1481, under Lekë Dukagjini, when the Albanian lands were forced to succumb to the Ottoman armies. See also House of Kastrioti League of Lezhë Albanian principalities History of Albania References "History of Albanian People" Albanian Academy of Science. 1444 disestablishments States and territories established in 1389 Albanian principalities Former countries in the Balkans Vassal states of the Ottoman Empire Former monarchies
```c /* * linux/drivers/ide/ide-disk.c Version 1.18 Mar 05, 2003 * * Andre Hedrick <andre@linux-ide.org> */ /* * Mostly written by Mark Lord <mlord@pobox.com> * and Gadi Oxman <gadio@netvision.net.il> * and Andre Hedrick <andre@linux-ide.org> * * This is the IDE/ATA disk driver, as evolved from hd.c and ide.c. * * Version 1.00 move disk only code from ide.c to ide-disk.c * support optional byte-swapping of all data * Version 1.01 fix previous byte-swapping code * Version 1.02 remove ", LBA" from drive identification msgs * Version 1.03 fix display of id->buf_size for big-endian * Version 1.04 add /proc configurable settings and S.M.A.R.T support * Version 1.05 add capacity support for ATA3 >= 8GB * Version 1.06 get boot-up messages to show full cyl count * Version 1.07 disable door-locking if it fails * Version 1.08 fixed CHS/LBA translations for ATA4 > 8GB, * process of adding new ATA4 compliance. * fixed problems in allowing fdisk to see * the entire disk. * Version 1.09 added increment of rq->sector in ide_multwrite * added UDMA 3/4 reporting * Version 1.10 request queue changes, Ultra DMA 100 * Version 1.11 added 48-bit lba * Version 1.12 adding taskfile io access method * Version 1.13 added standby and flush-cache for notifier * Version 1.14 added acoustic-wcache * Version 1.15 convert all calls to ide_raw_taskfile * since args will return register content. * Version 1.16 added suspend-resume-checkpower * Version 1.17 do flush on standy, do flush on ATA < ATA6 * fix wcache setup. 
*/

#define IDEDISK_VERSION	"1.18"

#undef REALLY_SLOW_IO		/* most systems can safely undef this */

//#define DEBUG

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/delay.h>

#define _IDE_DISK

#include <linux/ide.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>

/* Per-disk driver object: ties a drive to its gendisk and is refcounted. */
struct ide_disk_obj {
	ide_drive_t	*drive;
	ide_driver_t	*driver;
	struct gendisk	*disk;
	struct kref	kref;
};

/* Serializes kref get/put against the release path. */
static DECLARE_MUTEX(idedisk_ref_sem);

#define to_ide_disk(obj) container_of(obj, struct ide_disk_obj, kref)

#define ide_disk_g(disk) \
	container_of((disk)->private_data, struct ide_disk_obj, driver)

/* Take a reference on the ide_disk_obj behind @disk (NULL if none). */
static struct ide_disk_obj *ide_disk_get(struct gendisk *disk)
{
	struct ide_disk_obj *idkp = NULL;

	down(&idedisk_ref_sem);
	idkp = ide_disk_g(disk);
	if (idkp)
		kref_get(&idkp->kref);
	up(&idedisk_ref_sem);

	return idkp;
}

static void ide_disk_release(struct kref *);

/* Drop a reference; the final put frees via ide_disk_release(). */
static void ide_disk_put(struct ide_disk_obj *idkp)
{
	down(&idedisk_ref_sem);
	kref_put(&idkp->kref, ide_disk_release);
	up(&idedisk_ref_sem);
}

/*
 * lba_capacity_is_ok() performs a sanity check on the claimed "lba_capacity"
 * value for this drive (from its reported identification information).
 *
 * Returns:	1 if lba_capacity looks sensible
 *		0 otherwise
 *
 * It is called only once for each drive.
 */
static int lba_capacity_is_ok (struct hd_driveid *id)
{
	unsigned long lba_sects, chs_sects, head, tail;

	/* No non-LBA info .. so valid! */
	if (id->cyls == 0)
		return 1;

	/*
	 * The ATA spec tells large drives to return
	 * C/H/S = 16383/16/63 independent of their size.
	 * Some drives can be jumpered to use 15 heads instead of 16.
	 * Some drives can be jumpered to use 4092 cyls instead of 16383.
	 */
	if ((id->cyls == 16383
	     || (id->cyls == 4092 && id->cur_cyls == 16383)) &&
	    id->sectors == 63 &&
	    (id->heads == 15 || id->heads == 16) &&
	    (id->lba_capacity >= 16383*63*id->heads))
		return 1;

	lba_sects = id->lba_capacity;
	chs_sects = id->cyls * id->heads * id->sectors;

	/* perform a rough sanity check on lba_sects: within 10% is OK */
	if ((lba_sects - chs_sects) < chs_sects/10)
		return 1;

	/* some drives have the word order reversed */
	head = ((lba_sects >> 16) & 0xffff);
	tail = (lba_sects & 0xffff);
	lba_sects = (head | (tail << 16));
	if ((lba_sects - chs_sects) < chs_sects/10) {
		id->lba_capacity = lba_sects;
		return 1;	/* lba_capacity is (now) good */
	}

	return 0;	/* lba_capacity value may be bad */
}

/*
 * __ide_do_rw_disk() issues READ and WRITE commands to a disk,
 * using LBA if supported, or CHS otherwise, to address sectors.
 */
static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq, sector_t block)
{
	ide_hwif_t *hwif	= HWIF(drive);
	unsigned int dma	= drive->using_dma;
	u8 lba48		= (drive->addressing == 1) ? 1 : 0;
	task_ioreg_t command	= WIN_NOP;
	ata_nsector_t		nsectors;

	nsectors.all		= (u16) rq->nr_sectors;

	/*
	 * Interface cannot do 48-bit DMA: use PIO for transfers past the
	 * 28-bit boundary, and plain 28-bit taskfile mode otherwise.
	 */
	if (hwif->no_lba48_dma && lba48 && dma) {
		if (block + rq->nr_sectors > 1ULL << 28)
			dma = 0;
		else
			lba48 = 0;
	}

	if (!dma) {
		ide_init_sg_cmd(drive, rq);
		ide_map_sg(drive, rq);
	}

	if (IDE_CONTROL_REG)
		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);

	/* FIXME: SELECT_MASK(drive, 0) ? */

	if (drive->select.b.lba) {
		if (lba48) {
			task_ioreg_t tasklets[10];

			pr_debug("%s: LBA=0x%012llx\n", drive->name,
					(unsigned long long)block);

			tasklets[0] = 0;
			tasklets[1] = 0;
			tasklets[2] = nsectors.b.low;
			tasklets[3] = nsectors.b.high;
			tasklets[4] = (task_ioreg_t) block;
			tasklets[5] = (task_ioreg_t) (block>>8);
			tasklets[6] = (task_ioreg_t) (block>>16);
			tasklets[7] = (task_ioreg_t) (block>>24);
			if (sizeof(block) == 4) {
				tasklets[8] = (task_ioreg_t) 0;
				tasklets[9] = (task_ioreg_t) 0;
			} else {
				tasklets[8] = (task_ioreg_t)((u64)block >> 32);
				tasklets[9] = (task_ioreg_t)((u64)block >> 40);
			}
#ifdef DEBUG
			printk("%s: 0x%02x%02x 0x%02x%02x%02x%02x%02x%02x\n",
				drive->name,
				tasklets[3], tasklets[2], tasklets[9], tasklets[8],
				tasklets[7], tasklets[6], tasklets[5], tasklets[4]);
#endif
			/*
			 * 48-bit taskfile: write the high-order ("previous")
			 * bytes first, then the current bytes of each register.
			 */
			hwif->OUTB(tasklets[1], IDE_FEATURE_REG);
			hwif->OUTB(tasklets[3], IDE_NSECTOR_REG);
			hwif->OUTB(tasklets[7], IDE_SECTOR_REG);
			hwif->OUTB(tasklets[8], IDE_LCYL_REG);
			hwif->OUTB(tasklets[9], IDE_HCYL_REG);
			hwif->OUTB(tasklets[0], IDE_FEATURE_REG);
			hwif->OUTB(tasklets[2], IDE_NSECTOR_REG);
			hwif->OUTB(tasklets[4], IDE_SECTOR_REG);
			hwif->OUTB(tasklets[5], IDE_LCYL_REG);
			hwif->OUTB(tasklets[6], IDE_HCYL_REG);
			hwif->OUTB(0x00|drive->select.all,IDE_SELECT_REG);
		} else {
			/* 28-bit LBA: low 24 bits in the registers, top nibble
			   in the select register.  Note block is shifted in
			   place by the >>= side effects below. */
			hwif->OUTB(0x00, IDE_FEATURE_REG);
			hwif->OUTB(nsectors.b.low, IDE_NSECTOR_REG);
			hwif->OUTB(block, IDE_SECTOR_REG);
			hwif->OUTB(block>>=8, IDE_LCYL_REG);
			hwif->OUTB(block>>=8, IDE_HCYL_REG);
			hwif->OUTB(((block>>8)&0x0f)|drive->select.all,IDE_SELECT_REG);
		}
	} else {
		/* Legacy CHS addressing. */
		unsigned int sect,head,cyl,track;
		track = (int)block / drive->sect;
		sect  = (int)block % drive->sect + 1;
		hwif->OUTB(sect, IDE_SECTOR_REG);
		head  = track % drive->head;
		cyl   = track / drive->head;

		pr_debug("%s: CHS=%u/%u/%u\n", drive->name, cyl, head, sect);

		hwif->OUTB(0x00, IDE_FEATURE_REG);
		hwif->OUTB(nsectors.b.low, IDE_NSECTOR_REG);
		hwif->OUTB(cyl, IDE_LCYL_REG);
		hwif->OUTB(cyl>>8, IDE_HCYL_REG);
		hwif->OUTB(head|drive->select.all,IDE_SELECT_REG);
	}

	if (dma) {
		if (!hwif->dma_setup(drive)) {
			if (rq_data_dir(rq)) {
				command = lba48 ? WIN_WRITEDMA_EXT : WIN_WRITEDMA;
				if (drive->vdma)
					command = lba48 ? WIN_WRITE_EXT: WIN_WRITE;
			} else {
				command = lba48 ? WIN_READDMA_EXT : WIN_READDMA;
				if (drive->vdma)
					command = lba48 ? WIN_READ_EXT: WIN_READ;
			}
			hwif->dma_exec_cmd(drive, command);
			hwif->dma_start(drive);
			return ide_started;
		}
		/* fallback to PIO */
		ide_init_sg_cmd(drive, rq);
	}

	if (rq_data_dir(rq) == READ) {
		if (drive->mult_count) {
			hwif->data_phase = TASKFILE_MULTI_IN;
			command = lba48 ? WIN_MULTREAD_EXT : WIN_MULTREAD;
		} else {
			hwif->data_phase = TASKFILE_IN;
			command = lba48 ? WIN_READ_EXT : WIN_READ;
		}

		ide_execute_command(drive, command, &task_in_intr, WAIT_CMD, NULL);
		return ide_started;
	} else {
		if (drive->mult_count) {
			hwif->data_phase = TASKFILE_MULTI_OUT;
			command = lba48 ? WIN_MULTWRITE_EXT : WIN_MULTWRITE;
		} else {
			hwif->data_phase = TASKFILE_OUT;
			command = lba48 ? WIN_WRITE_EXT : WIN_WRITE;
		}

		/* FIXME: ->OUTBSYNC ? */
		hwif->OUTB(command, IDE_COMMAND_REG);

		return pre_task_out_intr(drive, rq);
	}
}

/*
 * 268435455  == 137439 MB or 28bit limit
 * 320173056  == 163929 MB or 48bit addressing
 * 1073741822 == 549756 MB or 48bit addressing fake drive
 */

/* Top-level request handler: validates the request, then dispatches to
 * __ide_do_rw_disk() (after an optional host-specific rw_disk hook). */
static ide_startstop_t ide_do_rw_disk (ide_drive_t *drive, struct request *rq, sector_t block)
{
	ide_hwif_t *hwif = HWIF(drive);

	BUG_ON(drive->blocked);

	if (!blk_fs_request(rq)) {
		blk_dump_rq_flags(rq, "ide_do_rw_disk - bad command");
		ide_end_request(drive, 0, 0);
		return ide_stopped;
	}

	pr_debug("%s: %sing: block=%llu, sectors=%lu, buffer=0x%08lx\n",
		 drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
		 (unsigned long long)block, rq->nr_sectors,
		 (unsigned long)rq->buffer);

	if (hwif->rw_disk)
		hwif->rw_disk(drive, rq);

	return __ide_do_rw_disk(drive, rq, block);
}

/*
 * Queries for true maximum capacity of the drive.
 * Returns maximum LBA address (> 0) of the drive, 0 if failed.
 */
static unsigned long idedisk_read_native_max_address(ide_drive_t *drive)
{
	ide_task_t args;
	unsigned long addr = 0;

	/* Create IDE/ATA command request structure */
	memset(&args, 0, sizeof(ide_task_t));
	args.tfRegister[IDE_SELECT_OFFSET]	= 0x40;
	args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_READ_NATIVE_MAX;
	args.command_type			= IDE_DRIVE_TASK_NO_DATA;
	args.handler				= &task_no_data_intr;
	/* submit command request */
	ide_raw_taskfile(drive, &args, NULL);

	/* if OK, compute maximum address value */
	if ((args.tfRegister[IDE_STATUS_OFFSET] & 0x01) == 0) {
		/* 28-bit result: top nibble from SELECT, then HCYL/LCYL/SECTOR. */
		addr = ((args.tfRegister[IDE_SELECT_OFFSET] & 0x0f) << 24)
		     | ((args.tfRegister[ IDE_HCYL_OFFSET]       ) << 16)
		     | ((args.tfRegister[ IDE_LCYL_OFFSET]       ) <<  8)
		     | ((args.tfRegister[IDE_SECTOR_OFFSET]      ));
		addr++;	/* since the return value is (maxlba - 1), we add 1 */
	}
	return addr;
}

/* 48-bit variant of the above, using WIN_READ_NATIVE_MAX_EXT and the HOB
 * (high-order byte) register copies for bits 24..47. */
static unsigned long long idedisk_read_native_max_address_ext(ide_drive_t *drive)
{
	ide_task_t args;
	unsigned long long addr = 0;

	/* Create IDE/ATA command request structure */
	memset(&args, 0, sizeof(ide_task_t));
	args.tfRegister[IDE_SELECT_OFFSET]	= 0x40;
	args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_READ_NATIVE_MAX_EXT;
	args.command_type			= IDE_DRIVE_TASK_NO_DATA;
	args.handler				= &task_no_data_intr;
	/* submit command request */
	ide_raw_taskfile(drive, &args, NULL);

	/* if OK, compute maximum address value */
	if ((args.tfRegister[IDE_STATUS_OFFSET] & 0x01) == 0) {
		u32 high = (args.hobRegister[IDE_HCYL_OFFSET] << 16) |
			   (args.hobRegister[IDE_LCYL_OFFSET] <<  8) |
			    args.hobRegister[IDE_SECTOR_OFFSET];
		u32 low  = ((args.tfRegister[IDE_HCYL_OFFSET])<<16) |
			   ((args.tfRegister[IDE_LCYL_OFFSET])<<8) |
			    (args.tfRegister[IDE_SECTOR_OFFSET]);
		addr = ((__u64)high << 24) | low;
		addr++;	/* since the return value is (maxlba - 1), we add 1 */
	}
	return addr;
}

/*
 * Sets maximum virtual LBA address of the drive.
 * Returns new maximum virtual LBA address (> 0) or 0 on failure.
 */
static unsigned long idedisk_set_max_address(ide_drive_t *drive, unsigned long addr_req)
{
	ide_task_t args;
	unsigned long addr_set = 0;

	addr_req--;	/* SET MAX takes (maxlba - 1) */
	/* Create IDE/ATA command request structure */
	memset(&args, 0, sizeof(ide_task_t));
	args.tfRegister[IDE_SECTOR_OFFSET]	= ((addr_req >>  0) & 0xff);
	args.tfRegister[IDE_LCYL_OFFSET]	= ((addr_req >>  8) & 0xff);
	args.tfRegister[IDE_HCYL_OFFSET]	= ((addr_req >> 16) & 0xff);
	args.tfRegister[IDE_SELECT_OFFSET]	= ((addr_req >> 24) & 0x0f) | 0x40;
	args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_SET_MAX;
	args.command_type			= IDE_DRIVE_TASK_NO_DATA;
	args.handler				= &task_no_data_intr;
	/* submit command request */
	ide_raw_taskfile(drive, &args, NULL);
	/* if OK, read new maximum address value */
	if ((args.tfRegister[IDE_STATUS_OFFSET] & 0x01) == 0) {
		addr_set = ((args.tfRegister[IDE_SELECT_OFFSET] & 0x0f) << 24)
			 | ((args.tfRegister[ IDE_HCYL_OFFSET]       ) << 16)
			 | ((args.tfRegister[ IDE_LCYL_OFFSET]       ) <<  8)
			 | ((args.tfRegister[IDE_SECTOR_OFFSET]      ));
		addr_set++;
	}
	return addr_set;
}

/* 48-bit SET MAX: splits the 48-bit address across the current and HOB
 * register sets.  Note the deliberate >>= side effects below consume
 * addr_req one byte at a time, in register-programming order. */
static unsigned long long idedisk_set_max_address_ext(ide_drive_t *drive, unsigned long long addr_req)
{
	ide_task_t args;
	unsigned long long addr_set = 0;

	addr_req--;	/* SET MAX EXT takes (maxlba - 1) */
	/* Create IDE/ATA command request structure */
	memset(&args, 0, sizeof(ide_task_t));
	args.tfRegister[IDE_SECTOR_OFFSET]	= ((addr_req >>  0) & 0xff);
	args.tfRegister[IDE_LCYL_OFFSET]	= ((addr_req >>= 8) & 0xff);
	args.tfRegister[IDE_HCYL_OFFSET]	= ((addr_req >>= 8) & 0xff);
	args.tfRegister[IDE_SELECT_OFFSET]	= 0x40;
	args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_SET_MAX_EXT;
	args.hobRegister[IDE_SECTOR_OFFSET]	= (addr_req >>= 8) & 0xff;
	args.hobRegister[IDE_LCYL_OFFSET]	= (addr_req >>= 8) & 0xff;
	args.hobRegister[IDE_HCYL_OFFSET]	= (addr_req >>= 8) & 0xff;
	args.hobRegister[IDE_SELECT_OFFSET]	= 0x40;
	args.hobRegister[IDE_CONTROL_OFFSET_HOB]= (drive->ctl|0x80);
	args.command_type			= IDE_DRIVE_TASK_NO_DATA;
	args.handler				= &task_no_data_intr;
	/* submit command request */
	ide_raw_taskfile(drive, &args, NULL);
	/* if OK, compute maximum address value */
	if ((args.tfRegister[IDE_STATUS_OFFSET] & 0x01) == 0) {
		u32 high = (args.hobRegister[IDE_HCYL_OFFSET] << 16) |
			   (args.hobRegister[IDE_LCYL_OFFSET] <<  8) |
			    args.hobRegister[IDE_SECTOR_OFFSET];
		u32 low  = ((args.tfRegister[IDE_HCYL_OFFSET])<<16) |
			   ((args.tfRegister[IDE_LCYL_OFFSET])<<8) |
			    (args.tfRegister[IDE_SECTOR_OFFSET]);
		addr_set = ((__u64)high << 24) | low;
		addr_set++;
	}
	return addr_set;
}

/* Convert a sector count (512-byte sectors) to megabytes. */
static unsigned long long sectors_to_MB(unsigned long long n)
{
	n <<= 9;		/* make it bytes */
	do_div(n, 1000000);	/* make it MB */
	return n;
}

/*
 * Bits 10 of command_set_1 and cfs_enable_1 must be equal,
 * so on non-buggy drives we need test only one.
 * However, we should also check whether these fields are valid.
 */
static inline int idedisk_supports_hpa(const struct hd_driveid *id)
{
	return (id->command_set_1 & 0x0400) && (id->cfs_enable_1 & 0x0400);
}

/*
 * The same here.
 */
static inline int idedisk_supports_lba48(const struct hd_driveid *id)
{
	return (id->command_set_2 & 0x0400) && (id->cfs_enable_2 & 0x0400)
	       && id->lba_capacity_2;
}

/* If the drive hides capacity behind a Host Protected Area, try to disable
 * the HPA and grow drive->capacity64 to the native maximum. */
static void idedisk_check_hpa(ide_drive_t *drive)
{
	unsigned long long capacity, set_max;
	int lba48 = idedisk_supports_lba48(drive->id);

	capacity = drive->capacity64;
	if (lba48)
		set_max = idedisk_read_native_max_address_ext(drive);
	else
		set_max = idedisk_read_native_max_address(drive);

	if (set_max <= capacity)
		return;

	printk(KERN_INFO "%s: Host Protected Area detected.\n"
			 "\tcurrent capacity is %llu sectors (%llu MB)\n"
			 "\tnative  capacity is %llu sectors (%llu MB)\n",
			 drive->name,
			 capacity, sectors_to_MB(capacity),
			 set_max, sectors_to_MB(set_max));

	if (lba48)
		set_max = idedisk_set_max_address_ext(drive, set_max);
	else
		set_max = idedisk_set_max_address(drive, set_max);
	if (set_max) {
		drive->capacity64 = set_max;
		printk(KERN_INFO "%s: Host Protected Area disabled.\n",
				 drive->name);
	}
}

/*
 * Compute drive->capacity, the full capacity of the drive
 * Called with drive->id != NULL.
 *
 * To compute capacity, this uses either of
 *
 *    1. CHS value set by user       (whatever user sets will be trusted)
 *    2. LBA value from target drive (require new ATA feature)
 *    3. LBA value from system BIOS  (new one is OK, old one may break)
 *    4. CHS value from system BIOS  (traditional style)
 *
 * in above order (i.e., if value of higher priority is available,
 * reset will be ignored).
 */
static void init_idedisk_capacity (ide_drive_t *drive)
{
	struct hd_driveid *id = drive->id;
	/*
	 * If this drive supports the Host Protected Area feature set,
	 * then we may need to change our opinion about the drive's capacity.
	 */
	int hpa = idedisk_supports_hpa(id);

	if (idedisk_supports_lba48(id)) {
		/* drive speaks 48-bit LBA */
		drive->select.b.lba = 1;
		drive->capacity64 = id->lba_capacity_2;
		if (hpa)
			idedisk_check_hpa(drive);
	} else if ((id->capability & 2) && lba_capacity_is_ok(id)) {
		/* drive speaks 28-bit LBA */
		drive->select.b.lba = 1;
		drive->capacity64 = id->lba_capacity;
		if (hpa)
			idedisk_check_hpa(drive);
	} else {
		/* drive speaks boring old 28-bit CHS */
		drive->capacity64 = drive->cyl * drive->head * drive->sect;
	}
}

/* Usable capacity in sectors (capacity minus any hidden sect0 offset). */
static sector_t idedisk_capacity (ide_drive_t *drive)
{
	return drive->capacity64 - drive->sect0;
}

#ifdef CONFIG_PROC_FS

/* Issue SMART ENABLE OPERATIONS so the read commands below are valid. */
static int smart_enable(ide_drive_t *drive)
{
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.tfRegister[IDE_FEATURE_OFFSET]	= SMART_ENABLE;
	args.tfRegister[IDE_LCYL_OFFSET]	= SMART_LCYL_PASS;
	args.tfRegister[IDE_HCYL_OFFSET]	= SMART_HCYL_PASS;
	args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_SMART;
	args.command_type			= IDE_DRIVE_TASK_NO_DATA;
	args.handler				= &task_no_data_intr;
	return ide_raw_taskfile(drive, &args, NULL);
}

/* Read one sector of SMART attribute values into @buf. */
static int get_smart_values(ide_drive_t *drive, u8 *buf)
{
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.tfRegister[IDE_FEATURE_OFFSET]	= SMART_READ_VALUES;
	args.tfRegister[IDE_NSECTOR_OFFSET]	= 0x01;
	args.tfRegister[IDE_LCYL_OFFSET]	= SMART_LCYL_PASS;
	args.tfRegister[IDE_HCYL_OFFSET]	= SMART_HCYL_PASS;
	args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_SMART;
	args.command_type			= IDE_DRIVE_TASK_IN;
	args.data_phase				= TASKFILE_IN;
	args.handler				= &task_in_intr;
	(void) smart_enable(drive);
	return ide_raw_taskfile(drive, &args, buf);
}

/* Read one sector of SMART attribute thresholds into @buf. */
static int get_smart_thresholds(ide_drive_t *drive, u8 *buf)
{
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.tfRegister[IDE_FEATURE_OFFSET]	= SMART_READ_THRESHOLDS;
	args.tfRegister[IDE_NSECTOR_OFFSET]	= 0x01;
	args.tfRegister[IDE_LCYL_OFFSET]	= SMART_LCYL_PASS;
	args.tfRegister[IDE_HCYL_OFFSET]	= SMART_HCYL_PASS;
	args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_SMART;
	args.command_type			= IDE_DRIVE_TASK_IN;
	args.data_phase				= TASKFILE_IN;
	args.handler				= &task_in_intr;
	(void) smart_enable(drive);
	return ide_raw_taskfile(drive, &args, buf);
}

/* /proc/ide/.../cache: drive buffer size in KiB (or "(none)"). */
static int proc_idedisk_read_cache
	(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	ide_drive_t	*drive = (ide_drive_t *) data;
	char		*out = page;
	int		len;

	if (drive->id_read)
		len = sprintf(out,"%i\n", drive->id->buf_size / 2);
	else
		len = sprintf(out,"(none)\n");
	PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
}

/* /proc/ide/.../capacity: usable capacity in sectors. */
static int proc_idedisk_read_capacity
	(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	ide_drive_t*drive = (ide_drive_t *)data;
	int len;

	len = sprintf(page,"%llu\n", (long long)idedisk_capacity(drive));
	PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
}

/* /proc/ide/.../smart_thresholds: raw threshold sector as hex words. */
static int proc_idedisk_read_smart_thresholds
	(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	ide_drive_t	*drive = (ide_drive_t *)data;
	int		len = 0, i = 0;

	if (!get_smart_thresholds(drive, page)) {
		unsigned short *val = (unsigned short *) page;
		/* Format past the raw sector that was read into @page. */
		char *out = ((char *)val) + (SECTOR_WORDS * 4);
		page = out;
		do {
			out += sprintf(out, "%04x%c", le16_to_cpu(*val), (++i & 7) ? ' ' : '\n');
			val += 1;
		} while (i < (SECTOR_WORDS * 2));
		len = out - page;
	}
	PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
}

/* /proc/ide/.../smart_values: raw attribute sector as hex words. */
static int proc_idedisk_read_smart_values
	(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	ide_drive_t	*drive = (ide_drive_t *)data;
	int		len = 0, i = 0;

	if (!get_smart_values(drive, page)) {
		unsigned short *val = (unsigned short *) page;
		char *out = ((char *)val) + (SECTOR_WORDS * 4);
		page = out;
		do {
			out += sprintf(out, "%04x%c", le16_to_cpu(*val), (++i & 7) ? ' ' : '\n');
			val += 1;
		} while (i < (SECTOR_WORDS * 2));
		len = out - page;
	}
	PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
}

static ide_proc_entry_t idedisk_proc[] = {
	{ "cache",		S_IFREG|S_IRUGO,	proc_idedisk_read_cache,		NULL },
	{ "capacity",		S_IFREG|S_IRUGO,	proc_idedisk_read_capacity,		NULL },
	{ "geometry",		S_IFREG|S_IRUGO,	proc_ide_read_geometry,			NULL },
	{ "smart_values",	S_IFREG|S_IRUSR,	proc_idedisk_read_smart_values,		NULL },
	{ "smart_thresholds",	S_IFREG|S_IRUSR,	proc_idedisk_read_smart_thresholds,	NULL },
	{ NULL, 0, NULL, NULL }
};

#else

#define	idedisk_proc	NULL

#endif	/* CONFIG_PROC_FS */

/* Turn @rq into a cache-flush taskfile request (EXT variant for >28-bit
 * capable drives with a large capacity). */
static void idedisk_prepare_flush(request_queue_t *q, struct request *rq)
{
	ide_drive_t *drive = q->queuedata;

	memset(rq->cmd, 0, sizeof(rq->cmd));

	if (ide_id_has_flush_cache_ext(drive->id) &&
	    (drive->capacity64 >= (1UL << 28)))
		rq->cmd[0] = WIN_FLUSH_CACHE_EXT;
	else
		rq->cmd[0] = WIN_FLUSH_CACHE;

	rq->flags |= REQ_DRIVE_TASK;
	rq->buffer = rq->cmd;
}

/* Block-layer issue_flush hook: synchronously flush the write cache.
 * Returns 0 immediately when the write cache is disabled. */
static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk,
			       sector_t *error_sector)
{
	ide_drive_t *drive = q->queuedata;
	struct request *rq;
	int ret;

	if (!drive->wcache)
		return 0;

	rq = blk_get_request(q, WRITE, __GFP_WAIT);

	idedisk_prepare_flush(q, rq);

	ret = blk_execute_rq(q, disk, rq, 0);

	/*
	 * if we failed and caller wants error offset, get it
	 */
	if (ret && error_sector)
		*error_sector = ide_get_error_location(drive, rq->cmd);

	blk_put_request(rq);

	return ret;
}

/*
 * This is tightly woven into the
driver->do_special can not touch. * DON'T do it again until a total personality rewrite is committed. */ static int set_multcount(ide_drive_t *drive, int arg) { struct request rq; if (drive->special.b.set_multmode) return -EBUSY; ide_init_drive_cmd (&rq); rq.flags = REQ_DRIVE_CMD; drive->mult_req = arg; drive->special.b.set_multmode = 1; (void) ide_do_drive_cmd (drive, &rq, ide_wait); return (drive->mult_count == arg) ? 0 : -EIO; } static int set_nowerr(ide_drive_t *drive, int arg) { if (ide_spin_wait_hwgroup(drive)) return -EBUSY; drive->nowerr = arg; drive->bad_wstat = arg ? BAD_R_STAT : BAD_W_STAT; spin_unlock_irq(&ide_lock); return 0; } static void update_ordered(ide_drive_t *drive) { struct hd_driveid *id = drive->id; unsigned ordered = QUEUE_ORDERED_NONE; prepare_flush_fn *prep_fn = NULL; issue_flush_fn *issue_fn = NULL; if (drive->wcache) { unsigned long long capacity; int barrier; /* * We must avoid issuing commands a drive does not * understand or we may crash it. We check flush cache * is supported. We also check we have the LBA48 flush * cache if the drive capacity is too large. By this * time we have trimmed the drive capacity if LBA48 is * not available so we don't need to recheck that. */ capacity = idedisk_capacity(drive); barrier = ide_id_has_flush_cache(id) && (drive->addressing == 0 || capacity <= (1ULL << 28) || ide_id_has_flush_cache_ext(id)); printk(KERN_INFO "%s: cache flushes %ssupported\n", drive->name, barrier ? "" : "not "); if (barrier) { ordered = QUEUE_ORDERED_DRAIN_FLUSH; prep_fn = idedisk_prepare_flush; issue_fn = idedisk_issue_flush; } } else ordered = QUEUE_ORDERED_DRAIN; blk_queue_ordered(drive->queue, ordered, prep_fn); blk_queue_issue_flush_fn(drive->queue, issue_fn); } static int write_cache(ide_drive_t *drive, int arg) { ide_task_t args; int err = 1; if (ide_id_has_flush_cache(drive->id)) { memset(&args, 0, sizeof(ide_task_t)); args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ? 
			SETFEATURES_EN_WCACHE : SETFEATURES_DIS_WCACHE;
		args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
		args.command_type = IDE_DRIVE_TASK_NO_DATA;
		args.handler = &task_no_data_intr;
		err = ide_raw_taskfile(drive, &args, NULL);
		if (err == 0)
			drive->wcache = arg;
	}

	update_ordered(drive);

	return err;
}

/* Issue a synchronous FLUSH CACHE (the EXT variant where supported). */
static int do_idedisk_flushcache (ide_drive_t *drive)
{
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	if (ide_id_has_flush_cache_ext(drive->id))
		args.tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE_EXT;
	else
		args.tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE;
	args.command_type = IDE_DRIVE_TASK_NO_DATA;
	args.handler = &task_no_data_intr;
	return ide_raw_taskfile(drive, &args, NULL);
}

/* Set the Automatic Acoustic Management level (@arg == 0 disables AAM). */
static int set_acoustic (ide_drive_t *drive, int arg)
{
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ? SETFEATURES_EN_AAM :
						      SETFEATURES_DIS_AAM;
	args.tfRegister[IDE_NSECTOR_OFFSET] = arg;
	args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
	args.command_type = IDE_DRIVE_TASK_NO_DATA;
	args.handler = &task_no_data_intr;
	ide_raw_taskfile(drive, &args, NULL);
	drive->acoustic = arg;
	return 0;
}

/*
 * drive->addressing:
 *	0: 28-bit
 *	1: 48-bit
 *	2: 48-bit capable doing 28-bit
 */
static int set_lba_addressing(ide_drive_t *drive, int arg)
{
	drive->addressing = 0;

	if (HWIF(drive)->no_lba48)
		return 0;

	if (!idedisk_supports_lba48(drive->id))
		return -EIO;
	drive->addressing = arg;
	return 0;
}

/*
 * Register the per-drive tunables exposed through /proc and the HDIO
 * ioctls.  (Continues on the next chunk line.)
 */
static void idedisk_add_settings(ide_drive_t *drive)
{
	struct hd_driveid *id = drive->id;

	ide_add_setting(drive, "bios_cyl", SETTING_RW, -1, -1, TYPE_INT, 0,
			65535, 1, 1, &drive->bios_cyl, NULL);
	ide_add_setting(drive, "bios_head", SETTING_RW, -1, -1, TYPE_BYTE, 0,
			255, 1, 1, &drive->bios_head, NULL);
	ide_add_setting(drive, "bios_sect", SETTING_RW, -1, -1, TYPE_BYTE, 0,
			63, 1, 1, &drive->bios_sect, NULL);
	ide_add_setting(drive, "address", SETTING_RW, HDIO_GET_ADDRESS,
			HDIO_SET_ADDRESS, TYPE_INTA, 0, 2, 1, 1,
			&drive->addressing, set_lba_addressing);
	/* (Continuation of idedisk_add_settings: remaining tunables.) */
	ide_add_setting(drive, "bswap", SETTING_READ, -1, -1, TYPE_BYTE, 0,
			1, 1, 1, &drive->bswap, NULL);
	/* multcount is read-only when there is no identify data */
	ide_add_setting(drive, "multcount", id ? SETTING_RW : SETTING_READ,
			HDIO_GET_MULTCOUNT, HDIO_SET_MULTCOUNT, TYPE_BYTE, 0,
			id ? id->max_multsect : 0, 1, 1, &drive->mult_count,
			set_multcount);
	ide_add_setting(drive, "nowerr", SETTING_RW, HDIO_GET_NOWERR,
			HDIO_SET_NOWERR, TYPE_BYTE, 0, 1, 1, 1,
			&drive->nowerr, set_nowerr);
	ide_add_setting(drive, "lun", SETTING_RW, -1, -1, TYPE_INT, 0, 7, 1,
			1, &drive->lun, NULL);
	ide_add_setting(drive, "wcache", SETTING_RW, HDIO_GET_WCACHE,
			HDIO_SET_WCACHE, TYPE_BYTE, 0, 1, 1, 1,
			&drive->wcache, write_cache);
	ide_add_setting(drive, "acoustic", SETTING_RW, HDIO_GET_ACOUSTIC,
			HDIO_SET_ACOUSTIC, TYPE_BYTE, 0, 254, 1, 1,
			&drive->acoustic, set_acoustic);
	ide_add_setting(drive, "failures", SETTING_RW, -1, -1, TYPE_INT, 0,
			65535, 1, 1, &drive->failures, NULL);
	ide_add_setting(drive, "max_failures", SETTING_RW, -1, -1, TYPE_INT,
			0, 65535, 1, 1, &drive->max_failures, NULL);
}

/*
 * One-time setup of a freshly probed disk: register tunables, choose the
 * addressing mode, compute capacity and BIOS geometry, and enable the
 * write cache.  (Continues over the following chunk lines.)
 */
static void idedisk_setup (ide_drive_t *drive)
{
	struct hd_driveid *id = drive->id;
	unsigned long long capacity;

	idedisk_add_settings(drive);

	if (drive->id_read == 0)
		return;

	if (drive->removable) {
		/*
		 * Removable disks (eg.
		    SYQUEST); ignore 'WD' drives
		 */
		if (id->model[0] != 'W' || id->model[1] != 'D') {
			drive->doorlocking = 1;
		}
	}

	(void)set_lba_addressing(drive, 1);

	/* LBA48 drives can take larger requests, capped by the hwif */
	if (drive->addressing == 1) {
		ide_hwif_t *hwif = HWIF(drive);
		int max_s = 2048;

		if (max_s > hwif->rqsize)
			max_s = hwif->rqsize;

		blk_queue_max_sectors(drive->queue, max_s);
	}

	printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
	       drive->queue->max_sectors / 2);

	/* calculate drive capacity, and select LBA if possible */
	init_idedisk_capacity (drive);

	/* limit drive capacity to 137GB if LBA48 cannot be used */
	if (drive->addressing == 0 && drive->capacity64 > 1ULL << 28) {
		printk(KERN_WARNING "%s: cannot use LBA48 - full capacity "
		       "%llu sectors (%llu MB)\n",
		       drive->name, (unsigned long long)drive->capacity64,
		       sectors_to_MB(drive->capacity64));
		drive->capacity64 = 1ULL << 28;
	}

	if (drive->hwif->no_lba48_dma && drive->addressing) {
		if (drive->capacity64 > 1ULL << 28) {
			printk(KERN_INFO "%s: cannot use LBA48 DMA - PIO mode will"
					 " be used for accessing sectors > %u\n",
					 drive->name, 1 << 28);
		} else
			drive->addressing = 0;
	}

	/*
	 * if possible, give fdisk access to more of the drive,
	 * by correcting bios_cyls:
	 */
	capacity = idedisk_capacity (drive);
	if (!drive->forced_geom) {

		if (idedisk_supports_lba48(drive->id)) {
			/* compatibility */
			drive->bios_sect = 63;
			drive->bios_head = 255;
		}

		if (drive->bios_sect && drive->bios_head) {
			unsigned int cap0 = capacity; /* truncate to 32 bits */
			unsigned int cylsz, cyl;

			if (cap0 != capacity)
				drive->bios_cyl = 65535;
			else {
				cylsz = drive->bios_sect * drive->bios_head;
				cyl = cap0 / cylsz;
				if (cyl > 65535)
					cyl = 65535;
				if (cyl > drive->bios_cyl)
					drive->bios_cyl = cyl;
			}
		}
	}
	printk(KERN_INFO "%s: %llu sectors (%llu MB)",
			 drive->name, capacity, sectors_to_MB(capacity));

	/* Only print cache size when it was specified */
	if (id->buf_size)
		printk (" w/%dKiB Cache", id->buf_size/2);

	printk(", CHS=%d/%d/%d", 
	       drive->bios_cyl, drive->bios_head, drive->bios_sect);
	if (drive->using_dma)
		ide_dma_verbose(drive);
	printk("\n");

	drive->no_io_32bit = id->dword_io ? 1 : 0;

	/* write cache enabled? */
	if ((id->csfo & 1) || (id->cfs_enable_1 & (1 << 5)))
		drive->wcache = 1;

	write_cache(drive, 1);
}

/* Flush the write cache if enabled and supported; warn on failure. */
static void ide_cacheflush_p(ide_drive_t *drive)
{
	if (!drive->wcache || !ide_id_has_flush_cache(drive->id))
		return;

	if (do_idedisk_flushcache(drive))
		printk(KERN_INFO "%s: wcache flush failed!\n", drive->name);
}

/*
 * Driver-model remove hook: detach the subdriver, remove the gendisk,
 * flush the cache and drop our reference on the ide_disk_obj.
 */
static void ide_disk_remove(ide_drive_t *drive)
{
	struct ide_disk_obj *idkp = drive->driver_data;
	struct gendisk *g = idkp->disk;

	ide_unregister_subdriver(drive, idkp->driver);

	del_gendisk(g);

	ide_cacheflush_p(drive);

	ide_disk_put(idkp);
}

/* kref release: final teardown of the ide_disk_obj and its gendisk. */
static void ide_disk_release(struct kref *kref)
{
	struct ide_disk_obj *idkp = to_ide_disk(kref);
	ide_drive_t *drive = idkp->drive;
	struct gendisk *g = idkp->disk;

	drive->driver_data = NULL;
	drive->devfs_name[0] = '\0';
	g->private_data = NULL;
	put_disk(g);
	kfree(idkp);
}

static int ide_disk_probe(ide_drive_t *drive);

/*
 * Shutdown hook: flush the cache when only restarting; suspend the
 * device (spinning it down) on a real power-off.  Alpha is special-cased
 * as explained below.  (Continues on the next chunk line.)
 */
static void ide_device_shutdown(ide_drive_t *drive)
{
#ifdef	CONFIG_ALPHA
	/* On Alpha, halt(8) doesn't actually turn the machine off,
	   it puts you into the sort of firmware monitor. Typically,
	   it's used to boot another kernel image, so it's not much
	   different from reboot(8). Therefore, we don't need to
	   spin down the disk in this case, especially since Alpha
	   firmware doesn't handle disks in standby mode properly.
	   On the other hand, it's reasonably safe to turn the power
	   off when the shutdown process reaches the firmware prompt,
	   as the firmware initialization takes rather long time -
	   at least 10 seconds, which should be sufficient for
	   the disk to expire its write cache.
	 */
	if (system_state != SYSTEM_POWER_OFF) {
#else
	if (system_state == SYSTEM_RESTART) {
#endif
		ide_cacheflush_p(drive);
		return;
	}

	printk("Shutdown: %s\n", drive->name);
	drive->gendev.bus->suspend(&drive->gendev, PMSG_SUSPEND);
}

/* IDE subdriver glue for ATA disks. */
static ide_driver_t idedisk_driver = {
	.gen_driver = {
		.owner		= THIS_MODULE,
		.name		= "ide-disk",
		.bus		= &ide_bus_type,
	},
	.probe			= ide_disk_probe,
	.remove			= ide_disk_remove,
	.shutdown		= ide_device_shutdown,
	.version		= IDEDISK_VERSION,
	.media			= ide_disk,
	.supports_dsc_overlap	= 0,
	.do_request		= ide_do_rw_disk,
	.end_request		= ide_end_request,
	.error			= __ide_error,
	.abort			= __ide_abort,
	.proc			= idedisk_proc,
};

/*
 * block_device open(): take a reference on the ide_disk_obj and, on the
 * first open of a removable drive, check for a media change and lock the
 * door.  (Continues on the next chunk line.)
 */
static int idedisk_open(struct inode *inode, struct file *filp)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct ide_disk_obj *idkp;
	ide_drive_t *drive;

	if (!(idkp = ide_disk_get(disk)))
		return -ENXIO;

	drive = idkp->drive;

	drive->usage++;

	if (drive->removable && drive->usage == 1) {
		ide_task_t args;
		memset(&args, 0, sizeof(ide_task_t));
		args.tfRegister[IDE_COMMAND_OFFSET] = WIN_DOORLOCK;
		args.command_type = IDE_DRIVE_TASK_NO_DATA;
		args.handler = &task_no_data_intr;
		check_disk_change(inode->i_bdev);
		/*
		 * Ignore the return code from door_lock,
		 * since the open() has already succeeded,
		 * and the door_lock is irrelevant at this point.
		 */
		if (drive->doorlocking && ide_raw_taskfile(drive, &args, NULL))
			drive->doorlocking = 0;
	}
	return 0;
}

/*
 * block_device release(): on last close, flush the write cache and
 * unlock the door of removable media, then drop the reference taken at
 * open time.
 */
static int idedisk_release(struct inode *inode, struct file *filp)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct ide_disk_obj *idkp = ide_disk_g(disk);
	ide_drive_t *drive = idkp->drive;

	if (drive->usage == 1)
		ide_cacheflush_p(drive);
	if (drive->removable && drive->usage == 1) {
		ide_task_t args;
		memset(&args, 0, sizeof(ide_task_t));
		args.tfRegister[IDE_COMMAND_OFFSET] = WIN_DOORUNLOCK;
		args.command_type = IDE_DRIVE_TASK_NO_DATA;
		args.handler = &task_no_data_intr;
		if (drive->doorlocking && ide_raw_taskfile(drive, &args, NULL))
			drive->doorlocking = 0;
	}
	drive->usage--;

	ide_disk_put(idkp);

	return 0;
}

/* HDIO_GETGEO: report the (possibly faked) BIOS geometry. */
static int idedisk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct ide_disk_obj *idkp = ide_disk_g(bdev->bd_disk);
	ide_drive_t *drive = idkp->drive;

	geo->heads = drive->bios_head;
	geo->sectors = drive->bios_sect;
	geo->cylinders = (u16)drive->bios_cyl; /* truncate */
	return 0;
}

/* Forward everything to the generic IDE ioctl handler. */
static int idedisk_ioctl(struct inode *inode, struct file *file,
			unsigned int cmd, unsigned long arg)
{
	struct block_device *bdev = inode->i_bdev;
	struct ide_disk_obj *idkp = ide_disk_g(bdev->bd_disk);
	return generic_ide_ioctl(idkp->drive, file, bdev, cmd, arg);
}

static int idedisk_media_changed(struct gendisk *disk)
{
	struct ide_disk_obj *idkp = ide_disk_g(disk);
	ide_drive_t *drive = idkp->drive;

	/* do not scan partitions twice if this is a removable device */
	if (drive->attach) {
		drive->attach = 0;
		return 0;
	}
	/* if removable, always assume it was changed */
	return drive->removable;
}

/* Re-read the capacity after a media change. */
static int idedisk_revalidate_disk(struct gendisk *disk)
{
	struct ide_disk_obj *idkp = ide_disk_g(disk);
	set_capacity(disk, idedisk_capacity(idkp->drive));
	return 0;
}

/* Block-device entry points.  (Continues on the next chunk line.) */
static struct block_device_operations idedisk_ops = {
	.owner		= THIS_MODULE,
	.open		= idedisk_open,
	.release	= idedisk_release,
	.ioctl		= idedisk_ioctl,
	.getgeo		= idedisk_getgeo,
	.media_changed	= idedisk_media_changed,
	.revalidate_disk= idedisk_revalidate_disk
};

MODULE_DESCRIPTION("ATA DISK Driver");

/*
 * Bind this driver to @drive: allocate the ide_disk_obj and gendisk,
 * run one-time setup and publish the disk.  Returns 0 on success or
 * -ENODEV when the drive is absent, not a disk, or allocation fails.
 */
static int ide_disk_probe(ide_drive_t *drive)
{
	struct ide_disk_obj *idkp;
	struct gendisk *g;

	/* strstr("foo", "") is non-NULL */
	if (!strstr("ide-disk", drive->driver_req))
		goto failed;
	if (!drive->present)
		goto failed;
	if (drive->media != ide_disk)
		goto failed;

	idkp = kzalloc(sizeof(*idkp), GFP_KERNEL);
	if (!idkp)
		goto failed;

	g = alloc_disk_node(1 << PARTN_BITS,
			hwif_to_node(drive->hwif));
	if (!g)
		goto out_free_idkp;

	ide_init_disk(g, drive);

	ide_register_subdriver(drive, &idedisk_driver);

	kref_init(&idkp->kref);

	idkp->drive = drive;
	idkp->driver = &idedisk_driver;
	idkp->disk = g;

	g->private_data = &idkp->driver;

	drive->driver_data = idkp;

	idedisk_setup(drive);
	if ((!drive->head || drive->head > 16) && !drive->select.b.lba) {
		printk(KERN_ERR "%s: INVALID GEOMETRY: %d PHYSICAL HEADS?\n",
			drive->name, drive->head);
		drive->attach = 0;
	} else
		drive->attach = 1;

	g->minors = 1 << PARTN_BITS;
	strcpy(g->devfs_name, drive->devfs_name);
	g->driverfs_dev = &drive->gendev;
	g->flags = drive->removable ? GENHD_FL_REMOVABLE : 0;
	set_capacity(g, idedisk_capacity(drive));
	g->fops = &idedisk_ops;
	add_disk(g);
	return 0;

out_free_idkp:
	kfree(idkp);
failed:
	return -ENODEV;
}

static void __exit idedisk_exit (void)
{
	driver_unregister(&idedisk_driver.gen_driver);
}

static int __init idedisk_init(void)
{
	return driver_register(&idedisk_driver.gen_driver);
}

MODULE_ALIAS("ide:*m-disk*");
module_init(idedisk_init);
module_exit(idedisk_exit);
MODULE_LICENSE("GPL");
```
```xml import { Button, CollapseContent, ControlLabel, FormControl, FormGroup, MainStyleModalFooter as ModalFooter } from '@erxes/ui/src'; import React, { useState } from 'react'; import { IConfigsMap } from '../types'; import { __ } from 'coreui/utils'; type Props = { configsMap: IConfigsMap; config: any; currentConfigKey: string; save: (configsMap: IConfigsMap) => void; }; const MainConfig = (props: Props) => { const [config, setConfig] = useState(props.config || {}); const { configsMap } = props; const onSave = (e) => { e.preventDefault(); configsMap.loansConfig = config; props.save(configsMap); }; const onChangeConfig = (code: string, value) => { config[code] = value; setConfig(config); }; const onChangeInput = (code: string, e) => { onChangeConfig(code, e.target.value); }; const onChangeInputNumber = (code: string, e) => { onChangeConfig(code, Number(e.target.value)); }; const onChangeCheck = (code: string, e) => { onChangeConfig(code, e.target.checked); }; return ( <> <CollapseContent title={__(config.title)} open={false}> <FormGroup> <ControlLabel required={true}>{__('Organization type')}</ControlLabel> <FormControl name="organizationType" componentclass="select" defaultValue={config['organizationType']} onChange={onChangeInput.bind(this, 'organizationType')} > {['bbsb', 'entity'].map((typeName, index) => ( <option key={index} value={typeName}> {__(typeName)} </option> ))} </FormControl> </FormGroup> <FormGroup> <ControlLabel>{__('Calculation number fixed')}</ControlLabel> <FormControl defaultValue={config['calculationFixed']} type="number" min={0} max={100} onChange={onChangeInputNumber.bind(this, 'calculationFixed')} required={true} /> </FormGroup> <ModalFooter> <Button btnStyle="primary" icon="check-circle" onClick={onSave} uppercase={false} > {__('Save')} </Button> </ModalFooter> </CollapseContent> <CollapseContent title={__('period lock config')} open={false}> <FormGroup> <ControlLabel required={true}>{__('Period lock type')}</ControlLabel> <FormControl 
name="periodLockType" componentclass="select" defaultValue={config['periodLockType']} onChange={onChangeInput.bind(this, 'periodLockType')} > {['daily', 'endOfMonth', 'manual'].map((typeName, index) => ( <option key={typeName} value={typeName}> {__(typeName)} </option> ))} </FormControl> </FormGroup> <FormGroup> <ControlLabel>{__('Is Store Interest')}</ControlLabel> <FormControl className="flex-item" type="checkbox" componentclass="checkbox" name="isStoreInterest" checked={config['isStoreInterest']} onChange={onChangeCheck.bind(this, 'isStoreInterest')} /> </FormGroup> <FormGroup> <ControlLabel>{__('Is Create Invoice')}</ControlLabel> <FormControl className="flex-item" type="checkbox" componentclass="checkbox" name="isCreateInvoice" checked={config['isCreateInvoice']} onChange={onChangeCheck.bind(this, 'isCreateInvoice')} /> </FormGroup> <FormGroup> <ControlLabel>{__('Is Change Classification')}</ControlLabel> <FormControl className="flex-item" type="checkbox" componentclass="checkbox" name="isChangeClassification" checked={config['isChangeClassification']} onChange={onChangeCheck.bind(this, 'isChangeClassification')} /> </FormGroup> <ModalFooter> <Button btnStyle="primary" icon="check-circle" onClick={onSave} uppercase={false} > {__('Save')} </Button> </ModalFooter> </CollapseContent> <CollapseContent title={__('classification config')} open={false}> <FormGroup> <ControlLabel>{__('Normal /Day/ ')}</ControlLabel> <FormControl defaultValue={config['classificationNormal']} type="number" min={0} max={100} onChange={onChangeInputNumber.bind(this, 'classificationNormal')} required={true} /> </FormGroup> <FormGroup> <ControlLabel>{__('Expired /Day/ ')}</ControlLabel> <FormControl defaultValue={config['classificationExpired']} type="number" min={0} max={100} onChange={onChangeInputNumber.bind(this, 'classificationExpired')} required={true} /> </FormGroup> <FormGroup> <ControlLabel>{__('Doubt /Day/ ')}</ControlLabel> <FormControl 
defaultValue={config['classificationDoubt']} type="number" min={0} max={100} onChange={onChangeInputNumber.bind(this, 'classificationDoubt')} required={true} /> </FormGroup> <FormGroup> <ControlLabel>{__('Negative /Day/ ')}</ControlLabel> <FormControl defaultValue={config['classificationNegative']} type="number" min={0} max={100} onChange={onChangeInputNumber.bind(this, 'classificationNegative')} required={true} /> </FormGroup> <FormGroup> <ControlLabel>{__('Bad /Day/ ')}</ControlLabel> <FormControl defaultValue={config['classificationBad']} type="number" min={0} max={100} onChange={onChangeInputNumber.bind(this, 'classificationBad')} required={true} /> </FormGroup> <ModalFooter> <Button btnStyle="primary" icon="check-circle" onClick={onSave} uppercase={false} > {__('Save')} </Button> </ModalFooter> </CollapseContent> <CollapseContent title={__('internet bank config')} open={false}> <FormGroup> <ControlLabel>{__('Loan give limit')}</ControlLabel> <FormControl defaultValue={config['loanGiveLimit']} type="number" onChange={onChangeInputNumber.bind(this, 'loanGiveLimit')} required={true} /> </FormGroup> <FormGroup> <ControlLabel>{__('Loan give account type')}</ControlLabel> <FormControl name="loanGiveAccountType" componentclass="select" defaultValue={config['loanGiveAccountType']} onChange={onChangeInput.bind(this, 'loanGiveAccountType')} > {['khanbank', 'golomt'].map((typeName, index) => ( <option key={typeName} value={typeName}> {__(typeName)} </option> ))} </FormControl> </FormGroup> <FormGroup> <ControlLabel>{__('Loan give account number')}</ControlLabel> <FormControl defaultValue={config['loanGiveAccountNumber']} type="number" onChange={onChangeInputNumber.bind(this, 'loanGiveAccountNumber')} required={true} /> </FormGroup> <ModalFooter> <Button btnStyle="primary" icon="check-circle" onClick={onSave} uppercase={false} > {__('Save')} </Button> </ModalFooter> </CollapseContent> </> ); }; export default MainConfig; ```
The SCW World Tag Team Championship was the top tag team championship in Southwest Championship Wrestling from its establishment in 1980 until 1984, when the title was abandoned. History References External links SWCW World Tag Team title history World Tag Team Championship Tag team wrestling championships
Güinope is a municipality in the Honduran department of El Paraíso. According to a 2001 census, it had 6,941 inhabitants. Founded by Esteban Rodríguez in 1747, it has celebrated the "Festival of Oranges" every year since 1981. Also, in May they celebrate in honour of San Isidro Labrador. The name Güinope originates in Mexico and means "in the water of pigeons". In 2021 Güinope's "RUTA NARANJA" was awarded as one of Honduras' 30 Wonders. Villages The municipality is made up of ten villages: Güinope (main village) Arrayanes Casitas Galeras Las Liquidámbas Lavanderos Mansaragua Pacayas Santa Rosa Silisgualagua Municipalities of the El Paraíso Department
Kader Attia (born 30 December 1970) is an Algerian-French artist. Early life Attia was born in Dugny, France to Algerian parents and was raised in Paris and Algeria. He studied at l'école Duperré in Paris and l'école des arts appliqués La Massana in Barcelona, and graduated from the École nationale supérieure des arts décoratifs, Paris, in 1998. Work Attia's work often examines social injustice, marginalized communities and postcolonialism. In 2016, Attia founded La Colonie, a gallery near Paris' Gare du Nord train station. In March 2020, La Colonie closed permanently due to the coronavirus pandemic. In March 2021, Attia was announced as the curator for the 12th Berlin Biennale. He is the first artist to curate the biennale since the New York–based collective DIS, who presented the 9th edition in 2016. In November 2021, he had an exhibition entitled "On Silence" at the Mathaf: Arab Museum of Modern Art in Doha. Collections Attia's work is included in the permanent collections of: Museum of Modern Art, New York, Sharjah Art Foundation, Tate Museum, Centre Georges Pompidou, Paris, Institute of Contemporary Art, Boston, Guggenheim Museum in New York. Awards In 2016, Attia won France's Prix Marcel Duchamp. In 2018, he was awarded the Joan Miró Prize. In 2019, Attia was a member of the jury that selected Arthur Jafa as winner of the Prince Pierre Foundation's International Contemporary Art Prize. Notes References External links 1970 births 21st-century French male artists Artists from Paris Living people Algerian artists Algerian contemporary artists French people of Algerian descent Decolonial artists