text
stringlengths
5
1.04M
/*---
  MEX gateway:
    [scanposes, latentsurf, corres] = jpr_interface(datapoints, scan_offsets, paras)

  Inputs (all double matrices):
    datapoints   -- 9 x N, one column per point. Rows 0..2 are read as the
                    position and rows 3..5 as the normal; rows 6..8 are not
                    read here (per-point color is reset to 0).
    scan_offsets -- 1 x (numScans+1) column offsets into datapoints; scan k
                    owns columns [scan_offsets(k), scan_offsets(k+1)).
    paras        -- parameter vector, consumed positionally (see below).

  Outputs:
    output[0] -- 12 x numScans optimized scan poses (the 4x3 blocks of each
                 Affine3d, column-major per scan).
    output[1] -- 9 x M latent ("super") scan: position / normal / color.
    output[2] -- 4 x K dense correspondences: [scanId; sourcePointId;
                 targetPointId; weight], ids converted to 1-based for MATLAB.
--*/
#include "mex.h"
#include "point_cloud.h"
#include "linear_algebra.h"
#include "affine_transformation.h"
#include "simul_reg_and_recons.h"
#include <vector>
#include <algorithm>

using namespace std;

void mexFunction(
    int nargout,
    mxArray *output[],
    int nargin,
    const mxArray *input[]) {
  /* Argument-count checks only; types/sizes of the inputs are trusted.
     NOTE(review): inputs are assumed to be real double matrices with the
     shapes documented above -- confirm against the MATLAB caller. */
  if (nargin != 3) {
    mexErrMsgTxt("Three input arguments required.");
  }
  if (nargout != 3) {
    mexErrMsgTxt("Incorrect number of output arguments.");
  }

  // Raw point data: 9 doubles per point (position, normal, unused tail).
  double *pointData = (double*)mxGetData(input[0]);
  unsigned numPoints = static_cast<unsigned> (mxGetN(input[0]));
  // Scan offsets: numScans+1 entries delimit each scan's column range.
  double *offsetData = (double*)mxGetData(input[1]);
  unsigned numScans = static_cast<unsigned> (mxGetN(input[1]))-1;

  // Split the flat point array into one PointCloud per scan.
  vector<PointCloud> scans;
  scans.resize(numScans);
  for (unsigned id = 0; id < numScans; ++id) {
    vector<Surfel3D> *surfels = scans[id].GetPointArray();
    int left = static_cast<int> (offsetData[id]);
    int right = static_cast<int> (offsetData[id+1]);
    surfels->resize(right-left);
    for (int i = left; i < right; ++i) {
      Surfel3D *sur = &(*surfels)[i-left];
      int off = 9*i;  // column i of the 9 x N input matrix
      for (int k = 0; k < 3; ++k) {
        sur->position[k] = static_cast<float> (pointData[off+k]);
        sur->normal[k] = static_cast<float> (pointData[off+3+k]);
        sur->color[k] = 0.f;  // color channels of the input are ignored
      }
    }
  }

  // Compute the bounding box of all the input points; its largest side is
  // used below to clamp the grid sizes to sane, scale-relative ranges.
  BoundingBox box_all;
  box_all.Initialize();
  for (unsigned id = 0; id < numScans; ++id) {
    scans[id].ComputeBoundingBox();
    box_all.Insert_A_Box(scans[id].GetBoundingBox());
  }
  double maxDim = max(max(box_all.size[0], box_all.size[1]), box_all.size[2]);

  // Unpack the positional parameter vector (paras).
  double *data = (double*)mxGetData(input[2]);
  SimulRegAndRecons srar;
  SimulRegAndReconsPara para;
  para.stride = static_cast<int> (data[0]);
  para.weightPoint2PlaneDis = static_cast<double> (data[1]);
  para.minNumPointsPerCell = static_cast<int> (data[2]);
  para.maxNumPointsPerCell = static_cast<int> (data[3]);
  para.numAlternatingIterations = static_cast<int> (data[4]);
  // Sanity-clamp gridSize_coarse / gridSize_fine into ranges proportional
  // to the scene extent (coarse: [maxDim/128, maxDim/32], fine:
  // [maxDim/384, maxDim/96]) so a bad caller value cannot derail the solve.
  double gridSize_coarse = static_cast<double> (data[5]);
  double gridSize_fine = static_cast<double> (data[6]);
  gridSize_coarse = max(maxDim / 128.0, min(maxDim / 32.0, gridSize_coarse));
  gridSize_fine = max(maxDim / 384.0, min (maxDim / 96.0, gridSize_fine));
  int num_levels = static_cast<int> (data[7]);
  num_levels = min(4, max(1, num_levels));  // 1..4 coarse-to-fine levels

  vector<Affine3d> opt_poses;
  opt_poses.resize(numScans);
  // Coarse-to-fine schedule: grid size interpolated geometrically (log-space)
  // from coarse to fine across the levels.
  for (int level_id = 0; level_id < num_levels; ++level_id) {
    double t = 0.0;
    if (num_levels > 1)
      t = static_cast<double> (level_id) / (num_levels - 1.0);
    para.gridSize = exp(log(gridSize_coarse) * (1 - t) + log(gridSize_fine) * t);
    // NOTE(review): the alternating-optimization call is disabled, so
    // opt_poses keep their default-constructed values (presumably identity
    // transforms -- confirm Affine3d's default) when LatentSurfOpt runs.
    // srar.AlternatingOpt(scans, para, &opt_poses);
  }

  // Fuse the scans into a latent surface and collect scan->surface
  // correspondences. stride is forced to 1 so every point participates.
  PointCloud super_scan;
  vector<vector<PointCorres>> corres;
  para.stride = 1;
  srar.LatentSurfOpt(scans, opt_poses, para, &corres, &super_scan);

  // Output 0: optimized scan poses, 12 doubles per scan (4x3 pose block).
  output[0] = mxCreateDoubleMatrix(12, numScans, mxREAL);
  data = mxGetPr(output[0]);
  for (unsigned scanid = 0; scanid < numScans; ++scanid) {
    for (int i = 0; i < 4; ++i) {
      for (int j = 0; j < 3; ++j) {
        data[12*scanid+i*3+j] = opt_poses[scanid][i][j];
      }
    }
  }

  // Output 1: the latent surface, 9 doubles per point.
  output[1] = mxCreateDoubleMatrix(9, super_scan.GetPointArray()->size(), mxREAL);
  data = mxGetPr(output[1]);
  for (unsigned pid = 0; pid < super_scan.GetPointArray()->size(); ++pid) {
    Surfel3D *sur = &(*super_scan.GetPointArray())[pid];
    for (int k = 0; k < 3; ++k) {
      data[9*pid+k] = sur->position[k];
      data[9*pid+k+3] = sur->normal[k];
      data[9*pid+k+6] = sur->color[k];
    }
  }

  // Output 2: flattened correspondences; ids are shifted by +1 so MATLAB
  // can use them directly as 1-based indices.
  unsigned numcorres = 0;
  for (unsigned scanid = 0; scanid < corres.size(); ++scanid) {
    numcorres += corres[scanid].size();
  }
  output[2] = mxCreateDoubleMatrix(4, numcorres, mxREAL);
  data = mxGetPr(output[2]);
  unsigned off = 0;
  for (unsigned scanid = 0; scanid < corres.size(); ++scanid) {
    for (unsigned j = 0; j < corres[scanid].size(); ++j) {
      data[4*off] = scanid + 1;
      data[4*off+1] = corres[scanid][j].sourcePointId + 1;
      data[4*off+2] = corres[scanid][j].targetPointId + 1;
      data[4*off+3] = corres[scanid][j].weight;
      off++;
    }
  }
}
#include "UEngine.h"
#include "Material.h"

// Acquire this material's two constant buffers (color and sprite UV) from
// the renderer, attach the CPU-side data they mirror, and register both with
// the scene's resource manager so the scene owns their lifetime.
void UEngine::Material::Awake()
{
	colorBuffer = DXRenderer::DXConstantBuffer::Instantiate
	(
		GetGameObject()->GetScene()->ResourceManager.GetNextCBufferID(),
		DXRenderer::Get(),
		GetGameObject()->GetScene()->ResourceManager.GetCBufferPreset(typeid(Color).raw_name())
	);
	spriteBuffer = DXRenderer::DXConstantBuffer::Instantiate
	(
		GetGameObject()->GetScene()->ResourceManager.GetNextCBufferID(),
		DXRenderer::Get(),
		GetGameObject()->GetScene()->ResourceManager.GetCBufferPreset(typeid(UV).raw_name())
	);
	uv = UV{ 0, 0, 1, 1 };  // full-texture UV rect by default
	colorBuffer->AttachData(&color.value, sizeof(Color));
	spriteBuffer->AttachData(&uv.value, sizeof(UV));
	GetGameObject()->GetScene()->ResourceManager.AddResource<DXRenderer::DXConstantBuffer>(colorBuffer->UID, colorBuffer);
	GetGameObject()->GetScene()->ResourceManager.AddResource<DXRenderer::DXConstantBuffer>(spriteBuffer->UID, spriteBuffer);
}

// When the sibling RenderComponent's render object changes, re-bind this
// material's buffers (and texture/sampler, if loaded) to the new object.
void UEngine::Material::Update()
{
	auto renderComponent = GetComponent<RenderComponent>();
	if (renderComponent == nullptr) return;
	if (renderObject != renderComponent->GetRenderObject())
	{
		renderObject = renderComponent->GetRenderObject();
		if (renderObject == nullptr) return;
		renderComponent->AddConstantBuffer(colorBuffer);
		renderComponent->AddConstantBuffer(spriteBuffer);
		if (imageTexture) renderComponent->AddImageTexture(imageTexture);
		if (imageSampler) renderComponent->AddImageSampler(imageSampler);
	}
}

// Push the current CPU-side color / UV values to the GPU before rendering.
void UEngine::Material::OnPreRender()
{
	colorBuffer->Update(DXRenderer::Get()->GetImmediateDeviceContext());
	spriteBuffer->Update(DXRenderer::Get()->GetImmediateDeviceContext());
}

// Detach this material's GPU bindings from the render component.
// NOTE(review): the cleanup only runs when the render object has *changed*
// since the last Update (mirroring Update's guard). That looks suspicious
// for a destroy path -- confirm whether the buffers should be cleared
// unconditionally; behavior is preserved here as-is.
void UEngine::Material::OnDestroy()
{
	auto renderComponent = GetComponent<RenderComponent>();
	if (renderComponent == nullptr) return;
	if (renderObject != renderComponent->GetRenderObject())
	{
		renderObject = renderComponent->GetRenderObject();
		if (renderObject == nullptr) return;
		renderComponent->ClearConstantBuffers();
		if (imageTexture) renderComponent->AddImageTexture(nullptr);
		if (imageSampler) renderComponent->AddImageSampler(nullptr);
	}
}

// Restore the material from XML: the base Serializer fills fileName and
// samplerName, then samplerName (a "\n"-delimited field list written by
// DXSamplerState::MakeName) is parsed back into a D3D11_SAMPLER_DESC.
// Note: parsing consumes samplerName.value destructively; it is re-derived
// from the desc inside LoadImageMaterial.
void UEngine::Material::DeSerialize(TiXmlNode* node)
{
	Serializer::DeSerialize(node);
	std::wstring fileName;
	convert_utf8_to_unicode_string(fileName, this->fileName.value.c_str(), this->fileName.value.size());
	using namespace std;
	std::string delimiter = "\n";
	std::list<std::string> tokens;
	size_t pos = 0;
	while ((pos = samplerName.value.find(delimiter)) != std::string::npos)
	{
		tokens.emplace_back(samplerName.value.substr(0, pos));
		samplerName.value.erase(0, pos + delimiter.length());
	}
	// Token order must match DXSamplerState::MakeName's serialization order.
	D3D11_SAMPLER_DESC desc;
	desc.AddressU = static_cast<D3D11_TEXTURE_ADDRESS_MODE>(stoll(tokens.front())); tokens.pop_front();
	desc.AddressV = static_cast<D3D11_TEXTURE_ADDRESS_MODE>(stoll(tokens.front())); tokens.pop_front();
	desc.AddressW = static_cast<D3D11_TEXTURE_ADDRESS_MODE>(stoll(tokens.front())); tokens.pop_front();
	desc.BorderColor[0] = stof(tokens.front()); tokens.pop_front();
	desc.BorderColor[1] = stof(tokens.front()); tokens.pop_front();
	desc.BorderColor[2] = stof(tokens.front()); tokens.pop_front();
	desc.BorderColor[3] = stof(tokens.front()); tokens.pop_front();
	desc.ComparisonFunc = static_cast<D3D11_COMPARISON_FUNC>(stoi(tokens.front())); tokens.pop_front();
	desc.Filter = static_cast<D3D11_FILTER>(stoi(tokens.front())); tokens.pop_front();
	desc.MaxAnisotropy = stoi(tokens.front()); tokens.pop_front();
	desc.MaxLOD = stof(tokens.front()); tokens.pop_front();
	desc.MinLOD = stof(tokens.front()); tokens.pop_front();
	desc.MipLODBias = stof(tokens.front()); tokens.pop_front();
	LoadImageMaterial(fileName, desc);
}

// Convenience overload: load with a default sampler (clamp addressing,
// trilinear filtering). Delegates to the D3D11_SAMPLER_DESC overload.
void UEngine::Material::LoadImageMaterial(std::wstring fileName)
{
	D3D11_SAMPLER_DESC samplerDesc;
	ZeroMemory(&samplerDesc, sizeof(samplerDesc));
	samplerDesc.AddressU = D3D11_TEXTURE_ADDRESS_CLAMP;
	samplerDesc.AddressV = D3D11_TEXTURE_ADDRESS_CLAMP;
	samplerDesc.AddressW = D3D11_TEXTURE_ADDRESS_CLAMP;
	samplerDesc.ComparisonFunc = D3D11_COMPARISON_NEVER;
	samplerDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
	samplerDesc.MaxAnisotropy = 1;
	samplerDesc.MaxLOD = FLT_MAX;
	samplerDesc.MinLOD = -FLT_MAX;
	samplerDesc.MipLODBias = 0;
	LoadImageMaterial(fileName, samplerDesc);
}

// Convenience overload: load with a caller-chosen address mode and filter.
// BUGFIX: the previous implementation skipped the resource-manager texture
// lookup that both sibling overloads perform, so an already-assigned
// imageTexture was never replaced and cached textures were never reused.
// Delegating to the desc overload restores the lookup and removes the
// duplicated loading code.
void UEngine::Material::LoadImageMaterial(std::wstring fileName, D3D11_TEXTURE_ADDRESS_MODE addressMode, D3D11_FILTER filter)
{
	D3D11_SAMPLER_DESC samplerDesc;
	ZeroMemory(&samplerDesc, sizeof(samplerDesc));
	samplerDesc.AddressU = addressMode;
	samplerDesc.AddressV = addressMode;
	samplerDesc.AddressW = addressMode;
	samplerDesc.ComparisonFunc = D3D11_COMPARISON_NEVER;
	samplerDesc.Filter = filter;
	samplerDesc.MaxAnisotropy = 1;
	samplerDesc.MaxLOD = FLT_MAX;
	samplerDesc.MinLOD = -FLT_MAX;
	samplerDesc.MipLODBias = 0;
	LoadImageMaterial(fileName, samplerDesc);
}

// Core loader: resolve (or load and cache) the texture for fileName and the
// sampler state for desc via the scene's resource manager, reset the tint
// color to opaque white, and re-bind to the render component if one is live.
void UEngine::Material::LoadImageMaterial(std::wstring fileName, D3D11_SAMPLER_DESC desc)
{
	this->fileName = fileName;
	this->samplerName = DXRenderer::DXSamplerState::MakeName(desc);
	auto scene = GetGameObject()->GetScene();
	// Texture: reuse a cached instance if the scene already loaded this file.
	imageTexture = scene->ResourceManager.GetResource<DXRenderer::DXTexture>(this->fileName.value);
	if (imageTexture == nullptr)
	{
		imageTexture = DXRenderer::DXTexture::Load(fileName);
		GetGameObject()->GetScene()->ResourceManager.AddResource<DXRenderer::DXTexture>(this->fileName.value, imageTexture);
	}
	// Sampler: keyed by the name derived from desc, so identical descs share
	// one DXSamplerState.
	imageSampler = scene->ResourceManager.GetResource<DXRenderer::DXSamplerState>(this->samplerName.value);
	if (imageSampler == nullptr)
	{
		imageSampler = DXRenderer::DXSamplerState::Load(desc);
		GetGameObject()->GetScene()->ResourceManager.AddResource<DXRenderer::DXSamplerState>(samplerName.value, imageSampler);
	}
	color = Color{ 1, 1, 1, 1 };
	if (renderObject != nullptr) GetComponent<RenderComponent>()->AddImageTexture(imageTexture);
	if (renderObject != nullptr) GetComponent<RenderComponent>()->AddImageSampler(imageSampler);
}
/*
    MANGO Multimedia Development Platform
    Copyright (C) 2012-2019 Twilight Finland 3D Oy Ltd. All rights reserved.
*/
#pragma once

#include "simd.hpp"

namespace mango {
namespace simd {

    // -----------------------------------------------------------------
    // f32x8
    //
    // 8-lane single-precision vector, implemented on 256-bit AVX
    // intrinsics. Comparisons return mask32x8 via _mm256_cmp_ps_mask and
    // bitwise_not uses _mm256_ternarylogic_epi32, so this variant
    // presumably targets AVX-512VL -- confirm against the surrounding
    // simd.hpp dispatch.
    // -----------------------------------------------------------------

    // Constructors / memory ------------------------------------------

    static inline f32x8 f32x8_zero()
    {
        return _mm256_setzero_ps();
    }

    // Broadcast a single scalar to all 8 lanes.
    static inline f32x8 f32x8_set(f32 s)
    {
        return _mm256_set1_ps(s);
    }

    // Set lanes in memory order: lane 0 = s0, ..., lane 7 = s7.
    static inline f32x8 f32x8_set(f32 s0, f32 s1, f32 s2, f32 s3, f32 s4, f32 s5, f32 s6, f32 s7)
    {
        return _mm256_setr_ps(s0, s1, s2, s3, s4, s5, s6, s7);
    }

    // Unaligned load of 8 floats.
    static inline f32x8 f32x8_uload(const f32* source)
    {
        return _mm256_loadu_ps(source);
    }

    // Unaligned store of 8 floats.
    static inline void f32x8_ustore(f32* dest, f32x8 a)
    {
        _mm256_storeu_ps(dest, a);
    }

    // Interleave the high halves of each 128-bit half of a and b.
    static inline f32x8 unpackhi(f32x8 a, f32x8 b)
    {
        return _mm256_unpackhi_ps(a, b);
    }

    // Interleave the low halves of each 128-bit half of a and b.
    static inline f32x8 unpacklo(f32x8 a, f32x8 b)
    {
        return _mm256_unpacklo_ps(a, b);
    }

    // bitwise

    // ~a & b (note the operand order of andnot).
    static inline f32x8 bitwise_nand(f32x8 a, f32x8 b)
    {
        return _mm256_andnot_ps(a, b);
    }

    static inline f32x8 bitwise_and(f32x8 a, f32x8 b)
    {
        return _mm256_and_ps(a, b);
    }

    static inline f32x8 bitwise_or(f32x8 a, f32x8 b)
    {
        return _mm256_or_ps(a, b);
    }

    static inline f32x8 bitwise_xor(f32x8 a, f32x8 b)
    {
        return _mm256_xor_ps(a, b);
    }

    // ~a, via ternary logic: truth table 0x01 is NOT(s) when all three
    // inputs are the same register.
    static inline f32x8 bitwise_not(f32x8 a)
    {
        const __m256i s = _mm256_castps_si256(a);
        return _mm256_castsi256_ps(_mm256_ternarylogic_epi32(s, s, s, 0x01));
    }

    static inline f32x8 min(f32x8 a, f32x8 b)
    {
        return _mm256_min_ps(a, b);
    }

    static inline f32x8 max(f32x8 a, f32x8 b)
    {
        return _mm256_max_ps(a, b);
    }

    // |a| computed as max(a, -a) rather than sign-mask and, matching the
    // original implementation.
    static inline f32x8 abs(f32x8 a)
    {
        return _mm256_max_ps(a, _mm256_sub_ps(_mm256_setzero_ps(), a));
    }

    static inline f32x8 neg(f32x8 a)
    {
        return _mm256_sub_ps(_mm256_setzero_ps(), a);
    }

    // signum: -1.0f / 0.0f / +1.0f per lane. Builds 1.0 where a != 0 and
    // ORs in a's sign bit.
    static inline f32x8 sign(f32x8 a)
    {
        __m256 sign_mask = _mm256_set1_ps(-0.0f);
        __m256 value_mask = _mm256_cmp_ps(a, _mm256_setzero_ps(), _CMP_NEQ_UQ);
        __m256 sign_bits = _mm256_and_ps(a, sign_mask);
        __m256 value_bits = _mm256_and_ps(_mm256_set1_ps(1.0f), value_mask);
        return _mm256_or_ps(value_bits, sign_bits);
    }

    static inline f32x8 add(f32x8 a, f32x8 b)
    {
        return _mm256_add_ps(a, b);
    }

    static inline f32x8 sub(f32x8 a, f32x8 b)
    {
        return _mm256_sub_ps(a, b);
    }

    static inline f32x8 mul(f32x8 a, f32x8 b)
    {
        return _mm256_mul_ps(a, b);
    }

    static inline f32x8 div(f32x8 a, f32x8 b)
    {
        return _mm256_div_ps(a, b);
    }

    // Divide all lanes by the scalar b.
    static inline f32x8 div(f32x8 a, f32 b)
    {
        return _mm256_div_ps(a, _mm256_set1_ps(b));
    }

    // Horizontal pairwise add/sub within each 128-bit half.
    static inline f32x8 hadd(f32x8 a, f32x8 b)
    {
        return _mm256_hadd_ps(a, b);
    }

    static inline f32x8 hsub(f32x8 a, f32x8 b)
    {
        return _mm256_hsub_ps(a, b);
    }

#if defined(MANGO_ENABLE_FMA3)

    // madd(a, b, c) = a + b * c (single rounding with FMA3).
    static inline f32x8 madd(f32x8 a, f32x8 b, f32x8 c)
    {
        return _mm256_fmadd_ps(b, c, a);
    }

    // msub(a, b, c) = a - b * c (fnmadd computes -(b*c) + a).
    static inline f32x8 msub(f32x8 a, f32x8 b, f32x8 c)
    {
        return _mm256_fnmadd_ps(b, c, a);
    }

#else

    // madd(a, b, c) = a + b * c (two roundings without FMA3).
    static inline f32x8 madd(f32x8 a, f32x8 b, f32x8 c)
    {
        return _mm256_add_ps(a, _mm256_mul_ps(b, c));
    }

    // msub(a, b, c) = a - b * c.
    static inline f32x8 msub(f32x8 a, f32x8 b, f32x8 c)
    {
        return _mm256_sub_ps(a, _mm256_mul_ps(b, c));
    }

#endif

#if defined(MANGO_FAST_MATH)

    // Low-precision approximations (fast-math builds).
    static inline f32x8 rcp(f32x8 a)
    {
        return _mm256_rcp14_ps(a);
    }

    static inline f32x8 rsqrt(f32x8 a)
    {
        return _mm256_rsqrt_ps(a);
    }

#else

    // Hardware estimate refined with one Newton-Raphson step:
    // n' = 2n - a*n^2.
    static inline f32x8 rcp(f32x8 a)
    {
        const __m256 n = _mm256_rcp_ps(a);
        const __m256 m = _mm256_mul_ps(_mm256_mul_ps(n, n), a);
        return _mm256_sub_ps(_mm256_add_ps(n, n), m);
    }

    // Hardware estimate refined with one Newton-Raphson step:
    // n' = 0.5 * n * (3 - a*n^2).
    static inline f32x8 rsqrt(f32x8 a)
    {
        __m256 n = _mm256_rsqrt_ps(a);
        __m256 e = _mm256_mul_ps(_mm256_mul_ps(n, n), a);
        n = _mm256_mul_ps(_mm256_set1_ps(0.5f), n);
        e = _mm256_sub_ps(_mm256_set1_ps(3.0f), e);
        return _mm256_mul_ps(n, e);
    }

#endif

    static inline f32x8 sqrt(f32x8 a)
    {
        return _mm256_sqrt_ps(a);
    }

    // compare
    //
    // NEQ uses an unordered predicate (true if either operand is NaN);
    // EQ/LT/LE/GT/GE use ordered predicates (false on NaN). gt/ge are
    // expressed by swapping operands of lt/le.

    static inline mask32x8 compare_neq(f32x8 a, f32x8 b)
    {
        return _mm256_cmp_ps_mask(a, b, _CMP_NEQ_UQ);
    }

    static inline mask32x8 compare_eq(f32x8 a, f32x8 b)
    {
        return _mm256_cmp_ps_mask(a, b, _CMP_EQ_OQ);
    }

    static inline mask32x8 compare_lt(f32x8 a, f32x8 b)
    {
        return _mm256_cmp_ps_mask(a, b, _CMP_LT_OS);
    }

    static inline mask32x8 compare_le(f32x8 a, f32x8 b)
    {
        return _mm256_cmp_ps_mask(a, b, _CMP_LE_OS);
    }

    static inline mask32x8 compare_gt(f32x8 a, f32x8 b)
    {
        return _mm256_cmp_ps_mask(b, a, _CMP_LT_OS);
    }

    static inline mask32x8 compare_ge(f32x8 a, f32x8 b)
    {
        return _mm256_cmp_ps_mask(b, a, _CMP_LE_OS);
    }

    // Per-lane select: mask ? a : b.
    static inline f32x8 select(mask32x8 mask, f32x8 a, f32x8 b)
    {
        return _mm256_mask_blend_ps(mask, b, a);
    }

    // rounding

    // Round to nearest (ties to even), exceptions suppressed.
    static inline f32x8 round(f32x8 s)
    {
        return _mm256_round_ps(s, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    }

    static inline f32x8 trunc(f32x8 s)
    {
        //return _mm256_roundscale_ps(s, 0x13);
        return _mm256_round_ps(s, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
    }

    static inline f32x8 floor(f32x8 s)
    {
        //return _mm256_roundscale_ps(s, 0x11);
        return _mm256_round_ps(s, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
    }

    static inline f32x8 ceil(f32x8 s)
    {
        //return _mm256_roundscale_ps(s, 0x12);
        return _mm256_round_ps(s, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
    }

    // Fractional part: s - floor(s), always in [0, 1).
    static inline f32x8 fract(f32x8 s)
    {
        return _mm256_sub_ps(s, floor(s));
    }

} // namespace simd
} // namespace mango
// Copyright 2012 the V8 project authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #if V8_TARGET_ARCH_X87 #include "src/base/bits.h" #include "src/bootstrapper.h" #include "src/code-stubs.h" #include "src/codegen.h" #include "src/ic/handler-compiler.h" #include "src/ic/ic.h" #include "src/ic/stub-cache.h" #include "src/isolate.h" #include "src/regexp/jsregexp.h" #include "src/regexp/regexp-macro-assembler.h" #include "src/runtime/runtime.h" #include "src/x87/code-stubs-x87.h" #include "src/x87/frames-x87.h" namespace v8 { namespace internal { static void InitializeArrayConstructorDescriptor( Isolate* isolate, CodeStubDescriptor* descriptor, int constant_stack_parameter_count) { // register state // eax -- number of arguments // edi -- function // ebx -- allocation site with elements kind Address deopt_handler = Runtime::FunctionForId( Runtime::kArrayConstructor)->entry; if (constant_stack_parameter_count == 0) { descriptor->Initialize(deopt_handler, constant_stack_parameter_count, JS_FUNCTION_STUB_MODE); } else { descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count, JS_FUNCTION_STUB_MODE); } } static void InitializeInternalArrayConstructorDescriptor( Isolate* isolate, CodeStubDescriptor* descriptor, int constant_stack_parameter_count) { // register state // eax -- number of arguments // edi -- constructor function Address deopt_handler = Runtime::FunctionForId( Runtime::kInternalArrayConstructor)->entry; if (constant_stack_parameter_count == 0) { descriptor->Initialize(deopt_handler, constant_stack_parameter_count, JS_FUNCTION_STUB_MODE); } else { descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count, JS_FUNCTION_STUB_MODE); } } void ArrayNoArgumentConstructorStub::InitializeDescriptor( CodeStubDescriptor* descriptor) { InitializeArrayConstructorDescriptor(isolate(), descriptor, 0); } void 
ArraySingleArgumentConstructorStub::InitializeDescriptor( CodeStubDescriptor* descriptor) { InitializeArrayConstructorDescriptor(isolate(), descriptor, 1); } void ArrayNArgumentsConstructorStub::InitializeDescriptor( CodeStubDescriptor* descriptor) { InitializeArrayConstructorDescriptor(isolate(), descriptor, -1); } void InternalArrayNoArgumentConstructorStub::InitializeDescriptor( CodeStubDescriptor* descriptor) { InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0); } void InternalArraySingleArgumentConstructorStub::InitializeDescriptor( CodeStubDescriptor* descriptor) { InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1); } void InternalArrayNArgumentsConstructorStub::InitializeDescriptor( CodeStubDescriptor* descriptor) { InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1); } #define __ ACCESS_MASM(masm) void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm, ExternalReference miss) { // Update the static counter each time a new code stub is generated. isolate()->counters()->code_stubs()->Increment(); CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor(); int param_count = descriptor.GetRegisterParameterCount(); { // Call the runtime system in a fresh internal frame. FrameScope scope(masm, StackFrame::INTERNAL); DCHECK(param_count == 0 || eax.is(descriptor.GetRegisterParameter(param_count - 1))); // Push arguments for (int i = 0; i < param_count; ++i) { __ push(descriptor.GetRegisterParameter(i)); } __ CallExternalReference(miss, param_count); } __ ret(0); } void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { // We don't allow a GC during a store buffer overflow so there is no need to // store the registers in any particular way, but we do have to store and // restore them. __ pushad(); if (save_doubles()) { // Save FPU stat in m108byte. 
__ sub(esp, Immediate(108)); __ fnsave(Operand(esp, 0)); } const int argument_count = 1; AllowExternalCallThatCantCauseGC scope(masm); __ PrepareCallCFunction(argument_count, ecx); __ mov(Operand(esp, 0 * kPointerSize), Immediate(ExternalReference::isolate_address(isolate()))); __ CallCFunction( ExternalReference::store_buffer_overflow_function(isolate()), argument_count); if (save_doubles()) { // Restore FPU stat in m108byte. __ frstor(Operand(esp, 0)); __ add(esp, Immediate(108)); } __ popad(); __ ret(0); } class FloatingPointHelper : public AllStatic { public: enum ArgLocation { ARGS_ON_STACK, ARGS_IN_REGISTERS }; // Code pattern for loading a floating point value. Input value must // be either a smi or a heap number object (fp value). Requirements: // operand in register number. Returns operand as floating point number // on FPU stack. static void LoadFloatOperand(MacroAssembler* masm, Register number); // Test if operands are smi or number objects (fp). Requirements: // operand_1 in eax, operand_2 in edx; falls through on float // operands, jumps to the non_float label otherwise. static void CheckFloatOperands(MacroAssembler* masm, Label* non_float, Register scratch); }; void DoubleToIStub::Generate(MacroAssembler* masm) { Register input_reg = this->source(); Register final_result_reg = this->destination(); DCHECK(is_truncating()); Label check_negative, process_64_bits, done, done_no_stash; int double_offset = offset(); // Account for return address and saved regs if input is esp. 
if (input_reg.is(esp)) double_offset += 3 * kPointerSize; MemOperand mantissa_operand(MemOperand(input_reg, double_offset)); MemOperand exponent_operand(MemOperand(input_reg, double_offset + kDoubleSize / 2)); Register scratch1; { Register scratch_candidates[3] = { ebx, edx, edi }; for (int i = 0; i < 3; i++) { scratch1 = scratch_candidates[i]; if (!final_result_reg.is(scratch1) && !input_reg.is(scratch1)) break; } } // Since we must use ecx for shifts below, use some other register (eax) // to calculate the result if ecx is the requested return register. Register result_reg = final_result_reg.is(ecx) ? eax : final_result_reg; // Save ecx if it isn't the return register and therefore volatile, or if it // is the return register, then save the temp register we use in its stead for // the result. Register save_reg = final_result_reg.is(ecx) ? eax : ecx; __ push(scratch1); __ push(save_reg); bool stash_exponent_copy = !input_reg.is(esp); __ mov(scratch1, mantissa_operand); __ mov(ecx, exponent_operand); if (stash_exponent_copy) __ push(ecx); __ and_(ecx, HeapNumber::kExponentMask); __ shr(ecx, HeapNumber::kExponentShift); __ lea(result_reg, MemOperand(ecx, -HeapNumber::kExponentBias)); __ cmp(result_reg, Immediate(HeapNumber::kMantissaBits)); __ j(below, &process_64_bits); // Result is entirely in lower 32-bits of mantissa int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize; __ sub(ecx, Immediate(delta)); __ xor_(result_reg, result_reg); __ cmp(ecx, Immediate(31)); __ j(above, &done); __ shl_cl(scratch1); __ jmp(&check_negative); __ bind(&process_64_bits); // Result must be extracted from shifted 32-bit mantissa __ sub(ecx, Immediate(delta)); __ neg(ecx); if (stash_exponent_copy) { __ mov(result_reg, MemOperand(esp, 0)); } else { __ mov(result_reg, exponent_operand); } __ and_(result_reg, Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32))); __ add(result_reg, Immediate(static_cast<uint32_t>(Double::kHiddenBit >> 32))); __ 
shrd(result_reg, scratch1); __ shr_cl(result_reg); __ test(ecx, Immediate(32)); { Label skip_mov; __ j(equal, &skip_mov, Label::kNear); __ mov(scratch1, result_reg); __ bind(&skip_mov); } // If the double was negative, negate the integer result. __ bind(&check_negative); __ mov(result_reg, scratch1); __ neg(result_reg); if (stash_exponent_copy) { __ cmp(MemOperand(esp, 0), Immediate(0)); } else { __ cmp(exponent_operand, Immediate(0)); } { Label skip_mov; __ j(less_equal, &skip_mov, Label::kNear); __ mov(result_reg, scratch1); __ bind(&skip_mov); } // Restore registers __ bind(&done); if (stash_exponent_copy) { __ add(esp, Immediate(kDoubleSize / 2)); } __ bind(&done_no_stash); if (!final_result_reg.is(result_reg)) { DCHECK(final_result_reg.is(ecx)); __ mov(final_result_reg, result_reg); } __ pop(save_reg); __ pop(scratch1); __ ret(0); } void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, Register number) { Label load_smi, done; __ JumpIfSmi(number, &load_smi, Label::kNear); __ fld_d(FieldOperand(number, HeapNumber::kValueOffset)); __ jmp(&done, Label::kNear); __ bind(&load_smi); __ SmiUntag(number); __ push(number); __ fild_s(Operand(esp, 0)); __ pop(number); __ bind(&done); } void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, Label* non_float, Register scratch) { Label test_other, done; // Test if both operands are floats or smi -> scratch=k_is_float; // Otherwise scratch = k_not_float. __ JumpIfSmi(edx, &test_other, Label::kNear); __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset)); Factory* factory = masm->isolate()->factory(); __ cmp(scratch, factory->heap_number_map()); __ j(not_equal, non_float); // argument in edx is not a number -> NaN __ bind(&test_other); __ JumpIfSmi(eax, &done, Label::kNear); __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset)); __ cmp(scratch, factory->heap_number_map()); __ j(not_equal, non_float); // argument in eax is not a number -> NaN // Fall-through: Both operands are numbers. 
__ bind(&done); } void MathPowStub::Generate(MacroAssembler* masm) { const Register base = edx; const Register scratch = ecx; Label call_runtime; // We will call runtime helper function directly. if (exponent_type() == ON_STACK) { // The arguments are still on the stack. __ bind(&call_runtime); __ TailCallRuntime(Runtime::kMathPowRT); // The stub is called from non-optimized code, which expects the result // as heap number in exponent. __ AllocateHeapNumber(eax, scratch, base, &call_runtime); __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); __ ret(2 * kPointerSize); } else { // Currently it's only called from full-compiler and exponent type is // ON_STACK. UNIMPLEMENTED(); } } void FunctionPrototypeStub::Generate(MacroAssembler* masm) { Label miss; Register receiver = LoadDescriptor::ReceiverRegister(); // With careful management, we won't have to save slot and vector on // the stack. Simply handle the possibly missing case first. // TODO(mvstanton): this code can be more efficient. __ cmp(FieldOperand(receiver, JSFunction::kPrototypeOrInitialMapOffset), Immediate(isolate()->factory()->the_hole_value())); __ j(equal, &miss); __ TryGetFunctionPrototype(receiver, eax, ebx, &miss); __ ret(0); __ bind(&miss); PropertyAccessCompiler::TailCallBuiltin( masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC)); } void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) { // Return address is on the stack. Label slow; Register receiver = LoadDescriptor::ReceiverRegister(); Register key = LoadDescriptor::NameRegister(); Register scratch = eax; DCHECK(!scratch.is(receiver) && !scratch.is(key)); // Check that the key is an array index, that is Uint32. __ test(key, Immediate(kSmiTagMask | kSmiSignMask)); __ j(not_zero, &slow); // Everything is fine, call runtime. __ pop(scratch); __ push(receiver); // receiver __ push(key); // key __ push(scratch); // return address // Perform tail call to the entry. 
__ TailCallRuntime(Runtime::kLoadElementWithInterceptor); __ bind(&slow); PropertyAccessCompiler::TailCallBuiltin( masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC)); } void LoadIndexedStringStub::Generate(MacroAssembler* masm) { // Return address is on the stack. Label miss; Register receiver = LoadDescriptor::ReceiverRegister(); Register index = LoadDescriptor::NameRegister(); Register scratch = edi; DCHECK(!scratch.is(receiver) && !scratch.is(index)); Register result = eax; DCHECK(!result.is(scratch)); DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) && result.is(LoadDescriptor::SlotRegister())); // StringCharAtGenerator doesn't use the result register until it's passed // the different miss possibilities. If it did, we would have a conflict // when FLAG_vector_ics is true. StringCharAtGenerator char_at_generator(receiver, index, scratch, result, &miss, // When not a string. &miss, // When not a number. &miss, // When index out of range. STRING_INDEX_IS_ARRAY_INDEX, RECEIVER_IS_STRING); char_at_generator.GenerateFast(masm); __ ret(0); StubRuntimeCallHelper call_helper; char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper); __ bind(&miss); PropertyAccessCompiler::TailCallBuiltin( masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC)); } void RegExpExecStub::Generate(MacroAssembler* masm) { // Just jump directly to runtime if native RegExp is not selected at compile // time or if regexp entry in generated code is turned off runtime switch or // at compilation. #ifdef V8_INTERPRETED_REGEXP __ TailCallRuntime(Runtime::kRegExpExec); #else // V8_INTERPRETED_REGEXP // Stack frame on entry. 
  //  esp[0]: return address
  //  esp[4]: last_match_info (expected JSArray)
  //  esp[8]: previous index
  //  esp[12]: subject string
  //  esp[16]: JSRegExp object

  // Offsets of the four stub arguments relative to esp on entry.
  static const int kLastMatchInfoOffset = 1 * kPointerSize;
  static const int kPreviousIndexOffset = 2 * kPointerSize;
  static const int kSubjectOffset = 3 * kPointerSize;
  static const int kJSRegExpOffset = 4 * kPointerSize;

  Label runtime;
  Factory* factory = isolate()->factory();

  // Ensure that a RegExp stack is allocated.
  ExternalReference address_of_regexp_stack_memory_address =
      ExternalReference::address_of_regexp_stack_memory_address(isolate());
  ExternalReference address_of_regexp_stack_memory_size =
      ExternalReference::address_of_regexp_stack_memory_size(isolate());
  __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
  __ test(ebx, ebx);
  __ j(zero, &runtime);

  // Check that the first argument is a JSRegExp object.
  __ mov(eax, Operand(esp, kJSRegExpOffset));
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfSmi(eax, &runtime);
  __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
  __ j(not_equal, &runtime);

  // Check that the RegExp has been compiled (data contains a fixed array).
  __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
  if (FLAG_debug_code) {
    __ test(ecx, Immediate(kSmiTagMask));
    __ Check(not_zero, kUnexpectedTypeForRegExpDataFixedArrayExpected);
    __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
    __ Check(equal, kUnexpectedTypeForRegExpDataFixedArrayExpected);
  }

  // ecx: RegExp data (FixedArray)
  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
  __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
  __ cmp(ebx, Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
  __ j(not_equal, &runtime);

  // ecx: RegExp data (FixedArray)
  // Check that the number of captures fit in the static offsets vector buffer.
  __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
  // Check (number_of_captures + 1) * 2 <= offsets vector size
  // Or              number_of_captures * 2 <= offsets vector size - 2
  // Multiplying by 2 comes for free since edx is smi-tagged.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
  __ cmp(edx, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
  __ j(above, &runtime);

  // Reset offset for possibly sliced string. edi stays 0 unless case (11)
  // below replaces it with SlicedString::kOffsetOffset.
  __ Move(edi, Immediate(0));
  __ mov(eax, Operand(esp, kSubjectOffset));
  __ JumpIfSmi(eax, &runtime);
  __ mov(edx, eax);  // Make a copy of the original subject string.

  // eax: subject string
  // edx: subject string
  // ecx: RegExp data (FixedArray)
  // Handle subject string according to its encoding and representation:
  // (1) Sequential two byte?  If yes, go to (9).
  // (2) Sequential one byte?  If yes, go to (5).
  // (3) Sequential or cons?  If not, go to (6).
  // (4) Cons string.  If the string is flat, replace subject with first string
  //     and go to (1). Otherwise bail out to runtime.
  // (5) One byte sequential.  Load regexp code for one byte.
  // (E) Carry on.
  /// [...]

  // Deferred code at the end of the stub:
  // (6) Long external string?  If not, go to (10).
  // (7) External string.  Make it, offset-wise, look like a sequential string.
  // (8) Is the external string one byte?  If yes, go to (5).
  // (9) Two byte sequential.  Load regexp code for two byte. Go to (E).
  // (10) Short external string or not a string?  If yes, bail out to runtime.
  // (11) Sliced string.  Replace subject with parent. Go to (1).

  Label seq_one_byte_string /* 5 */, seq_two_byte_string /* 9 */,
      external_string /* 7 */, check_underlying /* 1 */,
      not_seq_nor_cons /* 6 */, check_code /* E */, not_long_external /* 10 */;

  __ bind(&check_underlying);
  // (1) Sequential two byte?  If yes, go to (9).
  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
  __ and_(ebx, kIsNotStringMask |
               kStringRepresentationMask |
               kStringEncodingMask |
               kShortExternalStringMask);
  STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
  __ j(zero, &seq_two_byte_string);  // Go to (9).

  // (2) Sequential one byte?  If yes, go to (5).
  // Any other sequential string must be one byte.
  __ and_(ebx, Immediate(kIsNotStringMask |
                         kStringRepresentationMask |
                         kShortExternalStringMask));
  __ j(zero, &seq_one_byte_string, Label::kNear);  // Go to (5).

  // (3) Sequential or cons?  If not, go to (6).
  // We check whether the subject string is a cons, since sequential strings
  // have already been covered.
  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
  __ cmp(ebx, Immediate(kExternalStringTag));
  __ j(greater_equal, &not_seq_nor_cons);  // Go to (6).

  // (4) Cons string.  Check that it's flat.
  // Replace subject with first string and reload instance type.
  __ cmp(FieldOperand(eax, ConsString::kSecondOffset), factory->empty_string());
  __ j(not_equal, &runtime);
  __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
  __ jmp(&check_underlying);

  // eax: sequential subject string (or look-alike, external string)
  // edx: original subject string
  // ecx: RegExp data (FixedArray)
  // (5) One byte sequential.  Load regexp code for one byte.
  __ bind(&seq_one_byte_string);
  // Load previous index and check range before edx is overwritten.  We have
  // to use edx instead of eax here because it might have been only made to
  // look like a sequential string when it actually is an external string.
  __ mov(ebx, Operand(esp, kPreviousIndexOffset));
  __ JumpIfNotSmi(ebx, &runtime);
  __ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
  __ j(above_equal, &runtime);
  __ mov(edx, FieldOperand(ecx, JSRegExp::kDataOneByteCodeOffset));
  __ Move(ecx, Immediate(1));  // Type is one byte.

  // (E) Carry on.  String handling is done.
  __ bind(&check_code);
  // edx: irregexp code
  // Check that the irregexp code has been generated for the actual string
  // encoding. If it has, the field contains a code object otherwise it contains
  // a smi (code flushing support).
  __ JumpIfSmi(edx, &runtime);

  // eax: subject string
  // ebx: previous index (smi)
  // edx: code
  // ecx: encoding of subject string (1 if one_byte, 0 if two_byte);
  // All checks done. Now push arguments for native regexp code.
  Counters* counters = isolate()->counters();
  __ IncrementCounter(counters->regexp_entry_native(), 1);

  // Isolates: note we add an additional parameter here (isolate pointer).
  static const int kRegExpExecuteArguments = 9;
  __ EnterApiExitFrame(kRegExpExecuteArguments);

  // Argument 9: Pass current isolate address.
  __ mov(Operand(esp, 8 * kPointerSize),
         Immediate(ExternalReference::isolate_address(isolate())));

  // Argument 8: Indicate that this is a direct call from JavaScript.
  __ mov(Operand(esp, 7 * kPointerSize), Immediate(1));

  // Argument 7: Start (high end) of backtracking stack memory area.
  __ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address));
  __ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size));
  __ mov(Operand(esp, 6 * kPointerSize), esi);

  // Argument 6: Set the number of capture registers to zero to force global
  // regexps to behave as non-global.  This does not affect non-global regexps.
  __ mov(Operand(esp, 5 * kPointerSize), Immediate(0));

  // Argument 5: static offsets vector buffer.
  __ mov(Operand(esp, 4 * kPointerSize),
         Immediate(ExternalReference::address_of_static_offsets_vector(
             isolate())));

  // Argument 2: Previous index.
  __ SmiUntag(ebx);
  __ mov(Operand(esp, 1 * kPointerSize), ebx);

  // Argument 1: Original subject string.
  // The original subject is in the previous stack frame. Therefore we have to
  // use ebp, which points exactly to one pointer size below the previous esp.
  // (Because creating a new stack frame pushes the previous ebp onto the stack
  // and thereby moves up esp by one kPointerSize.)
  __ mov(esi, Operand(ebp, kSubjectOffset + kPointerSize));
  __ mov(Operand(esp, 0 * kPointerSize), esi);

  // esi: original subject string
  // eax: underlying subject string
  // ebx: previous index
  // ecx: encoding of subject string (1 if one_byte 0 if two_byte);
  // edx: code
  // Argument 4: End of string data
  // Argument 3: Start of string data
  // Prepare start and end index of the input.
  // Load the length from the original sliced string if that is the case.
  __ mov(esi, FieldOperand(esi, String::kLengthOffset));
  __ add(esi, edi);  // Calculate input end wrt offset.
  __ SmiUntag(edi);
  __ add(ebx, edi);  // Calculate input start wrt offset.

  // ebx: start index of the input string
  // esi: end index of the input string
  Label setup_two_byte, setup_rest;
  __ test(ecx, ecx);
  __ j(zero, &setup_two_byte, Label::kNear);
  // One-byte: indices are byte offsets once untagged.
  __ SmiUntag(esi);
  __ lea(ecx, FieldOperand(eax, esi, times_1, SeqOneByteString::kHeaderSize));
  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
  __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqOneByteString::kHeaderSize));
  __ mov(Operand(esp, 2 * kPointerSize), ecx);  // Argument 3.
  __ jmp(&setup_rest, Label::kNear);

  __ bind(&setup_two_byte);
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);  // esi is smi (powered by 2).
  __ lea(ecx, FieldOperand(eax, esi, times_1, SeqTwoByteString::kHeaderSize));
  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
  __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
  __ mov(Operand(esp, 2 * kPointerSize), ecx);  // Argument 3.

  __ bind(&setup_rest);

  // Locate the code entry and call it.
  __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
  __ call(edx);

  // Drop arguments and come back to JS mode.
  __ LeaveApiExitFrame(true);

  // Check the result.
  Label success;
  __ cmp(eax, 1);
  // We expect exactly one result since we force the called regexp to behave
  // as non-global.
  __ j(equal, &success);
  Label failure;
  __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
  __ j(equal, &failure);
  __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
  // If not exception it can only be retry. Handle that in the runtime system.
  __ j(not_equal, &runtime);
  // Result must now be exception. If there is no pending exception already a
  // stack overflow (on the backtrack stack) was detected in RegExp code but
  // haven't created the exception yet. Handle that in the runtime system.
  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                      isolate());
  __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
  __ mov(eax, Operand::StaticVariable(pending_exception));
  __ cmp(edx, eax);
  __ j(equal, &runtime);

  // For exception, throw the exception again.
  __ TailCallRuntime(Runtime::kRegExpExecReThrow);

  __ bind(&failure);
  // For failure to match, return null.
  __ mov(eax, factory->null_value());
  __ ret(4 * kPointerSize);

  // Load RegExp data.
  __ bind(&success);
  __ mov(eax, Operand(esp, kJSRegExpOffset));
  __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
  __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
  // Calculate number of capture registers (number_of_captures + 1) * 2.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  __ add(edx, Immediate(2));  // edx was a smi.

  // edx: Number of capture registers
  // Load last_match_info which is still known to be a fast case JSArray.
  // Check that the fourth object is a JSArray object.
  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
  __ JumpIfSmi(eax, &runtime);
  __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
  __ j(not_equal, &runtime);
  // Check that the JSArray is in fast case.
  __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
  __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
  __ cmp(eax, factory->fixed_array_map());
  __ j(not_equal, &runtime);
  // Check that the last match info has space for the capture registers and the
  // additional information.
  __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
  __ SmiUntag(eax);
  __ sub(eax, Immediate(RegExpImpl::kLastMatchOverhead));
  __ cmp(edx, eax);
  __ j(greater, &runtime);

  // ebx: last_match_info backing store (FixedArray)
  // edx: number of capture registers
  // Store the capture count.
  __ SmiTag(edx);  // Number of capture registers to smi.
  __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
  __ SmiUntag(edx);  // Number of capture registers back from smi.
  // Store last subject and last input.
  __ mov(eax, Operand(esp, kSubjectOffset));
  __ mov(ecx, eax);
  __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
  __ RecordWriteField(ebx, RegExpImpl::kLastSubjectOffset, eax, edi,
                      kDontSaveFPRegs);
  __ mov(eax, ecx);
  __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
  __ RecordWriteField(ebx, RegExpImpl::kLastInputOffset, eax, edi,
                      kDontSaveFPRegs);

  // Get the static offsets vector filled by the native regexp code.
  ExternalReference address_of_static_offsets_vector =
      ExternalReference::address_of_static_offsets_vector(isolate());
  __ mov(ecx, Immediate(address_of_static_offsets_vector));

  // ebx: last_match_info backing store (FixedArray)
  // ecx: offsets vector
  // edx: number of capture registers
  Label next_capture, done;
  // Capture register counter starts from number of capture registers and
  // counts down until wraping after zero.
__ bind(&seq_two_byte_string); // Load previous index and check range before edx is overwritten. We have // to use edx instead of eax here because it might have been only made to // look like a sequential string when it actually is an external string. __ mov(ebx, Operand(esp, kPreviousIndexOffset)); __ JumpIfNotSmi(ebx, &runtime); __ cmp(ebx, FieldOperand(edx, String::kLengthOffset)); __ j(above_equal, &runtime); __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset)); __ Move(ecx, Immediate(0)); // Type is two byte. __ jmp(&check_code); // Go to (E). // (10) Not a string or a short external string? If yes, bail out to runtime. __ bind(&not_long_external); // Catch non-string subject or short external string. STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0); __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag)); __ j(not_zero, &runtime); // (11) Sliced string. Replace subject with parent. Go to (1). // Load offset into edi and replace subject string with parent. __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset)); __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset)); __ jmp(&check_underlying); // Go to (1). #endif // V8_INTERPRETED_REGEXP } static int NegativeComparisonResult(Condition cc) { DCHECK(cc != equal); DCHECK((cc == less) || (cc == less_equal) || (cc == greater) || (cc == greater_equal)); return (cc == greater || cc == greater_equal) ? LESS : GREATER; } static void CheckInputType(MacroAssembler* masm, Register input, CompareICState::State expected, Label* fail) { Label ok; if (expected == CompareICState::SMI) { __ JumpIfNotSmi(input, fail); } else if (expected == CompareICState::NUMBER) { __ JumpIfSmi(input, &ok); __ cmp(FieldOperand(input, HeapObject::kMapOffset), Immediate(masm->isolate()->factory()->heap_number_map())); __ j(not_equal, fail); } // We could be strict about internalized/non-internalized here, but as long as // hydrogen doesn't care, the stub doesn't have to care either. 
  __ bind(&ok);
}


// Jumps to |label| unless |object| is an internalized string. Clobbers
// |scratch| with the instance type.
static void BranchIfNotInternalizedString(MacroAssembler* masm,
                                          Label* label,
                                          Register object,
                                          Register scratch) {
  __ JumpIfSmi(object, label);
  __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ test(scratch, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
  __ j(not_zero, label);
}


// Generic comparison stub: compares edx (left) and eax (right) and returns
// a smi-like integer in eax whose sign encodes the ordering; falls back to
// the runtime for the cases the fast paths cannot decide.
void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
  Label runtime_call, check_unequal_objects;
  Condition cc = GetCondition();

  Label miss;
  CheckInputType(masm, edx, left(), &miss);
  CheckInputType(masm, eax, right(), &miss);

  // Compare two smis.
  Label non_smi, smi_done;
  __ mov(ecx, edx);
  __ or_(ecx, eax);
  __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
  __ sub(edx, eax);  // Return on the result of the subtraction.
  __ j(no_overflow, &smi_done, Label::kNear);
  __ not_(edx);  // Correct sign in case of overflow. edx is never 0 here.
  __ bind(&smi_done);
  __ mov(eax, edx);
  __ ret(0);
  __ bind(&non_smi);

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Identical objects can be compared fast, but there are some tricky cases
  // for NaN and undefined.
  Label generic_heap_number_comparison;
  {
    Label not_identical;
    __ cmp(eax, edx);
    __ j(not_equal, &not_identical);

    if (cc != equal) {
      // Check for undefined.  undefined OP undefined is false even though
      // undefined == undefined.
      __ cmp(edx, isolate()->factory()->undefined_value());
      Label check_for_nan;
      __ j(not_equal, &check_for_nan, Label::kNear);
      __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
      __ ret(0);
      __ bind(&check_for_nan);
    }

    // Test for NaN. Compare heap numbers in a general way,
    // to handle NaNs correctly.
    __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
           Immediate(isolate()->factory()->heap_number_map()));
    __ j(equal, &generic_heap_number_comparison, Label::kNear);
    if (cc != equal) {
      __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
      __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
      // Call runtime on identical JSObjects.  Otherwise return equal.
      __ cmpb(ecx, static_cast<uint8_t>(FIRST_JS_RECEIVER_TYPE));
      __ j(above_equal, &runtime_call, Label::kFar);
      // Call runtime on identical symbols since we need to throw a TypeError.
      __ cmpb(ecx, static_cast<uint8_t>(SYMBOL_TYPE));
      __ j(equal, &runtime_call, Label::kFar);
      // Call runtime on identical SIMD values since we must throw a TypeError.
      __ cmpb(ecx, static_cast<uint8_t>(SIMD128_VALUE_TYPE));
      __ j(equal, &runtime_call, Label::kFar);
    }
    __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
    __ ret(0);

    __ bind(&not_identical);
  }

  // Strict equality can quickly decide whether objects are equal.
  // Non-strict object equality is slower, so it is handled later in the stub.
  if (cc == equal && strict()) {
    Label slow;  // Fallthrough label.
    Label not_smis;
    // If we're doing a strict equality comparison, we don't have to do
    // type conversion, so we generate code to do fast comparison for objects
    // and oddballs. Non-smi numbers and strings still go through the usual
    // slow-case code.
    // If either is a Smi (we know that not both are), then they can only
    // be equal if the other is a HeapNumber. If so, use the slow case.
    STATIC_ASSERT(kSmiTag == 0);
    DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
    __ mov(ecx, Immediate(kSmiTagMask));
    __ and_(ecx, eax);
    __ test(ecx, edx);
    __ j(not_zero, &not_smis, Label::kNear);
    // One operand is a smi.

    // Check whether the non-smi is a heap number.
    STATIC_ASSERT(kSmiTagMask == 1);
    // ecx still holds eax & kSmiTag, which is either zero or one.
    __ sub(ecx, Immediate(0x01));
    __ mov(ebx, edx);
    __ xor_(ebx, eax);
    __ and_(ebx, ecx);  // ebx holds either 0 or eax ^ edx.
    __ xor_(ebx, eax);  // if eax was smi, ebx is now edx, else eax.
    // Check if the non-smi operand is a heap number.
    __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
           Immediate(isolate()->factory()->heap_number_map()));
    // If heap number, handle it in the slow case.
    __ j(equal, &slow, Label::kNear);
    // Return non-equal (ebx is not zero)
    __ mov(eax, ebx);
    __ ret(0);

    __ bind(&not_smis);
    // If either operand is a JSObject or an oddball value, then they are not
    // equal since their pointers are different
    // There is no test for undetectability in strict equality.

    // Get the type of the first operand.
    // If the first object is a JS object, we have done pointer comparison.
    Label first_non_object;
    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
    __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
    __ j(below, &first_non_object, Label::kNear);

    // Return non-zero (eax is not zero)
    Label return_not_equal;
    STATIC_ASSERT(kHeapObjectTag != 0);
    __ bind(&return_not_equal);
    __ ret(0);

    __ bind(&first_non_object);
    // Check for oddballs: true, false, null, undefined.
    __ CmpInstanceType(ecx, ODDBALL_TYPE);
    __ j(equal, &return_not_equal);

    __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ecx);
    __ j(above_equal, &return_not_equal);

    // Check for oddballs: true, false, null, undefined.
    __ CmpInstanceType(ecx, ODDBALL_TYPE);
    __ j(equal, &return_not_equal);

    // Fall through to the general case.
    __ bind(&slow);
  }

  // Generate the number comparison code.
  Label non_number_comparison;
  Label unordered;
  __ bind(&generic_heap_number_comparison);

  FloatingPointHelper::CheckFloatOperands(
      masm, &non_number_comparison, ebx);
  FloatingPointHelper::LoadFloatOperand(masm, eax);
  FloatingPointHelper::LoadFloatOperand(masm, edx);
  __ FCmp();

  // Don't base result on EFLAGS when a NaN is involved.
  __ j(parity_even, &unordered, Label::kNear);

  Label below_label, above_label;
  // Return a result of -1, 0, or 1, based on EFLAGS.
  __ j(below, &below_label, Label::kNear);
  __ j(above, &above_label, Label::kNear);

  __ Move(eax, Immediate(0));
  __ ret(0);

  __ bind(&below_label);
  __ mov(eax, Immediate(Smi::FromInt(-1)));
  __ ret(0);

  __ bind(&above_label);
  __ mov(eax, Immediate(Smi::FromInt(1)));
  __ ret(0);

  // If one of the numbers was NaN, then the result is always false.
  // The cc is never not-equal.
  __ bind(&unordered);
  DCHECK(cc != not_equal);
  if (cc == less || cc == less_equal) {
    __ mov(eax, Immediate(Smi::FromInt(1)));
  } else {
    __ mov(eax, Immediate(Smi::FromInt(-1)));
  }
  __ ret(0);

  // The number comparison code did not provide a valid result.
  __ bind(&non_number_comparison);

  // Fast negative check for internalized-to-internalized equality.
  Label check_for_strings;
  if (cc == equal) {
    BranchIfNotInternalizedString(masm, &check_for_strings, eax, ecx);
    BranchIfNotInternalizedString(masm, &check_for_strings, edx, ecx);

    // We've already checked for object identity, so if both operands
    // are internalized they aren't equal. Register eax already holds a
    // non-zero value, which indicates not equal, so just return.
    __ ret(0);
  }

  __ bind(&check_for_strings);

  __ JumpIfNotBothSequentialOneByteStrings(edx, eax, ecx, ebx,
                                           &check_unequal_objects);

  // Inline comparison of one-byte strings.
  if (cc == equal) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, edx, eax, ecx, ebx);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, edx, eax, ecx, ebx,
                                                    edi);
  }
#ifdef DEBUG
  __ Abort(kUnexpectedFallThroughFromStringComparison);
#endif

  __ bind(&check_unequal_objects);
  if (cc == equal && !strict()) {
    // Non-strict equality.  Objects are unequal if
    // they are both JSObjects and not undetectable,
    // and their pointers are different.
    Label return_unequal, undetectable;
    // At most one is a smi, so we can test for smi by adding the two.
    // A smi plus a heap object has the low bit set, a heap object plus
    // a heap object has the low bit clear.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagMask == 1);
    __ lea(ecx, Operand(eax, edx, times_1, 0));
    __ test(ecx, Immediate(kSmiTagMask));
    __ j(not_zero, &runtime_call, Label::kNear);

    __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
    __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));

    __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    __ j(not_zero, &undetectable, Label::kNear);
    __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    __ j(not_zero, &return_unequal, Label::kNear);

    __ CmpInstanceType(ebx, FIRST_JS_RECEIVER_TYPE);
    __ j(below, &runtime_call, Label::kNear);
    __ CmpInstanceType(ecx, FIRST_JS_RECEIVER_TYPE);
    __ j(below, &runtime_call, Label::kNear);

    __ bind(&return_unequal);
    // Return non-equal by returning the non-zero object pointer in eax.
    __ ret(0);  // eax, edx were pushed

    __ bind(&undetectable);
    __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    __ j(zero, &return_unequal, Label::kNear);
    __ Move(eax, Immediate(EQUAL));
    __ ret(0);  // eax, edx were pushed
  }

  __ bind(&runtime_call);

  if (cc == equal) {
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ Push(edx);
      __ Push(eax);
      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
    }
    // Turn true into 0 and false into some non-zero value.
    STATIC_ASSERT(EQUAL == 0);
    __ sub(eax, Immediate(isolate()->factory()->true_value()));
    __ Ret();
  } else {
    // Push arguments below the return address.
    __ pop(ecx);
    __ push(edx);
    __ push(eax);
    __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));

    // Restore return address on the stack.
    __ push(ecx);
    // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
    // tagged as a small integer.
    __ TailCallRuntime(Runtime::kCompare);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


// Calls |stub| while preserving eax (smi-tagged across the call), edi, edx
// and ebx around the call inside an internal frame.
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
  // eax : number of arguments to the construct function
  // ebx : feedback vector
  // edx : slot in feedback vector (Smi)
  // edi : the function to call
  {
    FrameScope scope(masm, StackFrame::INTERNAL);

    // Number-of-arguments register must be smi-tagged to call out.
    __ SmiTag(eax);
    __ push(eax);
    __ push(edi);
    __ push(edx);
    __ push(ebx);

    __ CallStub(stub);

    __ pop(ebx);
    __ pop(edx);
    __ pop(edi);
    __ pop(eax);
    __ SmiUntag(eax);
  }
}


static void GenerateRecordCallTarget(MacroAssembler* masm) {
  // Cache the called function in a feedback vector slot.  Cache states
  // are uninitialized, monomorphic (indicated by a JSFunction), and
  // megamorphic.
  // eax : number of arguments to the construct function
  // ebx : feedback vector
  // edx : slot in feedback vector (Smi)
  // edi : the function to call
  Isolate* isolate = masm->isolate();
  Label initialize, done, miss, megamorphic, not_array_function;

  // Load the cache state into ecx.
  __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
                           FixedArray::kHeaderSize));

  // A monomorphic cache hit or an already megamorphic state: invoke the
  // function without changing the state.
  // We don't know if ecx is a WeakCell or a Symbol, but it's harmless to read
  // at this position in a symbol (see static asserts in
  // type-feedback-vector.h).
  Label check_allocation_site;
  __ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset));
  __ j(equal, &done, Label::kFar);
  __ CompareRoot(ecx, Heap::kmegamorphic_symbolRootIndex);
  __ j(equal, &done, Label::kFar);
  __ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
                 Heap::kWeakCellMapRootIndex);
  __ j(not_equal, &check_allocation_site);

  // If the weak cell is cleared, we have a new chance to become monomorphic.
  __ JumpIfSmi(FieldOperand(ecx, WeakCell::kValueOffset), &initialize);
  __ jmp(&megamorphic);

  __ bind(&check_allocation_site);
  // If we came here, we need to see if we are the array function.
  // If we didn't have a matching function, and we didn't find the megamorph
  // sentinel, then we have in the slot either some other function or an
  // AllocationSite.
  __ CompareRoot(FieldOperand(ecx, 0), Heap::kAllocationSiteMapRootIndex);
  __ j(not_equal, &miss);

  // Make sure the function is the Array() function
  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
  __ cmp(edi, ecx);
  __ j(not_equal, &megamorphic);
  __ jmp(&done, Label::kFar);

  __ bind(&miss);

  // A monomorphic miss (i.e, here the cache is not uninitialized) goes
  // megamorphic.
  __ CompareRoot(ecx, Heap::kuninitialized_symbolRootIndex);
  __ j(equal, &initialize);
  // MegamorphicSentinel is an immortal immovable object (undefined) so no
  // write-barrier is needed.
  __ bind(&megamorphic);
  __ mov(
      FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
      Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
  __ jmp(&done, Label::kFar);

  // An uninitialized cache is patched with the function or sentinel to
  // indicate the ElementsKind if function is the Array constructor.
  __ bind(&initialize);
  // Make sure the function is the Array() function
  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
  __ cmp(edi, ecx);
  __ j(not_equal, &not_array_function);

  // The target function is the Array constructor,
  // Create an AllocationSite if we don't already have it, store it in the
  // slot.
  CreateAllocationSiteStub create_stub(isolate);
  CallStubInRecordCallTarget(masm, &create_stub);
  __ jmp(&done);

  __ bind(&not_array_function);
  CreateWeakCellStub weak_cell_stub(isolate);
  CallStubInRecordCallTarget(masm, &weak_cell_stub);
  __ bind(&done);
}


// Construct-call stub: records the call target in the feedback vector for
// JSFunction constructors, loads any AllocationSite feedback into ebx, and
// tail-calls the function-specific construct stub (or the generic Construct
// builtin for non-JSFunction callees).
void CallConstructStub::Generate(MacroAssembler* masm) {
  // eax : number of arguments
  // ebx : feedback vector
  // edx : slot in feedback vector (Smi, for RecordCallTarget)
  // edi : constructor function

  Label non_function;
  // Check that function is not a smi.
  __ JumpIfSmi(edi, &non_function);
  // Check that function is a JSFunction.
  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
  __ j(not_equal, &non_function);

  GenerateRecordCallTarget(masm);

  Label feedback_register_initialized;
  // Put the AllocationSite from the feedback vector into ebx, or undefined.
  __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
                           FixedArray::kHeaderSize));
  Handle<Map> allocation_site_map =
      isolate()->factory()->allocation_site_map();
  __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
  __ j(equal, &feedback_register_initialized);
  __ mov(ebx, isolate()->factory()->undefined_value());
  __ bind(&feedback_register_initialized);

  __ AssertUndefinedOrAllocationSite(ebx);

  // Pass new target to construct stub.
  __ mov(edx, edi);

  // Tail call to the function-specific construct stub (still in the caller
  // context at this point).
  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
  __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
  __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
  __ jmp(ecx);

  __ bind(&non_function);
  __ mov(edx, edi);
  __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}


// Handles a call IC hit on the Array() function: bumps the call count for
// the slot and tail-calls the array constructor stub with the feedback
// vector entry as the AllocationSite argument.
void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
  // edi - function
  // edx - slot id
  // ebx - vector
  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
  __ cmp(edi, ecx);
  __ j(not_equal, miss);

  __ mov(eax, arg_count());
  // Reload ecx.
__ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize)); // Increment the call count for monomorphic function calls. __ add(FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize + kPointerSize), Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement))); __ mov(ebx, ecx); __ mov(edx, edi); ArrayConstructorStub stub(masm->isolate(), arg_count()); __ TailCallStub(&stub); // Unreachable. } void CallICStub::Generate(MacroAssembler* masm) { // edi - function // edx - slot id // ebx - vector Isolate* isolate = masm->isolate(); Label extra_checks_or_miss, call, call_function; int argc = arg_count(); ParameterCount actual(argc); // The checks. First, does edi match the recorded monomorphic target? __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize)); // We don't know that we have a weak cell. We might have a private symbol // or an AllocationSite, but the memory is safe to examine. // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to // FixedArray. // WeakCell::kValueOffset - contains a JSFunction or Smi(0) // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not // computed, meaning that it can't appear to be a pointer. If the low bit is // 0, then hash is computed, but the 0 bit prevents the field from appearing // to be a pointer. STATIC_ASSERT(WeakCell::kSize >= kPointerSize); STATIC_ASSERT(AllocationSite::kTransitionInfoOffset == WeakCell::kValueOffset && WeakCell::kValueOffset == Symbol::kHashFieldSlot); __ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset)); __ j(not_equal, &extra_checks_or_miss); // The compare above could have been a SMI/SMI comparison. Guard against this // convincing us that we have a monomorphic JSFunction. __ JumpIfSmi(edi, &extra_checks_or_miss); // Increment the call count for monomorphic function calls. 
__ add(FieldOperand(ebx, edx, times_half_pointer_size,
                      FixedArray::kHeaderSize + kPointerSize),
         Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));

  // Monomorphic hit: dispatch through the CallFunction builtin.
  __ bind(&call_function);
  __ Set(eax, argc);
  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
                                                    tail_call_mode()),
          RelocInfo::CODE_TARGET);

  __ bind(&extra_checks_or_miss);
  Label uninitialized, miss, not_allocation_site;

  __ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
  __ j(equal, &call);

  // Check if we have an allocation site.
  __ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
                 Heap::kAllocationSiteMapRootIndex);
  __ j(not_equal, &not_allocation_site);

  // We have an allocation site.
  HandleArrayCase(masm, &miss);

  __ bind(&not_allocation_site);

  // The following cases attempt to handle MISS cases without going to the
  // runtime.
  if (FLAG_trace_ic) {
    __ jmp(&miss);
  }

  __ cmp(ecx, Immediate(TypeFeedbackVector::UninitializedSentinel(isolate)));
  __ j(equal, &uninitialized);

  // We are going megamorphic. If the feedback is a JSFunction, it is fine
  // to handle it here. More complex cases are dealt with in the runtime.
  __ AssertNotSmi(ecx);
  __ CmpObjectType(ecx, JS_FUNCTION_TYPE, ecx);
  __ j(not_equal, &miss);
  // Overwrite the feedback slot with the megamorphic sentinel.
  __ mov(
      FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
      Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));

  // Megamorphic: dispatch through the generic Call builtin.
  __ bind(&call);
  __ Set(eax, argc);
  __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
          RelocInfo::CODE_TARGET);

  __ bind(&uninitialized);

  // We are going monomorphic, provided we actually have a JSFunction.
  __ JumpIfSmi(edi, &miss);

  // Goto miss case if we do not have a function.
  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
  __ j(not_equal, &miss);

  // Make sure the function is not the Array() function, which requires special
  // behavior on MISS.
  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
  __ cmp(edi, ecx);
  __ j(equal, &miss);

  // Make sure the function belongs to the same native context.
__ mov(ecx, FieldOperand(edi, JSFunction::kContextOffset));
  __ mov(ecx, ContextOperand(ecx, Context::NATIVE_CONTEXT_INDEX));
  __ cmp(ecx, NativeContextOperand());
  __ j(not_equal, &miss);

  // Initialize the call counter.
  __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
                      FixedArray::kHeaderSize + kPointerSize),
         Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));

  // Store the function. Use a stub since we need a frame for allocation.
  // ebx - vector
  // edx - slot
  // edi - function
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    CreateWeakCellStub create_stub(isolate);
    __ push(edi);
    __ CallStub(&create_stub);
    __ pop(edi);
  }

  __ jmp(&call_function);

  // We are here because tracing is on or we encountered a MISS case we can't
  // handle here.
  __ bind(&miss);
  GenerateMiss(masm);

  __ jmp(&call);

  // Unreachable
  __ int3();
}

// Calls the CallIC_Miss runtime entry to update feedback, leaving the
// resolved callee in edi.
void CallICStub::GenerateMiss(MacroAssembler* masm) {
  FrameScope scope(masm, StackFrame::INTERNAL);

  // Push the function and feedback info.
  __ push(edi);
  __ push(ebx);
  __ push(edx);

  // Call the entry.
  __ CallRuntime(Runtime::kCallIC_Miss);

  // Move result to edi and exit the internal frame.
  __ mov(edi, eax);
}

bool CEntryStub::NeedsImmovableCode() { return false; }

// Pre-generates the stubs that must exist before other code generation runs.
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  CEntryStub::GenerateAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);

  // It is important that the store buffer overflow stubs are generated first.
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
  CreateWeakCellStub::GenerateAheadOfTime(isolate);
  BinaryOpICStub::GenerateAheadOfTime(isolate);
  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
  StoreFastElementStub::GenerateAheadOfTime(isolate);
  TypeofStub::GenerateAheadOfTime(isolate);
}

// Ensures the save-doubles variant of CEntryStub exists in the stub cache.
void CodeStub::GenerateFPStubs(Isolate* isolate) {
  CEntryStub save_doubles(isolate, 1, kSaveFPRegs);
  // Stubs might already be in the snapshot, detect that and don't regenerate,
  // which would lead to code stub initialization state being messed up.
  Code* save_doubles_code;
  if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
    save_doubles_code = *(save_doubles.GetCode());
  }
  isolate->set_fp_stubs_generated(true);
}

void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
  stub.GetCode();
}

// Emits the JavaScript-to-C++ transition trampoline: sets up an exit frame,
// calls the C function in ebx, and routes exceptions to the unwinder.
void CEntryStub::Generate(MacroAssembler* masm) {
  // eax: number of arguments including receiver
  // ebx: pointer to C function  (C callee-saved)
  // ebp: frame pointer  (restored after C call)
  // esp: stack pointer  (restored after C call)
  // esi: current context (C callee-saved)
  // edi: JS function of the caller (C callee-saved)
  //
  // If argv_in_register():
  // ecx: pointer to the first argument

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Reserve space on the stack for the three arguments passed to the call. If
  // result size is greater than can be returned in registers, also reserve
  // space for the hidden argument for the result location, and space for the
  // result itself.
  int arg_stack_space = result_size() < 3 ? 3 : 4 + result_size();

  // Enter the exit frame that transitions from JavaScript to C++.
  if (argv_in_register()) {
    DCHECK(!save_doubles());
    __ EnterApiExitFrame(arg_stack_space);

    // Move argc and argv into the correct registers.
__ mov(esi, ecx);
    __ mov(edi, eax);
  } else {
    __ EnterExitFrame(arg_stack_space, save_doubles());
  }

  // ebx: pointer to C function  (C callee-saved)
  // ebp: frame pointer  (restored after C call)
  // esp: stack pointer  (restored after C call)
  // edi: number of arguments including receiver  (C callee-saved)
  // esi: pointer to the first argument (C callee-saved)

  // Result returned in eax, or eax+edx if result size is 2.

  // Check stack alignment.
  if (FLAG_debug_code) {
    __ CheckStackAlignment();
  }
  // Call C function.
  if (result_size() <= 2) {
    __ mov(Operand(esp, 0 * kPointerSize), edi);  // argc.
    __ mov(Operand(esp, 1 * kPointerSize), esi);  // argv.
    __ mov(Operand(esp, 2 * kPointerSize),
           Immediate(ExternalReference::isolate_address(isolate())));
  } else {
    DCHECK_EQ(3, result_size());
    // Pass a pointer to the result location as the first argument.
    __ lea(eax, Operand(esp, 4 * kPointerSize));
    __ mov(Operand(esp, 0 * kPointerSize), eax);
    __ mov(Operand(esp, 1 * kPointerSize), edi);  // argc.
    __ mov(Operand(esp, 2 * kPointerSize), esi);  // argv.
    __ mov(Operand(esp, 3 * kPointerSize),
           Immediate(ExternalReference::isolate_address(isolate())));
  }
  __ call(ebx);

  if (result_size() > 2) {
    DCHECK_EQ(3, result_size());
#ifndef _WIN32
    // Restore the "hidden" argument on the stack which was popped by caller.
    __ sub(esp, Immediate(kPointerSize));
#endif
    // Read result values stored on stack. Result is stored above the
    // arguments.
    __ mov(kReturnRegister0, Operand(esp, 4 * kPointerSize));
    __ mov(kReturnRegister1, Operand(esp, 5 * kPointerSize));
    __ mov(kReturnRegister2, Operand(esp, 6 * kPointerSize));
  }
  // Result is in eax, edx:eax or edi:edx:eax - do not destroy these registers!

  // Check result for exception sentinel.
  Label exception_returned;
  __ cmp(eax, isolate()->factory()->exception());
  __ j(equal, &exception_returned);

  // Check that there is no pending exception, otherwise we
  // should have returned the exception sentinel.
if (FLAG_debug_code) {
    __ push(edx);
    __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
    Label okay;
    ExternalReference pending_exception_address(
        Isolate::kPendingExceptionAddress, isolate());
    __ cmp(edx, Operand::StaticVariable(pending_exception_address));
    // Cannot use check here as it attempts to generate call into runtime.
    __ j(equal, &okay, Label::kNear);
    __ int3();
    __ bind(&okay);
    __ pop(edx);
  }

  // Exit the JavaScript to C++ exit frame.
  __ LeaveExitFrame(save_doubles(), !argv_in_register());
  __ ret(0);

  // Handling of exception.
  __ bind(&exception_returned);

  ExternalReference pending_handler_context_address(
      Isolate::kPendingHandlerContextAddress, isolate());
  ExternalReference pending_handler_code_address(
      Isolate::kPendingHandlerCodeAddress, isolate());
  ExternalReference pending_handler_offset_address(
      Isolate::kPendingHandlerOffsetAddress, isolate());
  ExternalReference pending_handler_fp_address(
      Isolate::kPendingHandlerFPAddress, isolate());
  ExternalReference pending_handler_sp_address(
      Isolate::kPendingHandlerSPAddress, isolate());

  // Ask the runtime for help to determine the handler. This will set eax to
  // contain the current pending exception, don't clobber it.
  ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
                                 isolate());
  {
    FrameScope scope(masm, StackFrame::MANUAL);
    __ PrepareCallCFunction(3, eax);
    __ mov(Operand(esp, 0 * kPointerSize), Immediate(0));  // argc.
    __ mov(Operand(esp, 1 * kPointerSize), Immediate(0));  // argv.
    __ mov(Operand(esp, 2 * kPointerSize),
           Immediate(ExternalReference::isolate_address(isolate())));
    __ CallCFunction(find_handler, 3);
  }

  // Retrieve the handler context, SP and FP.
  __ mov(esi, Operand::StaticVariable(pending_handler_context_address));
  __ mov(esp, Operand::StaticVariable(pending_handler_sp_address));
  __ mov(ebp, Operand::StaticVariable(pending_handler_fp_address));

  // If the handler is a JS frame, restore the context to the frame. Note that
  // the context will be set to (esi == 0) for non-JS frames.
  Label skip;
  __ test(esi, esi);
  __ j(zero, &skip, Label::kNear);
  __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
  __ bind(&skip);

  // Compute the handler entry address and jump to it.
  __ mov(edi, Operand::StaticVariable(pending_handler_code_address));
  __ mov(edx, Operand::StaticVariable(pending_handler_offset_address));
  // Check whether it's a turbofanned exception handler code before jump to it.
  Label not_turbo;
  __ push(eax);
  __ mov(eax, Operand(edi, Code::kKindSpecificFlags1Offset - kHeapObjectTag));
  __ and_(eax, Immediate(1 << Code::kIsTurbofannedBit));
  __ j(zero, &not_turbo);
  // Reset the x87 FPU state before entering turbofanned handler code.
  __ fninit();
  __ fld1();
  __ bind(&not_turbo);
  __ pop(eax);
  __ lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
  __ jmp(edi);
}

// Emits the C++-to-JavaScript entry trampoline: builds the JS entry frame,
// installs a stack handler, and calls the JS entry trampoline builtin.
void JSEntryStub::Generate(MacroAssembler* masm) {
  Label invoke, handler_entry, exit;
  Label not_outermost_js, not_outermost_js_2;

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Set up frame.
  __ push(ebp);
  __ mov(ebp, esp);

  // Push marker in two places.
  int marker = type();
  __ push(Immediate(Smi::FromInt(marker)));  // context slot
  __ push(Immediate(Smi::FromInt(marker)));  // function slot
  // Save callee-saved registers (C calling conventions).
  __ push(edi);
  __ push(esi);
  __ push(ebx);

  // Save copies of the top frame descriptor on the stack.
  ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate());
  __ push(Operand::StaticVariable(c_entry_fp));

  // If this is the outermost JS call, set js_entry_sp value.
ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
  __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
  __ j(not_equal, &not_outermost_js, Label::kNear);
  __ mov(Operand::StaticVariable(js_entry_sp), ebp);
  __ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  __ jmp(&invoke, Label::kNear);
  __ bind(&not_outermost_js);
  __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ jmp(&invoke);
  __ bind(&handler_entry);
  // Remember where the faked catch handler starts so the stub can report it.
  handler_offset_ = handler_entry.pos();
  // Caught exception: Store result (exception) in the pending exception
  // field in the JSEnv and return a failure sentinel.
  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                      isolate());
  __ mov(Operand::StaticVariable(pending_exception), eax);
  __ mov(eax, Immediate(isolate()->factory()->exception()));
  __ jmp(&exit);

  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
  __ PushStackHandler();

  // Clear any pending exceptions.
  __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
  __ mov(Operand::StaticVariable(pending_exception), edx);

  // Fake a receiver (NULL).
  __ push(Immediate(0));  // receiver

  // Invoke the function by calling through JS entry trampoline builtin and
  // pop the faked function when we return. Notice that we cannot store a
  // reference to the trampoline code directly in this stub, because the
  // builtin stubs may not have been generated yet.
  if (type() == StackFrame::ENTRY_CONSTRUCT) {
    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                      isolate());
    __ mov(edx, Immediate(construct_entry));
  } else {
    ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
    __ mov(edx, Immediate(entry));
  }
  __ mov(edx, Operand(edx, 0));  // deref address
  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
  __ call(edx);

  // Unlink this frame from the handler chain.
__ PopStackHandler();

  __ bind(&exit);
  // Check if the current stack frame is marked as the outermost JS frame.
  __ pop(ebx);
  __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  __ j(not_equal, &not_outermost_js_2);
  __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
  __ bind(&not_outermost_js_2);

  // Restore the top frame descriptor from the stack.
  __ pop(Operand::StaticVariable(
      ExternalReference(Isolate::kCEntryFPAddress, isolate())));

  // Restore callee-saved registers (C calling conventions).
  __ pop(ebx);
  __ pop(esi);
  __ pop(edi);

  __ add(esp, Immediate(2 * kPointerSize));  // remove markers

  // Restore frame pointer and return.
  __ pop(ebp);
  __ ret(0);
}

// Emits the instanceof stub: walks {object}'s prototype chain looking for
// {function}.prototype, consulting/updating the global instanceof cache.
void InstanceOfStub::Generate(MacroAssembler* masm) {
  Register const object = edx;                       // Object (lhs).
  Register const function = eax;                     // Function (rhs).
  Register const object_map = ecx;                   // Map of {object}.
  Register const function_map = ebx;                 // Map of {function}.
  Register const function_prototype = function_map;  // Prototype of {function}.
  Register const scratch = edi;

  DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
  DCHECK(function.is(InstanceOfDescriptor::RightRegister()));

  // Check if {object} is a smi.
  Label object_is_smi;
  __ JumpIfSmi(object, &object_is_smi, Label::kNear);

  // Lookup the {function} and the {object} map in the global instanceof cache.
  // Note: This is safe because we clear the global instanceof cache whenever
  // we change the prototype of any object.
Label fast_case, slow_case;
  __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
  __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
  __ j(not_equal, &fast_case, Label::kNear);
  __ CompareRoot(object_map, scratch, Heap::kInstanceofCacheMapRootIndex);
  __ j(not_equal, &fast_case, Label::kNear);
  // Cache hit: return the previously computed answer.
  __ LoadRoot(eax, Heap::kInstanceofCacheAnswerRootIndex);
  __ ret(0);

  // If {object} is a smi we can safely return false if {function} is a JS
  // function, otherwise we have to miss to the runtime and throw an exception.
  __ bind(&object_is_smi);
  __ JumpIfSmi(function, &slow_case);
  __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
  __ j(not_equal, &slow_case);
  __ LoadRoot(eax, Heap::kFalseValueRootIndex);
  __ ret(0);

  // Fast-case: The {function} must be a valid JSFunction.
  __ bind(&fast_case);
  __ JumpIfSmi(function, &slow_case);
  __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
  __ j(not_equal, &slow_case);

  // Ensure that {function} has an instance prototype.
  __ test_b(FieldOperand(function_map, Map::kBitFieldOffset),
            static_cast<uint8_t>(1 << Map::kHasNonInstancePrototype));
  __ j(not_zero, &slow_case);

  // Get the "prototype" (or initial map) of the {function}.
  __ mov(function_prototype,
         FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  __ AssertNotSmi(function_prototype);

  // Resolve the prototype if the {function} has an initial map.  Afterwards
  // the {function_prototype} will be either the JSReceiver prototype object or
  // the hole value, which means that no instances of the {function} were
  // created so far and hence we should return false.
Label function_prototype_valid;
  Register const function_prototype_map = scratch;
  __ CmpObjectType(function_prototype, MAP_TYPE, function_prototype_map);
  __ j(not_equal, &function_prototype_valid, Label::kNear);
  __ mov(function_prototype,
         FieldOperand(function_prototype, Map::kPrototypeOffset));
  __ bind(&function_prototype_valid);
  __ AssertNotSmi(function_prototype);

  // Update the global instanceof cache with the current {object} map and
  // {function}.  The cached answer will be set when it is known below.
  __ StoreRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
  __ StoreRoot(object_map, scratch, Heap::kInstanceofCacheMapRootIndex);

  // Loop through the prototype chain looking for the {function} prototype.
  // Assume true, and change to false if not found.
  Label done, loop, fast_runtime_fallback;
  __ mov(eax, isolate()->factory()->true_value());
  __ bind(&loop);

  // Check if the object needs to be access checked.
  __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
            1 << Map::kIsAccessCheckNeeded);
  __ j(not_zero, &fast_runtime_fallback, Label::kNear);
  // Check if the current object is a Proxy.
  __ CmpInstanceType(object_map, JS_PROXY_TYPE);
  __ j(equal, &fast_runtime_fallback, Label::kNear);

  // Advance to the next prototype; a null prototype terminates the walk.
  __ mov(object, FieldOperand(object_map, Map::kPrototypeOffset));
  __ cmp(object, function_prototype);
  __ j(equal, &done, Label::kNear);
  __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
  __ cmp(object, isolate()->factory()->null_value());
  __ j(not_equal, &loop);
  __ mov(eax, isolate()->factory()->false_value());

  __ bind(&done);
  __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
  __ ret(0);

  // Found Proxy or access check needed: Call the runtime.
  __ bind(&fast_runtime_fallback);
  __ PopReturnAddressTo(scratch);
  __ Push(object);
  __ Push(function_prototype);
  __ PushReturnAddressFrom(scratch);

  // Invalidate the instanceof cache.
__ Move(eax, Immediate(Smi::FromInt(0)));
  __ StoreRoot(eax, scratch, Heap::kInstanceofCacheFunctionRootIndex);
  __ TailCallRuntime(Runtime::kHasInPrototypeChain);

  // Slow-case: Call the %InstanceOf runtime function.
  __ bind(&slow_case);
  __ PopReturnAddressTo(scratch);
  __ Push(object);
  __ Push(function);
  __ PushReturnAddressFrom(scratch);
  __ TailCallRuntime(Runtime::kInstanceOf);
}

// -------------------------------------------------------------------------
// StringCharCodeAtGenerator

// Emits the fast path that loads the character code at index_ of string
// object_ into result_ (as a smi); non-smi/out-of-range cases branch out.
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
  // If the receiver is a smi trigger the non-string case.
  if (check_mode_ == RECEIVER_IS_UNKNOWN) {
    __ JumpIfSmi(object_, receiver_not_string_);

    // Fetch the instance type of the receiver into result register.
    __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
    __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));

    // If the receiver is not a string trigger the non-string case.
    __ test(result_, Immediate(kIsNotStringMask));
    __ j(not_zero, receiver_not_string_);
  }

  // If the index is non-smi trigger the non-smi case.
  __ JumpIfNotSmi(index_, &index_not_smi_);
  __ bind(&got_smi_index_);

  // Check for index out of range.
  __ cmp(index_, FieldOperand(object_, String::kLengthOffset));
  __ j(above_equal, index_out_of_range_);

  __ SmiUntag(index_);

  Factory* factory = masm->isolate()->factory();
  StringCharLoadGenerator::Generate(
      masm, factory, object_, index_, result_, &call_runtime_);

  __ SmiTag(result_);
  __ bind(&exit_);
}

// Emits the slow path reached when the fast path bailed out (non-smi index
// or a string that needs runtime help, e.g. flattening).
void StringCharCodeAtGenerator::GenerateSlow(
    MacroAssembler* masm, EmbedMode embed_mode,
    const RuntimeCallHelper& call_helper) {
  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);

  // Index is not a smi.
  __ bind(&index_not_smi_);

  // If index is a heap number, try converting it to an integer.
__ CheckMap(index_, masm->isolate()->factory()->heap_number_map(),
              index_not_number_, DONT_DO_SMI_CHECK);
  call_helper.BeforeCall(masm);
  if (embed_mode == PART_OF_IC_HANDLER) {
    // Preserve the IC's vector/slot registers across the runtime call.
    __ push(LoadWithVectorDescriptor::VectorRegister());
    __ push(LoadDescriptor::SlotRegister());
  }
  __ push(object_);
  __ push(index_);  // Consumed by runtime conversion function.
  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
  } else {
    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
    // NumberToSmi discards numbers that are not exact integers.
    __ CallRuntime(Runtime::kNumberToSmi);
  }
  if (!index_.is(eax)) {
    // Save the conversion result before the pop instructions below
    // have a chance to overwrite it.
    __ mov(index_, eax);
  }
  __ pop(object_);
  if (embed_mode == PART_OF_IC_HANDLER) {
    __ pop(LoadDescriptor::SlotRegister());
    __ pop(LoadWithVectorDescriptor::VectorRegister());
  }
  // Reload the instance type.
  __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
  __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
  call_helper.AfterCall(masm);
  // If index is still not a smi, it must be out of range.
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfNotSmi(index_, index_out_of_range_);
  // Otherwise, return to the fast path.
  __ jmp(&got_smi_index_);

  // Call runtime. We get here when the receiver is a string and the
  // index is a number, but the code of getting the actual character
  // is too complex (e.g., when the string needs to be flattened).
__ bind(&call_runtime_);
  call_helper.BeforeCall(masm);
  __ push(object_);
  __ SmiTag(index_);
  __ push(index_);
  __ CallRuntime(Runtime::kStringCharCodeAtRT);
  if (!result_.is(eax)) {
    __ mov(result_, eax);
  }
  call_helper.AfterCall(masm);
  __ jmp(&exit_);

  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}

// -------------------------------------------------------------------------
// StringCharFromCodeGenerator

// Emits the fast path mapping a one-byte char code (smi in code_) to the
// cached single-character string; cache misses branch to the slow case.
void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
  // Fast case of Heap::LookupSingleCharacterStringFromCode.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiShiftSize == 0);
  DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
  // One test rules out both non-smis and codes above the one-byte range.
  __ test(code_, Immediate(kSmiTagMask |
                           ((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
  __ j(not_zero, &slow_case_);

  Factory* factory = masm->isolate()->factory();
  __ Move(result_, Immediate(factory->single_character_string_cache()));
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiShiftSize == 0);
  // At this point code register contains smi tagged one byte char code.
  __ mov(result_, FieldOperand(result_, code_, times_half_pointer_size,
                               FixedArray::kHeaderSize));
  __ cmp(result_, factory->undefined_value());
  __ j(equal, &slow_case_);
  __ bind(&exit_);
}

// Emits the slow path: falls back to the StringCharFromCode runtime call.
void StringCharFromCodeGenerator::GenerateSlow(
    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);

  __ bind(&slow_case_);
  call_helper.BeforeCall(masm);
  __ push(code_);
  __ CallRuntime(Runtime::kStringCharFromCode);
  if (!result_.is(eax)) {
    __ mov(result_, eax);
  }
  call_helper.AfterCall(masm);
  __ jmp(&exit_);

  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}

// Emits a byte-wise copy of `count` characters from src to dest; count is
// doubled first for two-byte strings. Clobbers src, dest, count, scratch.
void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
                                          Register dest,
                                          Register src,
                                          Register count,
                                          Register scratch,
                                          String::Encoding encoding) {
  DCHECK(!scratch.is(dest));
  DCHECK(!scratch.is(src));
  DCHECK(!scratch.is(count));

  // Nothing to do for zero characters.
Label done;
  __ test(count, count);
  __ j(zero, &done);

  // Make count the number of bytes to copy.
  if (encoding == String::TWO_BYTE_ENCODING) {
    __ shl(count, 1);
  }

  Label loop;
  __ bind(&loop);
  __ mov_b(scratch, Operand(src, 0));
  __ mov_b(Operand(dest, 0), scratch);
  __ inc(src);
  __ inc(dest);
  __ dec(count);
  __ j(not_zero, &loop);

  __ bind(&done);
}

// Emits the SubString stub: validates the (string, from, to) arguments from
// the stack and either returns the original string, builds a sliced string,
// copies out a new sequential string, or falls back to the runtime.
void SubStringStub::Generate(MacroAssembler* masm) {
  Label runtime;

  // Stack frame on entry.
  //  esp[0]: return address
  //  esp[4]: to
  //  esp[8]: from
  //  esp[12]: string

  // Make sure first argument is a string.
  __ mov(eax, Operand(esp, 3 * kPointerSize));
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfSmi(eax, &runtime);
  Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
  __ j(NegateCondition(is_string), &runtime);

  // eax: string
  // ebx: instance type

  // Calculate length of sub string using the smi values.
  __ mov(ecx, Operand(esp, 1 * kPointerSize));  // To index.
  __ JumpIfNotSmi(ecx, &runtime);
  __ mov(edx, Operand(esp, 2 * kPointerSize));  // From index.
  __ JumpIfNotSmi(edx, &runtime);
  __ sub(ecx, edx);
  __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
  Label not_original_string;
  // Shorter than original string's length: an actual substring.
  __ j(below, &not_original_string, Label::kNear);
  // Longer than original string's length or negative: unsafe arguments.
  __ j(above, &runtime);
  // Return original string.
  Counters* counters = isolate()->counters();
  __ IncrementCounter(counters->sub_string_native(), 1);
  __ ret(3 * kPointerSize);
  __ bind(&not_original_string);

  Label single_char;
  __ cmp(ecx, Immediate(Smi::FromInt(1)));
  __ j(equal, &single_char);

  // eax: string
  // ebx: instance type
  // ecx: sub string length (smi)
  // edx: from index (smi)

  // Deal with different string types: update the index if necessary
  // and put the underlying string into edi.
  Label underlying_unpacked, sliced_string, seq_or_external_string;

  // If the string is not indirect, it can only be sequential or external.
STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
  STATIC_ASSERT(kIsIndirectStringMask != 0);
  __ test(ebx, Immediate(kIsIndirectStringMask));
  __ j(zero, &seq_or_external_string, Label::kNear);

  Factory* factory = isolate()->factory();
  __ test(ebx, Immediate(kSlicedNotConsMask));
  __ j(not_zero, &sliced_string, Label::kNear);
  // Cons string.  Check whether it is flat, then fetch first part.
  // Flat cons strings have an empty second part.
  __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
         factory->empty_string());
  __ j(not_equal, &runtime);
  __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
  // Update instance type.
  __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
  __ jmp(&underlying_unpacked, Label::kNear);

  __ bind(&sliced_string);
  // Sliced string.  Fetch parent and adjust start index by offset.
  __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
  __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
  // Update instance type.
  __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
  __ jmp(&underlying_unpacked, Label::kNear);

  __ bind(&seq_or_external_string);
  // Sequential or external string.  Just move string to the expected register.
  __ mov(edi, eax);

  __ bind(&underlying_unpacked);

  if (FLAG_string_slices) {
    Label copy_routine;
    // edi: underlying subject string
    // ebx: instance type of underlying subject string
    // edx: adjusted start index (smi)
    // ecx: length (smi)
    __ cmp(ecx, Immediate(Smi::FromInt(SlicedString::kMinLength)));
    // Short slice.  Copy instead of slicing.
    __ j(less, &copy_routine);
    // Allocate new sliced string.  At this point we do not reload the instance
    // type including the string encoding because we simply rely on the info
    // provided by the original string.  It does not matter if the original
    // string's encoding is wrong because we always have to recheck encoding of
    // the newly created string's parent anyways due to externalized strings.
    Label two_byte_slice, set_slice_header;
    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
    __ test(ebx, Immediate(kStringEncodingMask));
    __ j(zero, &two_byte_slice, Label::kNear);
    __ AllocateOneByteSlicedString(eax, ebx, no_reg, &runtime);
    __ jmp(&set_slice_header, Label::kNear);
    __ bind(&two_byte_slice);
    __ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
    __ bind(&set_slice_header);
    // Fill in the new slice's length, hash, parent and start offset.
    __ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
    __ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
           Immediate(String::kEmptyHashField));
    __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
    __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
    __ IncrementCounter(counters->sub_string_native(), 1);
    __ ret(3 * kPointerSize);

    __ bind(&copy_routine);
  }

  // edi: underlying subject string
  // ebx: instance type of underlying subject string
  // edx: adjusted start index (smi)
  // ecx: length (smi)
  // The subject string can only be external or sequential string of either
  // encoding at this point.
  Label two_byte_sequential, runtime_drop_two, sequential_string;
  STATIC_ASSERT(kExternalStringTag != 0);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ test_b(ebx, kExternalStringTag);
  __ j(zero, &sequential_string);

  // Handle external string.
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ test_b(ebx, kShortExternalStringMask);
  __ j(not_zero, &runtime);
  __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));

  // Move the pointer so that offset-wise, it looks like a sequential string.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));

  __ bind(&sequential_string);
  // Stash away (adjusted) index and (underlying) string.
  __ push(edx);
  __ push(edi);
  __ SmiUntag(ecx);
  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
  __ test_b(ebx, kStringEncodingMask);
  __ j(zero, &two_byte_sequential);

  // Sequential one byte string.  Allocate the result.
  __ AllocateOneByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);

  // eax: result string
  // ecx: result string length
  // Locate first character of result.
  __ mov(edi, eax);
  __ add(edi, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
  // Load string argument and locate character of sub string start.
  __ pop(edx);
  __ pop(ebx);
  __ SmiUntag(ebx);
  __ lea(edx, FieldOperand(edx, ebx, times_1, SeqOneByteString::kHeaderSize));

  // eax: result string
  // ecx: result length
  // edi: first character of result
  // edx: character of sub string start
  StringHelper::GenerateCopyCharacters(
      masm, edi, edx, ecx, ebx, String::ONE_BYTE_ENCODING);
  __ IncrementCounter(counters->sub_string_native(), 1);
  __ ret(3 * kPointerSize);

  __ bind(&two_byte_sequential);
  // Sequential two-byte string.  Allocate the result.
  __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);

  // eax: result string
  // ecx: result string length
  // Locate first character of result.
  __ mov(edi, eax);
  __ add(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));

  // Load string argument and locate character of sub string start.
  __ pop(edx);
  __ pop(ebx);

  // As from is a smi it is 2 times the value which matches the size of a two
  // byte character.
STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  __ lea(edx, FieldOperand(edx, ebx, times_1, SeqTwoByteString::kHeaderSize));

  // eax: result string
  // ecx: result length
  // edi: first character of result
  // edx: character of sub string start
  StringHelper::GenerateCopyCharacters(
      masm, edi, edx, ecx, ebx, String::TWO_BYTE_ENCODING);
  __ IncrementCounter(counters->sub_string_native(), 1);
  __ ret(3 * kPointerSize);

  // Drop pushed values on the stack before tail call.
  __ bind(&runtime_drop_two);
  __ Drop(2);

  // Just jump to runtime to create the sub string.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kSubString);

  __ bind(&single_char);
  // eax: string
  // ebx: instance type
  // ecx: sub string length (smi)
  // edx: from index (smi)
  StringCharAtGenerator generator(eax, edx, ecx, eax, &runtime, &runtime,
                                  &runtime, STRING_INDEX_IS_NUMBER,
                                  RECEIVER_IS_STRING);
  generator.GenerateFast(masm);
  __ ret(3 * kPointerSize);
  generator.SkipSlow(masm, &runtime);
}

// Emits the ToNumber stub: smis and heap numbers are returned unchanged,
// strings with a cached array index are converted inline, everything else
// goes to the runtime.
void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in eax.
  Label not_smi;
  __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
  __ Ret();
  __ bind(&not_smi);

  Label not_heap_number;
  __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
  __ j(not_equal, &not_heap_number, Label::kNear);
  __ Ret();
  __ bind(&not_heap_number);

  Label not_string, slow_string;
  __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edi);
  // eax: object
  // edi: object map
  __ j(above_equal, &not_string, Label::kNear);
  // Check if string has a cached array index.
  __ test(FieldOperand(eax, String::kHashFieldOffset),
          Immediate(String::kContainsCachedArrayIndexMask));
  __ j(not_zero, &slow_string, Label::kNear);
  __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
  __ IndexFromHash(eax, eax);
  __ Ret();
  __ bind(&slow_string);
  __ pop(ecx);   // Pop return address.
  __ push(eax);  // Push argument.
  __ push(ecx);  // Push return address.
__ TailCallRuntime(Runtime::kStringToNumber); __ bind(&not_string); Label not_oddball; __ CmpInstanceType(edi, ODDBALL_TYPE); __ j(not_equal, &not_oddball, Label::kNear); __ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset)); __ Ret(); __ bind(&not_oddball); __ pop(ecx); // Pop return address. __ push(eax); // Push argument. __ push(ecx); // Push return address. __ TailCallRuntime(Runtime::kToNumber); } void ToLengthStub::Generate(MacroAssembler* masm) { // The ToLength stub takes on argument in eax. Label not_smi, positive_smi; __ JumpIfNotSmi(eax, &not_smi, Label::kNear); STATIC_ASSERT(kSmiTag == 0); __ test(eax, eax); __ j(greater_equal, &positive_smi, Label::kNear); __ xor_(eax, eax); __ bind(&positive_smi); __ Ret(); __ bind(&not_smi); __ pop(ecx); // Pop return address. __ push(eax); // Push argument. __ push(ecx); // Push return address. __ TailCallRuntime(Runtime::kToLength); } void ToStringStub::Generate(MacroAssembler* masm) { // The ToString stub takes one argument in eax. Label is_number; __ JumpIfSmi(eax, &is_number, Label::kNear); Label not_string; __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edi); // eax: receiver // edi: receiver map __ j(above_equal, &not_string, Label::kNear); __ Ret(); __ bind(&not_string); Label not_heap_number; __ CompareMap(eax, masm->isolate()->factory()->heap_number_map()); __ j(not_equal, &not_heap_number, Label::kNear); __ bind(&is_number); NumberToStringStub stub(isolate()); __ TailCallStub(&stub); __ bind(&not_heap_number); Label not_oddball; __ CmpInstanceType(edi, ODDBALL_TYPE); __ j(not_equal, &not_oddball, Label::kNear); __ mov(eax, FieldOperand(eax, Oddball::kToStringOffset)); __ Ret(); __ bind(&not_oddball); __ pop(ecx); // Pop return address. __ push(eax); // Push argument. __ push(ecx); // Push return address. __ TailCallRuntime(Runtime::kToString); } void ToNameStub::Generate(MacroAssembler* masm) { // The ToName stub takes one argument in eax. 
  Label is_number;
  __ JumpIfSmi(eax, &is_number, Label::kNear);

  Label not_name;
  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
  __ CmpObjectType(eax, LAST_NAME_TYPE, edi);
  // eax: receiver
  // edi: receiver map
  __ j(above, &not_name, Label::kNear);
  __ Ret();  // Already a name (string or symbol).
  __ bind(&not_name);

  Label not_heap_number;
  __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
  __ j(not_equal, &not_heap_number, Label::kNear);
  __ bind(&is_number);
  NumberToStringStub stub(isolate());
  __ TailCallStub(&stub);
  __ bind(&not_heap_number);

  Label not_oddball;
  __ CmpInstanceType(edi, ODDBALL_TYPE);
  __ j(not_equal, &not_oddball, Label::kNear);
  // Oddballs cache their string representation.
  __ mov(eax, FieldOperand(eax, Oddball::kToStringOffset));
  __ Ret();
  __ bind(&not_oddball);

  __ pop(ecx);   // Pop return address.
  __ push(eax);  // Push argument.
  __ push(ecx);  // Push return address.
  __ TailCallRuntime(Runtime::kToName);
}


// Compares two flat sequential one-byte strings for equality.  Returns a smi
// NOT_EQUAL/EQUAL in eax and pops no arguments.  Clobbers scratch1/scratch2.
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                                   Register left,
                                                   Register right,
                                                   Register scratch1,
                                                   Register scratch2) {
  Register length = scratch1;

  // Compare lengths.
  Label strings_not_equal, check_zero_length;
  __ mov(length, FieldOperand(left, String::kLengthOffset));
  __ cmp(length, FieldOperand(right, String::kLengthOffset));
  __ j(equal, &check_zero_length, Label::kNear);
  // Different lengths can never be equal.
  __ bind(&strings_not_equal);
  __ Move(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
  __ ret(0);

  // Check if the length is zero.
  Label compare_chars;
  __ bind(&check_zero_length);
  STATIC_ASSERT(kSmiTag == 0);
  __ test(length, length);
  __ j(not_zero, &compare_chars, Label::kNear);
  // Both empty: trivially equal.
  __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
  __ ret(0);

  // Compare characters.
  __ bind(&compare_chars);
  GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
                                  &strings_not_equal, Label::kNear);

  // Characters are equal.
  __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
  __ ret(0);
}


// Three-way comparison of two flat sequential one-byte strings.  Returns a
// smi LESS/EQUAL/GREATER in eax.  Clobbers all three scratch registers.
void StringHelper::GenerateCompareFlatOneByteStrings(
    MacroAssembler* masm, Register left, Register right, Register scratch1,
    Register scratch2, Register scratch3) {
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->string_compare_native(), 1);

  // Find minimum length.
  Label left_shorter;
  __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
  __ mov(scratch3, scratch1);
  __ sub(scratch3, FieldOperand(right, String::kLengthOffset));

  Register length_delta = scratch3;  // left length - right length (smi).

  __ j(less_equal, &left_shorter, Label::kNear);
  // Right string is shorter. Change scratch1 to be length of right string.
  __ sub(scratch1, length_delta);
  __ bind(&left_shorter);

  Register min_length = scratch1;

  // If either length is zero, just compare lengths.
  Label compare_lengths;
  __ test(min_length, min_length);
  __ j(zero, &compare_lengths, Label::kNear);

  // Compare characters.
  Label result_not_equal;
  GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
                                  &result_not_equal, Label::kNear);

  // Compare lengths - strings up to min-length are equal.
  __ bind(&compare_lengths);
  __ test(length_delta, length_delta);
  Label length_not_equal;
  __ j(not_zero, &length_not_equal, Label::kNear);

  // Result is EQUAL.
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
  __ ret(0);

  Label result_greater;
  Label result_less;
  __ bind(&length_not_equal);
  // Longer string compares greater when the common prefix was equal.
  __ j(greater, &result_greater, Label::kNear);
  __ jmp(&result_less, Label::kNear);
  __ bind(&result_not_equal);
  // Unsigned char comparison determined the order (condition set by loop).
  __ j(above, &result_greater, Label::kNear);
  __ bind(&result_less);

  // Result is LESS.
  __ Move(eax, Immediate(Smi::FromInt(LESS)));
  __ ret(0);

  // Result is GREATER.
  __ bind(&result_greater);
  __ Move(eax, Immediate(Smi::FromInt(GREATER)));
  __ ret(0);
}


// Byte-wise compare of |length| characters of two sequential one-byte
// strings; jumps to |chars_not_equal| on the first mismatch, with the
// processor flags from the byte compare still intact.
void StringHelper::GenerateOneByteCharsCompareLoop(
    MacroAssembler* masm, Register left, Register right, Register length,
    Register scratch, Label* chars_not_equal,
    Label::Distance chars_not_equal_near) {
  // Change index to run from -length to -1 by adding length to string
  // start. This means that loop ends when index reaches zero, which
  // doesn't need an additional compare.
  __ SmiUntag(length);
  __ lea(left,
         FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
  __ lea(right,
         FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
  __ neg(length);
  Register index = length;  // index = -length;

  // Compare loop.
  Label loop;
  __ bind(&loop);
  __ mov_b(scratch, Operand(left, index, times_1, 0));
  __ cmpb(scratch, Operand(right, index, times_1, 0));
  __ j(not_equal, chars_not_equal, chars_not_equal_near);
  __ inc(index);
  __ j(not_zero, &loop);
}


// Compares the two string arguments; fast path handles identical pointers
// and flat one-byte strings, everything else falls through to the runtime.
void StringCompareStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- edx    : left string
  //  -- eax    : right string
  //  -- esp[0] : return address
  // -----------------------------------
  __ AssertString(edx);
  __ AssertString(eax);

  Label not_same;
  __ cmp(edx, eax);
  __ j(not_equal, &not_same, Label::kNear);
  // Same object: trivially equal.
  __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
  __ Ret();

  __ bind(&not_same);

  // Check that both objects are sequential one-byte strings.
  Label runtime;
  __ JumpIfNotBothSequentialOneByteStrings(edx, eax, ecx, ebx, &runtime);

  // Compare flat one-byte strings.
  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
  StringHelper::GenerateCompareFlatOneByteStrings(masm, edx, eax, ecx, ebx,
                                                  edi);

  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ bind(&runtime);
  __ PopReturnAddressTo(ecx);
  __ Push(edx);
  __ Push(eax);
  __ PushReturnAddressFrom(ecx);
  __ TailCallRuntime(Runtime::kStringCompare);
}


void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- edx    : left
  //  -- eax    : right
  //  -- esp[0] : return address
  // -----------------------------------

  // Load ecx with the allocation site. We stick an undefined dummy value here
  // and replace it with the real allocation site later when we instantiate
  // this stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
  __ mov(ecx, handle(isolate()->heap()->undefined_value()));

  // Make sure that we actually patched the allocation site.
  if (FLAG_debug_code) {
    __ test(ecx, Immediate(kSmiTagMask));
    __ Assert(not_equal, kExpectedAllocationSite);
    __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
           isolate()->factory()->allocation_site_map());
    __ Assert(equal, kExpectedAllocationSite);
  }

  // Tail call into the stub that handles binary operations with allocation
  // sites.
  BinaryOpWithAllocationSiteStub stub(isolate(), state());
  __ TailCallStub(&stub);
}


// CompareIC handler for the BOOLEAN state: both operands must be booleans.
void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
  DCHECK_EQ(CompareICState::BOOLEAN, state());
  Label miss;
  // Debug code emits extra instructions, so near jumps may not reach.
  Label::Distance const miss_distance = masm->emit_debug_code() ?
      Label::kFar : Label::kNear;

  __ JumpIfSmi(edx, &miss, miss_distance);
  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
  __ JumpIfSmi(eax, &miss, miss_distance);
  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
  __ JumpIfNotRoot(ecx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
  __ JumpIfNotRoot(ebx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
  if (!Token::IsEqualityOp(op())) {
    // For ordered comparison use the cached smi number values of the
    // booleans, then swap so the subtraction below yields the right sign.
    __ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
    __ AssertSmi(eax);
    __ mov(edx, FieldOperand(edx, Oddball::kToNumberOffset));
    __ AssertSmi(edx);
    __ xchg(eax, edx);
  }
  __ sub(eax, edx);
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}


// CompareIC handler for the SMI state: both operands must be smis.
void CompareICStub::GenerateSmis(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::SMI);
  Label miss;
  __ mov(ecx, edx);
  __ or_(ecx, eax);
  __ JumpIfNotSmi(ecx, &miss, Label::kNear);

  if (GetCondition() == equal) {
    // For equality we do not care about the sign of the result.
    __ sub(eax, edx);
  } else {
    Label done;
    __ sub(edx, eax);
    __ j(no_overflow, &done, Label::kNear);
    // Correct sign of result in case of overflow.
    __ not_(edx);
    __ bind(&done);
    __ mov(eax, edx);
  }
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}


// CompareIC handler for the NUMBER state (smis and heap numbers).
void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::NUMBER);

  Label generic_stub, check_left;
  Label unordered, maybe_undefined1, maybe_undefined2;
  Label miss;

  if (left() == CompareICState::SMI) {
    __ JumpIfNotSmi(edx, &miss);
  }
  if (right() == CompareICState::SMI) {
    __ JumpIfNotSmi(eax, &miss);
  }

  // Inlining the double comparison and falling back to the general compare
  // stub if NaN is involved or SSE2 or CMOV is unsupported.
  __ JumpIfSmi(eax, &check_left, Label::kNear);
  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
         isolate()->factory()->heap_number_map());
  __ j(not_equal, &maybe_undefined1, Label::kNear);

  __ bind(&check_left);
  __ JumpIfSmi(edx, &generic_stub, Label::kNear);
  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
         isolate()->factory()->heap_number_map());
  __ j(not_equal, &maybe_undefined2, Label::kNear);

  __ bind(&unordered);
  __ bind(&generic_stub);
  // Defer to the fully generic compare stub.
  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
                     CompareICState::GENERIC, CompareICState::GENERIC);
  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);

  __ bind(&maybe_undefined1);
  if (Token::IsOrderedRelationalCompareOp(op())) {
    // undefined compares as unordered against numbers.
    __ cmp(eax, Immediate(isolate()->factory()->undefined_value()));
    __ j(not_equal, &miss);
    __ JumpIfSmi(edx, &unordered);
    __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
    __ j(not_equal, &maybe_undefined2, Label::kNear);
    __ jmp(&unordered);
  }

  __ bind(&maybe_undefined2);
  if (Token::IsOrderedRelationalCompareOp(op())) {
    __ cmp(edx, Immediate(isolate()->factory()->undefined_value()));
    __ j(equal, &unordered);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


// CompareIC handler for internalized strings: equal iff identical pointers.
void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::INTERNALIZED_STRING);
  DCHECK(GetCondition() == equal);

  // Registers containing left and right operands respectively.
  Register left = edx;
  Register right = eax;
  Register tmp1 = ecx;
  Register tmp2 = ebx;

  // Check that both operands are heap objects.
  Label miss;
  __ mov(tmp1, left);
  STATIC_ASSERT(kSmiTag == 0);
  __ and_(tmp1, right);
  __ JumpIfSmi(tmp1, &miss, Label::kNear);

  // Check that both operands are internalized strings.
  __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
  __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
  __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
  __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ or_(tmp1, tmp2);
  __ test(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
  __ j(not_zero, &miss, Label::kNear);

  // Internalized strings are compared by identity.
  Label done;
  __ cmp(left, right);
  // Make sure eax is non-zero. At this point input operands are
  // guaranteed to be non-zero.
  DCHECK(right.is(eax));
  __ j(not_equal, &done, Label::kNear);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
  __ bind(&done);
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}


// CompareIC handler for unique names (internalized strings and symbols).
void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::UNIQUE_NAME);
  DCHECK(GetCondition() == equal);

  // Registers containing left and right operands respectively.
  Register left = edx;
  Register right = eax;
  Register tmp1 = ecx;
  Register tmp2 = ebx;

  // Check that both operands are heap objects.
  Label miss;
  __ mov(tmp1, left);
  STATIC_ASSERT(kSmiTag == 0);
  __ and_(tmp1, right);
  __ JumpIfSmi(tmp1, &miss, Label::kNear);

  // Check that both operands are unique names. This leaves the instance
  // types loaded in tmp1 and tmp2.
  __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
  __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
  __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
  __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss, Label::kNear);
  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss, Label::kNear);

  // Unique names are compared by identity.
  Label done;
  __ cmp(left, right);
  // Make sure eax is non-zero. At this point input operands are
  // guaranteed to be non-zero.
  DCHECK(right.is(eax));
  __ j(not_equal, &done, Label::kNear);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
  __ bind(&done);
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}


// CompareIC handler for the STRING state.
void CompareICStub::GenerateStrings(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::STRING);
  Label miss;

  bool equality = Token::IsEqualityOp(op());

  // Registers containing left and right operands respectively.
  Register left = edx;
  Register right = eax;
  Register tmp1 = ecx;
  Register tmp2 = ebx;
  Register tmp3 = edi;

  // Check that both operands are heap objects.
  __ mov(tmp1, left);
  STATIC_ASSERT(kSmiTag == 0);
  __ and_(tmp1, right);
  __ JumpIfSmi(tmp1, &miss);

  // Check that both operands are strings. This leaves the instance
  // types loaded in tmp1 and tmp2.
  __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
  __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
  __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
  __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
  __ mov(tmp3, tmp1);
  STATIC_ASSERT(kNotStringTag != 0);
  __ or_(tmp3, tmp2);
  __ test(tmp3, Immediate(kIsNotStringMask));
  __ j(not_zero, &miss);

  // Fast check for identical strings.
  Label not_same;
  __ cmp(left, right);
  __ j(not_equal, &not_same, Label::kNear);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
  __ ret(0);

  // Handle not identical strings.
  __ bind(&not_same);

  // Check that both strings are internalized. If they are, we're done
  // because we already know they are not identical. But in the case of
  // non-equality compare, we still need to determine the order. We
  // also know they are both strings.
  if (equality) {
    Label do_compare;
    STATIC_ASSERT(kInternalizedTag == 0);
    __ or_(tmp1, tmp2);
    __ test(tmp1, Immediate(kIsNotInternalizedMask));
    __ j(not_zero, &do_compare, Label::kNear);
    // Make sure eax is non-zero. At this point input operands are
    // guaranteed to be non-zero.
    DCHECK(right.is(eax));
    __ ret(0);
    __ bind(&do_compare);
  }

  // Check that both strings are sequential one-byte.
  Label runtime;
  __ JumpIfNotBothSequentialOneByteStrings(left, right, tmp1, tmp2, &runtime);

  // Compare flat one byte strings. Returns when done.
  if (equality) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
                                                  tmp2);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
                                                    tmp2, tmp3);
  }

  // Handle more complex cases in runtime.
  __ bind(&runtime);
  __ pop(tmp1);  // Return address.
  __ push(left);
  __ push(right);
  __ push(tmp1);
  if (equality) {
    __ TailCallRuntime(Runtime::kStringEquals);
  } else {
    __ TailCallRuntime(Runtime::kStringCompare);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


// CompareIC handler for JS receivers: identity comparison only.
void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
  DCHECK_EQ(CompareICState::RECEIVER, state());
  Label miss;
  __ mov(ecx, edx);
  __ and_(ecx, eax);
  __ JumpIfSmi(ecx, &miss, Label::kNear);

  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
  __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
  __ j(below, &miss, Label::kNear);
  __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ecx);
  __ j(below, &miss, Label::kNear);

  DCHECK_EQ(equal, GetCondition());
  // Pointer difference is zero exactly for identical receivers.
  __ sub(eax, edx);
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}


// CompareIC handler specialized to one known receiver map (via weak cell).
void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
  Label miss;
  Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
  __ mov(ecx, edx);
  __ and_(ecx, eax);
  __ JumpIfSmi(ecx, &miss, Label::kNear);

  // Both operands must still have the known map (weak cell not cleared).
  __ GetWeakValue(edi, cell);
  __ cmp(edi, FieldOperand(eax, HeapObject::kMapOffset));
  __ j(not_equal, &miss, Label::kNear);
  __ cmp(edi, FieldOperand(edx, HeapObject::kMapOffset));
  __ j(not_equal, &miss, Label::kNear);

  if (Token::IsEqualityOp(op())) {
    __ sub(eax, edx);
    __ ret(0);
  } else {
    __ PopReturnAddressTo(ecx);
    __ Push(edx);
    __ Push(eax);
    __ Push(Immediate(Smi::FromInt(NegativeComparisonResult(GetCondition()))));
    __ PushReturnAddressFrom(ecx);
    __ TailCallRuntime(Runtime::kCompare);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


// Fallback for all CompareIC handlers: calls the runtime to select a new
// specialized stub, then tail-jumps into the freshly generated code.
void CompareICStub::GenerateMiss(MacroAssembler* masm) {
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ push(edx);  // Preserve edx and eax.
    __ push(eax);
    __ push(edx);  // And also use them as the arguments.
    __ push(eax);
    __ push(Immediate(Smi::FromInt(op())));
    __ CallRuntime(Runtime::kCompareIC_Miss);

    // Compute the entry point of the rewritten stub.
    __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
    __ pop(eax);
    __ pop(edx);
  }

  // Do a tail call to the rewritten stub.
  __ jmp(edi);
}


// Helper function used to check that the dictionary doesn't contain
// the property. This function may return false negatives, so miss_label
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
// Name must be a unique name and receiver must be a heap object.
void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
                                                      Label* miss,
                                                      Label* done,
                                                      Register properties,
                                                      Handle<Name> name,
                                                      Register r0) {
  DCHECK(name->IsUniqueName());

  // If names of slots in range from 1 to kProbes - 1 for the hash value are
  // not equal to the name and kProbes-th slot is not used (its name is the
  // undefined value), it guarantees the hash table doesn't contain the
  // property. It's true even if some slots represent deleted properties
  // (their names are the hole value).
  for (int i = 0; i < kInlinedProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    Register index = r0;
    // Capacity is smi 2^n.
    __ mov(index, FieldOperand(properties, kCapacityOffset));
    __ dec(index);
    __ and_(index,
            Immediate(Smi::FromInt(name->Hash() +
                                   NameDictionary::GetProbeOffset(i))));

    // Scale the index by multiplying by the entry size.
    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
    __ lea(index, Operand(index, index, times_2, 0));  // index *= 3.
    Register entity_name = r0;
    // Having undefined at this place means the name is not contained.
    STATIC_ASSERT(kSmiTagSize == 1);
    __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
                                kElementsStartOffset - kHeapObjectTag));
    __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
    __ j(equal, done);

    // Stop if found the property.
    __ cmp(entity_name, Handle<Name>(name));
    __ j(equal, miss);

    Label good;
    // Check for the hole and skip.
    __ cmp(entity_name, masm->isolate()->factory()->the_hole_value());
    __ j(equal, &good, Label::kNear);

    // Check if the entry name is not a unique name.
    __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
    __ JumpIfNotUniqueNameInstanceType(
        FieldOperand(entity_name, Map::kInstanceTypeOffset), miss);
    __ bind(&good);
  }

  // Inlined probes were inconclusive: fall back to the full lookup stub.
  NameDictionaryLookupStub stub(masm->isolate(), properties, r0, r0,
                                NEGATIVE_LOOKUP);
  __ push(Immediate(Handle<Object>(name)));
  __ push(Immediate(name->Hash()));
  __ CallStub(&stub);
  __ test(r0, r0);
  __ j(not_zero, miss);
  __ jmp(done);
}


// Probe the name dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found leaving the
// index into the dictionary in |r0|. Jump to the |miss| label
// otherwise.
void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
                                                      Label* miss,
                                                      Label* done,
                                                      Register elements,
                                                      Register name,
                                                      Register r0,
                                                      Register r1) {
  DCHECK(!elements.is(r0));
  DCHECK(!elements.is(r1));
  DCHECK(!name.is(r0));
  DCHECK(!name.is(r1));

  __ AssertName(name);

  __ mov(r1, FieldOperand(elements, kCapacityOffset));
  __ shr(r1, kSmiTagSize);  // convert smi to int
  __ dec(r1);

  // Generate an unrolled loop that performs a few probes before
  // giving up. Measurements done on Gmail indicate that 2 probes
  // cover ~93% of loads from dictionaries.
  for (int i = 0; i < kInlinedProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ mov(r0, FieldOperand(name, Name::kHashFieldOffset));
    __ shr(r0, Name::kHashShift);
    if (i > 0) {
      __ add(r0, Immediate(NameDictionary::GetProbeOffset(i)));
    }
    __ and_(r0, r1);

    // Scale the index by multiplying by the entry size.
    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
    __ lea(r0, Operand(r0, r0, times_2, 0));  // r0 = r0 * 3

    // Check if the key is identical to the name.
    __ cmp(name, Operand(elements, r0, times_4,
                         kElementsStartOffset - kHeapObjectTag));
    __ j(equal, done);
  }

  // Inlined probes missed: fall back to the full lookup stub.
  NameDictionaryLookupStub stub(masm->isolate(), elements, r1, r0,
                                POSITIVE_LOOKUP);
  __ push(name);
  __ mov(r0, FieldOperand(name, Name::kHashFieldOffset));
  __ shr(r0, Name::kHashShift);
  __ push(r0);
  __ CallStub(&stub);

  __ test(r1, r1);
  __ j(zero, miss);
  __ jmp(done);
}


void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
  // This stub overrides SometimesSetsUpAFrame() to return false. That means
  // we cannot call anything that could cause a GC from this stub.
  // Stack frame on entry:
  //  esp[0 * kPointerSize]: return address.
  //  esp[1 * kPointerSize]: key's hash.
  //  esp[2 * kPointerSize]: key.
  // Registers:
  //  dictionary_: NameDictionary to probe.
  //  result_: used as scratch.
  //  index_: will hold an index of entry if lookup is successful.
  //          might alias with result_.
  // Returns:
  //  result_ is zero if lookup failed, non zero otherwise.

  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;

  Register scratch = result();

  __ mov(scratch, FieldOperand(dictionary(), kCapacityOffset));
  __ dec(scratch);
  __ SmiUntag(scratch);
  __ push(scratch);  // Mask kept at esp[0] for the probe loop below.

  // If names of slots in range from 1 to kProbes - 1 for the hash value are
  // not equal to the name and kProbes-th slot is not used (its name is the
  // undefined value), it guarantees the hash table doesn't contain the
  // property. It's true even if some slots represent deleted properties
  // (their names are the null value).
  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ mov(scratch, Operand(esp, 2 * kPointerSize));
    if (i > 0) {
      __ add(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
    }
    __ and_(scratch, Operand(esp, 0));

    // Scale the index by multiplying by the entry size.
    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
    __ lea(index(), Operand(scratch, scratch, times_2, 0));  // index *= 3.

    // Having undefined at this place means the name is not contained.
    STATIC_ASSERT(kSmiTagSize == 1);
    __ mov(scratch, Operand(dictionary(), index(), times_pointer_size,
                            kElementsStartOffset - kHeapObjectTag));
    __ cmp(scratch, isolate()->factory()->undefined_value());
    __ j(equal, &not_in_dictionary);

    // Stop if found the property.
    __ cmp(scratch, Operand(esp, 3 * kPointerSize));
    __ j(equal, &in_dictionary);

    if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
      // If we hit a key that is not a unique name during negative
      // lookup we have to bailout as this key might be equal to the
      // key we are looking for.

      // Check if the entry name is not a unique name.
      __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
      __ JumpIfNotUniqueNameInstanceType(
          FieldOperand(scratch, Map::kInstanceTypeOffset),
          &maybe_in_dictionary);
    }
  }

  __ bind(&maybe_in_dictionary);
  // If we are doing negative lookup then probing failure should be
  // treated as a lookup success. For positive lookup probing failure
  // should be treated as lookup failure.
  if (mode() == POSITIVE_LOOKUP) {
    __ mov(result(), Immediate(0));
    __ Drop(1);
    __ ret(2 * kPointerSize);
  }

  __ bind(&in_dictionary);
  __ mov(result(), Immediate(1));
  __ Drop(1);
  __ ret(2 * kPointerSize);

  __ bind(&not_in_dictionary);
  __ mov(result(), Immediate(0));
  __ Drop(1);
  __ ret(2 * kPointerSize);
}


// Pre-generates both FP-save variants of the store-buffer-overflow stub so
// they exist before incremental marking needs them.
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  StoreBufferOverflowStub stub(isolate, kDontSaveFPRegs);
  stub.GetCode();
  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
  stub2.GetCode();
}


// Takes the input in 3 registers: address_ value_ and object_.
// A pointer to the value has just been written into the object, now this
// stub makes sure we keep the GC informed.  The word in the object where
// the value has been written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
  Label skip_to_incremental_noncompacting;
  Label skip_to_incremental_compacting;

  // The first two instructions are generated with labels so as to get the
  // offset fixed up correctly by the bind(Label*) call. We patch it back and
  // forth between a compare instructions (a nop in this position) and the
  // real branch when we start and stop incremental heap marking.
  __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
  __ jmp(&skip_to_incremental_compacting, Label::kFar);

  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ ret(0);
  }

  __ bind(&skip_to_incremental_noncompacting);
  GenerateIncremental(masm, INCREMENTAL);

  __ bind(&skip_to_incremental_compacting);
  GenerateIncremental(masm, INCREMENTAL_COMPACTION);

  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
  masm->set_byte_at(0, kTwoByteNopInstruction);
  masm->set_byte_at(2, kFiveByteNopInstruction);
}


// Write-barrier slow path taken while incremental (possibly compacting)
// marking is active.
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
  regs_.Save(masm);

  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
    Label dont_need_remembered_set;

    __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
                           regs_.scratch0(),
                           &dont_need_remembered_set);

    __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
                        &dont_need_remembered_set);

    // First notify the incremental marker if necessary, then update the
    // remembered set.
    CheckNeedsToInformIncrementalMarker(
        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
    InformIncrementalMarker(masm);
    regs_.Restore(masm);
    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);

    __ bind(&dont_need_remembered_set);
  }

  CheckNeedsToInformIncrementalMarker(
      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
  InformIncrementalMarker(masm);
  regs_.Restore(masm);
  __ ret(0);
}


// Calls the C function that records the write for incremental marking.
// Caller-save registers are preserved around the C call.
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
  int argument_count = 3;
  __ PrepareCallCFunction(argument_count, regs_.scratch0());
  __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
  __ mov(Operand(esp, 1 * kPointerSize), regs_.address());  // Slot.
  __ mov(Operand(esp, 2 * kPointerSize),
         Immediate(ExternalReference::isolate_address(isolate())));

  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(
      ExternalReference::incremental_marking_record_write_function(isolate()),
      argument_count);

  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}


// Decides whether the incremental marker must be told about this write;
// falls through when it must, otherwise updates the remembered set or
// returns, according to |on_no_need|.
void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    MacroAssembler* masm,
    OnNoNeedToInformIncrementalMarker on_no_need,
    Mode mode) {
  Label object_is_black, need_incremental, need_incremental_pop_object;

  // Locate the page header of the object and decrement its write-barrier
  // counter; when it goes negative we must rescan.
  __ mov(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
  __ and_(regs_.scratch0(), regs_.object());
  __ mov(regs_.scratch1(),
         Operand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
  __ sub(regs_.scratch1(), Immediate(1));
  __ mov(Operand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset),
         regs_.scratch1());
  __ j(negative, &need_incremental);

  // Let's look at the color of the object:  If it is not black we don't have
  // to inform the incremental marker.
  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(),
                 &object_is_black, Label::kNear);

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ ret(0);
  }

  __ bind(&object_is_black);

  // Get the value from the slot.
  __ mov(regs_.scratch0(), Operand(regs_.address(), 0));

  if (mode == INCREMENTAL_COMPACTION) {
    Label ensure_not_white;

    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kEvacuationCandidateMask,
                     zero,
                     &ensure_not_white,
                     Label::kNear);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
                     not_zero,
                     &ensure_not_white,
                     Label::kNear);

    __ jmp(&need_incremental);

    __ bind(&ensure_not_white);
  }

  // We need an extra register for this, so we push the object register
  // temporarily.
  __ push(regs_.object());
  __ JumpIfWhite(regs_.scratch0(),  // The value.
                 regs_.scratch1(),  // Scratch.
                 regs_.object(),    // Scratch.
                 &need_incremental_pop_object, Label::kNear);
  __ pop(regs_.object());

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ ret(0);
  }

  __ bind(&need_incremental_pop_object);
  __ pop(regs_.object());

  __ bind(&need_incremental);

  // Fall through when we need to inform the incremental marker.
}


// Re-enters the deoptimized stub frame after a stub failure, dropping the
// caller's stack parameters before returning to the IC miss continuation.
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
  CEntryStub ces(isolate(), 1, kSaveFPRegs);
  __ call(ces.GetCode(), RelocInfo::CODE_TARGET);
  int parameter_count_offset =
      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
  __ mov(ebx, MemOperand(ebp, parameter_count_offset));
  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
  __ pop(ecx);
  int additional_offset = function_mode() == JS_FUNCTION_STUB_MODE ?
kPointerSize : 0; __ lea(esp, MemOperand(esp, ebx, times_pointer_size, additional_offset)); __ jmp(ecx); // Return to IC Miss stub, continuation still on stack. } void LoadICTrampolineStub::Generate(MacroAssembler* masm) { __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister()); LoadICStub stub(isolate(), state()); stub.GenerateForTrampoline(masm); } void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) { __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister()); KeyedLoadICStub stub(isolate(), state()); stub.GenerateForTrampoline(masm); } static void HandleArrayCases(MacroAssembler* masm, Register receiver, Register key, Register vector, Register slot, Register feedback, bool is_polymorphic, Label* miss) { // feedback initially contains the feedback array Label next, next_loop, prepare_next; Label load_smi_map, compare_map; Label start_polymorphic; __ push(receiver); __ push(vector); Register receiver_map = receiver; Register cached_map = vector; // Receiver might not be a heap object. __ JumpIfSmi(receiver, &load_smi_map); __ mov(receiver_map, FieldOperand(receiver, 0)); __ bind(&compare_map); __ mov(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0))); // A named keyed load might have a 2 element array, all other cases can count // on an array with at least 2 {map, handler} pairs, so they can go right // into polymorphic array handling. __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset)); __ j(not_equal, is_polymorphic ? &start_polymorphic : &next); // found, now call handler. 
Register handler = feedback;
  __ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
  __ pop(vector);
  __ pop(receiver);
  // Tail-jump past the Code object header to the handler's first instruction.
  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
  __ jmp(handler);

  if (!is_polymorphic) {
    // Monomorphic map mismatch: only a 2-element array goes straight to miss;
    // anything longer is treated as a polymorphic {map, handler} array.
    __ bind(&next);
    __ cmp(FieldOperand(feedback, FixedArray::kLengthOffset),
           Immediate(Smi::FromInt(2)));
    __ j(not_equal, &start_polymorphic);
    __ pop(vector);
    __ pop(receiver);
    __ jmp(miss);
  }

  // Polymorphic, we have to loop from 2 to N
  __ bind(&start_polymorphic);
  __ push(key);
  Register counter = key;
  // counter is a Smi index; each {map, handler} pair occupies 2 elements.
  __ mov(counter, Immediate(Smi::FromInt(2)));
  __ bind(&next_loop);
  __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
                                  FixedArray::kHeaderSize));
  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
  __ j(not_equal, &prepare_next);
  __ mov(handler, FieldOperand(feedback, counter, times_half_pointer_size,
                               FixedArray::kHeaderSize + kPointerSize));
  __ pop(key);
  __ pop(vector);
  __ pop(receiver);
  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
  __ jmp(handler);

  __ bind(&prepare_next);
  __ add(counter, Immediate(Smi::FromInt(2)));
  __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
  __ j(less, &next_loop);

  // We exhausted our array of map handler pairs.
  __ pop(key);
  __ pop(vector);
  __ pop(receiver);
  __ jmp(miss);

  // Smi receivers are compared against the heap number map entry.
  __ bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ jmp(&compare_map);
}


// Monomorphic load dispatch: the feedback slot holds a single weak cell with
// the expected map; on a match, tail-jump to the handler stored in the
// following slot, otherwise fall through to |miss|.
static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
                                  Register key, Register vector, Register slot,
                                  Register weak_cell, Label* miss) {
  // feedback initially contains the feedback array
  Label compare_smi_map;

  // Move the weak map into the weak_cell register.
  Register ic_map = weak_cell;
  __ mov(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset));

  // Receiver might not be a heap object.
  __ JumpIfSmi(receiver, &compare_smi_map);
  __ cmp(ic_map, FieldOperand(receiver, 0));
  __ j(not_equal, miss);
  Register handler = weak_cell;
  // The handler lives one pointer after the weak cell in the vector.
  __ mov(handler, FieldOperand(vector, slot, times_half_pointer_size,
                               FixedArray::kHeaderSize + kPointerSize));
  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
  __ jmp(handler);

  // In microbenchmarks, it made sense to unroll this code so that the call to
  // the handler is duplicated for a HeapObject receiver and a Smi receiver.
  __ bind(&compare_smi_map);
  __ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex);
  __ j(not_equal, miss);
  __ mov(handler, FieldOperand(vector, slot, times_half_pointer_size,
                               FixedArray::kHeaderSize + kPointerSize));
  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
  __ jmp(handler);
}


void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }


void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


// Vector-based LoadIC dispatch: inspects the feedback slot and routes to the
// monomorphic, polymorphic, or megamorphic (stub cache) path; otherwise miss.
void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // edx
  Register name = LoadWithVectorDescriptor::NameRegister();          // ecx
  Register vector = LoadWithVectorDescriptor::VectorRegister();      // ebx
  Register slot = LoadWithVectorDescriptor::SlotRegister();          // eax
  Register scratch = edi;
  __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
                               FixedArray::kHeaderSize));

  // Is it a weak cell?
  Label try_array;
  Label not_array, smi_key, key_okay, miss;
  __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
  __ j(not_equal, &try_array);
  HandleMonomorphicCase(masm, receiver, name, vector, slot, scratch, &miss);

  // Is it a fixed array?
  __ bind(&try_array);
  __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
  __ j(not_equal, &not_array);
  HandleArrayCases(masm, receiver, name, vector, slot, scratch, true, &miss);

  __ bind(&not_array);
  // Megamorphic: probe the stub cache.
  __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
  __ j(not_equal, &miss);
  __ push(slot);
  __ push(vector);
  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::LOAD_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
                                               receiver, name, vector, scratch);
  __ pop(vector);
  __ pop(slot);

  __ bind(&miss);
  LoadIC::GenerateMiss(masm);
}


void KeyedLoadICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}


void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


// Vector-based KeyedLoadIC dispatch; like LoadICStub::GenerateImpl but with a
// megamorphic stub and a named-key ("poly name") path instead of a stub-cache
// probe.
void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // edx
  Register key = LoadWithVectorDescriptor::NameRegister();           // ecx
  Register vector = LoadWithVectorDescriptor::VectorRegister();      // ebx
  Register slot = LoadWithVectorDescriptor::SlotRegister();          // eax
  Register feedback = edi;
  __ mov(feedback, FieldOperand(vector, slot, times_half_pointer_size,
                                FixedArray::kHeaderSize));
  // Is it a weak cell?
  Label try_array;
  Label not_array, smi_key, key_okay, miss;
  __ CompareRoot(FieldOperand(feedback, 0), Heap::kWeakCellMapRootIndex);
  __ j(not_equal, &try_array);
  HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, &miss);

  __ bind(&try_array);
  // Is it a fixed array?
  __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
  __ j(not_equal, &not_array);

  // We have a polymorphic element handler.
  Label polymorphic, try_poly_name;
  __ bind(&polymorphic);
  HandleArrayCases(masm, receiver, key, vector, slot, feedback, true, &miss);

  __ bind(&not_array);
  // Is it generic?
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
  __ j(not_equal, &try_poly_name);
  // Generic case: tail-jump to the megamorphic keyed-load stub.
  Handle<Code> megamorphic_stub =
      KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
  __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);

  __ bind(&try_poly_name);
  // We might have a name in feedback, and a fixed array in the next slot.
  __ cmp(key, feedback);
  __ j(not_equal, &miss);
  // If the name comparison succeeded, we know we have a fixed array with
  // at least one map/handler pair.
  __ mov(feedback, FieldOperand(vector, slot, times_half_pointer_size,
                                FixedArray::kHeaderSize + kPointerSize));
  HandleArrayCases(masm, receiver, key, vector, slot, feedback, false, &miss);

  __ bind(&miss);
  KeyedLoadIC::GenerateMiss(masm);
}


// Loads the type feedback vector, then defers to the VectorStoreICStub body.
void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
  VectorStoreICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}


// Same as above, for keyed stores.
void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
  VectorKeyedStoreICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}


void VectorStoreICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}


void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


// value is on the stack already.
// Store analogue of HandleArrayCases: walks {weak-cell map, handler} pairs.
// Because ia32 is register-starved the handler is parked in an external
// "virtual register" so the value can be popped before the tail jump.
static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
                                       Register key, Register vector,
                                       Register slot, Register feedback,
                                       bool is_polymorphic, Label* miss) {
  // feedback initially contains the feedback array
  Label next, next_loop, prepare_next;
  Label load_smi_map, compare_map;
  Label start_polymorphic;
  Label pop_and_miss;
  ExternalReference virtual_register =
      ExternalReference::virtual_handler_register(masm->isolate());

  __ push(receiver);
  __ push(vector);

  Register receiver_map = receiver;
  Register cached_map = vector;

  // Receiver might not be a heap object.
  __ JumpIfSmi(receiver, &load_smi_map);
  __ mov(receiver_map, FieldOperand(receiver, 0));
  __ bind(&compare_map);
  __ mov(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));

  // A named keyed store might have a 2 element array, all other cases can
  // count on an array with at least 2 {map, handler} pairs, so they can go
  // right into polymorphic array handling.
  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
  __ j(not_equal, &start_polymorphic);

  // found, now call handler.
  Register handler = feedback;
  DCHECK(handler.is(VectorStoreICDescriptor::ValueRegister()));
  __ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
  __ pop(vector);
  __ pop(receiver);
  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
  // Stash the handler so its register can be reused to pop the value.
  __ mov(Operand::StaticVariable(virtual_register), handler);
  __ pop(handler);  // Pop "value".
  __ jmp(Operand::StaticVariable(virtual_register));

  // Polymorphic, we have to loop from 2 to N
  __ bind(&start_polymorphic);
  __ push(key);
  Register counter = key;
  __ mov(counter, Immediate(Smi::FromInt(2)));

  if (!is_polymorphic) {
    // If is_polymorphic is false, we may only have a two element array.
    // Check against length now in that case.
    __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
    __ j(greater_equal, &pop_and_miss);
  }

  __ bind(&next_loop);
  __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
                                  FixedArray::kHeaderSize));
  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
  __ j(not_equal, &prepare_next);
  __ mov(handler, FieldOperand(feedback, counter, times_half_pointer_size,
                               FixedArray::kHeaderSize + kPointerSize));
  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
  __ pop(key);
  __ pop(vector);
  __ pop(receiver);
  __ mov(Operand::StaticVariable(virtual_register), handler);
  __ pop(handler);  // Pop "value".
  __ jmp(Operand::StaticVariable(virtual_register));

  __ bind(&prepare_next);
  __ add(counter, Immediate(Smi::FromInt(2)));
  __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
  __ j(less, &next_loop);

  // We exhausted our array of map handler pairs.
  __ bind(&pop_and_miss);
  __ pop(key);
  __ pop(vector);
  __ pop(receiver);
  __ jmp(miss);

  // Smi receivers are compared against the heap number map entry.
  __ bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ jmp(&compare_map);
}


// Store analogue of HandleMonomorphicCase: one weak-cell map check, then
// tail-jump to the handler via the virtual handler register.
static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
                                       Register key, Register vector,
                                       Register slot, Register weak_cell,
                                       Label* miss) {
  // The store ic value is on the stack.
  DCHECK(weak_cell.is(VectorStoreICDescriptor::ValueRegister()));
  ExternalReference virtual_register =
      ExternalReference::virtual_handler_register(masm->isolate());

  // feedback initially contains the feedback array
  Label compare_smi_map;

  // Move the weak map into the weak_cell register.
  Register ic_map = weak_cell;
  __ mov(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset));

  // Receiver might not be a heap object.
  __ JumpIfSmi(receiver, &compare_smi_map);
  __ cmp(ic_map, FieldOperand(receiver, 0));
  __ j(not_equal, miss);
  __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
                                 FixedArray::kHeaderSize + kPointerSize));
  __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
  // Put the store ic value back in it's register.
  __ mov(Operand::StaticVariable(virtual_register), weak_cell);
  __ pop(weak_cell);  // Pop "value".
  // jump to the handler.
  __ jmp(Operand::StaticVariable(virtual_register));

  // In microbenchmarks, it made sense to unroll this code so that the call to
  // the handler is duplicated for a HeapObject receiver and a Smi receiver.
__ bind(&compare_smi_map);
  __ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex);
  __ j(not_equal, miss);
  __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
                                 FixedArray::kHeaderSize + kPointerSize));
  __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
  // Stash the handler, then restore the value before jumping to it.
  __ mov(Operand::StaticVariable(virtual_register), weak_cell);
  __ pop(weak_cell);  // Pop "value".
  // jump to the handler.
  __ jmp(Operand::StaticVariable(virtual_register));
}


// Vector-based StoreIC dispatch: routes to the monomorphic, polymorphic, or
// megamorphic (stub cache) store path based on the feedback slot contents.
// The value register doubles as the feedback scratch, so the value is kept on
// the stack for the duration of the dispatch.
void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // edx
  Register key = VectorStoreICDescriptor::NameRegister();           // ecx
  Register value = VectorStoreICDescriptor::ValueRegister();        // eax
  Register vector = VectorStoreICDescriptor::VectorRegister();      // ebx
  Register slot = VectorStoreICDescriptor::SlotRegister();          // edi
  Label miss;

  __ push(value);

  Register scratch = value;
  __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
                               FixedArray::kHeaderSize));

  // Is it a weak cell?
  Label try_array;
  Label not_array, smi_key, key_okay;
  __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
  __ j(not_equal, &try_array);
  HandleMonomorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);

  // Is it a fixed array?
  __ bind(&try_array);
  __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
  __ j(not_equal, &not_array);
  HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, true,
                             &miss);

  __ bind(&not_array);
  // Megamorphic: probe the stub cache (value must be restored first).
  __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
  __ j(not_equal, &miss);

  __ pop(value);
  __ push(slot);
  __ push(vector);
  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
                                               receiver, key, slot, no_reg);
  __ pop(vector);
  __ pop(slot);
  Label no_pop_miss;
  __ jmp(&no_pop_miss);

  // The miss path still has the value on the stack; pop it before the miss
  // handler.
  __ bind(&miss);
  __ pop(value);
  __ bind(&no_pop_miss);
  StoreIC::GenerateMiss(masm);
}


void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}


void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


// Keyed-store polymorphic dispatch over {map, transition-map-or-undefined,
// handler} triples (note the loop steps the counter by 3). Transitioning
// entries additionally pass the target map and the slot through virtual
// registers.
static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
                                            Register receiver, Register key,
                                            Register vector, Register slot,
                                            Register feedback, Label* miss) {
  // feedback initially contains the feedback array
  Label next, next_loop, prepare_next;
  Label load_smi_map, compare_map;
  Label transition_call;
  Label pop_and_miss;
  ExternalReference virtual_register =
      ExternalReference::virtual_handler_register(masm->isolate());
  ExternalReference virtual_slot =
      ExternalReference::virtual_slot_register(masm->isolate());

  __ push(receiver);
  __ push(vector);

  Register receiver_map = receiver;
  Register cached_map = vector;
  Register value = StoreDescriptor::ValueRegister();

  // Receiver might not be a heap object.
__ JumpIfSmi(receiver, &load_smi_map);
  __ mov(receiver_map, FieldOperand(receiver, 0));
  __ bind(&compare_map);

  // Polymorphic, we have to loop from 0 to N - 1
  __ push(key);
  // Current stack layout:
  // - esp[0]    -- key
  // - esp[4]    -- vector
  // - esp[8]    -- receiver
  // - esp[12]   -- value
  // - esp[16]   -- return address
  //
  // Required stack layout for handler call:
  // - esp[0]    -- return address
  // - receiver, key, value, vector, slot in registers.
  // - handler in virtual register.
  Register counter = key;
  __ mov(counter, Immediate(Smi::FromInt(0)));
  __ bind(&next_loop);
  __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
                                  FixedArray::kHeaderSize));
  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
  __ j(not_equal, &prepare_next);
  // Second element of the triple: undefined means "plain store", otherwise it
  // is the transition map's weak cell.
  __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
                                  FixedArray::kHeaderSize + kPointerSize));
  __ CompareRoot(cached_map, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, &transition_call);
  __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
                                FixedArray::kHeaderSize + 2 * kPointerSize));
  __ pop(key);
  __ pop(vector);
  __ pop(receiver);
  __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
  __ mov(Operand::StaticVariable(virtual_register), feedback);
  __ pop(value);
  __ jmp(Operand::StaticVariable(virtual_register));

  __ bind(&transition_call);
  // Current stack layout:
  // - esp[0]    -- key
  // - esp[4]    -- vector
  // - esp[8]    -- receiver
  // - esp[12]   -- value
  // - esp[16]   -- return address
  //
  // Required stack layout for handler call:
  // - esp[0]    -- return address
  // - receiver, key, value, map, vector in registers.
  // - handler and slot in virtual registers.
  __ mov(Operand::StaticVariable(virtual_slot), slot);
  __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
                                FixedArray::kHeaderSize + 2 * kPointerSize));
  __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
  __ mov(Operand::StaticVariable(virtual_register), feedback);

  __ mov(cached_map, FieldOperand(cached_map, WeakCell::kValueOffset));
  // The weak cell may have been cleared.
  __ JumpIfSmi(cached_map, &pop_and_miss);
  DCHECK(!cached_map.is(VectorStoreTransitionDescriptor::MapRegister()));
  __ mov(VectorStoreTransitionDescriptor::MapRegister(), cached_map);

  // Pop key into place.
  __ pop(key);
  __ pop(vector);
  __ pop(receiver);
  __ pop(value);
  __ jmp(Operand::StaticVariable(virtual_register));

  __ bind(&prepare_next);
  // Entries are triples, so advance the (Smi) counter by 3.
  __ add(counter, Immediate(Smi::FromInt(3)));
  __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
  __ j(less, &next_loop);

  // We exhausted our array of map handler pairs.
  __ bind(&pop_and_miss);
  __ pop(key);
  __ pop(vector);
  __ pop(receiver);
  __ jmp(miss);

  // Smi receivers are compared against the heap number map entry.
  __ bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ jmp(&compare_map);
}


// Vector-based KeyedStoreIC dispatch: monomorphic, polymorphic (triples),
// megamorphic stub, or named-key path; falls through to the miss handler.
void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // edx
  Register key = VectorStoreICDescriptor::NameRegister();           // ecx
  Register value = VectorStoreICDescriptor::ValueRegister();        // eax
  Register vector = VectorStoreICDescriptor::VectorRegister();      // ebx
  Register slot = VectorStoreICDescriptor::SlotRegister();          // edi
  Label miss;

  __ push(value);

  Register scratch = value;
  __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
                               FixedArray::kHeaderSize));

  // Is it a weak cell?
  Label try_array;
  Label not_array, smi_key, key_okay;
  __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
  __ j(not_equal, &try_array);
  HandleMonomorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);

  // Is it a fixed array?
  __ bind(&try_array);
  __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
  __ j(not_equal, &not_array);
  HandlePolymorphicKeyedStoreCase(masm, receiver, key, vector, slot, scratch,
                                  &miss);

  __ bind(&not_array);
  Label try_poly_name;
  __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
  __ j(not_equal, &try_poly_name);

  // Generic case: restore the value and tail-jump to the megamorphic stub.
  __ pop(value);

  Handle<Code> megamorphic_stub =
      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
  __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);

  __ bind(&try_poly_name);
  // We might have a name in feedback, and a fixed array in the next slot.
  __ cmp(key, scratch);
  __ j(not_equal, &miss);
  // If the name comparison succeeded, we know we have a fixed array with
  // at least one map/handler pair.
  __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
                               FixedArray::kHeaderSize + kPointerSize));
  HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, false,
                             &miss);

  __ bind(&miss);
  __ pop(value);
  KeyedStoreIC::GenerateMiss(masm);
}


// Loads the type feedback vector into ebx and tail-calls the CallICStub.
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(ebx);
  CallICStub stub(isolate(), state());
  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
}


// Emits a call to the profiler entry hook stub, but only when an entry hook
// is actually installed on the isolate.
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
  if (masm->isolate()->function_entry_hook() != NULL) {
    ProfileEntryHookStub stub(masm->isolate());
    masm->CallStub(&stub);
  }
}


// Calls the C function registered as the isolate's function entry hook,
// passing the calling function's address and the original stack pointer,
// while preserving the caller-saved registers.
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
  // Save volatile registers.
  const int kNumSavedRegisters = 3;
  __ push(eax);
  __ push(ecx);
  __ push(edx);

  // Calculate and push the original stack pointer.
  __ lea(eax, Operand(esp, (kNumSavedRegisters + 1) * kPointerSize));
  __ push(eax);

  // Retrieve our return address and use it to calculate the calling
  // function's address.
  __ mov(eax, Operand(esp, (kNumSavedRegisters + 1) * kPointerSize));
  __ sub(eax, Immediate(Assembler::kCallInstructionLength));
  __ push(eax);

  // Call the entry hook.
DCHECK(isolate()->function_entry_hook() != NULL);
  __ call(FUNCTION_ADDR(isolate()->function_entry_hook()),
          RelocInfo::RUNTIME_ENTRY);
  // Drop the two arguments pushed for the hook call.
  __ add(esp, Immediate(2 * kPointerSize));

  // Restore ecx.
  __ pop(edx);
  __ pop(ecx);
  __ pop(eax);

  __ ret(0);
}


// Tail-calls the array constructor stub T matching the elements kind in edx
// (or the initial kind when allocation sites are disabled).
template <class T>
static void CreateArrayDispatch(MacroAssembler* masm,
                                AllocationSiteOverrideMode mode) {
  if (mode == DISABLE_ALLOCATION_SITES) {
    T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    // Emit a compare-and-tail-call for every fast elements kind in sequence.
    int last_index =
        GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      Label next;
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      __ cmp(edx, kind);
      __ j(not_equal, &next);
      T stub(masm->isolate(), kind);
      __ TailCallStub(&stub);
      __ bind(&next);
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}


// Single-argument Array(len) dispatch. A nonzero length argument forces a
// holey elements kind (the array starts with holes), possibly recording the
// packed->holey transition in the allocation site first.
static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
                                           AllocationSiteOverrideMode mode) {
  // ebx - allocation site (if mode != DISABLE_ALLOCATION_SITES)
  // edx - kind (if mode != DISABLE_ALLOCATION_SITES)
  // eax - number of arguments
  // edi - constructor?
  // esp[0] - return address
  // esp[4] - last argument
  Label normal_sequence;
  if (mode == DONT_OVERRIDE) {
    // The STATIC_ASSERTs pin the kind numbering relied on by the low-bit test
    // below: odd kinds are the holey variants.
    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
    STATIC_ASSERT(FAST_ELEMENTS == 2);
    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
    STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
    STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);

    // is the low bit set? If so, we are holey and that is good.
    __ test_b(edx, 1);
    __ j(not_zero, &normal_sequence);
  }

  // look at the first argument
  __ mov(ecx, Operand(esp, kPointerSize));
  __ test(ecx, ecx);
  __ j(zero, &normal_sequence);

  if (mode == DISABLE_ALLOCATION_SITES) {
    ElementsKind initial = GetInitialFastElementsKind();
    ElementsKind holey_initial = GetHoleyElementsKind(initial);

    ArraySingleArgumentConstructorStub stub_holey(
        masm->isolate(), holey_initial, DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub_holey);

    __ bind(&normal_sequence);
    ArraySingleArgumentConstructorStub stub(masm->isolate(), initial,
                                            DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    // We are going to create a holey array, but our kind is non-holey.
    // Fix kind and retry.
    __ inc(edx);

    if (FLAG_debug_code) {
      Handle<Map> allocation_site_map =
          masm->isolate()->factory()->allocation_site_map();
      __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
      __ Assert(equal, kExpectedAllocationSite);
    }

    // Save the resulting elements kind in type info. We can't just store r3
    // in the AllocationSite::transition_info field because elements kind is
    // restricted to a portion of the field...upper bits need to be left alone.
    STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
    __ add(FieldOperand(ebx, AllocationSite::kTransitionInfoOffset),
           Immediate(Smi::FromInt(kFastElementsKindPackedToHoley)));

    __ bind(&normal_sequence);
    int last_index =
        GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      Label next;
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      __ cmp(edx, kind);
      __ j(not_equal, &next);
      ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
      __ TailCallStub(&stub);
      __ bind(&next);
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}


// Pre-generates the stub T for every fast elements kind (and the allocation-
// site-disabled variant where the kind tracks allocation sites).
template <class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
  int to_index =
      GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
  for (int i = 0; i <= to_index; ++i) {
    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
    T stub(isolate, kind);
    stub.GetCode();
    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
      T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
      stub1.GetCode();
    }
  }
}


void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
      isolate);
  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
      isolate);
  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
      isolate);
}


void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
    Isolate* isolate) {
  ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
  for (int i = 0; i < 2; i++) {
    // For internal arrays we only need a few things
    InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
    stubh1.GetCode();
    InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
    stubh2.GetCode();
    InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
    stubh3.GetCode();
  }
}


// Dispatches on the runtime argument count in eax (when the static count is
// ANY) to the no-argument, one-argument, or N-argument constructor stubs.
void ArrayConstructorStub::GenerateDispatchToArrayStub(
    MacroAssembler* masm, AllocationSiteOverrideMode mode) {
  if (argument_count() == ANY) {
    Label not_zero_case, not_one_case;
    __ test(eax, eax);
    __ j(not_zero, &not_zero_case);
    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);

    __ bind(&not_zero_case);
    __ cmp(eax, 1);
    __ j(greater, &not_one_case);
    CreateArrayDispatchOneArgument(masm, mode);

    __ bind(&not_one_case);
    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
  } else if (argument_count() == NONE) {
    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
  } else if (argument_count() == ONE) {
CreateArrayDispatchOneArgument(masm, mode);
  } else if (argument_count() == MORE_THAN_ONE) {
    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
  } else {
    UNREACHABLE();
  }
}


// Entry point for the Array constructor: extracts the elements kind from the
// allocation site (if any) and dispatches, or falls back to Runtime::kNewArray
// for subclass (new.target != target) calls.
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- eax : argc (only if argument_count() is ANY or MORE_THAN_ONE)
  //  -- ebx : AllocationSite or undefined
  //  -- edi : constructor
  //  -- edx : Original constructor
  //  -- esp[0] : return address
  //  -- esp[4] : last argument
  // -----------------------------------
  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
    // Will both indicate a NULL and a Smi.
    __ test(ecx, Immediate(kSmiTagMask));
    __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
    __ CmpObjectType(ecx, MAP_TYPE, ecx);
    __ Assert(equal, kUnexpectedInitialMapForArrayFunction);

    // We should either have undefined in ebx or a valid AllocationSite
    __ AssertUndefinedOrAllocationSite(ebx);
  }

  Label subclassing;

  // Enter the context of the Array function.
  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));

  __ cmp(edx, edi);
  __ j(not_equal, &subclassing);

  Label no_info;
  // If the feedback vector is the undefined value call an array constructor
  // that doesn't use AllocationSites.
  __ cmp(ebx, isolate()->factory()->undefined_value());
  __ j(equal, &no_info);

  // Only look at the lower 16 bits of the transition info.
  __ mov(edx, FieldOperand(ebx, AllocationSite::kTransitionInfoOffset));
  __ SmiUntag(edx);
  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
  __ and_(edx, Immediate(AllocationSite::ElementsKindBits::kMask));
  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);

  __ bind(&no_info);
  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);

  // Subclassing.
  __ bind(&subclassing);
  // Arrange the stack and argument count for Runtime::kNewArray: overwrite
  // the receiver slot with the constructor and account for the three extra
  // arguments (constructor, new target, allocation site) pushed below.
  switch (argument_count()) {
    case ANY:
    case MORE_THAN_ONE:
      __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
      __ add(eax, Immediate(3));
      break;
    case NONE:
      __ mov(Operand(esp, 1 * kPointerSize), edi);
      __ mov(eax, Immediate(3));
      break;
    case ONE:
      __ mov(Operand(esp, 2 * kPointerSize), edi);
      __ mov(eax, Immediate(4));
      break;
  }
  __ PopReturnAddressTo(ecx);
  __ Push(edx);
  __ Push(ebx);
  __ PushReturnAddressFrom(ecx);
  __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}


// Dispatches an internal-array construction on argc (eax): zero args, one arg
// (upgrading packed kinds to holey when the requested length is nonzero), or
// the general N-argument stub.
void InternalArrayConstructorStub::GenerateCase(MacroAssembler* masm,
                                                ElementsKind kind) {
  Label not_zero_case, not_one_case;
  Label normal_sequence;

  __ test(eax, eax);
  __ j(not_zero, &not_zero_case);
  InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
  __ TailCallStub(&stub0);

  __ bind(&not_zero_case);
  __ cmp(eax, 1);
  __ j(greater, &not_one_case);

  if (IsFastPackedElementsKind(kind)) {
    // We might need to create a holey array
    // look at the first argument
    __ mov(ecx, Operand(esp, kPointerSize));
    __ test(ecx, ecx);
    __ j(zero, &normal_sequence);

    InternalArraySingleArgumentConstructorStub stub1_holey(
        isolate(), GetHoleyElementsKind(kind));
    __ TailCallStub(&stub1_holey);
  }

  __ bind(&normal_sequence);
  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
  __ TailCallStub(&stub1);

  __ bind(&not_one_case);
  InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
  __ TailCallStub(&stubN);
}


// Entry point for internal array constructors: reads the elements kind out of
// the constructor's initial map and dispatches to GenerateCase.
void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- eax : argc
  //  -- edi : constructor
  //  -- esp[0] : return address
  //  -- esp[4] : last argument
  // -----------------------------------
  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
    // Will both indicate a NULL and a Smi.
    __ test(ecx, Immediate(kSmiTagMask));
    __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
    __ CmpObjectType(ecx, MAP_TYPE, ecx);
    __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
  }

  // Figure out the right elements kind
  __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));

  // Load the map's "bit field 2" into |result|. We only need the first byte,
  // but the following masking takes care of that anyway.
  __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ DecodeField<Map::ElementsKindBits>(ecx);

  if (FLAG_debug_code) {
    Label done;
    __ cmp(ecx, Immediate(FAST_ELEMENTS));
    __ j(equal, &done);
    __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
    __ Assert(equal,
              kInvalidElementsKindForInternalArrayOrInternalPackedArray);
    __ bind(&done);
  }

  Label fast_elements_case;
  __ cmp(ecx, Immediate(FAST_ELEMENTS));
  __ j(equal, &fast_elements_case);
  GenerateCase(masm, FAST_HOLEY_ELEMENTS);

  __ bind(&fast_elements_case);
  GenerateCase(masm, FAST_ELEMENTS);
}


// Fast path for `new target(...)`: allocates and initializes a JSObject from
// the new target's initial map, handling in-object slack tracking; falls back
// to the runtime when the fast-path preconditions do not hold.
void FastNewObjectStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- edi    : target
  //  -- edx    : new target
  //  -- esi    : context
  //  -- esp[0] : return address
  // -----------------------------------
  __ AssertFunction(edi);
  __ AssertReceiver(edx);

  // Verify that the new target is a JSFunction.
  Label new_object;
  __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx);
  __ j(not_equal, &new_object);

  // Load the initial map and verify that it's in fact a map.
  __ mov(ecx, FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset));
  __ JumpIfSmi(ecx, &new_object);
  __ CmpObjectType(ecx, MAP_TYPE, ebx);
  __ j(not_equal, &new_object);

  // Fall back to runtime if the target differs from the new target's
  // initial map constructor.
  __ cmp(edi, FieldOperand(ecx, Map::kConstructorOrBackPointerOffset));
  __ j(not_equal, &new_object);

  // Allocate the JSObject on the heap.
  Label allocate, done_allocate;
  // Instance size is stored in words; scale it up to bytes for Allocate.
  __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
  __ lea(ebx, Operand(ebx, times_pointer_size, 0));
  __ Allocate(ebx, eax, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
  __ bind(&done_allocate);

  // Initialize the JSObject fields.
  __ mov(Operand(eax, JSObject::kMapOffset), ecx);
  __ mov(Operand(eax, JSObject::kPropertiesOffset),
         masm->isolate()->factory()->empty_fixed_array());
  __ mov(Operand(eax, JSObject::kElementsOffset),
         masm->isolate()->factory()->empty_fixed_array());
  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
  __ lea(ebx, Operand(eax, JSObject::kHeaderSize));

  // ----------- S t a t e -------------
  //  -- eax    : result (untagged)
  //  -- ebx    : result fields (untagged)
  //  -- edi    : result end (untagged)
  //  -- ecx    : initial map
  //  -- esi    : context
  //  -- esp[0] : return address
  // -----------------------------------

  // Perform in-object slack tracking if requested.
  Label slack_tracking;
  STATIC_ASSERT(Map::kNoSlackTracking == 0);
  __ test(FieldOperand(ecx, Map::kBitField3Offset),
          Immediate(Map::ConstructionCounter::kMask));
  __ j(not_zero, &slack_tracking, Label::kNear);
  {
    // Initialize all in-object fields with undefined.
    __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
    __ InitializeFieldsWithFiller(ebx, edi, edx);

    // Add the object tag to make the JSObject real.
    // With kHeapObjectTag == 1 a plain increment applies the tag.
    STATIC_ASSERT(kHeapObjectTag == 1);
    __ inc(eax);
    __ Ret();
  }
  __ bind(&slack_tracking);
  {
    // Decrease generous allocation count.
    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
    __ sub(FieldOperand(ecx, Map::kBitField3Offset),
           Immediate(1 << Map::ConstructionCounter::kShift));

    // Initialize the in-object fields with undefined.
    // edx <- start of the unused (slack) tail: result end minus
    // kUnusedPropertyFields words.
    __ movzx_b(edx, FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset));
    __ neg(edx);
    __ lea(edx, Operand(edi, edx, times_pointer_size, 0));
    __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
    __ InitializeFieldsWithFiller(ebx, edx, edi);

    // Initialize the remaining (reserved) fields with one pointer filler map.
    __ movzx_b(edx, FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset));
    __ lea(edx, Operand(ebx, edx, times_pointer_size, 0));
    __ LoadRoot(edi, Heap::kOnePointerFillerMapRootIndex);
    __ InitializeFieldsWithFiller(ebx, edx, edi);

    // Add the object tag to make the JSObject real.
    STATIC_ASSERT(kHeapObjectTag == 1);
    __ inc(eax);

    // Check if we can finalize the instance size.
    Label finalize;
    STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
    __ test(FieldOperand(ecx, Map::kBitField3Offset),
            Immediate(Map::ConstructionCounter::kMask));
    __ j(zero, &finalize, Label::kNear);
    __ Ret();

    // Finalize the instance size.
    __ bind(&finalize);
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ Push(eax);
      __ Push(ecx);
      __ CallRuntime(Runtime::kFinalizeInstanceSize);
      __ Pop(eax);
    }
    __ Ret();
  }

  // Fall back to %AllocateInNewSpace.
  __ bind(&allocate);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ SmiTag(ebx);
    __ Push(ecx);
    __ Push(ebx);
    __ CallRuntime(Runtime::kAllocateInNewSpace);
    __ Pop(ecx);
  }
  // Untag the runtime result and recompute the field pointers, then resume
  // the normal initialization path above.
  STATIC_ASSERT(kHeapObjectTag == 1);
  __ dec(eax);
  __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
  __ lea(edi, Operand(eax, ebx, times_pointer_size, 0));
  __ jmp(&done_allocate);

  // Fall back to %NewObject.
  __ bind(&new_object);
  __ PopReturnAddressTo(ecx);
  __ Push(edi);
  __ Push(edx);
  __ PushReturnAddressFrom(ecx);
  __ TailCallRuntime(Runtime::kNewObject);
}

// Builds the rest-parameter array for the current function: empty when the
// caller passed no extra arguments, otherwise a JSArray backed by a
// FixedArray copied from the arguments adaptor frame.
void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- edi    : function
  //  -- esi    : context
  //  -- ebp    : frame pointer
  //  -- esp[0] : return address
  // -----------------------------------
  __ AssertFunction(edi);

  // For Ignition we need to skip all possible handler/stub frames until
  // we reach the JavaScript frame for the function (similar to what the
  // runtime fallback implementation does). So make edx point to that
  // JavaScript frame.
  {
    Label loop, loop_entry;
    __ mov(edx, ebp);
    __ jmp(&loop_entry, Label::kNear);
    __ bind(&loop);
    __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
    __ bind(&loop_entry);
    __ cmp(edi, Operand(edx, StandardFrameConstants::kMarkerOffset));
    __ j(not_equal, &loop);
  }

  // Check if we have rest parameters (only possible if we have an
  // arguments adaptor frame below the function frame).
  Label no_rest_parameters;
  __ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
  __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(not_equal, &no_rest_parameters, Label::kNear);

  // Check if the arguments adaptor frame contains more arguments than
  // specified by the function's internal formal parameter count.
  Label rest_parameters;
  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
  __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ sub(eax,
         FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
  __ j(greater, &rest_parameters);

  // Return an empty rest parameter array.
  __ bind(&no_rest_parameters);
  {
    // ----------- S t a t e -------------
    //  -- esi    : context
    //  -- esp[0] : return address
    // -----------------------------------

    // Allocate an empty rest parameter array.
    Label allocate, done_allocate;
    __ Allocate(JSArray::kSize, eax, edx, ecx, &allocate, TAG_OBJECT);
    __ bind(&done_allocate);

    // Setup the rest parameter array in eax.
    __ LoadGlobalFunction(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, ecx);
    __ mov(FieldOperand(eax, JSArray::kMapOffset), ecx);
    __ mov(ecx, isolate()->factory()->empty_fixed_array());
    __ mov(FieldOperand(eax, JSArray::kPropertiesOffset), ecx);
    __ mov(FieldOperand(eax, JSArray::kElementsOffset), ecx);
    __ mov(FieldOperand(eax, JSArray::kLengthOffset),
           Immediate(Smi::FromInt(0)));
    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
    __ Ret();

    // Fall back to %AllocateInNewSpace.
    __ bind(&allocate);
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ Push(Smi::FromInt(JSArray::kSize));
      __ CallRuntime(Runtime::kAllocateInNewSpace);
    }
    __ jmp(&done_allocate);
  }

  __ bind(&rest_parameters);
  {
    // Compute the pointer to the first rest parameter (skipping the receiver).
    // eax (rest count, tagged Smi) scaled by half a pointer yields the byte
    // offset of whole untagged words.
    __ lea(ebx,
           Operand(ebx, eax, times_half_pointer_size,
                   StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));

    // ----------- S t a t e -------------
    //  -- esi    : context
    //  -- eax    : number of rest parameters (tagged)
    //  -- ebx    : pointer to first rest parameters
    //  -- esp[0] : return address
    // -----------------------------------

    // Allocate space for the rest parameter array plus the backing store.
    Label allocate, done_allocate;
    __ lea(ecx, Operand(eax, times_half_pointer_size,
                        JSArray::kSize + FixedArray::kHeaderSize));
    __ Allocate(ecx, edx, edi, no_reg, &allocate, TAG_OBJECT);
    __ bind(&done_allocate);

    // Setup the elements array in edx.
    __ mov(FieldOperand(edx, FixedArray::kMapOffset),
           isolate()->factory()->fixed_array_map());
    __ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
    {
      // Copy the rest parameters from the stack (walking downwards in ebx)
      // into the FixedArray, indexed by the tagged counter in ecx.
      Label loop, done_loop;
      __ Move(ecx, Smi::FromInt(0));
      __ bind(&loop);
      __ cmp(ecx, eax);
      __ j(equal, &done_loop, Label::kNear);
      __ mov(edi, Operand(ebx, 0 * kPointerSize));
      __ mov(FieldOperand(edx, ecx, times_half_pointer_size,
                          FixedArray::kHeaderSize),
             edi);
      __ sub(ebx, Immediate(1 * kPointerSize));
      __ add(ecx, Immediate(Smi::FromInt(1)));
      __ jmp(&loop);
      __ bind(&done_loop);
    }

    // Setup the rest parameter array in edi (placed directly after the
    // backing store).
    __ lea(edi, Operand(edx, eax, times_half_pointer_size,
                        FixedArray::kHeaderSize));
    __ LoadGlobalFunction(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, ecx);
    __ mov(FieldOperand(edi, JSArray::kMapOffset), ecx);
    __ mov(FieldOperand(edi, JSArray::kPropertiesOffset),
           isolate()->factory()->empty_fixed_array());
    __ mov(FieldOperand(edi, JSArray::kElementsOffset), edx);
    __ mov(FieldOperand(edi, JSArray::kLengthOffset), eax);
    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
    __ mov(eax, edi);
    __ Ret();

    // Fall back to %AllocateInNewSpace.
    __ bind(&allocate);
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ SmiTag(ecx);
      __ Push(eax);
      __ Push(ebx);
      __ Push(ecx);
      __ CallRuntime(Runtime::kAllocateInNewSpace);
      __ mov(edx, eax);
      __ Pop(ebx);
      __ Pop(eax);
    }
    __ jmp(&done_allocate);
  }
}

// Builds a sloppy-mode arguments object, including the aliased parameter map
// when there are mapped parameters; falls back to %NewSloppyArguments on
// allocation failure.
void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- edi    : function
  //  -- esi    : context
  //  -- ebp    : frame pointer
  //  -- esp[0] : return address
  // -----------------------------------
  __ AssertFunction(edi);

  // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
  __ mov(ecx,
         FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
  __ lea(edx, Operand(ebp, ecx, times_half_pointer_size,
                      StandardFrameConstants::kCallerSPOffset));

  // ecx : number of parameters (tagged)
  // edx : parameters pointer
  // edi : function
  // esp[0] : return address

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor_frame, try_allocate, runtime;
  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
  __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(equal, &adaptor_frame, Label::kNear);

  // No adaptor, parameter count = argument count.
  __ mov(ebx, ecx);
  __ push(ecx);
  __ jmp(&try_allocate, Label::kNear);

  // We have an adaptor frame. Patch the parameters pointer.
  __ bind(&adaptor_frame);
  __ mov(ebx, ecx);
  __ push(ecx);
  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ lea(edx,
         Operand(edx, ecx, times_2, StandardFrameConstants::kCallerSPOffset));

  // ebx = parameter count (tagged)
  // ecx = argument count (smi-tagged)
  // Compute the mapped parameter count = min(ebx, ecx) in ebx.
  __ cmp(ebx, ecx);
  __ j(less_equal, &try_allocate, Label::kNear);
  __ mov(ebx, ecx);

  // Save mapped parameter count and function.
  __ bind(&try_allocate);
  __ push(edi);
  __ push(ebx);

  // Compute the sizes of backing store, parameter map, and arguments object.
  // 1. Parameter map, has 2 extra words containing context and backing store.
  const int kParameterMapHeaderSize =
      FixedArray::kHeaderSize + 2 * kPointerSize;
  Label no_parameter_map;
  __ test(ebx, ebx);
  __ j(zero, &no_parameter_map, Label::kNear);
  // times_2 scales the tagged Smi count to whole words.
  __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
  __ bind(&no_parameter_map);

  // 2. Backing store.
  __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));

  // 3. Arguments object.
  __ add(ebx, Immediate(JSSloppyArgumentsObject::kSize));

  // Do the allocation of all three objects in one go.
  __ Allocate(ebx, eax, edi, no_reg, &runtime, TAG_OBJECT);

  // eax = address of new object(s) (tagged)
  // ecx = argument count (smi-tagged)
  // esp[0] = mapped parameter count (tagged)
  // esp[4] = function
  // esp[8] = parameter count (tagged)
  // Get the arguments map from the current native context into edi.
  Label has_mapped_parameters, instantiate;
  __ mov(edi, NativeContextOperand());
  __ mov(ebx, Operand(esp, 0 * kPointerSize));
  __ test(ebx, ebx);
  __ j(not_zero, &has_mapped_parameters, Label::kNear);
  __ mov(
      edi,
      Operand(edi, Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX)));
  __ jmp(&instantiate, Label::kNear);

  __ bind(&has_mapped_parameters);
  __ mov(edi, Operand(edi, Context::SlotOffset(
                               Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX)));
  __ bind(&instantiate);

  // eax = address of new object (tagged)
  // ebx = mapped parameter count (tagged)
  // ecx = argument count (smi-tagged)
  // edi = address of arguments map (tagged)
  // esp[0] = mapped parameter count (tagged)
  // esp[4] = function
  // esp[8] = parameter count (tagged)
  // Copy the JS object part.
  __ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
         masm->isolate()->factory()->empty_fixed_array());
  __ mov(FieldOperand(eax, JSObject::kElementsOffset),
         masm->isolate()->factory()->empty_fixed_array());

  // Set up the callee in-object property.
  STATIC_ASSERT(JSSloppyArgumentsObject::kCalleeIndex == 1);
  __ mov(edi, Operand(esp, 1 * kPointerSize));
  __ AssertNotSmi(edi);
  __ mov(FieldOperand(eax, JSSloppyArgumentsObject::kCalleeOffset), edi);

  // Use the length (smi tagged) and set that as an in-object property too.
  __ AssertSmi(ecx);
  __ mov(FieldOperand(eax, JSSloppyArgumentsObject::kLengthOffset), ecx);

  // Set up the elements pointer in the allocated arguments object.
  // If we allocated a parameter map, edi will point there, otherwise to the
  // backing store.
  __ lea(edi, Operand(eax, JSSloppyArgumentsObject::kSize));
  __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);

  // eax = address of new object (tagged)
  // ebx = mapped parameter count (tagged)
  // ecx = argument count (tagged)
  // edx = address of receiver argument
  // edi = address of parameter map or backing store (tagged)
  // esp[0] = mapped parameter count (tagged)
  // esp[4] = function
  // esp[8] = parameter count (tagged)
  // Free two registers.
  __ push(edx);
  __ push(eax);

  // Initialize parameter map. If there are no mapped arguments, we're done.
  Label skip_parameter_map;
  __ test(ebx, ebx);
  __ j(zero, &skip_parameter_map);

  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
         Immediate(isolate()->factory()->sloppy_arguments_elements_map()));
  // Parameter map length = mapped count + 2 (context and backing store).
  __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
  __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);

  // Copy the parameter slots and the holes in the arguments.
  // We need to fill in mapped_parameter_count slots. They index the context,
  // where parameters are stored in reverse order, at
  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
  // The mapped parameter thus need to get indices
  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
  // We loop from right to left.
  Label parameters_loop, parameters_test;
  __ push(ecx);
  __ mov(eax, Operand(esp, 3 * kPointerSize));
  __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
  __ add(ebx, Operand(esp, 5 * kPointerSize));
  __ sub(ebx, eax);
  __ mov(ecx, isolate()->factory()->the_hole_value());
  __ mov(edx, edi);
  __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
  // eax = loop variable (tagged)
  // ebx = mapping index (tagged)
  // ecx = the hole value
  // edx = address of parameter map (tagged)
  // edi = address of backing store (tagged)
  // esp[0] = argument count (tagged)
  // esp[4] = address of new object (tagged)
  // esp[8] = address of receiver argument
  // esp[12] = mapped parameter count (tagged)
  // esp[16] = function
  // esp[20] = parameter count (tagged)
  __ jmp(&parameters_test, Label::kNear);

  __ bind(&parameters_loop);
  __ sub(eax, Immediate(Smi::FromInt(1)));
  __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
  __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
  __ add(ebx, Immediate(Smi::FromInt(1)));
  __ bind(&parameters_test);
  __ test(eax, eax);
  __ j(not_zero, &parameters_loop, Label::kNear);
  __ pop(ecx);

  __ bind(&skip_parameter_map);

  // ecx = argument count (tagged)
  // edi = address of backing store (tagged)
  // esp[0] = address of new object (tagged)
  // esp[4] = address of receiver argument
  // esp[8] = mapped parameter count (tagged)
  // esp[12] = function
  // esp[16] = parameter count (tagged)
  // Copy arguments header and remaining slots (if there are any).
  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
         Immediate(isolate()->factory()->fixed_array_map()));
  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);

  Label arguments_loop, arguments_test;
  __ mov(ebx, Operand(esp, 2 * kPointerSize));
  __ mov(edx, Operand(esp, 1 * kPointerSize));
  __ sub(edx, ebx);  // Is there a smarter way to do negative scaling?
  __ sub(edx, ebx);
  __ jmp(&arguments_test, Label::kNear);

  __ bind(&arguments_loop);
  __ sub(edx, Immediate(kPointerSize));
  __ mov(eax, Operand(edx, 0));
  __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
  __ add(ebx, Immediate(Smi::FromInt(1)));

  __ bind(&arguments_test);
  __ cmp(ebx, ecx);
  __ j(less, &arguments_loop, Label::kNear);

  // Restore.
  __ pop(eax);  // Address of arguments object.
  __ Drop(4);   // Drop receiver address, mapped count, function, param count.

  // Return.
  __ ret(0);

  // Do the runtime call to allocate the arguments object.
  __ bind(&runtime);
  __ pop(eax);   // Remove saved mapped parameter count.
  __ pop(edi);   // Pop saved function.
  __ pop(eax);   // Remove saved parameter count.
  __ pop(eax);   // Pop return address.
  __ push(edi);  // Push function.
  __ push(edx);  // Push parameters pointer.
  __ push(ecx);  // Push parameter count.
  __ push(eax);  // Push return address.
  __ TailCallRuntime(Runtime::kNewSloppyArguments);
}

// Builds a strict-mode arguments object: a JSStrictArgumentsObject plus a
// FixedArray backing store copied from the (possibly adapted) caller frame.
void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- edi    : function
  //  -- esi    : context
  //  -- ebp    : frame pointer
  //  -- esp[0] : return address
  // -----------------------------------
  __ AssertFunction(edi);

  // For Ignition we need to skip all possible handler/stub frames until
  // we reach the JavaScript frame for the function (similar to what the
  // runtime fallback implementation does). So make edx point to that
  // JavaScript frame.
  {
    Label loop, loop_entry;
    __ mov(edx, ebp);
    __ jmp(&loop_entry, Label::kNear);
    __ bind(&loop);
    __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
    __ bind(&loop_entry);
    __ cmp(edi, Operand(edx, StandardFrameConstants::kMarkerOffset));
    __ j(not_equal, &loop);
  }

  // Check if we have an arguments adaptor frame below the function frame.
  Label arguments_adaptor, arguments_done;
  __ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
  __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(equal, &arguments_adaptor, Label::kNear);
  {
    // No adaptor frame: argument count is the formal parameter count.
    __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
    __ mov(eax,
           FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
    __ lea(ebx,
           Operand(edx, eax, times_half_pointer_size,
                   StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
  }
  __ jmp(&arguments_done, Label::kNear);
  __ bind(&arguments_adaptor);
  {
    // Adaptor frame: take the actual argument count from the adaptor.
    __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
    __ lea(ebx,
           Operand(ebx, eax, times_half_pointer_size,
                   StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
  }
  __ bind(&arguments_done);

  // ----------- S t a t e -------------
  //  -- eax    : number of arguments (tagged)
  //  -- ebx    : pointer to the first argument
  //  -- esi    : context
  //  -- esp[0] : return address
  // -----------------------------------

  // Allocate space for the strict arguments object plus the backing store.
  Label allocate, done_allocate;
  __ lea(ecx,
         Operand(eax, times_half_pointer_size,
                 JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
  __ Allocate(ecx, edx, edi, no_reg, &allocate, TAG_OBJECT);
  __ bind(&done_allocate);

  // Setup the elements array in edx.
  __ mov(FieldOperand(edx, FixedArray::kMapOffset),
         isolate()->factory()->fixed_array_map());
  __ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
  {
    // Copy the arguments from the stack (walking downwards in ebx) into the
    // FixedArray, indexed by the tagged counter in ecx.
    Label loop, done_loop;
    __ Move(ecx, Smi::FromInt(0));
    __ bind(&loop);
    __ cmp(ecx, eax);
    __ j(equal, &done_loop, Label::kNear);
    __ mov(edi, Operand(ebx, 0 * kPointerSize));
    __ mov(FieldOperand(edx, ecx, times_half_pointer_size,
                        FixedArray::kHeaderSize),
           edi);
    __ sub(ebx, Immediate(1 * kPointerSize));
    __ add(ecx, Immediate(Smi::FromInt(1)));
    __ jmp(&loop);
    __ bind(&done_loop);
  }

  // Setup the strict arguments object in edi (placed directly after the
  // backing store).
  __ lea(edi, Operand(edx, eax, times_half_pointer_size,
                      FixedArray::kHeaderSize));
  __ LoadGlobalFunction(Context::STRICT_ARGUMENTS_MAP_INDEX, ecx);
  __ mov(FieldOperand(edi, JSStrictArgumentsObject::kMapOffset), ecx);
  __ mov(FieldOperand(edi, JSStrictArgumentsObject::kPropertiesOffset),
         isolate()->factory()->empty_fixed_array());
  __ mov(FieldOperand(edi, JSStrictArgumentsObject::kElementsOffset), edx);
  __ mov(FieldOperand(edi, JSStrictArgumentsObject::kLengthOffset), eax);
  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
  __ mov(eax, edi);
  __ Ret();

  // Fall back to %AllocateInNewSpace.
  __ bind(&allocate);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ SmiTag(ecx);
    __ Push(eax);
    __ Push(ebx);
    __ Push(ecx);
    __ CallRuntime(Runtime::kAllocateInNewSpace);
    __ mov(edx, eax);
    __ Pop(ebx);
    __ Pop(eax);
  }
  __ jmp(&done_allocate);
}

// Loads a global variable through the script context chain: walks depth()
// parent contexts, reads the PropertyCell at the slot, and falls back to the
// runtime when the cell holds the_hole.
void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
  Register context_reg = esi;
  Register slot_reg = ebx;
  Register result_reg = eax;
  Label slow_case;

  // Go up context chain to the script context.
  for (int i = 0; i < depth(); ++i) {
    __ mov(result_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
    context_reg = result_reg;
  }

  // Load the PropertyCell value at the specified slot.
  __ mov(result_reg, ContextOperand(context_reg, slot_reg));
  __ mov(result_reg, FieldOperand(result_reg, PropertyCell::kValueOffset));

  // Check that value is not the_hole.
  __ CompareRoot(result_reg, Heap::kTheHoleValueRootIndex);
  __ j(equal, &slow_case, Label::kNear);
  __ Ret();

  // Fallback to the runtime.
  __ bind(&slow_case);
  __ SmiTag(slot_reg);
  __ Pop(result_reg);  // Pop return address.
  __ Push(slot_reg);
  __ Push(result_reg);  // Push return address.
  __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}

// Stores a global variable through the script context chain. Fast paths cover
// mutable cells (with write barrier for heap objects) and constant-type cells
// whose value/map matches; everything else tail-calls the runtime.
void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
  Register context_reg = esi;
  Register slot_reg = ebx;
  Register value_reg = eax;
  Register cell_reg = edi;
  Register cell_details_reg = edx;
  Register cell_value_reg = ecx;
  Label fast_heapobject_case, fast_smi_case, slow_case;

  if (FLAG_debug_code) {
    // the_hole is never stored via this stub.
    __ CompareRoot(value_reg, Heap::kTheHoleValueRootIndex);
    __ Check(not_equal, kUnexpectedValue);
  }

  // Go up context chain to the script context.
  for (int i = 0; i < depth(); ++i) {
    __ mov(cell_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
    context_reg = cell_reg;
  }

  // Load the PropertyCell at the specified slot.
  __ mov(cell_reg, ContextOperand(context_reg, slot_reg));

  // Load PropertyDetails for the cell (actually only the cell_type and kind).
  __ mov(cell_details_reg,
         FieldOperand(cell_reg, PropertyCell::kDetailsOffset));
  __ SmiUntag(cell_details_reg);
  __ and_(cell_details_reg,
          Immediate(PropertyDetails::PropertyCellTypeField::kMask |
                    PropertyDetails::KindField::kMask |
                    PropertyDetails::kAttributesReadOnlyMask));

  // Check if PropertyCell holds mutable data.
  Label not_mutable_data;
  __ cmp(cell_details_reg,
         Immediate(PropertyDetails::PropertyCellTypeField::encode(
                       PropertyCellType::kMutable) |
                   PropertyDetails::KindField::encode(kData)));
  __ j(not_equal, &not_mutable_data);
  __ JumpIfSmi(value_reg, &fast_smi_case);
  __ bind(&fast_heapobject_case);
  __ mov(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
  __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
                      cell_details_reg, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // RecordWriteField clobbers the value register, so we need to reload.
  __ mov(value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
  __ Ret();
  __ bind(&not_mutable_data);

  // Check if PropertyCell value matches the new value (relevant for Constant,
  // ConstantType and Undefined cells).
  Label not_same_value;
  __ mov(cell_value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
  __ cmp(cell_value_reg, value_reg);
  __ j(not_equal, &not_same_value,
       FLAG_debug_code ? Label::kFar : Label::kNear);
  // Make sure the PropertyCell is not marked READ_ONLY.
  __ test(cell_details_reg,
          Immediate(PropertyDetails::kAttributesReadOnlyMask));
  __ j(not_zero, &slow_case);
  if (FLAG_debug_code) {
    Label done;
    // This can only be true for Constant, ConstantType and Undefined cells,
    // because we never store the_hole via this stub.
    __ cmp(cell_details_reg,
           Immediate(PropertyDetails::PropertyCellTypeField::encode(
                         PropertyCellType::kConstant) |
                     PropertyDetails::KindField::encode(kData)));
    __ j(equal, &done);
    __ cmp(cell_details_reg,
           Immediate(PropertyDetails::PropertyCellTypeField::encode(
                         PropertyCellType::kConstantType) |
                     PropertyDetails::KindField::encode(kData)));
    __ j(equal, &done);
    __ cmp(cell_details_reg,
           Immediate(PropertyDetails::PropertyCellTypeField::encode(
                         PropertyCellType::kUndefined) |
                     PropertyDetails::KindField::encode(kData)));
    __ Check(equal, kUnexpectedValue);
    __ bind(&done);
  }
  __ Ret();
  __ bind(&not_same_value);

  // Check if PropertyCell contains data with constant type (and is not
  // READ_ONLY).
  __ cmp(cell_details_reg,
         Immediate(PropertyDetails::PropertyCellTypeField::encode(
                       PropertyCellType::kConstantType) |
                   PropertyDetails::KindField::encode(kData)));
  __ j(not_equal, &slow_case, Label::kNear);

  // Now either both old and new values must be SMIs or both must be heap
  // objects with same map.
  Label value_is_heap_object;
  __ JumpIfNotSmi(value_reg, &value_is_heap_object, Label::kNear);
  __ JumpIfNotSmi(cell_value_reg, &slow_case, Label::kNear);
  // Old and new values are SMIs, no need for a write barrier here.
  __ bind(&fast_smi_case);
  __ mov(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
  __ Ret();
  __ bind(&value_is_heap_object);
  __ JumpIfSmi(cell_value_reg, &slow_case, Label::kNear);
  Register cell_value_map_reg = cell_value_reg;
  __ mov(cell_value_map_reg,
         FieldOperand(cell_value_reg, HeapObject::kMapOffset));
  __ cmp(cell_value_map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
  __ j(equal, &fast_heapobject_case);

  // Fallback to the runtime.
  __ bind(&slow_case);
  __ SmiTag(slot_reg);
  __ Pop(cell_reg);  // Pop return address.
  __ Push(slot_reg);
  __ Push(value_reg);
  __ Push(cell_reg);  // Push return address.
  __ TailCallRuntime(is_strict(language_mode())
                         ? Runtime::kStoreGlobalViaContext_Strict
                         : Runtime::kStoreGlobalViaContext_Sloppy);
}

// Generates an Operand for saving parameters after PrepareCallApiFunction.
static Operand ApiParameterOperand(int index) {
  return Operand(esp, index * kPointerSize);
}

// Prepares stack to put arguments (aligns and so on). Reserves
// space for return value if needed (assumes the return value is a handle).
// Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1)
// etc. Saves context (esi). If space was reserved for return value then
// stores the pointer to the reserved slot into esi.
static void PrepareCallApiFunction(MacroAssembler* masm, int argc) {
  __ EnterApiExitFrame(argc);
  if (__ emit_debug_code()) {
    // Zap esi in debug builds so stale-context use is caught.
    __ mov(esi, Immediate(bit_cast<int32_t>(kZapValue)));
  }
}

// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Clobbers ebx, edi and
// caller-save registers. Restores context. On return removes
// stack_space * kPointerSize (GCed).
static void CallApiFunctionAndReturn(MacroAssembler* masm,
                                     Register function_address,
                                     ExternalReference thunk_ref,
                                     Operand thunk_last_arg, int stack_space,
                                     Operand* stack_space_operand,
                                     Operand return_value_operand,
                                     Operand* context_restore_operand) {
  Isolate* isolate = masm->isolate();

  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate);
  ExternalReference limit_address =
      ExternalReference::handle_scope_limit_address(isolate);
  ExternalReference level_address =
      ExternalReference::handle_scope_level_address(isolate);

  DCHECK(edx.is(function_address));
  // Allocate HandleScope in callee-save registers.
  // ebx holds the previous "next" pointer, edi the previous limit; both are
  // needed to restore the scope on the way out.
  __ mov(ebx, Operand::StaticVariable(next_address));
  __ mov(edi, Operand::StaticVariable(limit_address));
  __ add(Operand::StaticVariable(level_address), Immediate(1));

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, eax);
    __ mov(Operand(esp, 0),
           Immediate(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  // If the CPU profiler is active, route the call through the thunk so the
  // profiler can observe it; otherwise call the API function directly.
  Label profiler_disabled;
  Label end_profiler_check;
  __ mov(eax, Immediate(ExternalReference::is_profiling_address(isolate)));
  __ cmpb(Operand(eax, 0), 0);
  __ j(zero, &profiler_disabled);

  // Additional parameter is the address of the actual getter function.
  __ mov(thunk_last_arg, function_address);
  // Call the api function.
  __ mov(eax, Immediate(thunk_ref));
  __ call(eax);
  __ jmp(&end_profiler_check);

  __ bind(&profiler_disabled);
  // Call the api function.
  __ call(function_address);
  __ bind(&end_profiler_check);

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, eax);
    __ mov(Operand(esp, 0),
           Immediate(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  Label prologue;
  // Load the value from ReturnValue
  __ mov(eax, return_value_operand);

  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;

  __ bind(&prologue);
  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  __ mov(Operand::StaticVariable(next_address), ebx);
  __ sub(Operand::StaticVariable(level_address), Immediate(1));
  __ Assert(above_equal, kInvalidHandleScopeLevel);
  // If the limit changed, extensions were allocated and must be deleted.
  __ cmp(edi, Operand::StaticVariable(limit_address));
  __ j(not_equal, &delete_allocated_handles);

  // Leave the API exit frame.
  __ bind(&leave_exit_frame);
  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    __ mov(esi, *context_restore_operand);
  }
  if (stack_space_operand != nullptr) {
    __ mov(ebx, *stack_space_operand);
  }
  __ LeaveApiExitFrame(!restore_context);

  // Check if the function scheduled an exception.
  ExternalReference scheduled_exception_address =
      ExternalReference::scheduled_exception_address(isolate);
  __ cmp(Operand::StaticVariable(scheduled_exception_address),
         Immediate(isolate->factory()->the_hole_value()));
  __ j(not_equal, &promote_scheduled_exception);

#if DEBUG
  // Check if the function returned a valid JavaScript value.
  Label ok;
  Register return_value = eax;
  Register map = ecx;

  __ JumpIfSmi(return_value, &ok, Label::kNear);
  __ mov(map, FieldOperand(return_value, HeapObject::kMapOffset));

  __ CmpInstanceType(map, LAST_NAME_TYPE);
  __ j(below_equal, &ok, Label::kNear);

  __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
  __ j(above_equal, &ok, Label::kNear);

  __ cmp(map, isolate->factory()->heap_number_map());
  __ j(equal, &ok, Label::kNear);

  __ cmp(return_value, isolate->factory()->undefined_value());
  __ j(equal, &ok, Label::kNear);

  __ cmp(return_value, isolate->factory()->true_value());
  __ j(equal, &ok, Label::kNear);

  __ cmp(return_value, isolate->factory()->false_value());
  __ j(equal, &ok, Label::kNear);

  __ cmp(return_value, isolate->factory()->null_value());
  __ j(equal, &ok, Label::kNear);

  __ Abort(kAPICallReturnedInvalidObject);

  __ bind(&ok);
#endif

  // Pop the frame's arguments: either a dynamic amount (ebx, loaded above
  // from stack_space_operand) or the static stack_space count.
  if (stack_space_operand != nullptr) {
    DCHECK_EQ(0, stack_space);
    __ pop(ecx);
    __ add(esp, ebx);
    __ jmp(ecx);
  } else {
    __ ret(stack_space * kPointerSize);
  }

  // Re-throw by promoting a scheduled exception.
  __ bind(&promote_scheduled_exception);
  __ TailCallRuntime(Runtime::kPromoteScheduledException);

  // HandleScope limit has changed. Delete allocated extensions.
  ExternalReference delete_extensions =
      ExternalReference::delete_handle_scope_extensions(isolate);
  __ bind(&delete_allocated_handles);
  __ mov(Operand::StaticVariable(limit_address), edi);
  // Preserve the return value (eax) across the C call in edi.
  __ mov(edi, eax);
  __ mov(Operand(esp, 0),
         Immediate(ExternalReference::isolate_address(isolate)));
  __ mov(eax, Immediate(delete_extensions));
  __ call(eax);
  __ mov(eax, edi);
  __ jmp(&leave_exit_frame);
}

// Shared body for the CallApiFunction/CallApiAccessor stubs: builds the
// FunctionCallbackArguments frame on the stack, fills in the
// FunctionCallbackInfo, and dispatches through CallApiFunctionAndReturn.
static void CallApiFunctionStubHelper(MacroAssembler* masm,
                                      const ParameterCount& argc,
                                      bool return_first_arg,
                                      bool call_data_undefined, bool is_lazy) {
  // ----------- S t a t e -------------
  //  -- edi                 : callee
  //  -- ebx                 : call_data
  //  -- ecx                 : holder
  //  -- edx                 : api_function_address
  //  -- esi                 : context
  //  -- eax                 : number of arguments if argc is a register
  //  --
  //  -- esp[0]              : return address
  //  -- esp[4]              : last argument
  //  -- ...
  //  -- esp[argc * 4]       : first argument
  //  -- esp[(argc + 1) * 4] : receiver
  // -----------------------------------

  Register callee = edi;
  Register call_data = ebx;
  Register holder = ecx;
  Register api_function_address = edx;
  Register context = esi;
  Register return_address = eax;

  typedef FunctionCallbackArguments FCA;

  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
  STATIC_ASSERT(FCA::kCalleeIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);
  STATIC_ASSERT(FCA::kArgsLength == 7);

  DCHECK(argc.is_immediate() || eax.is(argc.reg()));

  if (argc.is_immediate()) {
    __ pop(return_address);
    // context save.
    __ push(context);
  } else {
    // pop return address and save context
    // (eax carries the argument count, so the return address lives in the
    // context register instead).
    __ xchg(context, Operand(esp, 0));
    return_address = context;
  }

  // callee
  __ push(callee);

  // call data
  __ push(call_data);

  Register scratch = call_data;
  if (!call_data_undefined) {
    // return value
    __ push(Immediate(masm->isolate()->factory()->undefined_value()));
    // return value default
    __ push(Immediate(masm->isolate()->factory()->undefined_value()));
  } else {
    // return value
    __ push(scratch);
    // return value default
    __ push(scratch);
  }

  // isolate
  __ push(Immediate(reinterpret_cast<int>(masm->isolate())));
  // holder
  __ push(holder);

  // Remember the top of the FCA block before the return address is re-pushed.
  __ mov(scratch, esp);

  // push return address
  __ push(return_address);

  if (!is_lazy) {
    // load context from callee
    __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
  }

  // API function gets reference to the v8::Arguments. If CPU profiler
  // is enabled wrapper function will be called and we need to pass
  // address of the callback as additional parameter, always allocate
  // space for it.
  const int kApiArgc = 1 + 1;

  // Allocate the v8::Arguments structure in the arguments' space since
  // it's not controlled by GC.
  const int kApiStackSpace = 4;

  PrepareCallApiFunction(masm, kApiArgc + kApiStackSpace);

  // FunctionCallbackInfo::implicit_args_.
  __ mov(ApiParameterOperand(2), scratch);
  if (argc.is_immediate()) {
    __ add(scratch,
           Immediate((argc.immediate() + FCA::kArgsLength - 1) * kPointerSize));
    // FunctionCallbackInfo::values_.
    __ mov(ApiParameterOperand(3), scratch);
    // FunctionCallbackInfo::length_.
    __ Move(ApiParameterOperand(4), Immediate(argc.immediate()));
    // FunctionCallbackInfo::is_construct_call_.
    __ Move(ApiParameterOperand(5), Immediate(0));
  } else {
    __ lea(scratch, Operand(scratch, argc.reg(), times_pointer_size,
                            (FCA::kArgsLength - 1) * kPointerSize));
    // FunctionCallbackInfo::values_.
    __ mov(ApiParameterOperand(3), scratch);
    // FunctionCallbackInfo::length_.
    __ mov(ApiParameterOperand(4), argc.reg());
    // FunctionCallbackInfo::is_construct_call_.
    __ lea(argc.reg(), Operand(argc.reg(), times_pointer_size,
                               (FCA::kArgsLength + 1) * kPointerSize));
    __ mov(ApiParameterOperand(5), argc.reg());
  }

  // v8::InvocationCallback's argument.
  __ lea(scratch, ApiParameterOperand(2));
  __ mov(ApiParameterOperand(0), scratch);

  ExternalReference thunk_ref =
      ExternalReference::invoke_function_callback(masm->isolate());

  Operand context_restore_operand(ebp,
                                  (2 + FCA::kContextSaveIndex) * kPointerSize);
  // Stores return the first js argument
  int return_value_offset = 0;
  if (return_first_arg) {
    return_value_offset = 2 + FCA::kArgsLength;
  } else {
    return_value_offset = 2 + FCA::kReturnValueOffset;
  }
  Operand return_value_operand(ebp, return_value_offset * kPointerSize);
  int stack_space = 0;
  Operand is_construct_call_operand = ApiParameterOperand(5);
  Operand* stack_space_operand = &is_construct_call_operand;
  if (argc.is_immediate()) {
    // Static argument count: the unwind amount is known at compile time.
    stack_space = argc.immediate() + FCA::kArgsLength + 1;
    stack_space_operand = nullptr;
  }
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                           ApiParameterOperand(1), stack_space,
                           stack_space_operand, return_value_operand,
                           &context_restore_operand);
}

// Variable-argument API call; argument count arrives in eax.
void CallApiFunctionStub::Generate(MacroAssembler* masm) {
  bool call_data_undefined = this->call_data_undefined();
  CallApiFunctionStubHelper(masm, ParameterCount(eax), false,
                            call_data_undefined, false);
}

// Fixed-argument API accessor call; argument count is baked into the stub.
void CallApiAccessorStub::Generate(MacroAssembler* masm) {
  bool is_store = this->is_store();
  int argc = this->argc();
  bool call_data_undefined = this->call_data_undefined();
  bool is_lazy = this->is_lazy();
  CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
                            call_data_undefined, is_lazy);
}

// Invokes an accessor getter callback through the profiler-aware thunk,
// passing a v8::PropertyCallbackInfo built on the stack.
void CallApiGetterStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- esp[0]                        : return address
  //  -- esp[4]                        : name
  //  -- esp[8 .. (8 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
  //  -- ...
  //  -- edx                           : api_function_address
  // -----------------------------------
  DCHECK(edx.is(ApiGetterDescriptor::function_address()));

  // v8::PropertyCallbackInfo::args_ array and name handle.
  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;

  // Allocate v8::PropertyCallbackInfo object, arguments for callback and
  // space for optional callback address parameter (in case CPU profiler is
  // active) in non-GCed stack space.
  const int kApiArgc = 3 + 1;

  Register api_function_address = edx;
  Register scratch = ebx;

  // Load address of v8::PropertyAccessorInfo::args_ array.
  __ lea(scratch, Operand(esp, 2 * kPointerSize));

  PrepareCallApiFunction(masm, kApiArgc);
  // Create v8::PropertyCallbackInfo object on the stack and initialize
  // its args_ field.
  Operand info_object = ApiParameterOperand(3);
  __ mov(info_object, scratch);

  __ sub(scratch, Immediate(kPointerSize));
  __ mov(ApiParameterOperand(0), scratch);  // name.
  __ lea(scratch, info_object);
  __ mov(ApiParameterOperand(1), scratch);  // arguments pointer.
  // Reserve space for optional callback address parameter.
  Operand thunk_last_arg = ApiParameterOperand(2);

  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback(isolate());

  // +3 is to skip prolog, return address and name handle.
  Operand return_value_operand(
      ebp,
      (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                           thunk_last_arg, kStackUnwindSpace, nullptr,
                           return_value_operand, NULL);
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X87
/*! * Copyright (c) 2018 by Contributors * * \file quantize.cc * * \brief transform a graph to a low-bit graph * for compression and acceleration. */ #include <dmlc/thread_local.h> #include <tvm/base.h> #include <tvm/relay/pass.h> #include <tvm/relay/expr_functor.h> #include <tvm/relay/op_attr_types.h> #include <cmath> #include <string> #include <vector> #include <stack> #include "pattern_util.h" #include "quantize.h" namespace tvm { namespace relay { namespace quantize { /*! \brief Attribute for simulated quantize operator */ struct SimulatedQuantizeAttrs : public tvm::AttrsNode<SimulatedQuantizeAttrs> { int kind; bool sign; std::string rounding; TVM_DECLARE_ATTRS(SimulatedQuantizeAttrs, "relay.attrs.SimulatedQuantizeAttrs") { TVM_ATTR_FIELD(kind) .describe("kind of field, hint for nbit/dtype configuration."); TVM_ATTR_FIELD(sign).set_default(true) .describe("whether to use signed data type."); TVM_ATTR_FIELD(rounding).set_default("round") .describe("rounding mode. Can be 'floor', 'ceil', 'round'"); } }; TVM_REGISTER_NODE_TYPE(SimulatedQuantizeAttrs); bool SimulatedQuantizeRel(const Array<Type>& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { CHECK_EQ(types.size(), 5); const auto param = attrs.as<SimulatedQuantizeAttrs>(); CHECK(param != nullptr); const auto* data = types[0].as<TensorTypeNode>(); CHECK(data != nullptr); CHECK_NE(data->shape.size(), 0) << "Input shape cannot be empty"; reporter->Assign(types[1], TensorTypeNode::make({}, Float(32))); // dom_scale reporter->Assign(types[2], TensorTypeNode::make({}, Float(32))); // clip_min reporter->Assign(types[3], TensorTypeNode::make({}, Float(32))); // clip_max reporter->Assign(types[4], types[0]); // output return true; } RELAY_REGISTER_OP("relay.op.annotation.simulated_quantize") .describe(R"code(simulated quantize op)code" TVM_ADD_FILELINE) .set_num_inputs(4) .add_argument("data", "Tensor", "The input data.") .add_argument("dom_scale", "Tensor", "The domain scale of input data. 
It should be a scalar") .add_argument("clip_min", "Tensor", "lower bound. It should be a scalar") .add_argument("clip_max", "Tensor", "upper bound. It should be a scalar") .set_attrs_type_key("relay.attrs.SimulatedQuantizeAttrs") .set_support_level(10) .add_type_rel("SimulatedQuantize", SimulatedQuantizeRel); TVM_REGISTER_API("relay._quantize.simulated_quantize") .set_body_typed<Expr(Expr, Expr, Expr, Expr, int, bool, std::string)>( [](Expr data, Expr dom_scale, Expr clip_min, Expr clip_max, int kind, bool sign, std::string rounding) { auto attrs = make_node<SimulatedQuantizeAttrs>(); attrs->kind = kind; attrs->sign = sign; attrs->rounding = rounding; static const Op& op = Op::Get("relay.op.annotation.simulated_quantize"); return CallNode::make(op, {data, dom_scale, clip_min, clip_max}, Attrs(attrs), {}); }); // ============= // annotate pass Expr QAnnotateExprNode::Realize() const { const auto& cfg = QConfig::Current(); if (cfg->store_lowbit_output) { // store low bit output back for VTA const PackedFunc* f = runtime::Registry::Get("relay.quantize.attach_simulated_quantize"); return (*f)(this->expr, static_cast<int>(kQInput)); } else { return expr; } } QAnnotateExpr QAnnotateExprNode::make(Expr expr, QAnnotateKind kind) { auto rnode = make_node<QAnnotateExprNode>(); rnode->expr = expr; rnode->kind = kind; return QAnnotateExpr(rnode); } TVM_REGISTER_API("relay._quantize.make_annotate_expr") .set_body([](TVMArgs args, TVMRetValue *ret) { *ret = QAnnotateExprNode::make(args[0], static_cast<QAnnotateKind>(args[1].operator int())); }); TVM_REGISTER_API("relay._quantize.annotate") .set_body_typed<Expr(Expr)>([] (const Expr& expr) { std::function<Expr(const Expr&)> fmulti_ref = [](const Expr& e) { if (e->derived_from<TempExprNode>()) { const auto* n = e.as<QAnnotateExprNode>(); CHECK(n); const PackedFunc* f = runtime::Registry::Get("relay.quantize.attach_simulated_quantize"); Expr ret = (*f)(n->expr, static_cast<int>(kQInput)); return 
static_cast<Expr>(QAnnotateExprNode::make(ret, kQInput)); } return e; }; return ForwardRewrite(expr, "FQAnnotateRewrite", nullptr, nullptr); }); // ============= // realize pass Expr QRealizeIntExprNode::Realize() const { const auto& cfg = QConfig::Current(); Expr data = this->data; if (cfg->store_lowbit_output) { data = Cast(data, cfg->dtype_input); } // dequantize data = Cast(data, Float(32)); data = Multiply(data, this->dom_scale); return data; } QRealizeIntExpr QRealizeIntExprNode::make(Expr data, Expr dom_scale, DataType dtype) { NodePtr<QRealizeIntExprNode> n = make_node<QRealizeIntExprNode>(); n->data = std::move(data); n->dom_scale = std::move(dom_scale); n->dtype = std::move(dtype); return QRealizeIntExpr(n); } inline Expr ForwardOp(const Call& ref_call, const Array<Expr>& args) { return CallNode::make(ref_call->op, args, ref_call->attrs, ref_call->type_args); } /* calculate `data * s1 / s2`, use shift if possible */ inline Expr MulAndDiv(Expr data, float s1, float s2) { // here we assume the dtype of data is dtype activation const QConfig& cfg = QConfig::Current(); if (s1 == s2) return data; float factor = s1 / s2; float shift_factor = std::log2(factor); CHECK_GT(shift_factor, 0); if (static_cast<int>(shift_factor) == shift_factor) { return LeftShift(data, MakeConstantScalar(cfg->dtype_activation, static_cast<int>(shift_factor))); } else if (static_cast<int>(factor) == factor) { return Multiply(data, MakeConstantScalar(cfg->dtype_activation, factor)); } else { LOG(FATAL) << "fall back to float computation"; data = Cast(data, Float(32)); return Multiply(data, MakeConstantScalar(Float(32), factor)); } } Expr QuantizeRealize(const Call& ref_call, const Array<Expr>& new_args, const NodeRef& ctx) { const QConfig& cfg = QConfig::Current(); // do not handle data type cast const auto param = ref_call->attrs.as<SimulatedQuantizeAttrs>(); CHECK_EQ(param->rounding, "round"); Expr dom_scale = new_args[1]; Expr clip_min = new_args[2]; Expr clip_max = new_args[3]; 
float dom_scale_imm = GetScalarFromConstant<float>(dom_scale); float clip_min_imm = GetScalarFromConstant<float>(clip_min); float clip_max_imm = GetScalarFromConstant<float>(clip_max); // x * idom_scale = y * odom_scale // => y = x * idom_scale / odom_scale if (const auto* n = new_args[0].as<QRealizeIntExprNode>()) { // int32->int8 Expr data = n->data; float idom_scale_imm = GetScalarFromConstant<float>(n->dom_scale); float odom_scale_imm = GetScalarFromConstant<float>(dom_scale); if (idom_scale_imm == odom_scale_imm) { // same domain scale, only clip data = Clip(data, clip_min_imm, clip_max_imm); return QRealizeIntExprNode::make(data, dom_scale, n->dtype); } float shift_nbit = std::log2(odom_scale_imm / idom_scale_imm); CHECK_GT(shift_nbit, 0); if (static_cast<int>(shift_nbit) == shift_nbit) { // use right shift if (cfg->round_for_shift) { float round_bias = std::pow(2.0, shift_nbit - 1); data = Add(data, MakeConstantScalar(cfg->dtype_activation, static_cast<int>(round_bias))); } data = RightShift(data, MakeConstantScalar(cfg->dtype_activation, static_cast<int>(shift_nbit))); data = Clip(data, clip_min_imm, clip_max_imm); return QRealizeIntExprNode::make(data, dom_scale, n->dtype); } else { // float computation data = Cast(data, Float(32)); Expr scaled_data = Multiply(data, Divide(n->dom_scale, dom_scale)); Expr round_data = Clip(Round(scaled_data), clip_min_imm, clip_max_imm); return QRealizeIntExprNode::make(round_data, dom_scale, Float(32)); } } // quantize from real CHECK(!new_args[0]->derived_from<TempExprNode>()); Expr data = new_args[0]; Expr scaled_data = Multiply(data, MakeConstantScalar(Float(32), 1 / dom_scale_imm)); Expr round_data = Clip(Round(scaled_data), clip_min_imm, clip_max_imm); return QRealizeIntExprNode::make(round_data, dom_scale, Float(32)); } RELAY_REGISTER_OP("relay.op.annotation.simulated_quantize") .set_attr<FForwardRewrite>("FQRealizeRewrite", QuantizeRealize); Expr Conv2dRealize(const Call& ref_call, const Array<Expr>& new_args, const 
NodeRef& ctx) { const QConfig& cfg = QConfig::Current(); CHECK_EQ(new_args.size(), 2); if (!new_args[0]->derived_from<TempExprNode>() && !new_args[1]->derived_from<TempExprNode>()) { return Expr(nullptr); } const auto* lhs = new_args[0].as<QRealizeIntExprNode>(); CHECK(lhs); const auto* rhs = new_args[1].as<QRealizeIntExprNode>(); CHECK(rhs); Expr ldata = lhs->data; if (lhs->dtype != cfg->dtype_input) { ldata = Cast(ldata, cfg->dtype_input); } Expr rdata = Cast(rhs->data, cfg->dtype_weight); const auto ref_attrs = ref_call->attrs.as<Conv2DAttrs>(); auto attrs = make_node<Conv2DAttrs>(); *attrs = *ref_attrs; DataType out_dtype = cfg->dtype_activation; attrs->out_dtype = out_dtype; Expr ret = CallNode::make(ref_call->op, {ldata, rdata}, Attrs(attrs), ref_call->type_args); Expr dom_scale = FoldConstant(Multiply(lhs->dom_scale, rhs->dom_scale)); return QRealizeIntExprNode::make(ret, dom_scale, out_dtype); } RELAY_REGISTER_OP("nn.conv2d") .set_attr<FForwardRewrite>("FQRealizeRewrite", Conv2dRealize); Expr MulRealize(const Call& ref_call, const Array<Expr>& new_args, const NodeRef& ctx) { const QConfig& cfg = QConfig::Current(); CHECK_EQ(new_args.size(), 2); if (new_args[0].as<QRealizeIntExprNode>() && new_args[1].as<QRealizeIntExprNode>()) { // execute the operation with activation data type. 
const auto* lhs = new_args[0].as<QRealizeIntExprNode>(); const auto* rhs = new_args[1].as<QRealizeIntExprNode>(); Expr ldata = lhs->data; Expr rdata = rhs->data; DataType dtype = cfg->dtype_activation; if (lhs->dtype == Float(32)) { ldata = Cast(ldata, dtype); } else { CHECK_EQ(lhs->dtype, dtype); } if (rhs->dtype == Float(32)) { rdata = Cast(rdata, dtype); } else { CHECK_EQ(rhs->dtype, dtype); } Expr ret = ForwardOp(ref_call, {ldata, rdata}); Expr dom_scale = FoldConstant(Multiply(lhs->dom_scale, rhs->dom_scale)); return QRealizeIntExprNode::make(ret, dom_scale, dtype); } CHECK(!new_args[0]->derived_from<TempExprNode>() && !new_args[1]->derived_from<TempExprNode>()); return Expr(nullptr); } RELAY_REGISTER_OP("multiply") .set_attr<FForwardRewrite>("FQRealizeRewrite", MulRealize); float ChooseDomScale(const std::vector<const QRealizeIntExprNode*>& nptrs) { if (nptrs.size() == 2) { // x = a * s1, y = b * s2 // x + y = (a * s1 / s2 + b) * s2, if s1 > s2 // = (a + b * s2 / s1) * s1, if s2 > s1 float s1 = GetScalarFromConstant<float>(nptrs[0]->dom_scale); float s2 = GetScalarFromConstant<float>(nptrs[1]->dom_scale); return s1 > s2 ? 
s2 : s1; } else { const QConfig& cfg = QConfig::Current(); float scale = cfg->global_scale; return scale / std::pow(2.0, cfg->nbit_activation - 1); } } /* \brief Unify the dom scale of arguments */ Array<Expr> UnifyDTypeScale(const Array<Expr>& args, DataType* dtype_ptr, Expr* scale_ptr) { const QConfig& cfg = QConfig::Current(); std::vector<const QRealizeIntExprNode*> nptrs; Array<Expr> ret; for (auto arg : args) { const auto* nptr = arg.as<QRealizeIntExprNode>(); CHECK(nptr); nptrs.push_back(nptr); ret.push_back(nptr->data); } // unify the data type DataType dtype = cfg->dtype_activation; for (size_t i = 0; i < ret.size(); ++i) { if (nptrs[i]->dtype != dtype) { ret.Set(i, Cast(ret[i], dtype)); } } // unify the dom_scale float s = ChooseDomScale(nptrs); Expr dom_scale = MakeConstantScalar(Float(32), s); for (size_t i = 0; i < ret.size(); ++i) { float cur_s = GetScalarFromConstant<float>(nptrs[i]->dom_scale); ret.Set(i, MulAndDiv(ret[i], cur_s, s)); } *dtype_ptr = dtype; *scale_ptr = dom_scale; return ret; } Expr AddRealize(const Call& ref_call, const Array<Expr>& new_args, const NodeRef& ctx) { CHECK_EQ(new_args.size(), 2); if (new_args[0].as<QRealizeIntExprNode>() && new_args[1].as<QRealizeIntExprNode>()) { DataType dtype; Expr dom_scale; Array<Expr> ret_args = UnifyDTypeScale(new_args, &dtype, &dom_scale); Expr ret = ForwardOp(ref_call, ret_args); return QRealizeIntExprNode::make(ret, dom_scale, dtype); } CHECK(!new_args[0]->derived_from<TempExprNode>() && !new_args[1]->derived_from<TempExprNode>()); return Expr(nullptr); } RELAY_REGISTER_OP("add") .set_attr<FForwardRewrite>("FQRealizeRewrite", AddRealize); Expr ConcatenateRealize(const Call& ref_call, const Array<Expr>& new_args, const NodeRef& ctx) { CHECK_EQ(new_args.size(), 1); const auto* tuple = new_args[0].as<TupleNode>(); CHECK(tuple); const Array<Expr>& arr = tuple->fields; if (arr[0].as<QRealizeIntExprNode>()) { DataType dtype; Expr dom_scale; Array<Expr> ret_args = UnifyDTypeScale(arr, &dtype, 
&dom_scale); Expr ret = ForwardOp(ref_call, {TupleNode::make(ret_args)}); return QRealizeIntExprNode::make(ret, dom_scale, dtype); } else { for (auto arg : new_args) { CHECK(!arg->derived_from<TempExprNode>()); } return Expr(nullptr); } } RELAY_REGISTER_OP("concatenate") .set_attr<FForwardRewrite>("FQRealizeRewrite", ConcatenateRealize); /* \brief forward the original operator */ Expr IdentityRealize(const Call& ref_call, const Array<Expr>& new_args, const NodeRef& ctx) { CHECK_EQ(new_args.size(), 1); if (const auto* n = new_args[0].as<QRealizeIntExprNode>()) { Expr ret = ForwardOp(ref_call, {n->data}); return QRealizeIntExprNode::make(ret, n->dom_scale, n->dtype); } CHECK(!new_args[0]->derived_from<TempExprNode>()); return Expr(nullptr); } RELAY_REGISTER_OP("nn.relu") .set_attr<FForwardRewrite>("FQRealizeRewrite", IdentityRealize); RELAY_REGISTER_OP("strided_slice") .set_attr<FForwardRewrite>("FQRealizeRewrite", IdentityRealize); Expr MaxPoolRealize(const Call& ref_call, const Array<Expr>& new_args, const NodeRef& ctx) { const QConfig& cfg = QConfig::Current(); CHECK_EQ(new_args.size(), 1); if (const auto* n = new_args[0].as<QRealizeIntExprNode>()) { Expr data = Cast(n->data, cfg->dtype_input); Expr ret = ForwardOp(ref_call, {data}); return QRealizeIntExprNode::make(ret, n->dom_scale, cfg->dtype_input); } CHECK(!new_args[0]->derived_from<TempExprNode>()); return Expr(nullptr); } RELAY_REGISTER_OP("nn.max_pool2d") .set_attr<FForwardRewrite>("FQRealizeRewrite", MaxPoolRealize); Expr AvgPoolRealize(const Call& ref_call, const Array<Expr>& new_args, const NodeRef& ctx) { const QConfig& cfg = QConfig::Current(); CHECK_EQ(new_args.size(), 1); if (const auto* n = new_args[0].as<QRealizeIntExprNode>()) { Expr data = n->data; if (n->dtype != cfg->dtype_activation) { data = Cast(n->data, cfg->dtype_activation); } Expr ret = ForwardOp(ref_call, {data}); return QRealizeIntExprNode::make(ret, n->dom_scale, cfg->dtype_activation); } 
CHECK(!new_args[0]->derived_from<TempExprNode>()); return Expr(nullptr); } RELAY_REGISTER_OP("nn.avg_pool2d") .set_attr<FForwardRewrite>("FQRealizeRewrite", AvgPoolRealize); TVM_REGISTER_API("relay._quantize.realize") .set_body_typed<Expr(Expr)>([](const Expr& e) { Expr ret = ForwardRewrite(e, "FQRealizeRewrite", nullptr, nullptr); return ret; }); // ============= // qconfig QConfig qconfig() { return QConfig(make_node<QConfigNode>()); } /*! \brief Entry to hold the BuildConfig context stack. */ struct TVMQConfigThreadLocalEntry { /*! \brief The default build config if the stack is empty */ QConfig default_config; /*! \brief The current build config context */ std::stack<QConfig> context_stack; TVMQConfigThreadLocalEntry() : default_config(qconfig()) { } }; /*! \brief Thread local store to hold the BuildConfig context stack. */ typedef dmlc::ThreadLocalStore<TVMQConfigThreadLocalEntry> TVMQConfigThreadLocalStore; void QConfig::EnterQConfigScope(const QConfig& build_config) { TVMQConfigThreadLocalEntry *entry = TVMQConfigThreadLocalStore::Get(); entry->context_stack.push(build_config); } void QConfig::ExitQConfigScope() { TVMQConfigThreadLocalEntry *entry = TVMQConfigThreadLocalStore::Get(); entry->context_stack.pop(); } QConfig& QConfig::Current() { TVMQConfigThreadLocalEntry *entry = TVMQConfigThreadLocalStore::Get(); if (entry->context_stack.size() > 0) { return entry->context_stack.top(); } return entry->default_config; } TVM_REGISTER_NODE_TYPE(QConfigNode); TVM_STATIC_IR_FUNCTOR(IRPrinter, vtable) .set_dispatch<QConfigNode>([](const QConfigNode *op, IRPrinter *p) { p->stream << "qconfig("; p->stream << "nbit_input=" << op->nbit_input << ", "; p->stream << "nbit_weight=" << op->nbit_weight << ", "; p->stream << "nbit_activation=" << op->nbit_activation << ", "; p->stream << "global_scale=" << op->global_scale << ", "; p->stream << "skip_k_conv==" << op->skip_k_conv << ", "; p->stream << "round_for_shift==" << op->round_for_shift << ", "; p->stream << 
"store_lowbit_output==" << op->store_lowbit_output << ", "; p->stream << "debug_enabled_ops==" << op->debug_enabled_ops; p->stream << ")"; }); TVM_REGISTER_API("relay._quantize._GetCurrentQConfig") .set_body([](TVMArgs args, TVMRetValue* ret) { *ret = QConfig::Current(); }); TVM_REGISTER_API("relay._quantize._EnterQConfigScope") .set_body([](TVMArgs args, TVMRetValue* ret) { QConfig target = args[0]; QConfig::EnterQConfigScope(target); }); TVM_REGISTER_API("relay._quantize._ExitQConfigScope") .set_body([](TVMArgs args, TVMRetValue* ret) { QConfig::ExitQConfigScope(); }); } // namespace quantize } // namespace relay } // namespace tvm
// Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "ios/chrome/browser/web/features.h" namespace web { const base::Feature kWebPageDefaultZoomFromDynamicType{ "WebPageDefaultZoomFromDynamicType", base::FEATURE_DISABLED_BY_DEFAULT}; const base::Feature kWebPageAlternativeTextZoom{ "WebPageAlternativeTextZoom", base::FEATURE_DISABLED_BY_DEFAULT}; const base::Feature kRestoreSessionFromCache{"RestoreSessionFromCache", base::FEATURE_DISABLED_BY_DEFAULT}; const base::Feature kForceMajorVersion100InUserAgent{ "ForceMajorVersion100InUserAgent", base::FEATURE_DISABLED_BY_DEFAULT}; } // namespace web
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #include "nodestate.h" #include <boost/lexical_cast.hpp> #include <vespa/vespalib/text/stringtokenizer.h> #include <vespa/document/util/stringutil.h> #include <vespa/vespalib/util/exceptions.h> #include <vespa/vespalib/stllike/asciistream.h> #include <sstream> #include <cmath> #include <vespa/log/log.h> LOG_SETUP(".vdslib.nodestate"); namespace storage::lib { NodeState::NodeState(const NodeState &) = default; NodeState & NodeState::operator = (const NodeState &) = default; NodeState::NodeState(NodeState &&) = default; NodeState & NodeState::operator = (NodeState &&) = default; NodeState::~NodeState() { } NodeState::NodeState() : _type(0), _state(0), _description(""), _capacity(1.0), _reliability(1), _initProgress(0.0), _minUsedBits(16), _diskStates(), _anyDiskDown(false), _startTimestamp(0) { setState(State::UP); } NodeState::NodeState(const NodeType& type, const State& state, const vespalib::stringref & description, double capacity, uint16_t reliability) : _type(&type), _state(0), _description(description), _capacity(1.0), _reliability(1), _initProgress(0.0), _minUsedBits(16), _diskStates(), _anyDiskDown(false), _startTimestamp(0) { setState(state); if (type == NodeType::STORAGE) { setCapacity(capacity); setReliability(reliability); } } namespace { struct DiskData { bool empty; uint16_t diskIndex; std::ostringstream ost; DiskData() : empty(true), diskIndex(0), ost() {} void addTo(std::vector<DiskState>& diskStates) { if (!empty) { while (diskIndex >= diskStates.size()) { diskStates.push_back(DiskState(State::UP)); } diskStates[diskIndex] = DiskState(ost.str()); empty = true; ost.str(""); } } }; } NodeState::NodeState(const vespalib::stringref & serialized, const NodeType* type) : _type(type), _state(&State::UP), _description(), _capacity(1.0), _reliability(1), _initProgress(0.0), _minUsedBits(16), _diskStates(), _anyDiskDown(false), 
_startTimestamp(0) { vespalib::StringTokenizer st(serialized, " \t\f\r\n"); st.removeEmptyTokens(); DiskData diskData; for (vespalib::StringTokenizer::Iterator it = st.begin(); it != st.end(); ++it) { std::string::size_type index = it->find(':'); if (index == std::string::npos) { throw vespalib::IllegalArgumentException( "Token " + *it + " does not contain ':': " + serialized, VESPA_STRLOC); } std::string key = it->substr(0, index); std::string value = it->substr(index + 1); if (key.size() > 0) switch (key[0]) { case 'b': if (_type != 0 && *type != NodeType::STORAGE) break; if (key.size() > 1) break; try{ setMinUsedBits(boost::lexical_cast<uint32_t>(value)); } catch (...) { throw vespalib::IllegalArgumentException( "Illegal used bits '" + value + "'. Used bits " "must be a positive integer ", VESPA_STRLOC); } continue; case 's': if (key.size() > 1) break; setState(State::get(value)); continue; case 'c': if (key.size() > 1) break; if (_type != 0 && *type != NodeType::STORAGE) break; try{ setCapacity(boost::lexical_cast<double>(value)); } catch (...) { throw vespalib::IllegalArgumentException( "Illegal capacity '" + value + "'. Capacity must be" "a positive floating point number", VESPA_STRLOC); } continue; case 'r': if (_type != 0 && *type != NodeType::STORAGE) break; if (key.size() > 1) break; try{ setReliability(boost::lexical_cast<uint16_t>(value)); } catch (...) { throw vespalib::IllegalArgumentException( "Illegal reliability '" + value + "'. Reliability " "must be a positive integer", VESPA_STRLOC); } continue; case 'i': if (key.size() > 1) break; try{ setInitProgress(boost::lexical_cast<double>(value)); } catch (...) { throw vespalib::IllegalArgumentException( "Illegal init progress '" + value + "'. Init " "progress must be a floating point number from 0.0 " "to 1.0", VESPA_STRLOC); } continue; case 't': if (key.size() > 1) break; try{ setStartTimestamp(boost::lexical_cast<uint64_t>(value)); } catch (...) 
{ throw vespalib::IllegalArgumentException( "Illegal start timestamp '" + value + "'. Start " "timestamp must be 0 or positive long.", VESPA_STRLOC); } continue; case 'm': if (key.size() > 1) break; _description = document::StringUtil::unescape(value); continue; case 'd': { if (_type != 0 && *type != NodeType::STORAGE) break; if (key.size() == 1) { uint16_t size(0); try{ size = boost::lexical_cast<uint16_t>(value); } catch (...) { throw vespalib::IllegalArgumentException( "Invalid disk count '" + value + "'. Need a " "positive integer value", VESPA_STRLOC); } while (_diskStates.size() < size) { _diskStates.push_back(DiskState(State::UP)); } continue; } if (key[1] != '.') break; uint16_t diskIndex; std::string::size_type endp = key.find('.', 2); std::string indexStr; if (endp == std::string::npos) { indexStr = key.substr(2); } else { indexStr = key.substr(2, endp - 2); } try{ diskIndex = boost::lexical_cast<uint16_t>(indexStr); } catch (...) { throw vespalib::IllegalArgumentException( "Invalid disk index '" + indexStr + "'. Need a " "positive integer value", VESPA_STRLOC); } if (diskIndex >= _diskStates.size()) { std::ostringstream ost; ost << "Cannot index disk " << diskIndex << " of " << _diskStates.size(); throw vespalib::IllegalArgumentException( ost.str(), VESPA_STRLOC); } if (diskData.diskIndex != diskIndex) { diskData.addTo(_diskStates); } if (endp == std::string::npos) { diskData.ost << " s:" << value; } else { diskData.ost << " " << key.substr(endp + 1) << ':' << value; } diskData.diskIndex = diskIndex; diskData.empty = false; continue; } default: break; } LOG(debug, "Unknown key %s in nodestate. 
Ignoring it, assuming it's a " "new feature from a newer version than ourself: %s", key.c_str(), serialized.c_str()); } diskData.addTo(_diskStates); updateAnyDiskDownFlag(); } void NodeState::updateAnyDiskDownFlag() { bool anyDown = false; for (uint32_t i=0; i<_diskStates.size(); ++i) { if (_diskStates[i].getState() != State::UP) { anyDown = true; } } _anyDiskDown = anyDown; } namespace { struct SeparatorPrinter { mutable bool first; SeparatorPrinter() : first(true) {} void print(vespalib::asciistream & os) const { if (first) { first = false; } else { os << ' '; } } }; vespalib::asciistream & operator<<(vespalib::asciistream & os, const SeparatorPrinter& sep) { sep.print(os); return os; } } void NodeState::serialize(vespalib::asciistream & out, const vespalib::stringref & prefix, bool includeDescription, bool includeDiskDescription, bool useOldFormat) const { SeparatorPrinter sep; // Always give node state if not part of a system state // to prevent empty serialization if (*_state != State::UP || prefix.size() == 0) { out << sep << prefix << "s:"; if (useOldFormat && *_state == State::STOPPING) { out << State::DOWN.serialize(); } else { out << _state->serialize(); } } if (_capacity != 1.0) { out << sep << prefix << "c:" << _capacity; } if (_reliability != 1) { out << sep << prefix << "r:" << _reliability; } if (_minUsedBits != 16) { out << sep << prefix << "b:" << _minUsedBits; } if (*_state == State::INITIALIZING && !useOldFormat) { out << sep << prefix << "i:" << _initProgress; } if (_startTimestamp != 0) { out << sep << prefix << "t:" << _startTimestamp; } if (_diskStates.size() > 0) { out << sep << prefix << "d:" << _diskStates.size(); for (uint16_t i = 0; i < _diskStates.size(); ++i) { vespalib::asciistream diskPrefix; diskPrefix << prefix << "d." << i << "."; vespalib::asciistream disk; _diskStates[i].serialize(disk, diskPrefix.str(), includeDiskDescription, useOldFormat); if ( ! 
disk.str().empty()) { out << " " << disk.str(); } } } if (includeDescription && ! _description.empty()) { out << sep << prefix << "m:" << document::StringUtil::escape(_description, ' '); } } const DiskState& NodeState::getDiskState(uint16_t index) const { static const DiskState defaultState(State::UP); if (_diskStates.size() == 0) return defaultState; if (index >= _diskStates.size()) { std::ostringstream ost; ost << "Cannot get status of disk " << index << " of " << _diskStates.size() << "."; throw vespalib::IllegalArgumentException(ost.str(), VESPA_STRLOC); } return _diskStates[index]; } void NodeState::setState(const State& state) { if (_type != 0) { // We don't know whether you want to store reported, wanted or // current node state, so we must accept any. if (!state.validReportedNodeState(*_type) && !state.validWantedNodeState(*_type)) { throw vespalib::IllegalArgumentException( state.toString(true) + " is not a legal " + _type->toString() + " state", VESPA_STRLOC); } } _state = &state; } void NodeState::setMinUsedBits(uint32_t usedBits) { if (usedBits < 1 || usedBits > 58) { std::ostringstream ost; ost << "Illegal used bits '" << usedBits << "'. Minimum used bits" "must be an integer > 0 and < 59."; throw vespalib::IllegalArgumentException(ost.str(), VESPA_STRLOC); } _minUsedBits = usedBits; } void NodeState::setCapacity(vespalib::Double capacity) { if (capacity < 0) { std::ostringstream ost; ost << "Illegal capacity '" << capacity << "'. Capacity " "must be a positive floating point number"; throw vespalib::IllegalArgumentException(ost.str(), VESPA_STRLOC); } if (_type != 0 && *_type != NodeType::STORAGE) { throw vespalib::IllegalArgumentException( "Capacity only make sense for storage nodes.", VESPA_STRLOC); } _capacity = capacity; } void NodeState::setReliability(uint16_t reliability) { if (reliability == 0) { std::ostringstream ost; ost << "Illegal reliability '" << reliability << "'. 
Reliability " "must be a positive integer."; throw vespalib::IllegalArgumentException(ost.str(), VESPA_STRLOC); } if (_type != 0 && *_type != NodeType::STORAGE) { throw vespalib::IllegalArgumentException( "Reliability only make sense for storage nodes.", VESPA_STRLOC); } _reliability = reliability; } void NodeState::setInitProgress(vespalib::Double initProgress) { if (initProgress < 0 || initProgress > 1.0) { std::ostringstream ost; ost << "Illegal init progress '" << initProgress << "'. Init progress " "must be a floating point number from 0.0 to 1.0"; throw vespalib::IllegalArgumentException(ost.str(), VESPA_STRLOC); } _initProgress = initProgress; } void NodeState::setStartTimestamp(uint64_t startTimestamp) { _startTimestamp = startTimestamp; } void NodeState::setDiskCount(uint16_t count) { while (_diskStates.size() > count) { _diskStates.pop_back(); } _diskStates.reserve(count); while (_diskStates.size() < count) { _diskStates.push_back(DiskState(State::UP)); } updateAnyDiskDownFlag(); } void NodeState::setDiskState(uint16_t index, const DiskState& state) { if (index >= _diskStates.size()) { throw vespalib::IllegalArgumentException( vespalib::make_string("Can't set state of disk %u of %u.", index, (uint32_t) _diskStates.size()), VESPA_STRLOC); } _diskStates[index] = state; updateAnyDiskDownFlag(); } void NodeState::print(std::ostream& out, bool verbose, const std::string& indent) const { if (!verbose) { vespalib::asciistream tmp; serialize(tmp); out << tmp.str(); return; } _state->print(out, verbose, indent); if (_capacity != 1.0) { out << ", capacity " << _capacity; } if (_reliability != 1) { out << ", reliability " << _reliability; } if (_minUsedBits != 16) { out << ", minimum used bits " << _minUsedBits; } if (*_state == State::INITIALIZING) { out << ", init progress " << _initProgress; } if (_startTimestamp != 0) { out << ", start timestamp " << _startTimestamp; } if (_diskStates.size() > 0) { bool printedHeader = false; for (uint32_t i=0; 
i<_diskStates.size(); ++i) { if (_diskStates[i] != DiskState(State::UP)) { if (!printedHeader) { out << ","; printedHeader = true; } out << " Disk " << i << "("; _diskStates[i].print(out, false, indent); out << ")"; } } } if (_description.size() > 0) { out << ": " << _description; } } bool NodeState::operator==(const NodeState& other) const { if (_state != other._state || _capacity != other._capacity || _reliability != other._reliability || _minUsedBits != other._minUsedBits || _startTimestamp != other._startTimestamp || (*_state == State::INITIALIZING && _initProgress != other._initProgress)) { return false; } for (uint32_t i=0, n=std::max(_diskStates.size(), other._diskStates.size()); i < n; ++i) { if (getDiskState(i) != other.getDiskState(i)) { return false; } } return true; } bool NodeState::similarTo(const NodeState& other) const { if (_state != other._state || _capacity != other._capacity || _reliability != other._reliability || _minUsedBits != other._minUsedBits || _startTimestamp < other._startTimestamp) { return false; } if (*_state == State::INITIALIZING) { double limit = getListingBucketsInitProgressLimit(); bool below1 = (_initProgress < limit); bool below2 = (other._initProgress < limit); if (below1 != below2) { return false; } } for (uint32_t i=0, n=std::max(_diskStates.size(), other._diskStates.size()); i < n; ++i) { if (getDiskState(i) != other.getDiskState(i)) { return false; } } return true; } void NodeState::verifySupportForNodeType(const NodeType& type) const { if (_type != 0 && *_type == type) return; if (!_state->validReportedNodeState(type) && !_state->validWantedNodeState(type)) { throw vespalib::IllegalArgumentException("State " + _state->toString() + " does not fit a node of type " + type.toString(), VESPA_STRLOC); } if (type == NodeType::DISTRIBUTOR && _capacity != 1.0) { throw vespalib::IllegalArgumentException("Capacity should not be " "set for a distributor node.", VESPA_STRLOC); } if (type == NodeType::DISTRIBUTOR && _reliability != 
1) { throw vespalib::IllegalArgumentException("Reliability should not be " "set for a distributor node.", VESPA_STRLOC); } } std::string NodeState::getTextualDifference(const NodeState& other) const { std::ostringstream source; std::ostringstream target; if (_state != other._state) { source << ", " << *_state; target << ", " << *other._state; } if (_capacity != other._capacity) { source << ", capacity " << _capacity; target << ", capacity " << other._capacity; } if (_reliability != other._reliability) { source << ", reliability " << _reliability; target << ", reliability " << other._reliability; } if (_minUsedBits != other._minUsedBits) { source << ", minUsedBits " << _minUsedBits; target << ", minUsedBits " << _minUsedBits; } if (_initProgress != other._initProgress) { if (_state == &State::INITIALIZING) { source << ", init progress " << _initProgress; } if (other._state == &State::INITIALIZING) { target << ", init progress " << other._initProgress; } } if (_startTimestamp != other._startTimestamp) { source << ", start timestamp " << _startTimestamp; target << ", start timestamp " << other._startTimestamp; } if (_diskStates.size() != other._diskStates.size()) { source << ", " << _diskStates.size() << " disks"; target << ", " << other._diskStates.size() << " disks"; } else { for (uint32_t i=0; i<_diskStates.size(); ++i) { if (_diskStates[i] != other._diskStates[i]) { source << ", disk " << i << _diskStates[i]; target << ", disk " << i << other._diskStates[i]; } } } if (source.str().length() < 2 || target.str().length() < 2) { return "no change"; } std::ostringstream total; total << source.str().substr(2) << " to " << target.str().substr(2); if (other._description != _description) { total << " (" << other._description << ")"; } return total.str(); } }
// Update.cpp #include "Update.h" #include "../../../Common/IntToString.h" #include "../../../Common/StringConvert.h" #include "../../../Windows/DLL.h" #include "../../../Windows/FileDir.h" #include "../../../Windows/FileFind.h" #include "../../../Windows/FileName.h" #include "../../../Windows/PropVariant.h" #include "../../../Windows/PropVariantConv.h" #include "../../../Windows/TimeUtils.h" #include "../../Common/FileStreams.h" #include "../../Common/LimitedStreams.h" #include "../../Compress/CopyCoder.h" #include "../Common/DirItem.h" #include "../Common/EnumDirItems.h" #include "../Common/OpenArchive.h" #include "../Common/UpdateProduce.h" #include "EnumDirItems.h" #include "SetProperties.h" #include "TempFiles.h" #include "UpdateCallback.h" static const char *kUpdateIsNotSupoorted = "update operations are not supported for this archive"; using namespace NWindows; using namespace NCOM; using namespace NFile; using namespace NDir; using namespace NName; static CFSTR kTempFolderPrefix = FTEXT("7zE"); void CUpdateErrorInfo::SetFromLastError(const char *message) { SystemError = ::GetLastError(); Message = message; } HRESULT CUpdateErrorInfo::SetFromLastError(const char *message, const FString &fileName) { SetFromLastError(message); FileNames.Add(fileName); return Get_HRESULT_Error(); } static bool DeleteEmptyFolderAndEmptySubFolders(const FString &path) { NFind::CFileInfo fileInfo; FString pathPrefix = path + FCHAR_PATH_SEPARATOR; { NFind::CEnumerator enumerator(pathPrefix + FCHAR_ANY_MASK); while (enumerator.Next(fileInfo)) { if (fileInfo.IsDir()) if (!DeleteEmptyFolderAndEmptySubFolders(pathPrefix + fileInfo.Name)) return false; } } /* // we don't need clear read-only for folders if (!MySetFileAttributes(path, 0)) return false; */ return RemoveDir(path); } using namespace NUpdateArchive; class COutMultiVolStream: public IOutStream, public CMyUnknownImp { unsigned _streamIndex; // required stream UInt64 _offsetPos; // offset from start of _streamIndex index UInt64 
_absPos; UInt64 _length; struct CAltStreamInfo { COutFileStream *StreamSpec; CMyComPtr<IOutStream> Stream; FString Name; UInt64 Pos; UInt64 RealSize; }; CObjectVector<CAltStreamInfo> Streams; public: // CMyComPtr<IArchiveUpdateCallback2> VolumeCallback; CRecordVector<UInt64> Sizes; FString Prefix; CTempFiles *TempFiles; void Init() { _streamIndex = 0; _offsetPos = 0; _absPos = 0; _length = 0; } bool SetMTime(const FILETIME *mTime); HRESULT Close(); UInt64 GetSize() const { return _length; } MY_UNKNOWN_IMP1(IOutStream) STDMETHOD(Write)(const void *data, UInt32 size, UInt32 *processedSize); STDMETHOD(Seek)(Int64 offset, UInt32 seekOrigin, UInt64 *newPosition); STDMETHOD(SetSize)(UInt64 newSize); }; // static NSynchronization::CCriticalSection g_TempPathsCS; HRESULT COutMultiVolStream::Close() { HRESULT res = S_OK; FOR_VECTOR (i, Streams) { COutFileStream *s = Streams[i].StreamSpec; if (s) { HRESULT res2 = s->Close(); if (res2 != S_OK) res = res2; } } return res; } bool COutMultiVolStream::SetMTime(const FILETIME *mTime) { bool res = true; FOR_VECTOR (i, Streams) { COutFileStream *s = Streams[i].StreamSpec; if (s) if (!s->SetMTime(mTime)) res = false; } return res; } STDMETHODIMP COutMultiVolStream::Write(const void *data, UInt32 size, UInt32 *processedSize) { if (processedSize != NULL) *processedSize = 0; while (size > 0) { if (_streamIndex >= Streams.Size()) { CAltStreamInfo altStream; FChar temp[16]; ConvertUInt32ToString(_streamIndex + 1, temp); FString name = temp; while (name.Len() < 3) name.InsertAtFront(FTEXT('0')); name.Insert(0, Prefix); altStream.StreamSpec = new COutFileStream; altStream.Stream = altStream.StreamSpec; if (!altStream.StreamSpec->Create(name, false)) return ::GetLastError(); { // NSynchronization::CCriticalSectionLock lock(g_TempPathsCS); TempFiles->Paths.Add(name); } altStream.Pos = 0; altStream.RealSize = 0; altStream.Name = name; Streams.Add(altStream); continue; } CAltStreamInfo &altStream = Streams[_streamIndex]; unsigned index = 
_streamIndex; if (index >= Sizes.Size()) index = Sizes.Size() - 1; UInt64 volSize = Sizes[index]; if (_offsetPos >= volSize) { _offsetPos -= volSize; _streamIndex++; continue; } if (_offsetPos != altStream.Pos) { // CMyComPtr<IOutStream> outStream; // RINOK(altStream.Stream.QueryInterface(IID_IOutStream, &outStream)); RINOK(altStream.Stream->Seek(_offsetPos, STREAM_SEEK_SET, NULL)); altStream.Pos = _offsetPos; } UInt32 curSize = (UInt32)MyMin((UInt64)size, volSize - altStream.Pos); UInt32 realProcessed; RINOK(altStream.Stream->Write(data, curSize, &realProcessed)); data = (void *)((Byte *)data + realProcessed); size -= realProcessed; altStream.Pos += realProcessed; _offsetPos += realProcessed; _absPos += realProcessed; if (_absPos > _length) _length = _absPos; if (_offsetPos > altStream.RealSize) altStream.RealSize = _offsetPos; if (processedSize != NULL) *processedSize += realProcessed; if (altStream.Pos == volSize) { _streamIndex++; _offsetPos = 0; } if (realProcessed == 0 && curSize != 0) return E_FAIL; break; } return S_OK; } STDMETHODIMP COutMultiVolStream::Seek(Int64 offset, UInt32 seekOrigin, UInt64 *newPosition) { if (seekOrigin >= 3) return STG_E_INVALIDFUNCTION; switch (seekOrigin) { case STREAM_SEEK_SET: _absPos = offset; break; case STREAM_SEEK_CUR: _absPos += offset; break; case STREAM_SEEK_END: _absPos = _length + offset; break; } _offsetPos = _absPos; if (newPosition != NULL) *newPosition = _absPos; _streamIndex = 0; return S_OK; } STDMETHODIMP COutMultiVolStream::SetSize(UInt64 newSize) { unsigned i = 0; while (i < Streams.Size()) { CAltStreamInfo &altStream = Streams[i++]; if ((UInt64)newSize < altStream.RealSize) { RINOK(altStream.Stream->SetSize(newSize)); altStream.RealSize = newSize; break; } newSize -= altStream.RealSize; } while (i < Streams.Size()) { { CAltStreamInfo &altStream = Streams.Back(); altStream.Stream.Release(); DeleteFileAlways(altStream.Name); } Streams.DeleteBack(); } _offsetPos = _absPos; _streamIndex = 0; _length = newSize; 
return S_OK; } void CArchivePath::ParseFromPath(const UString &path, EArcNameMode mode) { OriginalPath = path; SplitPathToParts_2(path, Prefix, Name); if (mode == k_ArcNameMode_Add) return; if (mode == k_ArcNameMode_Exact) { BaseExtension.Empty(); return; } int dotPos = Name.ReverseFind_Dot(); if (dotPos < 0) return; if ((unsigned)dotPos == Name.Len() - 1) { Name.DeleteBack(); BaseExtension.Empty(); return; } const UString ext = Name.Ptr(dotPos + 1); if (BaseExtension.IsEqualTo_NoCase(ext)) { BaseExtension = ext; Name.DeleteFrom(dotPos); } else BaseExtension.Empty(); } UString CArchivePath::GetFinalPath() const { UString path = GetPathWithoutExt(); if (!BaseExtension.IsEmpty()) { path += L'.'; path += BaseExtension; } return path; } UString CArchivePath::GetFinalVolPath() const { UString path = GetPathWithoutExt(); if (!BaseExtension.IsEmpty()) { path += L'.'; path += VolExtension; } return path; } FString CArchivePath::GetTempPath() const { FString path = TempPrefix; path += us2fs(Name); if (!BaseExtension.IsEmpty()) { path += FTEXT('.'); path += us2fs(BaseExtension); } path.AddAscii(".tmp"); path += TempPostfix; return path; } static const wchar_t *kDefaultArcType = L"7z"; static const wchar_t *kDefaultArcExt = L"7z"; static const char *kSFXExtension = #ifdef _WIN32 "exe"; #else ""; #endif bool CUpdateOptions::InitFormatIndex(const CCodecs *codecs, const CObjectVector<COpenType> &types, const UString &arcPath) { if (types.Size() > 1) return false; // int arcTypeIndex = -1; if (types.Size() != 0) { MethodMode.Type = types[0]; MethodMode.Type_Defined = true; } if (MethodMode.Type.FormatIndex < 0) { // MethodMode.Type = -1; MethodMode.Type = COpenType(); if (ArcNameMode != k_ArcNameMode_Add) { MethodMode.Type.FormatIndex = codecs->FindFormatForArchiveName(arcPath); if (MethodMode.Type.FormatIndex >= 0) MethodMode.Type_Defined = true; } } return true; } bool CUpdateOptions::SetArcPath(const CCodecs *codecs, const UString &arcPath) { UString typeExt; int formatIndex = 
MethodMode.Type.FormatIndex; if (formatIndex < 0) { typeExt = kDefaultArcExt; } else { const CArcInfoEx &arcInfo = codecs->Formats[formatIndex]; if (!arcInfo.UpdateEnabled) return false; typeExt = arcInfo.GetMainExt(); } UString ext = typeExt; if (SfxMode) ext.SetFromAscii(kSFXExtension); ArchivePath.BaseExtension = ext; ArchivePath.VolExtension = typeExt; ArchivePath.ParseFromPath(arcPath, ArcNameMode); FOR_VECTOR (i, Commands) { CUpdateArchiveCommand &uc = Commands[i]; uc.ArchivePath.BaseExtension = ext; uc.ArchivePath.VolExtension = typeExt; uc.ArchivePath.ParseFromPath(uc.UserArchivePath, ArcNameMode); } return true; } struct CUpdateProduceCallbackImp: public IUpdateProduceCallback { const CObjectVector<CArcItem> *_arcItems; IUpdateCallbackUI *_callback; CUpdateProduceCallbackImp(const CObjectVector<CArcItem> *a, IUpdateCallbackUI *callback): _arcItems(a), _callback(callback) {} virtual HRESULT ShowDeleteFile(unsigned arcIndex); }; HRESULT CUpdateProduceCallbackImp::ShowDeleteFile(unsigned arcIndex) { const CArcItem &ai = (*_arcItems)[arcIndex]; return _callback->ShowDeleteFile(ai.Name, ai.IsDir); } bool CRenamePair::Prepare() { if (RecursedType != NRecursedType::kNonRecursed) return false; if (!WildcardParsing) return true; return !DoesNameContainWildcard(OldName); } extern bool g_CaseSensitive; static unsigned CompareTwoNames(const wchar_t *s1, const wchar_t *s2) { for (unsigned i = 0;; i++) { wchar_t c1 = s1[i]; wchar_t c2 = s2[i]; if (c1 == 0 || c2 == 0) return i; if (c1 == c2) continue; if (!g_CaseSensitive && (MyCharUpper(c1) == MyCharUpper(c2))) continue; if (IsPathSepar(c1) && IsPathSepar(c2)) continue; return i; } } bool CRenamePair::GetNewPath(bool isFolder, const UString &src, UString &dest) const { unsigned num = CompareTwoNames(OldName, src); if (OldName[num] == 0) { if (src[num] != 0 && !IsPathSepar(src[num]) && num != 0 && !IsPathSepar(src[num - 1])) return false; } else { // OldName[num] != 0 // OldName = "1\1a.txt" // src = "1" if (!isFolder || 
src[num] != 0 || !IsPathSepar(OldName[num]) || OldName[num + 1] != 0) return false; } dest = NewName + src.Ptr(num); return true; } #ifdef SUPPORT_ALT_STREAMS int FindAltStreamColon_in_Path(const wchar_t *path); #endif static HRESULT Compress( const CUpdateOptions &options, bool isUpdatingItself, CCodecs *codecs, const CActionSet &actionSet, const CArc *arc, CArchivePath &archivePath, const CObjectVector<CArcItem> &arcItems, Byte *processedItemsStatuses, const CDirItems &dirItems, const CDirItem *parentDirItem, CTempFiles &tempFiles, CUpdateErrorInfo &errorInfo, IUpdateCallbackUI *callback, CFinishArchiveStat &st) { CMyComPtr<IOutArchive> outArchive; int formatIndex = options.MethodMode.Type.FormatIndex; if (arc) { formatIndex = arc->FormatIndex; if (formatIndex < 0) return E_NOTIMPL; CMyComPtr<IInArchive> archive2 = arc->Archive; HRESULT result = archive2.QueryInterface(IID_IOutArchive, &outArchive); if (result != S_OK) throw kUpdateIsNotSupoorted; } else { RINOK(codecs->CreateOutArchive(formatIndex, outArchive)); #ifdef EXTERNAL_CODECS { CMyComPtr<ISetCompressCodecsInfo> setCompressCodecsInfo; outArchive.QueryInterface(IID_ISetCompressCodecsInfo, (void **)&setCompressCodecsInfo); if (setCompressCodecsInfo) { RINOK(setCompressCodecsInfo->SetCompressCodecsInfo(codecs)); } } #endif } if (outArchive == 0) throw kUpdateIsNotSupoorted; NFileTimeType::EEnum fileTimeType; { UInt32 value; RINOK(outArchive->GetFileTimeType(&value)); switch (value) { case NFileTimeType::kWindows: case NFileTimeType::kUnix: case NFileTimeType::kDOS: fileTimeType = (NFileTimeType::EEnum)value; break; default: return E_FAIL; } } { const CArcInfoEx &arcInfo = codecs->Formats[formatIndex]; if (options.AltStreams.Val && !arcInfo.Flags_AltStreams()) return E_NOTIMPL; if (options.NtSecurity.Val && !arcInfo.Flags_NtSecure()) return E_NOTIMPL; } CRecordVector<CUpdatePair2> updatePairs2; UStringVector newNames; if (options.RenamePairs.Size() != 0) { FOR_VECTOR (i, arcItems) { const CArcItem &ai = 
arcItems[i]; bool needRename = false; UString dest; if (ai.Censored) { FOR_VECTOR (j, options.RenamePairs) { const CRenamePair &rp = options.RenamePairs[j]; if (rp.GetNewPath(ai.IsDir, ai.Name, dest)) { needRename = true; break; } #ifdef SUPPORT_ALT_STREAMS if (ai.IsAltStream) { int colonPos = FindAltStreamColon_in_Path(ai.Name); if (colonPos >= 0) { UString mainName = ai.Name.Left(colonPos); /* actually we must improve that code to support cases with folder renaming like: rn arc dir1\ dir2\ */ if (rp.GetNewPath(false, mainName, dest)) { needRename = true; dest += L':'; dest += ai.Name.Ptr(colonPos + 1); break; } } } #endif } } CUpdatePair2 up2; up2.SetAs_NoChangeArcItem(ai.IndexInServer); if (needRename) { up2.NewProps = true; RINOK(arc->IsItemAnti(i, up2.IsAnti)); up2.NewNameIndex = newNames.Add(dest); } updatePairs2.Add(up2); } } else { CRecordVector<CUpdatePair> updatePairs; GetUpdatePairInfoList(dirItems, arcItems, fileTimeType, updatePairs); // must be done only once!!! CUpdateProduceCallbackImp upCallback(&arcItems, callback); UpdateProduce(updatePairs, actionSet, updatePairs2, isUpdatingItself ? &upCallback : NULL); } { UInt32 numItems = 0; FOR_VECTOR (i, updatePairs2) if (updatePairs2[i].NewData) numItems++; RINOK(callback->SetNumItems(numItems)); } CArchiveUpdateCallback *updateCallbackSpec = new CArchiveUpdateCallback; CMyComPtr<IArchiveUpdateCallback> updateCallback(updateCallbackSpec); updateCallbackSpec->ShareForWrite = options.OpenShareForWrite; updateCallbackSpec->StdInMode = options.StdInMode; updateCallbackSpec->Callback = callback; if (arc) { // we set Archive to allow to transfer GetProperty requests back to DLL. 
updateCallbackSpec->Archive = arc->Archive; } updateCallbackSpec->DirItems = &dirItems; updateCallbackSpec->ParentDirItem = parentDirItem; updateCallbackSpec->StoreNtSecurity = options.NtSecurity.Val; updateCallbackSpec->StoreHardLinks = options.HardLinks.Val; updateCallbackSpec->StoreSymLinks = options.SymLinks.Val; updateCallbackSpec->Arc = arc; updateCallbackSpec->ArcItems = &arcItems; updateCallbackSpec->UpdatePairs = &updatePairs2; updateCallbackSpec->ProcessedItemsStatuses = processedItemsStatuses; if (options.RenamePairs.Size() != 0) updateCallbackSpec->NewNames = &newNames; CMyComPtr<IOutStream> outSeekStream; CMyComPtr<ISequentialOutStream> outStream; if (!options.StdOutMode) { FString dirPrefix; if (!GetOnlyDirPrefix(us2fs(archivePath.GetFinalPath()), dirPrefix)) throw 1417161; CreateComplexDir(dirPrefix); } COutFileStream *outStreamSpec = NULL; CStdOutFileStream *stdOutFileStreamSpec = NULL; COutMultiVolStream *volStreamSpec = NULL; if (options.VolumesSizes.Size() == 0) { if (options.StdOutMode) { stdOutFileStreamSpec = new CStdOutFileStream; outStream = stdOutFileStreamSpec; } else { outStreamSpec = new COutFileStream; outSeekStream = outStreamSpec; outStream = outSeekStream; bool isOK = false; FString realPath; for (unsigned i = 0; i < (1 << 16); i++) { if (archivePath.Temp) { if (i > 0) { FChar s[16]; ConvertUInt32ToString(i, s); archivePath.TempPostfix = s; } realPath = archivePath.GetTempPath(); } else realPath = us2fs(archivePath.GetFinalPath()); if (outStreamSpec->Create(realPath, false)) { tempFiles.Paths.Add(realPath); isOK = true; break; } if (::GetLastError() != ERROR_FILE_EXISTS) break; if (!archivePath.Temp) break; } if (!isOK) return errorInfo.SetFromLastError("cannot open file", realPath); } } else { if (options.StdOutMode) return E_FAIL; if (arc && arc->GetGlobalOffset() > 0) return E_NOTIMPL; volStreamSpec = new COutMultiVolStream; outSeekStream = volStreamSpec; outStream = outSeekStream; volStreamSpec->Sizes = options.VolumesSizes; 
volStreamSpec->Prefix = us2fs(archivePath.GetFinalVolPath()); volStreamSpec->Prefix += FTEXT('.'); volStreamSpec->TempFiles = &tempFiles; volStreamSpec->Init(); /* updateCallbackSpec->VolumesSizes = volumesSizes; updateCallbackSpec->VolName = archivePath.Prefix + archivePath.Name; if (!archivePath.VolExtension.IsEmpty()) updateCallbackSpec->VolExt = UString(L'.') + archivePath.VolExtension; */ } RINOK(SetProperties(outArchive, options.MethodMode.Properties)); if (options.SfxMode) { CInFileStream *sfxStreamSpec = new CInFileStream; CMyComPtr<IInStream> sfxStream(sfxStreamSpec); if (!sfxStreamSpec->Open(options.SfxModule)) return errorInfo.SetFromLastError("cannot open SFX module", options.SfxModule); CMyComPtr<ISequentialOutStream> sfxOutStream; COutFileStream *outStreamSpec2 = NULL; if (options.VolumesSizes.Size() == 0) sfxOutStream = outStream; else { outStreamSpec2 = new COutFileStream; sfxOutStream = outStreamSpec2; FString realPath = us2fs(archivePath.GetFinalPath()); if (!outStreamSpec2->Create(realPath, false)) return errorInfo.SetFromLastError("cannot open file", realPath); } { UInt64 sfxSize; RINOK(sfxStreamSpec->GetSize(&sfxSize)); RINOK(callback->WriteSfx(fs2us(options.SfxModule), sfxSize)); } RINOK(NCompress::CopyStream(sfxStream, sfxOutStream, NULL)); if (outStreamSpec2) { RINOK(outStreamSpec2->Close()); } } CMyComPtr<ISequentialOutStream> tailStream; if (options.SfxMode || !arc || arc->ArcStreamOffset == 0) tailStream = outStream; else { // Int64 globalOffset = arc->GetGlobalOffset(); RINOK(arc->InStream->Seek(0, STREAM_SEEK_SET, NULL)); RINOK(NCompress::CopyStream_ExactSize(arc->InStream, outStream, arc->ArcStreamOffset, NULL)); if (options.StdOutMode) tailStream = outStream; else { CTailOutStream *tailStreamSpec = new CTailOutStream; tailStream = tailStreamSpec; tailStreamSpec->Stream = outSeekStream; tailStreamSpec->Offset = arc->ArcStreamOffset; tailStreamSpec->Init(); } } HRESULT result = outArchive->UpdateItems(tailStream, updatePairs2.Size(), 
updateCallback); // callback->Finalize(); RINOK(result); if (!updateCallbackSpec->AreAllFilesClosed()) { errorInfo.Message = "There are unclosed input file:"; errorInfo.FileNames = updateCallbackSpec->_openFiles_Paths; return E_FAIL; } if (options.SetArcMTime) { FILETIME ft; ft.dwLowDateTime = 0; ft.dwHighDateTime = 0; FOR_VECTOR (i, updatePairs2) { CUpdatePair2 &pair2 = updatePairs2[i]; const FILETIME *ft2 = NULL; if (pair2.NewProps && pair2.DirIndex >= 0) ft2 = &dirItems.Items[pair2.DirIndex].MTime; else if (pair2.UseArcProps && pair2.ArcIndex >= 0) ft2 = &arcItems[pair2.ArcIndex].MTime; if (ft2) { if (::CompareFileTime(&ft, ft2) < 0) ft = *ft2; } } if (ft.dwLowDateTime != 0 || ft.dwHighDateTime != 0) { if (outStreamSpec) outStreamSpec->SetMTime(&ft); else if (volStreamSpec) volStreamSpec->SetMTime(&ft);; } } if (callback) { UInt64 size = 0; if (outStreamSpec) outStreamSpec->GetSize(&size); else if (stdOutFileStreamSpec) size = stdOutFileStreamSpec->GetSize(); else size = volStreamSpec->GetSize(); st.OutArcFileSize = size; } if (outStreamSpec) result = outStreamSpec->Close(); else if (volStreamSpec) result = volStreamSpec->Close(); return result; } bool CensorNode_CheckPath2(const NWildcard::CCensorNode &node, const CReadArcItem &item, bool &include); static bool Censor_CheckPath(const NWildcard::CCensor &censor, const CReadArcItem &item) { bool finded = false; FOR_VECTOR (i, censor.Pairs) { bool include; if (CensorNode_CheckPath2(censor.Pairs[i].Head, item, include)) { if (!include) return false; finded = true; } } return finded; } static HRESULT EnumerateInArchiveItems( // bool storeStreamsMode, const NWildcard::CCensor &censor, const CArc &arc, CObjectVector<CArcItem> &arcItems) { arcItems.Clear(); UInt32 numItems; IInArchive *archive = arc.Archive; RINOK(archive->GetNumberOfItems(&numItems)); arcItems.ClearAndReserve(numItems); CReadArcItem item; for (UInt32 i = 0; i < numItems; i++) { CArcItem ai; RINOK(arc.GetItem(i, item)); ai.Name = item.Path; ai.IsDir = 
item.IsDir; ai.IsAltStream = #ifdef SUPPORT_ALT_STREAMS item.IsAltStream; #else false; #endif /* if (!storeStreamsMode && ai.IsAltStream) continue; */ ai.Censored = Censor_CheckPath(censor, item); RINOK(arc.GetItemMTime(i, ai.MTime, ai.MTimeDefined)); RINOK(arc.GetItemSize(i, ai.Size, ai.SizeDefined)); { CPropVariant prop; RINOK(archive->GetProperty(i, kpidTimeType, &prop)); if (prop.vt == VT_UI4) { ai.TimeType = (int)(NFileTimeType::EEnum)prop.ulVal; switch (ai.TimeType) { case NFileTimeType::kWindows: case NFileTimeType::kUnix: case NFileTimeType::kDOS: break; default: return E_FAIL; } } } ai.IndexInServer = i; arcItems.AddInReserved(ai); } return S_OK; } #if defined(_WIN32) && !defined(UNDER_CE) #include <mapi.h> #endif struct CRefSortPair { unsigned Len; unsigned Index; }; #define RINOZ(x) { int __tt = (x); if (__tt != 0) return __tt; } static int CompareRefSortPair(const CRefSortPair *a1, const CRefSortPair *a2, void *) { RINOZ(-MyCompare(a1->Len, a2->Len)); return MyCompare(a1->Index, a2->Index); } static unsigned GetNumSlashes(const FChar *s) { for (unsigned numSlashes = 0;;) { FChar c = *s++; if (c == 0) return numSlashes; if (IS_PATH_SEPAR(c)) numSlashes++; } } #ifdef _WIN32 void ConvertToLongNames(NWildcard::CCensor &censor); #endif HRESULT UpdateArchive( CCodecs *codecs, const CObjectVector<COpenType> &types, const UString &cmdArcPath2, NWildcard::CCensor &censor, CUpdateOptions &options, CUpdateErrorInfo &errorInfo, IOpenCallbackUI *openCallback, IUpdateCallbackUI2 *callback, bool needSetPath) { if (options.StdOutMode && options.EMailMode) return E_FAIL; if (types.Size() > 1) return E_NOTIMPL; bool renameMode = !options.RenamePairs.IsEmpty(); if (renameMode) { if (options.Commands.Size() != 1) return E_FAIL; } if (options.DeleteAfterCompressing) { if (options.Commands.Size() != 1) return E_NOTIMPL; const CActionSet &as = options.Commands[0].ActionSet; for (int i = 2; i < NPairState::kNumValues; i++) if (as.StateActions[i] != NPairAction::kCompress) 
return E_NOTIMPL; } censor.AddPathsToCensor(options.PathMode); #ifdef _WIN32 ConvertToLongNames(censor); #endif censor.ExtendExclude(); if (options.VolumesSizes.Size() > 0 && (options.EMailMode /* || options.SfxMode */)) return E_NOTIMPL; if (options.SfxMode) { CProperty property; property.Name.SetFromAscii("rsfx"); options.MethodMode.Properties.Add(property); if (options.SfxModule.IsEmpty()) { errorInfo.Message = "SFX file is not specified"; return E_FAIL; } bool found = false; if (options.SfxModule.Find(FCHAR_PATH_SEPARATOR) < 0) { const FString fullName = NDLL::GetModuleDirPrefix() + options.SfxModule; if (NFind::DoesFileExist(fullName)) { options.SfxModule = fullName; found = true; } } if (!found) { if (!NFind::DoesFileExist(options.SfxModule)) return errorInfo.SetFromLastError("cannot find specified SFX module", options.SfxModule); } } CArchiveLink arcLink; if (needSetPath) { if (!options.InitFormatIndex(codecs, types, cmdArcPath2) || !options.SetArcPath(codecs, cmdArcPath2)) return E_NOTIMPL; } const UString arcPath = options.ArchivePath.GetFinalPath(); if (cmdArcPath2.IsEmpty()) { if (options.MethodMode.Type.FormatIndex < 0) throw "type of archive is not specified"; } else { NFind::CFileInfo fi; if (!fi.Find(us2fs(arcPath))) { if (renameMode) throw "can't find archive";; if (options.MethodMode.Type.FormatIndex < 0) { if (!options.SetArcPath(codecs, cmdArcPath2)) return E_NOTIMPL; } } else { if (fi.IsDir()) throw "there is no such archive"; if (fi.IsDevice) return E_NOTIMPL; if (options.VolumesSizes.Size() > 0) return E_NOTIMPL; CObjectVector<COpenType> types2; // change it. if (options.MethodMode.Type_Defined) types2.Add(options.MethodMode.Type); // We need to set Properties to open archive only in some cases (WIM archives). 
CIntVector excl; COpenOptions op; #ifndef _SFX op.props = &options.MethodMode.Properties; #endif op.codecs = codecs; op.types = &types2; op.excludedFormats = &excl; op.stdInMode = false; op.stream = NULL; op.filePath = arcPath; RINOK(callback->StartOpenArchive(arcPath)); HRESULT result = arcLink.Open_Strict(op, openCallback); if (result == E_ABORT) return result; HRESULT res2 = callback->OpenResult(codecs, arcLink, arcPath, result); /* if (result == S_FALSE) return E_FAIL; */ RINOK(res2); RINOK(result); if (arcLink.VolumePaths.Size() > 1) { errorInfo.SystemError = (DWORD)E_NOTIMPL; errorInfo.Message = "Updating for multivolume archives is not implemented"; return E_NOTIMPL; } CArc &arc = arcLink.Arcs.Back(); arc.MTimeDefined = !fi.IsDevice; arc.MTime = fi.MTime; if (arc.ErrorInfo.ThereIsTail) { errorInfo.SystemError = (DWORD)E_NOTIMPL; errorInfo.Message = "There is some data block after the end of the archive"; return E_NOTIMPL; } if (options.MethodMode.Type.FormatIndex < 0) { options.MethodMode.Type.FormatIndex = arcLink.GetArc()->FormatIndex; if (!options.SetArcPath(codecs, cmdArcPath2)) return E_NOTIMPL; } } } if (options.MethodMode.Type.FormatIndex < 0) { options.MethodMode.Type.FormatIndex = codecs->FindFormatForArchiveType(kDefaultArcType); if (options.MethodMode.Type.FormatIndex < 0) return E_NOTIMPL; } bool thereIsInArchive = arcLink.IsOpen; if (!thereIsInArchive && renameMode) return E_FAIL; CDirItems dirItems; dirItems.Callback = callback; CDirItem parentDirItem; CDirItem *parentDirItem_Ptr = NULL; /* FStringVector requestedPaths; FStringVector *requestedPaths_Ptr = NULL; if (options.DeleteAfterCompressing) requestedPaths_Ptr = &requestedPaths; */ if (options.StdInMode) { CDirItem di; di.Name = options.StdInFileName; di.Size = (UInt64)(Int64)-1; di.Attrib = 0; NTime::GetCurUtcFileTime(di.MTime); di.CTime = di.ATime = di.MTime; dirItems.Items.Add(di); } else { bool needScanning = false; if (!renameMode) FOR_VECTOR (i, options.Commands) if 
(options.Commands[i].ActionSet.NeedScanning()) needScanning = true; if (needScanning) { RINOK(callback->StartScanning()); dirItems.SymLinks = options.SymLinks.Val; #if defined(_WIN32) && !defined(UNDER_CE) dirItems.ReadSecure = options.NtSecurity.Val; #endif dirItems.ScanAltStreams = options.AltStreams.Val; HRESULT res = EnumerateItems(censor, options.PathMode, options.AddPathPrefix, dirItems); if (res != S_OK) { if (res != E_ABORT) errorInfo.Message = "Scanning error"; return res; } RINOK(callback->FinishScanning(dirItems.Stat)); if (censor.Pairs.Size() == 1) { NFind::CFileInfo fi; FString prefix = us2fs(censor.Pairs[0].Prefix); prefix += FTEXT('.'); // UString prefix = censor.Pairs[0].Prefix; /* if (prefix.Back() == WCHAR_PATH_SEPARATOR) { prefix.DeleteBack(); } */ if (fi.Find(prefix)) if (fi.IsDir()) { parentDirItem.Size = fi.Size; parentDirItem.CTime = fi.CTime; parentDirItem.ATime = fi.ATime; parentDirItem.MTime = fi.MTime; parentDirItem.Attrib = fi.Attrib; parentDirItem_Ptr = &parentDirItem; int secureIndex = -1; #if defined(_WIN32) && !defined(UNDER_CE) if (options.NtSecurity.Val) dirItems.AddSecurityItem(prefix, secureIndex); #endif parentDirItem.SecureIndex = secureIndex; parentDirItem_Ptr = &parentDirItem; } } } } FString tempDirPrefix; bool usesTempDir = false; #ifdef _WIN32 CTempDir tempDirectory; if (options.EMailMode && options.EMailRemoveAfter) { tempDirectory.Create(kTempFolderPrefix); tempDirPrefix = tempDirectory.GetPath(); NormalizeDirPathPrefix(tempDirPrefix); usesTempDir = true; } #endif CTempFiles tempFiles; bool createTempFile = false; if (!options.StdOutMode && options.UpdateArchiveItself) { CArchivePath &ap = options.Commands[0].ArchivePath; ap = options.ArchivePath; // if ((archive != 0 && !usesTempDir) || !options.WorkingDir.IsEmpty()) if ((thereIsInArchive || !options.WorkingDir.IsEmpty()) && !usesTempDir && options.VolumesSizes.Size() == 0) { createTempFile = true; ap.Temp = true; if (!options.WorkingDir.IsEmpty()) ap.TempPrefix = 
options.WorkingDir; else ap.TempPrefix = us2fs(ap.Prefix); NormalizeDirPathPrefix(ap.TempPrefix); } } unsigned ci; for (ci = 0; ci < options.Commands.Size(); ci++) { CArchivePath &ap = options.Commands[ci].ArchivePath; if (usesTempDir) { // Check it ap.Prefix = fs2us(tempDirPrefix); // ap.Temp = true; // ap.TempPrefix = tempDirPrefix; } if (!options.StdOutMode && (ci > 0 || !createTempFile)) { const FString path = us2fs(ap.GetFinalPath()); if (NFind::DoesFileOrDirExist(path)) { errorInfo.SystemError = ERROR_FILE_EXISTS; errorInfo.Message = "The file already exists"; errorInfo.FileNames.Add(path); return errorInfo.Get_HRESULT_Error(); } } } CObjectVector<CArcItem> arcItems; if (thereIsInArchive) { RINOK(EnumerateInArchiveItems( // options.StoreAltStreams, censor, arcLink.Arcs.Back(), arcItems)); } /* FStringVector processedFilePaths; FStringVector *processedFilePaths_Ptr = NULL; if (options.DeleteAfterCompressing) processedFilePaths_Ptr = &processedFilePaths; */ CByteBuffer processedItems; if (options.DeleteAfterCompressing) { unsigned num = dirItems.Items.Size(); processedItems.Alloc(num); for (unsigned i = 0; i < num; i++) processedItems[i] = 0; } /* #ifndef _NO_CRYPTO if (arcLink.PasswordWasAsked) { // We set password, if open have requested password RINOK(callback->SetPassword(arcLink.Password)); } #endif */ for (ci = 0; ci < options.Commands.Size(); ci++) { const CArc *arc = thereIsInArchive ? arcLink.GetArc() : NULL; CUpdateArchiveCommand &command = options.Commands[ci]; UString name; bool isUpdating; if (options.StdOutMode) { name.SetFromAscii("stdout"); isUpdating = thereIsInArchive; } else { name = command.ArchivePath.GetFinalPath(); isUpdating = (ci == 0 && options.UpdateArchiveItself && thereIsInArchive); } RINOK(callback->StartArchive(name, isUpdating)) CFinishArchiveStat st; RINOK(Compress(options, isUpdating, codecs, command.ActionSet, arc, command.ArchivePath, arcItems, options.DeleteAfterCompressing ? 
(Byte *)processedItems : NULL, dirItems, parentDirItem_Ptr, tempFiles, errorInfo, callback, st)); RINOK(callback->FinishArchive(st)); } if (thereIsInArchive) { RINOK(arcLink.Close()); arcLink.Release(); } tempFiles.Paths.Clear(); if (createTempFile) { try { CArchivePath &ap = options.Commands[0].ArchivePath; const FString &tempPath = ap.GetTempPath(); if (thereIsInArchive) if (!DeleteFileAlways(us2fs(arcPath))) return errorInfo.SetFromLastError("cannot delete the file", us2fs(arcPath)); if (!MyMoveFile(tempPath, us2fs(arcPath))) { errorInfo.SetFromLastError("cannot move the file", tempPath); errorInfo.FileNames.Add(us2fs(arcPath)); return errorInfo.Get_HRESULT_Error(); } } catch(...) { throw; } } #if defined(_WIN32) && !defined(UNDER_CE) if (options.EMailMode) { NDLL::CLibrary mapiLib; if (!mapiLib.Load(FTEXT("Mapi32.dll"))) { errorInfo.SetFromLastError("cannot load Mapi32.dll"); return errorInfo.Get_HRESULT_Error(); } /* LPMAPISENDDOCUMENTS fnSend = (LPMAPISENDDOCUMENTS)mapiLib.GetProc("MAPISendDocuments"); if (fnSend == 0) { errorInfo.SetFromLastError)("7-Zip cannot find MAPISendDocuments function"); return errorInfo.Get_HRESULT_Error(); } */ LPMAPISENDMAIL sendMail = (LPMAPISENDMAIL)mapiLib.GetProc("MAPISendMail"); if (sendMail == 0) { errorInfo.SetFromLastError("7-Zip cannot find MAPISendMail function"); return errorInfo.Get_HRESULT_Error();; } FStringVector fullPaths; unsigned i; for (i = 0; i < options.Commands.Size(); i++) { CArchivePath &ap = options.Commands[i].ArchivePath; FString finalPath = us2fs(ap.GetFinalPath()); FString arcPath2; if (!MyGetFullPathName(finalPath, arcPath2)) return errorInfo.SetFromLastError("GetFullPathName error", finalPath); fullPaths.Add(arcPath2); } CCurrentDirRestorer curDirRestorer; for (i = 0; i < fullPaths.Size(); i++) { UString arcPath2 = fs2us(fullPaths[i]); UString fileName = ExtractFileNameFromPath(arcPath2); AString path = GetAnsiString(arcPath2); AString name = GetAnsiString(fileName); // Warning!!! 
MAPISendDocuments function changes Current directory // fnSend(0, ";", (LPSTR)(LPCSTR)path, (LPSTR)(LPCSTR)name, 0); MapiFileDesc f; memset(&f, 0, sizeof(f)); f.nPosition = 0xFFFFFFFF; f.lpszPathName = (char *)(const char *)path; f.lpszFileName = (char *)(const char *)name; MapiMessage m; memset(&m, 0, sizeof(m)); m.nFileCount = 1; m.lpFiles = &f; const AString addr = GetAnsiString(options.EMailAddress); MapiRecipDesc rec; if (!addr.IsEmpty()) { memset(&rec, 0, sizeof(rec)); rec.ulRecipClass = MAPI_TO; rec.lpszAddress = (char *)(const char *)addr; m.nRecipCount = 1; m.lpRecips = &rec; } sendMail((LHANDLE)0, 0, &m, MAPI_DIALOG, 0); } } #endif if (options.DeleteAfterCompressing) { CRecordVector<CRefSortPair> pairs; FStringVector foldersNames; unsigned i; for (i = 0; i < dirItems.Items.Size(); i++) { const CDirItem &dirItem = dirItems.Items[i]; FString phyPath = dirItems.GetPhyPath(i); if (dirItem.IsDir()) { CRefSortPair pair; pair.Index = i; pair.Len = GetNumSlashes(phyPath); pairs.Add(pair); } else { if (processedItems[i] != 0 || dirItem.Size == 0) { RINOK(callback->DeletingAfterArchiving(phyPath, false)); DeleteFileAlways(phyPath); } else { // file was skipped /* errorInfo.SystemError = 0; errorInfo.Message = "file was not processed"; errorInfo.FileName = phyPath; return E_FAIL; */ } } } pairs.Sort(CompareRefSortPair, NULL); for (i = 0; i < pairs.Size(); i++) { FString phyPath = dirItems.GetPhyPath(pairs[i].Index); if (NFind::DoesDirExist(phyPath)) { RINOK(callback->DeletingAfterArchiving(phyPath, true)); RemoveDir(phyPath); } } RINOK(callback->FinishDeletingAfterArchiving()); } return S_OK; }
#include <bits/stdc++.h>
using namespace std;

using ll = long long;

// Disjoint-set union with path compression (no union by rank).
// join(x, y) attaches y's root under x's root, so x's root survives the merge.
struct Dsu {
	int n;
	vector<int> p;

	Dsu(int size) : n(size), p(size) { init(); }

	inline void init() { iota(p.begin(), p.end(), 0); }

	inline int find(int x) {
		if (p[x] == x) {
			return x;
		}
		return p[x] = find(p[x]);
	}

	inline bool join(int x, int y) {
		int rx = find(x);
		int ry = find(y);
		if (rx == ry) {
			return false;
		}
		p[ry] = rx;
		return true;
	}

	inline bool check(int x, int y) { return find(x) == find(y); }
};

// Greedy over vertices in increasing a-b: merge each vertex with the
// already-processed components it touches, tracking per component
//   base[root] = sum of b over the component
//   need[root] = minimum starting budget to clear the whole component.
// When u absorbs neighbor components, the optimal order visits all but one
// sub-component first, then u, then the last sub-component; we minimize over
// the choice of that final sub-component.
void solve() {
	int n, m;
	cin >> n >> m;

	vector<int> a(n), b(n);
	vector<vector<int>> adj(n);
	for (int i = 0; i < n; i++) {
		cin >> a[i] >> b[i];
	}
	for (int e = 0; e < m; e++) {
		int x, y;
		cin >> x >> y;
		--x;
		--y;
		adj[x].emplace_back(y);
		adj[y].emplace_back(x);
	}

	// Process vertices from smallest to largest a-b.
	vector<int> order(n);
	iota(order.begin(), order.end(), 0);
	sort(order.begin(), order.end(), [&](int i, int j) {
		return a[i] - b[i] < a[j] - b[j];
	});

	Dsu dsu(n);
	vector<ll> base(n), need(n);
	for (int i = 0; i < n; i++) {
		base[i] = b[i];
		need[i] = max(a[i], b[i]);
	}

	vector<bool> seen(n, false);
	for (int u : order) {
		seen[u] = true;

		// Roots of already-processed components adjacent to u.
		set<int> roots;
		for (int v : adj[u]) {
			if (seen[v]) {
				roots.insert(dsu.find(v));
			}
		}
		if (roots.empty()) {
			continue;
		}

		// Merge; u becomes the root, and base[u] accumulates the total cost.
		for (int r : roots) {
			base[u] += base[r];
			dsu.join(u, r);
		}

		// Budget needed at the moment we leave u itself (clamped at 0;
		// need[r] >= 0 makes the clamp redundant, kept for clarity).
		ll exitNeed = max(a[u] - b[u], 0);

		// Choose which sub-component to visit last.
		ll best = (ll)1e18;
		for (int r : roots) {
			ll lastLeg = max(need[r], exitNeed);
			ll earlier = base[u] - base[r];
			best = min(best, lastLeg + earlier);
		}
		need[u] = best;
	}

	cout << need[dsu.find(0)];
}

int main() {
	ios_base::sync_with_stdio(false);
	cin.tie(nullptr);
	solve();
	cout << endl;
}
{# Brian2 weave/cpp codegen template for a population rate monitor: counts this
   step's spikes belonging to one subgroup and appends (t, rate) to the
   monitor's dynamic arrays. #}
{% import 'common_macros.cpp' as common with context %}

{% macro main() %}
    {{ common.insert_group_preamble() }}
    {# USES_VARIABLES { N, t, rate, _clock_t, _clock_dt, _spikespace, _num_source_neurons, _source_start, _source_stop } #}
    // Total number of spikes this time step is stored in the last slot of _spikespace.
    int _num_spikes = {{_spikespace}}[_num_spikespace-1];

    // For subgroups, we do not want to record all spikes
    // We assume that spikes are ordered
    // Find the first spiking index inside [_source_start, _source_stop).
    int _start_idx = -1;
    int _end_idx = - 1;
    for(int _j=0; _j<_num_spikes; _j++)
    {
        const int _idx = {{_spikespace}}[_j];
        if (_idx >= _source_start) {
            _start_idx = _j;
            break;
        }
    }
    if (_start_idx == -1)
        _start_idx = _num_spikes;
    // Find one past the last spiking index belonging to the subgroup.
    for(int _j=_start_idx; _j<_num_spikes; _j++)
    {
        const int _idx = {{_spikespace}}[_j];
        if (_idx >= _source_stop) {
            _end_idx = _j;
            break;
        }
    }
    if (_end_idx == -1)
        _end_idx = _num_spikes;
    _num_spikes = _end_idx - _start_idx;

    // Calculate the new length for the arrays
    const npy_int _new_len = (npy_int)({{_dynamic_t}}.attr("shape")[0]) + 1;

    // Resize the arrays
    PyObject_CallMethod(_owner, "resize", "i", _new_len);
    // Get the potentially newly created underlying data arrays
    // (resize may reallocate, so the data pointers must be re-fetched here).
    double *t_data = (double*)(((PyArrayObject*)(PyObject*){{_dynamic_t}}.attr("data"))->data);
    double *rate_data = (double*)(((PyArrayObject*)(PyObject*){{_dynamic_rate}}.attr("data"))->data);

    //Set the new values: rate = spikes per second per source neuron.
    t_data[_new_len - 1] = {{_clock_t}};
    rate_data[_new_len - 1] = 1.0 * _num_spikes / {{_clock_dt}} / _num_source_neurons;

    {{N}} = _new_len;
{% endmacro %}

{% macro support_code() %}
{% endmacro %}
/*
 *	Reflect Library by Parra Studios
 *	A library for provide reflection and metadata representation.
 *
 *	Copyright (C) 2016 - 2021 Vicente Eduardo Ferrer Garcia <vic798@gmail.com>
 *
 *	Licensed under the Apache License, Version 2.0 (the "License");
 *	you may not use this file except in compliance with the License.
 *	You may obtain a copy of the License at
 *
 *		http://www.apache.org/licenses/LICENSE-2.0
 *
 *	Unless required by applicable law or agreed to in writing, software
 *	distributed under the License is distributed on an "AS IS" BASIS,
 *	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *	See the License for the specific language governing permissions and
 *	limitations under the License.
 *
 */

// Unit test exercising the reflect class/object interfaces with a hand-written
// "HelloWorld" class backed by plain C structs.

#include <gtest/gtest.h>

#include <reflect/reflect_class.h>
#include <reflect/reflect_object.h>
#include <reflect/reflect_value_type.h>

#include <log/log.h>

typedef struct hello_world_class_type
{
	// These are static attributes that belong to the class
	int a;
	float b;
	char c[10];

} * hello_world_class;

typedef struct hello_world_object_type
{
	// These are attributes that belong to the object
	char d;
	long e;

} * hello_world_object;

// Object interface: initialize instance attributes to their defaults.
int hello_world_object_impl_interface_create(object obj, object_impl impl)
{
	hello_world_object hello_world = (hello_world_object)impl;

	(void)obj;

	EXPECT_NE((void *)NULL, (void *)hello_world);

	// Default values for static attributes (this will be done automatically by the language runtime)
	// NOTE(review): d/e are instance attributes, not static — comment looks stale.
	hello_world->d = 'd';
	hello_world->e = 55;

	return 0;
}

// Object interface: attribute getter dispatched by name; returns a new value
// (caller destroys) or NULL for unknown keys.
value hello_world_object_impl_interface_get(object obj, object_impl impl, const char *key)
{
	hello_world_object hello_world = (hello_world_object)impl;

	(void)obj;

	// Horrible but it is just a ilustrative example
	if (strcmp(key, "d") == 0)
	{
		return value_create_char(hello_world->d);
	}
	else if (strcmp(key, "e") == 0)
	{
		return value_create_long(hello_world->e);
	}

	return NULL;
}

// Object interface: attribute setter; unknown keys are silently ignored.
int hello_world_object_impl_interface_set(object obj, object_impl impl, const char *key, value v)
{
	hello_world_object hello_world = (hello_world_object)impl;

	EXPECT_NE((void *)NULL, (void *)hello_world);

	(void)obj;

	// Horrible but it is just a ilustrative example
	if (strcmp(key, "d") == 0)
	{
		hello_world->d = value_to_char(v);
	}
	else if (strcmp(key, "e") == 0)
	{
		hello_world->e = value_to_long(v);
	}

	return 0;
}

// Object interface: every method call returns the constant "Hello World".
value hello_world_object_impl_interface_method_invoke(object obj, object_impl impl, const char *key, object_args args, size_t size)
{
	// TODO: Maybe we can improve this with other methods and arguments like in reflect_function_test
	static const char str[] = "Hello World";

	(void)obj;
	(void)impl;
	(void)key;
	(void)args;
	(void)size;

	return value_create_string(str, sizeof(str) - 1);
}

// Object interface: async method call — not implemented.
value hello_world_object_impl_interface_method_await(object obj, object_impl impl, const char *key, object_args args, size_t size, object_resolve_callback resolve, object_reject_callback reject, void *ctx)
{
	// TODO
	(void)obj;
	(void)impl;
	(void)key;
	(void)args;
	(void)size;
	(void)resolve;
	(void)reject;
	(void)ctx;

	return NULL;
}

// Object interface: destructor hook (no per-instance cleanup needed here).
int hello_world_object_impl_interface_destructor(object obj, object_impl impl)
{
	(void)obj;
	(void)impl;

	return 0;
}

// Object interface: free the impl struct allocated by the class constructor.
void hello_world_object_impl_interface_destroy(object obj, object_impl impl)
{
	hello_world_object hello_world_obj = static_cast<hello_world_object>(impl);

	(void)obj;

	delete hello_world_obj;
}

// Returns the singleton vtable wiring the object callbacks above.
object_interface hello_world_object_impl_interface_singleton()
{
	static struct object_interface_type hello_world_interface
	{
		&hello_world_object_impl_interface_create,
		&hello_world_object_impl_interface_get,
		&hello_world_object_impl_interface_set,
		&hello_world_object_impl_interface_method_invoke,
		&hello_world_object_impl_interface_method_await,
		&hello_world_object_impl_interface_destructor,
		&hello_world_object_impl_interface_destroy
	};

	return &hello_world_interface;
}

// Class interface: initialize the static (class-level) attributes.
int hello_world_class_impl_interface_create(klass cls, class_impl impl)
{
	hello_world_class hello_world = (hello_world_class)impl;

	(void)cls;

	EXPECT_NE((void *)NULL, (void *)hello_world);

	// Default values for static attributes (this will be done automatically by the language runtime)
	hello_world->a = 0;
	hello_world->b = 0.0f;
	hello_world->c[0] = '\0';

	return 0;
}

// Class interface: allocate an object impl, create the object, and apply
// constructor arguments when two are supplied.
object hello_world_class_impl_interface_constructor(klass cls, class_impl impl, const char *name, class_args args, size_t size)
{
	hello_world_object hello_world_obj = new hello_world_object_type();

	(void)impl;

	object obj = object_create(name, hello_world_obj, &hello_world_object_impl_interface_singleton, cls);

	if (object_increment_reference(obj) != 0)
	{
		/* TODO: Abort? */
	}

	// NOTE(review): the test's default-constructor path passes a non-NULL args
	// array with size 0, so this branch never fires there — the 'd'/55 defaults
	// observed come from the create callback. Presumably intended; confirm
	// whether `size == 0` was meant instead of `args == 0`.
	if (args == 0)
	{
		// Default constructor
		hello_world_obj->d = 'A';
		hello_world_obj->e = 0L;
	}
	else if (size == 2)
	{
		hello_world_obj->d = value_to_char(args[0]);
		hello_world_obj->e = value_to_long(args[1]);
	}

	return obj;
}

// Class interface: static attribute getter by name; NULL for unknown keys.
value hello_world_class_impl_interface_static_get(klass cls, class_impl impl, const char *key)
{
	hello_world_class hello_world = (hello_world_class)impl;

	(void)cls;

	// Horrible but it is just a ilustrative example
	if (strcmp(key, "a") == 0)
	{
		return value_create_int(hello_world->a);
	}
	else if (strcmp(key, "b") == 0)
	{
		return value_create_float(hello_world->b);
	}
	else if (strcmp(key, "c") == 0)
	{
		return value_create_string(hello_world->c, 9);
	}

	return NULL;
}

// Class interface: static attribute setter by name.
int hello_world_class_impl_interface_static_set(klass cls, class_impl impl, const char *key, value v)
{
	hello_world_class hello_world = (hello_world_class)impl;

	EXPECT_NE((void *)NULL, (void *)hello_world);

	(void)cls;

	// Horrible but it is just a ilustrative example
	if (strcmp(key, "a") == 0)
	{
		hello_world->a = value_to_int(v);
	}
	else if (strcmp(key, "b") == 0)
	{
		hello_world->b = value_to_float(v);
	}
	else if (strcmp(key, "c") == 0)
	{
		// NOTE(review): strncpy leaves c unterminated if the source is >= 10
		// chars; fine for this test's short strings, but worth confirming.
		strncpy(hello_world->c, value_to_string(v), 10);
	}

	return 0;
}

// Class interface: static method invoke — not implemented.
value hello_world_class_impl_interface_static_invoke(klass cls, class_impl impl, const char *key, class_args args, size_t size)
{
	// TODO
	(void)cls;
	(void)impl;
	(void)key;
	(void)args;
	(void)size;

	return NULL;
}

// Class interface: async static method invoke — not implemented.
value hello_world_class_impl_interface_static_await(klass cls, class_impl impl, const char *key, class_args args, size_t size, class_resolve_callback resolve, class_reject_callback reject, void *ctx)
{
	// TODO
	(void)cls;
	(void)impl;
	(void)key;
	(void)args;
	(void)size;
	(void)resolve;
	(void)reject;
	(void)ctx;

	return NULL;
}

// Class interface: free the class impl struct allocated by the test.
void hello_world_class_impl_interface_destroy(klass cls, class_impl impl)
{
	hello_world_class hellow_world_cls = static_cast<hello_world_class>(impl);

	(void)cls;

	delete hellow_world_cls;
}

// Returns the singleton vtable wiring the class callbacks above.
class_interface hello_world_class_impl_interface_singleton()
{
	static struct class_interface_type hello_world_interface
	{
		&hello_world_class_impl_interface_create,
		&hello_world_class_impl_interface_constructor,
		&hello_world_class_impl_interface_static_get,
		&hello_world_class_impl_interface_static_set,
		&hello_world_class_impl_interface_static_invoke,
		&hello_world_class_impl_interface_static_await,
		&hello_world_class_impl_interface_destroy
	};

	return &hello_world_interface;
}

class reflect_object_class_test : public testing::Test
{
public:
};

// End-to-end test: class statics, default- and custom-constructed objects,
// attribute get/set round trips, and method invocation.
TEST_F(reflect_object_class_test, DefaultConstructor)
{
	EXPECT_EQ((int)0, (int)log_configure("metacall", log_policy_format_text(), log_policy_schedule_sync(), log_policy_storage_sequential(), log_policy_stream_stdio(stdout)));

	// Create class
	hello_world_class hellow_world_cls = new hello_world_class_type();

	EXPECT_NE((void *)NULL, (void *)hellow_world_cls);

	klass cls = class_create("HelloWorld", hellow_world_cls, &hello_world_class_impl_interface_singleton);

	EXPECT_EQ((int)class_increment_reference(cls), (int)0);

	// Get and set static attributes from the class
	{
		value a = class_static_get(cls, "a");
		ASSERT_NE((value)NULL, (value)a);
		EXPECT_EQ((int)0, (int)value_to_int(a));
		value_type_destroy(a);

		value b = class_static_get(cls, "b");
		ASSERT_NE((value)NULL, (value)b);
		EXPECT_EQ((float)0.0f, (float)value_to_float(b));
		value_type_destroy(b);

		value c = class_static_get(cls, "c");
		ASSERT_NE((value)NULL, (value)c);
		EXPECT_EQ((char)'\0', (char)*value_to_string(c));
		value_type_destroy(c);

		value new_a = value_create_int(1234);
		ASSERT_EQ((int)0, (int)class_static_set(cls, "a", new_a));
		a = class_static_get(cls, "a");
		EXPECT_NE((value)NULL, (value)a);
		EXPECT_EQ((int)1234, (int)value_to_int(a));
		value_type_destroy(a);
		value_type_destroy(new_a);

		value new_c = value_create_string("hi", 2);
		ASSERT_EQ((int)0, (int)class_static_set(cls, "c", new_c));
		c = class_static_get(cls, "c");
		EXPECT_NE((value)NULL, (value)c);
		EXPECT_EQ((char)'h', (char)value_to_string(c)[0]);
		EXPECT_EQ((char)'i', (char)value_to_string(c)[1]);
		value_type_destroy(c);
		value_type_destroy(new_c);
	}

	// Create object (default constructor)
	{
		value args[] = { NULL };

		object obj = class_new(cls, "helloWorldObj", args, 0);

		ASSERT_NE((object)NULL, (object)obj);

		// Get & set attributes from object
		{
			value d = object_get(obj, "d");
			ASSERT_NE((value)NULL, (value)d);
			EXPECT_EQ((char)'d', (char)value_to_char(d));
			value_type_destroy(d);

			value e = object_get(obj, "e");
			ASSERT_NE((value)NULL, (value)e);
			EXPECT_EQ((long)55L, (long)value_to_long(e));
			value_type_destroy(e);

			value new_d = value_create_char('M');
			ASSERT_EQ((char)0, (char)object_set(obj, "d", new_d));
			d = object_get(obj, "d");
			EXPECT_NE((value)NULL, (value)d);
			EXPECT_EQ((char)'M', (char)value_to_char(d));
			value_type_destroy(d);
			value_type_destroy(new_d);

			value new_e = value_create_long(1234);
			ASSERT_EQ((long)0, (long)object_set(obj, "e", new_e));
			e = object_get(obj, "e");
			EXPECT_NE((value)NULL, (value)e);
			EXPECT_EQ((long)1234L, (long)value_to_long(e));
			value_type_destroy(e);
			value_type_destroy(new_e);
		}

		// Test object call
		value ret = object_call(obj, "test_func", args, 0);
		ASSERT_NE((value)NULL, (value)ret);
		ASSERT_EQ((int)0, (int)strcmp(value_to_string(ret), "Hello World"));
		value_type_destroy(ret);

		// TODO: Test object await

		object_destroy(obj);
	}

	// Create object (custom constructor)
	{
		value args[] = { value_create_char('F'), value_create_long(3435L) };

		object obj = class_new(cls, "helloWorldObj", args, 2);

		ASSERT_NE((object)NULL, (object)obj);

		value_type_destroy(args[0]);
		value_type_destroy(args[1]);

		// Get attributes from object
		{
			value d = object_get(obj, "d");
			ASSERT_NE((value)NULL, (value)d);
			EXPECT_EQ((char)'F', (char)value_to_char(d));
			value_type_destroy(d);

			value e = object_get(obj, "e");
			ASSERT_NE((value)NULL, (value)e);
			EXPECT_EQ((long)3435L, (long)value_to_long(e));
			value_type_destroy(e);
		}

		// Test object call
		value ret = object_call(obj, "test_func", args, 0);
		ASSERT_NE((value)NULL, (value)ret);
		ASSERT_EQ((int)0, (int)strcmp(value_to_string(ret), "Hello World"));
		value_type_destroy(ret);

		// TODO: Test object await

		object_destroy(obj);
	}

	class_destroy(cls);
}
// Fill out your copyright notice in the Description page of Project Settings.

// Seeking bomb projectile: flies a quadratic Bezier arc (start -> elevated
// midpoint -> homing target) over a flight time scaled by launch distance.

#include "Snake_Project.h"
#include "BombProjectile_Seek.h"
#include "Bots/Misc/RocketTankTargetDecal.h"

ABombProjectile_Seek::ABombProjectile_Seek(const FObjectInitializer& PCIP)
	: ABombProjectile(PCIP)
{
	// Defaults; InitializeHoming() overwrites FlightTime/HeightOffset from the
	// distance-based ranges below.
	HeightOffset = 500.0f;
	FlightTime = 5;
	FlightTimeRange = FVector2D(1.0f, 5.0f);
	HeightOffsetRange = FVector2D(150, 1000);
	MidPoint = 0.5f;
}

void ABombProjectile_Seek::PostInitializeComponents()
{
	Super::PostInitializeComponents();
}

void ABombProjectile_Seek::Tick(float DeltaTime)
{
	// NOTE(review): deliberately calls AActor::Tick, skipping
	// ABombProjectile::Tick — presumably to bypass the parent's movement while
	// this class drives the transform itself. Confirm against the base class.
	AActor::Tick(DeltaTime);

	if (bHomingProjectile)
	{
		// Target is re-read every tick, so the arc end follows a moving target.
		FVector Target = MoveComp->HomingTargetComponent->GetComponentLocation();

		FlightProgress += DeltaTime;
		// NOTE(review): Alpha is not clamped to [0,1]; past FlightTime the
		// curve extrapolates beyond the target — assumed impact occurs first.
		float Alpha = FlightProgress / FlightTime;

		// Quadratic Bezier via two nested lerps through the elevated midpoint.
		FVector AtoB = FMath::Lerp(InitialLocation, TargetHeight, Alpha);
		FVector BtoC = FMath::Lerp(TargetHeight, Target, Alpha);
		FVector Final = FMath::Lerp(AtoB, BtoC, Alpha);

		// Face along the curve tangent (direction between the two lerp legs).
		FVector Direction = BtoC - AtoB;
		Direction.Normalize();
		FRotator Rotation = Direction.Rotation();
		SetActorRotation(Rotation);

		// Sweep enabled so blocking geometry triggers impact handling.
		SetActorLocation(Final, true);
	}
}

void ABombProjectile_Seek::OnImpact(const FHitResult& Hit)
{
	Super::OnImpact(Hit);

	// Prefer the precise hit location; fall back to the actor position.
	FVector HitLocation = GetActorLocation();
	if (Hit.IsValidBlockingHit())
	{
		HitLocation = Hit.Location;
	}

	OnImpact_BlueprintEvent(HitLocation);
}

void ABombProjectile_Seek::InitializeHoming(USceneComponent* InTargetComponent, float IdealDistance)
{
	Super::InitializeHoming(InTargetComponent, IdealDistance);

	// Show the ground decal that telegraphs the incoming bomb, if the target
	// component belongs to one.
	ARocketTankTargetDecal* HomingTarget = Cast<ARocketTankTargetDecal>(InTargetComponent->GetOwner());
	if (HomingTarget)
	{
		TargetDecal = HomingTarget;
		HomingTarget->Show(this);
	}

	bHomingProjectile = true;

	// Scale flight time and arc height by launch distance relative to the
	// ideal distance (clamped to [0,1]).
	FVector Distance = InTargetComponent->GetComponentLocation() - GetActorLocation();
	float Per = FMath::Clamp(Distance.Size() / IdealDistance, 0.0f, 1.0f);

	FlightTime = FMath::Lerp(FlightTimeRange.X, FlightTimeRange.Y, Per);
	HeightOffset = FMath::Lerp(HeightOffsetRange.X, HeightOffsetRange.Y, Per);

	// Bezier control point: ahead of the launch point by MidPoint of the
	// distance, raised by HeightOffset.
	InitialLocation = GetActorLocation();
	float Point = Distance.Size() * MidPoint;
	TargetHeight = GetActorLocation() + (FVector::UpVector * HeightOffset) + (GetActorForwardVector() * Point);
}

void ABombProjectile_Seek::HideAndDestroy(const FHitResult& Hit)
{
	Super::HideAndDestroy(Hit);

	// Tear down the telegraph decal alongside the projectile.
	if(TargetDecal)
	{
		TargetDecal->Hide();
		TargetDecal = nullptr;
	}
}

void ABombProjectile_Seek::BeginOverlapCheck(AActor* Other, UPrimitiveComponent* OtherComp, int32 OtherBodyIndex, bool bFromSweep, const FHitResult& SweepResult)
{
	if (Other == this)
	{
		return;
	}

	// Ignore collisions with other projectiles so bombs don't detonate each other.
	ABaseProjectile* Projectile = Cast<ABaseProjectile>(Other);
	if (Projectile)
	{
		AddUniqueIgnoreActor(Other);
	}
}
#pragma once #include <polymesh/Mesh.hh> namespace polymesh { /// Given a flat polymesh with convex faces, naively triangulates all faces void triangulate_naive(Mesh& m); }
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

// LevelDB-backed storage for the UTXO set (CCoinsViewDB) and the block index
// (CBlockTreeDB), plus the one-time upgrade from the legacy per-tx format.

#include "txdb.h"

#include "chainparams.h"
#include "hash.h"
#include "random.h"
#include "pow.h"
#include "uint256.h"
#include "util.h"
#include "ui_interface.h"
#include "init.h"

#include <stdint.h>

#include <boost/thread.hpp>

// Single-character key prefixes for the different record types.
static const char DB_COIN = 'C';
static const char DB_COINS = 'c';        // legacy per-tx records (pre-upgrade)
static const char DB_BLOCK_FILES = 'f';
static const char DB_TXINDEX = 't';
static const char DB_BLOCK_INDEX = 'b';

static const char DB_BEST_BLOCK = 'B';
static const char DB_HEAD_BLOCKS = 'H';  // set while a BatchWrite is in flight
static const char DB_FLAG = 'F';
static const char DB_REINDEX_FLAG = 'R';
static const char DB_LAST_BLOCK = 'l';

namespace {

// Serialization wrapper: (DB_COIN, txid, varint vout index) database key.
struct CoinEntry {
    COutPoint* outpoint;
    char key;
    CoinEntry(const COutPoint* ptr) : outpoint(const_cast<COutPoint*>(ptr)), key(DB_COIN) {}

    template<typename Stream>
    void Serialize(Stream &s) const {
        s << key;
        s << outpoint->hash;
        s << VARINT(outpoint->n);
    }

    template<typename Stream>
    void Unserialize(Stream& s) {
        s >> key;
        s >> outpoint->hash;
        s >> VARINT(outpoint->n);
    }
};

}

CCoinsViewDB::CCoinsViewDB(size_t nCacheSize, bool fMemory, bool fWipe) : db(GetDataDir() / "chainstate", nCacheSize, fMemory, fWipe, true)
{
}

bool CCoinsViewDB::GetCoin(const COutPoint &outpoint, Coin &coin) const {
    return db.Read(CoinEntry(&outpoint), coin);
}

bool CCoinsViewDB::HaveCoin(const COutPoint &outpoint) const {
    return db.Exists(CoinEntry(&outpoint));
}

// Returns the null hash when no best block is recorded (fresh DB, or mid-write).
uint256 CCoinsViewDB::GetBestBlock() const {
    uint256 hashBestChain;
    if (!db.Read(DB_BEST_BLOCK, hashBestChain))
        return uint256();
    return hashBestChain;
}

// Non-empty only while an interrupted BatchWrite needs replaying.
std::vector<uint256> CCoinsViewDB::GetHeadBlocks() const {
    std::vector<uint256> vhashHeadBlocks;
    if (!db.Read(DB_HEAD_BLOCKS, vhashHeadBlocks)) {
        return std::vector<uint256>();
    }
    return vhashHeadBlocks;
}

// Flushes dirty cache entries to disk in size-limited batches. Crash safety:
// DB_BEST_BLOCK is erased and DB_HEAD_BLOCKS {new, old} written first, and the
// roles are swapped back only in the final batch, so a crash mid-flush is
// detectable on restart. Consumes (erases) every entry of mapCoins.
bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) {
    CDBBatch batch(db);
    size_t count = 0;
    size_t changed = 0;
    size_t batch_size = (size_t)gArgs.GetArg("-dbbatchsize", nDefaultDbBatchSize);
    int crash_simulate = gArgs.GetArg("-dbcrashratio", 0);
    assert(!hashBlock.IsNull());

    uint256 old_tip = GetBestBlock();
    if (old_tip.IsNull()) {
        // We may be in the middle of replaying.
        std::vector<uint256> old_heads = GetHeadBlocks();
        if (old_heads.size() == 2) {
            assert(old_heads[0] == hashBlock);
            old_tip = old_heads[1];
        }
    }

    // In the first batch, mark the database as being in the middle of a
    // transition from old_tip to hashBlock.
    // A vector is used for future extensibility, as we may want to support
    // interrupting after partial writes from multiple independent reorgs.
    batch.Erase(DB_BEST_BLOCK);
    batch.Write(DB_HEAD_BLOCKS, std::vector<uint256>{hashBlock, old_tip});

    for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end();) {
        if (it->second.flags & CCoinsCacheEntry::DIRTY) {
            CoinEntry entry(&it->first);
            if (it->second.coin.IsSpent())
                batch.Erase(entry);
            else
                batch.Write(entry, it->second.coin);
            changed++;
        }
        count++;
        // Advance before erasing so the iterator stays valid.
        CCoinsMap::iterator itOld = it++;
        mapCoins.erase(itOld);
        if (batch.SizeEstimate() > batch_size) {
            LogPrint(BCLog::COINDB, "Writing partial batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0));
            db.WriteBatch(batch);
            batch.Clear();
            if (crash_simulate) {
                static FastRandomContext rng;
                if (rng.randrange(crash_simulate) == 0) {
                    LogPrintf("Simulating a crash. Goodbye.\n");
                    _Exit(0);
                }
            }
        }
    }

    // In the last batch, mark the database as consistent with hashBlock again.
    batch.Erase(DB_HEAD_BLOCKS);
    batch.Write(DB_BEST_BLOCK, hashBlock);

    LogPrint(BCLog::COINDB, "Writing final batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0));
    bool ret = db.WriteBatch(batch);
    LogPrint(BCLog::COINDB, "Committed %u changed transaction outputs (out of %u) to coin database...\n", (unsigned int)changed, (unsigned int)count);
    return ret;
}

// Estimates on-disk size of the UTXO set: all keys prefixed with DB_COIN.
size_t CCoinsViewDB::EstimateSize() const
{
    return db.EstimateSize(DB_COIN, (char)(DB_COIN+1));
}

CBlockTreeDB::CBlockTreeDB(size_t nCacheSize, bool fMemory, bool fWipe) : CDBWrapper(GetDataDir() / "blocks" / "index", nCacheSize, fMemory, fWipe) {
}

bool CBlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo &info) {
    return Read(std::make_pair(DB_BLOCK_FILES, nFile), info);
}

// The reindex flag is its presence, not its value: written as '1', erased to clear.
bool CBlockTreeDB::WriteReindexing(bool fReindexing) {
    if (fReindexing)
        return Write(DB_REINDEX_FLAG, '1');
    else
        return Erase(DB_REINDEX_FLAG);
}

bool CBlockTreeDB::ReadReindexing(bool &fReindexing) {
    fReindexing = Exists(DB_REINDEX_FLAG);
    return true;
}

bool CBlockTreeDB::ReadLastBlockFile(int &nFile) {
    return Read(DB_LAST_BLOCK, nFile);
}

// Creates a cursor positioned at the first coin record; caller owns the result.
CCoinsViewCursor *CCoinsViewDB::Cursor() const
{
    CCoinsViewDBCursor *i = new CCoinsViewDBCursor(const_cast<CDBWrapper&>(db).NewIterator(), GetBestBlock());
    /* It seems that there are no "const iterators" for LevelDB. Since we only
       need read operations on it, use a const-cast to get around that
       restriction. */
    i->pcursor->Seek(DB_COIN);
    // Cache key of first record
    if (i->pcursor->Valid()) {
        CoinEntry entry(&i->keyTmp.second);
        i->pcursor->GetKey(entry);
        i->keyTmp.first = entry.key;
    } else {
        i->keyTmp.first = 0; // Make sure Valid() and GetKey() return false
    }
    return i;
}

bool CCoinsViewDBCursor::GetKey(COutPoint &key) const
{
    // Return cached key
    if (keyTmp.first == DB_COIN) {
        key = keyTmp.second;
        return true;
    }
    return false;
}

bool CCoinsViewDBCursor::GetValue(Coin &coin) const
{
    return pcursor->GetValue(coin);
}

unsigned int CCoinsViewDBCursor::GetValueSize() const
{
    return pcursor->GetValueSize();
}

bool CCoinsViewDBCursor::Valid() const
{
    return keyTmp.first == DB_COIN;
}

void CCoinsViewDBCursor::Next()
{
    pcursor->Next();
    CoinEntry entry(&keyTmp.second);
    if (!pcursor->Valid() || !pcursor->GetKey(entry)) {
        keyTmp.first = 0; // Invalidate cached key after last record so that Valid() and GetKey() return false
    } else {
        keyTmp.first = entry.key;
    }
}

// Writes file info, last-file marker and block index entries in one synced batch.
bool CBlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*> >& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo) {
    CDBBatch batch(*this);
    for (std::vector<std::pair<int, const CBlockFileInfo*> >::const_iterator it=fileInfo.begin(); it != fileInfo.end(); it++) {
        batch.Write(std::make_pair(DB_BLOCK_FILES, it->first), *it->second);
    }
    batch.Write(DB_LAST_BLOCK, nLastFile);
    for (std::vector<const CBlockIndex*>::const_iterator it=blockinfo.begin(); it != blockinfo.end(); it++) {
        batch.Write(std::make_pair(DB_BLOCK_INDEX, (*it)->GetBlockHash()), CDiskBlockIndex(*it));
    }
    return WriteBatch(batch, true);
}

bool CBlockTreeDB::ReadTxIndex(const uint256 &txid, CDiskTxPos &pos) {
    return Read(std::make_pair(DB_TXINDEX, txid), pos);
}

bool CBlockTreeDB::WriteTxIndex(const std::vector<std::pair<uint256, CDiskTxPos> >&vect) {
    CDBBatch batch(*this);
    for (std::vector<std::pair<uint256,CDiskTxPos> >::const_iterator it=vect.begin(); it!=vect.end(); it++)
        batch.Write(std::make_pair(DB_TXINDEX, it->first), it->second);
    return WriteBatch(batch);
}

// Named boolean flags stored as the characters '1'/'0'.
bool CBlockTreeDB::WriteFlag(const std::string &name, bool fValue) {
    return Write(std::make_pair(DB_FLAG, name), fValue ? '1' : '0');
}

bool CBlockTreeDB::ReadFlag(const std::string &name, bool &fValue) {
    char ch;
    if (!Read(std::make_pair(DB_FLAG, name), ch))
        return false;
    fValue = ch == '1';
    return true;
}

// Iterates all DB_BLOCK_INDEX records and reconstructs the in-memory block
// index via the caller-supplied insertBlockIndex factory.
bool CBlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex)
{
    std::unique_ptr<CDBIterator> pcursor(NewIterator());

    pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256()));

    // Load mapBlockIndex
    while (pcursor->Valid()) {
        boost::this_thread::interruption_point();
        std::pair<char, uint256> key;
        if (pcursor->GetKey(key) && key.first == DB_BLOCK_INDEX) {
            CDiskBlockIndex diskindex;
            if (pcursor->GetValue(diskindex)) {
                // Construct block index object
                CBlockIndex* pindexNew = insertBlockIndex(diskindex.GetBlockHash());
                pindexNew->pprev = insertBlockIndex(diskindex.hashPrev);
                pindexNew->nHeight = diskindex.nHeight;
                pindexNew->nFile = diskindex.nFile;
                pindexNew->nDataPos = diskindex.nDataPos;
                pindexNew->nUndoPos = diskindex.nUndoPos;
                pindexNew->nVersion = diskindex.nVersion;
                pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot;
                pindexNew->nTime = diskindex.nTime;
                pindexNew->nBits = diskindex.nBits;
                pindexNew->nNonce = diskindex.nNonce;
                pindexNew->nStatus = diskindex.nStatus;
                pindexNew->nTx = diskindex.nTx;

                // JewelzCoin: Disable PoW Sanity check while loading block index from disk.
                // We use the sha256 hash for the block index for performance reasons, which is recorded for later use.
                // CheckProofOfWork() uses the scrypt hash which is discarded after a block is accepted.
                // While it is technically feasible to verify the PoW, doing so takes several minutes as it
                // requires recomputing every PoW hash during every JewelzCoin startup.
                // We opt instead to simply trust the data that is on your local disk.
                //if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, consensusParams))
                //    return error("%s: CheckProofOfWork failed: %s", __func__, pindexNew->ToString());

                pcursor->Next();
            } else {
                return error("%s: failed to read value", __func__);
            }
        } else {
            break;
        }
    }

    return true;
}

namespace {

//! Legacy class to deserialize pre-pertxout database entries without reindex.
class CCoins
{
public:
    //! whether transaction is a coinbase
    bool fCoinBase;

    //! unspent transaction outputs; spent outputs are .IsNull(); spent outputs at the end of the array are dropped
    std::vector<CTxOut> vout;

    //! at which height this transaction was included in the active block chain
    int nHeight;

    //! empty constructor
    CCoins() : fCoinBase(false), vout(0), nHeight(0) { }

    template<typename Stream>
    void Unserialize(Stream &s) {
        unsigned int nCode = 0;
        // version
        int nVersionDummy;
        ::Unserialize(s, VARINT(nVersionDummy));
        // header code
        ::Unserialize(s, VARINT(nCode));
        fCoinBase = nCode & 1;
        // Bits 1-2 encode spentness of the first two outputs directly.
        std::vector<bool> vAvail(2, false);
        vAvail[0] = (nCode & 2) != 0;
        vAvail[1] = (nCode & 4) != 0;
        unsigned int nMaskCode = (nCode / 8) + ((nCode & 6) != 0 ? 0 : 1);
        // spentness bitmask
        while (nMaskCode > 0) {
            unsigned char chAvail = 0;
            ::Unserialize(s, chAvail);
            for (unsigned int p = 0; p < 8; p++) {
                bool f = (chAvail & (1 << p)) != 0;
                vAvail.push_back(f);
            }
            if (chAvail != 0)
                nMaskCode--;
        }
        // txouts themself
        vout.assign(vAvail.size(), CTxOut());
        for (unsigned int i = 0; i < vAvail.size(); i++) {
            if (vAvail[i])
                ::Unserialize(s, REF(CTxOutCompressor(vout[i])));
        }
        // coinbase height
        ::Unserialize(s, VARINT(nHeight));
    }
};

}

/** Upgrade the database from older formats.
 *
 * Currently implemented: from the per-tx utxo model (0.8..0.14.x) to per-txout.
 * Interruptible: returns false if a shutdown was requested mid-upgrade, in
 * which case the remaining DB_COINS records are converted on the next run.
 */
bool CCoinsViewDB::Upgrade() {
    std::unique_ptr<CDBIterator> pcursor(db.NewIterator());
    pcursor->Seek(std::make_pair(DB_COINS, uint256()));
    if (!pcursor->Valid()) {
        return true;
    }

    int64_t count = 0;
    LogPrintf("Upgrading utxo-set database...\n");
    LogPrintf("[0%%]...");
    size_t batch_size = 1 << 24;
    CDBBatch batch(db);
    uiInterface.SetProgressBreakAction(StartShutdown);
    int reportDone = 0;
    std::pair<unsigned char, uint256> key;
    std::pair<unsigned char, uint256> prev_key = {DB_COINS, uint256()};
    while (pcursor->Valid()) {
        boost::this_thread::interruption_point();
        if (ShutdownRequested()) {
            break;
        }
        if (pcursor->GetKey(key) && key.first == DB_COINS) {
            if (count++ % 256 == 0) {
                // Progress estimated from the top two bytes of the txid.
                uint32_t high = 0x100 * *key.second.begin() + *(key.second.begin() + 1);
                int percentageDone = (int)(high * 100.0 / 65536.0 + 0.5);
                uiInterface.ShowProgress(_("Upgrading UTXO database") + "\n"+ _("(press q to shutdown and continue later)") + "\n", percentageDone);
                if (reportDone < percentageDone/10) {
                    // report max. every 10% step
                    LogPrintf("[%d%%]...", percentageDone);
                    reportDone = percentageDone/10;
                }
            }
            CCoins old_coins;
            if (!pcursor->GetValue(old_coins)) {
                return error("%s: cannot parse CCoins record", __func__);
            }
            // Fan the legacy per-tx record out into one DB_COIN entry per
            // unspent, spendable output.
            COutPoint outpoint(key.second, 0);
            for (size_t i = 0; i < old_coins.vout.size(); ++i) {
                if (!old_coins.vout[i].IsNull() && !old_coins.vout[i].scriptPubKey.IsUnspendable()) {
                    Coin newcoin(std::move(old_coins.vout[i]), old_coins.nHeight, old_coins.fCoinBase);
                    outpoint.n = i;
                    CoinEntry entry(&outpoint);
                    batch.Write(entry, newcoin);
                }
            }
            batch.Erase(key);
            if (batch.SizeEstimate() > batch_size) {
                db.WriteBatch(batch);
                batch.Clear();
                // Compact the range just rewritten to reclaim space promptly.
                db.CompactRange(prev_key, key);
                prev_key = key;
            }
            pcursor->Next();
        } else {
            break;
        }
    }
    db.WriteBatch(batch);
    db.CompactRange({DB_COINS, uint256()}, key);
    uiInterface.SetProgressBreakAction(std::function<void(void)>());
    LogPrintf("[%s].\n", ShutdownRequested() ? "CANCELLED" : "DONE");
    return !ShutdownRequested();
}
#pragma once
#include <Register/Utility.hpp>
namespace Kvasir {
//Analog comparators ACMP0/1/2/3
// NOTE(review): generated register-description header. Each hardware register is a
// namespace carrying its Address, FieldLocation objects for every bit field, and a
// companion *ValC namespace of pre-built FieldValue constants.
// NOTE(review): several fields within one register namespace all use the identifier
// `reserved` (e.g. bits 7:0 and 31:10 below) — presumably placeholders emitted by the
// generator; confirm against the generator output that the repeated name is intended.
    namespace AcmpCtrl{    ///<Comparator block control register
        using Addr = Register::Address<0x40008000,0x00000000,0x00000000,unsigned>;
        ///Reserved.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,0),Register::ReadWriteAccess,unsigned> reserved{};
        ///Selects the which comparators set and reset the ROSC output.
        enum class RoscctlVal {
            acmp1Acmp0=0x00000000,     ///<ACMP1/ACMP0. The ROSC output is set by ACMP1 and reset by ACMP0.
            acmp0Acmp1=0x00000001,     ///<ACMP0/ACMP1. The ROSC output is set by ACMP0 and reset by ACMP1.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(8,8),Register::ReadWriteAccess,RoscctlVal> roscctl{};
        namespace RoscctlValC{
            constexpr Register::FieldValue<decltype(roscctl)::Type,RoscctlVal::acmp1Acmp0> acmp1Acmp0{};
            constexpr Register::FieldValue<decltype(roscctl)::Type,RoscctlVal::acmp0Acmp1> acmp0Acmp1{};
        }
        ///Selects the reset source for the ROSC output.
        enum class ExtresetVal {
            internal=0x00000000,     ///<Internal. The ROSC output is reset by the internal chip reset.
            fromPinRoscReset=0x00000001,     ///<From pin ROSC_RESET. The ROSC output is reset by the ROSC_RESET input.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(9,9),Register::ReadWriteAccess,ExtresetVal> extReset{};
        namespace ExtresetValC{
            constexpr Register::FieldValue<decltype(extReset)::Type,ExtresetVal::internal> internal{};
            constexpr Register::FieldValue<decltype(extReset)::Type,ExtresetVal::fromPinRoscReset> fromPinRoscReset{};
        }
        ///Reserved.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(31,10),Register::ReadWriteAccess,unsigned> reserved{};
    }
    namespace AcmpCmp0{    ///<Comparator 0 source control
        using Addr = Register::Address<0x40008004,0x00000000,0x00000000,unsigned>;
        ///Comparator enable control.
        enum class EnVal {
            disabled=0x00000000,     ///<Disabled. Comparator disabled.
            enabled=0x00000001,     ///<Enabled. Comparator is enabled.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(0,0),Register::ReadWriteAccess,EnVal> en{};
        namespace EnValC{
            constexpr Register::FieldValue<decltype(en)::Type,EnVal::disabled> disabled{};
            constexpr Register::FieldValue<decltype(en)::Type,EnVal::enabled> enabled{};
        }
        ///Reserved.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(1,1),Register::ReadWriteAccess,unsigned> reserved{};
        ///Interrupt enable.
        enum class IntenVal {
            disabled=0x00000000,     ///<Disabled. Interrupts are disabled..
            enabled=0x00000001,     ///<Enabled. Interrupts are enabled.. Must set to 1 for interrupts to propagate to the NVIC and start-up logic.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(2,2),Register::ReadWriteAccess,IntenVal> inten{};
        namespace IntenValC{
            constexpr Register::FieldValue<decltype(inten)::Type,IntenVal::disabled> disabled{};
            constexpr Register::FieldValue<decltype(inten)::Type,IntenVal::enabled> enabled{};
        }
        ///Comparator status. This bit reflects the comparator output
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(3,3),Register::ReadWriteAccess,unsigned> stat{};
        ///VM input select.
        enum class VmVal {
            vrefDivider0=0x00000000,     ///<Vref divider 0.
            acmpI1=0x00000001,     ///<ACMP_I1.
            acmpI2=0x00000002,     ///<ACMP_I2.
            acmp0I3=0x00000003,     ///<ACMP0_I3.
            acmp0I4=0x00000004,     ///<ACMP0_I4.
            internal0=0x00000005,     ///<Internal 0.9 V band gap reference.
            tempSensor=0x00000006,     ///<Temp sensor.
            adc02=0x00000007,     ///<ADC0_2. Input for ADC0 channel 2.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(6,4),Register::ReadWriteAccess,VmVal> vm{};
        namespace VmValC{
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::vrefDivider0> vrefDivider0{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::acmpI1> acmpI1{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::acmpI2> acmpI2{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::acmp0I3> acmp0I3{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::acmp0I4> acmp0I4{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::internal0> internal0{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::tempSensor> tempSensor{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::adc02> adc02{};
        }
        ///Reserved.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,7),Register::ReadWriteAccess,unsigned> reserved{};
        ///VP input select.
        enum class VpVal {
            vrefDivider0=0x00000000,     ///<Vref divider 0.
            acmpI1=0x00000001,     ///<ACMP_I1.
            acmpI2=0x00000002,     ///<ACMP_I2.
            acmp0I3=0x00000003,     ///<ACMP0_I3.
            acmp0I4=0x00000004,     ///<ACMP0_I4.
            internal0=0x00000005,     ///<Internal 0.9 V band gap reference.
            tempSensor=0x00000006,     ///<Temp sensor.
            adc02=0x00000007,     ///<ADC0_2. Input for ADC0 channel 2.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(10,8),Register::ReadWriteAccess,VpVal> vp{};
        namespace VpValC{
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::vrefDivider0> vrefDivider0{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::acmpI1> acmpI1{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::acmpI2> acmpI2{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::acmp0I3> acmp0I3{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::acmp0I4> acmp0I4{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::internal0> internal0{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::tempSensor> tempSensor{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::adc02> adc02{};
        }
        ///Reserved.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(12,11),Register::ReadWriteAccess,unsigned> reserved{};
        ///Hysteresis control. When enabled, hysteresis determines the difference required between the comparator inputs before the comparator output switches. The difference must be in the direction opposite of the current comparator output.
        enum class HysVal {
            hysteresisIsTurned=0x00000000,     ///<Hysteresis is turned off, comparator output will change as the input voltages cross.
            hysteresisEq5Mv=0x00000001,     ///<Hysteresis = 5 mV.
            hysteresisEq10Mv=0x00000002,     ///<Hysteresis = 10 mV.
            hysteresisEq15Mv=0x00000003,     ///<Hysteresis = 15 mV.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(14,13),Register::ReadWriteAccess,HysVal> hys{};
        namespace HysValC{
            constexpr Register::FieldValue<decltype(hys)::Type,HysVal::hysteresisIsTurned> hysteresisIsTurned{};
            constexpr Register::FieldValue<decltype(hys)::Type,HysVal::hysteresisEq5Mv> hysteresisEq5Mv{};
            constexpr Register::FieldValue<decltype(hys)::Type,HysVal::hysteresisEq10Mv> hysteresisEq10Mv{};
            constexpr Register::FieldValue<decltype(hys)::Type,HysVal::hysteresisEq15Mv> hysteresisEq15Mv{};
        }
        ///Selects the polarity of the CMP output for purposes of generating level interrupts.
        enum class IntpolVal {
            notInverted=0x00000000,     ///<Not inverted. The output is used as-is for generating interrupts.
            inverted=0x00000001,     ///<Inverted. The output is used inverted for generating interrupts.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(15,15),Register::ReadWriteAccess,IntpolVal> intpol{};
        namespace IntpolValC{
            constexpr Register::FieldValue<decltype(intpol)::Type,IntpolVal::notInverted> notInverted{};
            constexpr Register::FieldValue<decltype(intpol)::Type,IntpolVal::inverted> inverted{};
        }
        ///Select interrupt type.
        enum class InttypeVal {
            edge=0x00000000,     ///<Edge. Comparator interrupt is edge triggered.
            level=0x00000001,     ///<Level. Comparator interrupt is level triggered.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(16,16),Register::ReadWriteAccess,InttypeVal> inttype{};
        namespace InttypeValC{
            constexpr Register::FieldValue<decltype(inttype)::Type,InttypeVal::edge> edge{};
            constexpr Register::FieldValue<decltype(inttype)::Type,InttypeVal::level> level{};
        }
        ///Select edge triggered interrupt to be active on either high or low transitions, when INTTYPE = 0.
        enum class IntedgeVal {
            falling=0x00000000,     ///<Falling. Comparator interrupt is active on falling edges.
            rising=0x00000001,     ///<Rising. Comparator interrupt is active on rising edges.
            bothEdges=0x00000002,     ///<Both edges. Comparator Interrupt is active on both edges.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(18,17),Register::ReadWriteAccess,IntedgeVal> intedge{};
        namespace IntedgeValC{
            constexpr Register::FieldValue<decltype(intedge)::Type,IntedgeVal::falling> falling{};
            constexpr Register::FieldValue<decltype(intedge)::Type,IntedgeVal::rising> rising{};
            constexpr Register::FieldValue<decltype(intedge)::Type,IntedgeVal::bothEdges> bothEdges{};
        }
        ///Interrupt flag.
        enum class IntflagVal {
            notPending=0x00000000,     ///<Not pending. The Comparator interrupt is not pending.
            pending=0x00000001,     ///<Pending. The Comparator interrupt is pending. Writing a 1 to this bit clears the flag.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(19,19),Register::ReadWriteAccess,IntflagVal> intflag{};
        namespace IntflagValC{
            constexpr Register::FieldValue<decltype(intflag)::Type,IntflagVal::notPending> notPending{};
            constexpr Register::FieldValue<decltype(intflag)::Type,IntflagVal::pending> pending{};
        }
        ///Voltage ladder enable for comparator 0.
        enum class VladenVal {
            disabled=0x00000000,     ///<Disabled. The Comparator voltage ladder is disabled.
            enabled=0x00000001,     ///<Enabled. The Comparator voltage ladder is enabled.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(20,20),Register::ReadWriteAccess,VladenVal> vladen{};
        namespace VladenValC{
            constexpr Register::FieldValue<decltype(vladen)::Type,VladenVal::disabled> disabled{};
            constexpr Register::FieldValue<decltype(vladen)::Type,VladenVal::enabled> enabled{};
        }
        ///Reserved.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(21,21),Register::ReadWriteAccess,unsigned> reserved{};
        ///Voltage reference select for comparator 0 voltage ladder.
        enum class VladrefVal {
            vrefCmpPin=0x00000000,     ///<VREF_CMP pin.
            vddaPin=0x00000001,     ///<VDDA pin.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(22,22),Register::ReadWriteAccess,VladrefVal> vladref{};
        namespace VladrefValC{
            constexpr Register::FieldValue<decltype(vladref)::Type,VladrefVal::vrefCmpPin> vrefCmpPin{};
            constexpr Register::FieldValue<decltype(vladref)::Type,VladrefVal::vddaPin> vddaPin{};
        }
        ///Reserved.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(23,23),Register::ReadWriteAccess,unsigned> reserved{};
        ///Voltage ladder value for comparator 0. The reference voltage Vref depends on the setting of VLADREF (either VDD(3V3) or voltage on pin VREF_CMP). 00000 = Vss. 00001 = 1 x Vref / 31. 00010 = 2 x Vref / 31. ... 11111 = Vref
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(28,24),Register::ReadWriteAccess,unsigned> vsel{};
        ///Configure the propagation delay. A shorter propagation delay means higher power consumption. Use values from 0x0 (shortest propagation delay and highest power consumption) to 0x2 (longest propagation delay and lowest power consumption). See the data sheet for details.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(30,29),Register::ReadWriteAccess,unsigned> dly{};
        ///Reserved.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(31,31),Register::ReadWriteAccess,unsigned> reserved{};
    }
    namespace AcmpCmp1{    ///<Comparator 1 source control
        using Addr = Register::Address<0x4000800c,0x00000000,0x00000000,unsigned>;
        ///Comparator enable control.
        enum class EnVal {
            disabled=0x00000000,     ///<Disabled. Comparator disabled.
            enabled=0x00000001,     ///<Enabled. Comparator is enabled.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(0,0),Register::ReadWriteAccess,EnVal> en{};
        namespace EnValC{
            constexpr Register::FieldValue<decltype(en)::Type,EnVal::disabled> disabled{};
            constexpr Register::FieldValue<decltype(en)::Type,EnVal::enabled> enabled{};
        }
        ///Reserved.
        // NOTE(review): continuation of namespace AcmpCmp1 (register at 0x4000800c);
        // layout mirrors AcmpCmp0 with comparator-1 input sources. Multiple fields
        // below share the identifier `reserved` — presumably generator placeholders;
        // confirm the repeated name is intended.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(1,1),Register::ReadWriteAccess,unsigned> reserved{};
        ///Interrupt enable.
        enum class IntenVal {
            disabled=0x00000000,     ///<Disabled. Interrupts are disabled..
            enabled=0x00000001,     ///<Enabled. Interrupts are enabled.. Must set to 1 for interrupts to propagate to the NVIC and start-up logic.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(2,2),Register::ReadWriteAccess,IntenVal> inten{};
        namespace IntenValC{
            constexpr Register::FieldValue<decltype(inten)::Type,IntenVal::disabled> disabled{};
            constexpr Register::FieldValue<decltype(inten)::Type,IntenVal::enabled> enabled{};
        }
        ///Comparator status. This bit reflects the comparator output
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(3,3),Register::ReadWriteAccess,unsigned> stat{};
        ///VM input select.
        enum class VmVal {
            vrefDivider1=0x00000000,     ///<Vref divider 1.
            acmpI1=0x00000001,     ///<ACMP_I1.
            acmpI2=0x00000002,     ///<ACMP_I2.
            acmp1I3=0x00000003,     ///<ACMP1_I3.
            acmp1I4=0x00000004,     ///<ACMP1_I4.
            internal0=0x00000005,     ///<Internal 0.9 V band gap reference.
            adc01=0x00000006,     ///<ADC0_1. Input for ADC0 channel 1.
            adc03=0x00000007,     ///<ADC0_3. Input for ADC0 channel 3.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(6,4),Register::ReadWriteAccess,VmVal> vm{};
        namespace VmValC{
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::vrefDivider1> vrefDivider1{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::acmpI1> acmpI1{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::acmpI2> acmpI2{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::acmp1I3> acmp1I3{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::acmp1I4> acmp1I4{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::internal0> internal0{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::adc01> adc01{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::adc03> adc03{};
        }
        ///Reserved.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,7),Register::ReadWriteAccess,unsigned> reserved{};
        ///VP input select.
        enum class VpVal {
            vrefDivider1=0x00000000,     ///<Vref divider 1.
            acmpI1=0x00000001,     ///<ACMP_I1.
            acmpI2=0x00000002,     ///<ACMP_I2.
            acmp1I3=0x00000003,     ///<ACMP1_I3.
            acmp1I4=0x00000004,     ///<ACMP1_I4.
            internal0=0x00000005,     ///<Internal 0.9 V band gap reference.
            adc01=0x00000006,     ///<ADC0_1. Input for ADC0 channel 1.
            adc03=0x00000007,     ///<ADC0_3. Input for ADC0 channel 3.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(10,8),Register::ReadWriteAccess,VpVal> vp{};
        namespace VpValC{
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::vrefDivider1> vrefDivider1{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::acmpI1> acmpI1{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::acmpI2> acmpI2{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::acmp1I3> acmp1I3{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::acmp1I4> acmp1I4{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::internal0> internal0{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::adc01> adc01{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::adc03> adc03{};
        }
        ///Reserved.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(12,11),Register::ReadWriteAccess,unsigned> reserved{};
        ///Hysteresis control. When enabled, hysteresis determines the difference required between the comparator inputs before the comparator output switches. The difference must be in the direction opposite of the current comparator output.
        enum class HysVal {
            hysteresisIsTurned=0x00000000,     ///<Hysteresis is turned off, comparator output will change as the input voltages cross.
            hysteresisEq5Mv=0x00000001,     ///<Hysteresis = 5 mV.
            hysteresisEq10Mv=0x00000002,     ///<Hysteresis = 10 mV.
            hysteresisEq15Mv=0x00000003,     ///<Hysteresis = 15 mV.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(14,13),Register::ReadWriteAccess,HysVal> hys{};
        namespace HysValC{
            constexpr Register::FieldValue<decltype(hys)::Type,HysVal::hysteresisIsTurned> hysteresisIsTurned{};
            constexpr Register::FieldValue<decltype(hys)::Type,HysVal::hysteresisEq5Mv> hysteresisEq5Mv{};
            constexpr Register::FieldValue<decltype(hys)::Type,HysVal::hysteresisEq10Mv> hysteresisEq10Mv{};
            constexpr Register::FieldValue<decltype(hys)::Type,HysVal::hysteresisEq15Mv> hysteresisEq15Mv{};
        }
        ///Selects the polarity of the CMP output for purposes of generating level interrupts.
        enum class IntpolVal {
            notInverted=0x00000000,     ///<Not inverted. The output is used as-is for generating interrupts.
            inverted=0x00000001,     ///<Inverted. The output is used inverted for generating interrupts.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(15,15),Register::ReadWriteAccess,IntpolVal> intpol{};
        namespace IntpolValC{
            constexpr Register::FieldValue<decltype(intpol)::Type,IntpolVal::notInverted> notInverted{};
            constexpr Register::FieldValue<decltype(intpol)::Type,IntpolVal::inverted> inverted{};
        }
        ///Select interrupt type.
        enum class InttypeVal {
            edge=0x00000000,     ///<Edge. Comparator interrupt is edge triggered.
            level=0x00000001,     ///<Level. Comparator interrupt is level triggered.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(16,16),Register::ReadWriteAccess,InttypeVal> inttype{};
        namespace InttypeValC{
            constexpr Register::FieldValue<decltype(inttype)::Type,InttypeVal::edge> edge{};
            constexpr Register::FieldValue<decltype(inttype)::Type,InttypeVal::level> level{};
        }
        ///Select edge triggered interrupt to be active on either high or low transitions, when INTTYPE = 0.
        enum class IntedgeVal {
            falling=0x00000000,     ///<Falling. Comparator interrupt is active on falling edges.
            rising=0x00000001,     ///<Rising. Comparator interrupt is active on rising edges.
            bothEdges=0x00000002,     ///<Both edges. Comparator Interrupt is active on both edges.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(18,17),Register::ReadWriteAccess,IntedgeVal> intedge{};
        namespace IntedgeValC{
            constexpr Register::FieldValue<decltype(intedge)::Type,IntedgeVal::falling> falling{};
            constexpr Register::FieldValue<decltype(intedge)::Type,IntedgeVal::rising> rising{};
            constexpr Register::FieldValue<decltype(intedge)::Type,IntedgeVal::bothEdges> bothEdges{};
        }
        ///Interrupt flag.
        enum class IntflagVal {
            notPending=0x00000000,     ///<Not pending. The Comparator interrupt is not pending.
            pending=0x00000001,     ///<Pending. The Comparator interrupt is pending. Writing a 1 to this bit clears the flag.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(19,19),Register::ReadWriteAccess,IntflagVal> intflag{};
        namespace IntflagValC{
            constexpr Register::FieldValue<decltype(intflag)::Type,IntflagVal::notPending> notPending{};
            constexpr Register::FieldValue<decltype(intflag)::Type,IntflagVal::pending> pending{};
        }
        ///Voltage ladder enable for comparator 1.
        enum class VladenVal {
            disabled=0x00000000,     ///<Disabled. The Comparator voltage ladder is disabled.
            enabled=0x00000001,     ///<Enabled. The Comparator voltage ladder is enabled.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(20,20),Register::ReadWriteAccess,VladenVal> vladen{};
        namespace VladenValC{
            constexpr Register::FieldValue<decltype(vladen)::Type,VladenVal::disabled> disabled{};
            constexpr Register::FieldValue<decltype(vladen)::Type,VladenVal::enabled> enabled{};
        }
        ///Reserved.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(21,21),Register::ReadWriteAccess,unsigned> reserved{};
        ///Voltage reference select for comparator 1 voltage ladder.
        enum class VladrefVal {
            vrefCmpPin=0x00000000,     ///<VREF_CMP pin.
            vddaPin=0x00000001,     ///<VDDA pin.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(22,22),Register::ReadWriteAccess,VladrefVal> vladref{};
        namespace VladrefValC{
            constexpr Register::FieldValue<decltype(vladref)::Type,VladrefVal::vrefCmpPin> vrefCmpPin{};
            constexpr Register::FieldValue<decltype(vladref)::Type,VladrefVal::vddaPin> vddaPin{};
        }
        ///Reserved.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(23,23),Register::ReadWriteAccess,unsigned> reserved{};
        ///Voltage ladder value for comparator 1. The reference voltage Vref depends on the setting of VLADREF (either VDD(3V3) or voltage on pin VREF_CMP). 00000 = Vss. 00001 = 1 x Vref / 31. 00010 = 2 x Vref / 31. ... 11111 = Vref
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(28,24),Register::ReadWriteAccess,unsigned> vsel{};
        ///Configure the propagation delay. A shorter propagation delay means higher power consumption. Use values from 0x0 (shortest propagation delay and highest power consumption) to 0x2 (longest propagation delay and lowest power consumption). See the data sheet for details.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(30,29),Register::ReadWriteAccess,unsigned> dly{};
        ///Reserved.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(31,31),Register::ReadWriteAccess,unsigned> reserved{};
    }
    namespace AcmpCmp2{    ///<Comparator 2 source control
        using Addr = Register::Address<0x40008014,0x00000000,0x00000000,unsigned>;
        ///Comparator enable control.
        enum class EnVal {
            disabled=0x00000000,     ///<Disabled. Comparator disabled.
            enabled=0x00000001,     ///<Enabled. Comparator is enabled.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(0,0),Register::ReadWriteAccess,EnVal> en{};
        namespace EnValC{
            constexpr Register::FieldValue<decltype(en)::Type,EnVal::disabled> disabled{};
            constexpr Register::FieldValue<decltype(en)::Type,EnVal::enabled> enabled{};
        }
        ///Reserved.
        // NOTE(review): continuation of namespace AcmpCmp2 (register at 0x40008014);
        // layout mirrors AcmpCmp0 with comparator-2 input sources. Multiple fields
        // below share the identifier `reserved` — presumably generator placeholders;
        // confirm the repeated name is intended.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(1,1),Register::ReadWriteAccess,unsigned> reserved{};
        ///Interrupt enable.
        enum class IntenVal {
            disabled=0x00000000,     ///<Disabled. Interrupts are disabled..
            enabled=0x00000001,     ///<Enabled. Interrupts are enabled.. Must set to 1 for interrupts to propagate to the NVIC and start-up logic.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(2,2),Register::ReadWriteAccess,IntenVal> inten{};
        namespace IntenValC{
            constexpr Register::FieldValue<decltype(inten)::Type,IntenVal::disabled> disabled{};
            constexpr Register::FieldValue<decltype(inten)::Type,IntenVal::enabled> enabled{};
        }
        ///Comparator status. This bit reflects the comparator output
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(3,3),Register::ReadWriteAccess,unsigned> stat{};
        ///VM input select.
        enum class VmVal {
            vrefDivider2=0x00000000,     ///<Vref divider 2.
            acmpI1=0x00000001,     ///<ACMP_I1.
            acmpI2=0x00000002,     ///<ACMP_I2.
            acmp2I3=0x00000003,     ///<ACMP2_I3.
            acmp2I4=0x00000004,     ///<ACMP2_I4.
            internal0=0x00000005,     ///<Internal 0.9 V band gap reference.
            adc00=0x00000006,     ///<ADC0_0. Input for ADC0 channel 0.
            adc12=0x00000007,     ///<ADC1_2. Input for ADC1 channel 2.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(6,4),Register::ReadWriteAccess,VmVal> vm{};
        namespace VmValC{
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::vrefDivider2> vrefDivider2{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::acmpI1> acmpI1{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::acmpI2> acmpI2{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::acmp2I3> acmp2I3{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::acmp2I4> acmp2I4{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::internal0> internal0{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::adc00> adc00{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::adc12> adc12{};
        }
        ///Reserved.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,7),Register::ReadWriteAccess,unsigned> reserved{};
        ///VP input select.
        enum class VpVal {
            vrefDivider2=0x00000000,     ///<Vref divider 2.
            acmpI1=0x00000001,     ///<ACMP_I1.
            acmpI2=0x00000002,     ///<ACMP_I2.
            acmp2I3=0x00000003,     ///<ACMP2_I3.
            acmp2I4=0x00000004,     ///<ACMP2_I4.
            internal0=0x00000005,     ///<Internal 0.9 V band gap reference.
            adc00=0x00000006,     ///<ADC0_0. Input for ADC0 channel 0.
            adc12=0x00000007,     ///<ADC1_2. Input for ADC1 channel 2.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(10,8),Register::ReadWriteAccess,VpVal> vp{};
        namespace VpValC{
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::vrefDivider2> vrefDivider2{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::acmpI1> acmpI1{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::acmpI2> acmpI2{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::acmp2I3> acmp2I3{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::acmp2I4> acmp2I4{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::internal0> internal0{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::adc00> adc00{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::adc12> adc12{};
        }
        ///Reserved.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(12,11),Register::ReadWriteAccess,unsigned> reserved{};
        ///Hysteresis control. When enabled, hysteresis determines the difference required between the comparator inputs before the comparator output switches. The difference must be in the direction opposite of the current comparator output.
        enum class HysVal {
            hysteresisIsTurned=0x00000000,     ///<Hysteresis is turned off, comparator output will change as the input voltages cross.
            hysteresisEq5Mv=0x00000001,     ///<Hysteresis = 5 mV.
            hysteresisEq10Mv=0x00000002,     ///<Hysteresis = 10 mV.
            hysteresisEq15Mv=0x00000003,     ///<Hysteresis = 15 mV.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(14,13),Register::ReadWriteAccess,HysVal> hys{};
        namespace HysValC{
            constexpr Register::FieldValue<decltype(hys)::Type,HysVal::hysteresisIsTurned> hysteresisIsTurned{};
            constexpr Register::FieldValue<decltype(hys)::Type,HysVal::hysteresisEq5Mv> hysteresisEq5Mv{};
            constexpr Register::FieldValue<decltype(hys)::Type,HysVal::hysteresisEq10Mv> hysteresisEq10Mv{};
            constexpr Register::FieldValue<decltype(hys)::Type,HysVal::hysteresisEq15Mv> hysteresisEq15Mv{};
        }
        ///Selects the polarity of the CMP output for purposes of generating level interrupts.
        enum class IntpolVal {
            notInverted=0x00000000,     ///<Not inverted. The output is used as-is for generating interrupts.
            inverted=0x00000001,     ///<Inverted. The output is used inverted for generating interrupts.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(15,15),Register::ReadWriteAccess,IntpolVal> intpol{};
        namespace IntpolValC{
            constexpr Register::FieldValue<decltype(intpol)::Type,IntpolVal::notInverted> notInverted{};
            constexpr Register::FieldValue<decltype(intpol)::Type,IntpolVal::inverted> inverted{};
        }
        ///Select interrupt type.
        enum class InttypeVal {
            edge=0x00000000,     ///<Edge. Comparator interrupt is edge triggered.
            level=0x00000001,     ///<Level. Comparator interrupt is level triggered.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(16,16),Register::ReadWriteAccess,InttypeVal> inttype{};
        namespace InttypeValC{
            constexpr Register::FieldValue<decltype(inttype)::Type,InttypeVal::edge> edge{};
            constexpr Register::FieldValue<decltype(inttype)::Type,InttypeVal::level> level{};
        }
        ///Select edge triggered interrupt to be active on either high or low transitions, when INTTYPE = 0.
        enum class IntedgeVal {
            falling=0x00000000,     ///<Falling. Comparator interrupt is active on falling edges.
            rising=0x00000001,     ///<Rising. Comparator interrupt is active on rising edges.
            bothEdges=0x00000002,     ///<Both edges. Comparator Interrupt is active on both edges.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(18,17),Register::ReadWriteAccess,IntedgeVal> intedge{};
        namespace IntedgeValC{
            constexpr Register::FieldValue<decltype(intedge)::Type,IntedgeVal::falling> falling{};
            constexpr Register::FieldValue<decltype(intedge)::Type,IntedgeVal::rising> rising{};
            constexpr Register::FieldValue<decltype(intedge)::Type,IntedgeVal::bothEdges> bothEdges{};
        }
        ///Interrupt flag.
        enum class IntflagVal {
            notPending=0x00000000,     ///<Not pending. The Comparator interrupt is not pending.
            pending=0x00000001,     ///<Pending. The Comparator interrupt is pending. Writing a 1 to this bit clears the flag.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(19,19),Register::ReadWriteAccess,IntflagVal> intflag{};
        namespace IntflagValC{
            constexpr Register::FieldValue<decltype(intflag)::Type,IntflagVal::notPending> notPending{};
            constexpr Register::FieldValue<decltype(intflag)::Type,IntflagVal::pending> pending{};
        }
        ///Voltage ladder enable for comparator 2.
        enum class VladenVal {
            disabled=0x00000000,     ///<Disabled. The Comparator voltage ladder is disabled.
            enabled=0x00000001,     ///<Enabled. The Comparator voltage ladder is enabled.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(20,20),Register::ReadWriteAccess,VladenVal> vladen{};
        namespace VladenValC{
            constexpr Register::FieldValue<decltype(vladen)::Type,VladenVal::disabled> disabled{};
            constexpr Register::FieldValue<decltype(vladen)::Type,VladenVal::enabled> enabled{};
        }
        ///Reserved.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(21,21),Register::ReadWriteAccess,unsigned> reserved{};
        ///Voltage reference select for comparator 2 voltage ladder.
        enum class VladrefVal {
            vrefCmpPin=0x00000000,     ///<VREF_CMP pin.
            vddaPin=0x00000001,     ///<VDDA pin.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(22,22),Register::ReadWriteAccess,VladrefVal> vladref{};
        namespace VladrefValC{
            constexpr Register::FieldValue<decltype(vladref)::Type,VladrefVal::vrefCmpPin> vrefCmpPin{};
            constexpr Register::FieldValue<decltype(vladref)::Type,VladrefVal::vddaPin> vddaPin{};
        }
        ///Reserved.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(23,23),Register::ReadWriteAccess,unsigned> reserved{};
        ///Voltage ladder value for comparator 2. The reference voltage Vref depends on the setting of VLADREF (either VDD(3V3) or voltage on pin VREF_CMP). 00000 = Vss. 00001 = 1 x Vref / 31. 00010 = 2 x Vref / 31. ... 11111 = Vref
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(28,24),Register::ReadWriteAccess,unsigned> vsel{};
        ///Configure the propagation delay. A shorter propagation delay means higher power consumption. Use values from 0x0 (shortest propagation delay and highest power consumption) to 0x2 (longest propagation delay and lowest power consumption). See the data sheet for details.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(30,29),Register::ReadWriteAccess,unsigned> dly{};
        ///Reserved.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(31,31),Register::ReadWriteAccess,unsigned> reserved{};
    }
    namespace AcmpCmp3{    ///<Comparator 3 source control
        using Addr = Register::Address<0x4000801c,0x00000000,0x00000000,unsigned>;
        ///Comparator enable control.
        enum class EnVal {
            disabled=0x00000000,     ///<Disabled. Comparator disabled.
            enabled=0x00000001,     ///<Enabled. Comparator is enabled.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(0,0),Register::ReadWriteAccess,EnVal> en{};
        namespace EnValC{
            constexpr Register::FieldValue<decltype(en)::Type,EnVal::disabled> disabled{};
            constexpr Register::FieldValue<decltype(en)::Type,EnVal::enabled> enabled{};
        }
        ///Reserved.
        // NOTE(review): continuation of namespace AcmpCmp3 (register at 0x4000801c);
        // layout mirrors AcmpCmp0 with comparator-3 input sources. This chunk ends
        // inside the IntedgeVal enum; the remainder of the namespace follows outside
        // this view and is left untouched.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(1,1),Register::ReadWriteAccess,unsigned> reserved{};
        ///Interrupt enable.
        enum class IntenVal {
            disabled=0x00000000,     ///<Disabled. Interrupts are disabled..
            enabled=0x00000001,     ///<Enabled. Interrupts are enabled.. Must set to 1 for interrupts to propagate to the NVIC and start-up logic.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(2,2),Register::ReadWriteAccess,IntenVal> inten{};
        namespace IntenValC{
            constexpr Register::FieldValue<decltype(inten)::Type,IntenVal::disabled> disabled{};
            constexpr Register::FieldValue<decltype(inten)::Type,IntenVal::enabled> enabled{};
        }
        ///Comparator status. This bit reflects the comparator output
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(3,3),Register::ReadWriteAccess,unsigned> stat{};
        ///VM input select.
        enum class VmVal {
            vrefDivider3=0x00000000,     ///<Vref divider 3.
            acmpI1=0x00000001,     ///<ACMP_I1.
            acmpI2=0x00000002,     ///<ACMP_I2.
            acmp3I3=0x00000003,     ///<ACMP3_I3.
            acmp3I4=0x00000004,     ///<ACMP3_I4.
            internal0=0x00000005,     ///<Internal 0.9 V band gap reference.
            adc11=0x00000006,     ///<ADC1_1. Input for ADC1 channel 1.
            adc13=0x00000007,     ///<ADC1_3. Input for ADC1 channel 3.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(6,4),Register::ReadWriteAccess,VmVal> vm{};
        namespace VmValC{
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::vrefDivider3> vrefDivider3{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::acmpI1> acmpI1{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::acmpI2> acmpI2{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::acmp3I3> acmp3I3{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::acmp3I4> acmp3I4{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::internal0> internal0{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::adc11> adc11{};
            constexpr Register::FieldValue<decltype(vm)::Type,VmVal::adc13> adc13{};
        }
        ///Reserved.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,7),Register::ReadWriteAccess,unsigned> reserved{};
        ///VP input select.
        enum class VpVal {
            vrefDivider3=0x00000000,     ///<Vref divider 3.
            acmpI1=0x00000001,     ///<ACMP_I1.
            acmpI2=0x00000002,     ///<ACMP_I2.
            acmp3I3=0x00000003,     ///<ACMP3_I3.
            acmp3I4=0x00000004,     ///<ACMP3_I4.
            internal0=0x00000005,     ///<Internal 0.9 V band gap reference.
            adc11=0x00000006,     ///<ADC1_1. Input for ADC1 channel 1.
            adc13=0x00000007,     ///<ADC1_3. Input for ADC1 channel 3.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(10,8),Register::ReadWriteAccess,VpVal> vp{};
        namespace VpValC{
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::vrefDivider3> vrefDivider3{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::acmpI1> acmpI1{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::acmpI2> acmpI2{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::acmp3I3> acmp3I3{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::acmp3I4> acmp3I4{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::internal0> internal0{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::adc11> adc11{};
            constexpr Register::FieldValue<decltype(vp)::Type,VpVal::adc13> adc13{};
        }
        ///Reserved.
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(12,11),Register::ReadWriteAccess,unsigned> reserved{};
        ///Hysteresis control. When enabled, hysteresis determines the difference required between the comparator inputs before the comparator output switches. The difference must be in the direction opposite of the current comparator output.
        enum class HysVal {
            hysteresisIsTurned=0x00000000,     ///<Hysteresis is turned off, comparator output will change as the input voltages cross.
            hysteresisEq5Mv=0x00000001,     ///<Hysteresis = 5 mV.
            hysteresisEq10Mv=0x00000002,     ///<Hysteresis = 10 mV.
            hysteresisEq15Mv=0x00000003,     ///<Hysteresis = 15 mV.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(14,13),Register::ReadWriteAccess,HysVal> hys{};
        namespace HysValC{
            constexpr Register::FieldValue<decltype(hys)::Type,HysVal::hysteresisIsTurned> hysteresisIsTurned{};
            constexpr Register::FieldValue<decltype(hys)::Type,HysVal::hysteresisEq5Mv> hysteresisEq5Mv{};
            constexpr Register::FieldValue<decltype(hys)::Type,HysVal::hysteresisEq10Mv> hysteresisEq10Mv{};
            constexpr Register::FieldValue<decltype(hys)::Type,HysVal::hysteresisEq15Mv> hysteresisEq15Mv{};
        }
        ///Selects the polarity of the CMP output for purposes of generating level interrupts.
        enum class IntpolVal {
            notInverted=0x00000000,     ///<Not inverted. The output is used as-is for generating interrupts.
            inverted=0x00000001,     ///<Inverted. The output is used inverted for generating interrupts.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(15,15),Register::ReadWriteAccess,IntpolVal> intpol{};
        namespace IntpolValC{
            constexpr Register::FieldValue<decltype(intpol)::Type,IntpolVal::notInverted> notInverted{};
            constexpr Register::FieldValue<decltype(intpol)::Type,IntpolVal::inverted> inverted{};
        }
        ///Select interrupt type.
        enum class InttypeVal {
            edge=0x00000000,     ///<Edge. Comparator interrupt is edge triggered.
            level=0x00000001,     ///<Level. Comparator interrupt is level triggered.
        };
        constexpr Register::FieldLocation<Addr,Register::maskFromRange(16,16),Register::ReadWriteAccess,InttypeVal> inttype{};
        namespace InttypeValC{
            constexpr Register::FieldValue<decltype(inttype)::Type,InttypeVal::edge> edge{};
            constexpr Register::FieldValue<decltype(inttype)::Type,InttypeVal::level> level{};
        }
        ///Select edge triggered interrupt to be active on either high or low transitions, when INTTYPE = 0.
        enum class IntedgeVal {
            falling=0x00000000,     ///<Falling. Comparator interrupt is active on falling edges.
            rising=0x00000001,     ///<Rising. Comparator interrupt is active on rising edges.
            bothEdges=0x00000002,     ///<Both edges.
Comparator Interrupt is active on both edges. }; constexpr Register::FieldLocation<Addr,Register::maskFromRange(18,17),Register::ReadWriteAccess,IntedgeVal> intedge{}; namespace IntedgeValC{ constexpr Register::FieldValue<decltype(intedge)::Type,IntedgeVal::falling> falling{}; constexpr Register::FieldValue<decltype(intedge)::Type,IntedgeVal::rising> rising{}; constexpr Register::FieldValue<decltype(intedge)::Type,IntedgeVal::bothEdges> bothEdges{}; } ///Interrupt flag. enum class IntflagVal { notPending=0x00000000, ///<Not pending. The Comparator interrupt is not pending. pending=0x00000001, ///<Pending. The Comparator interrupt is pending. Writing a 1 to this bit clears the flag. }; constexpr Register::FieldLocation<Addr,Register::maskFromRange(19,19),Register::ReadWriteAccess,IntflagVal> intflag{}; namespace IntflagValC{ constexpr Register::FieldValue<decltype(intflag)::Type,IntflagVal::notPending> notPending{}; constexpr Register::FieldValue<decltype(intflag)::Type,IntflagVal::pending> pending{}; } ///Voltage ladder enable for comparator 3. enum class VladenVal { disabled=0x00000000, ///<Disabled. The Comparator voltage ladder is disabled. enabled=0x00000001, ///<Enabled. The Comparator voltage ladder is enabled. }; constexpr Register::FieldLocation<Addr,Register::maskFromRange(20,20),Register::ReadWriteAccess,VladenVal> vladen{}; namespace VladenValC{ constexpr Register::FieldValue<decltype(vladen)::Type,VladenVal::disabled> disabled{}; constexpr Register::FieldValue<decltype(vladen)::Type,VladenVal::enabled> enabled{}; } ///Reserved. constexpr Register::FieldLocation<Addr,Register::maskFromRange(21,21),Register::ReadWriteAccess,unsigned> reserved{}; ///Voltage reference select for comparator 3 voltage ladder. enum class VladrefVal { vrefCmpPin=0x00000000, ///<VREF_CMP pin. vddaPin=0x00000001, ///<VDDA pin. 
}; constexpr Register::FieldLocation<Addr,Register::maskFromRange(22,22),Register::ReadWriteAccess,VladrefVal> vladref{}; namespace VladrefValC{ constexpr Register::FieldValue<decltype(vladref)::Type,VladrefVal::vrefCmpPin> vrefCmpPin{}; constexpr Register::FieldValue<decltype(vladref)::Type,VladrefVal::vddaPin> vddaPin{}; } ///Reserved. constexpr Register::FieldLocation<Addr,Register::maskFromRange(23,23),Register::ReadWriteAccess,unsigned> reserved{}; ///Voltage ladder value for comparator 3. The reference voltage Vref depends on the setting of VLADREF (either VDD(3V3) or voltage on pin VREF_CMP). 00000 = Vss. 00001 = 1 x Vref / 31. 00010 = 2 x Vref / 31. ... 11111 = Vref constexpr Register::FieldLocation<Addr,Register::maskFromRange(28,24),Register::ReadWriteAccess,unsigned> vsel{}; ///Configure the propagation delay. A shorter propagation delay means higher power consumption. Use values from 0x0 (shortest propagation delay and highest power consumption) to 0x2 (longest propagation delay and lowest power consumption). See the data sheet for details. constexpr Register::FieldLocation<Addr,Register::maskFromRange(30,29),Register::ReadWriteAccess,unsigned> dly{}; ///Reserved. constexpr Register::FieldLocation<Addr,Register::maskFromRange(31,31),Register::ReadWriteAccess,unsigned> reserved{}; } namespace AcmpCmpfiltr0{ ///<Comparator 0 pin filter set-up using Addr = Register::Address<0x40008008,0x00000000,0x00000000,unsigned>; ///Digital filter sample mode. enum class SmodeVal { bypassInputFilter=0x00000000, ///<Bypass input filter. v1ClockCycle=0x00000001, ///<1 clock cycle. Input pulses shorter than one filter clock are rejected. v2ClockCycles=0x00000002, ///<2 clock cycles. Input pulses shorter than two filter clocks are rejected. v3ClockCycles=0x00000003, ///<3 clock cycles. Input pulses shorter than three filter clocks are rejected. 
}; constexpr Register::FieldLocation<Addr,Register::maskFromRange(1,0),Register::ReadWriteAccess,SmodeVal> sMode{}; namespace SmodeValC{ constexpr Register::FieldValue<decltype(sMode)::Type,SmodeVal::bypassInputFilter> bypassInputFilter{}; constexpr Register::FieldValue<decltype(sMode)::Type,SmodeVal::v1ClockCycle> v1ClockCycle{}; constexpr Register::FieldValue<decltype(sMode)::Type,SmodeVal::v2ClockCycles> v2ClockCycles{}; constexpr Register::FieldValue<decltype(sMode)::Type,SmodeVal::v3ClockCycles> v3ClockCycles{}; } ///Select clock divider for comparator clock CMP_PCLK. enum class ClkdivVal { cmpPclk=0x00000000, ///<CMP_PCLK. cmpPclkdiv2=0x00000001, ///<CMP_PCLK/2. cmpPclkdiv4=0x00000002, ///<CMP_PCLK/4. cmpPclkdiv8=0x00000003, ///<CMP_PCLK/8. cmpPclkdiv16=0x00000004, ///<CMP_PCLK/16. cmpPclkdiv32=0x00000005, ///<CMP_PCLK/32. cmpPclkdiv64=0x00000006, ///<CMP_PCLK/64. }; constexpr Register::FieldLocation<Addr,Register::maskFromRange(4,2),Register::ReadWriteAccess,ClkdivVal> clkdiv{}; namespace ClkdivValC{ constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclk> cmpPclk{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv2> cmpPclkdiv2{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv4> cmpPclkdiv4{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv8> cmpPclkdiv8{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv16> cmpPclkdiv16{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv32> cmpPclkdiv32{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv64> cmpPclkdiv64{}; } ///Reserved constexpr Register::FieldLocation<Addr,Register::maskFromRange(31,5),Register::ReadWriteAccess,unsigned> reserved{}; } namespace AcmpCmpfiltr1{ ///<Comparator 0 pin filter set-up using Addr = Register::Address<0x40008010,0x00000000,0x00000000,unsigned>; ///Digital filter sample mode. 
enum class SmodeVal { bypassInputFilter=0x00000000, ///<Bypass input filter. v1ClockCycle=0x00000001, ///<1 clock cycle. Input pulses shorter than one filter clock are rejected. v2ClockCycles=0x00000002, ///<2 clock cycles. Input pulses shorter than two filter clocks are rejected. v3ClockCycles=0x00000003, ///<3 clock cycles. Input pulses shorter than three filter clocks are rejected. }; constexpr Register::FieldLocation<Addr,Register::maskFromRange(1,0),Register::ReadWriteAccess,SmodeVal> sMode{}; namespace SmodeValC{ constexpr Register::FieldValue<decltype(sMode)::Type,SmodeVal::bypassInputFilter> bypassInputFilter{}; constexpr Register::FieldValue<decltype(sMode)::Type,SmodeVal::v1ClockCycle> v1ClockCycle{}; constexpr Register::FieldValue<decltype(sMode)::Type,SmodeVal::v2ClockCycles> v2ClockCycles{}; constexpr Register::FieldValue<decltype(sMode)::Type,SmodeVal::v3ClockCycles> v3ClockCycles{}; } ///Select clock divider for comparator clock CMP_PCLK. enum class ClkdivVal { cmpPclk=0x00000000, ///<CMP_PCLK. cmpPclkdiv2=0x00000001, ///<CMP_PCLK/2. cmpPclkdiv4=0x00000002, ///<CMP_PCLK/4. cmpPclkdiv8=0x00000003, ///<CMP_PCLK/8. cmpPclkdiv16=0x00000004, ///<CMP_PCLK/16. cmpPclkdiv32=0x00000005, ///<CMP_PCLK/32. cmpPclkdiv64=0x00000006, ///<CMP_PCLK/64. 
}; constexpr Register::FieldLocation<Addr,Register::maskFromRange(4,2),Register::ReadWriteAccess,ClkdivVal> clkdiv{}; namespace ClkdivValC{ constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclk> cmpPclk{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv2> cmpPclkdiv2{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv4> cmpPclkdiv4{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv8> cmpPclkdiv8{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv16> cmpPclkdiv16{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv32> cmpPclkdiv32{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv64> cmpPclkdiv64{}; } ///Reserved constexpr Register::FieldLocation<Addr,Register::maskFromRange(31,5),Register::ReadWriteAccess,unsigned> reserved{}; } namespace AcmpCmpfiltr2{ ///<Comparator 0 pin filter set-up using Addr = Register::Address<0x40008018,0x00000000,0x00000000,unsigned>; ///Digital filter sample mode. enum class SmodeVal { bypassInputFilter=0x00000000, ///<Bypass input filter. v1ClockCycle=0x00000001, ///<1 clock cycle. Input pulses shorter than one filter clock are rejected. v2ClockCycles=0x00000002, ///<2 clock cycles. Input pulses shorter than two filter clocks are rejected. v3ClockCycles=0x00000003, ///<3 clock cycles. Input pulses shorter than three filter clocks are rejected. 
}; constexpr Register::FieldLocation<Addr,Register::maskFromRange(1,0),Register::ReadWriteAccess,SmodeVal> sMode{}; namespace SmodeValC{ constexpr Register::FieldValue<decltype(sMode)::Type,SmodeVal::bypassInputFilter> bypassInputFilter{}; constexpr Register::FieldValue<decltype(sMode)::Type,SmodeVal::v1ClockCycle> v1ClockCycle{}; constexpr Register::FieldValue<decltype(sMode)::Type,SmodeVal::v2ClockCycles> v2ClockCycles{}; constexpr Register::FieldValue<decltype(sMode)::Type,SmodeVal::v3ClockCycles> v3ClockCycles{}; } ///Select clock divider for comparator clock CMP_PCLK. enum class ClkdivVal { cmpPclk=0x00000000, ///<CMP_PCLK. cmpPclkdiv2=0x00000001, ///<CMP_PCLK/2. cmpPclkdiv4=0x00000002, ///<CMP_PCLK/4. cmpPclkdiv8=0x00000003, ///<CMP_PCLK/8. cmpPclkdiv16=0x00000004, ///<CMP_PCLK/16. cmpPclkdiv32=0x00000005, ///<CMP_PCLK/32. cmpPclkdiv64=0x00000006, ///<CMP_PCLK/64. }; constexpr Register::FieldLocation<Addr,Register::maskFromRange(4,2),Register::ReadWriteAccess,ClkdivVal> clkdiv{}; namespace ClkdivValC{ constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclk> cmpPclk{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv2> cmpPclkdiv2{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv4> cmpPclkdiv4{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv8> cmpPclkdiv8{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv16> cmpPclkdiv16{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv32> cmpPclkdiv32{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv64> cmpPclkdiv64{}; } ///Reserved constexpr Register::FieldLocation<Addr,Register::maskFromRange(31,5),Register::ReadWriteAccess,unsigned> reserved{}; } namespace AcmpCmpfiltr3{ ///<Comparator 0 pin filter set-up using Addr = Register::Address<0x40008020,0x00000000,0x00000000,unsigned>; ///Digital filter sample mode. 
enum class SmodeVal { bypassInputFilter=0x00000000, ///<Bypass input filter. v1ClockCycle=0x00000001, ///<1 clock cycle. Input pulses shorter than one filter clock are rejected. v2ClockCycles=0x00000002, ///<2 clock cycles. Input pulses shorter than two filter clocks are rejected. v3ClockCycles=0x00000003, ///<3 clock cycles. Input pulses shorter than three filter clocks are rejected. }; constexpr Register::FieldLocation<Addr,Register::maskFromRange(1,0),Register::ReadWriteAccess,SmodeVal> sMode{}; namespace SmodeValC{ constexpr Register::FieldValue<decltype(sMode)::Type,SmodeVal::bypassInputFilter> bypassInputFilter{}; constexpr Register::FieldValue<decltype(sMode)::Type,SmodeVal::v1ClockCycle> v1ClockCycle{}; constexpr Register::FieldValue<decltype(sMode)::Type,SmodeVal::v2ClockCycles> v2ClockCycles{}; constexpr Register::FieldValue<decltype(sMode)::Type,SmodeVal::v3ClockCycles> v3ClockCycles{}; } ///Select clock divider for comparator clock CMP_PCLK. enum class ClkdivVal { cmpPclk=0x00000000, ///<CMP_PCLK. cmpPclkdiv2=0x00000001, ///<CMP_PCLK/2. cmpPclkdiv4=0x00000002, ///<CMP_PCLK/4. cmpPclkdiv8=0x00000003, ///<CMP_PCLK/8. cmpPclkdiv16=0x00000004, ///<CMP_PCLK/16. cmpPclkdiv32=0x00000005, ///<CMP_PCLK/32. cmpPclkdiv64=0x00000006, ///<CMP_PCLK/64. 
}; constexpr Register::FieldLocation<Addr,Register::maskFromRange(4,2),Register::ReadWriteAccess,ClkdivVal> clkdiv{}; namespace ClkdivValC{ constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclk> cmpPclk{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv2> cmpPclkdiv2{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv4> cmpPclkdiv4{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv8> cmpPclkdiv8{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv16> cmpPclkdiv16{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv32> cmpPclkdiv32{}; constexpr Register::FieldValue<decltype(clkdiv)::Type,ClkdivVal::cmpPclkdiv64> cmpPclkdiv64{}; } ///Reserved constexpr Register::FieldLocation<Addr,Register::maskFromRange(31,5),Register::ReadWriteAccess,unsigned> reserved{}; } }
//====================================================================== /*! \file ObjectLux.hpp * * \copydoc Copyright * \author Mario Brumm (mb) * \date Apr 26, 2012 *///------------------------------------------------------------------- #ifndef IBEOSDK_OBJECTLUX_HPP_SEEN #define IBEOSDK_OBJECTLUX_HPP_SEEN //====================================================================== #include <ibeosdk/misc/WinCompatibility.hpp> #include <ibeosdk/datablocks/snippets/Snippet.hpp> #include <ibeosdk/ObjectBasic.hpp> #include <ibeosdk/Point2d.hpp> #include <ibeosdk/PointSigma2d.hpp> #include <vector> #include <iostream> #include <cmath> //====================================================================== namespace ibeosdk { //====================================================================== class ObjectLux : public Snippet { public: ObjectLux(); ObjectLux(const ObjectLux& other); ObjectLux& operator= (const ObjectLux& other); virtual ~ObjectLux(); public: virtual std::streamsize getSerializedSize() const; virtual bool deserialize(std::istream& is); virtual bool serialize(std::ostream& os) const; public: UINT16 getObjectId() const { return m_id; } UINT16 getObjectAge() const { return m_age; } UINT16 getPredictionAge() const { return m_predictionAge; } UINT16 getRelativeTimestamp() const { return m_relativeTimestamp; } Point2d getReferencePoint() const { return m_refPoint; } PointSigma2d getReferencePointSigma() const { return m_refPointSigma; } Point2d getClosestPoint() const { return m_closestPoint; } Point2d getBoundingBoxCenter() const { return m_boundingBoxCenter; } UINT16 getBoundingBoxWidth() const { return m_boundingBoxWidth; } UINT16 getBoundingBoxLength() const { return m_boundingBoxLength; } Point2d getObjectBoxCenter() const { return m_objectBoxCenter; } UINT16 getObjectBoxSizeX() const { return m_objectBoxSizeX; } UINT16 getObjectBoxSizeY() const { return m_objectBoxSizeY; } INT16 getObjectBoxOrientation() const { return m_objectBoxOrientation; } Point2d 
getAbsoluteVelocity() const { return m_absVelocity; } UINT16 getAbsoluteVelocitySigmaX() const { return m_absVelSigmaX; } UINT16 getAbsoluteVelocitySigmaY() const { return m_absVelSigmaY; } Point2d getRelativeVelocity() const { return m_relVelocity; } luxObjectClass::LuxObjectClass getClassification() const { return m_class; } UINT16 getClassificationAge() const { return m_classAge; } UINT16 getClassificationCertainty() const { return m_classCertainty; } UINT16 getNumberOfContourPoints() const { return m_numContourPoints; } bool isNumContourPointsValid() const { return this->m_numContourPointsIsValid; } const std::vector<Point2d>& getContourPoints() const { return m_contourPoints; } std::vector<Point2d>& getContourPoints() { return m_contourPoints; } public: void setObjectId(const UINT16 id) { this->m_id = id; } void setObjectAge(const UINT16 age) { this->m_age = age; } void setPredictionAge(const UINT16 predictionAge) { this->m_predictionAge = predictionAge; } void setRelativeTimestamp(const UINT16 relativeTimestamp) { this->m_relativeTimestamp = relativeTimestamp; } void setRefPoint(const Point2d refPoint) { this->m_refPoint = refPoint; } void setRefPointSigma(const PointSigma2d refPointSigma) { this->m_refPointSigma = refPointSigma; } void setClosestPoint(const Point2d closestPoint) { this->m_closestPoint = closestPoint; } void setBoundingBoxCenter(const Point2d boundingBoxCenter) { this->m_boundingBoxCenter = boundingBoxCenter; } void setBoundingBoxWidth(const UINT16 boundingBoxWidth) { this->m_boundingBoxWidth = boundingBoxWidth; } void setBoundingBoxLength(const UINT16 boundingBoxLength) { this->m_boundingBoxLength = boundingBoxLength; } void setObjectBoxCenter(const Point2d objectBoxCenter) { this->m_objectBoxCenter = objectBoxCenter; } void setObjectBoxLength(const UINT16 objectBoxLength) { this->m_objectBoxSizeX = objectBoxLength; } void setObjectBoxWidth(const UINT16 objectBoxWidth) { this->m_objectBoxSizeY = objectBoxWidth; } void 
setObjectBoxOrientation(const INT16 objectBoxOrientation) { this->m_objectBoxOrientation = objectBoxOrientation; } void setAbsVelocity(const Point2d absVelocity) { this->m_absVelocity = absVelocity; } void setAbsVelSigmaX(const UINT16 absVelSigmaX) { this->m_absVelSigmaX = absVelSigmaX; } void setAbsVelSigmaY(const UINT16 absVelSigmaY) { this->m_absVelSigmaY = absVelSigmaY; } void setRelVelocity(const Point2d relVelocity) { this->m_relVelocity = relVelocity; } void setClass(const luxObjectClass::LuxObjectClass cl) { this->m_class = cl; } void setClassAge(const UINT16 classAge) { this->m_classAge = classAge; } void setClassCertainty(const UINT16 classCertainty) { this->m_classCertainty = classCertainty; } void setNumContourPoints(const UINT16 numContourPoints) { if (numContourPoints!= contourIsInvalid) { this->m_numContourPoints = numContourPoints; this-> m_numContourPointsIsValid = true; } else { this->m_numContourPoints = 1; this-> m_numContourPointsIsValid = false; } } void setNumCoutourPointsValid(const bool valid) { this->m_numContourPointsIsValid = valid; } void setContourPoints(const std::vector<Point2d>& newContourPts) { this->m_contourPoints = newContourPts; } public: static float angle2rad(const INT16 ticks) { const UINT16 angleTicksPerRotation = 36000; // (x < 0) ? ((x % N) + N) : x % N return float(((ticks < 0) ? 
float((ticks % angleTicksPerRotation) + angleTicksPerRotation) : float(ticks % angleTicksPerRotation)) * 2.f * M_PI / float(angleTicksPerRotation)); } protected: static const UINT16 contourIsInvalid; static const int maxContourPoints = 34; protected: UINT16 m_id; UINT16 m_age; UINT16 m_predictionAge; UINT16 m_relativeTimestamp; Point2d m_refPoint; PointSigma2d m_refPointSigma; Point2d m_closestPoint; Point2d m_boundingBoxCenter; UINT16 m_boundingBoxWidth; // y-value UINT16 m_boundingBoxLength; // x-value Point2d m_objectBoxCenter; UINT16 m_objectBoxSizeX; // x-value UINT16 m_objectBoxSizeY; // y-value INT16 m_objectBoxOrientation; // angle in [deg/100]. Point2d m_absVelocity; UINT16 m_absVelSigmaX; UINT16 m_absVelSigmaY; Point2d m_relVelocity; luxObjectClass::LuxObjectClass m_class; UINT16 m_classAge; UINT16 m_classCertainty; UINT16 m_numContourPoints; bool m_numContourPointsIsValid; std::vector<Point2d> m_contourPoints; }; // ObjectLux //====================================================================== std::ostream& operator<<(std::ostream& os, const ObjectLux& luxObj); //====================================================================== } // namespace ibeosdk //====================================================================== #endif // IBEOSDK_OBJECTLUX_HPP_SEEN //======================================================================
// This file auto generated by plugin for ida pro. Generated code only for x64. Please, dont change manually #pragma once #include <common/common.h> #include <_INTERNET_CACHE_ENTRY_INFOA.hpp> START_ATF_NAMESPACE typedef _INTERNET_CACHE_ENTRY_INFOA *LPINTERNET_CACHE_ENTRY_INFOA; END_ATF_NAMESPACE
/*========================== begin_copyright_notice ============================

Copyright (C) 2020-2021 Intel Corporation

SPDX-License-Identifier: MIT

============================= end_copyright_notice ===========================*/

#include "BuildIR.h"
#include "../Timer.h"

using namespace vISA;

// Bit offsets within the sampler message descriptor (layout documented in
// createSamplerMsgDesc below).
static const unsigned MESSAGE_PRECISION_SUBTYPE_OFFSET = 30;
static const unsigned SIMD_MODE_2_OFFSET = 29;

// Whether the given sampler opcode is a "with pixel-null-mask / PO" variant.
// Always false in this implementation -- NOTE(review): presumably overridden
// or specialized for other platforms elsewhere; confirm.
bool IR_Builder::isSamplerMsgWithPO(
    VISASampler3DSubOpCode samplerOp) const
{
    return false;
}

// Builds the function-control portion of a sampler message descriptor.
// Only the message type, SIMD mode, and return/input precision bits are set
// here; the remaining descriptor fields are filled in by createSendInst.
uint32_t IR_Builder::createSamplerMsgDesc(
    VISASampler3DSubOpCode samplerOp,
    bool isNativeSIMDSize,
    bool isFP16Return,
    bool isFP16Input) const
{
    // Now create message descriptor
    // 7:0 - BTI
    // 11:8 - Sampler Index
    // 16:12 - Message Type
    // 18:17 - SIMD Mode[0:1]
    // 19 - Header Present
    // 24:20 - Response Length
    // 28:25 - Message Length
    // 29 - SIMD Mode[2]
    // 30 - Return Format
    // 31 - CPS Message LOD Compensation Enable
    // We only set message type, SIMD mode, and return format here. The other fields
    // are set in createSendInst as they are common with other send messages
    uint32_t fc = 0;

    fc |= ((uint32_t)samplerOp & 0x1f) << 12;

    if (isNativeSIMDSize)
    {
        fc |= (1 << 17);
    }
    else
    {
        fc |= (2 << 17);
    }

    if (isFP16Return)
    {
        // 16-bit return type. Note that this doesn't change the return length
        fc |= (1 << MESSAGE_PRECISION_SUBTYPE_OFFSET);
    }

    if (isFP16Input)
    {
        // Half-float input is signalled via bit 29 ("SIMD Mode[2]" in the
        // layout above) -- NOTE(review): confirm this encoding against the PRM.
        fc |= (1 << SIMD_MODE_2_OFFSET);
    }

    return fc;
}

// Translates a vISA SAMPLEINFO instruction into a sampler send.
// SAMPLEINFO carries no parameters, so the payload is header-only (or a
// single zero-initialized fake-header GRF on SKL+ when only channel R is read).
int IR_Builder::translateVISASampleInfoInst(
    VISA_Exec_Size executionSize,
    VISA_EMask_Ctrl emask,
    ChannelMask chMask,
    G4_Operand* surface,
    G4_DstRegRegion* dst)
{
    TIME_SCOPE(VISA_BUILDER_IR_CONSTRUCTION);

    G4_ExecSize execSize {Get_VISA_Exec_Size(executionSize)};
    G4_InstOpts instOpt = Get_Gen4_Emask(emask, execSize);
    VISAChannelMask channels = chMask.getAPI();
    // SKL+ with only channel R enabled can use a dummy (fake) header.
    bool useFakeHeader = (getPlatform() < GENX_SKL) ? false :
        (channels == CHANNEL_MASK_R);
    bool preEmption = forceSamplerHeader();
    bool forceSplitSend = shouldForceSplitSend(surface);
    bool useHeader = true;
    // SAMPLEINFO has 0 parameters so its only header
    unsigned int numRows = 1;

    G4_Declare *msg = NULL;
    G4_SrcRegRegion *m0 = NULL;

    if (!useFakeHeader || forceSplitSend || preEmption)
    {
        // Real header: channel mask goes into DWord 2 of the header GRF.
        msg = getSamplerHeader(false /*isBindlessSampler*/, false /*samperIndexGE16*/);

        unsigned int secondDword = chMask.getHWEncoding() << 12;

        G4_Imm* immOpndSecondDword = createImm(secondDword, Type_UD);

        // mov (1) msg(0,2) immOpndSecondDword
        auto payloadDstRgn = createDst(msg->getRegVar(), 0, 2, 1, Type_UD);

        G4_INST* movInst = createMov(g4::SIMD1, payloadDstRgn, immOpndSecondDword, InstOpt_NoOpt, true);
        movInst->setOptionOn(InstOpt_WriteEnable);

        m0 = createSrcRegRegion(msg, getRegionStride1());
    }
    else
    {
        // Fake header: one zero-filled temp GRF sent without the header bit.
        useHeader = false;
        msg = createTempVar(getNativeExecSize(), Type_UD, GRFALIGN);
        G4_DstRegRegion *dst = createDst(msg->getRegVar(), 0, 0, 1, Type_UD);
        G4_Imm* src0Imm = createImm(0, Type_UD);
        (void) createMov(getNativeExecSize(), dst, src0Imm, InstOpt_WriteEnable, true);
        m0 = createSrc(msg->getRegVar(), 0, 0, getRegionStride1(), Type_UD);
    }

    // Now create message descriptor
    // 7:0 - BTI
    // 11:8 - Sampler Index
    // 16:12 - Message Type
    // 18:17 - SIMD Mode
    // 19 - Header Present
    // 24:20 - Response Length
    // 28:25 - Message Length
    // 29 - SIMD Mode
    // 30 - Return Format
    // 31 - CPS Message LOD Compensation Enable
    unsigned int fc = 0;

    fc |= ((unsigned int) VISA_3D_SAMPLEINFO & 0x1f) << 12;

    if (execSize == getNativeExecSize())
    {
        fc |= (1 << 17);
    }
    else
    {
        fc |= (2 << 17);
    }

    // Non-native SIMD width returns two GRFs per enabled channel.
    uint32_t retSize = (execSize == getNativeExecSize() ?
        chMask.getNumEnabledChannels() : chMask.getNumEnabledChannels() * 2);

    if (forceSplitSend)
    {
        createSplitSendInst(NULL, dst, m0, numRows,
            createNullSrc(Type_UD), 0, retSize,
            execSize, fc, SFID::SAMPLER, useHeader,
            SendAccess::READ_ONLY, surface, NULL, instOpt, false);
    }
    else
    {
        createSendInst(NULL, dst, m0, numRows, retSize,
            execSize, fc, SFID::SAMPLER, useHeader,
            SendAccess::READ_ONLY, surface, NULL, instOpt, false);
    }

    return VISA_SUCCESS;
}

// Translates a vISA RESINFO instruction (surface resolution query) into a
// sampler send. Payload is [optional header] + lod vector.
int IR_Builder::translateVISAResInfoInst(
    VISA_Exec_Size executionSize,
    VISA_EMask_Ctrl emask,
    ChannelMask chMask,
    G4_Operand* surface,
    G4_SrcRegRegion* lod,
    G4_DstRegRegion* dst)
{
    TIME_SCOPE(VISA_BUILDER_IR_CONSTRUCTION);

    G4_ExecSize execSize {Get_VISA_Exec_Size(executionSize)};
    G4_InstOpts instOpt = Get_Gen4_Emask(emask, execSize);
    //For SKL if channels are continuous don't need header
    VISAChannelMask channels = chMask.getAPI();
    bool preEmption = forceSamplerHeader();
    // NOTE(review): '?:' binds looser than '||', so the condition here is
    // (preEmption || getPlatform() < GENX_SKL). If preEmption was meant to
    // force the header unconditionally, this is a latent precedence bug --
    // confirm against upstream before changing.
    bool useHeader = preEmption || (getPlatform() < GENX_SKL) ?
        channels != CHANNEL_MASK_RGBA :
        (channels != CHANNEL_MASK_R && channels != CHANNEL_MASK_RG && channels != CHANNEL_MASK_RGB && channels != CHANNEL_MASK_RGBA);

    // Setup number of rows = (header + lod) by default
    unsigned int numRows = (execSize == getNativeExecSize() ? 1 : 2);
    if (useHeader)
    {
        numRows++;
    }
    unsigned int regOff = 0;
    uint32_t returnLength = (execSize == getNativeExecSize() ?
        chMask.getNumEnabledChannels() : chMask.getNumEnabledChannels() * 2);

    bool useSplitSend = useSends();

    G4_Declare *msg = NULL;
    G4_Declare *payloadUD = NULL;
    if (useSplitSend)
    {
        if (useHeader)
        {
            --numRows;
        }
        unsigned int numElts = numRows * numEltPerGRF<Type_UB>()/TypeSize(Type_F);
        msg = getSamplerHeader(false /*isBindlessSampler*/, false /*samperIndexGE16*/);
        payloadUD = createSendPayloadDcl(numElts, Type_UD);
    }
    else
    {
        // Single payload: header (if any) and lod are aliased into one declare.
        unsigned int numElts = numRows * numEltPerGRF<Type_UB>()/TypeSize(Type_F);
        msg = createSendPayloadDcl(numElts, Type_UD);
        payloadUD = createSendPayloadDcl(numElts - (useHeader ? GENX_SAMPLER_IO_SZ : 0), Type_UD);
        payloadUD->setAliasDeclare(msg, useHeader ? numEltPerGRF<Type_UB>() : 0);

        if (useHeader)
        {
            // Both SAMPLEINFO and RESINFO use header
            createMovR0Inst(msg, 0, 0, true);
        }
    }

    if (useHeader)
    {
        // Channel mask goes into DWord 2 of the header GRF.
        unsigned int secondDword = 0;
        secondDword |= (chMask.getHWEncoding() << 12);

        G4_Imm* immOpndSecondDword = createImm(secondDword, Type_UD);

        // mov (1) msg(0,2) immOpndSecondDword
        auto payloadDstRgn = createDst(msg->getRegVar(), 0, 2, 1, Type_UD);

        G4_INST* movInst = createMov(g4::SIMD1, payloadDstRgn, immOpndSecondDword, InstOpt_NoOpt, true);
        movInst->setOptionOn(InstOpt_WriteEnable);
    }

    // Copy over lod vector operand to payload's 1st row
    Copy_SrcRegRegion_To_Payload(payloadUD, regOff, lod, execSize, instOpt | InstOpt_BreakPoint);

    // Now create message descriptor
    // 7:0 - BTI
    // 11:8 - Sampler Index
    // 16:12 - Message Type
    // 18:17 - SIMD Mode
    // 19 - Header Present
    // 24:20 - Response Length
    // 28:25 - Message Length
    // 29 - SIMD Mode
    // 30 - Return Format
    // 31 - CPS Message LOD Compensation Enable
    unsigned int fc = 0;

    fc |= ((unsigned int) VISA_3D_RESINFO & 0x1f) << 12;

    if (execSize == getNativeExecSize())
    {
        fc |= (1 << 17);
    }
    else
    {
        fc |= (2 << 17);
    }

    if (useSplitSend)
    {
        G4_SrcRegRegion *m0 = nullptr;
        G4_SrcRegRegion *m1 = nullptr;
        unsigned int src0Size = 0;
        unsigned int src1Size = 0;
        if (useHeader)
        {
            m0 = createSrcRegRegion(msg, getRegionStride1());
            m1 = createSrcRegRegion(payloadUD, getRegionStride1());
            src0Size = 1;
            src1Size = numRows;
        }
        else
        {
            m0 = createSrcRegRegion(payloadUD, getRegionStride1());
            m1 = createNullSrc(Type_UD);
            src0Size = numRows;
            src1Size = 0;
        }
        createSplitSendInst(NULL, dst, m0, src0Size, m1, src1Size, returnLength,
            execSize, fc, SFID::SAMPLER, useHeader,
            SendAccess::READ_ONLY, surface, NULL, instOpt, false);
    }
    else
    {
        G4_SrcRegRegion *m = createSrcRegRegion(msg, getRegionStride1());
        createSendInst(NULL, dst, m, numRows, returnLength,
            execSize, fc, SFID::SAMPLER, useHeader,
            SendAccess::READ_ONLY, surface, NULL, instOpt, false);
    }

    return VISA_SUCCESS;
}

// generate a URB_SIMD8* message
// urbHandle -- 1 GRF holding 8 URB handles. This is the header of the message
// perSlotOffset -- 1 GRF holding 8 DWord offsets. If present, it must be immediately after the header
// channelMask -- 1 GRF holding 8 8-bit masks. In vISA spec they have constant values and must be
//                identical. If present, occurs after the per slot message phase if the per slot
//                message phase exists else it occurs after the header.
int IR_Builder::translateVISAURBWrite3DInst(
    G4_Predicate* pred,
    VISA_Exec_Size executionSize,
    VISA_EMask_Ctrl emask,
    uint8_t numOut,
    uint16_t globalOffset,
    G4_SrcRegRegion* channelMask,
    G4_SrcRegRegion* urbHandle,
    G4_SrcRegRegion* perSlotOffset,
    G4_SrcRegRegion* vertexData)
{
    TIME_SCOPE(VISA_BUILDER_IR_CONSTRUCTION);

    G4_ExecSize execSize {Get_VISA_Exec_Size(executionSize)};
    G4_InstOpts instOpt = Get_Gen4_Emask(emask, execSize);

    if (numOut == 0)
    {
        MUST_BE_TRUE(vertexData->isNullReg(), "vertex payload must be null ARF when numOut is 0");
    }

    // header + channelMask + numOut
    unsigned int numRows = 2 + numOut;
    const bool useHeader = true;
    bool usePerSlotIndex = false;
    bool useChannelMask = true;

    if (!perSlotOffset->isNullReg())
    {
        usePerSlotIndex = true;
        numRows++;
    }

    if (channelMask->isNullReg())
    {
        useChannelMask = false;
        numRows--;
    }

    bool useSplitSend = useSends();
    // So far, we don't have a obvious cut except for header. As the result,
    // split-send is disabled once there's no header in the message.
    if (!useHeader)
        useSplitSend = false;

    if (numOut == 0)
    {
        // no split send if payload is null
        useSplitSend = false;
    }

    // msg is the header for split send, or the entire payload for regular send
    G4_Declare *msg = NULL;
    G4_Declare* payloadF = NULL;
    G4_Declare* payloadD = NULL;
    G4_Declare* payloadUD = NULL;
    if (useSplitSend)
    {
        ASSERT_USER(useHeader, "So far, split-send is only used when header is present!");
        --numRows;
        if (numRows > 0)
        {
            unsigned int numElts = numRows * numEltPerGRF<Type_UB>()/TypeSize(Type_F);
            // we can use the urb handle directly since URB write will not modify its header
            //msg = createSendPayloadDcl(GENX_SAMPLER_IO_SZ, Type_UD);
            payloadUD = createSendPayloadDcl(numElts, Type_UD);
            payloadF = createSendPayloadDcl(numElts, Type_F);
            payloadD = createSendPayloadDcl(numElts, Type_D);
            payloadF->setAliasDeclare(payloadUD, 0);
            payloadD->setAliasDeclare(payloadUD, 0);
        }
    }
    else
    {
        unsigned int numElts = numRows * numEltPerGRF<Type_UB>()/TypeSize(Type_F);
        msg = createSendPayloadDcl(numElts, Type_UD);
        if (numRows > 1)
        {
            payloadUD = createSendPayloadDcl(numElts - (useHeader ? GENX_SAMPLER_IO_SZ : 0), Type_UD);
            payloadF = createSendPayloadDcl(numElts - (useHeader ? GENX_SAMPLER_IO_SZ : 0), Type_F);
            payloadD = createSendPayloadDcl(numElts - (useHeader ? GENX_SAMPLER_IO_SZ : 0), Type_D);
            payloadUD->setAliasDeclare(msg, useHeader ? numEltPerGRF<Type_UB>() : 0);
            payloadF->setAliasDeclare(msg, useHeader ? numEltPerGRF<Type_UB>() : 0);
            payloadD->setAliasDeclare(msg, useHeader ? numEltPerGRF<Type_UB>() : 0);
        }
    }

    unsigned int regOff = 0;
    // Setup header
    if (useHeader && msg != NULL)
    {
        unsigned ignoredOff = 0;
        Copy_SrcRegRegion_To_Payload(msg, ignoredOff, urbHandle, g4::SIMD8, instOpt);
    }

    if (usePerSlotIndex)
    {
        Copy_SrcRegRegion_To_Payload(payloadUD, regOff, perSlotOffset, g4::SIMD8, instOpt);
    }

    if (useChannelMask)
    {
        // shl (8) M2.0<1>:ud cmask<8;8,1>:ud 0x10:uw
        auto payloadUDRegRgnRow2 = createDst(payloadUD->getRegVar(), regOff++, 0, 1, Type_UD);

        createBinOp(G4_shl, g4::SIMD8, payloadUDRegRgnRow2, channelMask, createImm(16, Type_UW), instOpt, true);
    }

    G4_Declare* vertexDataDcl = numOut == 0 ? NULL : vertexData->getBase()->asRegVar()->getDeclare();

    bool needsDataMove = (!useSplitSend || usePerSlotIndex || useChannelMask);
    if (needsDataMove)
    {
        // we have to insert moves to make payload contiguous
        unsigned int startSrcRow = vertexData->getRegOff();

        for (int i = 0; i < numOut; i++)
        {
            G4_DstRegRegion payloadTypedRegRowi(*this, Direct, payloadF->getRegVar(), regOff++, 0, 1, Type_F);
            G4_DstRegRegion* payloadTypedRegRowRgni = createDstRegRegion(payloadTypedRegRowi);

            G4_SrcRegRegion* vertexSrcRegRgnRowi = createSrc(vertexDataDcl->getRegVar(), startSrcRow++, 0, getRegionStride1(), Type_F);

            createMov(g4::SIMD8, payloadTypedRegRowRgni, vertexSrcRegRgnRowi, instOpt, true);
        }
    }
    else
    {
        payloadUD = vertexDataDcl;
    }

    // Msg descriptor
    unsigned int fc = 0;

    fc |= 0x7;

    fc |= (globalOffset << 4);

    if (useChannelMask)
    {
        fc |= (0x1 << 15);
    }

    if (usePerSlotIndex)
    {
        fc |= (0x1 << 17);
    }

    if (useSplitSend)
    {
        G4_SrcRegRegion *m0 = urbHandle;
        G4_SrcRegRegion *m1 = nullptr;

        if (needsDataMove)
        {
            m1 = createSrcRegRegion(payloadUD, getRegionStride1());
        }
        else
        {
            ASSERT_USER(payloadUD == vertexDataDcl, "If there is no need for data move then payloadUD == vertexDataDcl must hold!");
            m1 = createSrc(payloadUD->getRegVar(), vertexData->getRegOff(), vertexData->getSubRegOff(), getRegionStride1(), payloadUD->getElemType());
        }
        createSplitSendInst(pred, createNullDst(Type_UD), m0, 1,
m1, numRows, 0, execSize, fc, SFID::URB, useHeader, SendAccess::WRITE_ONLY, NULL, NULL, instOpt, false); } else { G4_SrcRegRegion *m = createSrcRegRegion(msg, getRegionStride1()); createSendInst(pred, createNullDst(Type_UD), m, numRows, 0, execSize, fc, SFID::URB, useHeader, SendAccess::WRITE_ONLY, nullptr, nullptr, instOpt, false); } return VISA_SUCCESS; } /*****************************************************************************\ ENUM: EU_GEN6_DATA_PORT_RENDER_TARGET_WRITE_CONTROL \*****************************************************************************/ enum EU_GEN6_DATA_PORT_RENDER_TARGET_WRITE_CONTROL { EU_GEN6_DATA_PORT_RENDER_TARGET_WRITE_CONTROL_SIMD16_SINGLE_SOURCE = 0, EU_GEN6_DATA_PORT_RENDER_TARGET_WRITE_CONTROL_SIMD16_SINGLE_SOURCE_REPLICATED = 1, EU_GEN6_DATA_PORT_RENDER_TARGET_WRITE_CONTROL_SIMD8_DUAL_SOURCE_LOW = 2, EU_GEN6_DATA_PORT_RENDER_TARGET_WRITE_CONTROL_SIMD8_DUAL_SOURCE_HIGH = 3, EU_GEN6_DATA_PORT_RENDER_TARGET_WRITE_CONTROL_SIMD8_SINGLE_SOURCE_LOW = 4, EU_GEN6_DATA_PORT_RENDER_TARGET_WRITE_CONTROL_SIMD8_IMAGE_WRITE = 5 }; int IR_Builder::translateVISARTWrite3DInst( G4_Predicate* pred, VISA_Exec_Size executionSize, VISA_EMask_Ctrl emask, G4_Operand *surface, G4_SrcRegRegion *r1HeaderOpnd, G4_Operand *rtIndex, vISA_RT_CONTROLS cntrls, G4_SrcRegRegion *sampleIndexOpnd, G4_Operand *cpsCounter, unsigned int numParms, G4_SrcRegRegion ** msgOpnds) { TIME_SCOPE(VISA_BUILDER_IR_CONSTRUCTION); G4_ExecSize execSize = toExecSize(executionSize); G4_InstOpts instOpt = Get_Gen4_Emask(emask, execSize); bool useHeader = false; uint8_t varOffset = 0; G4_SrcRegRegion * s0a = NULL; //oMask G4_SrcRegRegion * oM = NULL; if (cntrls.s0aPresent) { s0a = msgOpnds[varOffset]; ++varOffset; } if (cntrls.oMPresent) { oM = msgOpnds[varOffset]; ++varOffset; } G4_SrcRegRegion * R = msgOpnds[varOffset++]; G4_SrcRegRegion * G = msgOpnds[varOffset++]; G4_SrcRegRegion * B = msgOpnds[varOffset++]; G4_SrcRegRegion * A = msgOpnds[varOffset++]; //depth G4_SrcRegRegion * 
Z = NULL; if (cntrls.zPresent) Z = msgOpnds[varOffset++]; //stencil G4_SrcRegRegion * S = NULL; if (cntrls.isStencil) { S = msgOpnds[varOffset++]; } if (varOffset != numParms) { assert(0); return VISA_FAILURE; } bool FP16Data = R->getType() == Type_HF; if (FP16Data) { MUST_BE_TRUE((G->isNullReg() || G->getType() == Type_HF) && (B->isNullReg() || B->getType() == Type_HF) && (A->isNullReg() || A->getType() == Type_HF), "R,G,B,A for RT write must have the same type"); } auto mult = (execSize == getNativeExecSize() ? 1 : 2); mult = (FP16Data)? 1 : mult; //RGBA sr0Alpha take up one GRF in SIMD8 and SIMD16 modes. //in SIMD8 upper DWORDs are reserved unsigned int numRows = numParms * mult; //Depth is always Float //For SIMD16 it is 2 grfs //For SIMD8 it is 1 grf if (FP16Data && cntrls.zPresent && executionSize == EXEC_SIZE_16) { ++numRows; } if (cntrls.oMPresent && mult == 2) { // oM is always 1 row irrespective of execSize numRows--; } //although for now HW only supports stencil in SIMD8 mode if (cntrls.isStencil && mult == 2) { // stencil is always 1 row irrespective of execSize numRows--; } // header is always 64 byte const int numDWInHeader = 16; const int headerBytes = numDWInHeader * sizeof(int); const int numHeaderGRF = numDWInHeader / getNativeExecSize(); /* All other values should be set by default. Most of the time when renderTargetIndex != 0, src0Alpha is present also */ bool isRTIdxNonzero = cntrls.RTIndexPresent && (rtIndex->isSrcRegRegion() || (rtIndex->isImm() && rtIndex->asImm()->getImm() != 0)); bool isRTIdxDynamic = cntrls.RTIndexPresent && rtIndex->isSrcRegRegion(); bool needsHeaderForMRT = isRTIdxDynamic || cntrls.s0aPresent || (!hasHeaderlessMRTWrite() && isRTIdxNonzero); if (needsHeaderForMRT || cntrls.isSampleIndex) { useHeader = true; numRows += numHeaderGRF; } bool useSplitSend = useSends(); // So far, we don't have a obvious cut except for header. As the result, // split-send is disabled once there's no header in the message. 
G4_SrcRegRegion* srcToUse = NULL; G4_Declare *msg = NULL; G4_Declare *msgF = NULL; G4_Declare *payloadUD = NULL; G4_Declare *payloadUW = NULL; G4_Declare *payloadFOrHF = NULL; G4_Declare *payloadF = NULL; if (useSplitSend) { if (useHeader) { //subtracting Header numRows -= numHeaderGRF; //creating header msg = createSendPayloadDcl(numDWInHeader, Type_UD); msgF = createSendPayloadDcl(numDWInHeader, Type_F); msgF->setAliasDeclare(msg, 0); } //creating payload unsigned int numElts = numRows * numEltPerGRF<Type_UB>() / TypeSize(Type_F); payloadUD = createSendPayloadDcl(numElts, Type_UD); payloadFOrHF = createSendPayloadDcl(numElts, FP16Data ? Type_HF : Type_F); payloadUW = createSendPayloadDcl(numElts, Type_UW); payloadF = createSendPayloadDcl(numElts, Type_F); payloadFOrHF->setAliasDeclare(payloadUD, 0); payloadUW->setAliasDeclare(payloadUD, 0); payloadF->setAliasDeclare(payloadUD, 0); } else { unsigned int numElts = numRows * numEltPerGRF<Type_UB>()/TypeSize(Type_F); //creating enough space for header + payload msg = createSendPayloadDcl(numElts, Type_UD); msgF = createSendPayloadDcl(GENX_SAMPLER_IO_SZ * 2, Type_F); msgF->setAliasDeclare(msg, 0); //creating payload declarations. payloadUD = createSendPayloadDcl(numElts - (useHeader ? numDWInHeader : 0), Type_UD); payloadFOrHF = createSendPayloadDcl(numElts - (useHeader ? numDWInHeader : 0), FP16Data ? Type_HF : Type_F); payloadUW = createSendPayloadDcl(numElts - (useHeader ? numDWInHeader : 0), Type_UW); payloadF = createSendPayloadDcl(numElts, Type_F); //setting them to alias a top level decl with offset past the header payloadUD->setAliasDeclare(msg, useHeader ? headerBytes : 0); payloadFOrHF->setAliasDeclare(msg, useHeader ? headerBytes : 0); payloadUW->setAliasDeclare(msg, useHeader ? 
headerBytes : 0); payloadF->setAliasDeclare(payloadUD, 0); } if (useHeader) { ASSERT_USER(r1HeaderOpnd, "Second GRF for header that was passed in is NULL."); G4_DstRegRegion* payloadRegRgn = createDst(msg->getRegVar(), 0, 0, 1, Type_UD); G4_Declare* r0 = getBuiltinR0(); G4_SrcRegRegion* r0RegRgn = createSrc(r0->getRegVar(), 0, 0, getRegionStride1(), Type_UD); //moves data from r0 to header portion of the message G4_INST* movInst = createMov(g4::SIMD8, payloadRegRgn, r0RegRgn, InstOpt_NoOpt, true); movInst->setOptionOn(InstOpt_WriteEnable); payloadRegRgn = createDst(msg->getRegVar(), 1, 0, 1, Type_UD); r1HeaderOpnd->setType(*this, Type_UD); movInst = createMov(g4::SIMD8, payloadRegRgn, r1HeaderOpnd, InstOpt_NoOpt, true); movInst->setOptionOn(InstOpt_WriteEnable); #define SAMPLE_INDEX_OFFSET 6 if (cntrls.isSampleIndex) { G4_Declare* tmpDcl = createTempVar(2, Type_UD, Any); G4_DstRegRegion* tmpDst = createDst(tmpDcl->getRegVar(), 0, 0, 1, Type_UD); createBinOp(G4_shl, g4::SIMD1, tmpDst, sampleIndexOpnd, createImm(SAMPLE_INDEX_OFFSET, Type_UD), InstOpt_WriteEnable, true); G4_DstRegRegion* payloadUDRegRgn = createDst(msg->getRegVar(), 0, 0, 1, Type_UD); G4_SrcRegRegion* tmpSrc = createSrc(tmpDcl->getRegVar(), 0, 0, getRegionScalar(), Type_UD); G4_SrcRegRegion* payloadSrc = createSrc(msg->getRegVar(), 0, 0, getRegionScalar(), Type_UD); createBinOp(G4_or, g4::SIMD1, payloadUDRegRgn, payloadSrc, tmpSrc, InstOpt_WriteEnable, true); } if (isRTIdxNonzero) { G4_DstRegRegion* dstRTIRgn = createDst(msg->getRegVar(), 0, 2, 1, Type_UD); G4_INST* rtiMovInst = createMov(g4::SIMD1, dstRTIRgn, rtIndex, InstOpt_NoOpt, true); rtiMovInst->setOptionOn(InstOpt_WriteEnable); } //if header is used, then predication value will need to be stored //in the header if (useHeader && (pred || cntrls.isHeaderMaskfromCe0)) { //moving pixelMask in to payload G4_DstRegRegion* dstPixelMaskRgn = createDst( msg->getRegVar(), 1, 14, 1, Type_UW); // setPixelMaskRgn when WA ce0 is needed auto setPixelMaskRgn 
= [=](G4_InstOption Option) -> void { G4_Declare* flagDecl = createTempFlag(2, "WAce0"); G4_RegVar* flagVar = flagDecl->getRegVar(); G4_DstRegRegion* flag = createDst( flagVar, 0, Option == InstOpt_M16 ? 1 : 0, 1, Type_UW); // (1) (W) mov (1|M0) WAce0.[0|1]:uw, 0 // M0 : WAce0.0; M16 : WAce0.1 // (2) cmp (16|[M0|M16]) (eq)WAce0.0 r0:uw r0:uw // (3) (W) mov(1|M0) dstPixelMaskRgn:uw WAce0.[0|1]:uw // M0 : WAce0.0; M16 : WAce0.1 createMov(g4::SIMD1, flag, createImm(0, Type_UW), InstOpt_WriteEnable, true); G4_SrcRegRegion* r0_0 = createSrc( getRealR0()->getRegVar(), 0, 0, getRegionStride1(), Type_UW); G4_SrcRegRegion* r0_1 = createSrc( getRealR0()->getRegVar(), 0, 0, getRegionStride1(), Type_UW); G4_DstRegRegion* nullDst = createNullDst(Type_UW); G4_CondMod* flagCM = createCondMod(Mod_e, flagVar, 0); createInst(NULL, G4_cmp, flagCM, g4::NOSAT, g4::SIMD16, nullDst, r0_0, r0_1, Option, true); G4_SrcRegRegion* flagSrc = createSrc( flagVar, 0, Option == InstOpt_M16 ? 1 : 0, getRegionScalar(), Type_UW); // move to dstPixelMaskRgn createMov(g4::SIMD1, dstPixelMaskRgn, flagSrc, InstOpt_WriteEnable, true); }; G4_SrcRegRegion* pixelMask = NULL; if (emask == vISA_EMASK_M5_NM || emask == vISA_EMASK_M5) { if (pred) { //this is a Second half of a SIMD32 RT write. We need to get second half of flag register. //mov whole register in to GRF, move second word of it in to payload. 
G4_SrcRegRegion* pixelMaskTmp = createSrc( pred->getBase()->asRegVar(), 0, 0, getRegionScalar(), Type_UD); G4_Declare* tmpDcl = createTempVar(1, Type_UD, Any); G4_DstRegRegion* tmpDst = createDst(tmpDcl->getRegVar(), 0, 0, 1, Type_UD); createMov(g4::SIMD1, tmpDst, pixelMaskTmp, InstOpt_WriteEnable, true); pixelMask = createSrc( tmpDcl->getRegVar(), 0, 1, getRegionScalar(), Type_UW); // move from temp register to header createMov(g4::SIMD1, dstPixelMaskRgn, pixelMask, InstOpt_WriteEnable, true); } else { if (VISA_WA_CHECK(getPWaTable(), Wa_1406950495)) { setPixelMaskRgn(InstOpt_M16); } else { G4_SrcRegRegion* ce0 = createSrc( phyregpool.getMask0Reg(), 0, 0, getRegionScalar(), Type_UD); // shr .14<1>:uw ce0:ud 16:uw createBinOp(G4_shr, g4::SIMD1, dstPixelMaskRgn, ce0, createImm(16, Type_UW), InstOpt_WriteEnable, true); } } } else { if (pred) { pixelMask = createSrc( pred->getBase()->asRegVar(), 0, 0, getRegionScalar(), Type_UW); //clearing lower 15 bits createMov(g4::SIMD1, dstPixelMaskRgn, pixelMask, InstOpt_WriteEnable, true); } else { if (VISA_WA_CHECK(getPWaTable(), Wa_1406950495)) { setPixelMaskRgn(InstOpt_M0); } else { G4_SrcRegRegion* ce0 = createSrc( phyregpool.getMask0Reg(), 0, 0, getRegionScalar(), Type_UD); // mov .14<1>:uw ce0:ud. 
clearing lower 15 bits createMov(g4::SIMD1, dstPixelMaskRgn, ce0, InstOpt_WriteEnable, true); } } } pred = NULL; } unsigned int orImmVal = 0; //setting first DWORD of MHC_RT_C0 - Render Target Message Header Control if (cntrls.isStencil) { orImmVal = (0x1 << 14); } if (cntrls.zPresent) { orImmVal = (0x1 << 13); } if (cntrls.oMPresent) { orImmVal |= (0x1 << 12); } if (cntrls.s0aPresent) { orImmVal |= (0x1 << 11); } if (orImmVal != 0) { G4_SrcRegRegion* immSrcRegRgn = createSrc(msg->getRegVar(), 0, 0, getRegionScalar(), Type_UD); G4_DstRegRegion* immDstRegRgn = createDst(msg->getRegVar(), 0, 0, 1, Type_UD); G4_INST* immOrInst = createBinOp(G4_or, g4::SIMD1, immDstRegRgn, immSrcRegRgn, createImm(orImmVal, Type_UD), InstOpt_WriteEnable, true); immOrInst->setOptionOn(InstOpt_WriteEnable); } } // Check whether coalescing is possible #define UNINITIALIZED_DWORD 0xffffffff unsigned int offset = UNINITIALIZED_DWORD; // If the header is not present or split-send is available, we will try to // coalesc payload by checking whether the source is already prepared in a // continuous region. If so, we could reuse the source region directly // instead of copying it again. 
bool canCoalesce = !useHeader || useSplitSend; G4_SrcRegRegion* prevRawOpnd = NULL; if (R->isNullReg() || G->isNullReg() || B->isNullReg() || A->isNullReg()) canCoalesce = false; if (canCoalesce && cntrls.s0aPresent) { prevRawOpnd = s0a; offset = getByteOffsetSrcRegion(s0a); } if (canCoalesce && cntrls.oMPresent) { //by default it will check based on first opnd type, but that can be HF, F, we need second operand type //according to spec oM is UW canCoalesce = checkIfRegionsAreConsecutive(prevRawOpnd, oM, execSize, oM->getType()); prevRawOpnd = oM; if (offset == UNINITIALIZED_DWORD) { offset = getByteOffsetSrcRegion(oM); } } if (canCoalesce) { if (execSize == 16 && cntrls.oMPresent) { // oM is 1 GRF for SIMD16 since it is UW type canCoalesce = checkIfRegionsAreConsecutive(oM, R, execSize, Type_UW); prevRawOpnd = R; } else { canCoalesce = checkIfRegionsAreConsecutive(prevRawOpnd, R, execSize); prevRawOpnd = R; } if (offset == UNINITIALIZED_DWORD) { offset = getByteOffsetSrcRegion(prevRawOpnd); } if (canCoalesce) { auto tempExecSize = execSize; if (FP16Data && execSize == 8) tempExecSize = g4::SIMD16; canCoalesce = checkIfRegionsAreConsecutive(prevRawOpnd, G, tempExecSize) && checkIfRegionsAreConsecutive(G, B, tempExecSize) && checkIfRegionsAreConsecutive(B, A, tempExecSize); prevRawOpnd = A; if (offset == UNINITIALIZED_DWORD) { offset = getByteOffsetSrcRegion(A); if (FP16Data && execSize == g4::SIMD8) offset += 8; } } } if (canCoalesce && cntrls.zPresent) { canCoalesce = checkIfRegionsAreConsecutive(prevRawOpnd, Z, execSize); prevRawOpnd = Z; } if (canCoalesce && cntrls.isStencil) { canCoalesce = checkIfRegionsAreConsecutive(prevRawOpnd, S, execSize); prevRawOpnd = S; } if (canCoalesce == false) { // Copy parms to payload unsigned regOff = 0; if (cntrls.s0aPresent) { Copy_SrcRegRegion_To_Payload(payloadFOrHF, regOff, s0a, execSize, instOpt); } if (cntrls.oMPresent) { Copy_SrcRegRegion_To_Payload(payloadUW, regOff, oM, execSize, instOpt); 
//Copy_SrcRegRegion_To_Payload increments regOff by 1 if byteSize ==2 //works for oM since in SIMD16 it occupies one GRF } // When RT write is HF s0a,R, G, B, A are allowed to be HF. // In SIMD8 upper DWORDS are reserved. // In SIMD16 uppder DOWRDS contain second grf worth of values if type was F. // // Output can be only Depth, so V0 is passed in if RGBA don't need to be outputted auto offIncrement = 2; if (execSize == 8 || FP16Data) offIncrement = 1; if (!R->isNullReg()) Copy_SrcRegRegion_To_Payload(payloadFOrHF, regOff, R, execSize, instOpt); else regOff+= offIncrement; if (!G->isNullReg()) Copy_SrcRegRegion_To_Payload(payloadFOrHF, regOff, G, execSize, instOpt); else regOff+= offIncrement; if (!B->isNullReg()) Copy_SrcRegRegion_To_Payload(payloadFOrHF, regOff, B, execSize, instOpt); else regOff+= offIncrement; if (!A->isNullReg()) Copy_SrcRegRegion_To_Payload(payloadFOrHF, regOff, A, execSize, instOpt); else regOff += offIncrement; if (cntrls.zPresent) { Copy_SrcRegRegion_To_Payload(payloadF, regOff, Z, execSize, instOpt); } if (cntrls.isStencil) { Copy_SrcRegRegion_To_Payload(payloadFOrHF, regOff, S, execSize, InstOpt_WriteEnable); } srcToUse = createSrcRegRegion(payloadUD, getRegionStride1()); } else { // Coalesce and directly use original raw operand G4_Declare *dcl = R->getBase()->asRegVar()->getDeclare(); srcToUse = createSrc(dcl->getRegVar(), offset / 32, 0, getRegionStride1(), R->getType()); } // Now create message message descriptor // 7:0 - BTI // 10:8 - Render Target Message Subtype // 11 - Slot Group Select // 12 - Last Render Target Select // 13 - Reserved (DevBDW) // 13 - Per-Sample PS Outputs Enable (DevSKL+) // 17:14 - Message Type // 18 - Reserved // 19 - Header Present // 24:20 - Response Length // 28:25 - Message Length // 29 - Reserved // 30 - Message Precision Subtype (DevBDW+) // 31 - Reserved (MBZ) unsigned int fc = 0; //making explicit EU_GEN6_DATA_PORT_RENDER_TARGET_WRITE_CONTROL messageType = (executionSize == EXEC_SIZE_8) ? 
EU_GEN6_DATA_PORT_RENDER_TARGET_WRITE_CONTROL_SIMD8_SINGLE_SOURCE_LOW : EU_GEN6_DATA_PORT_RENDER_TARGET_WRITE_CONTROL_SIMD16_SINGLE_SOURCE; #define RENDER_TARGET_MESSAGE_SUBTYPE_OFFSET 8 fc |= (messageType << RENDER_TARGET_MESSAGE_SUBTYPE_OFFSET); #define SLOT_GROUP_SELECT_OFFSET 11 //for SIMD32 for second RT Write setting this bit if (emask == vISA_EMASK_M5_NM || emask == vISA_EMASK_M5) fc |= (0x1 << SLOT_GROUP_SELECT_OFFSET); if (cntrls.isLastWrite) { #define LAST_RENDER_TARGET_SELECT_OFFSET 12 fc |= (0x1 << LAST_RENDER_TARGET_SELECT_OFFSET); } if (cntrls.isPerSample) { #define PER_SAMPLE_PS_ENABLE_OFFSET 13 fc += (0x1 << PER_SAMPLE_PS_ENABLE_OFFSET); } if (FP16Data) { fc |= 0x1 << MESSAGE_PRECISION_SUBTYPE_OFFSET; } #define MESSAGE_TYPE 14 fc |= (0xc << MESSAGE_TYPE); #define COARSE_PIXEL_OUTPUT_ENABLE 18 if (cntrls.isCoarseMode) fc |= 0x1 << COARSE_PIXEL_OUTPUT_ENABLE; #define CPS_COUNTER_EXT_MSG_DESC_OFFSET 16 uint16_t extFuncCtrl = 0; if (cntrls.isNullRT && getPlatform() >= GENX_TGLLP) { // extFuncCtrl is the 16:31 bits of extDesc. NullRT is the bit 20 of extDesc. // That says NullRT is the bit 4 of extFuncCtrl. 
#define NULL_RENDER_TARGET 4 extFuncCtrl |= 0x1 << NULL_RENDER_TARGET; } if (useSplitSend || cpsCounter) { G4_SendDescRaw *msgDesc = NULL; G4_SrcRegRegion *m0 = NULL; bool indirectExDesc = false; if (useHeader) { m0 = createSrcRegRegion(msg, getRegionStride1()); msgDesc = createSendMsgDesc(fc, 0, numHeaderGRF, SFID::DP_WRITE, numRows, extFuncCtrl, SendAccess::WRITE_ONLY, surface); msgDesc->setHeaderPresent(useHeader); } else { if (!isRTIdxNonzero && !cntrls.s0aPresent) { // direct imm is a-ok for ext desc msgDesc = createSendMsgDesc(fc, 0, numRows, SFID::DP_WRITE, 0, extFuncCtrl, SendAccess::WRITE_ONLY, surface); } else { assert(rtIndex->isImm() && "RTIndex must be imm at this point"); uint8_t RTIndex = (uint8_t)rtIndex->asImm()->getImm() & 0x7; uint32_t desc = G4_SendDescRaw::createDesc(fc, false, numRows, 0); uint32_t extDesc = G4_SendDescRaw::createMRTExtDesc(cntrls.s0aPresent, RTIndex, false, 0, extFuncCtrl); msgDesc = createGeneralMsgDesc(desc, extDesc, SendAccess::WRITE_ONLY, surface); if (!canEncodeFullExtDesc()) { // we must use a0 for extended msg desc in this case as there aren't enough bits to encode // the full ext desc // mov (1) a0.2:ud extDesc G4_DstRegRegion* dst = createDstRegRegion(getBuiltinA0Dot2(), 1); createMov(g4::SIMD1, dst, createImm(extDesc, Type_UD), InstOpt_WriteEnable, true); indirectExDesc = true; } } } /* If we need to set cps counter then ext_message descriptor needs to be a register. 
*/ if (cpsCounter) { ASSERT_USER(hasCPS(), "CPS counter is not supported"); unsigned msgDescValue = msgDesc->getExtendedDesc(); //shifting CPS counter by appropriate number of bits and storing in ext_descriptor operand G4_DstRegRegion *dstMove2 = createDstRegRegion(getBuiltinA0Dot2(), 1); G4_Imm *immedOpnd = createImm(msgDescValue, Type_UD); ///setting lower bits createBinOp(G4_or, g4::SIMD1, dstMove2, cpsCounter, immedOpnd, InstOpt_WriteEnable, true); indirectExDesc = true; } if (!useHeader) { m0 = srcToUse; srcToUse = createNullSrc(Type_UD); } createSplitSendToRenderTarget( pred, createNullDst(Type_UD), m0, srcToUse, indirectExDesc ? createSrcRegRegion(getBuiltinA0Dot2(), getRegionScalar()) : nullptr, execSize, msgDesc, instOpt); } else { G4_SrcRegRegion *m = srcToUse; if (useHeader) m = createSrcRegRegion(msg, getRegionStride1()); createSendInst(pred, createNullDst(Type_UD), m, numRows, 0, execSize, fc, SFID::DP_WRITE, useHeader, SendAccess::WRITE_ONLY, surface, NULL, instOpt, true); } return VISA_SUCCESS; } // Bit 15 of aoffimmi is set in messages with sampler index >= 16. static bool IsSamplerIndexGE16(G4_Operand* aoffimmi) { bool ret = false; if (aoffimmi && aoffimmi->isImm()) { const uint16_t aoffimmiVal = (uint16_t)aoffimmi->asImm()->getInt(); ret = (aoffimmiVal & 0x8000) != 0; } return ret; } // return the contents of M0.2 for sampler messages. 
It must be an immediate value static uint32_t createSampleHeader0Dot2(VISASampler3DSubOpCode op, bool pixelNullMask, uint16_t aoffimmi, ChannelMask channels, IR_Builder* builder) { uint32_t secondDword = aoffimmi & 0xfff; switch (op) { case VISA_3D_GATHER4: //gather4 source channel select secondDword |= (channels.getSingleChannel() << 16); break; case VISA_3D_GATHER4_PO: if (builder->hasGather4PO()) { secondDword |= (channels.getSingleChannel() << 16); } break; case VISA_3D_GATHER4_PO_C: break; case VISA_3D_GATHER4_C: // do nothing as channle must be Red (0) break; default: // RGBA write channel mask secondDword |= (channels.getHWEncoding() << 12); break; } // M0.2:23, Pixel Null Mask Enable. // Only valid for SKL+, and ignored otherwise. if (builder->hasPixelNullMask() && pixelNullMask) { secondDword |= 1 << 23; } return secondDword; } // // Coarse Pixel Shading(CPS) LOD compensation enable. // // - must be disabled if the response length of the message is zero; // - must be disabled if the messages is from a 32-pixel dispatch thread; // - must be disabled unless SIMD Mode is SIMD8* or SIMD16*; // - only available for sample, sample_b, sample_bc, sample_c, and LOD. 
// static void checkCPSEnable(VISASampler3DSubOpCode op, unsigned reponseLength, unsigned execSize) { ASSERT_USER(reponseLength > 0, "CPS LOD Compensation Enable must be disabled if the " "response length is zero"); ASSERT_USER(execSize == 8 || execSize == 16, "CPS LOD Compensation Enable only valid for SIMD8* or SIMD16*"); ASSERT_USER(op == VISA_3D_SAMPLE || op == VISA_3D_SAMPLE_B || op == VISA_3D_SAMPLE_C || op == VISA_3D_SAMPLE_B_C || op == VISA_3D_LOD, "CPD LOD Compensation Enable only available for " "sample, sample_b, sample_bc, sample_c and LOD"); } static G4_Operand* createSampleHeader( IR_Builder* builder, G4_Declare* header, VISASampler3DSubOpCode actualop, bool pixelNullMask, G4_Operand* aoffimmi, ChannelMask srcChannel, G4_Operand* sampler) { G4_Operand* retSampler = sampler; uint16_t aoffimmiVal = aoffimmi->isImm() ? (uint16_t)aoffimmi->asImm()->getInt() : 0; unsigned int secondDword = createSampleHeader0Dot2(actualop, pixelNullMask, aoffimmiVal, srcChannel, builder); G4_Imm* immOpndSecondDword = builder->createImm(secondDword, Type_UD); G4_DstRegRegion* payloadDstRgn = builder->createDst(header->getRegVar(), 0, 2, 1, Type_UD); if (aoffimmi->isImm()) { // mov (1) payload(0,2) immOpndSecondDword builder->createMov(g4::SIMD1, payloadDstRgn, immOpndSecondDword, InstOpt_WriteEnable, true); } else { // or (1) payload(0,2) aoffimmi<0;1,0>:uw immOpndSeconDword builder->createBinOp(G4_or, g4::SIMD1, payloadDstRgn, aoffimmi, immOpndSecondDword, InstOpt_WriteEnable, true); } if (sampler != nullptr) { builder->doSamplerHeaderMove(header, sampler); // Use bit 15 of aoffimmi to tell VISA the sample index could be greater // than 15. In this case, we need to use msg header, and setup M0.3 // to point to next 16 sampler state. 
if (IsSamplerIndexGE16(aoffimmi)) { retSampler = builder->emitSampleIndexGE16(sampler, header); } } return retSampler; } static bool needsNoMaskCoordinates(VISASampler3DSubOpCode opcode) { return opcode == VISA_3D_SAMPLE || opcode == VISA_3D_SAMPLE_B || opcode == VISA_3D_SAMPLE_C || opcode == VISA_3D_SAMPLE_B_C || opcode == VISA_3D_LOD || opcode == VISA_3D_SAMPLE_KILLPIX; } static uint8_t getUPosition(VISASampler3DSubOpCode opcode) { uint8_t position = 0; switch (opcode) { case VISA_3D_SAMPLE: case VISA_3D_LOD: case VISA_3D_SAMPLE_D: case VISA_3D_SAMPLE_LZ: case VISA_3D_SAMPLE_KILLPIX: position = 0; break; case VISA_3D_SAMPLE_B: case VISA_3D_SAMPLE_L: case VISA_3D_SAMPLE_C: case VISA_3D_SAMPLE_D_C: case VISA_3D_SAMPLE_C_LZ: position = 1; break; case VISA_3D_SAMPLE_B_C: case VISA_3D_SAMPLE_L_C: position = 2; break; default: MUST_BE_TRUE(false, "unexpected sampler operation"); return 0; } return position; } static void setUniformSampler(G4_InstSend* sendInst, bool uniformSampler) { if (!uniformSampler) { sendInst->setSerialize(); } } /* Need to split sample_d and sample_dc in to two simd8 sends since HW doesn't support it. Also need to split any sample instruciton that has more then 5 parameters. Since there is a limit on msg length. */ static unsigned TmpSmplDstID = 0; // TODO: use IR_Builder::getNameString.... const char* getNameString( Mem_Manager& mem, size_t size, const char* format, ...) { #ifdef _DEBUG char* name = (char*) mem.alloc(size); va_list args; va_start(args, format); std::vsnprintf(name, size, format, args); va_end(args); return name; #else const char* name = ""; return const_cast<char*>(name); #endif } // split simd32/16 sampler messages into simd16/8 messages due to HW limitation. 
// Lowers one logical SIMD(2*native) sampler message into two SIMD(native)
// sends: builds two payloads (the second reading the upper half of each
// parameter), issues two sends with Q1/Q2 emasks, and interleaves the two
// temporary destinations back into the caller's dst.
int IR_Builder::splitSampleInst(
    VISASampler3DSubOpCode actualop,
    bool pixelNullMask,
    bool cpsEnable,
    G4_Predicate* pred,
    ChannelMask srcChannel,
    int numChannels,
    G4_Operand *aoffimmi,
    G4_Operand *sampler,
    G4_Operand *surface,
    G4_DstRegRegion* dst,
    VISA_EMask_Ctrl emask,
    bool useHeader,
    unsigned numRows, // msg length for each simd8
    unsigned int numParms,
    G4_SrcRegRegion ** params,
    bool uniformSampler)
{
    int status = VISA_SUCCESS;
    // Upper-half source regions for the second send (one per parameter).
    G4_SrcRegRegion *secondHalf[12];

    bool isHalfReturn = dst->getTypeSize() == 2;
    const bool halfInput = params[0]->getTypeSize() == 2;

    // Now, depending on message type emit out parms to payload
    unsigned regOff = (useHeader ? 1 : 0);
    G4_SrcRegRegion* temp = nullptr;
    G4_ExecSize execSize = getNativeExecSize();
    uint16_t numElts = numRows * numEltPerGRF<Type_F>();
    G4_Declare* payloadF = createSendPayloadDcl(numElts, Type_F);
    G4_Declare* payloadUD = createTempVar(numElts, Type_UD, GRFALIGN);
    payloadUD->setAliasDeclare(payloadF, 0);
    G4_SrcRegRegion* srcToUse = createSrc(payloadUD->getRegVar(), 0, 0, getRegionStride1(), Type_UD);

    // even though we only use lower half of the GRF, we have to allocate full GRF
    G4_Declare* payloadHF = createTempVar(numElts * 2, Type_HF, Any);
    payloadHF->setAliasDeclare(payloadF, 0);

    /********* Creating temp destination, since results are interleaved **************/
    G4_DstRegRegion *dst1 = createNullDst(dst->getType());
    G4_Declare * originalDstDcl = nullptr;
    G4_Declare* tempDstDcl = nullptr;
    bool pixelNullMaskEnable = false;
    unsigned tmpDstRows = 0;
    if (!dst->isNullReg())
    {
        originalDstDcl = dst->getBase()->asRegVar()->getDeclare();
        tmpDstRows = numChannels;

        // If Pixel Null Mask is enabled, then one extra GRF is needed for the
        // write back message.
        pixelNullMaskEnable = hasPixelNullMask() && pixelNullMask;
        if (pixelNullMaskEnable) {
            ASSERT_USER(useHeader, "pixel null mask requires a header");
            ++tmpDstRows;
        }

        const char *name = getNameString(mem, 20, "%s%d", "TmpSmplDst_", TmpSmplDstID++);

        tempDstDcl = createDeclareNoLookup(name,
            originalDstDcl->getRegFile(),
            originalDstDcl->getNumElems(),
            (uint16_t)tmpDstRows,
            originalDstDcl->getElemType());

        dst1 = createDstRegRegion(dst->getRegAccess(), tempDstDcl->getRegVar(), 0, 0, 1, dst->getType());
    }
    /********* End creating temp destination ***********************/

    G4_Declare* header = nullptr;

    if (useHeader)
    {
        const bool samplerIndexGE16 = IsSamplerIndexGE16(aoffimmi);
        bool bindlessSampler = sampler ? isBindlessSampler(sampler) : false;
        header = getSamplerHeader(bindlessSampler, samplerIndexGE16);
        // createSampleHeader may replace the sampler operand (index >= 16 case).
        sampler = createSampleHeader(this, header, actualop, pixelNullMask, aoffimmi, srcChannel, sampler);
        createMovInst(payloadUD, 0, 0, g4::SIMD8, nullptr, nullptr,
            createSrcRegRegion(header, getRegionStride1()), true);
    }

    // First-half payload: copy the lower half of every parameter.
    G4_InstOpts instOpt = Get_Gen4_Emask(emask, execSize);
    for (unsigned paramCounter = 0; paramCounter < numParms; ++paramCounter)
    {
        temp = params[paramCounter];
        uint32_t MovInstOpt = InstOpt_WriteEnable;

        if (temp->getTypeSize() == 2)
        {
            // we should generate
            // mov (8) dst<1>:hf src.0<8;8,1>:hf
            G4_DstRegRegion* dstHF = createDst(
                payloadHF->getRegVar(),
                regOff++,
                0, 1, temp->getType());
            temp->setRegion(*this, getRegionStride1());
            createMov(g4::SIMD8, dstHF, temp, MovInstOpt, true);
        }
        else
        {
            Copy_SrcRegRegion_To_Payload(payloadF, regOff, temp, execSize, MovInstOpt);
        }
    }

    uint32_t responseLength = getSamplerResponseLength(numChannels,
        isHalfReturn, execSize, pixelNullMaskEnable, dst->isNullReg());

    uint32_t fc = createSamplerMsgDesc(actualop, execSize == getNativeExecSize(), isHalfReturn, halfInput);
    uint32_t desc = G4_SendDescRaw::createDesc(fc, useHeader, numRows, responseLength);

    if (cpsEnable)
    {
        // Each half executes at SIMD8 after the split.
        checkCPSEnable(actualop, responseLength, 8);
    }
    G4_SendDescRaw *msgDesc = createSampleMsgDesc(desc, cpsEnable, 0, surface, sampler);

    G4_InstSend* sendInst = nullptr;
    bool forceSplitSend = shouldForceSplitSend(surface);

    if (forceSplitSend)
    {
        sendInst = createSplitSendInst(
            pred, dst1, srcToUse, createNullSrc(Type_UD), execSize, msgDesc, instOpt, false);
    }
    else
    {
        sendInst = createSendInst(
            pred, dst1, srcToUse, execSize, msgDesc, instOpt, false);
    }
    setUniformSampler(sendInst, uniformSampler);

    // SKL+
    // For SIMD8
    //
    // W4.7:1 Reserved (not written): This W4 is only delivered when Pixel Null
    //        Mask Enable is enabled.
    //
    // W4.0  32:8 Reserved: always written as 0xffffff
    //        7:0 Pixel Null Mask: This field has the bit for all pixels set
    //            to 1 except those pixels in which a null page was source for
    //            at least one texel.
    //
    // Need to combine the results from the above two writewback messages.
    // Denote by U0[W4:0] the last row of the first writeback message, and
    // by U1[W4:0] the last row of the second writeback message. Then the last
    // row of the whole writeback message is to take the bitwise OR of
    // U0[W4:0] and U1[W4:0].
    G4_Declare *tempDstUD = 0;
    G4_Declare *tempDst2UD = 0;
    G4_Declare *origDstUD = 0;

    // temp dst for the second send
    G4_DstRegRegion *dst2 = createNullDst(dst->getType());
    G4_Declare* tempDstDcl2 = nullptr;
    if (!dst->isNullReg())
    {
        const char *name = getNameString(mem, 20, "%s%d", "TmpSmplDst2_", TmpSmplDstID++);

        tempDstDcl2 = createDeclareNoLookup(name,
            originalDstDcl->getRegFile(),
            originalDstDcl->getNumElems(),
            (uint16_t)tmpDstRows,
            originalDstDcl->getElemType());

        if (pixelNullMaskEnable)
        {
            // UD-typed aliases so the pixel-null-mask dwords can be copied/merged.
            unsigned int numElts = tempDstDcl->getNumElems() * tempDstDcl->getNumRows();
            tempDstUD = createTempVar(numElts, Type_UD, GRFALIGN);
            tempDstUD->setAliasDeclare(tempDstDcl, 0);

            numElts = tempDstDcl2->getNumElems() * tempDstDcl2->getNumRows();
            tempDst2UD = createTempVar(numElts, Type_UD, GRFALIGN);
            tempDst2UD->setAliasDeclare(tempDstDcl2, 0);

            numElts = originalDstDcl->getNumElems() * originalDstDcl->getNumRows();
            origDstUD = createTempVar(numElts, Type_UD, GRFALIGN);
            origDstUD->setAliasDeclare(originalDstDcl, 0);
        }

        dst2 = createDstRegRegion(dst->getRegAccess(), tempDstDcl2->getRegVar(), 0, 0, 1, dst->getType());
    }
    // update emask
    emask = Get_Next_EMask(emask, execSize);
    G4_InstOpts instOpt2 = Get_Gen4_Emask(emask, execSize);

    // Clone the predicate so each instruction owns its own operand.
    auto dupPredicate = [this](G4_Predicate* pred)
    {
        G4_Predicate* pred2 = nullptr;
        if (pred)
        {
            pred2 = createPredicate(
                pred->getState(),
                pred->getBase(),
                0);
        }

        return pred2;
    };

    {
        /**************** SECOND HALF OF THE SEND *********************/
        // re-create payload declare so the two sends may be issued independently
        G4_Declare* payloadF = createSendPayloadDcl(numElts, Type_F);
        G4_Declare* payloadUD = createTempVar(numElts, Type_UD, GRFALIGN);
        payloadUD->setAliasDeclare(payloadF, 0);

        // even though we only use lower half of the GRF, we have to allocate full GRF
        G4_Declare* payloadHF = createTempVar(numElts * 2, Type_HF, Any);
        payloadHF->setAliasDeclare(payloadF, 0);

        G4_SrcRegRegion *srcToUse2 = createSrc(payloadUD->getRegVar(), 0, 0, getRegionStride1(), Type_UD);

        if (useHeader)
        {
            createMovInst(payloadUD, 0, 0, g4::SIMD8, nullptr, nullptr,
                createSrcRegRegion(header, getRegionStride1()), true);
        }

        // Build upper-half views of each parameter: HF params advance by
        // subreg offset, full-size params advance by one GRF row.
        for (unsigned int i = 0; i < numParms; i++)
        {
            if (params[i]->isNullReg())
            {
                secondHalf[i] = params[i];
            }
            else if (params[i]->getTypeSize() == 2)
            {
                // V1(0,8)<8;8,1>
                secondHalf[i] = createSrcWithNewSubRegOff(params[i], execSize);
            }
            else
            {
                // V1(1,0)<8;8,1>
                secondHalf[i] = createSrcWithNewRegOff(params[i], params[i]->getRegOff() + 1);
            }
        }

        regOff = (useHeader ? 1 : 0);
        for (unsigned paramCounter = 0; paramCounter < numParms; ++paramCounter)
        {
            temp = secondHalf[paramCounter];
            uint32_t MovInstOpt = InstOpt_WriteEnable;

            if (temp->getTypeSize() == 2)
            {
                // we should generate
                // mov (8) dst<1>:hf src.8<8;8,1>:hf
                G4_DstRegRegion* dstHF = createDst(
                    payloadHF->getRegVar(),
                    regOff++, 0, 1, temp->getType());
                createMov(execSize, dstHF, temp, MovInstOpt, true);
            }
            else
            {
                Copy_SrcRegRegion_To_Payload(payloadF, regOff, temp, execSize, MovInstOpt);
            }
        }

        G4_Operand *surface2 = duplicateOperand(surface);

        // sampler may be null for 3d load (specifically ld2dms_w)
        G4_Operand* sampler2 = sampler == nullptr ? nullptr : duplicateOperand(sampler);

        G4_Predicate* pred2 = dupPredicate(pred);

        G4_SendDescRaw *msgDesc2 = createSampleMsgDesc(desc, cpsEnable, 0, surface2, sampler2);
        msgDesc2->setHeaderPresent(useHeader);

        if (forceSplitSend)
        {
            sendInst = createSplitSendInst(
                pred2, dst2, srcToUse2, createNullSrc(Type_UD), execSize, msgDesc2, instOpt2, false);
        }
        else
        {
            sendInst = createSendInst(
                pred2, dst2, srcToUse2, execSize, msgDesc2, instOpt2, false);
        }
        setUniformSampler(sendInst, uniformSampler);
    }

    {
        /**************** MOVING FROM TEMP TO DST, 1st half *********************/
        regOff = 0;
        for (unsigned i = 0; i < tmpDstRows; i++, regOff += 1)
        {
            // If Pixel Null Mask is enabled, then only copy the last double word.
            if (pixelNullMaskEnable && i == tmpDstRows - 1)
            {
                G4_DstRegRegion *origDstPtr = createDst(origDstUD->getRegVar(), short(regOff), 0, 1, Type_UD);
                G4_SrcRegRegion *src0Ptr = createSrc(tempDstUD->getRegVar(), short(i), 0, getRegionScalar(), Type_UD);

                G4_Predicate* pred2 = dupPredicate(pred);
                // Copy the write mask message W4.0 into the dst. (No mask?)
                createInst(pred2, G4_mov, NULL, g4::NOSAT, g4::SIMD1,
                    origDstPtr, src0Ptr, NULL, NULL, InstOpt_WriteEnable, true);
                // Skip the remaining part of the loop.
                break;
            }

            G4_SrcRegRegion *tmpSrcPnt = createSrc(
                tempDstDcl->getRegVar(), (short)i, 0, getRegionStride1(), tempDstDcl->getElemType());

            uint32_t MovInstOpt = instOpt;
            if (isHalfReturn)
            {
                // mov (8) dst(0,0)<1>:hf tmp(0,0)<8;8,1>:hf {Q1}
                G4_DstRegRegion* dst = createDst(
                    originalDstDcl->getRegVar(), (short)regOff, 0, 1, originalDstDcl->getElemType());
                createMov(execSize, dst, tmpSrcPnt, MovInstOpt, true);
            }
            else
            {
                Copy_SrcRegRegion_To_Payload(originalDstDcl, regOff, tmpSrcPnt, execSize, MovInstOpt);
            }
        }
    }

    {
        /**************** MOVING FROM TEMP TO DST, 2nd half *********************/
        // HF return packs both halves into the same rows; full-size return
        // interleaves rows, hence the different starting offsets.
        regOff = isHalfReturn ? 0 : 1;
        for (unsigned i = 0; i < tmpDstRows; i++, regOff += 1)
        {
            // If Pixel Null Mask is enabled, copy the second half to the originai dst
            if (pixelNullMaskEnable && i == tmpDstRows - 1)
            {
                G4_Type secondHalfType = execSize == g4::SIMD8 ? Type_UB : Type_UW;
                G4_DstRegRegion* origDstPtr = createDst(origDstUD->getRegVar(), regOff - 1, 1, 1, secondHalfType);
                G4_SrcRegRegion* src0Ptr = createSrc(tempDst2UD->getRegVar(), short(i), 0,
                    getRegionScalar(), secondHalfType);

                G4_Predicate* pred2 = dupPredicate(pred);
                // write to dst.0[8:15]
                // NOTE(review): this createInst call passes one fewer NULL than
                // the first-half call above — presumably a src0/src1 overload;
                // confirm both resolve to the intended G4_INST factory.
                createInst(pred2, G4_mov, NULL, g4::NOSAT, g4::SIMD1,
                    origDstPtr, src0Ptr, NULL, InstOpt_WriteEnable, true);

                // Skip the remaining part of the loop.
                break;
            }

            G4_SrcRegRegion *tmpSrcPnt = createSrc(
                tempDstDcl2->getRegVar(), (short)i, 0, getRegionStride1(), tempDstDcl->getElemType());

            uint32_t MovInstOpt = instOpt2;
            if (isHalfReturn)
            {
                // mov (8) dst(0,8)<1>:hf tmp(0,0)<8;8,1>:hf {Q2}
                G4_DstRegRegion* dst = createDst(
                    originalDstDcl->getRegVar(), (short)regOff, execSize, 1, originalDstDcl->getElemType());
                createMov(execSize, dst, tmpSrcPnt, MovInstOpt, true);
            }
            else
            {
                Copy_SrcRegRegion_To_Payload(originalDstDcl, regOff, tmpSrcPnt, execSize, MovInstOpt);
            }
        }
    }
    return status;
}

// For bindless samplers the sampler index in the msg descriptor is 0; the
// sampler handle is written directly into the header instead.
void IR_Builder::doSamplerHeaderMove(G4_Declare* headerDcl, G4_Operand* sampler)
{
    if (isBindlessSampler(sampler))
    {
        // sampler index in msg desc will be 0, manipulate the sampler offset instead
        // mov (1) M0.3<1>:ud sampler<0;1,0>:ud the driver will send the handle with bit 0 already set
        G4_DstRegRegion* dst = createDst(headerDcl->getRegVar(), 0, 3, 1, Type_UD);
        createMov(g4::SIMD1, dst, sampler, InstOpt_WriteEnable, true);
    }
}

//
// generate the r0 move for the sampler message header, and return the dcl
// for CNL+, also set SSP to dynamic if message is not bindless
//
G4_Declare* IR_Builder::getSamplerHeader(bool isBindlessSampler, bool samplerIndexGE16)
{
    G4_Declare* dcl = nullptr;

    G4_InstOpts dbgOpt = m_options->getOption(vISA_markSamplerMoves) ?
        InstOpt_BreakPoint : InstOpt_NoOpt;
    if (m_options->getOption(vISA_cacheSamplerHeader) && !isBindlessSampler)
    {
        // Cached path: initialize the shared builtin header once, at the
        // top of the instruction list.
        dcl = builtinSamplerHeader;
        if (!builtinSamplerHeaderInitialized)
        {
            builtinSamplerHeaderInitialized = true;
            if (hasBindlessSampler())
            {
                // make sure we set bit 0 of M0.3:ud to be 0
                // and (1) M0.6<1>:uw M0.6<1>:uw 0xFFFE
                G4_DstRegRegion* dst = createDst(dcl->getRegVar(), 0, 6, 1, Type_UW);
                G4_SrcRegRegion* src0 = createSrc(dcl->getRegVar(), 0, 6, getRegionScalar(), Type_UW);
                G4_INST* SSPMove = createBinOp(G4_and, g4::SIMD1, dst, src0,
                    createImm(0xFFFE, Type_UW), InstOpt_WriteEnable, false);
                instList.push_front(SSPMove);
            }
            G4_INST* r0Move = createMov(g4::SIMD8,
                createDstRegRegion(dcl, 1),
                createSrcRegRegion(builtinR0, getRegionStride1()),
                InstOpt_WriteEnable | dbgOpt, false);
            instList.push_front(r0Move);
        }
        if (samplerIndexGE16)
        {
            // When sampler index is greater or equal 16 then the
            // createSamplerHeader() message overwrites the sampler states
            // pointer in the header -> cannot use the cached value in this
            // case.
            dcl = createSendPayloadDcl(GENX_DATAPORT_IO_SZ, Type_UD);
            dcl->setCapableOfReuse();
            G4_SrcRegRegion* src = createSrc(builtinSamplerHeader->getRegVar(), 0, 0,
                getRegionStride1(), Type_UD);
            createMovInst(dcl, 0, 0, g4::SIMD8, NULL, NULL, src, false, dbgOpt);
        }
    }
    else
    {
        // Uncached path: build a fresh header from r0 for every message.
        dcl = createSendPayloadDcl(GENX_DATAPORT_IO_SZ, Type_UD);
        dcl->setCapableOfReuse();
        createMovR0Inst(dcl, 0, 0, true, dbgOpt);
        if (hasBindlessSampler() && !isBindlessSampler)
        {
            // make sure we set bit 0 of M0.3:ud to be 0
            // and (1) M0.6<1>:uw M0.6<1>:uw 0xFFFE
            G4_DstRegRegion* dst = createDst(dcl->getRegVar(), 0, 6, 1, Type_UW);
            G4_SrcRegRegion* src0 = createSrc(dcl->getRegVar(), 0, 6, getRegionScalar(), Type_UW);
            createBinOp(G4_and, g4::SIMD1, dst, src0,
                createImm(0xFFFE, Type_UW), InstOpt_WriteEnable, true);
        }
    }

    return dcl;
}

// get the number of GRFs occupied by a sampler message's operand
static uint32_t getNumGRF(unsigned grfSize, bool isFP16, int execSize)
{
    int numBytes = (isFP16 ? 2 : 4) * execSize;
    return (numBytes + grfSize - 1) / grfSize;
}

// Response GRF count for a sampler message: one operand's worth of GRFs per
// enabled channel, plus one GRF when the pixel null mask writeback is on.
// A null destination yields 0 (and is recorded in hasNullReturnSampler).
uint32_t IR_Builder::getSamplerResponseLength(
    int numChannels, bool isFP16, int execSize, bool pixelNullMask, bool nullDst)
{
    if (nullDst)
    {
        hasNullReturnSampler = true;
        return 0;
    }
    uint32_t responseLength = numChannels * getNumGRF(getGRFSize(), isFP16, execSize);

    if (pixelNullMask)
    {
        ++responseLength;
    }
    return responseLength;
}

// Decide whether the sampler message needs an explicit header GRF.
static bool needSamplerHeader(IR_Builder* builder, bool pixelNullMask, bool nonZeroAoffImmi,
    bool needHeaderForChannels, bool bindlessSampler, bool simd16HFReturn)
{
    return builder->forceSamplerHeader() ||
        (pixelNullMask && builder->hasPixelNullMask()) ||
        nonZeroAoffImmi || needHeaderForChannels || bindlessSampler ||
        (simd16HFReturn && VISA_WA_CHECK(builder->getPWaTable(),
            WaHeaderRequiredOnSimd16Sample16bit));
}

// This function assumes there are no gaps in parameter array. e.g. NULL pointers
// If there is a gap it must be RawOperand with value 0.
// Translate a vISA 3D sample_* instruction into a G4 sampler send. Falls back
// to splitSampleInst when the message exceeds HW limits (rows > 11 at
// wide SIMD, or sample_d / sample_d_c / sample_killpix).
int IR_Builder::translateVISASampler3DInst(
    VISASampler3DSubOpCode actualop,
    bool pixelNullMask,
    bool cpsEnable,
    bool uniformSampler,
    G4_Predicate* pred,
    VISA_Exec_Size executionSize,
    VISA_EMask_Ctrl emask,
    ChannelMask chMask,
    G4_Operand *aoffimmi,
    G4_Operand *sampler,
    G4_Operand *surface,
    G4_DstRegRegion* dst,
    unsigned int numParms,
    G4_SrcRegRegion ** params)
{
    TIME_SCOPE(VISA_BUILDER_IR_CONSTRUCTION);

    G4_ExecSize execSize = toExecSize(executionSize);
    G4_InstOpts instOpt = Get_Gen4_Emask(emask, execSize);

    // First setup message header and message payload
    // Message header and payload size is numParms GRFs

    const bool FP16Return = dst->getTypeSize() == 2;
    const bool FP16Input = params[0]->getType() == Type_HF;

    bool useHeader = false;

    unsigned int numRows = numParms * getNumGRF(getGRFSize(), FP16Input, execSize);

    VISAChannelMask channels = chMask.getAPI();
    // For SKL+ channel mask R, RG, RGB, and RGBA may be derived from response length
    bool needHeaderForChannels = (getPlatform() < GENX_SKL) ?
        channels != CHANNEL_MASK_RGBA :
        (channels != CHANNEL_MASK_R && channels != CHANNEL_MASK_RG &&
         channels != CHANNEL_MASK_RGB && channels != CHANNEL_MASK_RGBA);

    bool nonZeroAoffImmi = !(aoffimmi->isImm() && aoffimmi->asImm()->getInt() == 0);
    bool simd16HFReturn = FP16Return && execSize == 16;
    if (needSamplerHeader(this, pixelNullMask, nonZeroAoffImmi, needHeaderForChannels,
        isBindlessSampler(sampler), simd16HFReturn))
    {
        useHeader = true;
        ++numRows;
    }

    int numChannels = chMask.getNumEnabledChannels();

    // HW can't do wide-SIMD messages that are too long or of these opcodes;
    // split into two native-SIMD sends.
    if (execSize > getNativeExecSize() &&
        (numRows > 11 ||
         actualop == VISA_3D_SAMPLE_D ||
         actualop == VISA_3D_SAMPLE_D_C ||
         actualop == VISA_3D_SAMPLE_KILLPIX))
    {
        // decrementing since we will produce SIMD8 code.
        // don't do this for SIMD16H since its message length is the same as SIMD8H
        if (!FP16Input)
        {
            numRows -= numParms;
        }
        return splitSampleInst(actualop, pixelNullMask, cpsEnable, pred, chMask,
            numChannels, aoffimmi, sampler, surface, dst, emask, useHeader,
            numRows, numParms, params, uniformSampler);
    }

    bool useSplitSend = useSends();

    G4_SrcRegRegion *header = 0;
    G4_Operand* samplerIdx = sampler;

    if (useHeader)
    {
        const bool samplerIndexGE16 = IsSamplerIndexGE16(aoffimmi);
        G4_Declare *dcl = getSamplerHeader(isBindlessSampler(sampler), samplerIndexGE16);
        samplerIdx = createSampleHeader(this, dcl, actualop, pixelNullMask, aoffimmi, chMask, sampler);
        header = createSrcRegRegion(dcl, getRegionStride1());
    }

    G4_InstOpts dbgOpt = m_options->getOption(vISA_markSamplerMoves) ?
        InstOpt_BreakPoint : InstOpt_NoOpt;
    // Collect payload sources.
    unsigned len = numParms + (header ? 1 : 0);
    std::vector<PayloadSource> sources(len);
    unsigned i = 0;
    // Collect header if present.
    if (header) {
        sources[i].opnd = header;
        sources[i].execSize = g4::SIMD8;
        sources[i].instOpt = InstOpt_WriteEnable | dbgOpt;
        ++i;
    }
    // Collect all parameters.
    bool needNoMask = needsNoMaskCoordinates(actualop);
    unsigned uPos = needNoMask ? getUPosition(actualop) : ~0u;
    for (unsigned j = 0; j != numParms; ++j) {
        sources[i].opnd = params[j];
        sources[i].execSize = execSize;
        // u/v/r coordinates (3 params starting at uPos) are copied NoMask.
        sources[i].instOpt = (needNoMask && (uPos <= j && j < (uPos + 3))) ?
            InstOpt_WriteEnable | dbgOpt : instOpt | dbgOpt;
        ++i;
    }
    ASSERT_USER(i == len, "There's mismatching during payload source collecting!");

    G4_SrcRegRegion *msgs[2] = {0, 0};
    unsigned sizes[2] = {0, 0};
    preparePayload(msgs, sizes, execSize, useSplitSend, sources.data(), len);

    uint32_t responseLength = getSamplerResponseLength(numChannels, FP16Return, execSize,
        hasPixelNullMask() && pixelNullMask, dst->isNullReg());

    // Check if CPS LOD Compensation Enable is valid.
    if (cpsEnable)
    {
        checkCPSEnable(actualop, responseLength, execSize);
    }

    uint32_t fc = createSamplerMsgDesc(actualop, execSize == getNativeExecSize(), FP16Return, FP16Input);
    uint32_t desc = G4_SendDescRaw::createDesc(fc, useHeader, sizes[0], responseLength);

    G4_InstSend* sendInst = nullptr;
    bool forceSplitSend = shouldForceSplitSend(surface);
    if (msgs[1] == 0 && !forceSplitSend)
    {
        ASSERT_USER(sizes[1] == 0, "Expect the 2nd part of the payload has zero size!");
        G4_SendDescRaw *msgDesc = createSampleMsgDesc(desc, cpsEnable, 0, surface, samplerIdx);

        sendInst = createSendInst(pred, dst, msgs[0], execSize,
            msgDesc, instOpt, false);
    }
    else
    {
        G4_SendDescRaw *msgDesc = createSampleMsgDesc(desc, cpsEnable, sizes[1], surface, samplerIdx);
        sendInst = createSplitSendInst(pred, dst, msgs[0], msgs[1],
            execSize, msgDesc, instOpt, false);
    }
    setUniformSampler(sendInst, uniformSampler);
    return VISA_SUCCESS;
}

// Translate a vISA 3D load (ld/ld2dms...) into a G4 sampler send; no sampler
// state operand is used (NULL sampler).
int IR_Builder::translateVISALoad3DInst(
    VISASampler3DSubOpCode actualop,
    bool pixelNullMask,
    G4_Predicate *pred_opnd,
    VISA_Exec_Size executionSize,
    VISA_EMask_Ctrl em,
    ChannelMask channelMask,
    G4_Operand* aoffimmi,
    G4_Operand* surface,
    G4_DstRegRegion* dst,
    uint8_t numParms,
    G4_SrcRegRegion ** opndArray)
{
    TIME_SCOPE(VISA_BUILDER_IR_CONSTRUCTION);

    bool useHeader = false;

    G4_ExecSize execSize = toExecSize(executionSize);
    G4_InstOpts instOpt = Get_Gen4_Emask(em, execSize);

    const bool halfReturn = dst->getTypeSize() == 2;
    const bool halfInput = opndArray[0]->getTypeSize() == 2;

    unsigned int numRows = numParms * getNumGRF(getGRFSize(), halfInput, execSize);

    VISAChannelMask channels = channelMask.getAPI();
    // For SKL+ channel mask R, RG, RGB, and RGBA may be derived from response length
    bool needHeaderForChannels = (getPlatform() < GENX_SKL) ?
        channels != CHANNEL_MASK_RGBA :
        (channels != CHANNEL_MASK_R && channels != CHANNEL_MASK_RG &&
         channels != CHANNEL_MASK_RGB && channels != CHANNEL_MASK_RGBA);

    bool nonZeroAoffImmi = !(aoffimmi->isImm() && aoffimmi->asImm()->getInt() == 0);
    bool simd16HFReturn = halfReturn && execSize == 16;
    if (needSamplerHeader(this, pixelNullMask, nonZeroAoffImmi, needHeaderForChannels,
        false, simd16HFReturn))
    {
        useHeader = true;
        ++numRows;
    }

    int numChannels = channelMask.getNumEnabledChannels();
    if (execSize > getNativeExecSize() && numRows > 11)
    {
        // decrementing since we will produce SIMD8 code.
        // don't do this for SIMD16H since its message length is the same as SIMD8H
        if (!halfInput)
        {
            numRows -= numParms;
        }
        return splitSampleInst(actualop, pixelNullMask, /*cpsEnable*/false,
            pred_opnd, channelMask, numChannels, aoffimmi, NULL, surface,
            dst, em, useHeader, numRows, numParms, opndArray);
    }

    bool useSplitSend = useSends();

    G4_SrcRegRegion *header = nullptr;
    if (useHeader)
    {
        G4_Declare* dcl = getSamplerHeader(false /*isBindlessSampler*/, false /*samperIndexGE16*/);
        {
            // Header only; no sampler operand for loads.
            (void)createSampleHeader(this, dcl, actualop, pixelNullMask, aoffimmi, channelMask, nullptr);
        }
        header = createSrcRegRegion(dcl, getRegionStride1());
    }

    // Collect payload sources.
    unsigned len = numParms + (header ? 1 : 0);
    std::vector<PayloadSource> sources(len);
    unsigned i = 0;
    // Collect header if present.
    if (header) {
        sources[i].opnd = header;
        sources[i].execSize = g4::SIMD8;
        sources[i].instOpt = InstOpt_WriteEnable;
        ++i;
    }
    // Collect all parameters.
    bool needNoMask = needsNoMaskCoordinates(actualop);
    unsigned uPos = needNoMask ? getUPosition(actualop) : ~0u;
    for (unsigned j = 0; j != numParms; ++j) {
        sources[i].opnd = opndArray[j];
        sources[i].execSize = execSize;
        sources[i].instOpt = (needNoMask && (uPos <= j && j < (uPos + 3))) ?
            InstOpt_WriteEnable : instOpt;
        ++i;
    }
    ASSERT_USER(i == len, "There's mismatching during payload source collecting!");

    G4_SrcRegRegion *msgs[2] = {0, 0};
    unsigned sizes[2] = {0, 0};
    preparePayload(msgs, sizes, execSize, useSplitSend, sources.data(), len);

    uint32_t fc = createSamplerMsgDesc(actualop, execSize == getNativeExecSize(), halfReturn, halfInput);

    uint32_t responseLength = getSamplerResponseLength(numChannels, halfReturn, execSize,
        hasPixelNullMask() && pixelNullMask, dst->isNullReg());

    bool forceSplitSend = shouldForceSplitSend(surface);
    if (msgs[1] == 0 && !forceSplitSend)
    {
        createSendInst(pred_opnd, dst,
            msgs[0], sizes[0],
            responseLength,
            execSize, fc, SFID::SAMPLER,
            useHeader,
            SendAccess::READ_ONLY,
            surface, NULL,
            instOpt, false);
    }
    else
    {
        createSplitSendInst(pred_opnd, dst,
            msgs[0], sizes[0], msgs[1], sizes[1],
            responseLength,
            execSize, fc, SFID::SAMPLER,
            useHeader,
            SendAccess::READ_ONLY,
            surface, NULL,
            instOpt, false);
    }

    return VISA_SUCCESS;
}

// Translate a vISA gather4 instruction: always returns 4 channels, so the
// response length and split path use a fixed channel count of 4.
int IR_Builder::translateVISAGather3dInst(
    VISASampler3DSubOpCode actualop,
    bool pixelNullMask,
    G4_Predicate* pred,
    VISA_Exec_Size executionSize,
    VISA_EMask_Ctrl em,
    ChannelMask channelMask,
    G4_Operand* aoffimmi,
    G4_Operand* sampler,
    G4_Operand* surface,
    G4_DstRegRegion* dst,
    unsigned int numOpnds,
    G4_SrcRegRegion ** opndArray)
{
    TIME_SCOPE(VISA_BUILDER_IR_CONSTRUCTION);

    bool useHeader = false;

    G4_ExecSize execSize = toExecSize(executionSize);
    G4_InstOpts instOpt = Get_Gen4_Emask(em, execSize);

    const bool FP16Return = dst->getTypeSize() == 2;
    const bool FP16Input = opndArray[0]->getType() == Type_HF;

    unsigned int numRows = numOpnds * getNumGRF(getGRFSize(), FP16Input, execSize);

    bool nonZeroAoffImmi = !(aoffimmi->isImm() && aoffimmi->asImm()->getInt() == 0);
    // gather4 defaults to the R channel; any other channel needs the header.
    bool needHeaderForChannels = channelMask.getSingleChannel() != VISA_3D_GATHER4_CHANNEL_R;
    bool simd16HFReturn = FP16Return && execSize == 16;

    if (needSamplerHeader(this, pixelNullMask, nonZeroAoffImmi, needHeaderForChannels,
        isBindlessSampler(sampler), simd16HFReturn))
    {
        useHeader = true;
        ++numRows;
    }

    if (execSize > getNativeExecSize() && numRows > 11)
    {
        // decrementing since we will produce SIMD8 code.
        // don't do this for SIMD16H since its message length is the same as SIMD8H
        if (!FP16Input)
        {
            numRows -= numOpnds;
        }
        return splitSampleInst(actualop, pixelNullMask, /*cpsEnable*/false,
            pred, channelMask, 4, aoffimmi, sampler, surface,
            dst, em, useHeader, numRows, numOpnds, opndArray);
    }

    bool useSplitSend = useSends();

    G4_SrcRegRegion *header = nullptr;
    G4_Operand* samplerIdx = sampler;

    if (useHeader)
    {
        const bool samplerIndexGE16 = IsSamplerIndexGE16(aoffimmi);
        G4_Declare *dcl = getSamplerHeader(isBindlessSampler(sampler), samplerIndexGE16);
        {
            samplerIdx = createSampleHeader(this, dcl, actualop, pixelNullMask, aoffimmi, channelMask, sampler);
        }
        header = createSrcRegRegion(dcl, getRegionStride1());
    }

    // Collect payload sources.
    unsigned len = numOpnds + (header ? 1 : 0);
    std::vector<PayloadSource> sources(len);
    unsigned i = 0;
    // Collect header if present.
    if (header) {
        sources[i].opnd = header;
        sources[i].execSize = g4::SIMD8;
        sources[i].instOpt = InstOpt_WriteEnable;
        ++i;
    }
    // Collect all parameters.
    bool needNoMask = needsNoMaskCoordinates(actualop);
    unsigned uPos = needNoMask ? getUPosition(actualop) : ~0u;
    for (unsigned j = 0; j != numOpnds; ++j) {
        sources[i].opnd = opndArray[j];
        sources[i].execSize = execSize;
        sources[i].instOpt = (needNoMask && (uPos <= j && j < (uPos + 3))) ?
            InstOpt_WriteEnable : instOpt;
        ++i;
    }
    ASSERT_USER(i == len, "There's mismatching during payload source collecting!");

    G4_SrcRegRegion *msgs[2] = {0, 0};
    unsigned sizes[2] = {0, 0};
    preparePayload(msgs, sizes, execSize, useSplitSend, sources.data(), len);

    uint32_t fc = createSamplerMsgDesc(actualop, execSize == getNativeExecSize(), FP16Return, FP16Input);
    uint32_t responseLength = getSamplerResponseLength(4, FP16Return, execSize,
        hasPixelNullMask() && pixelNullMask, dst->isNullReg());

    bool forceSplitSend = shouldForceSplitSend(surface);
    if (msgs[1] == 0 && !forceSplitSend)
    {
        createSendInst(pred, dst,
            msgs[0], sizes[0],
            responseLength,
            execSize, fc, SFID::SAMPLER,
            useHeader,
            SendAccess::READ_ONLY,
            surface, samplerIdx,
            instOpt, false);
    }
    else
    {
        createSplitSendInst(pred, dst,
            msgs[0], sizes[0], msgs[1], sizes[1],
            responseLength,
            execSize, fc, SFID::SAMPLER,
            useHeader,
            SendAccess::READ_ONLY,
            surface, samplerIdx,
            instOpt, false);
    }

    return VISA_SUCCESS;
}

/*
 * Translates Sampler Norm API intrinsic.
 *
 * Assuming: N = 4, channelMask=ABGR_ENABLE, surfIndex = 0x21, samplerIndex = 0x4,
 * then the generated code should look like the following for GT:
 *
 * .declare VX Base=m ElementSize=4 Type=ud Total=16
 * .declare VY Base=r ElementSize=2 Type=uw Total=128
 *
 * mov  (8)     VX(0,0)<1>,  r0:ud
 * mov  (1)     VX(0,2)<1>,  0
 * mov  (1)     VX(1,1)<1>,  deltaU
 * mov  (1)     VX(1,2)<1>,  u
 * mov  (1)     VX(1,5)<1>,  deltaV
 * mov  (1)     VX(1,6)<1>,  v
 * send (16)    VY(0,0)<1>,  VX(0,0),  0x2,  0x048bc421
 * mov  (128)   M(0,0)<1>,   VY(0,0)
 *
 * VX(0,0): message header
 *
 * VX(1,0): SIMD32 media payload
 *
 * ex_desc: 0x2 == 0010 (Target Function ID: Sampling Engine)
 *
 * desc: 0x048bc421 == Bit 31-29: 000 (Reserved)
 *                     Bit 28-25: 0010 (Message Length =)
 *                     Bit 24-20: 01000 (Response Message Length = 8)
 *                     Bit 19:    1 (Header present)
 *                     Bit 18:    0 (Reserved)
 *                     Bit 17-16: 11 (SIMD Mode = SIMD32)
 *                     Bit 15-12: 1100 (Message Type = sample_unorm media)
 *                     Bit 11-8:  0000 + samplerIndex (Sampler Index)
 *                     Bit 7-0:   00000000 + surfIndex (Binding Table Index)
 *
 * NOTE(review): the payload subregister offsets shown above (VX(1,1)/VX(1,6))
 * do not match the offsets the code below actually writes (4/2/5/3) — the
 * inline comments on each createMovInst reflect the real layout; confirm and
 * refresh this header comment.
 */
int IR_Builder::translateVISASamplerNormInst(
    G4_Operand* surface,
    G4_Operand* sampler,
    ChannelMask channel,
    unsigned numEnabledChannels,
    G4_Operand* deltaUOpnd,
    G4_Operand* uOffOpnd,
    G4_Operand* deltaVOpnd,
    G4_Operand* vOffOpnd,
    G4_DstRegRegion* dst_opnd)
{
    TIME_SCOPE(VISA_BUILDER_IR_CONSTRUCTION);

    // mov (8)      VX(0,0)<1>, r0:ud
    // add dcl for VX
    G4_Declare *dcl = createSendPayloadDcl(2 * GENX_SAMPLER_IO_SZ, Type_UD);

    // mov  VX(0,0)<1>, r0
    createMovR0Inst(dcl, 0, 0);
    /* mov (1)     VX(0,2)<1>,   0  */
    // channel-mask bits live at bit 12 of header dword 2
    unsigned cmask = channel.getHWEncoding() << 12;
    createMovInst(dcl, 0, 2, g4::SIMD1, NULL, NULL, createImm(cmask, Type_UD));

    // float-typed alias of the second GRF for the media payload
    G4_Declare *dcl1 = createSendPayloadDcl(GENX_DATAPORT_IO_SZ, Type_F);
    dcl1->setAliasDeclare(dcl, numEltPerGRF<Type_UB>());

    // mov  (1)     VX(1,4)<1>,  deltaU
    createMovInst(dcl1, 0, 4, g4::SIMD1, NULL, NULL, deltaUOpnd);
    // mov  (1)     VX(1,2)<1>,  u
    createMovInst(dcl1, 0, 2, g4::SIMD1, NULL, NULL, uOffOpnd);
    // mov  (1)     VX(1,5)<1>,  deltaV
    createMovInst(dcl1, 0, 5, g4::SIMD1, NULL, NULL, deltaVOpnd);
    // mov  (1)     VX(1,3)<1>,  v
    createMovInst(dcl1, 0, 3, g4::SIMD1, NULL, NULL, vOffOpnd);

    // send's operands preparation
    // create a currDst for VX
    G4_SrcRegRegion* payload = createSrcRegRegion(dcl, getRegionStride1());
    G4_DstRegRegion* d = checkSendDst(dst_opnd->asDstRegRegion());

    // Set bit 12-17 for the message descriptor
    unsigned descFc = 0;
    descFc |= 0xC << 12;  // Bit 16-12 = 1100 for Sampler Message Type
    descFc |= 0x3 << 17;  // Bit 18-17 = 11 for SIMD32 mode

    createSendInst(
        NULL, d, payload, 2,
        32 * numEnabledChannels * TypeSize(Type_UW) / numEltPerGRF<Type_UB>(),
        g4::SIMD32,
        descFc, SFID::SAMPLER,
        1, SendAccess::READ_ONLY,
        surface, sampler,
        0, false);

    return VISA_SUCCESS;
}

/*
 * Translates Sampler intrinsic.
 *
 * Assuming: N = 4, channelMask=ABGR_ENABLE, surfIndex = 0x21, samplerIndex = 0x4,
 * then the generated code should look like the following for GT:
 *
 * .declare VX Base=m ElementSize=4 Type=f Total=72
 * .declare VY Base=r ElementSize=4 Type=f Total=64
 * .declare VZ Base=r ElementSize=2 Type=w Total=128 ALIAS(VY,0)
 *
 * mov  (8)     VX(0,0)<1>,  r0:ud
 * mov  (1)     VX(0,2)<1>,  0
 * mov  (16)    VX(1,0)<1>,  u
 * mov  (16)    VX(3,0)<1>,  v
 * mov  (16)    VX(5,0)<1>,  r
 * mov  (16)    VX(7,0)<1>,  0
 * send (16)    VY(0,0)<1>,  VX(0,0),  0x2,  0x128a0421
 * mov  (64)    M(0,0)<1>,   VY(0,0)
 *
 * ex_desc: 0x2 == 0010 (Target Function ID: Sampling Engine)
 *
 * desc: 0x128a0421 == Bit 31-29: 000 (Reserved)
 *                     Bit 28-25: 1001 (Message Length = 9 (1+2*4 for SIMD16))
 *                     Bit 24-20: 01000 (Response Message Length = 8)
 *                     Bit 19:    1 (Header present)
 *                     Bit 18:    0 (Reserved)
 *                     Bit 17-16: 10 (SIMD Mode = SIMD16)
 *                     Bit 15-12: 0000 (Message Type = Sample)
 *                     Bit 11-8:  0000 + samplerIndex (Sampler Index)
 *                     Bit 7-0:   00000000 + surfIndex (Binding Table Index)
 *
 */
int IR_Builder::translateVISASamplerInst(
    unsigned simdMode,
    G4_Operand* surface,
    G4_Operand* sampler,
    ChannelMask channel,
    unsigned numEnabledChannels,
    G4_Operand* uOffOpnd,
    G4_Operand* vOffOpnd,
    G4_Operand* rOffOpnd,
    G4_DstRegRegion* dstOpnd)
{
    TIME_SCOPE(VISA_BUILDER_IR_CONSTRUCTION);

    // mov (8)      VX(0,0)<1>, r0:ud
    // add dcl for VX
    unsigned num_payload_elt = simdMode / 2 * numEltPerGRF<Type_UB>() / TypeSize(Type_UD);
    G4_Declare *dcl = createSendPayloadDcl(num_payload_elt + GENX_SAMPLER_IO_SZ, Type_UD);

    // mov  VX(0,0)<1>, r0
    createMovR0Inst(dcl, 0, 0);

    unsigned cmask = channel.getHWEncoding() << 12;
    /* mov (1)     VX(0,2)<1>,   0  */
    createMovInst(dcl, 0, 2, g4::SIMD1, NULL, NULL, createImm(cmask, Type_UD));

    // set up the message payload
    // lod is always uninitialized for us as we don't support it.
    G4_Declare *dcl1 = createSendPayloadDcl(num_payload_elt, Type_UD);
    dcl1->setAliasDeclare(dcl, numEltPerGRF<Type_UB>());
    /* mov  (sample_mode)    VX(0,0)<1>,  u */
    createMovSendSrcInst(dcl1, 0, 0, simdMode, uOffOpnd, 0);
    if (sampler == NULL)
    {
        // ld
        if (getPlatform() < GENX_SKL)
        {
            // the order of paramters is
            // u    lod        v    r
            /* mov  (sample_mode)    VX(sample_mode/8, 0)<1>,  lod */
            createMovSendSrcInst(dcl1, simdMode / 8, 0, simdMode, createImm(0, Type_UD), 0);
            /* mov  (sample_mode)    VX(2*sample_mode/8, 0)<1>,  v */
            createMovSendSrcInst(dcl1, 2 * simdMode / 8, 0, simdMode, vOffOpnd, 0);
            /* mov  (sample_mode)    VX(3*sampler_mode/8, 0)<1>,  r */
            createMovSendSrcInst(dcl1, 3 * simdMode / 8, 0, simdMode, rOffOpnd, 0);
        }
        else
        {
            // SKL+: the order of paramters is
            // u    v   lod r
            /* mov  (sample_mode)    VX(sample_mode/8, 0)<1>,  v */
            createMovSendSrcInst(dcl1, simdMode / 8, 0, simdMode, vOffOpnd, 0);
            /* mov  (sample_mode)    VX(2*sample_mode/8, 0)<1>,  lod */
            createMovSendSrcInst(dcl1, 2 * simdMode / 8, 0, simdMode, createImm(0, Type_UD), 0);
            /* mov  (sample_mode)    VX(3*sampler_mode/8, 0)<1>,  r */
            createMovSendSrcInst(dcl1, 3 * simdMode / 8, 0, simdMode, rOffOpnd, 0);
        }
    }
    else
    {
        // sample
        /* mov  (sample_mode)    VX(1 + sample_mode/8, 0)<1>,  v */
        createMovSendSrcInst(dcl1, simdMode / 8, 0, simdMode, vOffOpnd, 0);
        /* mov  (sample_mode)    VX(3,0)<1>,  r */
        createMovSendSrcInst(dcl1, 2 * simdMode / 8, 0, simdMode, rOffOpnd, 0);
        /* mov  (sample_mode)    VX(5,0)<1>,  0 */
        createMovSendSrcInst(dcl1, 3 * simdMode / 8, 0, simdMode, createImm(0, Type_UD), 0);
    }

    // send's operands preparation
    // create a currDst for VX
    G4_SrcRegRegion* payload = createSrcRegRegion(dcl, getRegionStride1());
    G4_DstRegRegion* d = checkSendDst(dstOpnd->asDstRegRegion());

    // Set bit 9-8 for the message descriptor
    unsigned descFc = 0;

    // Bit 17-18 = 10 for SIMD mode
    if (simdMode == 8)
    {
        descFc |= 0x1 << 17;
    }
    else
    {
        descFc |= 0x2 << 17;
    }

    if (sampler == nullptr)
    {
        static const unsigned SAMPLER_MESSAGE_TYPE_OFFSET = 12;
        // LD message
        descFc += VISASampler3DSubOpCode::VISA_3D_LD << SAMPLER_MESSAGE_TYPE_OFFSET;
    }

    if (simdMode == 16)
    {
        // redefine the type and offset of post dst.
        if ((d->getType() != Type_W) &&
            (d->getType() != Type_UW))
        {
            // Rescale the subreg offset to Type_W units for a direct access.
            short new_SubRegOff = dstOpnd->asDstRegRegion()->getSubRegOff();
            if (dstOpnd->getRegAccess() == Direct)
            {
                new_SubRegOff = (dstOpnd->asDstRegRegion()->getSubRegOff() * dstOpnd->getTypeSize()) / TypeSize(Type_W);
            }
            G4_DstRegRegion new_dst(
                *this,
                dstOpnd->getRegAccess(),
                dstOpnd->asDstRegRegion()->getBase(),
                dstOpnd->asDstRegRegion()->getRegOff(),
                new_SubRegOff,
                1,
                Type_W);
            d = createDstRegRegion(new_dst);
        }
    }

    createSendInst(
        NULL, d, payload, 1 + simdMode / 2,
        ((simdMode == 8) ? 32 : (numEnabledChannels * 16)) * TypeSize(Type_F) / numEltPerGRF<Type_UB>(),
        G4_ExecSize(simdMode),
        descFc, SFID::SAMPLER,
        1, SendAccess::READ_ONLY,
        surface, sampler, 0, false);

    return VISA_SUCCESS;
}
/* * 拡散方程式 * * Takaaki MINOMO * */ #include <iostream> #include <array> #include <cmath> #include <sprout/cmath.hpp> constexpr int N = 128; constexpr double Lx = 1.0; constexpr double Ly = 1.0; constexpr double cx = 1.0; constexpr double cy = 1.0; constexpr double D = 0.0; constexpr double tLimit = 1000; constexpr double dx = Lx / N; constexpr double dy = Ly / N; constexpr double dt = 0.002; constexpr double x = 0.; constexpr double y = 0.; constexpr int INTV = 3; namespace mino2357{ template <typename T = double> class extendedArray{ private: T* u; int Num; public: extendedArray(int n){ Num = n; u = new T[(Num + 1) * (Num + 1)];} ~extendedArray(){ delete[] u;} constexpr T& operator()(int, int); }; template <typename T> constexpr T& extendedArray<T>::operator()(int i, int j){ return u[(j%Num) * Num + (i%Num)]; } template <typename T = double> constexpr T initFunc(T x, T y) noexcept { T a = 5.0; T b = 5.0; return std::exp(- 25.0 *((x - a * Lx / 10.0) * (x - a * Lx / 10.0) + (y - b * Ly / 10.0) * (y - b * Ly / 10.0))); } template <typename T = double> constexpr void makeInitFunc(extendedArray<T>& u) noexcept { for(int i=0; i<=N; ++i){ for(int j=0; j<=N; ++j){ u(i, j) = initFunc<T>(i * dx, j * dx); } } } } int main(){ double t = 0.0; auto u1 = mino2357::extendedArray<>(N); auto u2 = mino2357::extendedArray<>(N); mino2357::makeInitFunc<>(u1); std::cout << D * dt / (dx * dx) << std::endl; if(cx * dt / dx >=1.0 || cy * dt / dy >= 1.0 || D * dt / (dx * dx) > 0.5 || D * dt / (dy * dy) > 0.5){ std::cout << "安定性条件を満たしていません." 
<< std::endl; } /**********************************************************************/ /* 可視化の設定(gnuplot) */ /**********************************************************************/ std::FILE *gp = popen( "gnuplot -persist", "w" ); fprintf(gp, "set pm3d\n"); //fprintf(gp, "set pm3d map\n"); fprintf(gp, "set contour\n"); fprintf(gp, "set xr [0:%f]\n", Lx); fprintf(gp, "set yr [0:%f]\n", Ly); fprintf(gp, "set zr [0.0:1.0]\n"); fprintf(gp, "set size square\n"); //fprintf(gp, "set grid\n"); //fprintf(gp, "unset key\n"); //初期条件描画 fprintf(gp, "splot '-'w l\n"); for(int i=0; i<=N; ++i){ for(int j=0; j<=N; ++j){ fprintf(gp, "%f %f %f\n", x + i * dx, y + j * dy, u1(i, j)); } fprintf(gp, "\n"); } fprintf(gp, "e\n"); fflush(gp); std::cout << "Enterキーを押してください." << std::endl; getchar(); //タイムループ for(int it = 0; t<tLimit; ++it) { //拡散 /* for(int i=0; i<=N; ++i){ for(int j=0; j<=N; ++j){ u2(i, j) = u1(i, j) + D * dt / (dx * dx) * (u1(i-1, j) - 2.0 * u1(i, j) + u1(i+1, j)) + D * dt / (dy * dy) * (u1(i, j-1) - 2.0 * u1(i, j) + u1(i, j+1)); } } */ //移流 cx > 0 cy > 0 の対応しかまだしてない. for(int i=0; i<=N; ++i){ for(int j=0; j<=N; ++j){ u2(i, j) = u1(i,j) - cx * dt / dx * (u1(i,j) - u1(i-1,j)) - cy * dt / dy * (u1(i, j) - u1(i, j-1)); } } //u2の描画 if(it%INTV == 0){ fprintf(gp, "splot '-' w l\n"); for(int i=0; i<=N; ++i){ for(int j=0; j<=N; ++j){ fprintf(gp, "%f %f %f\n", x + i * dx, y + j * dy, u2(i, j)); } fprintf(gp, "\n"); } fprintf(gp, "e\n"); fflush(gp); } for(int i=0; i<=N; ++i){ for(int j=0; j<=N; ++j){ u1(i, j) = u2(i, j); } } } //FILEポインタの解放 pclose(gp); }
#include "Graph.h" //Author : Suvojit Manna //Application : CurveSim //Initialize Graph as a Null graph Graph::Graph() { this->V = 0; this->E = 0; } //Initiate with the number of Vertex Graph::Graph(const size_t n) { this->V = n; this->E = 0; //Set size of adjadency list to no of Vertex G.resize(n); } //Adds a new vertex to the Graph void Graph::add_vertex(const std::bitset<STATES> &state) { //check if already in Graph if (vertexID.find(state) != vertexID.end()) return; //else proceed to add in graph size_t stateID = V; //assign state ID std::pair<std::bitset<STATES>, size_t> vertex(state, stateID); //add to map vertexID.insert(vertex); //add to adjadency list std::vector<Edge> adj; G.push_back(adj); //increment vertex count ++V; inDegree.push_back(0); } //Add a new edge to the Graph void Graph::add_edge(const std::bitset<STATES> &u, const std::bitset<STATES> &v, const size_t latency) { //Find state IDs size_t uid = vertexID.find(u)->second; size_t vid = vertexID.find(v)->second; //Create a new Edge and insert Edge uv(u, uid, v, vid, latency); G[uid].push_back(uv); //Increment Edge Count ++E; //Increment Indegree Count inDegree[vid] += 1; //TODO : Add Exception Check } //Return all adjacent Edges of vertex const std::vector<Edge>& Graph::adj(const std::bitset<STATES> &vertex) const { size_t vID = vertexID.find(vertex)->second; return G[vID]; } //Return |V| size_t Graph::v_count(void) const { return V; } //Return |E| size_t Graph::e_count(void) const { return E; } //Return all vertex as map vertex to ID const std::unordered_map<std::bitset<STATES>, size_t>& Graph::vertex(void) const { return vertexID; } //Return Outdegree of a vertex size_t Graph::out_degree(const std::bitset<STATES> &vertex) { size_t vID = vertexID.find(vertex)->second; return G[vID].size(); } //Return Indegree of a vertex size_t Graph::in_degree(const std::bitset<STATES> &vertex) { size_t vID = vertexID.find(vertex)->second; return inDegree[vID]; } //Return true if Vertex is Present bool 
Graph::has_vertex(const std::bitset<STATES> &vertex) { return vertexID.find(vertex) != vertexID.end(); } //Return String representation of the Graph std::string Graph::to_string(size_t stateLen) const { std::string graphStr; //For all vertex for (auto &state : vertexID) { //TODO : Format String graphStr += "State : " + state.first.to_string().substr(STATES - stateLen) + "\n"; //For all edges of the vertex for (auto &edge : G[state.second]) graphStr += edge.to_string(stateLen) + "\n"; } return graphStr; }
#include "Timer.h"
#include <stdio.h>
#include <algorithm>

using std::cerr;
using std::endl;
using std::max;

// ----------------------------------------------------------------------------
// Singleton instance, created lazily by Timer::Instance().
Timer *Timer::instance = NULL;
// ----------------------------------------------------------------------------

// Return the elapsed wall-clock time between two TIMEINFO samples, in
// seconds.  The representation of TIMEINFO is platform dependent, hence
// the preprocessor branches.
static double
DiffTime(const struct TIMEINFO &startTime, const struct TIMEINFO &endTime)
{
#if defined(_WIN32)
    //
    // Figure out how many milliseconds between start and end times
    //
    int ms = (int) difftime(endTime.time, startTime.time);
    if (ms == 0)
    {
        ms = endTime.millitm - startTime.millitm;
    }
    else
    {
        ms =  ((ms - 1) * 1000);
        ms += (1000 - startTime.millitm) + endTime.millitm;
    }
    double seconds = (ms/1000.);
#elif defined(HAVE_CLOCK_GETTIME) && defined(HAVE_CLOCK_PROCESS_CPUTIME_ID)
    // timespec: seconds + nanoseconds
    double seconds = double(endTime.tv_sec - startTime.tv_sec) +
                     double(endTime.tv_nsec - startTime.tv_nsec) / 1.0e9;
#elif defined(HAVE_GETTIMEOFDAY)
    // timeval: seconds + microseconds
    double seconds = double(endTime.tv_sec - startTime.tv_sec) +
                     double(endTime.tv_usec - startTime.tv_usec) / 1000000.;
#else
#   error No supported timer available.
#endif
    return seconds;
}

// Sample the current time into timeInfo using whichever clock the
// platform provides.
static void
GetCurrentTimeInfo(struct TIMEINFO &timeInfo)
{
#if defined(_WIN32)
    _ftime(&timeInfo);
#elif defined(HAVE_CLOCK_GETTIME) && defined(HAVE_CLOCK_PROCESS_CPUTIME_ID)
    clock_gettime( CLOCK_REALTIME, &timeInfo );
#elif defined(HAVE_GETTIMEOFDAY)
    gettimeofday(&timeInfo, 0);
#else
#   error No supported timer available.
#endif
}

// ****************************************************************************
//  Constructor:  Timer::Timer
//
//  Programmer:  Jeremy Meredith
//  Creation:    August  9, 2004
//
// ****************************************************************************
Timer::Timer()
{
    // Initialize some timer methods and reserve some space.
    startTimes.reserve(1000);
    timeLengths.reserve(1000);
    descriptions.reserve(1000);
    currentActiveTimers = 0;
}

// ****************************************************************************
//  Destructor:
//
//  Programmer:  Jeremy Meredith
//  Creation:    August  9, 2004
//
// ****************************************************************************
Timer::~Timer()
{
    // nothing to do
}

// ****************************************************************************
//  Method:  Timer::Instance
//
//  Purpose:
//    Return the timer singleton.
//
//  Arguments:
//
//
//  Programmer:  Jeremy Meredith
//  Creation:    August  9, 2004
//
// ****************************************************************************
Timer *Timer::Instance()
{
    if (!instance)
    {
        instance = new Timer;
    }
    return instance;
}

// ****************************************************************************
//  Method:  Timer::Start
//
//  Purpose:
//    Start a timer, and return a handle.
//
//  Arguments:
//    none
//
//  Programmer:  Jeremy Meredith
//  Creation:    August  9, 2004
//
// ****************************************************************************
int Timer::Start()
{
    return Instance()->real_Start();
}

// ****************************************************************************
//  Method:  Timer::Stop
//
//  Purpose:
//    Stop a timer and add its length to our list.
//
//  Arguments:
//    handle       a timer handle returned by Timer::Start
//    description  a description for the event timed
//
//  Programmer:  Jeremy Meredith
//  Creation:    August  9, 2004
//
// ****************************************************************************
double Timer::Stop(int handle, const std::string &description)
{
    return Instance()->real_Stop(handle, description);
}

// ****************************************************************************
//  Method:  Timer::Insert
//
//  Purpose:
//    Add a user-generated (e.g.
calculated) timing to the list // // Arguments: // desription a description for the event timed // value the runtime to insert // // Programmer: Jeremy Meredith // Creation: October 22, 2007 // // **************************************************************************** void Timer::Insert(const std::string &description, double value) { Instance()->real_Insert(description, value); } // **************************************************************************** // Method: Timer::Dump // // Purpose: // Add timings to on ostream. // // Arguments: // out the stream to print to. // // Programmer: Jeremy Meredith // Creation: August 9, 2004 // // **************************************************************************** void Timer::Dump(std::ostream &out) { return Instance()->real_Dump(out); } // **************************************************************************** // Method: Timer::real_Start // // Purpose: // the true start routine // // Arguments: // none // // Programmer: Jeremy Meredith // Creation: August 9, 2004 // // **************************************************************************** int Timer::real_Start() { int handle = startTimes.size(); currentActiveTimers++; struct TIMEINFO t; GetCurrentTimeInfo(t); startTimes.push_back(t); return handle; } // **************************************************************************** // Method: Timer::real_Stop // // Purpose: // the true stop routine // // Arguments: // handle a timer handle returned by Timer::Start // desription a description for the event timed // // Programmer: Jeremy Meredith // Creation: August 9, 2004 // // **************************************************************************** double Timer::real_Stop(int handle, const std::string &description) { if ((unsigned int)handle > startTimes.size()) { cerr << "Invalid timer handle '"<<handle<<"'\n"; exit(1); } struct TIMEINFO t; GetCurrentTimeInfo(t); double length = DiffTime(startTimes[handle], t); timeLengths.push_back(length); 
char str[2048]; sprintf(str, "%*s%s", currentActiveTimers*3, " ", description.c_str()); descriptions.push_back(str); currentActiveTimers--; return length; } // **************************************************************************** // Method: Timer::real_Insert // // Purpose: // the true insert routine // // Arguments: // desription a description for the event timed // value the run time to insert // // Programmer: Jeremy Meredith // Creation: October 22, 2007 // // **************************************************************************** void Timer::real_Insert(const std::string &description, double value) { #if 0 // can disable inserting just to make sure it isn't broken cerr << description << " " << value << endl; #else timeLengths.push_back(value); char str[2048]; sprintf(str, "%*s[%s]", (currentActiveTimers+1)*3, " ", description.c_str()); descriptions.push_back(str); #endif } // **************************************************************************** // Method: Timer::real_Dump // // Purpose: // the true dump routine // // Arguments: // out the stream to print to. // // Programmer: Jeremy Meredith // Creation: August 9, 2004 // // **************************************************************************** void Timer::real_Dump(std::ostream &out) { size_t maxlen = 0; for (unsigned int i=0; i<descriptions.size(); i++) maxlen = max(maxlen, descriptions[i].length()); out << "\nTimings\n-------\n"; for (unsigned int i=0; i<descriptions.size(); i++) { char desc[10000]; sprintf(desc, "%-*s", (int)maxlen, descriptions[i].c_str()); out << desc << " took " << timeLengths[i] << endl; } }
// Copyright 2015-2019 Autoware Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef NORMAL_DISTRIBUTIONS_TRANSFORM_PCL_MODIFIED_HPP
#define NORMAL_DISTRIBUTIONS_TRANSFORM_PCL_MODIFIED_HPP

#include "ndt/pcl_modified.hpp"

#include <vector>

// Every member below is a thin forwarding wrapper: the class owns a
// pcl::NormalDistributionsTransformModified instance (ndt_ptr_) and simply
// delegates each call to it, adapting the PCL-modified NDT implementation to
// the common NDT interface used by the callers.

template <class PointSource, class PointTarget>
NormalDistributionsTransformPCLModified<
  PointSource, PointTarget>::NormalDistributionsTransformPCLModified()
: ndt_ptr_(new pcl::NormalDistributionsTransformModified<PointSource, PointTarget>)
{
}

// Run the alignment starting from `guess`, writing the aligned cloud to `output`.
template <class PointSource, class PointTarget>
void NormalDistributionsTransformPCLModified<PointSource, PointTarget>::align(
  pcl::PointCloud<PointSource> & output, const Eigen::Matrix4f & guess)
{
  ndt_ptr_->align(output, guess);
}

template <class PointSource, class PointTarget>
void NormalDistributionsTransformPCLModified<PointSource, PointTarget>::setInputTarget(
  const boost::shared_ptr<pcl::PointCloud<PointTarget>> & map_ptr)
{
  ndt_ptr_->setInputTarget(map_ptr);
}

template <class PointSource, class PointTarget>
void NormalDistributionsTransformPCLModified<PointSource, PointTarget>::setInputSource(
  const boost::shared_ptr<pcl::PointCloud<PointSource>> & scan_ptr)
{
  ndt_ptr_->setInputSource(scan_ptr);
}

// --- forwarded NDT parameter setters ---

template <class PointSource, class PointTarget>
void NormalDistributionsTransformPCLModified<PointSource, PointTarget>::setMaximumIterations(
  int max_iter)
{
  ndt_ptr_->setMaximumIterations(max_iter);
}

template <class PointSource, class PointTarget>
void NormalDistributionsTransformPCLModified<PointSource, PointTarget>::setResolution(float res)
{
  ndt_ptr_->setResolution(res);
}

template <class PointSource, class PointTarget>
void NormalDistributionsTransformPCLModified<PointSource, PointTarget>::setStepSize(
  double step_size)
{
  ndt_ptr_->setStepSize(step_size);
}

template <class PointSource, class PointTarget>
void NormalDistributionsTransformPCLModified<PointSource, PointTarget>::setTransformationEpsilon(
  double trans_eps)
{
  ndt_ptr_->setTransformationEpsilon(trans_eps);
}

// --- forwarded getters ---

template <class PointSource, class PointTarget>
int NormalDistributionsTransformPCLModified<PointSource, PointTarget>::getMaximumIterations()
{
  return ndt_ptr_->getMaximumIterations();
}

template <class PointSource, class PointTarget>
int NormalDistributionsTransformPCLModified<PointSource, PointTarget>::getFinalNumIteration() const
{
  return ndt_ptr_->getFinalNumIteration();
}

template <class PointSource, class PointTarget>
float NormalDistributionsTransformPCLModified<PointSource, PointTarget>::getResolution() const
{
  return ndt_ptr_->getResolution();
}

template <class PointSource, class PointTarget>
double NormalDistributionsTransformPCLModified<PointSource, PointTarget>::getStepSize() const
{
  return ndt_ptr_->getStepSize();
}

template <class PointSource, class PointTarget>
double
NormalDistributionsTransformPCLModified<PointSource, PointTarget>::getTransformationEpsilon()
{
  return ndt_ptr_->getTransformationEpsilon();
}

template <class PointSource, class PointTarget>
double NormalDistributionsTransformPCLModified<
  PointSource, PointTarget>::getTransformationProbability() const
{
  return ndt_ptr_->getTransformationProbability();
}

template <class PointSource, class PointTarget>
double NormalDistributionsTransformPCLModified<PointSource, PointTarget>::getFitnessScore()
{
  return ndt_ptr_->getFitnessScore();
}

template <class PointSource, class PointTarget>
boost::shared_ptr<const pcl::PointCloud<PointTarget>>
NormalDistributionsTransformPCLModified<PointSource, PointTarget>::getInputTarget() const
{
  return ndt_ptr_->getInputTarget();
}

template <class PointSource, class PointTarget>
boost::shared_ptr<const pcl::PointCloud<PointSource>>
NormalDistributionsTransformPCLModified<PointSource, PointTarget>::getInputSource() const
{
  return ndt_ptr_->getInputSource();
}

template <class PointSource, class PointTarget>
Eigen::Matrix4f
NormalDistributionsTransformPCLModified<PointSource, PointTarget>::getFinalTransformation() const
{
  return ndt_ptr_->getFinalTransformation();
}

template <class PointSource, class PointTarget>
std::vector<Eigen::Matrix4f> NormalDistributionsTransformPCLModified<
  PointSource, PointTarget>::getFinalTransformationArray() const
{
  return ndt_ptr_->getFinalTransformationArray();
}

template <class PointSource, class PointTarget>
Eigen::Matrix<double, 6, 6>
NormalDistributionsTransformPCLModified<PointSource, PointTarget>::getHessian() const
{
  return ndt_ptr_->getHessian();
}

template <class PointSource, class PointTarget>
boost::shared_ptr<pcl::search::KdTree<PointTarget>>
NormalDistributionsTransformPCLModified<PointSource, PointTarget>::getSearchMethodTarget() const
{
  return ndt_ptr_->getSearchMethodTarget();
}

#endif  // NORMAL_DISTRIBUTIONS_TRANSFORM_PCL_MODIFIED_HPP
#include "directorconfigclient.h" // POSIX++ #include <climits> #include <cstdio> // PUT #include <put/object.h> #include <put/cxxutils/hashing.h> #include <put/cxxutils/syslogstream.h> #ifndef SCFS_PATH #define SCFS_PATH "/svc" #endif #ifndef DIRECTOR_CONFIG_DIR #define DIRECTOR_CONFIG_DIR "/etc/" DIRECTOR_USERNAME #endif #define NO_CONNECTION_TO_CONFIGURATION_PROVIDER 0x10 #define UNABLE_TO_READ_CONFIGURATION_DIRECTORY 0x11 #define UNABLE_TO_READ_CONFIGURATION 0x11 #define UNABLE_TO_PARSE_CONFIGURATION 0x12 #ifndef NO_CONFIG_FALLBACK #include <dirent.h> #include <put/cxxutils/configmanip.h> static const char* extract_provider_name(const char* filename) { char provider[NAME_MAX] = { 0 }; const char* start = posix::strrchr(filename, '/'); const char* end = posix::strrchr(filename, '.'); if(start == nullptr || // if '/' NOT found OR end == nullptr || // '.' found AND end < start || // occur in the incorrect order OR posix::strcmp(end, ".conf")) // doesn't end with ".conf" return nullptr; return posix::strncpy(provider, start + 1, posix::size_t(end - start + 1)); // extract provider name } static const char* director_configfilename(const char* filename) { // construct config filename static char fullpath[PATH_MAX]; posix::memset(fullpath, 0, PATH_MAX); if(posix::snprintf(fullpath, PATH_MAX, "%s/%s", DIRECTOR_CONFIG_DIR, filename) == posix::error_response) // I don't how this could fail return nullptr; // unable to build config filename return fullpath; } static bool readconfig(const char* name, std::string& buffer) { posix::FILE* file = posix::fopen(name, "rb"); if(file == nullptr) { posix::syslog << posix::priority::warning << "Unable to open file: %1 : %2" << name << posix::strerror(errno) << posix::eom; return false; } buffer.clear(); buffer.resize(posix::size_t(posix::ftell(file)), '\n'); if(buffer.size()) { posix::rewind(file); posix::fread(const_cast<char*>(buffer.data()), sizeof(std::string::value_type), buffer.size(), file); } posix::fclose(file); return 
true;
}
#endif

// Construct the client: route incoming messages to receive() and schedule an
// initial resync on the event loop.
DirectorConfigClient::DirectorConfigClient(void) noexcept
  : m_sync(false)
{
  Object::connect(newMessage, this, &DirectorConfigClient::receive);
  Object::singleShot(this, &DirectorConfigClient::resync, errno = posix::success_response);
}

// Return the key/value map for `config`, or an empty map if unknown.
const std::unordered_map<std::string, std::string>& DirectorConfigClient::data(const std::string& config) const
{
  static const std::unordered_map<std::string, std::string> nullval;
  auto pos = m_data.find(config);
  if(pos == m_data.end())
    return nullval;
  return pos->second;
}

// Drop all cached data and re-establish the connection to the configuration
// provider.  If the provider is unreachable, either quit
// (NO_CONFIG_FALLBACK) or fall back to reading the config files directly.
void DirectorConfigClient::resync(posix::error_t errcode) noexcept
{
  m_data.clear();
  m_sync = false;
  if(isConnected())
    disconnect();

  bool try_connecting = true;
  if(errcode != posix::success_response) // if not the first connection attempt
  {
    // connection counter stuff here
  }

  if(try_connecting &&
     connect(SCFS_PATH CONFIG_DIRECTOR_SOCKET) &&
     write(vfifo("RPC", "syncCall"), posix::invalid_descriptor)) // no errors!
  {
  }
  else
  {
    if(try_connecting)
    {
      if(!isConnected())
        posix::syslog << posix::priority::warning << "Unable to connect to socket file %1" << SCFS_PATH CONFIG_DIRECTOR_SOCKET << posix::eom;
      else
        posix::syslog << posix::priority::warning << "Connection error for socket file %1 : %2" << SCFS_PATH CONFIG_DIRECTOR_SOCKET << posix::strerror(errno) << posix::eom;
    }
#ifdef NO_CONFIG_FALLBACK
    Application::quit(NO_CONNECTION_TO_CONFIGURATION_PROVIDER);
#else
    posix::syslog << posix::priority::warning << "Continuing without configuration provider connection for Director.  Falling back on direct file access."
                  << posix::eom;

    // Fallback path: scan DIRECTOR_CONFIG_DIR for "*.conf" files and parse
    // each one into m_data keyed by provider name.
    DIR* dir = ::opendir(DIRECTOR_CONFIG_DIR);
    dirent* entry = nullptr;
    const char* provider = nullptr;
    const char* filename = nullptr;

    if(dir == nullptr)
    {
      posix::syslog << posix::priority::critical << "Unable to read directory of Director configuation files: %1" << DIRECTOR_CONFIG_DIR << posix::eom;
    }
    else
    {
      std::string buffer;
      ConfigManip tmp_config;
      while((entry = ::readdir(dir)) != nullptr)
      {
        if(entry->d_name[0] == '.') // skip dot files/dirs
          continue;
        if((provider = extract_provider_name (entry->d_name)) == nullptr || // if provider name extraction failed OR
           (filename = director_configfilename(entry->d_name)) == nullptr) // failed to build filename
          continue; // skip file

        tmp_config.clear();
        if(!readconfig(filename, buffer))
        {
          posix::syslog << posix::priority::critical << "Unable to read Director configuation file: %1 : %2" << filename << posix::strerror(errno) << posix::eom;
        }
        else if(!tmp_config.importText(buffer))
        {
          posix::syslog << posix::priority::critical << "Parsing failed will processing Director configuation file: %1" << filename << posix::eom;
        }
        // NOTE(review): exportKeyPairs runs even if read/parse failed above,
        // installing an empty map for the provider - confirm intended.
        tmp_config.exportKeyPairs(m_data[provider]);
      }
      m_sync = true;
      Object::enqueue(synchronized);
    }
#endif
  }
}

// Cache a key/value pair locally (no RPC).
void DirectorConfigClient::valueSet(const std::string& config, const std::string& key, const std::string& value) noexcept
  { m_data[config][key] = value; }

// Remove a key and every child key (any key that starts with `key`) from the
// local cache.
void DirectorConfigClient::valueUnset(const std::string& config, const std::string& key) noexcept
{
  auto configdata = m_data.find(config);
  if(configdata != m_data.end())
  {
    auto pos = configdata->second.begin();
    while(pos != configdata->second.end()) // search for key or children
    {
      if(pos->first.find(key) == 0) // key starts with search key (could be a child)
        pos = configdata->second.erase(pos); // delete it
      else
        ++pos;
    }
  }
}

// List the names of all cached configs.
std::list<std::string> DirectorConfigClient::listConfigs(void) const noexcept
{
  std::list<std::string> names;
  for(const auto& pair : m_data)
    names.emplace_back(pair.first);
  return names;
}

const std::string&
DirectorConfigClient::get(const std::string& config, const std::string& key) const noexcept { static std::string nullvalue; auto configdata = m_data.find(config); if(configdata == m_data.end()) return nullvalue; auto keydata = configdata->second.find(key); if(keydata == configdata->second.end()) return nullvalue; return keydata->second; } void DirectorConfigClient::set(const std::string& config, const std::string& key, const std::string& value) noexcept { valueSet(config, key, value); if(isConnected() && !write(vfifo("RPC", "setCall", config, key, value), posix::invalid_descriptor)) Object::singleShot(this, &DirectorConfigClient::resync, errno); } void DirectorConfigClient::unset(const std::string& config, const std::string& key) noexcept { valueUnset(config, key); if(isConnected() && !write(vfifo("RPC", "unsetCall", config, key), posix::invalid_descriptor)) Object::singleShot(this, &DirectorConfigClient::resync, errno); } void DirectorConfigClient::receive(posix::fd_t socket, vfifo buffer, posix::fd_t fd) noexcept { (void)socket; (void)fd; posix::error_t errcode; std::string str, config, key, value; if(!(buffer >> str).hadError() && str == "RPC" && !(buffer >> str).hadError()) { switch(hash(str)) { case "syncReturn"_hash: { buffer >> errcode; if(buffer.hadError() || errcode != posix::success_response) Object::singleShot(this, &DirectorConfigClient::resync, errcode); else if(errcode == posix::success_response) { m_sync = true; Object::enqueue(synchronized); } } break; case "valueSet"_hash: { buffer >> config >> key >> value; if(buffer.hadError()) Object::singleShot(this, &DirectorConfigClient::resync, errcode); else valueSet(config, key, value); } break; case "valueUnset"_hash: { buffer >> config >> key; if(buffer.hadError()) Object::singleShot(this, &DirectorConfigClient::resync, errcode); else valueUnset(config, key); } break; case "unsetReturn"_hash: { buffer >> errcode >> config >> key; if(buffer.hadError() || errcode != posix::success_response) 
Object::singleShot(this, &DirectorConfigClient::resync, errcode); } break; case "setReturn"_hash: { buffer >> errcode >> config >> key; if(buffer.hadError() || errcode != posix::success_response) Object::singleShot(this, &DirectorConfigClient::resync, errcode); } break; } } }
//===--- Bridging.cpp - Bridging imported Clang types to Swift ------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See http://swift.org/LICENSE.txt for license information
// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This file defines routines relating to bridging Swift types to C types,
// working in concert with the Clang importer.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "libsil"
#include "swift/SIL/SILType.h"
#include "swift/SIL/SILModule.h"
#include "swift/AST/Decl.h"
#include "swift/AST/DiagnosticsSIL.h"
#include "swift/Basic/Fallthrough.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"

using namespace swift;
using namespace swift::Lowering;

/// Lower the type of a global variable using its abstraction pattern.
/// The returned SILType is always the object type (never an address type).
SILType TypeConverter::getLoweredTypeOfGlobal(VarDecl *var) {
  AbstractionPattern origType = getAbstractionPattern(var);
  assert(!origType.isTypeParameter());
  return getLoweredType(origType, origType.getType()).getObjectType();
}

/// Bridge a function input type for the given representation.
/// Tuple inputs are bridged element-wise (the tuple is only rebuilt when at
/// least one element actually changed); a type that cannot be bridged is
/// diagnosed and treated as a fatal error.
CanType TypeConverter::getBridgedInputType(SILFunctionTypeRepresentation rep,
                                           AbstractionPattern pattern,
                                           CanType input) {
  if (auto tuple = dyn_cast<TupleType>(input)) {
    SmallVector<TupleTypeElt, 4> bridgedFields;
    bool changed = false;
    for (unsigned i : indices(tuple->getElements())) {
      auto &elt = tuple->getElement(i);
      Type bridged = getLoweredBridgedType(pattern.getTupleElementType(i),
                                           elt.getType(), rep,
                                           TypeConverter::ForArgument);
      if (!bridged) {
        Context.Diags.diagnose(SourceLoc(), diag::could_not_find_bridge_type,
                               elt.getType());
        llvm::report_fatal_error("unable to set up the ObjC bridge!");
      }
      CanType canBridged = bridged->getCanonicalType();
      if (canBridged != CanType(elt.getType())) {
        changed = true;
        bridgedFields.push_back(elt.getWithType(canBridged));
      } else {
        bridgedFields.push_back(elt);
      }
    }
    // Reuse the original tuple when no element was bridged.
    if (!changed)
      return input;
    return CanType(TupleType::get(bridgedFields, input->getASTContext()));
  }

  // Non-tuple inputs are bridged as a single argument.
  auto loweredBridgedType = getLoweredBridgedType(pattern, input, rep,
                                                  TypeConverter::ForArgument);
  if (!loweredBridgedType) {
    Context.Diags.diagnose(SourceLoc(), diag::could_not_find_bridge_type,
                           input);
    llvm::report_fatal_error("unable to set up the ObjC bridge!");
  }
  return loweredBridgedType->getCanonicalType();
}

/// Bridge a result type.
/// \p suppressOptional selects the non-optional-result bridging purpose.
CanType TypeConverter::getBridgedResultType(SILFunctionTypeRepresentation rep,
                                            AbstractionPattern pattern,
                                            CanType result,
                                            bool suppressOptional) {
  auto loweredType =
    getLoweredBridgedType(pattern, result, rep,
                          suppressOptional
                            ? TypeConverter::ForNonOptionalResult
                            : TypeConverter::ForResult);
  if (!loweredType) {
    Context.Diags.diagnose(SourceLoc(), diag::could_not_find_bridge_type,
                           result);
    llvm::report_fatal_error("unable to set up the ObjC bridge!");
  }
  return loweredType->getCanonicalType();
}

/// Map a Swift type to its bridged form for the given function representation
/// and purpose (argument vs. result).  Native conventions are left untouched;
/// C / ObjC conventions are mapped back to bridged types below.
Type TypeConverter::getLoweredBridgedType(AbstractionPattern pattern,
                                          Type t,
                                          SILFunctionTypeRepresentation rep,
                                          BridgedTypePurpose purpose) {
  switch (rep) {
  case SILFunctionTypeRepresentation::Thick:
  case SILFunctionTypeRepresentation::Thin:
  case SILFunctionTypeRepresentation::Method:
  case SILFunctionTypeRepresentation::WitnessMethod:
  case SILFunctionTypeRepresentation::Closure:
    // No bridging needed for native CCs.
    return t;
  case SILFunctionTypeRepresentation::CFunctionPointer:
  case SILFunctionTypeRepresentation::ObjCMethod:
  case SILFunctionTypeRepresentation::Block:
    // Map native types back to bridged types.
    // Bool is only eligible for BOOL bridging under the ObjC method CC.
    bool canBridgeBool = (rep == SILFunctionTypeRepresentation::ObjCMethod);

    // Look through optional types.
    // Peel one level of optionality: bridge the wrapped value type, then
    // restore the same optional kind around the bridged type (or propagate
    // the null Type if bridging failed).
    OptionalTypeKind optKind;
    if (auto valueTy = t->getAnyOptionalObjectType(optKind)) {
      pattern = pattern.transformType([](CanType patternTy) {
        return CanType(patternTy->getAnyOptionalObjectType());
      });
      auto ty = getLoweredCBridgedType(pattern, valueTy, canBridgeBool, false);
      return ty ? OptionalType::get(optKind, ty) : ty;
    }

    return getLoweredCBridgedType(pattern, t, canBridgeBool,
                                  purpose == ForResult);
  } // end switch
}
// NOTE(review): stray ';' after the function body — a harmless empty
// declaration, kept as-is.
;

/// Core C/ObjC bridging logic: maps Bool, metatypes, `Any`, function types,
/// and foreign-representable types to their ObjC-bridged equivalents.
Type TypeConverter::getLoweredCBridgedType(AbstractionPattern pattern, Type t,
                                           bool canBridgeBool,
                                           bool bridgedCollectionsAreOptional) {
  // Clang type from the abstraction pattern, when the pattern carries one.
  auto clangTy = pattern.isClangType() ? pattern.getClangType() : nullptr;

  // Bridge Bool back to ObjC bool, unless the original Clang type was _Bool
  // or the Darwin Boolean type.
  auto nativeBoolTy = getBoolType();
  if (nativeBoolTy && t->isEqual(nativeBoolTy)) {
    if (clangTy) {
      if (clangTy->isBooleanType())
        return t;
      // UChar here matches the Darwin Boolean typedef's underlying type.
      if (clangTy->isSpecificBuiltinType(clang::BuiltinType::UChar))
        return getDarwinBooleanType();
    }
    if (clangTy || canBridgeBool)
      return getObjCBoolType();
    return t;
  }

  // Class metatypes bridge to ObjC metatypes.
  if (auto metaTy = t->getAs<MetatypeType>()) {
    if (metaTy->getInstanceType()->getClassOrBoundGenericClass()) {
      return MetatypeType::get(metaTy->getInstanceType(),
                               MetatypeRepresentation::ObjC);
    }
  }

  // ObjC-compatible existential metatypes.
  if (auto metaTy = t->getAs<ExistentialMetatypeType>()) {
    if (metaTy->getInstanceType()->isObjCExistentialType()) {
      return ExistentialMetatypeType::get(metaTy->getInstanceType(),
                                          MetatypeRepresentation::ObjC);
    }
  }

  // `Any` can bridge to `AnyObject` (`id` in ObjC).
  if (t->isAny()) {
    return Context.getProtocol(KnownProtocolKind::AnyObject)->getDeclaredType();
  }

  if (auto funTy = t->getAs<FunctionType>()) {
    switch (funTy->getExtInfo().getSILRepresentation()) {
    // Functions that are already represented as blocks or C function pointers
    // don't need bridging.
    case SILFunctionType::Representation::Block:
    case SILFunctionType::Representation::CFunctionPointer:
    case SILFunctionType::Representation::Thin:
    case SILFunctionType::Representation::Method:
    case SILFunctionType::Representation::ObjCMethod:
    case SILFunctionType::Representation::WitnessMethod:
    case SILFunctionType::Representation::Closure:
      return t;
    case SILFunctionType::Representation::Thick: {
      // Thick functions (TODO: conditionally) get bridged to blocks.
      // This bridging is more powerful than usual block bridging, however,
      // so we use the ObjCMethod representation.
      Type newInput =
          getBridgedInputType(SILFunctionType::Representation::ObjCMethod,
                              pattern.getFunctionInputType(),
                              funTy->getInput()->getCanonicalType());
      Type newResult =
          getBridgedResultType(SILFunctionType::Representation::ObjCMethod,
                               pattern.getFunctionResultType(),
                               funTy->getResult()->getCanonicalType(),
                               /*non-optional*/false);
      return FunctionType::get(newInput, newResult,
                               funTy->getExtInfo().withSILRepresentation(
                                   SILFunctionType::Representation::Block));
    }
    }
  }

  // Ask the type how it is representable in ObjC; Bridged/StaticBridged
  // types swap in their _ObjectiveCType witness.
  auto foreignRepresentation =
    t->getForeignRepresentableIn(ForeignLanguage::ObjectiveC, M.TheSwiftModule);
  switch (foreignRepresentation.first) {
  case ForeignRepresentableKind::None:
  case ForeignRepresentableKind::Trivial:
  case ForeignRepresentableKind::Object:
    return t;

  case ForeignRepresentableKind::Bridged:
  case ForeignRepresentableKind::StaticBridged: {
    auto conformance = foreignRepresentation.second;
    assert(conformance && "Missing conformance?");
    Type bridgedTy =
      ProtocolConformance::getTypeWitnessByName(
        t, conformance, M.getASTContext().Id_ObjectiveCType, nullptr);
    assert(bridgedTy && "Missing _ObjectiveCType witness?");
    // Collections imported from Clang may be made optional here.
    if (bridgedCollectionsAreOptional && clangTy)
      bridgedTy = OptionalType::get(bridgedTy);
    return bridgedTy;
  }

  case ForeignRepresentableKind::BridgedError: {
    auto nsErrorDecl = M.getASTContext().getNSErrorDecl();
    assert(nsErrorDecl && "Cannot bridge when NSError isn't available");
    return nsErrorDecl->getDeclaredInterfaceType();
  }
  }

  // Anything else is left unbridged.
  return t;
}
/*
ID: tushar.4
PROG: gift1
LANG: C++11
*/
// USACO "Greedy Gift Givers": track each person's net money after everyone
// divides an amount evenly among their gift recipients.
#include <iostream>
#include <fstream>
#include <string>
#include <sstream>
#include <unordered_set>
#include <set>
#include <unordered_map>
#include <map>
#include <vector>
#include <queue>
#include <deque>
#include <array>
#include <forward_list>
#include <list>
#include <stack>
#include <algorithm>
#include <functional>
#include <limits>
#include <memory>
#include <tuple>
#include <initializer_list>
#include <utility>
#include <iterator>
#include <bitset>

using namespace std;

int main()
{
    ofstream fout("gift1.out");
    ifstream fin("gift1.in");

    // First line: number of people.
    int np{0};
    string line{};
    getline(fin, line);
    stringstream(line) >> np;
    if(np == 0) return 0;

    // `records` holds each person's net balance; `names` preserves input order
    // for the final report.
    unordered_map<string, int> records;
    vector<string> names(np);
    for(int i = 0; i < np; ++i){
        getline(fin, line);
        names[i] = line;
        records[line] = 0;
    }

    // Each group: giver's name, then "<amount> <givers>", then one recipient
    // name per line.  The loop ends on EOF / a blank line.
    line = "";
    getline(fin, line);
    while(!line.empty()){
        string name{line};
        int amount{0};
        int givers{0};
        getline(fin, line);
        stringstream(line) >> amount >> givers;
        // Deduct the full amount given away (negate only positive amounts).
        records[name] += amount > 0 ? amount * -1 : amount;
        int share_per_person{0};
        if(givers != 0) share_per_person = amount / givers;  // integer split
        // The remainder that doesn't divide evenly stays with the giver.
        int remaining = amount - (share_per_person * givers);
        records[name] += remaining;
        // Credit each recipient with one share.
        for(int j = 0; j < givers; ++j){
            getline(fin, line);
            records[line] += share_per_person;
        }
        getline(fin, line);  // advance to the next giver (or EOF)
    }

    // Report balances in the original input order.
    for(const auto& name : names){
        fout << name << ' ' << records[name] << '\n';
    }
    return 0;
}
//================================================================================================= /*! // \file src/mathtest/smatdmatmult/SCaSDa.cpp // \brief Source file for the SCaSDa sparse matrix/dense matrix multiplication math test // // Copyright (C) 2012-2019 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/
//=================================================================================================


//*************************************************************************************************
// Includes
//*************************************************************************************************

#include <cstdlib>
#include <iostream>
#include <blaze/math/CompressedMatrix.h>
#include <blaze/math/DynamicMatrix.h>
#include <blaze/math/SymmetricMatrix.h>
#include <blazetest/mathtest/Creator.h>
#include <blazetest/mathtest/smatdmatmult/OperationTest.h>
#include <blazetest/system/MathTest.h>

#ifdef BLAZE_USE_HPX_THREADS
#  include <hpx/hpx_main.hpp>
#endif


//=================================================================================================
//
//  MAIN FUNCTION
//
//=================================================================================================

//*************************************************************************************************
// Test driver: multiplies symmetric compressed (sparse) matrices by symmetric
// dynamic (dense) matrices over a sweep of sizes/densities, then a fixed set
// of larger sizes.  Any test failure surfaces as a std::exception.
int main()
{
   std::cout << " Running 'SCaSDa'..." << std::endl;

   using blazetest::mathtest::TypeA;

   try
   {
      // Matrix type definitions
      using SCa = blaze::SymmetricMatrix< blaze::CompressedMatrix<TypeA> >;
      using SDa = blaze::SymmetricMatrix< blaze::DynamicMatrix<TypeA> >;

      // Creator type definitions
      using CSCa = blazetest::Creator<SCa>;
      using CSDa = blazetest::Creator<SDa>;

      // Running tests with small matrices
      // (i = matrix size, j = number of non-zeros, bounded by i*i)
      for( size_t i=0UL; i<=6UL; ++i ) {
         for( size_t j=0UL; j<=i*i; ++j ) {
            RUN_SMATDMATMULT_OPERATION_TEST( CSCa( i, j ), CSDa( i ) );
         }
      }

      // Running tests with large matrices
      // (odd/prime and power-of-two sizes exercise different kernel paths)
      RUN_SMATDMATMULT_OPERATION_TEST( CSCa( 31UL, 7UL ), CSDa( 31UL ) );
      RUN_SMATDMATMULT_OPERATION_TEST( CSCa( 67UL, 7UL ), CSDa( 67UL ) );
      RUN_SMATDMATMULT_OPERATION_TEST( CSCa( 127UL, 13UL ), CSDa( 127UL ) );
      RUN_SMATDMATMULT_OPERATION_TEST( CSCa( 32UL, 8UL ), CSDa( 32UL ) );
      RUN_SMATDMATMULT_OPERATION_TEST( CSCa( 64UL, 8UL ), CSDa( 64UL ) );
      RUN_SMATDMATMULT_OPERATION_TEST( CSCa( 128UL, 16UL ), CSDa( 128UL ) );
   }
   catch( std::exception& ex ) {
      std::cerr << "\n\n ERROR DETECTED during sparse matrix/dense matrix multiplication:\n"
                << ex.what() << "\n";
      return EXIT_FAILURE;
   }

   return EXIT_SUCCESS;
}
//*************************************************************************************************
/*
 * Copyright (c) 2016 MariaDB Corporation Ab
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file and at www.mariadb.com/bsl11.
 *
 * Change Date: 2024-08-24
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2 or later of the General
 * Public License.
 */

// All log messages from this module are prefixed with this
#define MXS_MODULE_NAME "commentfilter"

#include "commentfiltersession.hh"
#include "commentfilter.hh"
#include <maxscale/modutil.hh>
#include <string>
#include <regex>

using namespace std;

// Per-session state for the comment filter: prepends a configured comment
// to every SQL statement that passes through.
CommentFilterSession::CommentFilterSession(MXS_SESSION* pSession, const CommentFilter* pFilter)
    : maxscale::FilterSession(pSession)
    , m_filter(*pFilter)
{
}

CommentFilterSession::~CommentFilterSession()
{
}

// static
CommentFilterSession* CommentFilterSession::create(MXS_SESSION* pSession, const CommentFilter* pFilter)
{
    return new CommentFilterSession(pSession, pFilter);
}

void CommentFilterSession::close()
{
}

// Rewrites SQL packets to "/* <comment> */<original sql>" before routing
// them downstream.  Non-SQL packets pass through untouched.
int CommentFilterSession::routeQuery(GWBUF* pPacket)
{
    if (modutil_is_SQL(pPacket))
    {
        string sql = mxs::extract_sql(pPacket);
        string comment = parseComment(m_filter.comment());
        string newsql = string("/* ").append(comment).append(" */").append(sql);
        pPacket = modutil_replace_SQL(pPacket, (char*)newsql.c_str());
        // maxscale expects contiguous memory to arrive from client so we must make the buffer contiguous
        // after using modutil_replace_SQL.
        GWBUF* pModified_packet = gwbuf_make_contiguous(pPacket);
        if (pModified_packet)
        {
            pPacket = pModified_packet;
        }
        else
        {
            // Contiguization failed: drop the packet.
            gwbuf_free(pPacket);
            pPacket = NULL;
        }
    }
    // NOTE(review): returns 1 without routing when the packet was dropped
    // above — confirm this matches the routeQuery success convention.
    return pPacket ? mxs::FilterSession::routeQuery(pPacket) : 1;
}

// Replies pass through unchanged.
int CommentFilterSession::clientReply(GWBUF* pPacket)
{
    return mxs::FilterSession::clientReply(pPacket);
}

// TODO this probably should be refactored in some way in case we add more variables
// Expands the "$IP" placeholder in the configured comment with the client's
// remote address.
string CommentFilterSession::parseComment(string comment)
{
    string ip = m_pSession->client_dcb->remote;
    string parsedComment = std::regex_replace(comment, std::regex("\\$IP"), ip);
    return parsedComment;
}
#include <bits/stdc++.h>
using namespace std;

/*
Suppose an arithmetic expression is given as a binary tree. Each leaf is an
integer and each internal node is one of '+', '-', '*', or '/'. Given the root
to such a tree, write a function to evaluate it.

For example, given the following tree:

        *
       / \
      +   +
     / \ / \
    3  2 4  5

You should return 45, as it is (3 + 2) * (4 + 5).
*/

// Expression-tree node: leaves hold a digit character ('0'..'9'),
// internal nodes hold an operator character.
struct node{
    char data;
    node *left, *right;
};

// Allocates a new node with the given payload and no children.
node* newNode(char data){
    node* root = new node;
    root->data = data;
    root->left = root->right = nullptr;
    return root;
}

// Applies the binary operator `op` to num1 and num2.
// Note: '/' is integer division and assumes num2 != 0.
int evaluateExp(int num1, int num2, char op){
    switch(op){
        case '+' : return num1 + num2;
        case '-' : return num1 - num2;
        case '*' : return num1 * num2;
        default  : return num1 / num2;
    }
}

// Recursively evaluates the tree; a leaf contributes its single-digit value,
// an empty tree evaluates to 0.
int evaluateTree(node* root){
    if(!root) return 0;
    if(!root->left && !root->right){
        return (root->data - '0');
    }
    int left_eval = evaluateTree(root->left), right_eval = evaluateTree(root->right);
    return evaluateExp(left_eval, right_eval, root->data);
}

// Post-order deletion of the whole tree.
// BUGFIX: the original program never freed the nodes it allocated with `new`,
// leaking every node.
static void freeTree(node* root){
    if(!root) return;
    freeTree(root->left);
    freeTree(root->right);
    delete root;
}

// main function: builds the example tree (3+2)*(4+5) and prints 45.
int main(){
    node* root = newNode('*');
    root->left = newNode('+');
    root->right = newNode('+');
    root->left->left = newNode('3');
    root->left->right = newNode('2');
    root->right->left = newNode('4');
    root->right->right = newNode('5');
    cout << evaluateTree(root) << "\n";
    freeTree(root);
    return 0;
}
// Autogenerated from CppHeaderCreator // Created by Sc2ad // ========================================================================= #pragma once // Begin includes #include "beatsaber-hook/shared/utils/typedefs.h" #include "beatsaber-hook/shared/utils/byref.hpp" // Including type: System.Enum #include "System/Enum.hpp" // Completed includes // Type namespace: OVR.OpenVR namespace OVR::OpenVR { // Forward declaring type: EVRTrackedCameraFrameLayout struct EVRTrackedCameraFrameLayout; } #include "beatsaber-hook/shared/utils/il2cpp-type-check.hpp" DEFINE_IL2CPP_ARG_TYPE(::OVR::OpenVR::EVRTrackedCameraFrameLayout, "OVR.OpenVR", "EVRTrackedCameraFrameLayout"); // Type namespace: OVR.OpenVR namespace OVR::OpenVR { // Size: 0x4 #pragma pack(push, 1) // Autogenerated type: OVR.OpenVR.EVRTrackedCameraFrameLayout // [TokenAttribute] Offset: FFFFFFFF struct EVRTrackedCameraFrameLayout/*, public ::System::Enum*/ { public: public: // public System.Int32 value__ // Size: 0x4 // Offset: 0x0 int value; // Field size check static_assert(sizeof(int) == 0x4); public: // Creating value type constructor for type: EVRTrackedCameraFrameLayout constexpr EVRTrackedCameraFrameLayout(int value_ = {}) noexcept : value{value_} {} // Creating interface conversion operator: operator ::System::Enum operator ::System::Enum() noexcept { return *reinterpret_cast<::System::Enum*>(this); } // Creating conversion operator: operator int constexpr operator int() const noexcept { return value; } // static field const value: static public OVR.OpenVR.EVRTrackedCameraFrameLayout Mono static constexpr const int Mono = 1; // Get static field: static public OVR.OpenVR.EVRTrackedCameraFrameLayout Mono static ::OVR::OpenVR::EVRTrackedCameraFrameLayout _get_Mono(); // Set static field: static public OVR.OpenVR.EVRTrackedCameraFrameLayout Mono static void _set_Mono(::OVR::OpenVR::EVRTrackedCameraFrameLayout value); // static field const value: static public OVR.OpenVR.EVRTrackedCameraFrameLayout Stereo static 
constexpr const int Stereo = 2; // Get static field: static public OVR.OpenVR.EVRTrackedCameraFrameLayout Stereo static ::OVR::OpenVR::EVRTrackedCameraFrameLayout _get_Stereo(); // Set static field: static public OVR.OpenVR.EVRTrackedCameraFrameLayout Stereo static void _set_Stereo(::OVR::OpenVR::EVRTrackedCameraFrameLayout value); // static field const value: static public OVR.OpenVR.EVRTrackedCameraFrameLayout VerticalLayout static constexpr const int VerticalLayout = 16; // Get static field: static public OVR.OpenVR.EVRTrackedCameraFrameLayout VerticalLayout static ::OVR::OpenVR::EVRTrackedCameraFrameLayout _get_VerticalLayout(); // Set static field: static public OVR.OpenVR.EVRTrackedCameraFrameLayout VerticalLayout static void _set_VerticalLayout(::OVR::OpenVR::EVRTrackedCameraFrameLayout value); // static field const value: static public OVR.OpenVR.EVRTrackedCameraFrameLayout HorizontalLayout static constexpr const int HorizontalLayout = 32; // Get static field: static public OVR.OpenVR.EVRTrackedCameraFrameLayout HorizontalLayout static ::OVR::OpenVR::EVRTrackedCameraFrameLayout _get_HorizontalLayout(); // Set static field: static public OVR.OpenVR.EVRTrackedCameraFrameLayout HorizontalLayout static void _set_HorizontalLayout(::OVR::OpenVR::EVRTrackedCameraFrameLayout value); // Get instance field reference: public System.Int32 value__ int& dyn_value__(); }; // OVR.OpenVR.EVRTrackedCameraFrameLayout #pragma pack(pop) static check_size<sizeof(EVRTrackedCameraFrameLayout), 0 + sizeof(int)> __OVR_OpenVR_EVRTrackedCameraFrameLayoutSizeCheck; static_assert(sizeof(EVRTrackedCameraFrameLayout) == 0x4); } #include "beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp"
/* FILENAME... devPIE816.cc USAGE... Motor record device level support for Physik Instrumente (PI) GmbH & Co. E-816 motor controller. Version: 1.1 Modified By: sullivan Last Modified: 2007/03/30 20:01:05 */ /* * Original Author: Ron Sluiter * Date: 12/17/03 * Current Author: Joe Sullivan * * Experimental Physics and Industrial Control System (EPICS) * * Copyright 1991, the Regents of the University of California, * and the University of Chicago Board of Governors. * * This software was produced under U.S. Government contracts: * (W-7405-ENG-36) at the Los Alamos National Laboratory, * and (W-31-109-ENG-38) at Argonne National Laboratory. * * Initial development by: * The Controls and Automation Group (AT-8) * Ground Test Accelerator * Accelerator Technology Division * Los Alamos National Laboratory * * Co-developed with * The Controls and Computing Group * Accelerator Systems Division * Advanced Photon Source * Argonne National Laboratory * * Modification Log: * ----------------- * .01 01/29/06 jps - copied from devPIE710.cc */ #include <string.h> #include "motorRecord.h" #include "motor.h" #include "motordevCom.h" #include "drvPIE816.h" #include "epicsExport.h" extern struct driver_table PIE816_access; /* ----------------Create the dsets for devPIE816----------------- */ static struct driver_table *drvtabptr; static long PIE816_init(void *); static long PIE816_init_record(void *); static long PIE816_start_trans(struct motorRecord *); static RTN_STATUS PIE816_build_trans(motor_cmnd, double *, struct motorRecord *); static RTN_STATUS PIE816_end_trans(struct motorRecord *); struct motor_dset devPIE816 = { {8, NULL, (DEVSUPFUN) PIE816_init, (DEVSUPFUN) PIE816_init_record, NULL}, motor_update_values, PIE816_start_trans, PIE816_build_trans, PIE816_end_trans }; extern "C" {epicsExportAddress(dset,devPIE816);} /* --------------------------- program data --------------------- */ /* This table is used to define the command types */ /* WARNING! 
this must match "motor_cmnd" in motor.h */ static msg_types PIE816_table[] = { MOTION, /* MOVE_ABS */ MOTION, /* MOVE_REL */ MOTION, /* HOME_FOR */ MOTION, /* HOME_REV */ IMMEDIATE, /* LOAD_POS */ IMMEDIATE, /* SET_VEL_BASE */ IMMEDIATE, /* SET_VELOCITY */ IMMEDIATE, /* SET_ACCEL */ IMMEDIATE, /* GO */ IMMEDIATE, /* SET_ENC_RATIO */ INFO, /* GET_INFO */ MOVE_TERM, /* STOP_AXIS */ VELOCITY, /* JOG */ IMMEDIATE, /* SET_PGAIN */ IMMEDIATE, /* SET_IGAIN */ IMMEDIATE, /* SET_DGAIN */ IMMEDIATE, /* ENABLE_TORQUE */ IMMEDIATE, /* DISABL_TORQUE */ IMMEDIATE, /* PRIMITIVE */ IMMEDIATE, /* SET_HIGH_LIMIT */ IMMEDIATE, /* SET_LOW_LIMIT */ VELOCITY /* JOG_VELOCITY */ }; static struct board_stat **PIE816_cards; /* --------------------------- program data --------------------- */ /* initialize device support for PIE816 stepper motor */ static long PIE816_init(void *arg) { long rtnval; int after = (arg == 0) ? 0 : 1; if (after == 0) { drvtabptr = &PIE816_access; (drvtabptr->init)(); } rtnval = motor_init_com(after, *drvtabptr->cardcnt_ptr, drvtabptr, &PIE816_cards); return(rtnval); } /* initialize a record instance */ static long PIE816_init_record(void *arg) { struct motorRecord *mr = (struct motorRecord *) arg; /* Disable change of direction testing in record support */ /* This is a closed-loop device */ mr->ntm = menuYesNoNO; return(motor_init_record_com(mr, *drvtabptr->cardcnt_ptr, drvtabptr, PIE816_cards)); } /* start building a transaction */ static long PIE816_start_trans(struct motorRecord *mr) { motor_start_trans_com(mr, PIE816_cards); return(OK); } /* end building a transaction */ static RTN_STATUS PIE816_end_trans(struct motorRecord *mr) { motor_end_trans_com(mr, drvtabptr); return(OK); } /* add a part to the transaction */ static RTN_STATUS PIE816_build_trans(motor_cmnd command, double *parms, struct motorRecord *mr) { struct motor_trans *trans = (struct motor_trans *) mr->dpvt; struct mess_node *motor_call; struct controller *brdptr; struct PIE816controller *cntrl; 
    char buff[110];
    int card, maxdigits;
    unsigned int size;
    double dval, cntrl_units, res;
    RTN_STATUS rtnval;
    bool send;

    send = true;    /* Default to send motor command. */
    rtnval = OK;
    buff[0] = '\0';

    /* Protect against NULL pointer with WRITE_MSG(GO/STOP_AXIS/GET_INFO, NULL). */
    dval = (parms == NULL) ? 0.0 : *parms;

    rtnval = (RTN_STATUS) motor_start_trans_com(mr, PIE816_cards);
    motor_call = &(trans->motor_call);
    card = motor_call->card;
    brdptr = (*trans->tabptr->card_array)[card];
    if (brdptr == NULL)
        return(rtnval = ERROR);

    cntrl = (struct PIE816controller *) brdptr->DevicePrivate;
    /* Per-axis scale factor converting record units to controller units. */
    res = cntrl->drive_resolution[motor_call->signal];
    cntrl_units = dval;
    maxdigits = 3;  /* decimal places formatted into controller commands */

    /* Promote the message type if this command is "stronger" (e.g. MOTION). */
    if (PIE816_table[command] > motor_call->type)
        motor_call->type = PIE816_table[command];

    if (trans->state != BUILD_STATE)
        return(rtnval = ERROR);

    if (command == PRIMITIVE && mr->init != NULL && strlen(mr->init) != 0)
        strcat(motor_call->message, mr->init);

    /* For motion commands, prepend the pre-move string and remember the
     * post-move string for later. */
    switch (command)
    {
        case MOVE_ABS:
        case MOVE_REL:
        case HOME_FOR:
        case HOME_REV:
        case JOG:
            if (strlen(mr->prem) != 0)
            {
                strcat(motor_call->message, mr->prem);
                strcat(motor_call->message, EOL_E816);
            }
            if (strlen(mr->post) != 0)
                motor_call->postmsgptr = (char *) &mr->post;
            break;

        default:
            break;
    }

    /* Translate the command into E-816 syntax (or mark it unsupported). */
    switch (command)
    {
        case MOVE_ABS:
            sprintf(buff, "MOV #%.*f", maxdigits, (cntrl_units * res));
            strcat(buff, EOL_E816);
            break;

        case MOVE_REL:
            sprintf(buff, "MVR #%.*f", maxdigits, (cntrl_units * res));
            strcat(buff, EOL_E816);
            break;

        case HOME_FOR:
        case HOME_REV:
            rtnval = ERROR;  /* homing not supported */
            break;

        case LOAD_POS:
            rtnval = ERROR;  /* position redefinition not supported */
            break;

        case SET_VEL_BASE:
            send = false;   /* DC motor; not base velocity. */
            break;

        case SET_VELOCITY:
            sprintf(buff, "VEL #%.*f", maxdigits, (cntrl_units * res));
            strcat(buff, EOL_E816);
            break;

        case ENABLE_TORQUE:
            strcpy(buff, "SVO #1");  /* servo (closed loop) on */
            strcat(buff, EOL_E816);
            break;

        case DISABL_TORQUE:
            strcpy(buff, "SVO #0");  /* servo off */
            strcat(buff, EOL_E816);
            break;

        case SET_ACCEL:
            /* The PIE816 does not support acceleration commands. */
        case GO:
            /* The PIE816 starts moving immediately on move commands, GO command
             * does nothing. */
            send = false;
            break;

        case PRIMITIVE:
        case GET_INFO:
            /* These commands are not actually done by sending a message, but
               rather they will indirectly cause the driver to read the status
               of all motors */
            break;

        case STOP_AXIS:
            /* No stop command available - use move relative 0 */
            sprintf(buff, "MVR #0");
            strcat(buff, EOL_E816);
            break;

        case JOG_VELOCITY:
        case JOG:
            /* NOTE(review): unlike SET_VELOCITY, this omits the `res` scale
             * factor on cntrl_units — confirm that is intentional. */
            sprintf(buff, "VEL #%.*f", maxdigits, cntrl_units);
            strcat(buff, EOL_E816);
            break;

        case SET_PGAIN:
            send = false;
            break;

        case SET_IGAIN:
            send = false;
            break;

        case SET_DGAIN:
            send = false;
            break;

        case SET_HIGH_LIMIT:
        case SET_LOW_LIMIT:
        case SET_ENC_RATIO:
            trans->state = IDLE_STATE;  /* No command sent to the controller. */
            send = false;
            break;

        default:
            send = false;
            rtnval = ERROR;
    }

    /* Append the built command (if any) and close out the transaction. */
    size = strlen(buff);
    if (send == false)
        return(rtnval);
    else if (size > sizeof(buff) || (strlen(motor_call->message) + size) > MAX_MSG_SIZE)
        errlogMessage("PIE816_build_trans(): buffer overflow.\n");
    else
    {
        strcat(motor_call->message, buff);
        rtnval = motor_end_trans_com(mr, drvtabptr);
    }
    return(rtnval);
}
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#include "olap/olap_meta.h"

#include <sstream>
#include <vector>

#include "common/logging.h"
#include "olap/olap_define.h"
#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/slice.h"
#include "rocksdb/slice_transform.h"
#include "util/doris_metrics.h"
#include "util/runtime_profile.h"

using rocksdb::DB;
using rocksdb::DBOptions;
using rocksdb::ColumnFamilyDescriptor;
using rocksdb::ColumnFamilyHandle;
using rocksdb::ColumnFamilyOptions;
using rocksdb::ReadOptions;
using rocksdb::WriteOptions;
using rocksdb::Slice;
using rocksdb::Iterator;
using rocksdb::kDefaultColumnFamilyName;
using rocksdb::NewFixedPrefixTransform;

namespace doris {

// RocksDB database directory, appended to the storage root path.
const std::string META_POSTFIX = "/meta";
// Key-prefix length used by the meta column family's prefix extractor.
const size_t PREFIX_LENGTH = 4;

OlapMeta::OlapMeta(const std::string& root_path) : _root_path(root_path), _db(nullptr) {}

// Releases all column family handles before closing the DB.
OlapMeta::~OlapMeta() {
    if (_db != nullptr) {
        for (auto& handle : _handles) {
            _db->DestroyColumnFamilyHandle(handle);
            handle = nullptr;
        }
        delete _db;
        _db = nullptr;
    }
}

// Opens (creating if needed) the RocksDB instance with the three column
// families this store uses; their handles are kept in _handles.
Status OlapMeta::init() {
    // init db
    DBOptions options;
    options.IncreaseParallelism();
    options.create_if_missing = true;
    options.create_missing_column_families = true;
    std::string db_path = _root_path + META_POSTFIX;
    std::vector<ColumnFamilyDescriptor> column_families;
    // default column family is required
    column_families.emplace_back(DEFAULT_COLUMN_FAMILY, ColumnFamilyOptions());
    column_families.emplace_back(DORIS_COLUMN_FAMILY, ColumnFamilyOptions());
    // meta column family add prefix extractor to improve performance and ensure correctness
    ColumnFamilyOptions meta_column_family;
    meta_column_family.prefix_extractor.reset(NewFixedPrefixTransform(PREFIX_LENGTH));
    column_families.emplace_back(META_COLUMN_FAMILY, meta_column_family);
    rocksdb::Status s = DB::Open(options, db_path, column_families, &_handles, &_db);
    if (!s.ok() || _db == nullptr) {
        LOG(WARNING) << "rocks db open failed, reason:" << s.ToString();
        return Status::OLAPInternalError(OLAP_ERR_META_OPEN_DB);
    }
    return Status::OK();
}

// Reads `key` from the given column family into *value, timing the call for
// metrics.  Missing keys map to OLAP_ERR_META_KEY_NOT_FOUND.
Status OlapMeta::get(const int column_family_index, const std::string& key, std::string* value) {
    DorisMetrics::instance()->meta_read_request_total->increment(1);
    rocksdb::ColumnFamilyHandle* handle = _handles[column_family_index];
    int64_t duration_ns = 0;
    rocksdb::Status s;
    {
        SCOPED_RAW_TIMER(&duration_ns);
        s = _db->Get(ReadOptions(), handle, rocksdb::Slice(key), value);
    }
    DorisMetrics::instance()->meta_read_request_duration_us->increment(duration_ns / 1000);
    if (s.IsNotFound()) {
        return Status::OLAPInternalError(OLAP_ERR_META_KEY_NOT_FOUND);
    } else if (!s.ok()) {
        LOG(WARNING) << "rocks db get key:" << key << " failed, reason:" << s.ToString();
        return Status::OLAPInternalError(OLAP_ERR_META_GET);
    }
    return Status::OK();
}

// Fast, possibly-false-positive existence probe (RocksDB KeyMayExist);
// the metrics update and return happen in the continuation below.
bool OlapMeta::key_may_exist(const int column_family_index, const std::string& key,
                             std::string* value) {
    DorisMetrics::instance()->meta_read_request_total->increment(1);
    rocksdb::ColumnFamilyHandle* handle = _handles[column_family_index];
    int64_t duration_ns = 0;
    bool is_exist = false;
    {
        SCOPED_RAW_TIMER(&duration_ns);
        is_exist = _db->KeyMayExist(ReadOptions(), handle, rocksdb::Slice(key), value);
    }
DorisMetrics::instance()->meta_read_request_duration_us->increment(duration_ns / 1000); return is_exist; } Status OlapMeta::put(const int column_family_index, const std::string& key, const std::string& value) { DorisMetrics::instance()->meta_write_request_total->increment(1); rocksdb::ColumnFamilyHandle* handle = _handles[column_family_index]; int64_t duration_ns = 0; rocksdb::Status s; { SCOPED_RAW_TIMER(&duration_ns); WriteOptions write_options; write_options.sync = config::sync_tablet_meta; s = _db->Put(write_options, handle, rocksdb::Slice(key), rocksdb::Slice(value)); } DorisMetrics::instance()->meta_write_request_duration_us->increment(duration_ns / 1000); if (!s.ok()) { LOG(WARNING) << "rocks db put key:" << key << " failed, reason:" << s.ToString(); return Status::OLAPInternalError(OLAP_ERR_META_PUT); } return Status::OK(); } Status OlapMeta::remove(const int column_family_index, const std::string& key) { DorisMetrics::instance()->meta_write_request_total->increment(1); rocksdb::ColumnFamilyHandle* handle = _handles[column_family_index]; rocksdb::Status s; int64_t duration_ns = 0; { SCOPED_RAW_TIMER(&duration_ns); WriteOptions write_options; write_options.sync = config::sync_tablet_meta; s = _db->Delete(write_options, handle, rocksdb::Slice(key)); } DorisMetrics::instance()->meta_write_request_duration_us->increment(duration_ns / 1000); if (!s.ok()) { LOG(WARNING) << "rocks db delete key:" << key << " failed, reason:" << s.ToString(); return Status::OLAPInternalError(OLAP_ERR_META_DELETE); } return Status::OK(); } Status OlapMeta::iterate( const int column_family_index, const std::string& prefix, std::function<bool(const std::string&, const std::string&)> const& func) { rocksdb::ColumnFamilyHandle* handle = _handles[column_family_index]; std::unique_ptr<Iterator> it(_db->NewIterator(ReadOptions(), handle)); if (prefix == "") { it->SeekToFirst(); } else { it->Seek(prefix); } rocksdb::Status status = it->status(); if (!status.ok()) { LOG(WARNING) << "rocksdb 
seek failed. reason:" << status.ToString(); return Status::OLAPInternalError(OLAP_ERR_META_ITERATOR); } for (; it->Valid(); it->Next()) { if (prefix != "") { if (!it->key().starts_with(prefix)) { return Status::OK(); } } std::string key = it->key().ToString(); std::string value = it->value().ToString(); bool ret = func(key, value); if (!ret) { break; } } if (!it->status().ok()) { LOG(WARNING) << "rocksdb iterator failed. reason:" << status.ToString(); return Status::OLAPInternalError(OLAP_ERR_META_ITERATOR); } return Status::OK(); } std::string OlapMeta::get_root_path() { return _root_path; } } // namespace doris
#include "ix/bplus.h"
#include "ix/bplusnode.h"
#include <iostream>
using namespace std;

// BPlusNode: a thin view over one on-disk page of a B+ tree index.
// Page layout (byte offsets), as implied by the accessors below:
//   [0..1]   short: entry count          (spage[0])
//   [2..3]   short: node type            (spage[1])
//   [4..7]   int:   parent page id       (ipage[1])
//   [8..9]   short: slot in parent       (spage[4])
//   [10..]   repeated blocks of (getAttrLen() + 6) bytes each:
//              6 bytes of child-pointer / RID, followed by the key value.
// NOTE(review): spage/ipage are presumably short*/int* aliases of 'page'
// declared in the header — confirm there.

// Fetch (or freshly allocate) the underlying buffer-pool page and pin it
// in owner->usedPages so it is not evicted while in use.
void BPlusNode::getPage(bool isNewPage)
{
    if (isNewPage)
        page = (CharBufType)owner->bpm->allocPage(owner->fileId, pageId, pageIndex);
    else
        page = (CharBufType)owner->bpm->getPage(owner->fileId, pageId, pageIndex);
    owner->usedPages.insert(pageIndex);
}

// Flush the page back to disk and unpin it.
void BPlusNode::release()
{
    owner->bpm->writeBack(pageIndex);
    owner->usedPages.erase(pageIndex);
}

// Zero the payload area (everything past the 10-byte header prefix that
// precedes the first block) and mark the page dirty.
void BPlusNode::clear()
{
    memset(page + 10, 0, PAGE_SIZE - 10);
    owner->bpm->markDirty(pageIndex);
    owner->usedPages.insert(pageIndex);
}

// Length in bytes of one key value, taken from the owning index.
int BPlusNode::getAttrLen() const { return owner->attrLen; }

// --- header-field accessors; every setter marks the page dirty and re-pins it ---

short BPlusNode::count() const { return spage[0]; }
void BPlusNode::setCount(short _count)
{
    spage[0] = _count;
    owner->bpm->markDirty(pageIndex);
    owner->usedPages.insert(pageIndex);
}

short BPlusNode::type() const { return spage[1]; }
void BPlusNode::setType(short _type)
{
    spage[1] = _type;
    owner->bpm->markDirty(pageIndex);
    owner->usedPages.insert(pageIndex);
}

int BPlusNode::parent() const { return ipage[1]; }
void BPlusNode::setParent(int _p)
{
    ipage[1] = _p;
    owner->bpm->markDirty(pageIndex);
    owner->usedPages.insert(pageIndex);
}

short BPlusNode::parentPtr() const { return spage[4]; }
void BPlusNode::setParentPtr(short _pp)
{
    spage[4] = _pp;
    owner->bpm->markDirty(pageIndex);
    owner->usedPages.insert(pageIndex);
}

// --- whole-block accessors: a block is the 6 pointer bytes plus the key ---

// Start of block i = start of its value minus the 6 pointer/RID bytes.
void* BPlusNode::block(int i) { return (void*)((uchar*)val(i) - 6); }

void BPlusNode::setBlock(int i, void* pbData)
{
    memcpy(block(i), pbData, getAttrLen() + 6);
    owner->bpm->markDirty(pageIndex);
    owner->usedPages.insert(pageIndex);
}

void BPlusNode::clearBlock(int i)
{
    memset(block(i), 0, getAttrLen() + 6);
    owner->bpm->markDirty(pageIndex);
    owner->usedPages.insert(pageIndex);
}

// --- key-value accessors (value lives 6 bytes after the block start) ---

void* BPlusNode::val(int i) const
{
    int offs = 16 + (getAttrLen() + 6) * i;
    return (void*)(page + offs);
}

void BPlusNode::setVal(int i, void* pData)
{
    int offs = 16 + (getAttrLen() + 6) * i;
    memcpy(page + offs, pData, getAttrLen());
    owner->bpm->markDirty(pageIndex);
    owner->usedPages.insert(pageIndex);
}

void BPlusNode::clearVal(int i)
{
    int offs = 16 + (getAttrLen() + 6) * i;
    memset(page + offs, 0, getAttrLen());
    owner->bpm->markDirty(pageIndex);
    owner->usedPages.insert(pageIndex);
}

// --- child-pointer accessors (internal nodes): 4-byte page id at block start ---

int BPlusNode::child(int i) const
{
    int offs = 10 + (getAttrLen() + 6) * i;
    return *((int*)(page + offs));
}

void BPlusNode::setChild(int i, int ch)
{
    int offs = 10 + (getAttrLen() + 6) * i;
    *((int*)(page + offs)) = ch;
    owner->bpm->markDirty(pageIndex);
    owner->usedPages.insert(pageIndex);
}

// The rightmost pointer sits in the slot one past the last key (fanOut).
int BPlusNode::lastPtr() const { return child(owner->fanOut); }
void BPlusNode::setLastPtr(int p) { setChild(owner->fanOut, p); }

// --- record-id accessors (leaf nodes): 4-byte page + 2-byte slot ---

RID BPlusNode::rec(int i) const
{
    int offs = 10 + (getAttrLen() + 6) * i;
    int* ptrPage = (int*)(page + offs);
    short* ptrSlot = (short*)(page + offs + 4);
    return RID(*ptrPage, *ptrSlot);
}

void BPlusNode::setRec(int i, const RID& rid)
{
    int offs = 10 + (getAttrLen() + 6) * i;
    int* ptrPage = (int*)(page + offs);
    short* ptrSlot = (short*)(page + offs + 4);
    *ptrPage = rid.getPage();
    *ptrSlot = rid.getSlot();
    owner->bpm->markDirty(pageIndex);
    owner->usedPages.insert(pageIndex);
}
/*************************************************************************
Crytek Source File. Copyright (C), Crytek Studios, 2001-2004.
-------------------------------------------------------------------------
$Id$
$DateTime$
-------------------------------------------------------------------------
History:
- 30:8:2005 12:30 : Created by Márcio Martins
*************************************************************************/
#include "StdAfx.h"
#include "Item.h"
#include "ItemSharedParams.h"
#include <ICryAnimation.h>
#include <CryPath.h>
#include "Actor.h"
#include "Game.h"
#include "PlayerAnimation.h"
#include "StringUtils.h"
#include "ItemResourceCache.h"
#include "Weapon.h"
#include "ItemAnimation.h"

#if !defined(_RELEASE)
	#define DEBUG_ITEM_ACTIONS_ENABLED 1
#else
	#define DEBUG_ITEM_ACTIONS_ENABLED 0
#endif

namespace
{
	// Walk every StatObj attachment on the item's parent character and, for
	// the attachment whose object matches the item's geometry in 'slot',
	// swap in 'pMaterial' as a replacement material.
	void OverrideAttachmentMaterial(IMaterial* pMaterial, CItem* pItem, int slot)
	{
		IEntity* pEntity = pItem->GetEntity();
		SEntitySlotInfo info;
		if (!pEntity->GetSlotInfo(slot, info))
			return;

		EntityId parentId = pItem->GetParentId();
		IEntity* pParent = gEnv->pEntitySystem->GetEntity(parentId);
		// Only static-geometry slots can match a CGF attachment below.
		if (pParent && info.pStatObj)
		{
			ICharacterInstance* pCharacter = pParent->GetCharacter(slot);
			IAttachmentManager* pAttachmentManager = pCharacter ? pCharacter->GetIAttachmentManager() : 0;
			if (pAttachmentManager)
			{
				int attNum = pAttachmentManager->GetAttachmentCount();
				for (int attSlot = 0; attSlot < attNum; ++attSlot)
				{
					IAttachment* pAttachment = pAttachmentManager->GetInterfaceByIndex(attSlot);
					IAttachmentObject* pAttObject = pAttachment ? pAttachment->GetIAttachmentObject() : 0;
					if (pAttObject && pAttObject->GetAttachmentType() == IAttachmentObject::eAttachment_StatObj)
					{
						CCGFAttachment* pCGFAtt = static_cast<CCGFAttachment*>(pAttObject);
						// Match by object identity: this attachment renders
						// the same StatObj the item's slot holds.
						if (pCGFAtt->pObj == info.pStatObj)
						{
							pCGFAtt->SetReplacementMaterial(pMaterial,0);
						}
					}
				}
			}
		}
	}
}

//------------------------------------------------------------------------
// Deferred action object: switches the item to the stored hand when executed
// (presumably queued on the item's action/schedule system — see callers).
struct CItem::SwitchHandAction
{
	SwitchHandAction(CItem *_item, int _hand): item(_item), hand(_hand) {};
	CItem *item;
	int hand;

	void execute(CItem *_this)
	{
		item->SwitchToHand(hand);
	}
};

//------------------------------------------------------------------------
// Remove (or hide) this item's entity. In the editor the entity is only
// hidden unless 'force' is set; otherwise removal happens on the server
// (or when forced).
void CItem::RemoveEntity(bool force)
{
	if (gEnv->IsEditor() && !force)
		Hide(true);
	else if (IsServer() || force)
		gEnv->pEntitySystem->RemoveEntity(GetEntityId());
}

//------------------------------------------------------------------------
// Create a character attachment 'name' of the given type on bone 'bone' in
// 'slot'. Returns false if the character is missing, the attachment already
// exists, or creation fails.
bool CItem::CreateCharacterAttachment(int slot, const char *name, int type, const char *bone)
{
	ICharacterInstance *pCharacter = GetEntity()->GetCharacter(slot);
	if (!pCharacter)
		return false;

	IAttachmentManager *pAttachmentManager = pCharacter->GetIAttachmentManager();
	IAttachment *pAttachment = pAttachmentManager->GetInterfaceByName(name);

	if (pAttachment)
	{
		// Already present: treated as a no-op failure (warning intentionally disabled).
		// GameWarning("Item '%s' trying to create attachment '%s' which already exists!", GetEntity()->GetName(), name);
		return false;
	}

	pAttachment = pAttachmentManager->CreateAttachment(name, type, bone);

	if (!pAttachment)
	{
		if (type == CA_BONE)
			GameWarning("Item '%s' failed to create attachment '%s' on bone '%s'!", GetEntity()->GetName(), name, bone);
		return false;
	}

	return true;
}

//------------------------------------------------------------------------
// Remove the named attachment from the character in 'slot', if any.
void CItem::DestroyCharacterAttachment(int slot, const char *name)
{
	ICharacterInstance *pCharacter = GetEntity()->GetCharacter(slot);
	if (!pCharacter)
		return;

	IAttachmentManager *pAttachmentManager = pCharacter->GetIAttachmentManager();
	pAttachmentManager->RemoveAttachmentByName(name);
}
// Resolve which character instance an attachment operation targets:
// the owner's character (slot 0) when 'owner' is true, otherwise the
// item's own character in 'slot'. May return NULL.
ICharacterInstance* CItem::GetAppropriateCharacter(int slot, bool owner)
{
	ICharacterInstance* pCharacter = NULL;

	if(owner)
	{
		IEntity* pOwner = GetOwner();
		if(pOwner)
		{
			pCharacter = pOwner->GetCharacter(0);
		}
	}
	else
	{
		pCharacter = GetEntity()->GetCharacter(slot);
	}

	return pCharacter;
}

//------------------------------------------------------------------------
// Clear the binding of the named attachment. When 'attachedEntID' is given,
// only clear if the currently-bound entity matches it.
void CItem::ResetCharacterAttachment(int slot, const char *name, bool owner, EntityId attachedEntID)
{
	if(IAttachment * pAttachment = GetCharacterAttachment(GetAppropriateCharacter(slot, owner), name))
	{
		//--- Early out if this is not a matching entity. Very specific minimal fix for DT: 32803
		//--- where we know that the bug is going from entityAttachment to another entityAttachment
		if (attachedEntID && pAttachment->GetIAttachmentObject())
		{
			if (pAttachment->GetIAttachmentObject()->GetAttachmentType() == IAttachmentObject::eAttachment_Entity)
			{
				CEntityAttachment *entAttachment = (CEntityAttachment *)pAttachment->GetIAttachmentObject();
				if (attachedEntID != entAttachment->GetEntityId())
				{
					return;
				}
			}
		}

		pAttachment->ClearBinding();
	}
}

//------------------------------------------------------------------------
// Base material of the named attachment's bound object, or 0 when the
// attachment or its object is missing.
IMaterial* CItem::GetCharacterAttachmentMaterial(int slot, const char* name, bool owner)
{
	IAttachmentObject* pAttachmentObject = NULL;

	if(IAttachment * pAttachment = GetCharacterAttachment(GetAppropriateCharacter(slot, owner), name))
	{
		pAttachmentObject = pAttachment->GetIAttachmentObject();
	}

	return pAttachmentObject ? (IMaterial*)pAttachmentObject->GetBaseMaterial() : 0;
}

//------------------------------------------------------------------------
// Name of the joint the named attachment is bound to, or NULL if not found.
const char *CItem::GetCharacterAttachmentBone(int slot, const char *name)
{
	ICharacterInstance *pCharacter = GetEntity()->GetCharacter(slot);

	if(IAttachment * pAttachment = GetCharacterAttachment(pCharacter, name))
	{
		return pCharacter->GetIDefaultSkeleton().GetJointNameByID(pAttachment->GetJointID());
	}

	return NULL;
}

//------------------------------------------------------------------------
// Bind an entity to the named attachment and make the attachment visible.
void CItem::SetCharacterAttachment(int slot, const char *name, IEntity *pEntity, bool owner)
{
	if(IAttachment * pAttachment = GetCharacterAttachment(GetAppropriateCharacter(slot, owner), name))
	{
		CEntityAttachment *pEntityAttachment = new CEntityAttachment();
		pEntityAttachment->SetEntityId(pEntity->GetId());

		pAttachment->AddBinding(pEntityAttachment);
		pAttachment->HideAttachment(0);
	}
}

//------------------------------------------------------------------------
// Bind a static object to the named attachment.
void CItem::SetCharacterAttachment(int slot, const char *name, IStatObj *pObj, bool owner)
{
	if(IAttachment * pAttachment = GetCharacterAttachment(GetAppropriateCharacter(slot, owner), name))
	{
		CCGFAttachment *pStatAttachment = new CCGFAttachment();
		pStatAttachment->pObj = pObj;

		pAttachment->AddBinding(pStatAttachment);
	}
}

//------------------------------------------------------------------------
// Bind a character instance to the named attachment. Clears the attached
// character's CS_FLAG_UPDATE so it stops updating via its own entity.
void CItem::SetCharacterAttachment(int slot, const char *name, ICharacterInstance *pAttachedCharacter, bool owner)
{
	if(IAttachment * pAttachment = GetCharacterAttachment(GetAppropriateCharacter(slot, owner), name))
	{
		//--- Stop attached characters from updating via the entity
		pAttachedCharacter->SetFlags(pAttachedCharacter->GetFlags() & ~CS_FLAG_UPDATE);
		CSKELAttachment *pCharacterAttachment = new CSKELAttachment();
		pCharacterAttachment->m_pCharInstance = pAttachedCharacter;

		pAttachment->AddBinding(pCharacterAttachment);
	}
}

//------------------------------------------------------------------------
// Bind whatever lives in 'objSlot' of 'pEntity' (character or static object)
// to the named attachment, dispatching to the matching overload.
void CItem::SetCharacterAttachment(int slot, const char *name, IEntity *pEntity, int objSlot, bool owner)
{
	SEntitySlotInfo info;
	if (!pEntity->GetSlotInfo(objSlot, info))
		return;

	if (info.pCharacter)
		SetCharacterAttachment(slot, name, info.pCharacter, owner);
	else if (info.pStatObj)
		SetCharacterAttachment(slot, name, info.pStatObj, owner);
}

//------------------------------------------------------------------------
// Look up the named attachment on 'pCharacter'; warns (and returns NULL)
// when the character exists but the attachment does not.
IAttachment * CItem::GetCharacterAttachment(ICharacterInstance * pCharacter, const char *name) const
{
	if(pCharacter)
	{
		IAttachmentManager *pAttachmentManager = pCharacter->GetIAttachmentManager();
		if (IAttachment * pAttachment = pAttachmentManager->GetInterfaceByName(name))
		{
			return pAttachment;
		}
		GameWarning("Item '%s' trying to get attachment '%s' which does not exist!", GetEntity()->GetName(), name);
	}
	return NULL;
}

//------------------------------------------------------------------------
// Set the attachment's default relative transform from a local-space matrix.
void CItem::SetCharacterAttachmentLocalTM(int slot, const char *name, const Matrix34 &tm)
{
	if(IAttachment * pAttachment = GetCharacterAttachment(GetEntity()->GetCharacter(slot), name))
		pAttachment->SetAttRelativeDefault( QuatT(tm));
}

//------------------------------------------------------------------------
// Set the attachment's default transform from a WORLD-space matrix: the
// desired world transform is brought into the joint's space by inverting
// the joint's world matrix first.
void CItem::SetCharacterAttachmentWorldTM(int slot, const char *name, const Matrix34 &tm)
{
	ICharacterInstance *pCharacter = GetEntity()->GetCharacter(slot);

	if(IAttachment * pAttachment = GetCharacterAttachment(pCharacter, name))
	{
		Matrix34 boneWorldMatrix = GetEntity()->GetSlotWorldTM(slot) * Matrix34(pCharacter->GetISkeletonPose()->GetAbsJointByID(pAttachment->GetJointID()) );
		Matrix34 localAttachmentMatrix = (boneWorldMatrix.GetInverted()*tm);
		pAttachment->SetAttRelativeDefault(QuatT(localAttachmentMatrix));
	}
}

//------------------------------------------------------------------------
// Attachment's default relative transform, or identity if not found.
Matrix34 CItem::GetCharacterAttachmentLocalTM(int slot, const char *name)
{
	if(IAttachment * pAttachment = GetCharacterAttachment(GetEntity()->GetCharacter(slot), name))
	{
		return Matrix34(pAttachment->GetAttRelativeDefault());
	}
	else
	{
		return Matrix34::CreateIdentity();
	}
}

//------------------------------------------------------------------------
// Attachment's current world transform, or identity if not found.
Matrix34 CItem::GetCharacterAttachmentWorldTM(int slot, const char *name)
{
	if(IAttachment * pAttachment = GetCharacterAttachment(GetEntity()->GetCharacter(slot), name))
	{
		return Matrix34(pAttachment->GetAttWorldAbsolute());
	}
	else
	{
		return Matrix34::CreateIdentity();
	}
}

//------------------------------------------------------------------------
// Show/hide a single named attachment.
void CItem::HideCharacterAttachment(int slot, const char *name, bool hide)
{
	if(IAttachment * pAttachment = GetCharacterAttachment(GetEntity()->GetCharacter(slot), name))
	{
		pAttachment->HideAttachment(hide?1:0);
	}
}

//------------------------------------------------------------------------
// Show/hide the whole character in 'slot' ('name' is unused here).
void CItem::HideCharacterAttachmentMaster(int slot, const char *name, bool hide)
{
	ICharacterInstance *pCharacter = GetEntity()->GetCharacter(slot);
	if (!pCharacter)
		return;

	pCharacter->HideMaster(hide?1:0);
}

//------------------------------------------------------------------------
// Create bone attachments for every shared-params helper targeting 'slot'.
void CItem::CreateAttachmentHelpers(int slot)
{
	for (THelperVector::const_iterator it = m_sharedparams->helpers.begin(); it != m_sharedparams->helpers.end(); ++it)
	{
		if (it->slot != slot)
			continue;

		CreateCharacterAttachment(slot, it->name.c_str(), CA_BONE, it->bone.c_str());
	}
}

//------------------------------------------------------------------------
// Remove the helper attachments created by CreateAttachmentHelpers.
void CItem::DestroyAttachmentHelpers(int slot)
{
	for (THelperVector::const_iterator it = m_sharedparams->helpers.begin(); it != m_sharedparams->helpers.end(); ++it)
	{
		if (it->slot != slot)
			continue;

		DestroyCharacterAttachment(slot, it->name.c_str());
	}
}

//------------------------------------------------------------------------
const THelperVector& CItem::GetAttachmentHelpers()
{
	return m_sharedparams->helpers;
}

//------------------------------------------------------------------------
// Load geometry 'name' into entity slot 'slot' (character formats chr/cdf/cga
// load as characters, everything else as static geometry), rebuild helper
// attachments, apply offset/rotation/scale, optionally override the material
// (explicit 'material' path, or inherited from the parent when
// 'useParentMaterial'), and refresh first-person mounting/scope state.
bool CItem::SetGeometry(int slot, const ItemString& name, const ItemString& material, bool useParentMaterial, const Vec3& poffset, const Ang3& aoffset, float scale, bool forceReload)
{
	assert(slot >= 0 && slot < eIGS_Last);

	bool changedfp=false;
	switch(slot)
	{
	case eIGS_Owner:
		break;
	case eIGS_FirstPerson:
	case eIGS_ThirdPerson:
	default:
		{
			if (name.empty() || forceReload)
			{
				// Drop the currently-loaded geometry and forget its name.
				GetEntity()->FreeSlot(slot);
#ifndef ITEM_USE_SHAREDSTRING
				m_geometry[slot].resize(0);
#else
				m_geometry[slot].reset();
#endif
			}

			DestroyAttachmentHelpers(slot);

			if (!name.empty())
			{
				if (m_geometry[slot] != name)
				{
					const char* ext = PathUtil::GetExt(name.c_str());
					if ((stricmp(ext, "chr") == 0) || (stricmp(ext, "cdf") == 0) || (stricmp(ext, "cga") == 0) )
						GetEntity()->LoadCharacter(slot, name.c_str(), 0);
					else
						GetEntity()->LoadGeometry(slot, name.c_str(), 0, 0);

					changedfp=slot==eIGS_FirstPerson;
				}

				CreateAttachmentHelpers(slot);
			}

			/* if (slot == eIGS_FirstPerson)
			{
				ICharacterInstance *pCharacter = GetEntity()->GetCharacter(eIGS_FirstPerson);
				if (pCharacter)
				{
					pCharacter->SetFlags(pCharacter->GetFlags()&(~CS_FLAG_UPDATE));
				}
			}
			else */if (slot == eIGS_Destroyed)
				DrawSlot(eIGS_Destroyed, false);
		}
		break;
	}

	// Compose rotation * scale, then translation, as the slot's local TM.
	Matrix34 slotTM;
	slotTM = Matrix34::CreateRotationXYZ(aoffset);
	slotTM.ScaleColumn(Vec3(scale, scale, scale));
	slotTM.SetTranslation(poffset);
	GetEntity()->SetSlotLocalTM(slot, slotTM);

	if (changedfp && m_stats.mounted)
	{
		// Re-apply the mount pivot offset after first-person geometry changed.
		if (m_sharedparams->pMountParams && !m_sharedparams->pMountParams->pivot.empty())
		{
			Matrix34 tm=GetEntity()->GetSlotLocalTM(eIGS_FirstPerson, false);
			Vec3 pivot = GetSlotHelperPos(eIGS_FirstPerson, m_sharedparams->pMountParams->pivot.c_str(), false);
			tm.AddTranslation(pivot);

			GetEntity()->SetSlotLocalTM(eIGS_FirstPerson, tm);
		}

		GetEntity()->InvalidateTM();
	}

	m_geometry[slot] = name ? name : ItemString();

	ReAttachAccessories();

	IEntity* pParentEntity = gEnv->pEntitySystem->GetEntity(GetParentId());
	IMaterial* pOverrideMaterial = 0;
	if (!material.empty())
	{
		pOverrideMaterial = gEnv->p3DEngine->GetMaterialManager()->LoadMaterial(material.c_str());
	}
	else if (useParentMaterial && pParentEntity)
	{
		// Inherit the parent's material for this slot (character material
		// wins over the render-proxy slot material).
		ICharacterInstance* pParentCharacter = pParentEntity->GetCharacter(slot);
		IEntityRenderProxy* pParentRenderProxy = static_cast<IEntityRenderProxy*>(pParentEntity->GetProxy(ENTITY_PROXY_RENDER));

		if (pParentCharacter)
			pOverrideMaterial = pParentCharacter->GetIMaterial();
		else if (pParentRenderProxy)
			pOverrideMaterial = pParentRenderProxy->GetSlotMaterial(slot);
	}

	if (pOverrideMaterial)
	{
		ICharacterInstance* pCharacter = GetEntity()->GetCharacter(slot);
		IEntityRenderProxy* pRenderProxy = static_cast<IEntityRenderProxy*>(GetEntity()->GetProxy(ENTITY_PROXY_RENDER));

		OverrideAttachmentMaterial(pOverrideMaterial, this, slot);

		if (pCharacter)
			pCharacter->SetIMaterial_Instance(pOverrideMaterial);
		else if (pRenderProxy)
			pRenderProxy->SetSlotMaterial(slot, pOverrideMaterial);
	}

	if(slot == eIGS_FirstPerson && IsSelected())
	{
		CActor* pOwnerActor = GetOwnerActor();
		IActionController *pActionController = GetActionController();
		if(pActionController && pOwnerActor && pOwnerActor->IsClient())
		{
			UpdateScopeContexts(pActionController);
		}
	}

	return true;
}

//------------------------------------------------------------------------
// Queue a Mannequin fragment on the item's action controller. 'speedOverride'
// < 0 means default speed; a positive 'timeOverride' rescales speed so the
// fragment fits that duration. Returns true once the action is queued.
bool CItem::PlayFragment(IAction* pAction, float speedOverride, float timeOverride, float animWeight, float ffeedbackWeight, bool concentratedFire)
{
	// Hold a reference so the action survives even on an early-out path.
	_smart_ptr<IAction> pActionPtr(pAction);

	CRY_ASSERT(pAction);
	if(!pAction)
	{
		return false;
	}

	CWeapon *pWeapon = static_cast<CWeapon*>(GetIWeapon());
	if (pWeapon && pWeapon->IsProxyWeapon())
	{
		return false;
	}

	bool success = false;
	// Branch-free select: negative speedOverride -> 1.0f default.
	float speed = (float)__fsel(-speedOverride, 1.0f, speedOverride);

	FragmentID fragID = pAction->GetFragmentID();

	pAction->SetSubContext(m_subContext);
	IActionController
*pActionController = GetActionController();
	if ((fragID != FRAGMENT_ID_INVALID) && pActionController)
	{
		float fragmentDuration, transitionDuration;
		if (pActionController->QueryDuration(*pAction, fragmentDuration, transitionDuration))
		{
			float duration = fragmentDuration+transitionDuration;
			if ((duration > 0.0f) && (timeOverride > 0.0f))
			{
				// Rescale playback speed so the fragment fits timeOverride.
				speed = (duration / timeOverride);
				CRY_ASSERT((speed > 0.0f) && (speed < 99999.0f));
			}

			if(duration > 0.f)
			{
				// Remember the (ms) playback time, minus a small 20ms margin.
				m_animationTime[eIGS_Owner] = (uint32) MAX((duration*1000.0f/speed) - 20, 0.0f);
			}

			pAction->SetSpeedBias(speed);
			pAction->SetAnimWeight(animWeight);

			if(concentratedFire)
			{
				pAction->SetParam(CItem::sActionParamCRCs.concentratedFire, 1.f);
			}

			if(ffeedbackWeight != 1.f)
			{
				pAction->SetParam(CItem::sActionParamCRCs.ffeedbackScale, ffeedbackWeight);
			}

			pActionController->Queue(*pAction);
			success = true;
		}
	}

	return success;
}

//------------------------------------------------------------------------
// Resolve a Mannequin fragment id by name; optionally also returns the
// fragment's tag definition via 'tagDef'.
int CItem::GetFragmentID(const char* actionName, const CTagDefinition** tagDef)
{
	int fragmentID = FRAGMENT_ID_INVALID;
	IActionController *pActionController = GetActionController();

	if (pActionController)
	{
		SAnimationContext &animContext = pActionController->GetContext();
		fragmentID = animContext.controllerDef.m_fragmentIDs.Find(actionName);
		if(tagDef && fragmentID != FRAGMENT_ID_INVALID)
		{
			*tagDef = animContext.controllerDef.GetFragmentTagDef(fragmentID);
		}
	}

	return fragmentID;
}

//------------------------------------------------------------------------
// Create and play an item action for the given fragment id, filling in the
// fragment tags from this item's state. Returns the action (may be null if
// the fragment is invalid or the weapon is a proxy).
_smart_ptr<IAction> CItem::PlayAction(FragmentID action, int layer, bool loop, uint32 flags, float speedOverride, float animWeigth, float ffeedbackWeight)
{
	_smart_ptr<IAction> pAction;

	const CWeapon* pWeapon = static_cast<CWeapon*>(GetIWeapon());
	if (pWeapon && pWeapon->IsProxyWeapon())
	{
		return pAction;
	}

	IActionController* pActionController = GetActionController();
	if (pActionController && action != FRAGMENT_ID_INVALID)
	{
		SAnimationContext& animContext = pActionController->GetContext();
		const CTagDefinition* pTagDefinition = animContext.controllerDef.GetFragmentTagDef(action);
		float timeOverride = -1.0f;
		bool concentratedFire = (flags&eIPAF_ConcentratedFire) != 0;

		TagState actionTags = TAG_STATE_EMPTY;
		if (pTagDefinition)
		{
			CTagState fragTags(*pTagDefinition);
			SetFragmentTags(fragTags);
			actionTags = fragTags.GetMask();
		}

		pAction = new CItemAction(PP_PlayerAction, action, actionTags);
		PlayFragment(pAction, speedOverride, timeOverride, animWeigth, ffeedbackWeight, concentratedFire);
	}

	return pAction;
}

//------------------------------------------------------------------------
// Duration (ms) recorded by the last PlayFragment for this slot.
uint32 CItem::GetCurrentAnimationTime(int slot)
{
	return m_animationTime[slot];
}

//------------------------------------------------------------------------
// Toggle render flags of the entity slot: bDraw controls visibility,
// bNear controls nearest-rendering (first-person depth hack).
void CItem::DrawSlot(int slot, bool bDraw, bool bNear)
{
	uint32 flags = GetEntity()->GetSlotFlags(slot);
	if (bDraw)
		flags |= ENTITY_SLOT_RENDER;
	else
		flags &= ~ENTITY_SLOT_RENDER;

	if (bNear)
		flags |= ENTITY_SLOT_RENDER_NEAREST;
	else
		flags &= ~ENTITY_SLOT_RENDER_NEAREST;

	GetEntity()->SetSlotFlags(slot, flags);
}

//------------------------------------------------------------------------
// Position of a named helper in 'slot'. For static geometry the helper comes
// from the StatObj; for characters it is looked up first as an attachment,
// then as a joint. 'worldSpace'/'relative' select the output space.
Vec3 CItem::GetSlotHelperPos(int slot, const char *helper, bool worldSpace, bool relative) const
{
	Vec3 position(0,0,0);

	SEntitySlotInfo info;
	if (GetEntity()->GetSlotInfo(slot, info))
	{
		if (info.pStatObj)
		{
			IStatObj *pStatsObj = info.pStatObj;
			position = pStatsObj->GetHelperPos(helper);
			position = GetEntity()->GetSlotLocalTM(slot, false).TransformPoint(position);
		}
		else if (info.pCharacter)
		{
			ICharacterInstance *pCharacter = info.pCharacter;
			IAttachment* pAttachment = pCharacter->GetIAttachmentManager()->GetInterfaceByName(helper);
			if (pAttachment)
			{
				// Attachment found: return immediately in the requested space.
				position = worldSpace ? pAttachment->GetAttWorldAbsolute().t : pAttachment->GetAttModelRelative().t;
				return position;
			}
			else
			{
				// Fall back to a skeleton joint of the same name.
				IDefaultSkeleton& rIDefaultSkeleton = pCharacter->GetIDefaultSkeleton();
				ISkeletonPose* pSkeletonPose = pCharacter->GetISkeletonPose();
				int16 id = rIDefaultSkeleton.GetJointIDByName(helper);
				if (id > -1)
				{
					position = relative ? pSkeletonPose->GetRelJointByID(id).t : pSkeletonPose->GetAbsJointByID(id).t;
				}
			}

			if (!relative)
			{
				position = GetEntity()->GetSlotLocalTM(slot, false).TransformPoint(position);
			}
		}
	}

	if (worldSpace)
	{
		position = GetWorldTM().TransformPoint(position);
	}

	return position;
}

//------------------------------------------------------------------------
// Rotation of a named helper in 'slot', resolved like GetSlotHelperPos.
// NOTE: returns a reference to a function-local static — not thread-safe,
// and the value is overwritten by the next call.
const Matrix33 &CItem::GetSlotHelperRotation(int slot, const char *helper, bool worldSpace, bool relative)
{
	static Matrix33 rotation;
	rotation.SetIdentity();

	IEntity* pEntity = GetEntity();
	if(!pEntity)
		return rotation;

	SEntitySlotInfo info;
	if (pEntity->GetSlotInfo(slot, info))
	{
		if (info.pStatObj)
		{
			IStatObj *pStatObj = info.pStatObj;
			rotation = Matrix33(pStatObj->GetHelperTM(helper));
			rotation.OrthonormalizeFast();
			rotation = Matrix33(GetEntity()->GetSlotLocalTM(slot, false))*rotation;
		}
		else if (info.pCharacter)
		{
			ICharacterInstance *pCharacter = info.pCharacter;
			if(!pCharacter)
				return rotation;

			IAttachment* pAttachment = pCharacter->GetIAttachmentManager()->GetInterfaceByName(helper);
			if(pAttachment)
			{
				rotation = Matrix33(worldSpace ? pAttachment->GetAttWorldAbsolute().q : pAttachment->GetAttModelRelative().q);
				return rotation;
			}
			else
			{
				IDefaultSkeleton& rIDefaultSkeleton = pCharacter->GetIDefaultSkeleton();
				ISkeletonPose* pSkeletonPose = pCharacter->GetISkeletonPose();
				int16 id = rIDefaultSkeleton.GetJointIDByName(helper);
				if (id > -1)
				{
					rotation = relative ?
Matrix33(pSkeletonPose->GetRelJointByID(id).q) : Matrix33(pSkeletonPose->GetAbsJointByID(id).q);
				}
			}

			if (!relative)
			{
				rotation = Matrix33(pEntity->GetSlotLocalTM(slot, false)) * rotation;
			}
		}
	}

	if (worldSpace)
	{
		rotation = Matrix33(pEntity->GetWorldTM()) * rotation;
	}

	return rotation;
}

//------------------------------------------------------------------------
// Legacy sound-stop path, disabled pending the new audio system.
//void CItem::StopSound(tSoundID id)
//{
//	if (id == INVALID_SOUNDID)
//		return;
//
//	bool synchSound = false;
//	IEntityAudioProxy *pIEntityAudioProxy = GetAudioProxy(false);
//	if (pIEntityAudioProxy)
//	{
//		if(synchSound)
//			pIEntityAudioProxy->StopSound(id, ESoundStopMode_OnSyncPoint);
//		else
//			pIEntityAudioProxy->StopSound(id);
//	}
//}

//------------------------------------------------------------------------
// Stop all of this item's sounds. Currently stubbed out pending the
// audio-system rework (see REINST marker).
void CItem::Quiet()
{
	REINST("needs verification!");
	/*IEntityAudioProxy *pIEntityAudioProxy = GetAudioProxy(false);

	if (pIEntityAudioProxy)
	{
		pIEntityAudioProxy->StopAllSounds();
	}*/
}

//------------------------------------------------------------------------
//ISound *CItem::GetISound(tSoundID id)
//{
//	IEntityAudioProxy *pIEntityAudioProxy = GetAudioProxy(false);
//	if (pIEntityAudioProxy)
//		return pIEntityAudioProxy->GetSound(id);
//
//	return 0;
//}

//------------------------------------------------------------------------
// Entity audio proxy; created on demand when 'create' is set.
IEntityAudioProxy *CItem::GetAudioProxy(bool create)
{
	IEntityAudioProxy *pIEntityAudioProxy = (IEntityAudioProxy *)GetEntity()->GetProxy(ENTITY_PROXY_AUDIO);
	if (!pIEntityAudioProxy && create)
		pIEntityAudioProxy = crycomponent_cast<IEntityAudioProxyPtr> (GetEntity()->CreateProxy(ENTITY_PROXY_AUDIO)).get();
	return pIEntityAudioProxy;
}

//------------------------------------------------------------------------
// Entity render proxy; created on demand when 'create' is set.
IEntityRenderProxy *CItem::GetRenderProxy(bool create)
{
	IEntityRenderProxy *pRenderProxy = (IEntityRenderProxy *)GetEntity()->GetProxy(ENTITY_PROXY_RENDER);
	if (!pRenderProxy && create)
		pRenderProxy = crycomponent_cast<IEntityRenderProxyPtr> (GetEntity()->CreateProxy(ENTITY_PROXY_RENDER)).get();

	return pRenderProxy;
}

//------------------------------------------------------------------------
// Entity physics proxy; created on demand when 'create' is set.
IEntityPhysicalProxy *CItem::GetPhysicalProxy(bool create)
{
	IEntityPhysicalProxy *pPhysicalProxy = (IEntityPhysicalProxy *)GetEntity()->GetProxy(ENTITY_PROXY_PHYSICS);
	if (!pPhysicalProxy && create)
		pPhysicalProxy = crycomponent_cast<IEntityPhysicalProxyPtr> (GetEntity()->CreateProxy(ENTITY_PROXY_PHYSICS)).get();

	return pPhysicalProxy;
}

//------------------------------------------------------------------------
// Swap between intact and destroyed geometry: draw the destroyed slot and
// hide the active view-mode slot (or the reverse when 'use' is false); the
// destroyed slot inherits the third-person local TM on activation.
void CItem::DestroyedGeometry(bool use)
{
	if (!m_geometry[eIGS_Destroyed].empty())
	{
		DrawSlot(eIGS_Destroyed, use);
		if (m_stats.viewmode&eIVM_FirstPerson)
			DrawSlot(eIGS_FirstPerson, !use);
		else
			DrawSlot(eIGS_ThirdPerson, !use);

		if (use)
			GetEntity()->SetSlotLocalTM(eIGS_Destroyed, GetEntity()->GetSlotLocalTM(eIGS_ThirdPerson, false));
	}
}
#include "ConversionTables.h"

/*************************************************************
// initializes the tables with values for the
// ordinary amino acids (unmodified).
**************************************************************/
void ConversionTables::init_for_standard_aas()
{
	int i;
	int max_char = 0xFF;

	// Reset all lookup tables before repopulating.
	char2aa.clear();
	char2mass.clear();
	aa2mass.clear();
	aa2label.clear();
	aa_positions.clear();

	// Character-indexed tables; unused entries hold a sentinel.
	char2aa.resize(max_char);
	char2mass.resize(max_char);
	for (i=0; i<max_char; i++)
	{
		char2aa[i]=-999999;
		char2mass[i]=-999999;
	}

	// Amino-acid-indexed tables sized to the highest enum value (Val).
	aa2mass.resize(Val+1);
	aa2label.resize(Val+1);
	aa2char.resize(Val+1);
	aa_positions.resize(Val+1,0);

	// org_aa maps each aa index back to its unmodified original; for the
	// standard residues that is the identity mapping.
	org_aa.resize(Val+1);
	for (i=0; i<=Val; i++)
		org_aa[i]=i;

	// Terminal markers deliberately disabled (kept for reference):
	// char2aa['^']=N_TERM; aa2char[N_TERM]='^'; char2mass['^']=0.0; aa2mass[N_TERM]=0.0;
	// char2aa['$']=C_TERM; aa2char[C_TERM]='$'; char2mass['$']=0.0; aa2mass[C_TERM]=0.0;

	// Gap gets a huge sentinel mass; X is Leu/Ile (both 113.08406 Da).
	char2aa['_']=Gap; aa2char[Gap]='_'; char2mass['_']= 9999.999;  aa2mass[Gap]=9999.999;
	char2aa['X']=Xle; aa2char[Xle]='X'; char2mass['X']= 113.08406; aa2mass[Xle]= 113.08406;

	// Monoisotopic residue masses (Da) for the 20 standard amino acids.
	char2aa['A']=Ala; aa2char[Ala]='A'; char2mass['A']= 71.03711;  aa2mass[Ala]= 71.03711;
	char2aa['R']=Arg; aa2char[Arg]='R'; char2mass['R']= 156.10111; aa2mass[Arg]= 156.10111;
	char2aa['N']=Asn; aa2char[Asn]='N'; char2mass['N']= 114.04293; aa2mass[Asn]= 114.04293;
	char2aa['D']=Asp; aa2char[Asp]='D'; char2mass['D']= 115.02694; aa2mass[Asp]= 115.02694;
	char2aa['C']=Cys; aa2char[Cys]='C'; char2mass['C']= 103.00919; aa2mass[Cys]= 103.00919;
	char2aa['Q']=Gln; aa2char[Gln]='Q'; char2mass['Q']= 128.05858; aa2mass[Gln]= 128.05858;
	char2aa['E']=Glu; aa2char[Glu]='E'; char2mass['E']= 129.04259; aa2mass[Glu]= 129.04259;
	char2aa['G']=Gly; aa2char[Gly]='G'; char2mass['G']= 57.02146;  aa2mass[Gly]= 57.02146;
	char2aa['H']=His; aa2char[His]='H'; char2mass['H']= 137.05891; aa2mass[His]= 137.05891;
	char2aa['I']=Ile; aa2char[Ile]='I'; char2mass['I']= 113.08406; aa2mass[Ile]= 113.08406;
	char2aa['L']=Leu; aa2char[Leu]='L'; char2mass['L']= 113.08406; aa2mass[Leu]= 113.08406;
	char2aa['K']=Lys; aa2char[Lys]='K'; char2mass['K']= 128.09496; aa2mass[Lys]= 128.09496;
	char2aa['M']=Met; aa2char[Met]='M'; char2mass['M']= 131.04049; aa2mass[Met]= 131.04049;
	char2aa['F']=Phe; aa2char[Phe]='F'; char2mass['F']= 147.06841; aa2mass[Phe]= 147.06841;
	char2aa['P']=Pro; aa2char[Pro]='P'; char2mass['P']= 97.05276;  aa2mass[Pro]= 97.05276;
	char2aa['S']=Ser; aa2char[Ser]='S'; char2mass['S']= 87.03203;  aa2mass[Ser]= 87.03203;
	char2aa['T']=Thr; aa2char[Thr]='T'; char2mass['T']= 101.04768; aa2mass[Thr]= 101.04768;
	char2aa['W']=Trp; aa2char[Trp]='W'; char2mass['W']= 186.07931; aa2mass[Trp]= 186.07931;
	char2aa['Y']=Tyr; aa2char[Tyr]='Y'; char2mass['Y']= 163.06333; aa2mass[Tyr]= 163.06333;
	char2aa['V']=Val; aa2char[Val]='V'; char2mass['V']= 99.06841;  aa2mass[Val]= 99.06841;

	// chose to make B=D so
	char2aa['B']=Asp;
	char2mass['B']= 115.088;

	// Default label for each aa is its single-letter code.
	for (i=Gap; i<=Val; i++)
		aa2label[i]=aa2char[i];
}

// Register an optional (variable) PTM variant of amino acid 'aa': a new aa
// index is appended whose mass is the base mass plus 'delta'. Returns false
// if the resulting mass would fall below 12 Da.
bool ConversionTables::add_optional_PTM_aa(int aa, const string& label, mass_t delta, int position)
{
	if (aa2mass[aa]+delta<12.00)
	{
		cout << "Warning: illegal mass for PTM " << label << " : " << aa2mass[aa]+delta <<
			" (mass of optional PTM should be above 12 Da)" << endl;
		return false;
	}

	org_aa.push_back(aa);
	aa2mass.push_back(aa2mass[aa]+delta);
	aa2label.push_back(label);
	aa_positions.push_back(position);

	return true;
}

// Apply a fixed modification: shift the mass of 'aa' itself (and its
// char-indexed entry, for real residues) by 'delta' in place.
// Returns false if the new mass would fall below 12 Da.
bool ConversionTables::make_fixed_mod(int aa, mass_t delta)
{
	if (aa2mass[aa]+delta<12.00)
		return false;

	aa2mass[aa]+=delta;
	if (aa>0 && aa<=Val)
		char2mass[aa2char[aa]] += delta;

	return true;
}

// Register an optional terminal PTM: appends one modified variant for EVERY
// standard amino acid (Ala..Val), each carrying 'position' (+1 N-terminal or
// -1 C-terminal; anything else aborts). Rejects offsets below -50 Da.
bool ConversionTables::add_optional_PTM_terminal_aa(mass_t delta, int position, const string& ptm_label)
{
	if (position != -1 && position != 1)
	{
		cout << "Error: terminal PTMs must have position +1 or -1!" << endl;
		exit(1);
	}

	if (delta<-50.0)
	{
		cout << "Warning: illegal mass offset for terminal PTM: " << ptm_label <<
			" (offset should not be less than -50 Da)" << endl;
		return false;
	}

	int aa;
	for (aa=Ala; aa<=Val; aa++)
	{
		org_aa.push_back(aa);
		aa2mass.push_back(aa2mass[aa]+delta);
		aa_positions.push_back(position);

		// make label: single-letter code followed by the PTM label
		string aa_label = aa2char[aa] + ptm_label;
		aa_label[0] = aa2char[aa];
		aa2label.push_back(aa_label);
	}

	return true;
}
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
*  http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/

#include <aws/docdb/model/UpgradeTarget.h>
#include <aws/core/utils/xml/XmlSerializer.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/memory/stl/AWSStringStream.h>

#include <utility>

using namespace Aws::Utils::Xml;
using namespace Aws::Utils;

namespace Aws
{
namespace DocDB
{
namespace Model
{

// Generated SDK model for a DocDB engine-version upgrade target.
// Each field carries a companion "HasBeenSet" flag so serialization only
// emits fields that were actually present/assigned.

UpgradeTarget::UpgradeTarget() :
    m_engineHasBeenSet(false),
    m_engineVersionHasBeenSet(false),
    m_descriptionHasBeenSet(false),
    m_autoUpgrade(false),
    m_autoUpgradeHasBeenSet(false),
    m_isMajorVersionUpgrade(false),
    m_isMajorVersionUpgradeHasBeenSet(false)
{
}

// Construct directly from a parsed XML node (delegates to operator=).
UpgradeTarget::UpgradeTarget(const XmlNode& xmlNode) :
    m_engineHasBeenSet(false),
    m_engineVersionHasBeenSet(false),
    m_descriptionHasBeenSet(false),
    m_autoUpgrade(false),
    m_autoUpgradeHasBeenSet(false),
    m_isMajorVersionUpgrade(false),
    m_isMajorVersionUpgradeHasBeenSet(false)
{
  *this = xmlNode;
}

// Populate fields from the XML children of 'xmlNode'; absent children leave
// the corresponding field (and its HasBeenSet flag) untouched.
UpgradeTarget& UpgradeTarget::operator =(const XmlNode& xmlNode)
{
  XmlNode resultNode = xmlNode;

  if(!resultNode.IsNull())
  {
    XmlNode engineNode = resultNode.FirstChild("Engine");
    if(!engineNode.IsNull())
    {
      m_engine = engineNode.GetText();
      m_engineHasBeenSet = true;
    }
    XmlNode engineVersionNode = resultNode.FirstChild("EngineVersion");
    if(!engineVersionNode.IsNull())
    {
      m_engineVersion = engineVersionNode.GetText();
      m_engineVersionHasBeenSet = true;
    }
    XmlNode descriptionNode = resultNode.FirstChild("Description");
    if(!descriptionNode.IsNull())
    {
      m_description = descriptionNode.GetText();
      m_descriptionHasBeenSet = true;
    }
    XmlNode autoUpgradeNode = resultNode.FirstChild("AutoUpgrade");
    if(!autoUpgradeNode.IsNull())
    {
      // Booleans arrive as text; trim whitespace before conversion.
      m_autoUpgrade = StringUtils::ConvertToBool(StringUtils::Trim(autoUpgradeNode.GetText().c_str()).c_str());
      m_autoUpgradeHasBeenSet = true;
    }
    XmlNode isMajorVersionUpgradeNode = resultNode.FirstChild("IsMajorVersionUpgrade");
    if(!isMajorVersionUpgradeNode.IsNull())
    {
      m_isMajorVersionUpgrade = StringUtils::ConvertToBool(StringUtils::Trim(isMajorVersionUpgradeNode.GetText().c_str()).c_str());
      m_isMajorVersionUpgradeHasBeenSet = true;
    }
  }

  return *this;
}

// Serialize as indexed query parameters ("<location><index><locationValue>.Field=...")
// for list-style request encoding. Only set fields are emitted.
void UpgradeTarget::OutputToStream(Aws::OStream& oStream, const char* location, unsigned index, const char* locationValue) const
{
  if(m_engineHasBeenSet)
  {
      oStream << location << index << locationValue << ".Engine=" << StringUtils::URLEncode(m_engine.c_str()) << "&";
  }

  if(m_engineVersionHasBeenSet)
  {
      oStream << location << index << locationValue << ".EngineVersion=" << StringUtils::URLEncode(m_engineVersion.c_str()) << "&";
  }

  if(m_descriptionHasBeenSet)
  {
      oStream << location << index << locationValue << ".Description=" << StringUtils::URLEncode(m_description.c_str()) << "&";
  }

  if(m_autoUpgradeHasBeenSet)
  {
      oStream << location << index << locationValue << ".AutoUpgrade=" << std::boolalpha << m_autoUpgrade << "&";
  }

  if(m_isMajorVersionUpgradeHasBeenSet)
  {
      oStream << location << index << locationValue << ".IsMajorVersionUpgrade=" << std::boolalpha << m_isMajorVersionUpgrade << "&";
  }

}

// Serialize as non-indexed query parameters ("<location>.Field=...").
void UpgradeTarget::OutputToStream(Aws::OStream& oStream, const char* location) const
{
  if(m_engineHasBeenSet)
  {
      oStream << location << ".Engine=" << StringUtils::URLEncode(m_engine.c_str()) << "&";
  }
  if(m_engineVersionHasBeenSet)
  {
      oStream << location << ".EngineVersion=" << StringUtils::URLEncode(m_engineVersion.c_str()) << "&";
  }
  if(m_descriptionHasBeenSet)
  {
      oStream << location << ".Description=" << StringUtils::URLEncode(m_description.c_str()) << "&";
  }
  if(m_autoUpgradeHasBeenSet)
  {
      oStream << location << ".AutoUpgrade=" << std::boolalpha << m_autoUpgrade << "&";
  }
  if(m_isMajorVersionUpgradeHasBeenSet)
  {
      oStream << location << ".IsMajorVersionUpgrade=" << std::boolalpha << m_isMajorVersionUpgrade << "&";
  }
}

} // namespace Model
} // namespace DocDB
} // namespace Aws
// This may look like C code, but it is really -*- C++ -*- // // Copyright Bob Friesenhahn, 1999, 2000, 2001, 2002, 2003 // // Implementation of Montage // #define MAGICK_IMPLEMENTATION #define MAGICK_PLUSPLUS_IMPLEMENTATION #include "Magick++/Include.h" #include <string> #include <string.h> #include "Magick++/Montage.h" #include "Magick++/Functions.h" Magick::Montage::Montage ( void ) : _backgroundColor("#ffffff"), _compose(OverCompositeOp), _fileName(), _fill("#000000ff"), _font(), _geometry("120x120+4+3>"), _gravity(CenterGravity), _label(), _pointSize(12), _shadow(false), _stroke(), _texture(), _tile("6x4"), _title(), _transparentColor() { } Magick::Montage::~Montage( void ) { // Nothing to do } Magick::MontageFramed::MontageFramed ( void ) : _borderColor("#dfdfdf"), _borderWidth(0), _frame(), _matteColor("#bdbdbd") { } /* virtual */ Magick::MontageFramed::~MontageFramed ( void ) { // Nothing to do } void Magick::Montage::updateMontageInfo ( MontageInfo &montageInfo_ ) const { memset(&montageInfo_,0,sizeof(MontageInfo)); // background_color montageInfo_.background_color = _backgroundColor; // border_color montageInfo_.border_color = Color(); // border_width montageInfo_.border_width = 0; // filename _fileName.copy( montageInfo_.filename, MaxTextExtent - 1 ); montageInfo_.filename[ _fileName.length() ] = 0; // null terminate // fill montageInfo_.fill = _fill; // font if ( _font.length() != 0 ) Magick::CloneString( &montageInfo_.font, _font ); else MagickFreeMemory(montageInfo_.font); // frame MagickFreeMemory(montageInfo_.frame); // geometry if ( _geometry.isValid() ) Magick::CloneString( &montageInfo_.geometry, _geometry ); else MagickFreeMemory(montageInfo_.geometry); // gravity montageInfo_.gravity = _gravity; // matte_color montageInfo_.matte_color = Color(); // pointsize montageInfo_.pointsize = _pointSize; // shadow montageInfo_.shadow = static_cast<int>(_shadow); // signature (validity stamp) montageInfo_.signature = MagickSignature; // stroke 
montageInfo_.stroke = _stroke; // texture if ( _texture.length() != 0 ) Magick::CloneString( &montageInfo_.texture, _texture ); else MagickFreeMemory(montageInfo_.texture); // tile if ( _tile.isValid() ) Magick::CloneString( &montageInfo_.tile, _tile ); else MagickFreeMemory(montageInfo_.tile); // title if ( _title.length() != 0 ) Magick::CloneString( &montageInfo_.title, _title ); else MagickFreeMemory(montageInfo_.title); } // // Implementation of MontageFramed // /* virtual */ void Magick::MontageFramed::updateMontageInfo ( MontageInfo &montageInfo_ ) const { // Do base updates Montage::updateMontageInfo ( montageInfo_ ); // border_color montageInfo_.border_color = _borderColor; // border_width montageInfo_.border_width = _borderWidth; // frame if ( _frame.isValid() ) Magick::CloneString( &montageInfo_.frame, _frame ); else MagickFreeMemory(montageInfo_.frame); // matte_color montageInfo_.matte_color = _matteColor; }
#include "StaticImageRenderer.h"
#include "CommandLineArguments.h"

#include <iostream>
#include <fstream>
#include <string>
#include <thread>
#include <chrono>
#include <atomic>

// FIXME: add osx fs headers once it is supported
// NOTE(review): the _WIN32 branch includes <filesystem> but the code below
// uses the std::experimental::filesystem namespace -- confirm the Windows
// toolchain in use still exposes the experimental namespace from that header.
#if defined(_WIN32)
	#include <filesystem>
#elif defined(__linux__)
	#include <experimental/filesystem>
#endif

PH_CLI_NAMESPACE_BEGIN

// Create an engine sized to the requested thread count and point it at the
// scene file taken from the command line.
StaticImageRenderer::StaticImageRenderer(const CommandLineArguments& args) :
	m_engineId(0),
	m_sceneFilePath(),
	m_imageFilePath(args.getImageFilePath()),
	m_numRenderThreads(args.getNumRenderThreads()),
	m_isPostProcessRequested(args.isPostProcessRequested())
{
	phCreateEngine(&m_engineId, static_cast<PHuint32>(m_numRenderThreads));
	// Also sets the engine's working directory (see setSceneFilePath).
	setSceneFilePath(args.getSceneFilePath());
}

StaticImageRenderer::~StaticImageRenderer()
{
	phDeleteEngine(m_engineId);
}

// Render the loaded scene to an image file.
// Spawns one thread to run the (blocking) render and one to poll progress
// statistics; both are joined before this method returns.
void StaticImageRenderer::render() const
{
	if(!loadCommandsFromSceneFile())
	{
		return;
	}

	phUpdate(m_engineId);

	// [=] copies m_engineId (via this); the render call blocks until done.
	std::thread renderThread([=]()
	{
		phRender(m_engineId);
	});

	// Shared completion flag: written by this thread after renderThread
	// joins, read by the polling thread below.
	std::atomic<bool> isRenderingCompleted = false;
	std::thread queryThread([&]()
	{
		/*PHuint32 x, y, w, h;
		int regionStatus = phAsyncPollUpdatedFilmRegion(engineId, &x, &y, &w, &h);
		if(regionStatus != PH_FILM_REGION_STATUS_INVALID)
		{
			std::cout << "xywh: " << x << ", " << y << ", " << w << ", " << h << std::endl;
		}*/

		using namespace std::chrono_literals;

		PHfloat32 currentProgress = 0, samplesPerSecond = 0;
		PHfloat32 lastProgress = 0;
		while(!isRenderingCompleted)
		{
			phAsyncGetRendererStatistics(m_engineId, &currentProgress, &samplesPerSecond);

			// Only print when progress advanced by more than one percent,
			// to keep the console output sparse.
			if((currentProgress - lastProgress) > 1.0f)
			{
				lastProgress = currentProgress;
				std::cout << "progress: " << currentProgress << " % | " << "samples/sec: " << samplesPerSecond << std::endl;
			}

			std::this_thread::sleep_for(2s);
		}
	});

	renderThread.join();
	isRenderingCompleted = true;
	std::cout << "render completed" << std::endl;

	// Develop the film into a frame buffer and save it to disk.
	PHuint32 filmWpx, filmHpx;
	phGetFilmDimension(m_engineId, &filmWpx, &filmHpx);

	PHuint64 frameId;
	phCreateFrame(&frameId, filmWpx, filmHpx);
	if(m_isPostProcessRequested)
	{
		phDevelopFilm(m_engineId, frameId, PH_EATTRIBUTE::LIGHT_ENERGY);
	}
	else
	{
		// Raw development skips post-processing of the light-energy buffer.
		phDevelopFilmRaw(m_engineId, frameId, PH_EATTRIBUTE::LIGHT_ENERGY);
	}

	std::cout << "saving image to <" << m_imageFilePath << ">" << std::endl;
	phSaveFrame(frameId, m_imageFilePath.c_str());
	phDeleteFrame(frameId);

	// The polling thread may sleep up to ~2 s after completion before it
	// observes the flag and exits.
	queryThread.join();
}

// Record the scene path and set the engine's working directory to the scene's
// parent directory so relative resource paths in the scene resolve.
void StaticImageRenderer::setSceneFilePath(const std::string& path)
{
	m_sceneFilePath = path;

#ifndef __APPLE__
	// Non-Apple platforms: use std::experimental::filesystem to derive the
	// parent directory.
	namespace fs = std::experimental::filesystem;
	const std::string sceneDirectory = fs::path(path).parent_path().string();
	phSetWorkingDirectory(m_engineId, sceneDirectory.c_str());
#else
	// Apple fallback: manual string split on '/' (no fs headers yet; see
	// FIXME at top of file).
	const std::size_t slashIndex = path.find_last_of('/');
	if(slashIndex != std::string::npos)
	{
		const std::string sceneDirectory = path.substr(0, slashIndex + 1);
		phSetWorkingDirectory(m_engineId, sceneDirectory.c_str());
	}
	else
	{
		std::cerr << "warning: cannot retrieve scene directory from path <" << path << ">" << std::endl;
	}
#endif
}

void StaticImageRenderer::setImageFilePath(const std::string& path)
{
	m_imageFilePath = path;
}

// Stream the scene file into the engine line by line.
// Returns false (after a warning) if the file cannot be opened.
bool StaticImageRenderer::loadCommandsFromSceneFile() const
{
	std::ifstream sceneFile;
	sceneFile.open(m_sceneFilePath, std::ios::in);
	if(!sceneFile.is_open())
	{
		std::cerr << "warning: scene file <" << m_sceneFilePath << "> opening failed" << std::endl;
		return false;
	}
	else
	{
		std::cerr << "loading scene file <" << m_sceneFilePath << ">" << std::endl;

		std::string lineCommand;
		while(sceneFile.good())
		{
			std::getline(sceneFile, lineCommand);
			// getline strips the newline; the engine expects it back.
			lineCommand += '\n';
			phEnterCommand(m_engineId, lineCommand.c_str());
		}
		// "->" presumably flushes/terminates the command stream -- confirm
		// against the engine's command API.
		phEnterCommand(m_engineId, "->");

		return true;
	}
}

PH_CLI_NAMESPACE_END
#include <boost/multi_index_container.hpp> #include <boost/multi_index/hashed_index.hpp> #include <boost/multi_index/member.hpp> #include <iostream> #include <string> struct person { std::string name; int age; person(const std::string &n, int a) : name(n), age(a) { } }; typedef boost::multi_index::multi_index_container< person, boost::multi_index::indexed_by< boost::multi_index::hashed_non_unique< boost::multi_index::member< person, std::string, &person::name > >, boost::multi_index::hashed_non_unique< boost::multi_index::member< person, int, &person::age > > > > person_multi; void set_age(person &p) { p.age = 32; } int main() { person_multi persons; persons.insert(person("Boris", 31)); persons.insert(person("Anton", 35)); persons.insert(person("Caesar", 25)); person_multi::iterator it = persons.find("Boris"); persons.modify(it, set_age); const person_multi::nth_index<1>::type &age_index = persons.get<1>(); std::cout << age_index.count(32) << std::endl; }
// Spear: Statistical Platform for Elucidating moleculAr Reactivity
// Copyright (C) Purdue University -- BSD license

#ifndef SPEAR_IDATM_HPP
#define SPEAR_IDATM_HPP

#include "spear/AtomType.hpp"

#include <map>

namespace Spear {

class Molecule;
class AtomVertex;

/// IDATM atom typer.
///
/// Algorithm based on E.C. Meng / R.A. Lewis paper
/// "Determination of Molecular Topology and Atomic Hybridization
///  States from Heavy Atom Coordinates", J. Comp. Chem., v12#7, 891-898
/// and on example code from idatm.f implementation by E.C. Meng
///
/// differences: No boron types.  Double-bonded Npls are split off
/// as N2.  Sox split into Sxd (sulfoxide), and Son (sulfone).
/// Carbons in aromatic rings are type Car.  Aromatic oxygens are Oar.
/// still missing types: C1-,O1+,O1
class SPEAR_EXPORT IDATM final : public AtomType {
public:

    IDATM(const Molecule& mol, TypingMode mode);

    /// Name of this typing scheme (derived from the initialization mode).
    const std::string& name() const override {
        return name_;
    }

    bool is_aromatic(size_t atom_id) const override;

    bool is_planar(size_t atom_id) const override;

    size_t add_atom(size_t idx) override;

    void add_bond(size_t idx1, size_t idx2, Bond::Order bo) override;

    void remove_atom(size_t idx) override {
        erase(begin() + static_cast<std::ptrdiff_t>(idx));
    }

    Hybridization hybridization(size_t atom_id) const override;

private:

    /// Algorithm to type the molecule based on the 3D positioning of atoms
    /// and makes extensive use of coordinates to determine atom type.
    void type_atoms_3d_();

    /// Algorithm to type the molecule based on the previously given bond order
    void type_atoms_topo_();

    /// infallible pass: type hydrogens / deuteriums and compute number of
    /// heavy atoms connected to each atom.
    /// also applies templated residues and marks them as mapped
    void infallible_();

    /// valence pass: elements with valences > 1
    /// also fills the 'redo' array for hard to determine types
    ///
    ///  valence 4
    ///      C must be sp3 (C3)
    ///      N must be part of an N-oxide (Nox) or a quaternary
    ///          amine (N3+)
    ///      P must be part of a phosphate (Pac), a P-oxide (Pox)
    ///          or a quaternary phosphine (P3+)
    ///      S must be part of a sulfate, sulfonate or sulfamate
    ///          (Sac), or sulfone (Son)
    ///
    ///  valence 3
    ///      calculate the three bond angles and average them;
    ///      since hydrogens may be missing, cannot count on valence
    ///      to determine the hybridization state.  Average bond angle
    ///      assists in discriminating hybridization
    ///      C may be sp3 (C3), sp2 (C2), or part of a carboxylate
    ///          (Cac)
    ///      N may be sp3 (N3), sp2, or planar (as in amides and
    ///          aniline derivatives), or part of a nitro
    ///          group (Ntr)
    ///      S may be, depending on oxidation state, sulfoxide (Sxd)
    ///          or S3+
    ///
    ///  valence 2
    ///      calculate the bond angle and assign a tentative atom
    ///      type accordingly (a single angle is often not a good
    ///      indicator of type).  Mark these atoms for further
    ///      analysis by putting a non-zero value for them in the
    ///      'redo' array.
    ///      C may be sp3 (C3), sp2 (C2), or sp (C1)
    ///      N may be sp3 (N3), sp2 or planar (Npl), or sp (N1)
    ///      O and S are sp3 (O3 and S3, respectively)
    size_t valence_(const AtomVertex& av);

    /// topology-only variant of the valence pass (no coordinates);
    /// only the unambiguous valence-4 cases can be decided here:
    ///
    ///  valence 4
    ///      C must be sp3 (C3)
    ///      N must be part of an N-oxide (Nox) or a quaternary
    ///          amine (N3+)
    ///      P must be part of a phosphate (Pac), a P-oxide (Pox)
    ///          or a quaternary phosphine (P3+)
    ///      S must be part of a sulfate, sulfonate or sulfamate
    ///          (Sac), or sulfone (Son)
    size_t valence_topo_(const AtomVertex& atom);

    /// terminal pass: determine types of valence 1 atoms.  These were typed
    /// by element only in previous pass, but can be typed more accurately
    /// now that the atoms they are bonded to have been typed.
    /// Bond lengths are used in this pass to perform this assignment.
    size_t terminal_(const AtomVertex& atom);

    /// Re-examine all atoms with non-zero 'redo' values and
    /// retype them if necessary
    size_t redo_(const AtomVertex& atom);

    /// change isolated sp2 carbons to sp3 since it is
    /// impossible for an atom to be sp2 hybridized if all its
    /// neighbors are sp3 hybridized.  In addition, a carbon atom cannot
    /// be double bonded to a carboxylate carbon, phosphate phosphorus,
    /// sulfate sulfur, sulfone sulfur, sulfoxide sulfur, or sp1 carbon.
    /// Addition not in original idatm: Nox also
    void fix_C2_();

    /// 1) make decisions about the charge states of nitrogens.  If an
    ///    sp3 nitrogen is bonded to sp3 carbons and/or hydrogens and/or
    ///    deuteriums only, assume that it is positively charged (the pKa
    ///    of its conjugate acid is probably high enough that the
    ///    protonated form predominates at physiological pH).  If an sp2
    ///    carbon is bonded to three planar nitrogens, it may be part of
    ///    a guanidinium group.  Make the nitrogens positively charged
    ///    (Ng+) if guanidine or similar structures can be ruled out (if
    ///    'noplus' is false).
    /// 2) make carboxyl oxygens negatively charged even if the proton is
    ///    present (the pKa of the carboxyl group is probably low enough
    ///    that the unprotonated form predominates at physiological pH).
    void charges_();

    /// Assign aromaticity
    ///
    /// 1) Check that all the atoms of the ring are planar types
    /// 2) Check bond lengths around the ring; see if they are
    ///    consistent with aromatic bond lengths
    void aromatic_();

    /// Split off heavy-atom-valence-Npls that have no hydrogens as type N2.
    /// Discrimination criteria is the average bond length of the two
    /// heavy-atom bonds (shorter implies more double-bond character,
    /// thereby no hydrogen).
    void fix_N2_();

    /// "pass 9": Assign aromatic types
    void heteroaromatics_();

    /// "pass 10": change O2- to O3- for sulfates, phosphates, N-oxide, S3- for
    /// thiophosphate and other terminal atoms now that we have more types.
    size_t fix_tetrahedrals_(const AtomVertex& atom);

    /// "pass 11": Additional Special groups
    size_t fix_special_(const AtomVertex& atom);

    /// Original molecule to be typed
    const Molecule& mol_;

    /// ring-size bookkeeping used by the aromaticity passes
    std::multimap<size_t, size_t> aromatic_ring_sizes_;

    /// number of heavy atoms bonded
    std::vector<size_t> heavys_;

    /// Have we typed the atom?
    std::vector<bool> mapped_;

    /// Generated name based on initialization
    std::string name_;

    /// atoms flagged by valence_() for re-examination in redo_()
    std::vector<size_t> redo;

    template<typename func>
    void apply_function_to_all_atoms(func&& f);
};

template<> std::string SPEAR_EXPORT atomtype_name_for_id<IDATM>(size_t id);

template<> size_t SPEAR_EXPORT atomtype_id_for_name<IDATM>(const std::string& name);

template<> size_t SPEAR_EXPORT atomtype_id_count<IDATM>();

template<> double SPEAR_EXPORT van_der_waals<IDATM>(size_t id);

}

#endif
/*=========================================================================

  Program:   Visualization Toolkit
  Module:    vtkInformation.cxx

  Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
  All rights reserved.
  See Copyright.txt or http://www.kitware.com/Copyright.htm for details.

     This software is distributed WITHOUT ANY WARRANTY; without even
     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
     PURPOSE.  See the above copyright notice for more information.

=========================================================================*/
#include "vtkInformation.h"

#include "vtkCommand.h"
#include "vtkGarbageCollector.h"
#include "vtkInformationDataObjectKey.h"
#include "vtkInformationDoubleKey.h"
#include "vtkInformationDoubleVectorKey.h"
#include "vtkInformationIdTypeKey.h"
#include "vtkInformationInformationKey.h"
#include "vtkInformationInformationVectorKey.h"
#include "vtkInformationIntegerKey.h"
#include "vtkInformationIntegerPointerKey.h"
#include "vtkInformationIntegerVectorKey.h"
#include "vtkInformationIterator.h"
#include "vtkInformationKeyVectorKey.h"
#include "vtkInformationObjectBaseKey.h"
#include "vtkInformationRequestKey.h"
#include "vtkInformationStringKey.h"
#include "vtkInformationStringVectorKey.h"
#include "vtkInformationUnsignedLongKey.h"
#include "vtkInformationVariantKey.h"
#include "vtkInformationVariantVectorKey.h"
#include "vtkObjectFactory.h"
#include "vtkSmartPointer.h"
#include "vtkVariant.h"

#include <algorithm>
#include <utility>
#include <vector>

#include "vtkInformationInternals.h"

vtkStandardNewMacro(vtkInformation);

//----------------------------------------------------------------------------
vtkInformation::vtkInformation()
{
  // Allocate the internal representation (a key -> vtkObjectBase* map).
  this->Internal = new vtkInformationInternals;

  // There is no request key stored initially.
  this->Request = 0;
}

//----------------------------------------------------------------------------
vtkInformation::~vtkInformation()
{
  // Delete the internal representation.
  delete this->Internal;
}

//----------------------------------------------------------------------------
void vtkInformation::PrintSelf(ostream& os, vtkIndent indent)
{
  this->Superclass::PrintSelf(os, indent);

  // Print the request if one is set.
  if(this->Request)
    {
    os << indent << "Request: " << this->Request->GetName() << "\n";
    }
  this->PrintKeys(os, indent);
}

//----------------------------------------------------------------------------
// Print every stored key/value pair; each key knows how to print its value.
void vtkInformation::PrintKeys(ostream& os, vtkIndent indent)
{
  typedef vtkInformationInternals::MapType MapType;
  for(MapType::const_iterator i = this->Internal->Map.begin();
      i != this->Internal->Map.end(); ++i)
    {
    // Print the key name first.
    vtkInformationKey* key = i->first;
    os << indent << key->GetName() << ": ";

    // Ask the key to print its value.
    key->Print(os, this);
    os << "\n";
    }
}

//----------------------------------------------------------------------------
// call modified on superclass
void vtkInformation::Modified()
{
  this->Superclass::Modified();
}

//----------------------------------------------------------------------------
// Update MTime and invoke a modified event with
// the information key as call data
void vtkInformation::Modified(vtkInformationKey* key)
{
  this->MTime.Modified();
  this->InvokeEvent(vtkCommand::ModifiedEvent, key);
}

//----------------------------------------------------------------------------
// Return the number of keys as a result of iteration.
int vtkInformation::GetNumberOfKeys()
{
  vtkSmartPointer<vtkInformationIterator> infoIterator =
    vtkSmartPointer<vtkInformationIterator>::New();
  infoIterator->SetInformation( this );

  int numberOfKeys = 0;
  for (infoIterator->InitTraversal(); !infoIterator->IsDoneWithTraversal();
    infoIterator->GoToNextItem())
    {
    numberOfKeys++;
    }
  return numberOfKeys;
}

//----------------------------------------------------------------------------
// Store (or replace, or erase when newvalue==0) a reference-counted value
// for a key.  Registers the new value before unregistering the old so a
// value replacing itself is never destroyed mid-operation.
void vtkInformation::SetAsObjectBase(vtkInformationKey* key,
                                     vtkObjectBase* newvalue)
{
  if(!key)
    {
    return;
    }
  typedef vtkInformationInternals::MapType MapType;
  MapType::iterator i = this->Internal->Map.find(key);
  if(i != this->Internal->Map.end())
    {
    vtkObjectBase* oldvalue = i->second;
    if(newvalue)
      {
      i->second = newvalue;
      newvalue->Register(0);
      }
    else
      {
      this->Internal->Map.erase(i);
      }
    oldvalue->UnRegister(0);
    }
  else if(newvalue)
    {
    MapType::value_type entry(key, newvalue);
    this->Internal->Map.insert(entry);
    newvalue->Register(0);
    }
  this->Modified(key);
}

//----------------------------------------------------------------------------
const vtkObjectBase* vtkInformation::GetAsObjectBase(
  const vtkInformationKey* key) const
{
  if(key)
    {
    typedef vtkInformationInternals::MapType MapType;
    MapType::const_iterator i =
      this->Internal->Map.find(const_cast<vtkInformationKey*>(key));
    if(i != this->Internal->Map.end())
      {
      return i->second;
      }
    }
  return 0;
}

//----------------------------------------------------------------------------
vtkObjectBase* vtkInformation::GetAsObjectBase(vtkInformationKey* key)
{
  if(key)
    {
    typedef vtkInformationInternals::MapType MapType;
    MapType::const_iterator i = this->Internal->Map.find(key);
    if(i != this->Internal->Map.end())
      {
      return i->second;
      }
    }
  return 0;
}

//----------------------------------------------------------------------------
// Clear is implemented as a copy from a null source.
void vtkInformation::Clear()
{
  this->Copy(0);
}

//----------------------------------------------------------------------------
// Replace all entries with copies of 'from' (or empty when from==0).  The
// old internals are kept alive until copying finishes, which makes
// this->Copy(this) safe.
void vtkInformation::Copy(vtkInformation* from, int deep)
{
  vtkInformationInternals* oldInternal = this->Internal;
  this->Internal = new vtkInformationInternals;
  if(from)
    {
    typedef vtkInformationInternals::MapType MapType;
    for(MapType::const_iterator i = from->Internal->Map.begin();
        i != from->Internal->Map.end(); ++i)
      {
      this->CopyEntry(from, i->first, deep);
      }
    }
  delete oldInternal;
}

//----------------------------------------------------------------------------
// Like Copy, but keeps existing entries not present in 'from'.
void vtkInformation::Append(vtkInformation* from, int deep)
{
  if(from)
    {
    typedef vtkInformationInternals::MapType MapType;
    for(MapType::const_iterator i = from->Internal->Map.begin();
        i != from->Internal->Map.end(); ++i)
      {
      this->CopyEntry(from, i->first, deep);
      }
    }
}

//----------------------------------------------------------------------------
// CopyEntry overloads: dispatch shallow/deep copy of a single entry through
// the key's virtual copy interface.
void vtkInformation::CopyEntry(vtkInformation* from,
                               vtkInformationKey* key, int deep)
{
  if (!deep)
    {
    key->ShallowCopy(from, this);
    }
  else
    {
    key->DeepCopy(from, this);
    }
}

//----------------------------------------------------------------------------
void vtkInformation::CopyEntry(vtkInformation* from,
                               vtkInformationDataObjectKey* key, int deep)
{
  if (!deep)
    {
    key->ShallowCopy(from, this);
    }
  else
    {
    key->DeepCopy(from, this);
    }
}

//----------------------------------------------------------------------------
void vtkInformation::CopyEntry(vtkInformation* from,
                               vtkInformationInformationKey* key, int deep)
{
  if (!deep)
    {
    key->ShallowCopy(from, this);
    }
  else
    {
    key->DeepCopy(from, this);
    }
}

//----------------------------------------------------------------------------
void vtkInformation::CopyEntry(vtkInformation* from,
                               vtkInformationInformationVectorKey* key,
                               int deep)
{
  if (!deep)
    {
    key->ShallowCopy(from, this);
    }
  else
    {
    key->DeepCopy(from, this);
    }
}

//----------------------------------------------------------------------------
void vtkInformation::CopyEntry(vtkInformation* from,
                               vtkInformationIntegerKey* key, int deep)
{
  if (!deep)
    {
    key->ShallowCopy(from, this);
    }
  else
    {
    key->DeepCopy(from, this);
    }
}
//----------------------------------------------------------------------------
// Further CopyEntry overloads: each forwards to the key's virtual
// ShallowCopy/DeepCopy, selected by the 'deep' flag.
void vtkInformation::CopyEntry(vtkInformation* from,
                               vtkInformationRequestKey* key, int deep)
{
  if (!deep)
    {
    key->ShallowCopy(from, this);
    }
  else
    {
    key->DeepCopy(from, this);
    }
}

//----------------------------------------------------------------------------
void vtkInformation::CopyEntry(vtkInformation* from,
                               vtkInformationIntegerVectorKey* key, int deep)
{
  if (!deep)
    {
    key->ShallowCopy(from, this);
    }
  else
    {
    key->DeepCopy(from, this);
    }
}

//----------------------------------------------------------------------------
void vtkInformation::CopyEntry(vtkInformation* from,
                               vtkInformationDoubleVectorKey* key, int deep)
{
  if (!deep)
    {
    key->ShallowCopy(from, this);
    }
  else
    {
    key->DeepCopy(from, this);
    }
}

//----------------------------------------------------------------------------
void vtkInformation::CopyEntry(vtkInformation* from,
                               vtkInformationVariantKey* key, int deep)
{
  if (!deep)
    {
    key->ShallowCopy(from, this);
    }
  else
    {
    key->DeepCopy(from, this);
    }
}

//----------------------------------------------------------------------------
void vtkInformation::CopyEntry(vtkInformation* from,
                               vtkInformationVariantVectorKey* key, int deep)
{
  if (!deep)
    {
    key->ShallowCopy(from, this);
    }
  else
    {
    key->DeepCopy(from, this);
    }
}

//----------------------------------------------------------------------------
void vtkInformation::CopyEntry(vtkInformation* from,
                               vtkInformationStringKey* key, int deep)
{
  if (!deep)
    {
    key->ShallowCopy(from, this);
    }
  else
    {
    key->DeepCopy(from, this);
    }
}

//----------------------------------------------------------------------------
void vtkInformation::CopyEntry(vtkInformation* from,
                               vtkInformationUnsignedLongKey* key, int deep)
{
  if (!deep)
    {
    key->ShallowCopy(from, this);
    }
  else
    {
    key->DeepCopy(from, this);
    }
}

//----------------------------------------------------------------------------
void vtkInformation::CopyEntry(vtkInformation* from,
                               vtkInformationStringVectorKey* key, int deep)
{
  if (!deep)
    {
    key->ShallowCopy(from, this);
    }
  else
    {
    key->DeepCopy(from, this);
    }
}

//----------------------------------------------------------------------------
// Copy every entry listed in a key-vector key's value.
void vtkInformation::CopyEntries(vtkInformation* from,
                                 vtkInformationKeyVectorKey* key, int deep)
{
  int numberOfKeys = from->Length(key);
  vtkInformationKey** keys = from->Get(key);
  for(int i=0; i < numberOfKeys; ++i)
    {
    this->CopyEntry(from, keys[i], deep);
    }
}

//----------------------------------------------------------------------------
int vtkInformation::Has(vtkInformationKey* key)
{
  // Use the virtual interface in case this is a special-cased key.
  return key->Has(this)?1:0;
}

//----------------------------------------------------------------------------
void vtkInformation::Remove(vtkInformationKey* key)
{
  // Use the virtual interface in case this is a special-cased key.
  key->Remove(this);
}

void vtkInformation::Set(vtkInformationRequestKey* key)
{
  key->Set(this);
}
void vtkInformation::Remove(vtkInformationRequestKey* key)
{
  // Explicitly qualified to bypass any virtual special-casing.
  key->vtkInformationRequestKey::Remove(this);
}
int vtkInformation::Has(vtkInformationRequestKey* key)
{
  return key->vtkInformationRequestKey::Has(this);
}

//----------------------------------------------------------------------------
// Generate Set/Remove/Get/Has for each scalar-valued key type.
#define VTK_INFORMATION_DEFINE_SCALAR_PROPERTY(name, type)              \
  void vtkInformation::Set(vtkInformation##name##Key* key, type value)  \
  {                                                                     \
    key->Set(this, value);                                              \
  }                                                                     \
  void vtkInformation::Remove(vtkInformation##name##Key* key)           \
  {                                                                     \
    key->vtkInformation##name##Key::Remove(this);                       \
  }                                                                     \
  type vtkInformation::Get(vtkInformation##name##Key* key)              \
  {                                                                     \
    return key->Get(this);                                              \
  }                                                                     \
  int vtkInformation::Has(vtkInformation##name##Key* key)               \
  {                                                                     \
    return key->vtkInformation##name##Key::Has(this);                   \
  }
VTK_INFORMATION_DEFINE_SCALAR_PROPERTY(IdType, vtkIdType);
VTK_INFORMATION_DEFINE_SCALAR_PROPERTY(Integer, int);
VTK_INFORMATION_DEFINE_SCALAR_PROPERTY(Double, double);
VTK_INFORMATION_DEFINE_SCALAR_PROPERTY(UnsignedLong, unsigned long);
VTK_INFORMATION_DEFINE_SCALAR_PROPERTY(String, const char*); VTK_INFORMATION_DEFINE_SCALAR_PROPERTY(DataObject, vtkDataObject*); VTK_INFORMATION_DEFINE_SCALAR_PROPERTY(Information, vtkInformation*); VTK_INFORMATION_DEFINE_SCALAR_PROPERTY(InformationVector, vtkInformationVector*); VTK_INFORMATION_DEFINE_SCALAR_PROPERTY(ObjectBase, vtkObjectBase*); VTK_INFORMATION_DEFINE_SCALAR_PROPERTY(Variant, const vtkVariant&); #undef VTK_INFORMATION_DEFINE_SCALAR_PROPERTY //---------------------------------------------------------------------------- #define VTK_INFORMATION_DEFINE_VECTOR_PROPERTY(name, type) \ void vtkInformation::Append(vtkInformation##name##VectorKey* key, \ type value) \ { \ key->Append(this, value); \ } \ void vtkInformation::Set(vtkInformation##name##VectorKey* key, \ type const* value, int length) \ { \ key->Set(this, value, length); \ } \ type* vtkInformation::Get(vtkInformation##name##VectorKey* key) \ { \ return key->Get(this); \ } \ type vtkInformation::Get(vtkInformation##name##VectorKey* key, int idx) \ { \ return key->Get(this, idx); \ } \ void vtkInformation::Get(vtkInformation##name##VectorKey* key, \ type* value) \ { \ key->Get(this, value); \ } \ int vtkInformation::Length(vtkInformation##name##VectorKey* key) \ { \ return key->Length(this); \ } \ void vtkInformation::Remove(vtkInformation##name##VectorKey* key) \ { \ key->vtkInformation##name##VectorKey::Remove(this); \ } \ int vtkInformation::Has(vtkInformation##name##VectorKey* key) \ { \ return key->vtkInformation##name##VectorKey::Has(this); \ } VTK_INFORMATION_DEFINE_VECTOR_PROPERTY(Integer, int); VTK_INFORMATION_DEFINE_VECTOR_PROPERTY(Double, double); // Variant vector key is slightly different to accommodate efficient // pass-by-reference instead of pass-by-value calls. 
void vtkInformation::Append(vtkInformationVariantVectorKey* key, const vtkVariant& value) { key->Append(this, value); } void vtkInformation::Set(vtkInformationVariantVectorKey* key, const vtkVariant* value, int length) { key->Set(this, value, length); } const vtkVariant* vtkInformation::Get(vtkInformationVariantVectorKey* key) { return key->Get(this); } const vtkVariant& vtkInformation::Get(vtkInformationVariantVectorKey* key, int idx) { return key->Get(this, idx); } void vtkInformation::Get(vtkInformationVariantVectorKey* key, vtkVariant* value) { key->Get(this, value); } int vtkInformation::Length(vtkInformationVariantVectorKey* key) { return key->Length(this); } void vtkInformation::Remove(vtkInformationVariantVectorKey* key) { key->vtkInformationVariantVectorKey::Remove(this); } int vtkInformation::Has(vtkInformationVariantVectorKey* key) { return key->vtkInformationVariantVectorKey::Has(this); } // String vector key is slightly different to make it backwards compatible with // the scalar string key. 
// Hand-written accessors for the string vector key: Set/Get address a single
// element by index, mirroring the old scalar string key API.
void vtkInformation::Append(vtkInformationStringVectorKey* key,
                            const char* value)
{
  key->Append(this, value);
}
void vtkInformation::Set(vtkInformationStringVectorKey* key,
                         const char* value, int idx)
{
  key->Set(this, value, idx);
}
const char* vtkInformation::Get(vtkInformationStringVectorKey* key, int idx)
{
  return key->Get(this, idx);
}
int vtkInformation::Length(vtkInformationStringVectorKey* key)
{
  return key->Length(this);
}
void vtkInformation::Remove(vtkInformationStringVectorKey* key)
{
  key->vtkInformationStringVectorKey::Remove(this);
}
int vtkInformation::Has(vtkInformationStringVectorKey* key)
{
  return key->vtkInformationStringVectorKey::Has(this);
}

VTK_INFORMATION_DEFINE_VECTOR_PROPERTY(Key, vtkInformationKey*);

// Convenience setters taking 6 or 3 scalar values for vector keys; 'atype'
// allows the variant instantiation to pass by const reference.
#define VTK_INFORMATION_DEFINE_VECTOR_VALUE2_PROPERTY(name, type, atype)    \
  void vtkInformation::Set(vtkInformation##name##VectorKey* key,            \
                           atype value1, atype value2, atype value3,        \
                           atype value4, atype value5, atype value6)        \
  {                                                                         \
    type value[6];                                                          \
    value[0] = value1;                                                      \
    value[1] = value2;                                                      \
    value[2] = value3;                                                      \
    value[3] = value4;                                                      \
    value[4] = value5;                                                      \
    value[5] = value6;                                                      \
    key->Set(this, value, 6);                                               \
  }                                                                         \
  void vtkInformation::Set(vtkInformation##name##VectorKey* key,            \
                           atype value1, atype value2, atype value3)        \
  {                                                                         \
    type value[3];                                                          \
    value[0] = value1;                                                      \
    value[1] = value2;                                                      \
    value[2] = value3;                                                      \
    key->Set(this, value, 3);                                               \
  }
#define VTK_INFORMATION_DEFINE_VECTOR_VALUE_PROPERTY(name, type)            \
  VTK_INFORMATION_DEFINE_VECTOR_VALUE2_PROPERTY(name, type, type)
VTK_INFORMATION_DEFINE_VECTOR_VALUE_PROPERTY(Integer, int);
VTK_INFORMATION_DEFINE_VECTOR_VALUE_PROPERTY(Double, double);
VTK_INFORMATION_DEFINE_VECTOR_VALUE2_PROPERTY(Variant, vtkVariant, const vtkVariant&);
#undef VTK_INFORMATION_DEFINE_VECTOR_VALUE_PROPERTY
#undef VTK_INFORMATION_DEFINE_VECTOR_PROPERTY

//----------------------------------------------------------------------------
// Generate Set/Get/Get(out)/Length/Remove/Has for pointer-valued keys.
#define VTK_INFORMATION_DEFINE_POINTER_PROPERTY(name, type)                 \
  void vtkInformation::Set(vtkInformation##name##PointerKey* key,           \
                           type* value, int length)                         \
  {                                                                         \
    key->Set(this, value, length);                                          \
  }                                                                         \
  type* vtkInformation::Get(vtkInformation##name##PointerKey* key)          \
  {                                                                         \
    return key->Get(this);                                                  \
  }                                                                         \
  void vtkInformation::Get(vtkInformation##name##PointerKey* key,           \
                           type* value)                                     \
  {                                                                         \
    key->Get(this, value);                                                  \
  }                                                                         \
  int vtkInformation::Length(vtkInformation##name##PointerKey* key)         \
  {                                                                         \
    return key->Length(this);                                               \
  }                                                                         \
  void vtkInformation::Remove(vtkInformation##name##PointerKey* key)        \
  {                                                                         \
    key->vtkInformation##name##PointerKey::Remove(this);                    \
  }                                                                         \
  int vtkInformation::Has(vtkInformation##name##PointerKey* key)            \
  {                                                                         \
    return key->vtkInformation##name##PointerKey::Has(this);                \
  }
VTK_INFORMATION_DEFINE_POINTER_PROPERTY(Integer, int);
#undef VTK_INFORMATION_DEFINE_POINTER_PROPERTY

//----------------------------------------------------------------------------
// Append/AppendUnique overloads: add a typed key to a key-vector value.
void vtkInformation::Append(vtkInformationKeyVectorKey* key,
                            vtkInformationDataObjectKey* value)
{
  key->Append(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::Append(vtkInformationKeyVectorKey* key,
                            vtkInformationDoubleKey* value)
{
  key->Append(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::Append(vtkInformationKeyVectorKey* key,
                            vtkInformationDoubleVectorKey* value)
{
  key->Append(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::Append(vtkInformationKeyVectorKey* key,
                            vtkInformationInformationKey* value)
{
  key->Append(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::Remove(vtkInformationKeyVectorKey* key,
                            vtkInformationKey* value)
{
  key->RemoveItem(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::Append(vtkInformationKeyVectorKey* key,
                            vtkInformationInformationVectorKey* value)
{
  key->Append(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::Append(vtkInformationKeyVectorKey* key,
                            vtkInformationIntegerKey* value)
{
  key->Append(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::Append(vtkInformationKeyVectorKey* key,
                            vtkInformationIntegerVectorKey* value)
{
  key->Append(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::Append(vtkInformationKeyVectorKey* key,
                            vtkInformationStringKey* value)
{
  key->Append(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::Append(vtkInformationKeyVectorKey* key,
                            vtkInformationUnsignedLongKey* value)
{
  key->Append(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::Append(vtkInformationKeyVectorKey* key,
                            vtkInformationObjectBaseKey* value)
{
  key->Append(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::Append(vtkInformationKeyVectorKey* key,
                            vtkInformationStringVectorKey* value)
{
  key->Append(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::AppendUnique(vtkInformationKeyVectorKey* key,
                                  vtkInformationDataObjectKey* value)
{
  key->AppendUnique(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::AppendUnique(vtkInformationKeyVectorKey* key,
                                  vtkInformationKey* value)
{
  key->AppendUnique(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::AppendUnique(vtkInformationKeyVectorKey* key,
                                  vtkInformationDoubleKey* value)
{
  key->AppendUnique(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::AppendUnique(vtkInformationKeyVectorKey* key,
                                  vtkInformationDoubleVectorKey* value)
{
  key->AppendUnique(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::AppendUnique(vtkInformationKeyVectorKey* key,
                                  vtkInformationInformationKey* value)
{
  key->AppendUnique(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::AppendUnique(vtkInformationKeyVectorKey* key,
                                  vtkInformationInformationVectorKey* value)
{
  key->AppendUnique(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::AppendUnique(vtkInformationKeyVectorKey* key,
                                  vtkInformationIntegerKey* value)
{
  key->AppendUnique(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::AppendUnique(vtkInformationKeyVectorKey* key,
                                  vtkInformationIntegerVectorKey* value)
{
  key->AppendUnique(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::AppendUnique(vtkInformationKeyVectorKey* key,
                                  vtkInformationStringKey* value)
{
  key->AppendUnique(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::AppendUnique(vtkInformationKeyVectorKey* key,
                                  vtkInformationUnsignedLongKey* value)
{
  key->AppendUnique(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::AppendUnique(vtkInformationKeyVectorKey* key,
                                  vtkInformationObjectBaseKey* value)
{
  key->AppendUnique(this, value);
}

//----------------------------------------------------------------------------
void vtkInformation::AppendUnique(vtkInformationKeyVectorKey* key,
                                  vtkInformationStringVectorKey* value)
{
  key->AppendUnique(this, value);
}
//---------------------------------------------------------------------------- vtkInformationKey* vtkInformation::GetKey(vtkInformationKey* key) { return key; } //---------------------------------------------------------------------------- vtkInformationKey* vtkInformation::GetKey(vtkInformationDataObjectKey* key) { return key; } //---------------------------------------------------------------------------- vtkInformationKey* vtkInformation::GetKey(vtkInformationInformationKey* key) { return key; } //---------------------------------------------------------------------------- vtkInformationKey* vtkInformation::GetKey(vtkInformationInformationVectorKey* key) { return key; } //---------------------------------------------------------------------------- vtkInformationKey* vtkInformation::GetKey(vtkInformationIntegerKey* key) { return key; } //---------------------------------------------------------------------------- vtkInformationKey* vtkInformation::GetKey(vtkInformationRequestKey* key) { return key; } //---------------------------------------------------------------------------- vtkInformationKey* vtkInformation::GetKey(vtkInformationDoubleKey* key) { return key; } //---------------------------------------------------------------------------- vtkInformationKey* vtkInformation::GetKey(vtkInformationIntegerVectorKey* key) { return key; } //---------------------------------------------------------------------------- vtkInformationKey* vtkInformation::GetKey(vtkInformationDoubleVectorKey* key) { return key; } //---------------------------------------------------------------------------- vtkInformationKey* vtkInformation::GetKey(vtkInformationStringKey* key) { return key; } //---------------------------------------------------------------------------- vtkInformationKey* vtkInformation::GetKey(vtkInformationStringVectorKey* key) { return key; } //---------------------------------------------------------------------------- vtkInformationKey* 
vtkInformation::GetKey(vtkInformationUnsignedLongKey* key) { return key; } //---------------------------------------------------------------------------- vtkInformationKey* vtkInformation::GetKey(vtkInformationVariantKey* key) { return key; } //---------------------------------------------------------------------------- vtkInformationKey* vtkInformation::GetKey(vtkInformationVariantVectorKey* key) { return key; } //---------------------------------------------------------------------------- void vtkInformation::Register(vtkObjectBase* o) { this->RegisterInternal(o, 1); } //---------------------------------------------------------------------------- void vtkInformation::UnRegister(vtkObjectBase* o) { this->UnRegisterInternal(o, 1); } //---------------------------------------------------------------------------- void vtkInformation::ReportReferences(vtkGarbageCollector* collector) { this->Superclass::ReportReferences(collector); // Ask each key/value pair to report any references it holds. typedef vtkInformationInternals::MapType MapType; for(MapType::const_iterator i = this->Internal->Map.begin(); i != this->Internal->Map.end(); ++i) { i->first->Report(this, collector); } } //---------------------------------------------------------------------------- void vtkInformation::ReportAsObjectBase(vtkInformationKey* key, vtkGarbageCollector* collector) { if(key) { typedef vtkInformationInternals::MapType MapType; MapType::iterator i = this->Internal->Map.find(key); if(i != this->Internal->Map.end()) { vtkGarbageCollectorReport(collector, i->second, key->GetName()); } } } //---------------------------------------------------------------------------- void vtkInformation::SetRequest(vtkInformationRequestKey* request) { this->Request = request; } //---------------------------------------------------------------------------- vtkInformationRequestKey* vtkInformation::GetRequest() { return this->Request; }
#include "Server.hpp"

// Entry point of the server: loads configuration, generates the voxel world,
// then runs the TCP (reliable packets) and UDP (position updates) loops on
// separate threads until m_isDone becomes true, and finally frees the world.
void Server::start() {
    m_config.loadFromFile();
    generateWorld();

    // Starting threads
    std::thread positionUpdater(&Server::udpThread, this); // Starting packet position thread
    std::thread packetHandler(&Server::tcpThread, this);

    packetHandler.join();
    positionUpdater.join();

    freeWorldData();
}

// Accepts new TCP connections and relays reliable packets (disconnect
// notifications, block updates) between the connected clients.
void Server::tcpThread(){
    // Server variables
    sf::TcpListener listener;
    sf::SocketSelector selector;

    // Starting server
    std::cout << "Listening for connection..." << std::endl;
    listener.listen(m_config.getServerPort());
    selector.add(listener);

    while(!m_isDone){
        if(selector.wait()){ // Wait for event to happen
            if(selector.isReady(listener)){
                // Got new connection, so we are going to handle that by creating a new client
                addClient(listener, selector);
            } else {
                // Got data from a connected client so we are going to send it to all other clients
                sf::Packet receivedPacket;
                unsigned int senderIndex;
                uint8_t code = getReceivedPacket(selector, receivedPacket, senderIndex);
                switch(code){
                    case 1: // Disconnect
                        disconnectPlayer(selector, senderIndex);
                        break;

                    case 2: // Block Update
                        updateWorldWithBlockUpdatePacket(receivedPacket);
                        addCodeToBlockUpdatePacket(receivedPacket);
                        // NOTE(review): Client::id is 64-bit but
                        // sendPacketToOtherClients takes uint8_t, so the id is
                        // truncated here -- see note on that method.
                        sendPacketToOtherClients(receivedPacket, m_clients[senderIndex].id);
                        break;
                }
            }
        }
    }
}

// Non-blocking UDP loop: receives position packets and relays each one to
// every client except the sender (identified by the leading id byte).
void Server::udpThread(){
    // Variables for algorithm
    sf::UdpSocket socket;
    sf::Packet receivedPacket;
    sf::IpAddress remoteIp;
    unsigned short remotePort;

    // Initializing variables
    socket.bind(m_config.getServerPort());
    socket.setBlocking(false);

    while(!m_isDone){
        // Drain every packet that is currently queued on the socket.
        while(socket.receive(receivedPacket, remoteIp, remotePort) == sf::Socket::Done){
            uint8_t id;
            receivedPacket >> id;
            // NOTE(review): this re-appends the sender id at the *end* of the
            // relayed packet (extraction consumed it from the front) -- confirm
            // that receivers expect the id in that position.
            receivedPacket << id;

            for(auto& i : m_clients){
                if(i.id != id){
                    socket.send(receivedPacket, i.socket->getRemoteAddress(), m_config.getClientPort());
                }
            }
        }
        // Avoid busy-spinning on the non-blocking socket.
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
}

// Allocates the flat world buffer (x fastest, then z, then y) and fills it:
// block 5 below y=20, block 2 on the x==z diagonal, air (0) elsewhere.
void Server::generateWorld(){
    unsigned int ww = m_config.getWorldWidth();
    unsigned int wl = m_config.getWorldLength();
    unsigned int wh = m_config.getWorldHeight();
    unsigned int cw = m_config.getChunkWidth();

    // Allocate memory for the world
    m_worldData = static_cast<uint8_t*>(malloc(ww * wl * wh * cw * cw * cw));
    if(m_worldData == nullptr){
        // Allocation failure: leave the world empty rather than writing
        // through a null pointer below.
        std::cout << "Failed to allocate world data" << std::endl;
        return;
    }

    unsigned int maxW = ww * cw;
    unsigned int maxL = wl * cw;

    // Fill in the memory
    for(unsigned int y = 0; y < wh * cw; y++){
        for(unsigned int z = 0; z < wl * cw; z++){
            for(unsigned int x = 0; x < ww * cw; x++){
                if(y < 20){
                    m_worldData[(y * maxW * maxL) + (z * maxW) + x] = 5;
                }else if(x == z){
                    m_worldData[(y * maxW * maxL) + (z * maxW) + x] = 2;
                }else{
                    m_worldData[(y * maxW * maxL) + (z * maxW) + x] = 0;
                }
            }
        }
    }
}

// Returns a fresh, strictly increasing client id (1, 2, 3, ...).
// Not thread-safe; only called from the TCP accept path.
uint64_t Server::createUniqueID() {
    static uint64_t uid;
    return ++uid;
}

// Returns true if a currently connected client has the given id.
bool Server::doesIDExist(uint64_t id){
    /* BUGFIX: this used a binary search on the assumption that m_clients is
     * sorted by id. That assumption is false -- disconnectPlayer() removes
     * clients with a swap-and-pop, which reorders the vector -- and the
     * search itself was unsafe: `r` started at size() (out-of-bounds read of
     * m_clients[m]) and `r = m - 1` underflowed the unsigned index when
     * m == 0. A linear scan is correct regardless of ordering. */
    for(const auto& client : m_clients){
        if(client.id == id){
            return true;
        }
    }
    return false;
}

// Accepts a pending TCP connection, registers the new client with the
// selector, and sends it its assigned 64-bit id (little-endian, byte by byte).
void Server::addClient(sf::TcpListener& listener, sf::SocketSelector& selector) {
    // Adding new client to list of clients
    Client client;
    listener.accept(*client.socket);
    client.id = createUniqueID();
    m_clients.push_back(client);
    selector.add(*client.socket);

    // Sending chosen ID to client, least-significant byte first.
    sf::Packet packet;
    for (uint8_t i = 0; i < 64; i += 8)
        packet << (uint8_t)(client.id >> i & 0xff);
    client.socket->send(packet);
    packet.clear();

    std::cout << "New client connected with ID: " << client.id << std::endl;
}

// Releases the world buffer allocated by generateWorld().
// free(nullptr) is a no-op, so this is safe even if allocation failed.
void Server::freeWorldData(){
    free(m_worldData);
}

// Polls every client socket; on a full packet, extracts and returns its
// opcode and sets _senderIndex to the sender's index in m_clients.
// Returns 1 (the disconnect opcode) if a client dropped, 0 if nothing
// actionable was received.
uint8_t Server::getReceivedPacket(sf::SocketSelector& selector, sf::Packet& packet,
                                  unsigned int& _senderIndex) {
    for(unsigned int i = 0; i < m_clients.size(); i++){
        if(selector.isReady(*m_clients[i].socket)){
            sf::Socket::Status status = m_clients[i].socket->receive(packet);
            if(status == sf::Socket::Done){
                // Got a valid packet
                _senderIndex = i;
                uint8_t code;
                packet >> code;
                return code;
            } else if (status == sf::Socket::Disconnected) {
                // The client has disconnected. We remove the client and send a
                // packet to other clients that a player has disconnected.
                _senderIndex = i;
                return 1; // We return 1 because 1 is the code for a disconnect request
            }
        }
    }
    return 0;
}

// Broadcasts a disconnect notification for the client at index _playerID
// (NOTE(review): despite the name, this is an *index* into m_clients, not a
// client id), then removes the client via swap-and-pop.
void Server::disconnectPlayer(sf::SocketSelector& _selector, unsigned int _playerID){
    sf::Packet packet;
    packet.clear();

    // We send the keycode 1 because that is the code for a disconnection,
    // followed by the departing client's 64-bit id, least-significant byte first.
    packet << (uint8_t)1;
    for (uint8_t i = 0; i < 64; i += 8)
        packet << (uint8_t)(m_clients[_playerID].id >> i & 0xff);
    sendPacketToAllClients(packet);

    std::cout << "Client Disconnected with ID: " << (unsigned int)m_clients[_playerID].id << std::endl;

    // Unregister and destroy the socket, then swap-and-pop the client entry.
    _selector.remove(*m_clients[_playerID].socket);
    delete m_clients[_playerID].socket;
    m_clients[_playerID] = m_clients.back();
    m_clients.pop_back();
}

// Sends the packet to every connected client.
void Server::sendPacketToAllClients(sf::Packet& packet){
    for(auto& i : m_clients){
        i.socket->send(packet);
    }
}

// Sends the packet to every client except the one whose id matches senderID.
// NOTE(review): senderID is uint8_t while ids are 64-bit, so callers' ids are
// truncated and clients with id > 255 can mis-compare -- consider widening
// this parameter to uint64_t in Server.hpp.
void Server::sendPacketToOtherClients(sf::Packet& packet, uint8_t senderID){
    for(auto& i : m_clients){
        if(i.id != senderID)
            i.socket->send(packet);
    }
}

// Writes a block value into the flat world buffer at (_x, _y, _z).
// No bounds checking is performed; callers must pass in-world coordinates.
void Server::setBlock(int _x, int _y, int _z, uint8_t _block){
    unsigned int maxW = m_config.getWorldWidth() * m_config.getChunkWidth();
    unsigned int maxL = m_config.getWorldLength() * m_config.getChunkWidth();
    m_worldData[(_y * maxW * maxL) + (_z * maxW) + _x] = _block;
}

// Applies a block-update packet (x, y, z, block -- opcode already consumed)
// to the world. Note: this consumes the packet's contents.
void Server::updateWorldWithBlockUpdatePacket(sf::Packet& _packet){
    int x = 0;
    int y = 0;
    int z = 0;
    uint8_t block = 0;
    _packet >> x >> y >> z >> block;
    setBlock(x, y, z, block);
}

// Rebuilds a block-update packet with the opcode (2) prepended, so it can be
// relayed to other clients. The packet's read cursor must be at x.
void Server::addCodeToBlockUpdatePacket(sf::Packet& _packet){
    int x, y, z;
    uint8_t block;
    _packet >> x >> y >> z >> block;
    _packet.clear();
    _packet << (uint8_t)2 << x << y << z << block;
}
// Copyright 2016 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/ledger/bin/app/merging/merge_resolver.h"

#include <lib/async/cpp/task.h>
#include <lib/fit/function.h>

#include <string>
#include <utility>

#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "src/ledger/bin/app/constants.h"
#include "src/ledger/bin/app/merging/last_one_wins_merge_strategy.h"
#include "src/ledger/bin/app/merging/test_utils.h"
#include "src/ledger/bin/app/page_utils.h"
#include "src/ledger/bin/encryption/primitives/hash.h"
#include "src/ledger/bin/storage/fake/fake_page_storage.h"
#include "src/ledger/bin/storage/public/constants.h"
#include "src/ledger/bin/storage/public/page_storage.h"
#include "src/ledger/bin/storage/public/types.h"
#include "src/ledger/lib/backoff/testing/test_backoff.h"
#include "src/ledger/lib/callback/capture.h"
#include "src/ledger/lib/callback/set_when_called.h"
#include "src/ledger/lib/convert/convert.h"

namespace ledger {
namespace {

using ::testing::AnyOf;
using ::testing::Eq;
using ::testing::UnorderedElementsAre;

// Returns the ids of the given commits, in the same order.
std::vector<storage::CommitId> ToCommitIds(
    const std::vector<std::unique_ptr<const storage::Commit>>& commits) {
  std::vector<storage::CommitId> ids;
  ids.reserve(commits.size());
  for (const auto& commit : commits) {
    ids.push_back(commit->GetId());
  }
  return ids;
}

// Wraps a real PageStorage and forwards the operations the tests use, with
// one twist: commits registered via MarkCommitContentsUnavailable() make
// GetCommitContentsDiff() fail with NETWORK_ERROR, simulating content that
// has not been synced yet.
class FakePageStorageImpl : public storage::PageStorageEmptyImpl {
 public:
  explicit FakePageStorageImpl(std::unique_ptr<storage::PageStorage> page_storage)
      : storage_(std::move(page_storage)) {}
  FakePageStorageImpl(const FakePageStorageImpl&) = delete;
  FakePageStorageImpl& operator=(const FakePageStorageImpl&) = delete;

  // Makes any diff involving |commit_id| fail with NETWORK_ERROR.
  void MarkCommitContentsUnavailable(storage::CommitIdView commit_id) {
    removed_commit_ids_.insert(convert::ToString(commit_id));
  }

  Status GetHeadCommits(
      std::vector<std::unique_ptr<const storage::Commit>>* head_commits) override {
    return storage_->GetHeadCommits(head_commits);
  }

  void GetMergeCommitIds(
      storage::CommitIdView parent1, storage::CommitIdView parent2,
      fit::function<void(Status, std::vector<storage::CommitId>)> callback) override {
    storage_->GetMergeCommitIds(parent1, parent2, std::move(callback));
  }

  void GetCommit(storage::CommitIdView commit_id,
                 fit::function<void(Status, std::unique_ptr<const storage::Commit>)>
                     callback) override {
    storage_->GetCommit(commit_id, std::move(callback));
  }

  void AddCommitWatcher(storage::CommitWatcher* watcher) override {
    storage_->AddCommitWatcher(watcher);
  }

  void RemoveCommitWatcher(storage::CommitWatcher* watcher) override {
    storage_->RemoveCommitWatcher(watcher);
  }

  void GetObject(storage::ObjectIdentifier object_identifier, Location location,
                 fit::function<void(Status, std::unique_ptr<const storage::Object>)>
                     callback) override {
    storage_->GetObject(std::move(object_identifier), location, std::move(callback));
  }

  std::unique_ptr<storage::Journal> StartCommit(
      std::unique_ptr<const storage::Commit> commit) override {
    return storage_->StartCommit(std::move(commit));
  }

  std::unique_ptr<storage::Journal> StartMergeCommit(
      std::unique_ptr<const storage::Commit> left,
      std::unique_ptr<const storage::Commit> right) override {
    return storage_->StartMergeCommit(std::move(left), std::move(right));
  }

  void CommitJournal(std::unique_ptr<storage::Journal> journal,
                     fit::function<void(Status, std::unique_ptr<const storage::Commit>)>
                         callback) override {
    storage_->CommitJournal(std::move(journal), std::move(callback));
  }

  void AddObjectFromLocal(
      storage::ObjectType object_type, std::unique_ptr<storage::DataSource> data_source,
      storage::ObjectReferencesAndPriority tree_references,
      fit::function<void(Status, storage::ObjectIdentifier)> callback) override {
    storage_->AddObjectFromLocal(object_type, std::move(data_source),
                                 std::move(tree_references), std::move(callback));
  }

  void GetCommitContents(const storage::Commit& commit, std::string min_key,
                         fit::function<bool(storage::Entry)> on_next,
                         fit::function<void(Status)> on_done) override {
    storage_->GetCommitContents(commit, std::move(min_key), std::move(on_next),
                                std::move(on_done));
  }

  // Fails with NETWORK_ERROR if either side of the diff was marked
  // unavailable; otherwise forwards to the wrapped storage.
  void GetCommitContentsDiff(const storage::Commit& base_commit,
                             const storage::Commit& other_commit, std::string min_key,
                             fit::function<bool(storage::EntryChange)> on_next_diff,
                             fit::function<void(Status)> on_done) override {
    if (removed_commit_ids_.find(base_commit.GetId()) != removed_commit_ids_.end() ||
        removed_commit_ids_.find(other_commit.GetId()) != removed_commit_ids_.end()) {
      on_done(Status::NETWORK_ERROR);
      return;
    }
    storage_->GetCommitContentsDiff(base_commit, other_commit, std::move(min_key),
                                    std::move(on_next_diff), std::move(on_done));
  }

 private:
  std::set<storage::CommitId> removed_commit_ids_;
  std::unique_ptr<storage::PageStorage> storage_;
};

// MergeStrategy that records the arguments of the last Merge() call (heads,
// ancestor, callback) and counts Merge/Cancel invocations, letting tests
// inspect, delay, or forward the requested merge.
class RecordingTestStrategy : public MergeStrategy {
 public:
  RecordingTestStrategy() = default;
  ~RecordingTestStrategy() override = default;
  void SetOnError(fit::closure on_error) override { this->on_error = std::move(on_error); }

  // Optional hook invoked every time Merge() is called.
  void SetOnMerge(fit::closure on_merge) { on_merge_ = std::move(on_merge); }

  void Merge(storage::PageStorage* storage, ActivePageManager* active_page_manager,
             std::unique_ptr<const storage::Commit> merge_head_1,
             std::unique_ptr<const storage::Commit> merge_head_2,
             std::unique_ptr<const storage::Commit> merge_ancestor,
             fit::function<void(Status)> merge_callback) override {
    // The resolver is expected to hand over the heads in timestamp order.
    EXPECT_TRUE(storage::Commit::TimestampOrdered(merge_head_1, merge_head_2));
    storage_ = storage;
    active_page_manager_ = active_page_manager;
    callback = std::move(merge_callback);
    head_1 = std::move(merge_head_1);
    head_2 = std::move(merge_head_2);
    ancestor = std::move(merge_ancestor);
    merge_calls++;
    if (on_merge_) {
      on_merge_();
    }
  }

  // Replays the recorded merge request on another strategy.
  void Forward(MergeStrategy* strategy) {
    strategy->Merge(storage_, active_page_manager_, std::move(head_1), std::move(head_2),
                    std::move(ancestor), std::move(callback));
  }

  void Cancel() override { cancel_calls++; }

  fit::closure on_error;

  std::unique_ptr<const storage::Commit> head_1;
  std::unique_ptr<const storage::Commit> head_2;
  std::unique_ptr<const storage::Commit> ancestor;
  fit::function<void(Status)> callback;

  uint32_t merge_calls = 0;
  uint32_t cancel_calls = 0;

 private:
  storage::PageStorage* storage_;
  ActivePageManager* active_page_manager_;
  fit::closure on_merge_;
};

// Fixture providing a FakePageStorageImpl-backed page and helpers to build
// commit/merge-commit graphs synchronously on the test loop.
class MergeResolverTest : public TestWithPageStorage {
 public:
  MergeResolverTest() = default;
  MergeResolverTest(const MergeResolverTest&) = delete;
  MergeResolverTest& operator=(const MergeResolverTest&) = delete;
  ~MergeResolverTest() override = default;

 protected:
  storage::PageStorage* page_storage() override { return page_storage_.get(); }

  void SetUp() override {
    TestWithPageStorage::SetUp();
    std::unique_ptr<storage::PageStorage> storage;
    ASSERT_TRUE(CreatePageStorage(&storage));
    page_storage_ = std::make_unique<FakePageStorageImpl>(std::move(storage));
  }

  // Creates a commit on the fixture's storage whose content is produced by
  // |contents|, and returns its id.
  storage::CommitId CreateCommit(storage::CommitIdView parent_id,
                                 fit::function<void(storage::Journal*)> contents) {
    return CreateCommit(page_storage_.get(), parent_id, std::move(contents));
  }

  storage::CommitId CreateCommit(storage::PageStorage* storage,
                                 storage::CommitIdView parent_id,
                                 fit::function<void(storage::Journal*)> contents) {
    Status status;
    bool called;
    std::unique_ptr<const storage::Commit> base;
    storage->GetCommit(parent_id, Capture(SetWhenCalled(&called), &status, &base));
    RunLoopUntilIdle();
    EXPECT_TRUE(called);
    EXPECT_EQ(status, Status::OK);

    std::unique_ptr<storage::Journal> journal = storage->StartCommit(std::move(base));
    contents(journal.get());
    std::unique_ptr<const storage::Commit> commit;
    storage->CommitJournal(std::move(journal),
                           Capture(SetWhenCalled(&called), &status, &commit));
    RunLoopUntilIdle();
    EXPECT_TRUE(called);
    EXPECT_EQ(status, Status::OK);
    return commit->GetId();
  }

  // Creates a merge commit of |parent_id1| and |parent_id2| and returns its id.
  storage::CommitId CreateMergeCommit(storage::CommitIdView parent_id1,
                                      storage::CommitIdView parent_id2,
                                      fit::function<void(storage::Journal*)> contents) {
    return CreateMergeCommit(page_storage_.get(), parent_id1, parent_id2,
                             std::move(contents));
  }

  storage::CommitId CreateMergeCommit(storage::PageStorage* storage,
                                      storage::CommitIdView parent_id1,
                                      storage::CommitIdView parent_id2,
                                      fit::function<void(storage::Journal*)> contents) {
    Status status;
    bool called;
    std::unique_ptr<const storage::Commit> base1;
    storage->GetCommit(parent_id1, Capture(SetWhenCalled(&called), &status, &base1));
    RunLoopUntilIdle();
    EXPECT_TRUE(called);
    EXPECT_EQ(status, Status::OK);
    std::unique_ptr<const storage::Commit> base2;
    storage->GetCommit(parent_id2, Capture(SetWhenCalled(&called), &status, &base2));
    RunLoopUntilIdle();
    EXPECT_TRUE(called);
    EXPECT_EQ(status, Status::OK);

    std::unique_ptr<storage::Journal> journal =
        storage->StartMergeCommit(std::move(base1), std::move(base2));
    contents(journal.get());
    Status actual_status;
    std::unique_ptr<const storage::Commit> actual_commit;
    storage->CommitJournal(std::move(journal),
                           Capture(SetWhenCalled(&called), &actual_status, &actual_commit));
    RunLoopUntilIdle();
    EXPECT_TRUE(called);
    EXPECT_EQ(actual_status, Status::OK);
    return actual_commit->GetId();
  }

  // Reads all entries of |commit| (sorted by key, per storage contract).
  std::vector<storage::Entry> GetCommitContents(const storage::Commit& commit) {
    Status status;
    std::vector<storage::Entry> result;
    auto on_next = [&result](storage::Entry e) {
      result.push_back(e);
      return true;
    };
    bool called;
    page_storage_->GetCommitContents(commit, "", std::move(on_next),
                                     Capture(SetWhenCalled(&called), &status));
    RunLoopUntilIdle();
    EXPECT_TRUE(called);
    EXPECT_EQ(status, Status::OK);
    return result;
  }

  // Checks that a string represents a valid set of changes: it is sorted and
  // does not contain duplicates.
  bool ValidSet(std::string state) {
    return std::is_sorted(state.begin(), state.end()) &&
           std::adjacent_find(state.begin(), state.end()) == state.end();
  }

  // Merge two sets of changes, represented by sorted strings. Assuming that all
  // changes are represented by unique letters, this checks that the base has
  // exactly the common changes between left and right, and returns a version
  // that includes all the changes of left and right.
  // This is exactly the property we expect merging to verify.
  std::string MergeAsSets(std::string left, std::string right, std::string base) {
    std::string out;
    std::string expected_base;
    EXPECT_TRUE(ValidSet(base));
    EXPECT_TRUE(ValidSet(left));
    EXPECT_TRUE(ValidSet(right));
    std::set_intersection(left.begin(), left.end(), right.begin(), right.end(),
                          std::back_inserter(expected_base));
    EXPECT_EQ(base, expected_base) << " when merging " << left << " and " << right;
    std::set_union(left.begin(), left.end(), right.begin(), right.end(),
                   std::back_inserter(out));
    EXPECT_TRUE(ValidSet(out));
    return out;
  }

  // Returns the value stored under |key| in |commit|, or "" if absent.
  std::string GetKeyOrEmpty(const storage::Commit& commit, std::string key) {
    std::vector<storage::Entry> entries = GetCommitContents(commit);
    auto it = std::find_if(entries.begin(), entries.end(),
                           [&key](storage::Entry entry) { return entry.key == key; });
    if (it == entries.end()) {
      return "";
    }
    std::string value;
    EXPECT_TRUE(GetValue(it->object_identifier, &value));
    return value;
  }

  // Merges two commits using the set semantics of MergeAsSets on key "k".
  void MergeCommitsAsSets(const storage::Commit& left, const storage::Commit& right,
                          const storage::Commit& base) {
    std::string merge =
        MergeAsSets(GetKeyOrEmpty(left, "k"), GetKeyOrEmpty(right, "k"),
                    GetKeyOrEmpty(base, "k"));
    CreateMergeCommit(left.GetId(), right.GetId(), AddKeyValueToJournal("k", merge));
  }

  std::unique_ptr<FakePageStorageImpl> page_storage_;
};

// A last-one-wins strategy resolves a simple two-head conflict down to a
// single head.
TEST_F(MergeResolverTest, Empty) {
  // Set up conflict
  CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("foo", "bar"));
  CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("foo", "baz"));
  std::unique_ptr<LastOneWinsMergeStrategy> strategy =
      std::make_unique<LastOneWinsMergeStrategy>();
  MergeResolver resolver([] {}, &environment_, page_storage_.get(),
                         std::make_unique<TestBackoff>());
  resolver.SetMergeStrategy(std::move(strategy));
  resolver.SetOnDiscardable(QuitLoopClosure());

  std::vector<std::unique_ptr<const storage::Commit>> commits;
  Status status = page_storage_->GetHeadCommits(&commits);
  EXPECT_EQ(status, Status::OK);
  EXPECT_EQ(commits.size(), 2u);

  RunLoopUntilIdle();
  EXPECT_TRUE(resolver.IsDiscardable());

  commits.clear();
  status = page_storage_->GetHeadCommits(&commits);
  EXPECT_EQ(status, Status::OK);
  EXPECT_EQ(commits.size(), 1u);
}

TEST_F(MergeResolverTest, CommonAncestor) {
  // Add commits forming the following history graph:
  // (root) -> (1) -> (2) -> (3)
  //                    \
  //                     -> (4) -> (5)
  storage::CommitId commit_1 =
      CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("key1", "val1.0"));
  storage::CommitId commit_2 = CreateCommit(commit_1, AddKeyValueToJournal("key2", "val2.0"));
  storage::CommitId commit_3 = CreateCommit(commit_2, AddKeyValueToJournal("key3", "val3.0"));
  storage::CommitId commit_4 = CreateCommit(commit_2, DeleteKeyFromJournal("key1"));
  storage::CommitId commit_5 = CreateCommit(commit_4, AddKeyValueToJournal("key2", "val2.1"));
  RunLoopUntilIdle();

  // Set a merge strategy to capture the requested merge.
  MergeResolver resolver([] {}, &environment_, page_storage_.get(),
                         std::make_unique<TestBackoff>());
  std::unique_ptr<RecordingTestStrategy> strategy = std::make_unique<RecordingTestStrategy>();
  auto strategy_ptr = strategy.get();
  resolver.SetMergeStrategy(std::move(strategy));
  RunLoopUntilIdle();

  // Verify that the strategy is asked to merge commits 5 and 3, with 2 as the
  // common ancestor.
  EXPECT_EQ(strategy_ptr->head_1->GetId(), commit_3);
  EXPECT_EQ(strategy_ptr->head_2->GetId(), commit_5);
  EXPECT_EQ(strategy_ptr->ancestor->GetId(), commit_2);

  // Resolve the conflict.
  CreateMergeCommit(strategy_ptr->head_1->GetId(), strategy_ptr->head_2->GetId(),
                    AddKeyValueToJournal("key_foo", "abc"));
  strategy_ptr->callback(Status::OK);
  strategy_ptr->callback = nullptr;
  RunLoopUntilIdle();
  EXPECT_TRUE(resolver.IsDiscardable());
}

TEST_F(MergeResolverTest, LastOneWins) {
  // Set up conflict
  storage::CommitId commit_1 =
      CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("key1", "val1.0"));
  storage::CommitId commit_2 = CreateCommit(commit_1, AddKeyValueToJournal("key2", "val2.0"));
  storage::CommitId commit_3 = CreateCommit(commit_2, AddKeyValueToJournal("key3", "val3.0"));
  storage::CommitId commit_4 = CreateCommit(commit_2, DeleteKeyFromJournal("key1"));
  storage::CommitId commit_5 = CreateCommit(commit_4, AddKeyValueToJournal("key2", "val2.1"));

  std::vector<std::unique_ptr<const storage::Commit>> commits;
  Status status = page_storage_->GetHeadCommits(&commits);
  EXPECT_EQ(status, Status::OK);
  auto ids = ToCommitIds(commits);
  EXPECT_THAT(ids, UnorderedElementsAre(commit_3, commit_5));

  bool called;
  std::unique_ptr<LastOneWinsMergeStrategy> strategy =
      std::make_unique<LastOneWinsMergeStrategy>();
  MergeResolver resolver([] {}, &environment_, page_storage_.get(),
                         std::make_unique<TestBackoff>());
  resolver.SetMergeStrategy(std::move(strategy));
  resolver.SetOnDiscardable(SetWhenCalled(&called));

  RunLoopUntilIdle();
  ASSERT_TRUE(called);
  EXPECT_TRUE(resolver.IsDiscardable());

  commits.clear();
  status = page_storage_->GetHeadCommits(&commits);
  EXPECT_EQ(status, Status::OK);
  EXPECT_EQ(commits.size(), 1u);

  std::vector<storage::Entry> content_vector = GetCommitContents(*commits[0]);
  // Entries are ordered by keys
  ASSERT_EQ(content_vector.size(), 2u);
  EXPECT_EQ(content_vector[0].key, "key2");
  std::string value;
  EXPECT_TRUE(GetValue(content_vector[0].object_identifier, &value));
  EXPECT_EQ(value, "val2.1");
  EXPECT_EQ(content_vector[1].key, "key3");
  EXPECT_TRUE(GetValue(content_vector[1].object_identifier, &value));
  EXPECT_EQ(value, "val3.0");
}
// When the diff against the common ancestor is unavailable (e.g. not yet
// synced), the resolver must leave both heads in place rather than merging
// with incomplete data.
TEST_F(MergeResolverTest, LastOneWinsDiffNotAvailable) {
  // Set up conflict
  storage::CommitId commit_1 =
      CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("key1", "val1.0"));
  storage::CommitId commit_2 = CreateCommit(commit_1, AddKeyValueToJournal("key2", "val2.0"));
  storage::CommitId commit_3 = CreateCommit(commit_2, AddKeyValueToJournal("key3", "val3.0"));
  storage::CommitId commit_4 = CreateCommit(commit_2, DeleteKeyFromJournal("key1"));
  storage::CommitId commit_5 = CreateCommit(commit_4, AddKeyValueToJournal("key2", "val2.1"));

  std::vector<std::unique_ptr<const storage::Commit>> commits;
  Status status = page_storage_->GetHeadCommits(&commits);
  EXPECT_EQ(status, Status::OK);
  EXPECT_THAT(ToCommitIds(commits), UnorderedElementsAre(commit_3, commit_5));

  // Make diffs involving the common ancestor fail with NETWORK_ERROR.
  page_storage_->MarkCommitContentsUnavailable(commit_2);

  bool called;
  std::unique_ptr<LastOneWinsMergeStrategy> strategy =
      std::make_unique<LastOneWinsMergeStrategy>();
  MergeResolver resolver([] {}, &environment_, page_storage_.get(),
                         std::make_unique<TestBackoff>());
  resolver.SetMergeStrategy(std::move(strategy));
  resolver.SetOnDiscardable(SetWhenCalled(&called));

  RunLoopUntilIdle();
  ASSERT_TRUE(called);
  EXPECT_TRUE(resolver.IsDiscardable());

  // Both heads must still be present: the merge could not be computed.
  commits.clear();
  status = page_storage_->GetHeadCommits(&commits);
  EXPECT_EQ(status, Status::OK);
  EXPECT_EQ(commits.size(), 2u);
}

// Without any merge strategy set, the resolver must not touch the conflict.
TEST_F(MergeResolverTest, None) {
  // Set up conflict
  storage::CommitId commit_1 =
      CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("key1", "val1.0"));
  storage::CommitId commit_2 = CreateCommit(commit_1, AddKeyValueToJournal("key2", "val2.0"));
  storage::CommitId commit_3 = CreateCommit(commit_2, AddKeyValueToJournal("key3", "val3.0"));
  storage::CommitId commit_4 = CreateCommit(commit_2, DeleteKeyFromJournal("key1"));
  storage::CommitId commit_5 = CreateCommit(commit_4, AddKeyValueToJournal("key2", "val2.1"));

  std::vector<std::unique_ptr<const storage::Commit>> commits;
  Status status = page_storage_->GetHeadCommits(&commits);
  EXPECT_EQ(status, Status::OK);
  EXPECT_EQ(commits.size(), 2u);
  std::vector<storage::CommitId> ids = ToCommitIds(commits);
  EXPECT_NE(ids.end(), std::find(ids.begin(), ids.end(), commit_3));
  EXPECT_NE(ids.end(), std::find(ids.begin(), ids.end(), commit_5));

  MergeResolver resolver([] {}, &environment_, page_storage_.get(),
                         std::make_unique<TestBackoff>());
  resolver.SetOnDiscardable(QuitLoopClosure());

  RunLoopUntilIdle();
  EXPECT_TRUE(resolver.IsDiscardable());

  // Both heads remain: nothing was merged.
  commits.clear();
  status = page_storage_->GetHeadCommits(&commits);
  EXPECT_EQ(status, Status::OK);
  EXPECT_EQ(commits.size(), 2u);
}

// Replacing the merge strategy while a resolution is pending must still let
// the conflict be resolved (by the newly-installed strategy).
TEST_F(MergeResolverTest, UpdateMidResolution) {
  // Set up conflict
  storage::CommitId commit_1 =
      CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("key1", "val1.0"));
  storage::CommitId commit_2 = CreateCommit(commit_1, AddKeyValueToJournal("key2", "val2.0"));
  storage::CommitId commit_3 = CreateCommit(commit_1, AddKeyValueToJournal("key3", "val3.0"));

  std::vector<std::unique_ptr<const storage::Commit>> commits;
  Status status = page_storage_->GetHeadCommits(&commits);
  EXPECT_EQ(status, Status::OK);
  EXPECT_EQ(commits.size(), 2u);
  EXPECT_THAT(ToCommitIds(commits), UnorderedElementsAre(commit_2, commit_3));

  bool called;
  MergeResolver resolver([] {}, &environment_, page_storage_.get(),
                         std::make_unique<TestBackoff>());
  resolver.SetOnDiscardable(SetWhenCalled(&called));
  resolver.SetMergeStrategy(std::make_unique<LastOneWinsMergeStrategy>());
  // Swap in a fresh strategy from a posted task, i.e. mid-resolution.
  async::PostTask(dispatcher(), [&resolver] {
    resolver.SetMergeStrategy(std::make_unique<LastOneWinsMergeStrategy>());
  });

  RunLoopUntilIdle();
  ASSERT_TRUE(called);
  EXPECT_TRUE(resolver.IsDiscardable());

  commits.clear();
  status = page_storage_->GetHeadCommits(&commits);
  EXPECT_EQ(status, Status::OK);
  EXPECT_EQ(commits.size(), 1u);
}

// Merge of merges backoff is only triggered when commits are coming from sync.
// To test this, we need to create conflicts and make it as if they are not
// created locally. This is done by preventing commit notifications for new
// commits, then issuing manually a commit notification "from sync". As this
// implies using a fake PageStorage, we don't test the resolution itself, only
// that backoff is triggered correctly.
TEST_F(MergeResolverTest, WaitOnMergeOfMerges) {
  storage::fake::FakePageStorage page_storage(&environment_, "page_id");

  bool on_discardable_called;
  auto backoff = std::make_unique<TestBackoff>();
  auto backoff_ptr = backoff.get();
  MergeResolver resolver([] {}, &environment_, &page_storage, std::move(backoff));
  resolver.SetOnDiscardable(SetWhenCalled(&on_discardable_called));
  auto strategy = std::make_unique<RecordingTestStrategy>();
  strategy->SetOnMerge(QuitLoopClosure());
  resolver.SetMergeStrategy(std::move(strategy));

  RunLoopUntilIdle();
  EXPECT_TRUE(on_discardable_called);

  page_storage.SetDropCommitNotifications(true);

  // Set up conflict: two equivalent merges of the same conflict.
  storage::CommitId commit_0 =
      CreateCommit(&page_storage, storage::kFirstPageCommitId, [](storage::Journal*) {});
  storage::CommitId commit_1 =
      CreateCommit(&page_storage, commit_0, AddKeyValueToJournal("key1", "val1.0"));
  storage::CommitId commit_2 =
      CreateCommit(&page_storage, commit_0, AddKeyValueToJournal("key1", "val1.0"));
  storage::CommitId commit_3 =
      CreateCommit(&page_storage, commit_0, AddKeyValueToJournal("key2", "val2.0"));

  storage::CommitId merge_1 = CreateMergeCommit(&page_storage, commit_1, commit_3,
                                                AddKeyValueToJournal("key3", "val3.0"));
  storage::CommitId merge_2 = CreateMergeCommit(&page_storage, commit_2, commit_3,
                                                AddKeyValueToJournal("key3", "val3.0"));

  std::vector<std::unique_ptr<const storage::Commit>> commits;
  Status status = page_storage.GetHeadCommits(&commits);
  EXPECT_EQ(status, Status::OK);
  EXPECT_EQ(commits.size(), 2u);
  EXPECT_THAT(ToCommitIds(commits), UnorderedElementsAre(merge_1, merge_2));

  page_storage.SetDropCommitNotifications(false);

  // Pretend the merges arrived from the cloud.
  storage::CommitWatcher* watcher = &resolver;
  watcher->OnNewCommits({}, storage::ChangeSource::CLOUD);

  // Note we can't use "RunLoopUntilIdle()" because the FakePageStorage delays
  // before inserting tasks into the message loop.
  RunLoopFor(zx::sec(5));

  EXPECT_GT(backoff_ptr->get_next_count, 0);
}

// Callbacks registered via RegisterNoConflictCallback fire once all conflicts
// are resolved, and the queue is cleared afterwards (no re-firing on later
// conflicts).
TEST_F(MergeResolverTest, NoConflictCallback_ConflictsResolved) {
  // Set up conflict.
  CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("foo", "bar"));
  CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("foo", "baz"));
  std::unique_ptr<LastOneWinsMergeStrategy> strategy =
      std::make_unique<LastOneWinsMergeStrategy>();
  MergeResolver resolver([] {}, &environment_, page_storage_.get(),
                         std::make_unique<TestBackoff>());
  resolver.SetMergeStrategy(std::move(strategy));
  resolver.SetOnDiscardable(MakeQuitTaskOnce());

  std::vector<std::unique_ptr<const storage::Commit>> commits;
  Status status = page_storage_->GetHeadCommits(&commits);
  EXPECT_EQ(status, Status::OK);
  EXPECT_EQ(commits.size(), 2u);

  RunLoopUntilIdle();

  size_t callback_calls = 0;
  auto conflicts_resolved_callback = [&resolver, &callback_calls]() {
    EXPECT_TRUE(resolver.IsDiscardable());
    callback_calls++;
  };
  ConflictResolutionWaitStatus wait_status;
  resolver.RegisterNoConflictCallback(Capture(conflicts_resolved_callback, &wait_status));
  resolver.RegisterNoConflictCallback(Capture(conflicts_resolved_callback, &wait_status));

  // Check that the callback was called 2 times.
  RunLoopUntilIdle();
  EXPECT_TRUE(resolver.IsDiscardable());
  EXPECT_EQ(callback_calls, 2u);
  EXPECT_EQ(wait_status, ConflictResolutionWaitStatus::CONFLICTS_RESOLVED);

  commits.clear();
  status = page_storage_->GetHeadCommits(&commits);
  EXPECT_EQ(status, Status::OK);
  EXPECT_EQ(commits.size(), 1u);

  callback_calls = 0;
  CreateCommit(commits[0]->GetId(), AddKeyValueToJournal("foo", "baw"));
  CreateCommit(commits[0]->GetId(), AddKeyValueToJournal("foo", "bat"));
  RunLoopUntilIdle();
  EXPECT_TRUE(resolver.IsDiscardable());

  // Check that callback wasn't called (callback queue cleared after all the
  // callbacks in it were called).
  RunLoopFor(zx::sec(10));
  EXPECT_EQ(callback_calls, 0u);
}

// With no conflict at all, registered callbacks fire immediately with
// NO_CONFLICTS.
TEST_F(MergeResolverTest, NoConflictCallback_NoConflicts) {
  CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("foo", "baz"));
  std::unique_ptr<LastOneWinsMergeStrategy> strategy =
      std::make_unique<LastOneWinsMergeStrategy>();
  MergeResolver resolver([] {}, &environment_, page_storage_.get(),
                         std::make_unique<TestBackoff>());
  resolver.SetMergeStrategy(std::move(strategy));
  resolver.SetOnDiscardable(MakeQuitTaskOnce());

  size_t callback_calls = 0;
  auto conflicts_resolved_callback = [&resolver, &callback_calls]() {
    EXPECT_TRUE(resolver.IsDiscardable());
    callback_calls++;
  };
  ConflictResolutionWaitStatus wait_status;
  resolver.RegisterNoConflictCallback(Capture(conflicts_resolved_callback, &wait_status));

  // Check that the callback was called 1 times.
  RunLoopUntilIdle();
  EXPECT_TRUE(resolver.IsDiscardable());
  EXPECT_EQ(callback_calls, 1u);
  EXPECT_EQ(wait_status, ConflictResolutionWaitStatus::NO_CONFLICTS);
}

// NOTE: this test continues beyond the end of this chunk.
TEST_F(MergeResolverTest, HasUnfinishedMerges) {
  MergeResolver resolver([] {}, &environment_, page_storage_.get(),
                         std::make_unique<TestBackoff>());
  std::unique_ptr<RecordingTestStrategy> strategy = std::make_unique<RecordingTestStrategy>();
  auto strategy_ptr = strategy.get();
  resolver.SetMergeStrategy(std::move(strategy));
  RunLoopUntilIdle();
  EXPECT_FALSE(resolver.HasUnfinishedMerges());

  // Set up a conflict and verify that HasUnfinishedMerges() returns true.
  storage::CommitId commit_1 =
      CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("foo", "bar"));
  storage::CommitId commit_2 =
      CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("foo", "baz"));
  RunLoopUntilIdle();
  EXPECT_TRUE(resolver.HasUnfinishedMerges());

  // Resolve the conflict and verify that HasUnfinishedMerges() returns false.
ASSERT_TRUE(strategy_ptr->head_1); ASSERT_TRUE(strategy_ptr->head_2); ASSERT_TRUE(strategy_ptr->ancestor); ASSERT_TRUE(strategy_ptr->callback); CreateMergeCommit(strategy_ptr->head_1->GetId(), strategy_ptr->head_2->GetId(), AddKeyValueToJournal("key3", "val3.0")); strategy_ptr->callback(Status::OK); strategy_ptr->callback = nullptr; RunLoopUntilIdle(); EXPECT_FALSE(resolver.HasUnfinishedMerges()); } // The commit graph is as follows: // (root) // / | \ // (A) (B) (C) // | X \ / // |/ \ (E) // (D) \ | // (F) // (D) and (F) are both heads, with (D) containing the changes (A) and (B), and // (F) containing (A), (B), (C). This should merge to the content of (F) without // invoking the conflict resolver. TEST_F(MergeResolverTest, MergeSubsets) { storage::CommitId commit_a = CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("k", "a")); storage::CommitId commit_b = CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("k", "b")); storage::CommitId commit_c = CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("k", "c")); storage::CommitId commit_d = CreateMergeCommit(commit_a, commit_b, AddKeyValueToJournal("k", "d")); storage::CommitId commit_e = CreateMergeCommit(commit_b, commit_c, AddKeyValueToJournal("k", "e")); storage::CommitId commit_f = CreateMergeCommit(commit_a, commit_e, AddKeyValueToJournal("k", "f")); RunLoopUntilIdle(); // Set a merge strategy to check that no merge is requested MergeResolver resolver([] {}, &environment_, page_storage_.get(), std::make_unique<TestBackoff>()); std::unique_ptr<RecordingTestStrategy> strategy = std::make_unique<RecordingTestStrategy>(); auto strategy_ptr = strategy.get(); resolver.SetMergeStrategy(std::move(strategy)); RunLoopUntilIdle(); // Verify that the strategy has not been called EXPECT_FALSE(strategy_ptr->callback); // Verify there is only one head with the content of commit F std::vector<std::unique_ptr<const storage::Commit>> commits; Status status = 
page_storage_->GetHeadCommits(&commits); EXPECT_EQ(status, Status::OK); ASSERT_EQ(commits.size(), 1u); bool called; std::unique_ptr<const storage::Commit> commitptr_f; page_storage_->GetCommit(commit_f, Capture(SetWhenCalled(&called), &status, &commitptr_f)); RunLoopUntilIdle(); ASSERT_TRUE(called); EXPECT_EQ(status, Status::OK); ASSERT_TRUE(commitptr_f); EXPECT_EQ(commitptr_f->GetRootIdentifier(), commits[0]->GetRootIdentifier()); } // Check that two equivalent commits are merged to a commit with the content of // one of the two. The commit graph is as follows: // (root) // | | // (A) (B) // | \/ | // | /\ | // (C) (D) TEST_F(MergeResolverTest, MergeEquivalents) { storage::CommitId commit_a = CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("k", "a")); storage::CommitId commit_b = CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("k", "b")); storage::CommitId commit_c = CreateMergeCommit(commit_a, commit_b, AddKeyValueToJournal("k", "c")); storage::CommitId commit_d = CreateMergeCommit(commit_a, commit_b, AddKeyValueToJournal("k", "d")); RunLoopUntilIdle(); // Set a merge strategy to check that no merge is requested MergeResolver resolver([] {}, &environment_, page_storage_.get(), std::make_unique<TestBackoff>()); std::unique_ptr<RecordingTestStrategy> strategy = std::make_unique<RecordingTestStrategy>(); auto strategy_ptr = strategy.get(); resolver.SetMergeStrategy(std::move(strategy)); RunLoopUntilIdle(); // Verify that the strategy has not been called EXPECT_FALSE(strategy_ptr->callback); // Verify there is only one head with the content of commit C or D std::vector<std::unique_ptr<const storage::Commit>> commits; Status status = page_storage_->GetHeadCommits(&commits); EXPECT_EQ(status, Status::OK); ASSERT_EQ(commits.size(), 1u); bool called; std::unique_ptr<const storage::Commit> commitptr_c; page_storage_->GetCommit(commit_c, Capture(SetWhenCalled(&called), &status, &commitptr_c)); RunLoopUntilIdle(); ASSERT_TRUE(called); 
EXPECT_EQ(status, Status::OK); ASSERT_TRUE(commitptr_c); std::unique_ptr<const storage::Commit> commitptr_d; page_storage_->GetCommit(commit_d, Capture(SetWhenCalled(&called), &status, &commitptr_d)); RunLoopUntilIdle(); ASSERT_TRUE(called); EXPECT_EQ(status, Status::OK); ASSERT_TRUE(commitptr_d); EXPECT_THAT(commits[0]->GetRootIdentifier(), AnyOf(Eq(commitptr_c->GetRootIdentifier()), Eq(commitptr_d->GetRootIdentifier()))); } // Tests that already existing merges are used // The commit graph is: // In this test, the commits have the following structure: // (root) // / \ // (A) (B) // | \ / | // (C) \/ (D) // | /\ | // | / \ | // (E) (F) // | (G) // | / // (H) // and (G) is a merge of (A) and (B) // Then merging (F) and (H) should be done using (G) as a base. TEST_F(MergeResolverTest, ReuseExistingMerge) { storage::CommitId commit_a = CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("k", "a")); storage::CommitId commit_b = CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("k", "b")); storage::CommitId commit_c = CreateCommit(commit_a, AddKeyValueToJournal("k", "c")); storage::CommitId commit_d = CreateCommit(commit_b, AddKeyValueToJournal("k", "d")); storage::CommitId commit_e = CreateMergeCommit(commit_c, commit_b, AddKeyValueToJournal("k", "e")); storage::CommitId commit_f = CreateMergeCommit(commit_a, commit_d, AddKeyValueToJournal("k", "f")); storage::CommitId commit_g = CreateMergeCommit(commit_a, commit_b, AddKeyValueToJournal("k", "g")); // commit (H) is necessary because otherwise (G) is a head storage::CommitId commit_h = CreateMergeCommit(commit_e, commit_g, AddKeyValueToJournal("k", "h")); RunLoopUntilIdle(); // Set a merge strategy. 
MergeResolver resolver([] {}, &environment_, page_storage_.get(), std::make_unique<TestBackoff>()); std::unique_ptr<RecordingTestStrategy> strategy = std::make_unique<RecordingTestStrategy>(); auto strategy_ptr = strategy.get(); resolver.SetMergeStrategy(std::move(strategy)); RunLoopUntilIdle(); // The merge strategy is called once to merge E and F with G as a base ASSERT_TRUE(strategy_ptr->callback); EXPECT_EQ(strategy_ptr->ancestor->GetId(), commit_g); EXPECT_THAT((std::vector<storage::CommitId>{strategy_ptr->head_1->GetId(), strategy_ptr->head_2->GetId()}), UnorderedElementsAre(commit_f, commit_h)); // Create the merge CreateMergeCommit(strategy_ptr->head_1->GetId(), strategy_ptr->head_2->GetId(), AddKeyValueToJournal("k", "merge")); strategy_ptr->callback(Status::OK); RunLoopUntilIdle(); // There is only one head now std::vector<std::unique_ptr<const storage::Commit>> commits; Status status = page_storage_->GetHeadCommits(&commits); EXPECT_EQ(status, Status::OK); EXPECT_EQ(commits.size(), 1u); } // Tests that recursive merge work correctly: they terminate and produce a // commit that integrates each change once. // The commit graph is the following: // (root) // / | \ // (A) (B) (C) // | \/ \/ | // (D)/\ /\(E) // |/ X \| // (F) / \ (G) // | / \ | // (H) (I) // Then a merge of (H) and (I) will use (A), (B), (C) as a base. // The merge can proceed in different ways, but will always call the strategy 3 // times. The conflict resolver computes left+right-base on sets represented as // strings. The final state should be equivalent to "abcde". 
TEST_F(MergeResolverTest, RecursiveMerge) { storage::CommitId commit_a = CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("k", "a")); storage::CommitId commit_b = CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("k", "b")); storage::CommitId commit_c = CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("k", "c")); storage::CommitId commit_d = CreateCommit(commit_a, AddKeyValueToJournal("k", "ad")); storage::CommitId commit_e = CreateCommit(commit_c, AddKeyValueToJournal("k", "ce")); storage::CommitId commit_f = CreateMergeCommit(commit_b, commit_d, AddKeyValueToJournal("k", "abd")); storage::CommitId commit_g = CreateMergeCommit(commit_b, commit_e, AddKeyValueToJournal("k", "bce")); storage::CommitId commit_h = CreateMergeCommit(commit_f, commit_c, AddKeyValueToJournal("k", "abcd")); storage::CommitId commit_i = CreateMergeCommit(commit_a, commit_g, AddKeyValueToJournal("k", "abce")); RunLoopUntilIdle(); // Set up a merge strategy. MergeResolver resolver([] {}, &environment_, page_storage_.get(), std::make_unique<TestBackoff>()); std::unique_ptr<RecordingTestStrategy> strategy = std::make_unique<RecordingTestStrategy>(); auto strategy_ptr = strategy.get(); resolver.SetMergeStrategy(std::move(strategy)); RunLoopUntilIdle(); // Do three merges, merging values as sets. for (int i = 0; i < 3; i++) { EXPECT_TRUE(strategy_ptr->callback); if (strategy_ptr->callback) { MergeCommitsAsSets(*strategy_ptr->head_1, *strategy_ptr->head_2, *strategy_ptr->ancestor); strategy_ptr->callback(Status::OK); } strategy_ptr->callback = nullptr; RunLoopUntilIdle(); } EXPECT_FALSE(strategy_ptr->callback); std::vector<std::unique_ptr<const storage::Commit>> commits; Status status = page_storage_->GetHeadCommits(&commits); EXPECT_EQ(status, Status::OK); ASSERT_EQ(commits.size(), 1u); // Check the value of k in the commit. 
EXPECT_EQ(GetKeyOrEmpty(*commits[0], "k"), "abcde"); } // Check that merges are done in timestamp order: in a merge with three bases, // the two commits with highest timestamp are used first. The commit graph is // the following: we add the commits (U) and (V) to ensure that (B) and (C) have // a higher generation than (A), so we can detect if merging is done in // generation order instead of timestamp order. // (root) // / | \ // | (U) (V) // | | | // (A) (B) (C) // | \/ \/ | // (D)/\ /\(E) // |/ X \| // (F) / \ (G) // | / \ | // (H) (I) // We do not test the order of subsequent merges. TEST_F(MergeResolverTest, RecursiveMergeOrder) { storage::CommitId commit_u = CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("k", "u")); storage::CommitId commit_v = CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("k", "v")); // Commit a, b and c can be done in any order. storage::CommitId commit_b = CreateCommit(commit_u, AddKeyValueToJournal("k", "bu")); // Ensure time advances between the commits RunLoopFor(zx::duration(1)); storage::CommitId commit_a = CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("k", "a")); RunLoopFor(zx::duration(1)); storage::CommitId commit_c = CreateCommit(commit_v, AddKeyValueToJournal("k", "cv")); storage::CommitId commit_d = CreateCommit(commit_a, AddKeyValueToJournal("k", "ad")); storage::CommitId commit_e = CreateCommit(commit_c, AddKeyValueToJournal("k", "cev")); storage::CommitId commit_f = CreateMergeCommit(commit_b, commit_d, AddKeyValueToJournal("k", "abdu")); storage::CommitId commit_g = CreateMergeCommit(commit_b, commit_e, AddKeyValueToJournal("k", "bcev")); storage::CommitId commit_h = CreateMergeCommit(commit_f, commit_c, AddKeyValueToJournal("k", "abcduv")); storage::CommitId commit_i = CreateMergeCommit(commit_a, commit_g, AddKeyValueToJournal("k", "abceuv")); RunLoopUntilIdle(); // Set up a merge strategy MergeResolver resolver([] {}, &environment_, page_storage_.get(), 
std::make_unique<TestBackoff>()); std::unique_ptr<RecordingTestStrategy> strategy = std::make_unique<RecordingTestStrategy>(); auto strategy_ptr = strategy.get(); resolver.SetMergeStrategy(std::move(strategy)); RunLoopUntilIdle(); // Inspect the first merge. It should be between b and a. ASSERT_TRUE(strategy_ptr->callback); EXPECT_EQ(strategy_ptr->ancestor->GetId(), storage::kFirstPageCommitId); EXPECT_EQ(commit_b, strategy_ptr->head_1->GetId()); EXPECT_EQ(commit_a, strategy_ptr->head_2->GetId()); } // Checks that last-one-wins picks up changes in the right order for recursive // merges. When doing recursive merges, the set of commits to be merged is to // construct the base is known in advance, so the order should be completely // determinstic: keys coming from newer commits always win against older // commits, even with intermediate merges. // // The commit graph is the following. The goal is to observe the construction of // the merge base (we are not interested in the final merge), so we construct // commits (H) and (I) whose set of common ancestors is {(A), (B), (C)}. // // (root) // / | \ // (A) (B) (C) // | \/ \/ | // (D)/\ /\(E) // |/ X \| // (F) / \ (G) // | / \ | // (H) (I) // // The merge can proceed in different ways: they may be intervening merges that // are done without calling the conflict resolver because one commit contains a // subset of the changes of the other. This test only checks the merges that // involve the conflict resolver. There are three such merges: one between A and // B, one between a merge of A and B, and C, and one between commits equivalent // to H and I. // // At the time of writing this comment, the actual sequence of merges is the // following (assuming D < E in timestamp order): // - Try to merge H and I. The set of ancestors is {A, B, C} // - Merge A and B to J, calling the LastOneWinsStrategy // - Try to merge J and H (they are the two oldest heads). This is an automatic // merge to K, with the same content as H. 
// - Try to merge K and I. The set of ancestors is still {A, B, C} // - A and B are already merged to J // - Merge J and C to L, calling the LastOneWinsStrategy // - Try to merge K and L. This is an automatic merge to M, with the same // content as H. // - Try to merge M and I. The set of ancestors is {A, B, C} // - A and B are already merged to J // - J and C are already merged to L. // - Merge M and I (identical to H and I) with ancestor L. TEST_F(MergeResolverTest, RecursiveMergeLastOneWins) { // Ensure that A, B, C are in chronological order // We insert a key k1 in A, B and C. The value in C should win. // We also insert a key k2 in A and B. If A and C are merged first, the value // in A will be "refreshed" and be considered as recent as C, and will win // against the value in B. We check that this does not happen. storage::CommitId commit_a = CreateCommit(storage::kFirstPageCommitId, [this](auto journal) { AddKeyValueToJournal("k1", "a")(journal); AddKeyValueToJournal("k2", "a")(journal); }); RunLoopFor(zx::duration(1)); storage::CommitId commit_b = CreateCommit(storage::kFirstPageCommitId, [this](auto journal) { AddKeyValueToJournal("k1", "b")(journal); AddKeyValueToJournal("k2", "b")(journal); }); RunLoopFor(zx::duration(1)); storage::CommitId commit_c = CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("k1", "c")); // Build the rest of the graph. We add values to generate changes. storage::CommitId commit_d = CreateCommit(commit_a, AddKeyValueToJournal("k", "d")); storage::CommitId commit_e = CreateCommit(commit_c, AddKeyValueToJournal("k", "e")); storage::CommitId commit_f = CreateMergeCommit(commit_b, commit_d, [](auto journal) {}); storage::CommitId commit_g = CreateMergeCommit(commit_b, commit_e, [](auto journal) {}); storage::CommitId commit_h = CreateMergeCommit(commit_f, commit_c, [](auto journal) {}); storage::CommitId commit_i = CreateMergeCommit(commit_a, commit_g, [](auto journal) {}); RunLoopUntilIdle(); // Set up a merge strategy. 
MergeResolver resolver([] {}, &environment_, page_storage_.get(), std::make_unique<TestBackoff>()); std::unique_ptr<RecordingTestStrategy> strategy = std::make_unique<RecordingTestStrategy>(); auto strategy_ptr = strategy.get(); resolver.SetMergeStrategy(std::move(strategy)); // Set up a last one wins strategy to forward merges to. LastOneWinsMergeStrategy last_one_wins_strategy; // Do two merges using last-one-wins. Check that they are merges of A and B // (generating a commit AB whose id we cannot recover), then of AB and C. RunLoopUntilIdle(); ASSERT_TRUE(strategy_ptr->callback); EXPECT_EQ(strategy_ptr->head_1->GetId(), commit_a); EXPECT_EQ(strategy_ptr->head_2->GetId(), commit_b); EXPECT_EQ(strategy_ptr->ancestor->GetId(), storage::kFirstPageCommitId); strategy_ptr->Forward(&last_one_wins_strategy); RunLoopUntilIdle(); ASSERT_TRUE(strategy_ptr->callback); EXPECT_EQ(strategy_ptr->head_2->GetId(), commit_c); EXPECT_EQ(strategy_ptr->ancestor->GetId(), storage::kFirstPageCommitId); // Check that the first head for the second merge holds the correct values. EXPECT_EQ(GetKeyOrEmpty(*strategy_ptr->head_1, "k1"), "b"); EXPECT_EQ(GetKeyOrEmpty(*strategy_ptr->head_1, "k2"), "b"); strategy_ptr->Forward(&last_one_wins_strategy); // Inspect the last merge: its base is the merge of A, B and C. RunLoopUntilIdle(); ASSERT_TRUE(strategy_ptr->callback); // Check if the ancestor is the one we expect. EXPECT_EQ(GetKeyOrEmpty(*strategy_ptr->ancestor, "k1"), "c"); EXPECT_EQ(GetKeyOrEmpty(*strategy_ptr->ancestor, "k2"), "b"); } // Identical change commits should not be considered equivalent. // This creates two commits with identical contents, and check that the conflict // resolver is called anyway. 
TEST_F(MergeResolverTest, DoNotAutoMergeIdenticalCommits) { storage::CommitId commit_a = CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("k", "v")); storage::CommitId commit_b = CreateCommit(storage::kFirstPageCommitId, AddKeyValueToJournal("k", "v")); // Set up a merge strategy. MergeResolver resolver([] {}, &environment_, page_storage_.get(), std::make_unique<TestBackoff>()); std::unique_ptr<RecordingTestStrategy> strategy = std::make_unique<RecordingTestStrategy>(); auto strategy_ptr = strategy.get(); resolver.SetMergeStrategy(std::move(strategy)); RunLoopUntilIdle(); // Inspect the first merge ASSERT_TRUE(strategy_ptr->callback); EXPECT_EQ(strategy_ptr->ancestor->GetId(), storage::kFirstPageCommitId); EXPECT_THAT((std::vector<storage::CommitId>{strategy_ptr->head_1->GetId(), strategy_ptr->head_2->GetId()}), UnorderedElementsAre(commit_a, commit_b)); } } // namespace } // namespace ledger
// Copyright Oliver Kowalke 2009.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

// Unit tests for the low-level Boost.Context fcontext API: each test allocates
// a guarded stack, creates a context running one of the fN trampolines below,
// and jumps into it, checking side effects on the global value1/value2/value3.

#include <iostream>
#include <sstream>
#include <stdexcept>
#include <string>
#include <utility>

#include <boost/array.hpp>
#include <boost/assert.hpp>
#include <boost/test/unit_test.hpp>
#include <boost/utility.hpp>

#include <boost/context/all.hpp>

namespace ctx = boost::context;

// fcm is the "main" context; fc points at the context under test.
ctx::fcontext_t fcm;
ctx::fcontext_t * fc = 0;
int value1 = 0;
std::string value2;
double value3 = 0.;

// Increments value1 once, then returns control to the main context.
void f1( intptr_t) {
    ++value1;
    ctx::jump_fcontext( fc, & fcm, 0);
}

// Increments value1 on each of two entries, returning to main in between.
void f3( intptr_t) {
    ++value1;
    ctx::jump_fcontext( fc, & fcm, 0);
    ++value1;
    ctx::jump_fcontext( fc, & fcm, 0);
}

// Returns the constant 7 to the main context via the jump result.
void f4( intptr_t) {
    ctx::jump_fcontext( fc, & fcm, 7);
}

// Echoes the argument received from the main context back to it.
void f5( intptr_t arg) {
    ctx::jump_fcontext( fc, & fcm, arg);
}

// Receives a pair<int,int> by pointer, returns the sum, and repeats once
// with the pair transferred through the jump itself.
void f6( intptr_t arg) {
    std::pair< int, int > data = * ( std::pair< int, int > * ) arg;
    int res = data.first + data.second;
    data = * ( std::pair< int, int > *) ctx::jump_fcontext( fc, & fcm, ( intptr_t) res);
    res = data.first + data.second;
    ctx::jump_fcontext( fc, & fcm, ( intptr_t) res);
}

// Throws and catches inside the context, recording the message in value2:
// exceptions must not propagate across a context switch.
void f7( intptr_t arg) {
    try {
        throw std::runtime_error( ( char *) arg);
    } catch ( std::runtime_error const& e) {
        value2 = e.what();
    }
    ctx::jump_fcontext( fc, & fcm, arg);
}

// Does floating-point work inside the context, storing the result in value3
// (checks FPU state survives the switch).
void f8( intptr_t arg) {
    double d = * ( double *) arg;
    d += 3.45;
    value3 = d;
    ctx::jump_fcontext( fc, & fcm, 0);
}

// Sanity-checks the stack-size invariants of the guarded allocator.
void test_stack() {
    ctx::guarded_stack_allocator alloc;
    bool unbound = ctx::guarded_stack_allocator::is_stack_unbound();
    std::size_t min = ctx::guarded_stack_allocator::minimum_stacksize();
    std::size_t def = ctx::guarded_stack_allocator::default_stacksize();
    BOOST_CHECK( min <= def);
    if ( ! unbound) {
        std::size_t max = ctx::guarded_stack_allocator::maximum_stacksize();
        BOOST_CHECK( max >= def);
    }
}

// make_fcontext must record the stack pointer and size it was given.
void test_setup() {
    ctx::guarded_stack_allocator alloc;
    void * sp = alloc.allocate( ctx::guarded_stack_allocator::minimum_stacksize() );
    fc = ctx::make_fcontext( sp, ctx::guarded_stack_allocator::minimum_stacksize(), f1);
    BOOST_CHECK( fc);
    BOOST_CHECK_EQUAL( sp, fc->fc_stack.sp);
    BOOST_CHECK_EQUAL( ctx::guarded_stack_allocator::minimum_stacksize(), fc->fc_stack.size);
}

// A single jump into f1 runs it up to its jump back (value1: 0 -> 1).
void test_start() {
    value1 = 0;
    ctx::guarded_stack_allocator alloc;
    void * sp = alloc.allocate( ctx::guarded_stack_allocator::minimum_stacksize() );
    fc = ctx::make_fcontext( sp, ctx::guarded_stack_allocator::minimum_stacksize(), f1);
    BOOST_CHECK( fc);
    BOOST_CHECK_EQUAL( sp, fc->fc_stack.sp);
    BOOST_CHECK_EQUAL( ctx::guarded_stack_allocator::minimum_stacksize(), fc->fc_stack.size);

    BOOST_CHECK_EQUAL( 0, value1);
    ctx::jump_fcontext( & fcm, fc, 0);
    BOOST_CHECK_EQUAL( 1, value1);
}

// Two jumps into f3 resume it where it left off (value1: 0 -> 1 -> 2).
void test_jump() {
    value1 = 0;
    ctx::guarded_stack_allocator alloc;
    void * sp = alloc.allocate( ctx::guarded_stack_allocator::minimum_stacksize() );
    fc = ctx::make_fcontext( sp, ctx::guarded_stack_allocator::minimum_stacksize(), f3);
    BOOST_CHECK( fc);
    BOOST_CHECK_EQUAL( sp, fc->fc_stack.sp);
    BOOST_CHECK_EQUAL( ctx::guarded_stack_allocator::minimum_stacksize(), fc->fc_stack.size);

    BOOST_CHECK_EQUAL( 0, value1);
    ctx::jump_fcontext( & fcm, fc, 0);
    BOOST_CHECK_EQUAL( 1, value1);
    ctx::jump_fcontext( & fcm, fc, 0);
    BOOST_CHECK_EQUAL( 2, value1);
}

// The value passed to jump_fcontext inside the context is returned to main.
void test_result() {
    ctx::guarded_stack_allocator alloc;
    void * sp = alloc.allocate( ctx::guarded_stack_allocator::minimum_stacksize() );
    fc = ctx::make_fcontext( sp, ctx::guarded_stack_allocator::minimum_stacksize(), f4);
    BOOST_CHECK( fc);
    BOOST_CHECK_EQUAL( sp, fc->fc_stack.sp);
    BOOST_CHECK_EQUAL( ctx::guarded_stack_allocator::minimum_stacksize(), fc->fc_stack.size);

    int result = ( int) ctx::jump_fcontext( & fcm, fc, 0);
    BOOST_CHECK_EQUAL( 7, result);
}

// The argument given at the first jump is delivered to the context function.
void test_arg() {
    ctx::guarded_stack_allocator alloc;
    int i = 7;
    void * sp = alloc.allocate( ctx::guarded_stack_allocator::minimum_stacksize() );
    fc = ctx::make_fcontext( sp, ctx::guarded_stack_allocator::minimum_stacksize(), f5);
    BOOST_CHECK( fc);
    BOOST_CHECK_EQUAL( sp, fc->fc_stack.sp);
    BOOST_CHECK_EQUAL( ctx::guarded_stack_allocator::minimum_stacksize(), fc->fc_stack.size);

    int result = ( int) ctx::jump_fcontext( & fcm, fc, i);
    BOOST_CHECK_EQUAL( i, result);
}

// Data can be transferred in both directions across repeated jumps.
void test_transfer() {
    ctx::guarded_stack_allocator alloc;
    std::pair< int, int > data = std::make_pair( 3, 7);
    void * sp = alloc.allocate( ctx::guarded_stack_allocator::minimum_stacksize() );
    fc = ctx::make_fcontext( sp, ctx::guarded_stack_allocator::minimum_stacksize(), f6);
    BOOST_CHECK( fc);
    BOOST_CHECK_EQUAL( sp, fc->fc_stack.sp);
    BOOST_CHECK_EQUAL( ctx::guarded_stack_allocator::minimum_stacksize(), fc->fc_stack.size);

    int result = ( int) ctx::jump_fcontext( & fcm, fc, ( intptr_t) & data);
    BOOST_CHECK_EQUAL( 10, result);
    data = std::make_pair( 7, 7);
    result = ( int) ctx::jump_fcontext( & fcm, fc, ( intptr_t) & data);
    BOOST_CHECK_EQUAL( 14, result);
}

// Exception thrown and caught inside the context; uses the default (larger)
// stack size because unwinding needs more room.
void test_exception() {
    ctx::guarded_stack_allocator alloc;
    const char * what = "hello world";
    void * sp = alloc.allocate( ctx::guarded_stack_allocator::default_stacksize() );
    fc = ctx::make_fcontext( sp, ctx::guarded_stack_allocator::default_stacksize(), f7);
    BOOST_CHECK( fc);
    BOOST_CHECK_EQUAL( sp, fc->fc_stack.sp);
    BOOST_CHECK_EQUAL( ctx::guarded_stack_allocator::default_stacksize(), fc->fc_stack.size);

    ctx::jump_fcontext( & fcm, fc, ( intptr_t) what);
    BOOST_CHECK_EQUAL( std::string( what), value2);
}

// Floating-point computation inside the context yields the expected value.
void test_fp() {
    ctx::guarded_stack_allocator alloc;
    double d = 7.13;
    void * sp = alloc.allocate( ctx::guarded_stack_allocator::minimum_stacksize() );
    fc = ctx::make_fcontext( sp, ctx::guarded_stack_allocator::minimum_stacksize(), f8);
    BOOST_CHECK( fc);
    BOOST_CHECK_EQUAL( sp, fc->fc_stack.sp);
    BOOST_CHECK_EQUAL( ctx::guarded_stack_allocator::minimum_stacksize(), fc->fc_stack.size);

    ctx::jump_fcontext( & fcm, fc, (intptr_t) & d);
    BOOST_CHECK_EQUAL( 10.58, value3);
}

// Registers all tests with the Boost.Test framework.
boost::unit_test::test_suite * init_unit_test_suite( int, char* [])
{
    boost::unit_test::test_suite * test =
        BOOST_TEST_SUITE("Boost.Context: context test suite");

    test->add( BOOST_TEST_CASE( & test_stack) );
    test->add( BOOST_TEST_CASE( & test_setup) );
    test->add( BOOST_TEST_CASE( & test_start) );
    test->add( BOOST_TEST_CASE( & test_jump) );
    test->add( BOOST_TEST_CASE( & test_result) );
    test->add( BOOST_TEST_CASE( & test_arg) );
    test->add( BOOST_TEST_CASE( & test_transfer) );
    test->add( BOOST_TEST_CASE( & test_exception) );
    test->add( BOOST_TEST_CASE( & test_fp) );

    return test;
}
// -*- mode:c++; tab-width:2; indent-tabs-mode:nil; c-basic-offset:2 -*- /* * Binarizer.cpp * zxing * * Created by Ralf Kistner on 16/10/2009. * Copyright 2008 ZXing authors All rights reserved. * Modified by Lukasz Warchol on 02/02/2010. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <zxing/Binarizer.h> namespace zxing { Binarizer::Binarizer(Ref<LuminanceSource> source) : source_(source) { } Binarizer::~Binarizer() { } Ref<LuminanceSource> Binarizer::getLuminanceSource() const { return source_; } }
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------

// Async operation that downloads a backup from Azure blob storage by shelling
// out to the BackupCopier executable (via BackupCopierProxy).

#include "stdafx.h"

using namespace Management;
using namespace BackupRestoreAgentComponent;
using namespace BackupCopier;

StringLiteral const TraceComponent("BackupCopierProxy");

AzureStorageDownloadBackupAsyncOperation::AzureStorageDownloadBackupAsyncOperation(
    __in BackupCopierProxy & owner,
    Common::ActivityId const & activityId,
    TimeSpan const timeout,
    AsyncCallback const & callback,
    AsyncOperationSPtr const & parent,
    wstring const & connectionString,
    bool isConnectionStringEncrypted,
    wstring const & containerName,
    wstring const & backupStoreBaseFolderPath,
    wstring const & sourceFileOrFolderPath,
    wstring const & targetFolderPath)
    // All parameters are forwarded unchanged to the shared base class; this
    // subclass only selects the "download" operation on top of it.
    : AzureStorageBackupCopierAsyncOperationBase(
        owner,
        activityId,
        timeout,
        callback,
        parent,
        connectionString,
        isConnectionStringEncrypted,
        containerName,
        backupStoreBaseFolderPath,
        sourceFileOrFolderPath,
        targetFolderPath)
{
}

// Standard async-operation End: extracts the final error from the completed
// operation handle.
ErrorCode AzureStorageDownloadBackupAsyncOperation::End(AsyncOperationSPtr const & operation)
{
    auto casted = AsyncOperation::End<AzureStorageDownloadBackupAsyncOperation>(operation);

    return casted->Error;
}

void AzureStorageDownloadBackupAsyncOperation::OnCompleted()
{
    __super::OnCompleted();
}

// Job-queue entry point: kicks off the actual download work.
void AzureStorageDownloadBackupAsyncOperation::OnProcessJob(AsyncOperationSPtr const & thisSPtr)
{
    this->DoDownloadBackup(thisSPtr);
}

// Validates inputs, builds the command line, and launches BackupCopier.exe
// with the blob-store download verb.
void AzureStorageDownloadBackupAsyncOperation::DoDownloadBackup(AsyncOperationSPtr const & thisSPtr)
{
    this->DoDataValidation(BackupCopierProxy::AzureBlobStoreDownload);

    this->Owner.WriteInfo(
        TraceComponent,
        "{0}+{1}: processing download backup.",
        BackupCopierAsyncOperationBase::TraceId,
        this->ActivityId);

    // cmdLineArgsLogString is a sanitized copy safe for tracing (e.g. without
    // secrets such as the connection string).
    wstring cmdLineArgs, cmdLineArgsLogString;
    this->InitializeAndPopulateCommandLineArguments(cmdLineArgs, cmdLineArgsLogString);

    auto operation = this->Owner.BeginRunBackupCopierExe(
        this->ActivityId,
        BackupCopierProxy::AzureBlobStoreDownload,
        cmdLineArgs,
        cmdLineArgsLogString,
        this->Timeout,
        [this](AsyncOperationSPtr const & operation) { this->OnRunBackupCopierComplete(operation, false); },
        thisSPtr);
    // Handle the synchronous-completion path; the lambda above handles the
    // asynchronous one. Exactly one of the two calls proceeds (see guard in
    // OnRunBackupCopierComplete).
    this->OnRunBackupCopierComplete(operation, true);
}

// Completion handler shared by the sync and async paths; the
// expectedCompletedSynchronously check ensures it runs only once.
void AzureStorageDownloadBackupAsyncOperation::OnRunBackupCopierComplete(AsyncOperationSPtr const & operation, bool expectedCompletedSynchronously)
{
    if (operation->CompletedSynchronously != expectedCompletedSynchronously) { return; }

    auto const & thisSPtr = operation->Parent;

    auto error = this->Owner.EndRunBackupCopierExe(operation);

    this->TryComplete(thisSPtr, move(error));
}
/*
 * @title: KMP string matching
 * @description: Knuth-Morris-Pratt string matching algorithm
 * @arguments:
 *   S: pointer to the text (source) string
 *   P: pointer to the pattern string
 *   N: failure-function (next) array [alloc only]
 * @performance:
 *   Time: O(|P| + |S|)
 *   Space: O(|S| + |P| + |N|)
 *   Extra: O(|N|)
 * @dependence:
 *   Matched(int index);  // invoked once per occurrence, with the index of
 *                        // the last character of the match in S
 * @note: both S and P are '\0'-terminated
 */

// Build the failure function: N[i] = length of the longest proper prefix of
// P[0..i] that is also a suffix of it.
void KMPInit(char *P, int *N) {
  int len = 0;  // length of the current matched prefix
  N[0] = 0;
  for (int i = 1; P[i] != '\0'; ++i) {
    // Fall back through shorter borders until one can be extended.
    while (len > 0 && P[len] != P[i])
      len = N[len - 1];
    if (P[len] == P[i])
      ++len;
    N[i] = len;
  }
}

// Scan S once, reporting every occurrence of P through Matched().
void KMP(char *S, char *P, int *N) {
  int len = 0;  // number of pattern characters currently matched
  for (int i = 0; S[i] != '\0'; ++i) {
    while (len > 0 && P[len] != S[i])
      len = N[len - 1];
    if (P[len] == S[i])
      ++len;
    if (P[len] == '\0') {
      // Full match ending at S[i]; continue with the longest border so that
      // overlapping occurrences are also found.
      Matched(i);
      len = N[len - 1];
    }
  }
}
#include "abstract_operator.hpp"

#include <chrono>
#include <memory>
#include <string>
#include <vector>

#include "storage/table.hpp"
#include "utils/assert.hpp"

namespace opossum {

AbstractOperator::AbstractOperator(const std::shared_ptr<const AbstractOperator> left,
                                   const std::shared_ptr<const AbstractOperator> right)
    : _input_left(left), _input_right(right) {}

// Runs the operator and caches its result table for later retrieval.
void AbstractOperator::execute() { _output = _on_execute(); }

std::shared_ptr<const Table> AbstractOperator::get_output() const {
  // Resolves the previous TODO: fail loudly instead of silently handing out a
  // null table when the operator was never executed.
  Assert(_output != nullptr, "get_output() called before execute()");
  return _output;
}

std::shared_ptr<const Table> AbstractOperator::_input_table_left() const {
  // Guard against a null dereference when the operator has no left input.
  Assert(_input_left != nullptr, "Operator has no left input");
  return _input_left->get_output();
}

std::shared_ptr<const Table> AbstractOperator::_input_table_right() const {
  // Guard against a null dereference when the operator has no right input.
  Assert(_input_right != nullptr, "Operator has no right input");
  return _input_right->get_output();
}

}  // namespace opossum
#pragma once

#include "messages/LimitedQueue.hpp"
#include "widgets/BaseWidget.hpp"
#include "widgets/helper/ScrollbarHighlight.hpp"

#include <QMutex>
#include <QPropertyAnimation>
#include <QWidget>
#include <pajlada/signals/signal.hpp>

namespace chatterino {

class ChannelView;

// Custom scrollbar used by ChannelView. It distinguishes a "desired" value
// (the scroll target) from the "current" value, animating current towards
// desired for smooth scrolling, and it can paint per-message highlight marks
// along the track.
class Scrollbar : public BaseWidget
{
    Q_OBJECT

public:
    Scrollbar(ChannelView *parent = nullptr);

    // Highlight marks rendered on the scrollbar track.
    void addHighlight(ScrollbarHighlight highlight);
    void addHighlightsAtStart(
        const std::vector<ScrollbarHighlight> &highlights_);
    void replaceHighlight(size_t index, ScrollbarHighlight replacement);

    void pauseHighlights();
    void unpauseHighlights();
    void clearHighlights();

    void scrollToBottom(bool animate = false);
    bool isAtBottom() const;

    // Range / step configuration (mirrors the usual scrollbar model).
    void setMaximum(qreal value);
    void setMinimum(qreal value);
    void setLargeChange(qreal value);
    void setSmallChange(qreal value);
    void setDesiredValue(qreal value, bool animated = false);
    qreal getMaximum() const;
    qreal getMinimum() const;
    qreal getLargeChange() const;
    qreal getSmallChange() const;
    qreal getDesiredValue() const;
    qreal getCurrentValue() const;
    const QPropertyAnimation &getCurrentValueAnimation() const;

    // offset the desired value without breaking smooth scrolling
    void offset(qreal value);
    pajlada::Signals::NoArgSignal &getCurrentValueChanged();
    pajlada::Signals::NoArgSignal &getDesiredValueChanged();
    void setCurrentValue(qreal value);

    // Debug helper: dumps the scrollbar state, tagged with `prefix`.
    void printCurrentState(const QString &prefix = QString()) const;

    Q_PROPERTY(qreal desiredValue_ READ getDesiredValue WRITE setDesiredValue)

protected:
    void paintEvent(QPaintEvent *) override;
    void resizeEvent(QResizeEvent *) override;

    void mouseMoveEvent(QMouseEvent *event) override;
    void mousePressEvent(QMouseEvent *event) override;
    void mouseReleaseEvent(QMouseEvent *event) override;
    void leaveEvent(QEvent *) override;

private:
    // Property used by currentValueAnimation_ to drive smooth scrolling.
    Q_PROPERTY(qreal currentValue_ READ getCurrentValue WRITE setCurrentValue)

    LimitedQueueSnapshot<ScrollbarHighlight> getHighlightSnapshot();
    void updateScroll();

    QMutex mutex_;

    QPropertyAnimation currentValueAnimation_;

    LimitedQueue<ScrollbarHighlight> highlights_;
    bool highlightsPaused_{false};
    LimitedQueueSnapshot<ScrollbarHighlight> highlightSnapshot_;

    bool atBottom_{false};

    // Mouse interaction state (-1 == none).
    int mouseOverIndex_ = -1;
    int mouseDownIndex_ = -1;
    QPoint lastMousePosition_;

    // Track/thumb geometry.
    int buttonHeight_ = 0;
    int trackHeight_ = 100;
    QRect thumbRect_;

    qreal maximum_ = 0;
    qreal minimum_ = 0;
    qreal largeChange_ = 0;
    qreal smallChange_ = 5;
    qreal desiredValue_ = 0;
    qreal currentValue_ = 0;
    qreal smoothScrollingOffset_ = 0;

    pajlada::Signals::NoArgSignal currentValueChanged_;
    pajlada::Signals::NoArgSignal desiredValueChanged_;
};

}  // namespace chatterino
#pragma once
#include "d3d_pch.hpp"

#include <optional>

// the DescriptorAllocator is CPU visible only; for shader visible descriptors,
// use the dynamic descriptor heap (in context).
namespace hitagi::graphics::backend::DX12 {
class DescriptorPage;

// One CPU descriptor handle plus the page it came from. Copies deliberately
// drop page_from (only the original keeps the owning-page reference); the
// destructor presumably returns the handle to its page — defined in the .cpp.
struct Descriptor {
    Descriptor(D3D12_CPU_DESCRIPTOR_HANDLE handle, std::shared_ptr<DescriptorPage> page_from);
    Descriptor(const Descriptor& rhs) : handle(rhs.handle), page_from(nullptr) {}
    Descriptor& operator=(const Descriptor& rhs) {
        if (this != &rhs) handle = rhs.handle;
        return *this;
    }
    Descriptor(Descriptor&&)            = default;
    Descriptor& operator=(Descriptor&&) = default;
    ~Descriptor();

    D3D12_CPU_DESCRIPTOR_HANDLE     handle;
    std::shared_ptr<DescriptorPage> page_from;
};

// A fixed-size ID3D12DescriptorHeap managed as a free-list of contiguous
// blocks, handed out as Descriptor objects.
class DescriptorPage : public std::enable_shared_from_this<DescriptorPage> {
public:
    static std::shared_ptr<DescriptorPage> Create(ID3D12Device* device, D3D12_DESCRIPTOR_HEAP_TYPE type, size_t num_descriptors) {
        // Local subclass exposes the protected constructor to make_shared.
        struct CreatePage : public DescriptorPage {
            CreatePage(ID3D12Device* device, D3D12_DESCRIPTOR_HEAP_TYPE type, size_t num_descriptors) : DescriptorPage(device, type, num_descriptors) {}
        };
        return std::dynamic_pointer_cast<DescriptorPage>(std::make_shared<CreatePage>(device, type, num_descriptors));
    }
    // Returns a descriptor's slot to this page's free list.
    void DiscardDescriptor(Descriptor& descriptor);

    // Allocates a contiguous run; nullopt when no block is large enough.
    std::optional<std::vector<Descriptor>> Allocate(size_t num_descriptors);

protected:
    DescriptorPage(ID3D12Device* device, D3D12_DESCRIPTOR_HEAP_TYPE type, size_t num_descriptors);

private:
    ComPtr<ID3D12DescriptorHeap> m_DescriptorHeap;
    D3D12_CPU_DESCRIPTOR_HANDLE  m_Handle{};
    size_t                       m_IncrementSize;

    using BlockSize   = size_t;
    using BlockOffset = size_t;
    using SizeIter    = std::multimap<size_t, size_t>::iterator;
    // * according to cpp17 std www.open-std.org/jtc1/sc22/wg21/docs/papers/2017/n4659.pdf [26.2.6/9] [26.2.6/10]
    // * For associative containers `insert` and `emplace` members shall not affect the validity of iterators to the container
    // * and `erase` and `extract` members invalidate only iterators to the erased elements
    // that we will not use in the future
    std::map<BlockOffset, SizeIter>       m_AvailableDescriptors;
    std::multimap<BlockSize, BlockOffset> m_SearchMap;
};

// Front-end allocator: owns a pool of DescriptorPages of one heap type and
// grows the pool as needed (see .cpp).
class DescriptorAllocator {
public:
    DescriptorAllocator(D3D12_DESCRIPTOR_HEAP_TYPE type, size_t num_descriptor_per_page = 1024);
    void Initialize(ID3D12Device* device);

    Descriptor              Allocate();
    std::vector<Descriptor> Allocate(size_t num_descriptors);

    D3D12_DESCRIPTOR_HEAP_TYPE GetType() const { return m_Type; }

private:
    ID3D12Device*              m_Device{};
    D3D12_DESCRIPTOR_HEAP_TYPE m_Type;
    size_t                     m_NumDescriptorPerPage;

    std::list<std::shared_ptr<DescriptorPage>> m_PagePool;
};
}  // namespace hitagi::graphics::backend::DX12
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE122_Heap_Based_Buffer_Overflow__cpp_CWE805_wchar_t_ncat_84_bad.cpp
Label Definition File: CWE122_Heap_Based_Buffer_Overflow__cpp_CWE805.string.label.xml
Template File: sources-sink-84_bad.tmpl.cpp
*/
/*
 * @description
 * CWE: 122 Heap Based Buffer Overflow
 * BadSource:  Allocate using new[] and set data pointer to a small buffer
 * GoodSource: Allocate using new[] and set data pointer to a large buffer
 * Sinks: ncat
 *    BadSink : Copy string to data using wcsncat
 * Flow Variant: 84 Data flow: data passed to class constructor and destructor by declaring the class object on the heap and deleting it after use
 *
 * */

#ifndef OMITBAD

#include "std_testcase.h"
#include "CWE122_Heap_Based_Buffer_Overflow__cpp_CWE805_wchar_t_ncat_84.h"

namespace CWE122_Heap_Based_Buffer_Overflow__cpp_CWE805_wchar_t_ncat_84
{
/* NOTE(review): this is intentionally-vulnerable Juliet/SARD corpus code; the
 * flaws flagged below are the point of the testcase and must NOT be fixed. */
CWE122_Heap_Based_Buffer_Overflow__cpp_CWE805_wchar_t_ncat_84_bad::CWE122_Heap_Based_Buffer_Overflow__cpp_CWE805_wchar_t_ncat_84_bad(wchar_t * dataCopy)
{
    /* dataCopy is immediately overwritten below (intentional for this variant). */
    data = dataCopy;
    /* FLAW: Allocate using new[] and point data to a small buffer that is smaller than the large buffer used in the sinks */
    data = new wchar_t[50];
    data[0] = L'\0'; /* null terminate */
}

CWE122_Heap_Based_Buffer_Overflow__cpp_CWE805_wchar_t_ncat_84_bad::~CWE122_Heap_Based_Buffer_Overflow__cpp_CWE805_wchar_t_ncat_84_bad()
{
    {
        /* The sink: appends a 99-char source into the 50-element buffer. */
        wchar_t source[100];
        wmemset(source, L'C', 100-1); /* fill with L'C's */
        source[100-1] = L'\0'; /* null terminate */
        /* POTENTIAL FLAW: Possible buffer overflow if source is larger than sizeof(data)-strlen(data) */
        wcsncat(data, source, 100);
        printWLine(data);
        delete [] data;
    }
}
}
#endif /* OMITBAD */
/****************************************************************************** * The MIT License (MIT) * * Copyright (c) 2015-2019 Baldur Karlsson * Copyright (c) 2014 Crytek * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
******/

#include "driver/dxgi/dxgi_wrapped.h"
#include "core/core.h"
#include "serialise/serialiser.h"

ID3D11Resource *UnwrapDXResource(void *dxObject);
IDXGIResource *UnwrapDXGIResource(void *dxgiObject);

WRAPPED_POOL_INST(WrappedIDXGIDevice4);

std::vector<D3DDeviceCallback> WrappedIDXGISwapChain4::m_D3DCallbacks;

// Resolve the wrapped ID3DDevice behind pDevice: first via the wrapped DXGI
// device pool, then via the swapchain's registered device callbacks.
ID3DDevice *GetD3DDevice(IUnknown *pDevice)
{
  ID3DDevice *wrapDevice = NULL;

  if(WrappedIDXGIDevice4::IsAlloc(pDevice))
    wrapDevice = ((WrappedIDXGIDevice4 *)(IDXGIDevice3 *)pDevice)->GetD3DDevice();

  if(wrapDevice == NULL)
    wrapDevice = WrappedIDXGISwapChain4::GetD3DDevice(pDevice);

  return wrapDevice;
}

// Replaces *ppvObject with the matching RenderDoc wrapper for the requested
// riid (adapters/factories), so returned DXGI objects stay hooked. Returns
// true only when *ppvObject was actually rewrapped.
bool RefCountDXGIObject::HandleWrap(REFIID riid, void **ppvObject)
{
  if(ppvObject == NULL || *ppvObject == NULL)
  {
    RDCWARN("HandleWrap called with NULL ppvObject");
    return false;
  }

  // unknown GUID that we only want to print once to avoid log spam
  // {79D2046C-22EF-451B-9E74-2245D9C760EA}
  static const GUID Unknown_uuid = {
      0x79d2046c, 0x22ef, 0x451b, {0x9e, 0x74, 0x22, 0x45, 0xd9, 0xc7, 0x60, 0xea}};

  // ditto
  // {9B7E4C04-342C-4106-A19F-4F2704F689F0}
  static const GUID ID3D10Texture2D_uuid = {
      0x9b7e4c04, 0x342c, 0x4106, {0xa1, 0x9f, 0x4f, 0x27, 0x04, 0xf6, 0x89, 0xf0}};

  if(riid == __uuidof(IDXGIDevice))
  {
    // should have been handled elsewhere, so we can properly create this device
    RDCERR("Unexpected uuid in RefCountDXGIObject::HandleWrap");
    return false;
  }
  else if(riid == __uuidof(IDXGIAdapter))
  {
    IDXGIAdapter *real = (IDXGIAdapter *)(*ppvObject);
    *ppvObject = (IDXGIAdapter *)(new WrappedIDXGIAdapter4(real));
    return true;
  }
  else if(riid == __uuidof(IDXGIFactory))
  {
    // yes I know PRECISELY how fucked up this is. Speak to microsoft - after KB2670838 the internal
    // D3D11 device creation function will pass in __uuidof(IDXGIFactory) then attempt to call
    // EnumDevices1 (which is in the IDXGIFactory1 vtable). Doing this *should* be safe as using a
    // IDXGIFactory1 like a IDXGIFactory should all just work by definition, but there's no way to
    // know now if someone trying to create a IDXGIFactory really means it or not.
    IDXGIFactory *real = (IDXGIFactory *)(*ppvObject);
    *ppvObject = (IDXGIFactory *)(new WrappedIDXGIFactory(real));
    return true;
  }
  else if(riid == __uuidof(IDXGIDevice1))
  {
    // should have been handled elsewhere, so we can properly create this device
    RDCERR("Unexpected uuid in RefCountDXGIObject::HandleWrap");
    return false;
  }
  else if(riid == __uuidof(IDXGIAdapter1))
  {
    IDXGIAdapter1 *real = (IDXGIAdapter1 *)(*ppvObject);
    *ppvObject = (IDXGIAdapter1 *)(new WrappedIDXGIAdapter4(real));
    return true;
  }
  else if(riid == __uuidof(IDXGIFactory1))
  {
    IDXGIFactory1 *real = (IDXGIFactory1 *)(*ppvObject);
    *ppvObject = (IDXGIFactory1 *)(new WrappedIDXGIFactory(real));
    return true;
  }
  else if(riid == __uuidof(IDXGIAdapter2))
  {
    IDXGIAdapter2 *real = (IDXGIAdapter2 *)(*ppvObject);
    *ppvObject = (IDXGIAdapter2 *)(new WrappedIDXGIAdapter4(real));
    return true;
  }
  else if(riid == __uuidof(IDXGIAdapter3))
  {
    IDXGIAdapter3 *real = (IDXGIAdapter3 *)(*ppvObject);
    *ppvObject = (IDXGIAdapter3 *)(new WrappedIDXGIAdapter4(real));
    return true;
  }
  else if(riid == __uuidof(IDXGIFactory2))
  {
    IDXGIFactory2 *real = (IDXGIFactory2 *)(*ppvObject);
    *ppvObject = (IDXGIFactory2 *)(new WrappedIDXGIFactory(real));
    return true;
  }
  else if(riid == __uuidof(IDXGIFactory3))
  {
    IDXGIFactory3 *real = (IDXGIFactory3 *)(*ppvObject);
    *ppvObject = (IDXGIFactory3 *)(new WrappedIDXGIFactory(real));
    return true;
  }
  else if(riid == __uuidof(IDXGIFactory4))
  {
    IDXGIFactory4 *real = (IDXGIFactory4 *)(*ppvObject);
    *ppvObject = (IDXGIFactory4 *)(new WrappedIDXGIFactory(real));
    return true;
  }
  else if(riid == __uuidof(IDXGIFactory5))
  {
    IDXGIFactory5 *real = (IDXGIFactory5 *)(*ppvObject);
    *ppvObject = (IDXGIFactory5 *)(new WrappedIDXGIFactory(real));
    return true;
  }
  else if(riid == __uuidof(IDXGIFactory6))
  {
    IDXGIFactory6 *real = (IDXGIFactory6 *)(*ppvObject);
    *ppvObject = (IDXGIFactory6 *)(new WrappedIDXGIFactory(real));
    return true;
  }
  else if(riid == __uuidof(IDXGIFactory7))
  {
    IDXGIFactory7 *real = (IDXGIFactory7 *)(*ppvObject);
    *ppvObject = (IDXGIFactory7 *)(new WrappedIDXGIFactory(real));
    return true;
  }
  else if(riid == ID3D10Texture2D_uuid)
  {
    // warn only once per GUID to avoid log spam
    static bool printed = false;
    if(!printed)
    {
      printed = true;
      RDCWARN("Querying IDXGIObject for unsupported D3D10 interface: %s", ToStr(riid).c_str());
    }
    return false;
  }
  else if(riid == Unknown_uuid)
  {
    static bool printed = false;
    if(!printed)
    {
      printed = true;
      RDCWARN("Querying IDXGIObject for unknown GUID: %s", ToStr(riid).c_str());
    }
  }
  else
  {
    WarnUnknownGUID("IDXGIObject", riid);
  }

  return false;
}

HRESULT STDMETHODCALLTYPE RefCountDXGIObject::GetParent(
    /* [in] */ REFIID riid,
    /* [retval][out] */ void **ppParent)
{
  HRESULT ret = m_pReal->GetParent(riid, ppParent);

  // wrap the returned parent so it stays hooked
  if(SUCCEEDED(ret))
    HandleWrap(riid, ppParent);

  return ret;
}

HRESULT RefCountDXGIObject::WrapQueryInterface(IUnknown *real, REFIID riid, void **ppvObject)
{
  HRESULT ret = real->QueryInterface(riid, ppvObject);

  if(SUCCEEDED(ret))
    HandleWrap(riid, ppvObject);

  return ret;
}

// Wraps an existing swapchain: caches the real IDXGISwapChain1..4 interfaces
// (NULL where unsupported) and wraps the current backbuffers.
WrappedIDXGISwapChain4::WrappedIDXGISwapChain4(IDXGISwapChain *real, HWND wnd, ID3DDevice *device)
    : RefCountDXGIObject(real), m_pReal(real), m_pDevice(device), m_Wnd(wnd)
{
  DXGI_SWAP_CHAIN_DESC desc;
  real->GetDesc(&desc);

  m_pDevice->AddRef();

  m_pReal1 = NULL;
  real->QueryInterface(__uuidof(IDXGISwapChain1), (void **)&m_pReal1);
  m_pReal2 = NULL;
  real->QueryInterface(__uuidof(IDXGISwapChain2), (void **)&m_pReal2);
  m_pReal3 = NULL;
  real->QueryInterface(__uuidof(IDXGISwapChain3), (void **)&m_pReal3);
  m_pReal4 = NULL;
  real->QueryInterface(__uuidof(IDXGISwapChain4), (void **)&m_pReal4);

  WrapBuffersAfterResize();

  // we do a 'fake' present right at the start, so that we can capture frame 1, by
  // going from this fake present to the first present.
m_pDevice->FirstFrame(this);
}

WrappedIDXGISwapChain4::~WrappedIDXGISwapChain4()
{
  // release the wrapped backbuffers before releasing the real interfaces
  m_pDevice->ReleaseSwapchainResources(this, 0, NULL, NULL);

  SAFE_RELEASE(m_pDevice);

  SAFE_RELEASE(m_pReal1);
  SAFE_RELEASE(m_pReal2);
  SAFE_RELEASE(m_pReal3);
  SAFE_RELEASE(m_pReal4);
  SAFE_RELEASE(m_pReal);
}

// Returns this wrapper for any IDXGISwapChainN the real object supports;
// E_NOINTERFACE where the underlying swapchain lacks that version.
HRESULT STDMETHODCALLTYPE WrappedIDXGISwapChain4::QueryInterface(REFIID riid, void **ppvObject)
{
  if(riid == __uuidof(IDXGISwapChain))
  {
    AddRef();
    *ppvObject = (IDXGISwapChain *)this;
    return S_OK;
  }
  else if(riid == __uuidof(IDXGISwapChain1))
  {
    if(m_pReal1)
    {
      AddRef();
      *ppvObject = (IDXGISwapChain1 *)this;
      return S_OK;
    }
    else
    {
      return E_NOINTERFACE;
    }
  }
  else if(riid == __uuidof(IDXGISwapChain2))
  {
    if(m_pReal2)
    {
      AddRef();
      *ppvObject = (IDXGISwapChain2 *)this;
      return S_OK;
    }
    else
    {
      return E_NOINTERFACE;
    }
  }
  else if(riid == __uuidof(IDXGISwapChain3))
  {
    if(m_pReal3)
    {
      AddRef();
      *ppvObject = (IDXGISwapChain3 *)this;
      return S_OK;
    }
    else
    {
      return E_NOINTERFACE;
    }
  }
  else if(riid == __uuidof(IDXGISwapChain4))
  {
    if(m_pReal4)
    {
      AddRef();
      *ppvObject = (IDXGISwapChain4 *)this;
      return S_OK;
    }
    else
    {
      return E_NOINTERFACE;
    }
  }
  else
  {
    WarnUnknownGUID("IDXGISwapChain", riid);
  }

  return RefCountDXGIObject::QueryInterface(riid, ppvObject);
}

void WrappedIDXGISwapChain4::ReleaseBuffersForResize(UINT QueueCount, IUnknown *const *ppPresentQueue, IUnknown **unwrappedQueues)
{
  m_pDevice->ReleaseSwapchainResources(this, QueueCount, ppPresentQueue, unwrappedQueues);
}

// Re-fetches and wraps the backbuffers after creation or a resize.
void WrappedIDXGISwapChain4::WrapBuffersAfterResize()
{
  DXGI_SWAP_CHAIN_DESC desc;
  m_pReal->GetDesc(&desc);

  int bufCount = desc.BufferCount;

  // with DISCARD swap effect only buffer 0 is accessible
  if(desc.SwapEffect == DXGI_SWAP_EFFECT_DISCARD)
    bufCount = 1;

  RDCASSERT(bufCount < MAX_NUM_BACKBUFFERS);

  for(int i = 0; i < MAX_NUM_BACKBUFFERS; i++)
  {
    m_pBackBuffers[i] = NULL;

    if(i < bufCount)
    {
      GetBuffer(i, m_pDevice->GetBackbufferUUID(), (void **)&m_pBackBuffers[i]);

      m_pDevice->NewSwapchainBuffer(m_pBackBuffers[i]);
    }
  }
}

HRESULT WrappedIDXGISwapChain4::ResizeBuffers(
    /* [in] */ UINT BufferCount,
    /* [in] */ UINT Width,
    /* [in] */ UINT Height,
    /* [in] */ DXGI_FORMAT NewFormat,
    /* [in] */ UINT SwapChainFlags)
{
  // drop wrapped buffers, resize the real swapchain, then rewrap
  ReleaseBuffersForResize(0, NULL, NULL);

  HRESULT ret = m_pReal->ResizeBuffers(BufferCount, Width, Height, NewFormat, SwapChainFlags);

  WrapBuffersAfterResize();

  m_LastPresentedBuffer = -1;

  return ret;
}

HRESULT STDMETHODCALLTYPE WrappedIDXGISwapChain4::GetContainingOutput(IDXGIOutput **ppOutput)
{
  HRESULT ret = m_pReal->GetContainingOutput(ppOutput);

  if(SUCCEEDED(ret) && ppOutput && *ppOutput)
    *ppOutput = (IDXGIOutput *)(new WrappedIDXGIOutput6(this, *ppOutput));

  return ret;
}

HRESULT WrappedIDXGISwapChain4::ResizeBuffers1(_In_ UINT BufferCount, _In_ UINT Width,
                                               _In_ UINT Height, _In_ DXGI_FORMAT Format,
                                               _In_ UINT SwapChainFlags,
                                               _In_reads_(BufferCount) const UINT *pCreationNodeMask,
                                               _In_reads_(BufferCount) IUnknown *const *ppPresentQueue)
{
  // present queues must be unwrapped before passing to the real swapchain
  IUnknown **unwrappedQueues = NULL;

  if(ppPresentQueue)
    unwrappedQueues = new IUnknown *[BufferCount];

  ReleaseBuffersForResize(BufferCount, ppPresentQueue, unwrappedQueues);

  HRESULT ret = m_pReal3->ResizeBuffers1(BufferCount, Width, Height, Format, SwapChainFlags,
                                         pCreationNodeMask, unwrappedQueues);

  SAFE_DELETE_ARRAY(unwrappedQueues);

  WrapBuffersAfterResize();

  m_LastPresentedBuffer = -1;

  return ret;
}

HRESULT WrappedIDXGISwapChain4::SetFullscreenState(
    /* [in] */ BOOL Fullscreen,
    /* [in] */ IDXGIOutput *pTarget)
{
  WrappedIDXGIOutput6 *wrappedOutput = (WrappedIDXGIOutput6 *)pTarget;
  IDXGIOutput *unwrappedOutput = wrappedOutput ?
wrappedOutput->GetReal() : NULL;

  // swallow fullscreen requests when capture options disallow fullscreen
  if(RenderDoc::Inst().GetCaptureOptions().allowFullscreen)
    return m_pReal->SetFullscreenState(Fullscreen, unwrappedOutput);

  return S_OK;
}

HRESULT WrappedIDXGISwapChain4::GetFullscreenState(
    /* [out] */ BOOL *pFullscreen,
    /* [out] */ IDXGIOutput **ppTarget)
{
  HRESULT ret = m_pReal->GetFullscreenState(pFullscreen, ppTarget);

  if(SUCCEEDED(ret) && ppTarget && *ppTarget)
    *ppTarget = (IDXGIOutput *)(new WrappedIDXGIOutput6(this, *ppTarget));

  return ret;
}

// Fetches a backbuffer and returns the device's wrapped resource for it.
// Only D3D11/D3D12 resource UUIDs (and IDXGISurface, remapped) are supported.
HRESULT WrappedIDXGISwapChain4::GetBuffer(
    /* [in] */ UINT Buffer,
    /* [in] */ REFIID riid,
    /* [out][in] */ void **ppSurface)
{
  if(ppSurface == NULL)
    return E_INVALIDARG;

  // ID3D10Texture2D UUID {9B7E4C04-342C-4106-A19F-4F2704F689F0}
  static const GUID ID3D10Texture2D_uuid = {
      0x9b7e4c04, 0x342c, 0x4106, {0xa1, 0x9f, 0x4f, 0x27, 0x04, 0xf6, 0x89, 0xf0}};

  // ID3D10Resource UUID {9B7E4C01-342C-4106-A19F-4F2704F689F0}
  static const GUID ID3D10Resource_uuid = {
      0x9b7e4c01, 0x342c, 0x4106, {0xa1, 0x9f, 0x4f, 0x27, 0x04, 0xf6, 0x89, 0xf0}};

  IID uuid = riid;

  if(uuid == ID3D10Texture2D_uuid || uuid == ID3D10Resource_uuid)
  {
    RDCERR("Querying swapchain buffers via D3D10 interface UUIDs is not supported");
    return E_NOINTERFACE;
  }
  else if(uuid == __uuidof(IDXGISurface))
  {
    RDCWARN("Querying swapchain buffer for IDXGISurface. This query is ambiguous.");
    // query as native format so that wrapping works as expected
    uuid = m_pDevice->GetBackbufferUUID();
  }
  else if(uuid != __uuidof(ID3D11Texture2D) && uuid != __uuidof(ID3D11Texture2D1) &&
          uuid != __uuidof(ID3D11Resource) && uuid != __uuidof(ID3D12Resource) &&
          uuid != __uuidof(ID3D12Resource1))
  {
    RDCERR("Unsupported or unrecognised UUID passed to IDXGISwapChain::GetBuffer - %s",
           ToStr(uuid).c_str());
    return E_NOINTERFACE;
  }

  RDCASSERT(uuid == __uuidof(ID3D11Texture2D) || uuid == __uuidof(ID3D11Texture2D1) ||
            uuid == __uuidof(ID3D11Resource) || uuid == __uuidof(ID3D12Resource) ||
            uuid == __uuidof(ID3D12Resource1));

  HRESULT ret = m_pReal->GetBuffer(Buffer, uuid, ppSurface);

  {
    IUnknown *realSurface = (IUnknown *)*ppSurface;
    IUnknown *tex = realSurface;
    if(FAILED(ret))
    {
      RDCERR("Failed to get swapchain backbuffer %d: HRESULT: %s", Buffer, ToStr(ret).c_str());
      SAFE_RELEASE(realSurface);
      tex = NULL;
    }
    else
    {
      DXGI_SWAP_CHAIN_DESC desc;
      GetDesc(&desc);

      // hand ownership of the real surface to the device, get the wrapper back
      tex = m_pDevice->WrapSwapchainBuffer(this, desc.BufferDesc.Format, Buffer, realSurface);
    }

    // if the original UUID was IDXGISurface, fixup for the expected interface being returned
    if(tex && riid == __uuidof(IDXGISurface))
    {
      IDXGISurface *surf = NULL;
      HRESULT hr = tex->QueryInterface(__uuidof(IDXGISurface), (void **)&surf);
      RDCASSERTEQUAL(hr, S_OK);
      tex->Release();
      tex = surf;
    }

    *ppSurface = tex;
  }

  return ret;
}

HRESULT WrappedIDXGISwapChain4::GetDevice(
    /* [in] */ REFIID riid,
    /* [retval][out] */ void **ppDevice)
{
  HRESULT ret = m_pReal->GetDevice(riid, ppDevice);

  if(SUCCEEDED(ret))
  {
    // try one of the trivial wraps, we don't mind making a new one of those
    if(m_pDevice->IsDeviceUUID(riid))
    {
      // probably they're asking for the device device.
      *ppDevice = m_pDevice->GetDeviceInterface(riid);
      m_pDevice->AddRef();
    }
    else if(riid == __uuidof(IDXGISwapChain))
    {
      // don't think anyone would try this, but what the hell.
*ppDevice = this;
      AddRef();
    }
    else if(!HandleWrap(riid, ppDevice))
    {
      // can probably get away with returning the real result here,
      // but it worries me a bit.
      RDCUNIMPLEMENTED("Not returning trivial type");
    }
  }

  return ret;
}

// Notifies the device (so a capture can be triggered) then forwards to the
// real swapchain. DXGI_PRESENT_TEST presents are not counted as frames.
HRESULT WrappedIDXGISwapChain4::Present(
    /* [in] */ UINT SyncInterval,
    /* [in] */ UINT Flags)
{
  if(!RenderDoc::Inst().GetCaptureOptions().allowVSync)
  {
    SyncInterval = 0;
  }

  if((Flags & DXGI_PRESENT_TEST) == 0)
  {
    TickLastPresentedBuffer();
    m_pDevice->Present(this, SyncInterval, Flags);
  }

  return m_pReal->Present(SyncInterval, Flags);
}

// Same as Present() but for the IDXGISwapChain1 entry point.
HRESULT WrappedIDXGISwapChain4::Present1(UINT SyncInterval, UINT Flags,
                                         const DXGI_PRESENT_PARAMETERS *pPresentParameters)
{
  if(!RenderDoc::Inst().GetCaptureOptions().allowVSync)
  {
    SyncInterval = 0;
  }

  if((Flags & DXGI_PRESENT_TEST) == 0)
  {
    TickLastPresentedBuffer();
    m_pDevice->Present(this, SyncInterval, Flags);
  }

  return m_pReal1->Present1(SyncInterval, Flags, pPresentParameters);
}

HRESULT STDMETHODCALLTYPE WrappedIDXGISwapChain4::GetRestrictToOutput(IDXGIOutput **ppRestrictToOutput)
{
  HRESULT ret = m_pReal1->GetRestrictToOutput(ppRestrictToOutput);

  if(SUCCEEDED(ret) && ppRestrictToOutput && *ppRestrictToOutput)
    *ppRestrictToOutput = (IDXGIOutput *)(new WrappedIDXGIOutput6(this, *ppRestrictToOutput));

  return ret;
}

// Wraps an output, holding a reference on the owner object it was obtained
// from; caches real IDXGIOutput1..6 (NULL where unsupported).
WrappedIDXGIOutput6::WrappedIDXGIOutput6(RefCountDXGIObject *owner, IDXGIOutput *real)
    : RefCountDXGIObject(real), m_Owner(owner), m_pReal(real)
{
  SAFE_ADDREF(m_Owner);

  m_pReal1 = NULL;
  real->QueryInterface(__uuidof(IDXGIOutput1), (void **)&m_pReal1);
  m_pReal2 = NULL;
  real->QueryInterface(__uuidof(IDXGIOutput2), (void **)&m_pReal2);
  m_pReal3 = NULL;
  real->QueryInterface(__uuidof(IDXGIOutput3), (void **)&m_pReal3);
  m_pReal4 = NULL;
  real->QueryInterface(__uuidof(IDXGIOutput4), (void **)&m_pReal4);
  m_pReal5 = NULL;
  real->QueryInterface(__uuidof(IDXGIOutput5), (void **)&m_pReal5);
  m_pReal6 = NULL;
  real->QueryInterface(__uuidof(IDXGIOutput6), (void **)&m_pReal6);
}

WrappedIDXGIOutput6::~WrappedIDXGIOutput6()
{
  SAFE_RELEASE(m_pReal1);
  SAFE_RELEASE(m_pReal2);
  SAFE_RELEASE(m_pReal3);
  SAFE_RELEASE(m_pReal4);
  SAFE_RELEASE(m_pReal5);
  SAFE_RELEASE(m_pReal6);
  SAFE_RELEASE(m_pReal);
  SAFE_RELEASE(m_Owner);
}

// Returns this wrapper for any IDXGIOutputN the real object supports.
HRESULT STDMETHODCALLTYPE WrappedIDXGIOutput6::QueryInterface(REFIID riid, void **ppvObject)
{
  if(riid == __uuidof(IDXGIOutput))
  {
    AddRef();
    *ppvObject = (IDXGIOutput *)this;
    return S_OK;
  }
  else if(riid == __uuidof(IDXGIOutput1))
  {
    if(m_pReal1)
    {
      AddRef();
      *ppvObject = (IDXGIOutput1 *)this;
      return S_OK;
    }
    else
    {
      return E_NOINTERFACE;
    }
  }
  else if(riid == __uuidof(IDXGIOutput2))
  {
    if(m_pReal2)
    {
      AddRef();
      *ppvObject = (IDXGIOutput2 *)this;
      return S_OK;
    }
    else
    {
      return E_NOINTERFACE;
    }
  }
  else if(riid == __uuidof(IDXGIOutput3))
  {
    if(m_pReal3)
    {
      AddRef();
      *ppvObject = (IDXGIOutput3 *)this;
      return S_OK;
    }
    else
    {
      return E_NOINTERFACE;
    }
  }
  else if(riid == __uuidof(IDXGIOutput4))
  {
    if(m_pReal4)
    {
      AddRef();
      *ppvObject = (IDXGIOutput4 *)this;
      return S_OK;
    }
    else
    {
      return E_NOINTERFACE;
    }
  }
  else if(riid == __uuidof(IDXGIOutput5))
  {
    if(m_pReal5)
    {
      AddRef();
      *ppvObject = (IDXGIOutput5 *)this;
      return S_OK;
    }
    else
    {
      return E_NOINTERFACE;
    }
  }
  else if(riid == __uuidof(IDXGIOutput6))
  {
    if(m_pReal6)
    {
      AddRef();
      *ppvObject = (IDXGIOutput6 *)this;
      return S_OK;
    }
    else
    {
      return E_NOINTERFACE;
    }
  }
  else
  {
    WarnUnknownGUID("IDXGIOutput", riid);
  }

  return RefCountDXGIObject::QueryInterface(riid, ppvObject);
}

// Wraps an adapter; caches real IDXGIAdapter1..4 (NULL where unsupported).
WrappedIDXGIAdapter4::WrappedIDXGIAdapter4(IDXGIAdapter *real)
    : RefCountDXGIObject(real), m_pReal(real)
{
  m_pReal1 = NULL;
  real->QueryInterface(__uuidof(IDXGIAdapter1), (void **)&m_pReal1);
  m_pReal2 = NULL;
  real->QueryInterface(__uuidof(IDXGIAdapter2), (void **)&m_pReal2);
  m_pReal3 = NULL;
  real->QueryInterface(__uuidof(IDXGIAdapter3), (void **)&m_pReal3);
  m_pReal4 = NULL;
  real->QueryInterface(__uuidof(IDXGIAdapter4), (void **)&m_pReal4);
}

WrappedIDXGIAdapter4::~WrappedIDXGIAdapter4()
{
  SAFE_RELEASE(m_pReal1);
  SAFE_RELEASE(m_pReal2);
  SAFE_RELEASE(m_pReal3);
  SAFE_RELEASE(m_pReal4);
SAFE_RELEASE(m_pReal);
}

// Returns this wrapper for any IDXGIAdapterN the real object supports.
HRESULT STDMETHODCALLTYPE WrappedIDXGIAdapter4::QueryInterface(REFIID riid, void **ppvObject)
{
  if(riid == __uuidof(IDXGIAdapter))
  {
    AddRef();
    *ppvObject = (IDXGIAdapter *)this;
    return S_OK;
  }
  else if(riid == __uuidof(IDXGIAdapter1))
  {
    if(m_pReal1)
    {
      AddRef();
      *ppvObject = (IDXGIAdapter1 *)this;
      return S_OK;
    }
    else
    {
      return E_NOINTERFACE;
    }
  }
  else if(riid == __uuidof(IDXGIAdapter2))
  {
    if(m_pReal2)
    {
      AddRef();
      *ppvObject = (IDXGIAdapter2 *)this;
      return S_OK;
    }
    else
    {
      return E_NOINTERFACE;
    }
  }
  else if(riid == __uuidof(IDXGIAdapter3))
  {
    if(m_pReal3)
    {
      AddRef();
      *ppvObject = (IDXGIAdapter3 *)this;
      return S_OK;
    }
    else
    {
      return E_NOINTERFACE;
    }
  }
  else
  {
    WarnUnknownGUID("IDXGIAdapter", riid);
  }

  return RefCountDXGIObject::QueryInterface(riid, ppvObject);
}

// Wraps the DXGI device, keeping a reference on the owning D3D device; caches
// real IDXGIDevice1..4 (NULL where unsupported).
WrappedIDXGIDevice4::WrappedIDXGIDevice4(IDXGIDevice *real, ID3DDevice *d3d)
    : RefCountDXGIObject(real), m_pReal(real), m_pD3DDevice(d3d)
{
  m_pD3DDevice->AddRef();

  m_pReal1 = NULL;
  real->QueryInterface(__uuidof(IDXGIDevice1), (void **)&m_pReal1);
  m_pReal2 = NULL;
  real->QueryInterface(__uuidof(IDXGIDevice2), (void **)&m_pReal2);
  m_pReal3 = NULL;
  real->QueryInterface(__uuidof(IDXGIDevice3), (void **)&m_pReal3);
  m_pReal4 = NULL;
  real->QueryInterface(__uuidof(IDXGIDevice4), (void **)&m_pReal4);
}

WrappedIDXGIDevice4::~WrappedIDXGIDevice4()
{
  SAFE_RELEASE(m_pReal1);
  SAFE_RELEASE(m_pReal2);
  SAFE_RELEASE(m_pReal3);
  SAFE_RELEASE(m_pReal4);
  SAFE_RELEASE(m_pReal);
  SAFE_RELEASE(m_pD3DDevice);
}

// Hands out the wrapped D3D device for device UUIDs, otherwise this wrapper
// for supported IDXGIDeviceN versions.
HRESULT STDMETHODCALLTYPE WrappedIDXGIDevice4::QueryInterface(REFIID riid, void **ppvObject)
{
  if(m_pD3DDevice->IsDeviceUUID(riid))
  {
    m_pD3DDevice->AddRef();
    *ppvObject = m_pD3DDevice->GetDeviceInterface(riid);
    return S_OK;
  }
  else if(riid == __uuidof(ID3D11Multithread))
  {
    // forward to the device as the lock is shared amongst all things
    return m_pD3DDevice->QueryInterface(riid, ppvObject);
  }
  else if(riid == __uuidof(IDXGIDevice))
  {
    AddRef();
    *ppvObject = (IDXGIDevice *)this;
    return S_OK;
  }
  else if(riid == __uuidof(IDXGIDevice1))
  {
    if(m_pReal1)
    {
      AddRef();
      *ppvObject = (IDXGIDevice1 *)this;
      return S_OK;
    }
    else
    {
      return E_NOINTERFACE;
    }
  }
  else if(riid == __uuidof(IDXGIDevice2))
  {
    if(m_pReal2)
    {
      AddRef();
      *ppvObject = (IDXGIDevice2 *)this;
      return S_OK;
    }
    else
    {
      return E_NOINTERFACE;
    }
  }
  else if(riid == __uuidof(IDXGIDevice3))
  {
    if(m_pReal3)
    {
      AddRef();
      *ppvObject = (IDXGIDevice3 *)this;
      return S_OK;
    }
    else
    {
      return E_NOINTERFACE;
    }
  }
  else if(riid == __uuidof(IDXGIDevice4))
  {
    if(m_pReal4)
    {
      AddRef();
      *ppvObject = (IDXGIDevice4 *)this;
      return S_OK;
    }
    else
    {
      return E_NOINTERFACE;
    }
  }
  else
  {
    WarnUnknownGUID("IDXGIDevice", riid);
  }

  return RefCountDXGIObject::QueryInterface(riid, ppvObject);
}

// Unwraps an array of wrapped IDXGIResource pointers for passing to the real
// device; unrecognised entries are passed through as-is (with an error).
std::vector<IDXGIResource *> UnwrapResourceSet(UINT NumResources, IDXGIResource *const *ppResources)
{
  std::vector<IDXGIResource *> resources;
  resources.resize(NumResources);
  for(UINT i = 0; i < NumResources; i++)
  {
    WrappedDXGIInterface<WrappedIDXGIOutput6> *wrapped =
        (WrappedDXGIInterface<WrappedIDXGIOutput6> *)ppResources[i];
    resources[i] = UnwrapDXGIResource(wrapped->GetWrapped());
    if(resources[i] == NULL)
    {
      RDCERR("Unrecognised resource %p!", ppResources[i]);
      resources[i] = ppResources[i];
    }
  }
  return resources;
}

HRESULT STDMETHODCALLTYPE WrappedIDXGIDevice4::OfferResources(UINT NumResources,
                                                              IDXGIResource *const *ppResources,
                                                              DXGI_OFFER_RESOURCE_PRIORITY Priority)
{
  std::vector<IDXGIResource *> resources = UnwrapResourceSet(NumResources, ppResources);
  return m_pReal2->OfferResources(NumResources, resources.data(), Priority);
}

HRESULT STDMETHODCALLTYPE WrappedIDXGIDevice4::ReclaimResources(UINT NumResources,
                                                                IDXGIResource *const *ppResources,
                                                                BOOL *pDiscarded)
{
  std::vector<IDXGIResource *> resources = UnwrapResourceSet(NumResources, ppResources);
  return m_pReal2->ReclaimResources(NumResources, resources.data(), pDiscarded);
}

HRESULT STDMETHODCALLTYPE WrappedIDXGIDevice4::OfferResources1(UINT NumResources,
                                                               IDXGIResource *const *ppResources,
                                                               DXGI_OFFER_RESOURCE_PRIORITY Priority,
UINT Flags) { std::vector<IDXGIResource *> resources = UnwrapResourceSet(NumResources, ppResources); return m_pReal4->OfferResources1(NumResources, resources.data(), Priority, Flags); } HRESULT STDMETHODCALLTYPE WrappedIDXGIDevice4::ReclaimResources1(UINT NumResources, IDXGIResource *const *ppResources, DXGI_RECLAIM_RESOURCE_RESULTS *pResults) { std::vector<IDXGIResource *> resources = UnwrapResourceSet(NumResources, ppResources); return m_pReal4->ReclaimResources1(NumResources, resources.data(), pResults); } WrappedIDXGIFactory::WrappedIDXGIFactory(IDXGIFactory *real) : RefCountDXGIObject(real), m_pReal(real) { m_pReal1 = NULL; real->QueryInterface(__uuidof(IDXGIFactory1), (void **)&m_pReal1); m_pReal2 = NULL; real->QueryInterface(__uuidof(IDXGIFactory2), (void **)&m_pReal2); m_pReal3 = NULL; real->QueryInterface(__uuidof(IDXGIFactory3), (void **)&m_pReal3); m_pReal4 = NULL; real->QueryInterface(__uuidof(IDXGIFactory4), (void **)&m_pReal4); m_pReal5 = NULL; real->QueryInterface(__uuidof(IDXGIFactory5), (void **)&m_pReal5); m_pReal6 = NULL; real->QueryInterface(__uuidof(IDXGIFactory6), (void **)&m_pReal6); m_pReal7 = NULL; real->QueryInterface(__uuidof(IDXGIFactory7), (void **)&m_pReal7); } WrappedIDXGIFactory::~WrappedIDXGIFactory() { SAFE_RELEASE(m_pReal1); SAFE_RELEASE(m_pReal2); SAFE_RELEASE(m_pReal3); SAFE_RELEASE(m_pReal4); SAFE_RELEASE(m_pReal5); SAFE_RELEASE(m_pReal6); SAFE_RELEASE(m_pReal7); SAFE_RELEASE(m_pReal); } HRESULT STDMETHODCALLTYPE WrappedIDXGIFactory::QueryInterface(REFIID riid, void **ppvObject) { // {713f394e-92ca-47e7-ab81-1159c2791e54} static const GUID IDXGIFactoryDWM_uuid = { 0x713f394e, 0x92ca, 0x47e7, {0xab, 0x81, 0x11, 0x59, 0xc2, 0x79, 0x1e, 0x54}}; // {1ddd77aa-9a4a-4cc8-9e55-98c196bafc8f} static const GUID IDXGIFactoryDWM8_uuid = { 0x1ddd77aa, 0x9a4a, 0x4cc8, {0x9e, 0x55, 0x98, 0xc1, 0x96, 0xba, 0xfc, 0x8f}}; if(riid == __uuidof(IDXGIFactory)) { AddRef(); *ppvObject = (IDXGIFactory *)this; return S_OK; } else if(riid == 
__uuidof(IDXGIFactory1)) { if(m_pReal1) { AddRef(); *ppvObject = (IDXGIFactory1 *)this; return S_OK; } else { return E_NOINTERFACE; } } else if(riid == __uuidof(IDXGIFactory2)) { if(m_pReal2) { AddRef(); *ppvObject = (IDXGIFactory2 *)this; return S_OK; } else { return E_NOINTERFACE; } } else if(riid == __uuidof(IDXGIFactory3)) { if(m_pReal3) { AddRef(); *ppvObject = (IDXGIFactory3 *)this; return S_OK; } else { return E_NOINTERFACE; } } else if(riid == __uuidof(IDXGIFactory4)) { if(m_pReal4) { AddRef(); *ppvObject = (IDXGIFactory4 *)this; return S_OK; } else { return E_NOINTERFACE; } } else if(riid == __uuidof(IDXGIFactory5)) { if(m_pReal5) { AddRef(); *ppvObject = (IDXGIFactory5 *)this; return S_OK; } else { return E_NOINTERFACE; } } else if(riid == __uuidof(IDXGIFactory6)) { if(m_pReal6) { AddRef(); *ppvObject = (IDXGIFactory6 *)this; return S_OK; } else { return E_NOINTERFACE; } } else if(riid == __uuidof(IDXGIFactory7)) { if(m_pReal7) { AddRef(); *ppvObject = (IDXGIFactory7 *)this; return S_OK; } else { return E_NOINTERFACE; } } else if(riid == IDXGIFactoryDWM_uuid) { RDCWARN("Blocking QueryInterface for IDXGIFactoryDWM"); return E_NOINTERFACE; } else if(riid == IDXGIFactoryDWM8_uuid) { RDCWARN("Blocking QueryInterface for IDXGIFactoryDWM8"); return E_NOINTERFACE; } else { WarnUnknownGUID("IDXGIFactory", riid); } return RefCountDXGIObject::QueryInterface(riid, ppvObject); } HRESULT WrappedIDXGIFactory::CreateSwapChain(IUnknown *pDevice, DXGI_SWAP_CHAIN_DESC *pDesc, IDXGISwapChain **ppSwapChain) { ID3DDevice *wrapDevice = GetD3DDevice(pDevice); if(wrapDevice) { DXGI_SWAP_CHAIN_DESC local = {}; DXGI_SWAP_CHAIN_DESC *desc = NULL; if(pDesc) { local = *pDesc; desc = &local; } local.BufferUsage |= DXGI_USAGE_RENDER_TARGET_OUTPUT; if(!RenderDoc::Inst().GetCaptureOptions().allowFullscreen) local.Windowed = TRUE; HRESULT ret = m_pReal->CreateSwapChain(wrapDevice->GetRealIUnknown(), desc, ppSwapChain); if(SUCCEEDED(ret)) { *ppSwapChain = new 
WrappedIDXGISwapChain4(*ppSwapChain, desc ? desc->OutputWindow : NULL, wrapDevice);
    }

    return ret;
  }

  // Unknown device: pass the call through untouched so the app still works,
  // but this swapchain will not be captured.
  RDCERR("Creating swap chain with non-hooked device!");

  return m_pReal->CreateSwapChain(pDevice, pDesc, ppSwapChain);
}

// HWND variant of swapchain creation (IDXGIFactory2). Same pattern as
// CreateSwapChain: unwrap device/output, force RENDER_TARGET_OUTPUT usage,
// create on the real factory, wrap the result.
HRESULT WrappedIDXGIFactory::CreateSwapChainForHwnd(
    IUnknown *pDevice, HWND hWnd, const DXGI_SWAP_CHAIN_DESC1 *pDesc,
    const DXGI_SWAP_CHAIN_FULLSCREEN_DESC *pFullscreenDesc, IDXGIOutput *pRestrictToOutput,
    IDXGISwapChain1 **ppSwapChain)
{
  ID3DDevice *wrapDevice = GetD3DDevice(pDevice);
  WrappedIDXGIOutput6 *wrappedOutput = (WrappedIDXGIOutput6 *)pRestrictToOutput;
  IDXGIOutput *unwrappedOutput = wrappedOutput ? wrappedOutput->GetReal() : NULL;

  if(wrapDevice)
  {
    // NOTE(review): unlike CreateSwapChain above, 'local' is NOT
    // zero-initialised here. If pDesc is NULL the |= below touches
    // uninitialised memory, though 'desc' stays NULL so the real API never
    // sees it — confirm intent.
    DXGI_SWAP_CHAIN_DESC1 local;
    DXGI_SWAP_CHAIN_DESC1 *desc = NULL;

    if(pDesc)
    {
      local = *pDesc;
      desc = &local;
    }

    local.BufferUsage |= DXGI_USAGE_RENDER_TARGET_OUTPUT;

    // Dropping the fullscreen desc keeps the swapchain windowed.
    if(!RenderDoc::Inst().GetCaptureOptions().allowFullscreen && pFullscreenDesc)
    {
      pFullscreenDesc = NULL;
    }

    HRESULT ret = m_pReal2->CreateSwapChainForHwnd(wrapDevice->GetRealIUnknown(), hWnd, desc,
                                                   pFullscreenDesc, unwrappedOutput, ppSwapChain);

    if(SUCCEEDED(ret))
    {
      *ppSwapChain = new WrappedIDXGISwapChain4(*ppSwapChain, hWnd, wrapDevice);
    }

    return ret;
  }
  else
  {
    RDCERR("Creating swap chain with non-hooked device!");
  }

  return m_pReal2->CreateSwapChainForHwnd(pDevice, hWnd, pDesc, pFullscreenDesc, unwrappedOutput,
                                          ppSwapChain);
}

// CoreWindow (UWP) variant. There is no fullscreen desc to strip here, so we
// can only warn when capture options ask for fullscreen to be disallowed.
HRESULT WrappedIDXGIFactory::CreateSwapChainForCoreWindow(IUnknown *pDevice, IUnknown *pWindow,
                                                          const DXGI_SWAP_CHAIN_DESC1 *pDesc,
                                                          IDXGIOutput *pRestrictToOutput,
                                                          IDXGISwapChain1 **ppSwapChain)
{
  ID3DDevice *wrapDevice = GetD3DDevice(pDevice);
  WrappedIDXGIOutput6 *wrappedOutput = (WrappedIDXGIOutput6 *)pRestrictToOutput;
  IDXGIOutput *unwrappedOutput = wrappedOutput ?
wrappedOutput->GetReal() : NULL;

  if(!RenderDoc::Inst().GetCaptureOptions().allowFullscreen)
  {
    RDCWARN("Impossible to disallow fullscreen on call to CreateSwapChainForCoreWindow");
  }

  if(wrapDevice)
  {
    DXGI_SWAP_CHAIN_DESC1 local = {};
    DXGI_SWAP_CHAIN_DESC1 *desc = NULL;

    if(pDesc)
    {
      local = *pDesc;
      desc = &local;
    }

    local.BufferUsage |= DXGI_USAGE_RENDER_TARGET_OUTPUT;

    HRESULT ret = m_pReal2->CreateSwapChainForCoreWindow(wrapDevice->GetRealIUnknown(), pWindow,
                                                         desc, unwrappedOutput, ppSwapChain);

    if(SUCCEEDED(ret))
    {
      // Prefer whatever HWND the swapchain reports; fall back to treating the
      // CoreWindow pointer itself as the window handle for bookkeeping.
      HWND wnd = NULL;
      (*ppSwapChain)->GetHwnd(&wnd);
      if(wnd == NULL)
        wnd = (HWND)pWindow;

      *ppSwapChain = new WrappedIDXGISwapChain4(*ppSwapChain, wnd, wrapDevice);
    }

    return ret;
  }
  else
  {
    RDCERR("Creating swap chain with non-hooked device!");
  }

  return m_pReal2->CreateSwapChainForCoreWindow(pDevice, pWindow, pDesc, unwrappedOutput,
                                                ppSwapChain);
}

// Composition (DirectComposition/XAML) variant: no HWND exists at creation
// time, so a placeholder handle is used when the swapchain reports none.
HRESULT WrappedIDXGIFactory::CreateSwapChainForComposition(IUnknown *pDevice,
                                                           const DXGI_SWAP_CHAIN_DESC1 *pDesc,
                                                           IDXGIOutput *pRestrictToOutput,
                                                           IDXGISwapChain1 **ppSwapChain)
{
  ID3DDevice *wrapDevice = GetD3DDevice(pDevice);
  WrappedIDXGIOutput6 *wrappedOutput = (WrappedIDXGIOutput6 *)pRestrictToOutput;
  IDXGIOutput *unwrappedOutput = wrappedOutput ?
wrappedOutput->GetReal() : NULL;

  if(!RenderDoc::Inst().GetCaptureOptions().allowFullscreen)
  {
    RDCWARN("Impossible to disallow fullscreen on call to CreateSwapChainForComposition");
  }

  if(wrapDevice)
  {
    DXGI_SWAP_CHAIN_DESC1 local = {};
    DXGI_SWAP_CHAIN_DESC1 *desc = NULL;

    if(pDesc)
    {
      local = *pDesc;
      desc = &local;
    }

    local.BufferUsage |= DXGI_USAGE_RENDER_TARGET_OUTPUT;

    HRESULT ret = m_pReal2->CreateSwapChainForComposition(wrapDevice->GetRealIUnknown(), desc,
                                                          unwrappedOutput, ppSwapChain);

    if(SUCCEEDED(ret))
    {
      HWND wnd = NULL;
      (*ppSwapChain)->GetHwnd(&wnd);
      // Composition swapchains have no real window; use a non-NULL sentinel
      // handle so the wrapper still has a key to track the swapchain by.
      if(wnd == NULL)
        wnd = (HWND)0x1;

      *ppSwapChain = new WrappedIDXGISwapChain4(*ppSwapChain, wnd, wrapDevice);
    }

    return ret;
  }
  else
  {
    RDCERR("Creating swap chain with non-hooked device!");
  }

  return m_pReal2->CreateSwapChainForComposition(pDevice, pDesc, unwrappedOutput, ppSwapChain);
}
// Copyright 2015 Stellar Development Foundation and contributors. Licensed // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "historywork/DownloadBucketsWork.h" #include "bucket/BucketManager.h" #include "catchup/CatchupManager.h" #include "history/FileTransferInfo.h" #include "history/HistoryArchive.h" #include "historywork/GetAndUnzipRemoteFileWork.h" #include "historywork/VerifyBucketWork.h" #include "work/WorkWithCallback.h" #include <Tracy.hpp> #include <fmt/format.h> namespace stellar { DownloadBucketsWork::DownloadBucketsWork( Application& app, std::map<std::string, std::shared_ptr<Bucket>>& buckets, std::vector<std::string> hashes, TmpDir const& downloadDir, std::shared_ptr<HistoryArchive> archive) : BatchWork{app, "download-verify-buckets"} , mBuckets{buckets} , mHashes{hashes} , mNextBucketIter{mHashes.begin()} , mDownloadDir{downloadDir} , mArchive{archive} { } std::string DownloadBucketsWork::getStatus() const { if (!isDone() && !isAborting()) { if (!mHashes.empty()) { auto numStarted = std::distance(mHashes.begin(), mNextBucketIter); auto numDone = numStarted - getNumWorksInBatch(); auto total = static_cast<uint32_t>(mHashes.size()); auto pct = (100 * numDone) / total; return fmt::format( "downloading and verifying buckets: {:d}/{:d} ({:d}%)", numDone, total, pct); } } return Work::getStatus(); } bool DownloadBucketsWork::hasNext() const { return mNextBucketIter != mHashes.end(); } void DownloadBucketsWork::resetIter() { mNextBucketIter = mHashes.begin(); } std::shared_ptr<BasicWork> DownloadBucketsWork::yieldMoreWork() { ZoneScoped; if (!hasNext()) { throw std::runtime_error("Nothing to iterate over!"); } auto hash = *mNextBucketIter; FileTransferInfo ft(mDownloadDir, HISTORY_FILE_TYPE_BUCKET, hash); auto w1 = std::make_shared<GetAndUnzipRemoteFileWork>(mApp, ft, mArchive); auto getFileWeak = std::weak_ptr<GetAndUnzipRemoteFileWork>(w1); 
OnFailureCallback failureCb = [getFileWeak, hash]() { auto getFile = getFileWeak.lock(); if (getFile) { auto ar = getFile->getArchive(); if (ar) { CLOG_INFO(History, "Bucket {} from archive {}", hash, ar->getName()); } } }; std::weak_ptr<DownloadBucketsWork> weak( std::static_pointer_cast<DownloadBucketsWork>(shared_from_this())); auto successCb = [weak, ft, hash](Application& app) -> bool { auto self = weak.lock(); if (self) { auto bucketPath = ft.localPath_nogz(); auto b = app.getBucketManager().adoptFileAsBucket(bucketPath, hexToBin256(hash), /*objectsPut=*/0, /*bytesPut=*/0); self->mBuckets[hash] = b; } return true; }; auto w2 = std::make_shared<VerifyBucketWork>(mApp, ft.localPath_nogz(), hexToBin256(hash), failureCb); auto w3 = std::make_shared<WorkWithCallback>(mApp, "adopt-verified-bucket", successCb); std::vector<std::shared_ptr<BasicWork>> seq{w1, w2, w3}; auto w4 = std::make_shared<WorkSequence>( mApp, "download-verify-sequence-" + hash, seq); ++mNextBucketIter; return w4; } }
/* * Copyright (c) 2019-2020 Arm Limited. * * SPDX-License-Identifier: MIT * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)) #include "arm_gemm.hpp" #include "../../utils.hpp" #include <cassert> namespace arm_gemm { void a64_hybrid_fp16_mla_6x32 ( unsigned int num_strings, const unsigned int *string_lengths, IndirectInputArg<__fp16> A_arg, size_t M, size_t N, const __fp16 *B_ptr, IndirectOutputArg<__fp16> output_arg, const __fp16 *bias, Activation act, bool accumulate ) { struct KernelArgs { __fp16 maxval = static_cast<__fp16>(std::numeric_limits<float>::infinity()); __fp16 minval = - static_cast<__fp16>(std::numeric_limits<float>::infinity()); unsigned int num_strings = {}; const unsigned int *string_lengths = {}; size_t N = {}; const __fp16 *B_ptr = {}; size_t output_offset = {}; size_t input_initial_col = {}; size_t input_offset = {}; } ka; unsigned long flags=0; void *output_ptr; void *input_ptr; if (output_arg.is_indirect) { output_ptr=(void *)(output_arg.indirect.ptr); ka.output_offset=output_arg.indirect.offset; flags |= 0x4; } else { output_ptr=(void *)(output_arg.direct.base); ka.output_offset=output_arg.direct.stride; } if (A_arg.is_indirect) { input_ptr=(void *)(A_arg.indirect.ptr); ka.input_offset=A_arg.indirect.start_row; ka.input_initial_col=A_arg.indirect.start_col; flags |= 0x8; } else { assert(num_strings==1); input_ptr=(void *)(A_arg.direct.base); ka.input_offset=A_arg.direct.stride; } if (accumulate) { flags |= 0x1; } ka.num_strings = num_strings; ka.string_lengths = string_lengths; ka.N = N; ka.B_ptr = B_ptr; switch(act.type) { default: case Activation::Type::None: break; case Activation::Type::BoundedReLU: ka.maxval = static_cast<__fp16>(act.param1); /* fall through */ case Activation::Type::ReLU: ka.minval = 0; flags |= 0x2; break; } __asm__ __volatile__( #ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC ".arch armv8.2-a+fp16\n" #endif "1:" // Row loop "cmp %x[M], #0x6\n" "bge 251f\n" "cmp %x[M], #0x4\n" "bgt 201f\n" "beq 151f\n" "cmp %x[M], #0x2\n" "bgt 101f\n" "beq 
51f\n" "ldr x16, [%x[args_ptr], %[offsetof_N]]\n" "ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n" "mov x14, %x[bias]\n" "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n" "tbz %x[flags], #2, 2f\n" "ldr x13, [%x[output_ptr], #0x0]\n" "add x13, x13, x19, LSL #1\n" "b 3f\n" "2:" // Height 1: setup direct output "mov x13, %x[output_ptr]\n" "3:" // Height 1: Column loop "cbz x14, 4f\n" "ldr q8, [x14, #0x0]\n" "ldr q9, [x14, #0x10]\n" "ldr q10, [x14, #0x20]\n" "ldr q11, [x14, #0x30]\n" "add x14, x14, #0x40\n" "b 23f\n" "4:" // Height 1: no bias "tbz %x[flags], #0, 22f\n" "cmp x16, #0x20\n" "bge 21f\n" "tbz x16, #4, 12f\n" "ld1 { v8.8h }, [x13], #0x10\n" "ld1 { v9.8h }, [x13], #0x10\n" "tbz x16, #3, 8f\n" "ld1 { v10.8h }, [x13], #0x10\n" "tbz x16, #2, 6f\n" "ldr d11, [x13], #0x8\n" "tbz x16, #1, 5f\n" "mov x19, #0x3c\n" "ld1 { v11.s }[2], [x13], #0x4\n" "tbz x16, #0, 20f\n" "ld1 { v11.h }[6], [x13]\n" "b 20f\n" "5:" // Height 1: Partial accumulate: partial_1_28 "mov x19, #0x38\n" "tbz x16, #0, 20f\n" "ld1 { v11.h }[4], [x13]\n" "b 20f\n" "6:" // Height 1: Partial accumulate: partial_2_24 "tbz x16, #1, 7f\n" "ldr s11, [x13], #0x4\n" "mov x19, #0x34\n" "tbz x16, #0, 20f\n" "ld1 { v11.h }[2], [x13]\n" "b 20f\n" "7:" // Height 1: Partial accumulate: partial_1_24 "mov x19, #0x30\n" "tbz x16, #0, 20f\n" "ldr h11, [x13, #0x0]\n" "b 20f\n" "8:" // Height 1: Partial accumulate: partial_4_16 "tbz x16, #2, 10f\n" "ldr d10, [x13], #0x8\n" "tbz x16, #1, 9f\n" "mov x19, #0x2c\n" "ld1 { v10.s }[2], [x13], #0x4\n" "tbz x16, #0, 20f\n" "ld1 { v10.h }[6], [x13]\n" "b 20f\n" "9:" // Height 1: Partial accumulate: partial_1_20 "mov x19, #0x28\n" "tbz x16, #0, 20f\n" "ld1 { v10.h }[4], [x13]\n" "b 20f\n" "10:" // Height 1: Partial accumulate: partial_2_16 "tbz x16, #1, 11f\n" "ldr s10, [x13], #0x4\n" "mov x19, #0x24\n" "tbz x16, #0, 20f\n" "ld1 { v10.h }[2], [x13]\n" "b 20f\n" "11:" // Height 1: Partial accumulate: partial_1_16 "mov x19, #0x20\n" "tbz x16, #0, 20f\n" "ldr h10, [x13, 
#0x0]\n" "b 20f\n" "12:" // Height 1: Partial accumulate: partial_8_0 "tbz x16, #3, 16f\n" "ld1 { v8.8h }, [x13], #0x10\n" "tbz x16, #2, 14f\n" "ldr d9, [x13], #0x8\n" "tbz x16, #1, 13f\n" "mov x19, #0x1c\n" "ld1 { v9.s }[2], [x13], #0x4\n" "tbz x16, #0, 20f\n" "ld1 { v9.h }[6], [x13]\n" "b 20f\n" "13:" // Height 1: Partial accumulate: partial_1_12 "mov x19, #0x18\n" "tbz x16, #0, 20f\n" "ld1 { v9.h }[4], [x13]\n" "b 20f\n" "14:" // Height 1: Partial accumulate: partial_2_8 "tbz x16, #1, 15f\n" "ldr s9, [x13], #0x4\n" "mov x19, #0x14\n" "tbz x16, #0, 20f\n" "ld1 { v9.h }[2], [x13]\n" "b 20f\n" "15:" // Height 1: Partial accumulate: partial_1_8 "mov x19, #0x10\n" "tbz x16, #0, 20f\n" "ldr h9, [x13, #0x0]\n" "b 20f\n" "16:" // Height 1: Partial accumulate: partial_4_0 "tbz x16, #2, 18f\n" "ldr d8, [x13], #0x8\n" "tbz x16, #1, 17f\n" "mov x19, #0xc\n" "ld1 { v8.s }[2], [x13], #0x4\n" "tbz x16, #0, 20f\n" "ld1 { v8.h }[6], [x13]\n" "b 20f\n" "17:" // Height 1: Partial accumulate: partial_1_4 "mov x19, #0x8\n" "tbz x16, #0, 20f\n" "ld1 { v8.h }[4], [x13]\n" "b 20f\n" "18:" // Height 1: Partial accumulate: partial_2_0 "tbz x16, #1, 19f\n" "ldr s8, [x13], #0x4\n" "mov x19, #0x4\n" "tbz x16, #0, 20f\n" "ld1 { v8.h }[2], [x13]\n" "b 20f\n" "19:" // Height 1: Partial accumulate: partial_1_0 "mov x19, #0x0\n" "ldr h8, [x13, #0x0]\n" "20:" // Height 1: Partial accumulate: Done "sub x13, x13, x19\n" "b 23f\n" "21:" // Height 1: full accumulate "ldr q8, [x13, #0x0]\n" "ldr q9, [x13, #0x10]\n" "ldr q10, [x13, #0x20]\n" "ldr q11, [x13, #0x30]\n" "b 23f\n" "22:" // Height 1: no accumulate "movi v8.16b, #0x0\n" "movi v9.16b, #0x0\n" "movi v10.16b, #0x0\n" "movi v11.16b, #0x0\n" "23:" // Height 1: setup done "mov x12, #0x0\n" "24:" // Height 1: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n" "ldr w11, [x20, x12, LSL #0x2]\n" "tbz %x[flags], #3, 25f\n" "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n" "add x20, x20, 
x19, LSL #3\n" "ldr x10, [x20, #0x0]\n" "cbnz x12, 26f\n" "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n" "add x10, x10, x19, LSL #1\n" "b 26f\n" "25:" // Height 1: setup direct input "mov x10, %x[input_ptr]\n" "26:" // Height 1: input setup done "cmp x11, #0x8\n" "blt 29f\n" "cmp x11, #0x10\n" "blt 28f\n" "27:" // Height 1: Multiply loop: Main loop head "ldr q0, [x10, #0x0]\n" "ldr q6, [x15, #0x0]\n" "fmla v8.8h, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" "ldr q6, [x15, #0x20]\n" "fmla v9.8h, v7.8h, v0.h[0]\n" "ldr q7, [x15, #0x30]\n" "fmla v10.8h, v6.8h, v0.h[0]\n" "ldr q6, [x15, #0x40]\n" "add x10, x10, #0x10\n" "fmla v11.8h, v7.8h, v0.h[0]\n" "prfm pldl1keep, [x10, #0x80]\n" "ldr q7, [x15, #0x50]\n" "fmla v8.8h, v6.8h, v0.h[1]\n" "ldr q6, [x15, #0x60]\n" "sub x11, x11, #0x8\n" "fmla v9.8h, v7.8h, v0.h[1]\n" "ldr q7, [x15, #0x70]\n" "cmp x11, #0x10\n" "fmla v10.8h, v6.8h, v0.h[1]\n" "ldr q6, [x15, #0x80]\n" "fmla v11.8h, v7.8h, v0.h[1]\n" "ldr q7, [x15, #0x90]\n" "fmla v8.8h, v6.8h, v0.h[2]\n" "ldr q6, [x15, #0xa0]\n" "fmla v9.8h, v7.8h, v0.h[2]\n" "ldr q7, [x15, #0xb0]\n" "fmla v10.8h, v6.8h, v0.h[2]\n" "ldr q6, [x15, #0xc0]\n" "fmla v11.8h, v7.8h, v0.h[2]\n" "ldr q7, [x15, #0xd0]\n" "fmla v8.8h, v6.8h, v0.h[3]\n" "ldr q6, [x15, #0xe0]\n" "fmla v9.8h, v7.8h, v0.h[3]\n" "ldr q7, [x15, #0xf0]\n" "fmla v10.8h, v6.8h, v0.h[3]\n" "ldr q6, [x15, #0x100]\n" "fmla v11.8h, v7.8h, v0.h[3]\n" "ldr q7, [x15, #0x110]\n" "fmla v8.8h, v6.8h, v0.h[4]\n" "ldr q6, [x15, #0x120]\n" "fmla v9.8h, v7.8h, v0.h[4]\n" "ldr q7, [x15, #0x130]\n" "fmla v10.8h, v6.8h, v0.h[4]\n" "ldr q6, [x15, #0x140]\n" "fmla v11.8h, v7.8h, v0.h[4]\n" "ldr q7, [x15, #0x150]\n" "fmla v8.8h, v6.8h, v0.h[5]\n" "ldr q6, [x15, #0x160]\n" "fmla v9.8h, v7.8h, v0.h[5]\n" "ldr q7, [x15, #0x170]\n" "fmla v10.8h, v6.8h, v0.h[5]\n" "ldr q6, [x15, #0x180]\n" "fmla v11.8h, v7.8h, v0.h[5]\n" "ldr q7, [x15, #0x190]\n" "fmla v8.8h, v6.8h, v0.h[6]\n" "ldr q6, [x15, #0x1a0]\n" "fmla v9.8h, v7.8h, v0.h[6]\n" "ldr 
q7, [x15, #0x1b0]\n" "fmla v10.8h, v6.8h, v0.h[6]\n" "ldr q6, [x15, #0x1c0]\n" "fmla v11.8h, v7.8h, v0.h[6]\n" "ldr q7, [x15, #0x1d0]\n" "fmla v8.8h, v6.8h, v0.h[7]\n" "ldr q6, [x15, #0x1e0]\n" "fmla v9.8h, v7.8h, v0.h[7]\n" "ldr q7, [x15, #0x1f0]\n" "add x15, x15, #0x200\n" "fmla v10.8h, v6.8h, v0.h[7]\n" "fmla v11.8h, v7.8h, v0.h[7]\n" "bge 27b\n" "28:" // Height 1: Multiply loop: Single iteration only "sub x11, x11, #0x8\n" "ldr q0, [x10, #0x0]\n" "ldr q6, [x15, #0x0]\n" "fmla v8.8h, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" "ldr q6, [x15, #0x20]\n" "fmla v9.8h, v7.8h, v0.h[0]\n" "ldr q7, [x15, #0x30]\n" "fmla v10.8h, v6.8h, v0.h[0]\n" "ldr q6, [x15, #0x40]\n" "add x10, x10, #0x10\n" "fmla v11.8h, v7.8h, v0.h[0]\n" "prfm pldl1keep, [x10, #0x80]\n" "ldr q7, [x15, #0x50]\n" "fmla v8.8h, v6.8h, v0.h[1]\n" "ldr q6, [x15, #0x60]\n" "fmla v9.8h, v7.8h, v0.h[1]\n" "ldr q7, [x15, #0x70]\n" "fmla v10.8h, v6.8h, v0.h[1]\n" "ldr q6, [x15, #0x80]\n" "fmla v11.8h, v7.8h, v0.h[1]\n" "ldr q7, [x15, #0x90]\n" "fmla v8.8h, v6.8h, v0.h[2]\n" "ldr q6, [x15, #0xa0]\n" "fmla v9.8h, v7.8h, v0.h[2]\n" "ldr q7, [x15, #0xb0]\n" "fmla v10.8h, v6.8h, v0.h[2]\n" "ldr q6, [x15, #0xc0]\n" "fmla v11.8h, v7.8h, v0.h[2]\n" "ldr q7, [x15, #0xd0]\n" "fmla v8.8h, v6.8h, v0.h[3]\n" "ldr q6, [x15, #0xe0]\n" "fmla v9.8h, v7.8h, v0.h[3]\n" "ldr q7, [x15, #0xf0]\n" "fmla v10.8h, v6.8h, v0.h[3]\n" "ldr q6, [x15, #0x100]\n" "fmla v11.8h, v7.8h, v0.h[3]\n" "ldr q7, [x15, #0x110]\n" "fmla v8.8h, v6.8h, v0.h[4]\n" "ldr q6, [x15, #0x120]\n" "fmla v9.8h, v7.8h, v0.h[4]\n" "ldr q7, [x15, #0x130]\n" "fmla v10.8h, v6.8h, v0.h[4]\n" "ldr q6, [x15, #0x140]\n" "fmla v11.8h, v7.8h, v0.h[4]\n" "ldr q7, [x15, #0x150]\n" "fmla v8.8h, v6.8h, v0.h[5]\n" "ldr q6, [x15, #0x160]\n" "fmla v9.8h, v7.8h, v0.h[5]\n" "ldr q7, [x15, #0x170]\n" "fmla v10.8h, v6.8h, v0.h[5]\n" "ldr q6, [x15, #0x180]\n" "fmla v11.8h, v7.8h, v0.h[5]\n" "ldr q7, [x15, #0x190]\n" "fmla v8.8h, v6.8h, v0.h[6]\n" "ldr q6, [x15, #0x1a0]\n" "fmla v9.8h, 
v7.8h, v0.h[6]\n" "ldr q7, [x15, #0x1b0]\n" "fmla v10.8h, v6.8h, v0.h[6]\n" "ldr q6, [x15, #0x1c0]\n" "fmla v11.8h, v7.8h, v0.h[6]\n" "ldr q7, [x15, #0x1d0]\n" "fmla v8.8h, v6.8h, v0.h[7]\n" "ldr q6, [x15, #0x1e0]\n" "fmla v9.8h, v7.8h, v0.h[7]\n" "ldr q7, [x15, #0x1f0]\n" "add x15, x15, #0x200\n" "fmla v10.8h, v6.8h, v0.h[7]\n" "fmla v11.8h, v7.8h, v0.h[7]\n" "29:" // Height 1: Multiply loop: Main loop skip "cbz x11, 31f\n" "30:" // Height 1: Multiply loop: Odd block loop "ldr h0, [x10], #0x2\n" "ldr q6, [x15, #0x0]\n" "fmla v8.8h, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" "ldr q6, [x15, #0x20]\n" "fmla v9.8h, v7.8h, v0.h[0]\n" "ldr q7, [x15, #0x30]\n" "fmla v10.8h, v6.8h, v0.h[0]\n" "sub x11, x11, #0x1\n" "add x15, x15, #0x40\n" "fmla v11.8h, v7.8h, v0.h[0]\n" "cbnz x11, 30b\n" "31:" // Height 1: Multiply loop: No odd multiplies "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "add x12, x12, #0x1\n" "cmp x12, x19\n" "bne 24b\n" "prfm pstl1keep, [x13, #0x0]\n" "tbz %x[flags], #1, 32f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1r { v1.8h }, [x19]\n" "add x19, %x[args_ptr], %[offset_max]\n" "ld1r { v0.8h }, [x19]\n" "fmin v8.8h, v8.8h, v0.8h\n" "fmin v9.8h, v9.8h, v0.8h\n" "fmin v10.8h, v10.8h, v0.8h\n" "fmin v11.8h, v11.8h, v0.8h\n" "fmax v8.8h, v8.8h, v1.8h\n" "fmax v9.8h, v9.8h, v1.8h\n" "fmax v10.8h, v10.8h, v1.8h\n" "fmax v11.8h, v11.8h, v1.8h\n" "32:" // Height 1: No activation "cmp x16, #0x20\n" "bge 49f\n" "tbz x16, #4, 40f\n" "st1 { v8.8h }, [x13], #0x10\n" "st1 { v9.8h }, [x13], #0x10\n" "tbz x16, #3, 36f\n" "st1 { v10.8h }, [x13], #0x10\n" "tbz x16, #2, 34f\n" "str d11, [x13], #0x8\n" "tbz x16, #1, 33f\n" "st1 { v11.s }[2], [x13], #0x4\n" "tbz x16, #0, 48f\n" "st1 { v11.h }[6], [x13]\n" "b 48f\n" "33:" // Height 1: Partial direct writeback: partial_1_28 "tbz x16, #0, 48f\n" "st1 { v11.h }[4], [x13]\n" "b 48f\n" "34:" // Height 1: Partial direct writeback: partial_2_24 "tbz x16, #1, 35f\n" "str s11, [x13], #0x4\n" "tbz x16, #0, 48f\n" "st1 { v11.h 
}[2], [x13]\n" "b 48f\n" "35:" // Height 1: Partial direct writeback: partial_1_24 "tbz x16, #0, 48f\n" "str h11, [x13, #0x0]\n" "b 48f\n" "36:" // Height 1: Partial direct writeback: partial_4_16 "tbz x16, #2, 38f\n" "str d10, [x13], #0x8\n" "tbz x16, #1, 37f\n" "st1 { v10.s }[2], [x13], #0x4\n" "tbz x16, #0, 48f\n" "st1 { v10.h }[6], [x13]\n" "b 48f\n" "37:" // Height 1: Partial direct writeback: partial_1_20 "tbz x16, #0, 48f\n" "st1 { v10.h }[4], [x13]\n" "b 48f\n" "38:" // Height 1: Partial direct writeback: partial_2_16 "tbz x16, #1, 39f\n" "str s10, [x13], #0x4\n" "tbz x16, #0, 48f\n" "st1 { v10.h }[2], [x13]\n" "b 48f\n" "39:" // Height 1: Partial direct writeback: partial_1_16 "tbz x16, #0, 48f\n" "str h10, [x13, #0x0]\n" "b 48f\n" "40:" // Height 1: Partial direct writeback: partial_8_0 "tbz x16, #3, 44f\n" "st1 { v8.8h }, [x13], #0x10\n" "tbz x16, #2, 42f\n" "str d9, [x13], #0x8\n" "tbz x16, #1, 41f\n" "st1 { v9.s }[2], [x13], #0x4\n" "tbz x16, #0, 48f\n" "st1 { v9.h }[6], [x13]\n" "b 48f\n" "41:" // Height 1: Partial direct writeback: partial_1_12 "tbz x16, #0, 48f\n" "st1 { v9.h }[4], [x13]\n" "b 48f\n" "42:" // Height 1: Partial direct writeback: partial_2_8 "tbz x16, #1, 43f\n" "str s9, [x13], #0x4\n" "tbz x16, #0, 48f\n" "st1 { v9.h }[2], [x13]\n" "b 48f\n" "43:" // Height 1: Partial direct writeback: partial_1_8 "tbz x16, #0, 48f\n" "str h9, [x13, #0x0]\n" "b 48f\n" "44:" // Height 1: Partial direct writeback: partial_4_0 "tbz x16, #2, 46f\n" "str d8, [x13], #0x8\n" "tbz x16, #1, 45f\n" "st1 { v8.s }[2], [x13], #0x4\n" "tbz x16, #0, 48f\n" "st1 { v8.h }[6], [x13]\n" "b 48f\n" "45:" // Height 1: Partial direct writeback: partial_1_4 "tbz x16, #0, 48f\n" "st1 { v8.h }[4], [x13]\n" "b 48f\n" "46:" // Height 1: Partial direct writeback: partial_2_0 "tbz x16, #1, 47f\n" "str s8, [x13], #0x4\n" "tbz x16, #0, 48f\n" "st1 { v8.h }[2], [x13]\n" "b 48f\n" "47:" // Height 1: Partial direct writeback: partial_1_0 "str h8, [x13, #0x0]\n" "48:" // Height 1: 
Partial direct writeback: Done "b 50f\n" "49:" // Height 1: Full writeback "str q8, [x13, #0x0]\n" "str q9, [x13, #0x10]\n" "str q10, [x13, #0x20]\n" "str q11, [x13, #0x30]\n" "add x13, x13, #0x40\n" "50:" // Height 1: Writeback done "subs x16, x16, #0x20\n" "bgt 3b\n" "b 302f\n" "51:" // Height 2 "ldr x16, [%x[args_ptr], %[offsetof_N]]\n" "mov x14, %x[bias]\n" "ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n" "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n" "tbz %x[flags], #2, 52f\n" "ldr x13, [%x[output_ptr], #0x0]\n" "add x13, x13, x19, LSL #1\n" "ldr x9, [%x[output_ptr], #0x8]\n" "add x9, x9, x19, LSL #1\n" "b 53f\n" "52:" // Height 2: setup direct output "mov x13, %x[output_ptr]\n" "add x9, x13, x19, LSL #1\n" "53:" // Height 2: Column loop "cbz x14, 54f\n" "ldr q8, [x14, #0x0]\n" "mov v12.16b, v8.16b\n" "ldr q9, [x14, #0x10]\n" "ldr q10, [x14, #0x20]\n" "mov v13.16b, v9.16b\n" "ldr q11, [x14, #0x30]\n" "mov v14.16b, v10.16b\n" "add x14, x14, #0x40\n" "mov v15.16b, v11.16b\n" "b 73f\n" "54:" // Height 2: no bias "tbz %x[flags], #0, 72f\n" "cmp x16, #0x20\n" "bge 71f\n" "tbz x16, #4, 62f\n" "ld1 { v8.8h }, [x13], #0x10\n" "ld1 { v12.8h }, [x9], #0x10\n" "ld1 { v9.8h }, [x13], #0x10\n" "ld1 { v13.8h }, [x9], #0x10\n" "tbz x16, #3, 58f\n" "ld1 { v10.8h }, [x13], #0x10\n" "ld1 { v14.8h }, [x9], #0x10\n" "tbz x16, #2, 56f\n" "ldr d11, [x13], #0x8\n" "ldr d15, [x9], #0x8\n" "tbz x16, #1, 55f\n" "mov x19, #0x3c\n" "ld1 { v11.s }[2], [x13], #0x4\n" "ld1 { v15.s }[2], [x9], #0x4\n" "tbz x16, #0, 70f\n" "ld1 { v11.h }[6], [x13]\n" "ld1 { v15.h }[6], [x9]\n" "b 70f\n" "55:" // Height 2: Partial accumulate: partial_1_28 "mov x19, #0x38\n" "tbz x16, #0, 70f\n" "ld1 { v11.h }[4], [x13]\n" "ld1 { v15.h }[4], [x9]\n" "b 70f\n" "56:" // Height 2: Partial accumulate: partial_2_24 "tbz x16, #1, 57f\n" "ldr s11, [x13], #0x4\n" "ldr s15, [x9], #0x4\n" "mov x19, #0x34\n" "tbz x16, #0, 70f\n" "ld1 { v11.h }[2], [x13]\n" "ld1 { v15.h }[2], [x9]\n" "b 70f\n" "57:" // Height 2: 
Partial accumulate: partial_1_24 "mov x19, #0x30\n" "tbz x16, #0, 70f\n" "ldr h11, [x13, #0x0]\n" "ldr h15, [x9, #0x0]\n" "b 70f\n" "58:" // Height 2: Partial accumulate: partial_4_16 "tbz x16, #2, 60f\n" "ldr d10, [x13], #0x8\n" "ldr d14, [x9], #0x8\n" "tbz x16, #1, 59f\n" "mov x19, #0x2c\n" "ld1 { v10.s }[2], [x13], #0x4\n" "ld1 { v14.s }[2], [x9], #0x4\n" "tbz x16, #0, 70f\n" "ld1 { v10.h }[6], [x13]\n" "ld1 { v14.h }[6], [x9]\n" "b 70f\n" "59:" // Height 2: Partial accumulate: partial_1_20 "mov x19, #0x28\n" "tbz x16, #0, 70f\n" "ld1 { v10.h }[4], [x13]\n" "ld1 { v14.h }[4], [x9]\n" "b 70f\n" "60:" // Height 2: Partial accumulate: partial_2_16 "tbz x16, #1, 61f\n" "ldr s10, [x13], #0x4\n" "ldr s14, [x9], #0x4\n" "mov x19, #0x24\n" "tbz x16, #0, 70f\n" "ld1 { v10.h }[2], [x13]\n" "ld1 { v14.h }[2], [x9]\n" "b 70f\n" "61:" // Height 2: Partial accumulate: partial_1_16 "mov x19, #0x20\n" "tbz x16, #0, 70f\n" "ldr h10, [x13, #0x0]\n" "ldr h14, [x9, #0x0]\n" "b 70f\n" "62:" // Height 2: Partial accumulate: partial_8_0 "tbz x16, #3, 66f\n" "ld1 { v8.8h }, [x13], #0x10\n" "ld1 { v12.8h }, [x9], #0x10\n" "tbz x16, #2, 64f\n" "ldr d9, [x13], #0x8\n" "ldr d13, [x9], #0x8\n" "tbz x16, #1, 63f\n" "mov x19, #0x1c\n" "ld1 { v9.s }[2], [x13], #0x4\n" "ld1 { v13.s }[2], [x9], #0x4\n" "tbz x16, #0, 70f\n" "ld1 { v9.h }[6], [x13]\n" "ld1 { v13.h }[6], [x9]\n" "b 70f\n" "63:" // Height 2: Partial accumulate: partial_1_12 "mov x19, #0x18\n" "tbz x16, #0, 70f\n" "ld1 { v9.h }[4], [x13]\n" "ld1 { v13.h }[4], [x9]\n" "b 70f\n" "64:" // Height 2: Partial accumulate: partial_2_8 "tbz x16, #1, 65f\n" "ldr s9, [x13], #0x4\n" "ldr s13, [x9], #0x4\n" "mov x19, #0x14\n" "tbz x16, #0, 70f\n" "ld1 { v9.h }[2], [x13]\n" "ld1 { v13.h }[2], [x9]\n" "b 70f\n" "65:" // Height 2: Partial accumulate: partial_1_8 "mov x19, #0x10\n" "tbz x16, #0, 70f\n" "ldr h9, [x13, #0x0]\n" "ldr h13, [x9, #0x0]\n" "b 70f\n" "66:" // Height 2: Partial accumulate: partial_4_0 "tbz x16, #2, 68f\n" "ldr d8, [x13], 
#0x8\n" "ldr d12, [x9], #0x8\n" "tbz x16, #1, 67f\n" "mov x19, #0xc\n" "ld1 { v8.s }[2], [x13], #0x4\n" "ld1 { v12.s }[2], [x9], #0x4\n" "tbz x16, #0, 70f\n" "ld1 { v8.h }[6], [x13]\n" "ld1 { v12.h }[6], [x9]\n" "b 70f\n" "67:" // Height 2: Partial accumulate: partial_1_4 "mov x19, #0x8\n" "tbz x16, #0, 70f\n" "ld1 { v8.h }[4], [x13]\n" "ld1 { v12.h }[4], [x9]\n" "b 70f\n" "68:" // Height 2: Partial accumulate: partial_2_0 "tbz x16, #1, 69f\n" "ldr s8, [x13], #0x4\n" "ldr s12, [x9], #0x4\n" "mov x19, #0x4\n" "tbz x16, #0, 70f\n" "ld1 { v8.h }[2], [x13]\n" "ld1 { v12.h }[2], [x9]\n" "b 70f\n" "69:" // Height 2: Partial accumulate: partial_1_0 "mov x19, #0x0\n" "ldr h8, [x13, #0x0]\n" "ldr h12, [x9, #0x0]\n" "70:" // Height 2: Partial accumulate: Done "sub x13, x13, x19\n" "sub x9, x9, x19\n" "b 73f\n" "71:" // Height 2: full accumulate "ldr q8, [x13, #0x0]\n" "ldr q9, [x13, #0x10]\n" "ldr q10, [x13, #0x20]\n" "ldr q11, [x13, #0x30]\n" "ldr q12, [x9, #0x0]\n" "ldr q13, [x9, #0x10]\n" "ldr q14, [x9, #0x20]\n" "ldr q15, [x9, #0x30]\n" "b 73f\n" "72:" // Height 2: no accumulate "movi v8.16b, #0x0\n" "movi v9.16b, #0x0\n" "movi v10.16b, #0x0\n" "movi v11.16b, #0x0\n" "movi v12.16b, #0x0\n" "movi v13.16b, #0x0\n" "movi v14.16b, #0x0\n" "movi v15.16b, #0x0\n" "73:" // Height 2: setup done "mov x12, #0x0\n" "74:" // Height 2: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n" "ldr w11, [x20, x12, LSL #0x2]\n" "tbz %x[flags], #3, 75f\n" "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n" "add x20, x20, x19, LSL #3\n" "ldr x10, [x20, #0x0]\n" "ldr x28, [x20, #0x8]\n" "cbnz x12, 76f\n" "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n" "add x10, x10, x19, LSL #1\n" "add x28, x28, x19, LSL #1\n" "b 76f\n" "75:" // Height 2: setup direct input "mov x10, %x[input_ptr]\n" "add x28, x10, x19, LSL #1\n" "76:" // Height 2: input setup done "cmp x11, #0x8\n" "blt 79f\n" "cmp x11, #0x10\n" "blt 78f\n" "77:" // 
Height 2: Multiply loop: Main loop head "ldr q0, [x10, #0x0]\n" "ldr q1, [x28, #0x0]\n" "ldr q6, [x15, #0x0]\n" "fmla v8.8h, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" "fmla v12.8h, v6.8h, v1.h[0]\n" "ldr q6, [x15, #0x20]\n" "add x10, x10, #0x10\n" "fmla v9.8h, v7.8h, v0.h[0]\n" "prfm pldl1keep, [x10, #0x80]\n" "add x28, x28, #0x10\n" "fmla v13.8h, v7.8h, v1.h[0]\n" "prfm pldl1keep, [x28, #0x80]\n" "ldr q7, [x15, #0x30]\n" "fmla v10.8h, v6.8h, v0.h[0]\n" "sub x11, x11, #0x8\n" "fmla v14.8h, v6.8h, v1.h[0]\n" "ldr q6, [x15, #0x40]\n" "cmp x11, #0x10\n" "fmla v11.8h, v7.8h, v0.h[0]\n" "fmla v15.8h, v7.8h, v1.h[0]\n" "ldr q7, [x15, #0x50]\n" "fmla v8.8h, v6.8h, v0.h[1]\n" "fmla v12.8h, v6.8h, v1.h[1]\n" "ldr q6, [x15, #0x60]\n" "fmla v9.8h, v7.8h, v0.h[1]\n" "fmla v13.8h, v7.8h, v1.h[1]\n" "ldr q7, [x15, #0x70]\n" "fmla v10.8h, v6.8h, v0.h[1]\n" "fmla v14.8h, v6.8h, v1.h[1]\n" "ldr q6, [x15, #0x80]\n" "fmla v11.8h, v7.8h, v0.h[1]\n" "fmla v15.8h, v7.8h, v1.h[1]\n" "ldr q7, [x15, #0x90]\n" "fmla v8.8h, v6.8h, v0.h[2]\n" "fmla v12.8h, v6.8h, v1.h[2]\n" "ldr q6, [x15, #0xa0]\n" "fmla v9.8h, v7.8h, v0.h[2]\n" "fmla v13.8h, v7.8h, v1.h[2]\n" "ldr q7, [x15, #0xb0]\n" "fmla v10.8h, v6.8h, v0.h[2]\n" "fmla v14.8h, v6.8h, v1.h[2]\n" "ldr q6, [x15, #0xc0]\n" "fmla v11.8h, v7.8h, v0.h[2]\n" "fmla v15.8h, v7.8h, v1.h[2]\n" "ldr q7, [x15, #0xd0]\n" "fmla v8.8h, v6.8h, v0.h[3]\n" "fmla v12.8h, v6.8h, v1.h[3]\n" "ldr q6, [x15, #0xe0]\n" "fmla v9.8h, v7.8h, v0.h[3]\n" "fmla v13.8h, v7.8h, v1.h[3]\n" "ldr q7, [x15, #0xf0]\n" "fmla v10.8h, v6.8h, v0.h[3]\n" "fmla v14.8h, v6.8h, v1.h[3]\n" "ldr q6, [x15, #0x100]\n" "fmla v11.8h, v7.8h, v0.h[3]\n" "fmla v15.8h, v7.8h, v1.h[3]\n" "ldr q7, [x15, #0x110]\n" "fmla v8.8h, v6.8h, v0.h[4]\n" "fmla v12.8h, v6.8h, v1.h[4]\n" "ldr q6, [x15, #0x120]\n" "fmla v9.8h, v7.8h, v0.h[4]\n" "fmla v13.8h, v7.8h, v1.h[4]\n" "ldr q7, [x15, #0x130]\n" "fmla v10.8h, v6.8h, v0.h[4]\n" "fmla v14.8h, v6.8h, v1.h[4]\n" "ldr q6, [x15, #0x140]\n" "fmla v11.8h, v7.8h, 
v0.h[4]\n" "fmla v15.8h, v7.8h, v1.h[4]\n" "ldr q7, [x15, #0x150]\n" "fmla v8.8h, v6.8h, v0.h[5]\n" "fmla v12.8h, v6.8h, v1.h[5]\n" "ldr q6, [x15, #0x160]\n" "fmla v9.8h, v7.8h, v0.h[5]\n" "fmla v13.8h, v7.8h, v1.h[5]\n" "ldr q7, [x15, #0x170]\n" "fmla v10.8h, v6.8h, v0.h[5]\n" "fmla v14.8h, v6.8h, v1.h[5]\n" "ldr q6, [x15, #0x180]\n" "fmla v11.8h, v7.8h, v0.h[5]\n" "fmla v15.8h, v7.8h, v1.h[5]\n" "ldr q7, [x15, #0x190]\n" "fmla v8.8h, v6.8h, v0.h[6]\n" "fmla v12.8h, v6.8h, v1.h[6]\n" "ldr q6, [x15, #0x1a0]\n" "fmla v9.8h, v7.8h, v0.h[6]\n" "fmla v13.8h, v7.8h, v1.h[6]\n" "ldr q7, [x15, #0x1b0]\n" "fmla v10.8h, v6.8h, v0.h[6]\n" "fmla v14.8h, v6.8h, v1.h[6]\n" "ldr q6, [x15, #0x1c0]\n" "fmla v11.8h, v7.8h, v0.h[6]\n" "fmla v15.8h, v7.8h, v1.h[6]\n" "ldr q7, [x15, #0x1d0]\n" "fmla v8.8h, v6.8h, v0.h[7]\n" "fmla v12.8h, v6.8h, v1.h[7]\n" "ldr q6, [x15, #0x1e0]\n" "fmla v9.8h, v7.8h, v0.h[7]\n" "fmla v13.8h, v7.8h, v1.h[7]\n" "ldr q7, [x15, #0x1f0]\n" "add x15, x15, #0x200\n" "fmla v10.8h, v6.8h, v0.h[7]\n" "fmla v14.8h, v6.8h, v1.h[7]\n" "fmla v11.8h, v7.8h, v0.h[7]\n" "fmla v15.8h, v7.8h, v1.h[7]\n" "bge 77b\n" "78:" // Height 2: Multiply loop: Single iteration only "sub x11, x11, #0x8\n" "ldr q0, [x10, #0x0]\n" "ldr q1, [x28, #0x0]\n" "ldr q6, [x15, #0x0]\n" "fmla v8.8h, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" "fmla v12.8h, v6.8h, v1.h[0]\n" "ldr q6, [x15, #0x20]\n" "add x10, x10, #0x10\n" "fmla v9.8h, v7.8h, v0.h[0]\n" "prfm pldl1keep, [x10, #0x80]\n" "add x28, x28, #0x10\n" "fmla v13.8h, v7.8h, v1.h[0]\n" "prfm pldl1keep, [x28, #0x80]\n" "ldr q7, [x15, #0x30]\n" "fmla v10.8h, v6.8h, v0.h[0]\n" "fmla v14.8h, v6.8h, v1.h[0]\n" "ldr q6, [x15, #0x40]\n" "fmla v11.8h, v7.8h, v0.h[0]\n" "fmla v15.8h, v7.8h, v1.h[0]\n" "ldr q7, [x15, #0x50]\n" "fmla v8.8h, v6.8h, v0.h[1]\n" "fmla v12.8h, v6.8h, v1.h[1]\n" "ldr q6, [x15, #0x60]\n" "fmla v9.8h, v7.8h, v0.h[1]\n" "fmla v13.8h, v7.8h, v1.h[1]\n" "ldr q7, [x15, #0x70]\n" "fmla v10.8h, v6.8h, v0.h[1]\n" "fmla v14.8h, 
v6.8h, v1.h[1]\n" "ldr q6, [x15, #0x80]\n" "fmla v11.8h, v7.8h, v0.h[1]\n" "fmla v15.8h, v7.8h, v1.h[1]\n" "ldr q7, [x15, #0x90]\n" "fmla v8.8h, v6.8h, v0.h[2]\n" "fmla v12.8h, v6.8h, v1.h[2]\n" "ldr q6, [x15, #0xa0]\n" "fmla v9.8h, v7.8h, v0.h[2]\n" "fmla v13.8h, v7.8h, v1.h[2]\n" "ldr q7, [x15, #0xb0]\n" "fmla v10.8h, v6.8h, v0.h[2]\n" "fmla v14.8h, v6.8h, v1.h[2]\n" "ldr q6, [x15, #0xc0]\n" "fmla v11.8h, v7.8h, v0.h[2]\n" "fmla v15.8h, v7.8h, v1.h[2]\n" "ldr q7, [x15, #0xd0]\n" "fmla v8.8h, v6.8h, v0.h[3]\n" "fmla v12.8h, v6.8h, v1.h[3]\n" "ldr q6, [x15, #0xe0]\n" "fmla v9.8h, v7.8h, v0.h[3]\n" "fmla v13.8h, v7.8h, v1.h[3]\n" "ldr q7, [x15, #0xf0]\n" "fmla v10.8h, v6.8h, v0.h[3]\n" "fmla v14.8h, v6.8h, v1.h[3]\n" "ldr q6, [x15, #0x100]\n" "fmla v11.8h, v7.8h, v0.h[3]\n" "fmla v15.8h, v7.8h, v1.h[3]\n" "ldr q7, [x15, #0x110]\n" "fmla v8.8h, v6.8h, v0.h[4]\n" "fmla v12.8h, v6.8h, v1.h[4]\n" "ldr q6, [x15, #0x120]\n" "fmla v9.8h, v7.8h, v0.h[4]\n" "fmla v13.8h, v7.8h, v1.h[4]\n" "ldr q7, [x15, #0x130]\n" "fmla v10.8h, v6.8h, v0.h[4]\n" "fmla v14.8h, v6.8h, v1.h[4]\n" "ldr q6, [x15, #0x140]\n" "fmla v11.8h, v7.8h, v0.h[4]\n" "fmla v15.8h, v7.8h, v1.h[4]\n" "ldr q7, [x15, #0x150]\n" "fmla v8.8h, v6.8h, v0.h[5]\n" "fmla v12.8h, v6.8h, v1.h[5]\n" "ldr q6, [x15, #0x160]\n" "fmla v9.8h, v7.8h, v0.h[5]\n" "fmla v13.8h, v7.8h, v1.h[5]\n" "ldr q7, [x15, #0x170]\n" "fmla v10.8h, v6.8h, v0.h[5]\n" "fmla v14.8h, v6.8h, v1.h[5]\n" "ldr q6, [x15, #0x180]\n" "fmla v11.8h, v7.8h, v0.h[5]\n" "fmla v15.8h, v7.8h, v1.h[5]\n" "ldr q7, [x15, #0x190]\n" "fmla v8.8h, v6.8h, v0.h[6]\n" "fmla v12.8h, v6.8h, v1.h[6]\n" "ldr q6, [x15, #0x1a0]\n" "fmla v9.8h, v7.8h, v0.h[6]\n" "fmla v13.8h, v7.8h, v1.h[6]\n" "ldr q7, [x15, #0x1b0]\n" "fmla v10.8h, v6.8h, v0.h[6]\n" "fmla v14.8h, v6.8h, v1.h[6]\n" "ldr q6, [x15, #0x1c0]\n" "fmla v11.8h, v7.8h, v0.h[6]\n" "fmla v15.8h, v7.8h, v1.h[6]\n" "ldr q7, [x15, #0x1d0]\n" "fmla v8.8h, v6.8h, v0.h[7]\n" "fmla v12.8h, v6.8h, v1.h[7]\n" "ldr q6, [x15, 
#0x1e0]\n" "fmla v9.8h, v7.8h, v0.h[7]\n" "fmla v13.8h, v7.8h, v1.h[7]\n" "ldr q7, [x15, #0x1f0]\n" "add x15, x15, #0x200\n" "fmla v10.8h, v6.8h, v0.h[7]\n" "fmla v14.8h, v6.8h, v1.h[7]\n" "fmla v11.8h, v7.8h, v0.h[7]\n" "fmla v15.8h, v7.8h, v1.h[7]\n" "79:" // Height 2: Multiply loop: Main loop skip "cbz x11, 81f\n" "80:" // Height 2: Multiply loop: Odd block loop "ldr h0, [x10], #0x2\n" "ldr h1, [x28], #0x2\n" "ldr q6, [x15, #0x0]\n" "fmla v8.8h, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" "fmla v12.8h, v6.8h, v1.h[0]\n" "ldr q6, [x15, #0x20]\n" "sub x11, x11, #0x1\n" "fmla v9.8h, v7.8h, v0.h[0]\n" "fmla v13.8h, v7.8h, v1.h[0]\n" "ldr q7, [x15, #0x30]\n" "fmla v10.8h, v6.8h, v0.h[0]\n" "add x15, x15, #0x40\n" "fmla v14.8h, v6.8h, v1.h[0]\n" "fmla v11.8h, v7.8h, v0.h[0]\n" "fmla v15.8h, v7.8h, v1.h[0]\n" "cbnz x11, 80b\n" "81:" // Height 2: Multiply loop: No odd multiplies "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "add x12, x12, #0x1\n" "cmp x12, x19\n" "bne 74b\n" "prfm pstl1keep, [x13, #0x0]\n" "prfm pstl1keep, [x9, #0x0]\n" "tbz %x[flags], #1, 82f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1r { v1.8h }, [x19]\n" "add x19, %x[args_ptr], %[offset_max]\n" "ld1r { v0.8h }, [x19]\n" "fmin v8.8h, v8.8h, v0.8h\n" "fmin v9.8h, v9.8h, v0.8h\n" "fmin v10.8h, v10.8h, v0.8h\n" "fmin v11.8h, v11.8h, v0.8h\n" "fmax v8.8h, v8.8h, v1.8h\n" "fmax v9.8h, v9.8h, v1.8h\n" "fmax v10.8h, v10.8h, v1.8h\n" "fmax v11.8h, v11.8h, v1.8h\n" "fmin v12.8h, v12.8h, v0.8h\n" "fmin v13.8h, v13.8h, v0.8h\n" "fmin v14.8h, v14.8h, v0.8h\n" "fmax v12.8h, v12.8h, v1.8h\n" "fmax v13.8h, v13.8h, v1.8h\n" "fmax v14.8h, v14.8h, v1.8h\n" "fmin v15.8h, v15.8h, v0.8h\n" "fmax v15.8h, v15.8h, v1.8h\n" "82:" // Height 2: No activation "cmp x16, #0x20\n" "bge 99f\n" "tbz x16, #4, 90f\n" "st1 { v8.8h }, [x13], #0x10\n" "st1 { v9.8h }, [x13], #0x10\n" "st1 { v12.8h }, [x9], #0x10\n" "st1 { v13.8h }, [x9], #0x10\n" "tbz x16, #3, 86f\n" "st1 { v10.8h }, [x13], #0x10\n" "st1 { v14.8h }, [x9], 
#0x10\n" "tbz x16, #2, 84f\n" "str d11, [x13], #0x8\n" "str d15, [x9], #0x8\n" "tbz x16, #1, 83f\n" "st1 { v11.s }[2], [x13], #0x4\n" "st1 { v15.s }[2], [x9], #0x4\n" "tbz x16, #0, 98f\n" "st1 { v11.h }[6], [x13]\n" "st1 { v15.h }[6], [x9]\n" "b 98f\n" "83:" // Height 2: Partial direct writeback: partial_1_28 "tbz x16, #0, 98f\n" "st1 { v11.h }[4], [x13]\n" "st1 { v15.h }[4], [x9]\n" "b 98f\n" "84:" // Height 2: Partial direct writeback: partial_2_24 "tbz x16, #1, 85f\n" "str s11, [x13], #0x4\n" "str s15, [x9], #0x4\n" "tbz x16, #0, 98f\n" "st1 { v11.h }[2], [x13]\n" "st1 { v15.h }[2], [x9]\n" "b 98f\n" "85:" // Height 2: Partial direct writeback: partial_1_24 "tbz x16, #0, 98f\n" "str h11, [x13, #0x0]\n" "str h15, [x9, #0x0]\n" "b 98f\n" "86:" // Height 2: Partial direct writeback: partial_4_16 "tbz x16, #2, 88f\n" "str d10, [x13], #0x8\n" "str d14, [x9], #0x8\n" "tbz x16, #1, 87f\n" "st1 { v10.s }[2], [x13], #0x4\n" "st1 { v14.s }[2], [x9], #0x4\n" "tbz x16, #0, 98f\n" "st1 { v10.h }[6], [x13]\n" "st1 { v14.h }[6], [x9]\n" "b 98f\n" "87:" // Height 2: Partial direct writeback: partial_1_20 "tbz x16, #0, 98f\n" "st1 { v10.h }[4], [x13]\n" "st1 { v14.h }[4], [x9]\n" "b 98f\n" "88:" // Height 2: Partial direct writeback: partial_2_16 "tbz x16, #1, 89f\n" "str s10, [x13], #0x4\n" "str s14, [x9], #0x4\n" "tbz x16, #0, 98f\n" "st1 { v10.h }[2], [x13]\n" "st1 { v14.h }[2], [x9]\n" "b 98f\n" "89:" // Height 2: Partial direct writeback: partial_1_16 "tbz x16, #0, 98f\n" "str h10, [x13, #0x0]\n" "str h14, [x9, #0x0]\n" "b 98f\n" "90:" // Height 2: Partial direct writeback: partial_8_0 "tbz x16, #3, 94f\n" "st1 { v8.8h }, [x13], #0x10\n" "st1 { v12.8h }, [x9], #0x10\n" "tbz x16, #2, 92f\n" "str d9, [x13], #0x8\n" "str d13, [x9], #0x8\n" "tbz x16, #1, 91f\n" "st1 { v9.s }[2], [x13], #0x4\n" "st1 { v13.s }[2], [x9], #0x4\n" "tbz x16, #0, 98f\n" "st1 { v9.h }[6], [x13]\n" "st1 { v13.h }[6], [x9]\n" "b 98f\n" "91:" // Height 2: Partial direct writeback: partial_1_12 "tbz x16, 
#0, 98f\n" "st1 { v9.h }[4], [x13]\n" "st1 { v13.h }[4], [x9]\n" "b 98f\n" "92:" // Height 2: Partial direct writeback: partial_2_8 "tbz x16, #1, 93f\n" "str s9, [x13], #0x4\n" "str s13, [x9], #0x4\n" "tbz x16, #0, 98f\n" "st1 { v9.h }[2], [x13]\n" "st1 { v13.h }[2], [x9]\n" "b 98f\n" "93:" // Height 2: Partial direct writeback: partial_1_8 "tbz x16, #0, 98f\n" "str h9, [x13, #0x0]\n" "str h13, [x9, #0x0]\n" "b 98f\n" "94:" // Height 2: Partial direct writeback: partial_4_0 "tbz x16, #2, 96f\n" "str d8, [x13], #0x8\n" "str d12, [x9], #0x8\n" "tbz x16, #1, 95f\n" "st1 { v8.s }[2], [x13], #0x4\n" "st1 { v12.s }[2], [x9], #0x4\n" "tbz x16, #0, 98f\n" "st1 { v8.h }[6], [x13]\n" "st1 { v12.h }[6], [x9]\n" "b 98f\n" "95:" // Height 2: Partial direct writeback: partial_1_4 "tbz x16, #0, 98f\n" "st1 { v8.h }[4], [x13]\n" "st1 { v12.h }[4], [x9]\n" "b 98f\n" "96:" // Height 2: Partial direct writeback: partial_2_0 "tbz x16, #1, 97f\n" "str s8, [x13], #0x4\n" "str s12, [x9], #0x4\n" "tbz x16, #0, 98f\n" "st1 { v8.h }[2], [x13]\n" "st1 { v12.h }[2], [x9]\n" "b 98f\n" "97:" // Height 2: Partial direct writeback: partial_1_0 "str h8, [x13, #0x0]\n" "str h12, [x9, #0x0]\n" "98:" // Height 2: Partial direct writeback: Done "b 100f\n" "99:" // Height 2: Full writeback "str q8, [x13, #0x0]\n" "str q9, [x13, #0x10]\n" "str q10, [x13, #0x20]\n" "str q11, [x13, #0x30]\n" "str q12, [x9, #0x0]\n" "str q13, [x9, #0x10]\n" "str q14, [x9, #0x20]\n" "str q15, [x9, #0x30]\n" "add x13, x13, #0x40\n" "add x9, x9, #0x40\n" "100:" // Height 2: Writeback done "subs x16, x16, #0x20\n" "bgt 53b\n" "b 302f\n" "101:" // Height 3 "ldr x16, [%x[args_ptr], %[offsetof_N]]\n" "mov x14, %x[bias]\n" "ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n" "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n" "tbz %x[flags], #2, 102f\n" "ldr x13, [%x[output_ptr], #0x0]\n" "add x13, x13, x19, LSL #1\n" "ldr x9, [%x[output_ptr], #0x8]\n" "ldr x27, [%x[output_ptr], #0x10]\n" "add x9, x9, x19, LSL #1\n" "add x27, x27, 
x19, LSL #1\n" "b 103f\n" "102:" // Height 3: setup direct output "mov x13, %x[output_ptr]\n" "add x9, x13, x19, LSL #1\n" "add x27, x9, x19, LSL #1\n" "103:" // Height 3: Column loop "cbz x14, 104f\n" "ldr q8, [x14, #0x0]\n" "mov v12.16b, v8.16b\n" "ldr q9, [x14, #0x10]\n" "mov v16.16b, v8.16b\n" "ldr q10, [x14, #0x20]\n" "ldr q11, [x14, #0x30]\n" "mov v13.16b, v9.16b\n" "add x14, x14, #0x40\n" "mov v17.16b, v9.16b\n" "mov v14.16b, v10.16b\n" "mov v15.16b, v11.16b\n" "mov v18.16b, v10.16b\n" "mov v19.16b, v11.16b\n" "b 123f\n" "104:" // Height 3: no bias "tbz %x[flags], #0, 122f\n" "cmp x16, #0x20\n" "bge 121f\n" "tbz x16, #4, 112f\n" "ld1 { v8.8h }, [x13], #0x10\n" "ld1 { v12.8h }, [x9], #0x10\n" "ld1 { v16.8h }, [x27], #0x10\n" "ld1 { v9.8h }, [x13], #0x10\n" "ld1 { v13.8h }, [x9], #0x10\n" "ld1 { v17.8h }, [x27], #0x10\n" "tbz x16, #3, 108f\n" "ld1 { v10.8h }, [x13], #0x10\n" "ld1 { v14.8h }, [x9], #0x10\n" "ld1 { v18.8h }, [x27], #0x10\n" "tbz x16, #2, 106f\n" "ldr d11, [x13], #0x8\n" "ldr d15, [x9], #0x8\n" "ldr d19, [x27], #0x8\n" "tbz x16, #1, 105f\n" "mov x19, #0x3c\n" "ld1 { v11.s }[2], [x13], #0x4\n" "ld1 { v15.s }[2], [x9], #0x4\n" "ld1 { v19.s }[2], [x27], #0x4\n" "tbz x16, #0, 120f\n" "ld1 { v11.h }[6], [x13]\n" "ld1 { v15.h }[6], [x9]\n" "ld1 { v19.h }[6], [x27]\n" "b 120f\n" "105:" // Height 3: Partial accumulate: partial_1_28 "mov x19, #0x38\n" "tbz x16, #0, 120f\n" "ld1 { v11.h }[4], [x13]\n" "ld1 { v15.h }[4], [x9]\n" "ld1 { v19.h }[4], [x27]\n" "b 120f\n" "106:" // Height 3: Partial accumulate: partial_2_24 "tbz x16, #1, 107f\n" "ldr s11, [x13], #0x4\n" "ldr s15, [x9], #0x4\n" "ldr s19, [x27], #0x4\n" "mov x19, #0x34\n" "tbz x16, #0, 120f\n" "ld1 { v11.h }[2], [x13]\n" "ld1 { v15.h }[2], [x9]\n" "ld1 { v19.h }[2], [x27]\n" "b 120f\n" "107:" // Height 3: Partial accumulate: partial_1_24 "mov x19, #0x30\n" "tbz x16, #0, 120f\n" "ldr h11, [x13, #0x0]\n" "ldr h15, [x9, #0x0]\n" "ldr h19, [x27, #0x0]\n" "b 120f\n" "108:" // Height 3: Partial 
accumulate: partial_4_16 "tbz x16, #2, 110f\n" "ldr d10, [x13], #0x8\n" "ldr d14, [x9], #0x8\n" "ldr d18, [x27], #0x8\n" "tbz x16, #1, 109f\n" "mov x19, #0x2c\n" "ld1 { v10.s }[2], [x13], #0x4\n" "ld1 { v14.s }[2], [x9], #0x4\n" "ld1 { v18.s }[2], [x27], #0x4\n" "tbz x16, #0, 120f\n" "ld1 { v10.h }[6], [x13]\n" "ld1 { v14.h }[6], [x9]\n" "ld1 { v18.h }[6], [x27]\n" "b 120f\n" "109:" // Height 3: Partial accumulate: partial_1_20 "mov x19, #0x28\n" "tbz x16, #0, 120f\n" "ld1 { v10.h }[4], [x13]\n" "ld1 { v14.h }[4], [x9]\n" "ld1 { v18.h }[4], [x27]\n" "b 120f\n" "110:" // Height 3: Partial accumulate: partial_2_16 "tbz x16, #1, 111f\n" "ldr s10, [x13], #0x4\n" "ldr s14, [x9], #0x4\n" "ldr s18, [x27], #0x4\n" "mov x19, #0x24\n" "tbz x16, #0, 120f\n" "ld1 { v10.h }[2], [x13]\n" "ld1 { v14.h }[2], [x9]\n" "ld1 { v18.h }[2], [x27]\n" "b 120f\n" "111:" // Height 3: Partial accumulate: partial_1_16 "mov x19, #0x20\n" "tbz x16, #0, 120f\n" "ldr h10, [x13, #0x0]\n" "ldr h14, [x9, #0x0]\n" "ldr h18, [x27, #0x0]\n" "b 120f\n" "112:" // Height 3: Partial accumulate: partial_8_0 "tbz x16, #3, 116f\n" "ld1 { v8.8h }, [x13], #0x10\n" "ld1 { v12.8h }, [x9], #0x10\n" "ld1 { v16.8h }, [x27], #0x10\n" "tbz x16, #2, 114f\n" "ldr d9, [x13], #0x8\n" "ldr d13, [x9], #0x8\n" "ldr d17, [x27], #0x8\n" "tbz x16, #1, 113f\n" "mov x19, #0x1c\n" "ld1 { v9.s }[2], [x13], #0x4\n" "ld1 { v13.s }[2], [x9], #0x4\n" "ld1 { v17.s }[2], [x27], #0x4\n" "tbz x16, #0, 120f\n" "ld1 { v9.h }[6], [x13]\n" "ld1 { v13.h }[6], [x9]\n" "ld1 { v17.h }[6], [x27]\n" "b 120f\n" "113:" // Height 3: Partial accumulate: partial_1_12 "mov x19, #0x18\n" "tbz x16, #0, 120f\n" "ld1 { v9.h }[4], [x13]\n" "ld1 { v13.h }[4], [x9]\n" "ld1 { v17.h }[4], [x27]\n" "b 120f\n" "114:" // Height 3: Partial accumulate: partial_2_8 "tbz x16, #1, 115f\n" "ldr s9, [x13], #0x4\n" "ldr s13, [x9], #0x4\n" "ldr s17, [x27], #0x4\n" "mov x19, #0x14\n" "tbz x16, #0, 120f\n" "ld1 { v9.h }[2], [x13]\n" "ld1 { v13.h }[2], [x9]\n" "ld1 { v17.h }[2], 
[x27]\n" "b 120f\n" "115:" // Height 3: Partial accumulate: partial_1_8 "mov x19, #0x10\n" "tbz x16, #0, 120f\n" "ldr h9, [x13, #0x0]\n" "ldr h13, [x9, #0x0]\n" "ldr h17, [x27, #0x0]\n" "b 120f\n" "116:" // Height 3: Partial accumulate: partial_4_0 "tbz x16, #2, 118f\n" "ldr d8, [x13], #0x8\n" "ldr d12, [x9], #0x8\n" "ldr d16, [x27], #0x8\n" "tbz x16, #1, 117f\n" "mov x19, #0xc\n" "ld1 { v8.s }[2], [x13], #0x4\n" "ld1 { v12.s }[2], [x9], #0x4\n" "ld1 { v16.s }[2], [x27], #0x4\n" "tbz x16, #0, 120f\n" "ld1 { v8.h }[6], [x13]\n" "ld1 { v12.h }[6], [x9]\n" "ld1 { v16.h }[6], [x27]\n" "b 120f\n" "117:" // Height 3: Partial accumulate: partial_1_4 "mov x19, #0x8\n" "tbz x16, #0, 120f\n" "ld1 { v8.h }[4], [x13]\n" "ld1 { v12.h }[4], [x9]\n" "ld1 { v16.h }[4], [x27]\n" "b 120f\n" "118:" // Height 3: Partial accumulate: partial_2_0 "tbz x16, #1, 119f\n" "ldr s8, [x13], #0x4\n" "ldr s12, [x9], #0x4\n" "ldr s16, [x27], #0x4\n" "mov x19, #0x4\n" "tbz x16, #0, 120f\n" "ld1 { v8.h }[2], [x13]\n" "ld1 { v12.h }[2], [x9]\n" "ld1 { v16.h }[2], [x27]\n" "b 120f\n" "119:" // Height 3: Partial accumulate: partial_1_0 "mov x19, #0x0\n" "ldr h8, [x13, #0x0]\n" "ldr h12, [x9, #0x0]\n" "ldr h16, [x27, #0x0]\n" "120:" // Height 3: Partial accumulate: Done "sub x13, x13, x19\n" "sub x9, x9, x19\n" "sub x27, x27, x19\n" "b 123f\n" "121:" // Height 3: full accumulate "ldr q8, [x13, #0x0]\n" "ldr q9, [x13, #0x10]\n" "ldr q10, [x13, #0x20]\n" "ldr q11, [x13, #0x30]\n" "ldr q12, [x9, #0x0]\n" "ldr q13, [x9, #0x10]\n" "ldr q14, [x9, #0x20]\n" "ldr q15, [x9, #0x30]\n" "ldr q16, [x27, #0x0]\n" "ldr q17, [x27, #0x10]\n" "ldr q18, [x27, #0x20]\n" "ldr q19, [x27, #0x30]\n" "b 123f\n" "122:" // Height 3: no accumulate "movi v8.16b, #0x0\n" "movi v9.16b, #0x0\n" "movi v10.16b, #0x0\n" "movi v11.16b, #0x0\n" "movi v12.16b, #0x0\n" "movi v13.16b, #0x0\n" "movi v14.16b, #0x0\n" "movi v15.16b, #0x0\n" "movi v16.16b, #0x0\n" "movi v17.16b, #0x0\n" "movi v18.16b, #0x0\n" "movi v19.16b, #0x0\n" "123:" // 
Height 3: setup done "mov x12, #0x0\n" "124:" // Height 3: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n" "ldr w11, [x20, x12, LSL #0x2]\n" "tbz %x[flags], #3, 125f\n" "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n" "add x20, x20, x19, LSL #3\n" "ldr x10, [x20, #0x0]\n" "ldr x28, [x20, #0x8]\n" "ldr x26, [x20, #0x10]\n" "cbnz x12, 126f\n" "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n" "add x10, x10, x19, LSL #1\n" "add x28, x28, x19, LSL #1\n" "add x26, x26, x19, LSL #1\n" "b 126f\n" "125:" // Height 3: setup direct input "mov x10, %x[input_ptr]\n" "add x28, x10, x19, LSL #1\n" "add x26, x28, x19, LSL #1\n" "126:" // Height 3: input setup done "cmp x11, #0x8\n" "blt 129f\n" "cmp x11, #0x10\n" "blt 128f\n" "127:" // Height 3: Multiply loop: Main loop head "ldr q0, [x10, #0x0]\n" "ldr q1, [x28, #0x0]\n" "ldr q2, [x26, #0x0]\n" "ldr q6, [x15, #0x0]\n" "fmla v8.8h, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" "fmla v12.8h, v6.8h, v1.h[0]\n" "add x10, x10, #0x10\n" "prfm pldl1keep, [x10, #0x80]\n" "fmla v16.8h, v6.8h, v2.h[0]\n" "ldr q6, [x15, #0x20]\n" "fmla v9.8h, v7.8h, v0.h[0]\n" "add x28, x28, #0x10\n" "prfm pldl1keep, [x28, #0x80]\n" "fmla v13.8h, v7.8h, v1.h[0]\n" "add x26, x26, #0x10\n" "fmla v17.8h, v7.8h, v2.h[0]\n" "prfm pldl1keep, [x26, #0x80]\n" "ldr q7, [x15, #0x30]\n" "fmla v10.8h, v6.8h, v0.h[0]\n" "sub x11, x11, #0x8\n" "fmla v14.8h, v6.8h, v1.h[0]\n" "cmp x11, #0x10\n" "fmla v18.8h, v6.8h, v2.h[0]\n" "ldr q6, [x15, #0x40]\n" "fmla v11.8h, v7.8h, v0.h[0]\n" "fmla v15.8h, v7.8h, v1.h[0]\n" "fmla v19.8h, v7.8h, v2.h[0]\n" "ldr q7, [x15, #0x50]\n" "fmla v8.8h, v6.8h, v0.h[1]\n" "fmla v12.8h, v6.8h, v1.h[1]\n" "fmla v16.8h, v6.8h, v2.h[1]\n" "ldr q6, [x15, #0x60]\n" "fmla v9.8h, v7.8h, v0.h[1]\n" "fmla v13.8h, v7.8h, v1.h[1]\n" "fmla v17.8h, v7.8h, v2.h[1]\n" "ldr q7, [x15, #0x70]\n" "fmla v10.8h, v6.8h, v0.h[1]\n" "fmla v14.8h, v6.8h, v1.h[1]\n" "fmla v18.8h, v6.8h, v2.h[1]\n" 
"ldr q6, [x15, #0x80]\n" "fmla v11.8h, v7.8h, v0.h[1]\n" "fmla v15.8h, v7.8h, v1.h[1]\n" "fmla v19.8h, v7.8h, v2.h[1]\n" "ldr q7, [x15, #0x90]\n" "fmla v8.8h, v6.8h, v0.h[2]\n" "fmla v12.8h, v6.8h, v1.h[2]\n" "fmla v16.8h, v6.8h, v2.h[2]\n" "ldr q6, [x15, #0xa0]\n" "fmla v9.8h, v7.8h, v0.h[2]\n" "fmla v13.8h, v7.8h, v1.h[2]\n" "fmla v17.8h, v7.8h, v2.h[2]\n" "ldr q7, [x15, #0xb0]\n" "fmla v10.8h, v6.8h, v0.h[2]\n" "fmla v14.8h, v6.8h, v1.h[2]\n" "fmla v18.8h, v6.8h, v2.h[2]\n" "ldr q6, [x15, #0xc0]\n" "fmla v11.8h, v7.8h, v0.h[2]\n" "fmla v15.8h, v7.8h, v1.h[2]\n" "fmla v19.8h, v7.8h, v2.h[2]\n" "ldr q7, [x15, #0xd0]\n" "fmla v8.8h, v6.8h, v0.h[3]\n" "fmla v12.8h, v6.8h, v1.h[3]\n" "fmla v16.8h, v6.8h, v2.h[3]\n" "ldr q6, [x15, #0xe0]\n" "fmla v9.8h, v7.8h, v0.h[3]\n" "fmla v13.8h, v7.8h, v1.h[3]\n" "fmla v17.8h, v7.8h, v2.h[3]\n" "ldr q7, [x15, #0xf0]\n" "fmla v10.8h, v6.8h, v0.h[3]\n" "fmla v14.8h, v6.8h, v1.h[3]\n" "fmla v18.8h, v6.8h, v2.h[3]\n" "ldr q6, [x15, #0x100]\n" "fmla v11.8h, v7.8h, v0.h[3]\n" "fmla v15.8h, v7.8h, v1.h[3]\n" "fmla v19.8h, v7.8h, v2.h[3]\n" "ldr q7, [x15, #0x110]\n" "fmla v8.8h, v6.8h, v0.h[4]\n" "fmla v12.8h, v6.8h, v1.h[4]\n" "fmla v16.8h, v6.8h, v2.h[4]\n" "ldr q6, [x15, #0x120]\n" "fmla v9.8h, v7.8h, v0.h[4]\n" "fmla v13.8h, v7.8h, v1.h[4]\n" "fmla v17.8h, v7.8h, v2.h[4]\n" "ldr q7, [x15, #0x130]\n" "fmla v10.8h, v6.8h, v0.h[4]\n" "fmla v14.8h, v6.8h, v1.h[4]\n" "fmla v18.8h, v6.8h, v2.h[4]\n" "ldr q6, [x15, #0x140]\n" "fmla v11.8h, v7.8h, v0.h[4]\n" "fmla v15.8h, v7.8h, v1.h[4]\n" "fmla v19.8h, v7.8h, v2.h[4]\n" "ldr q7, [x15, #0x150]\n" "fmla v8.8h, v6.8h, v0.h[5]\n" "fmla v12.8h, v6.8h, v1.h[5]\n" "fmla v16.8h, v6.8h, v2.h[5]\n" "ldr q6, [x15, #0x160]\n" "fmla v9.8h, v7.8h, v0.h[5]\n" "fmla v13.8h, v7.8h, v1.h[5]\n" "fmla v17.8h, v7.8h, v2.h[5]\n" "ldr q7, [x15, #0x170]\n" "fmla v10.8h, v6.8h, v0.h[5]\n" "fmla v14.8h, v6.8h, v1.h[5]\n" "fmla v18.8h, v6.8h, v2.h[5]\n" "ldr q6, [x15, #0x180]\n" "fmla v11.8h, v7.8h, v0.h[5]\n" "fmla 
v15.8h, v7.8h, v1.h[5]\n" "fmla v19.8h, v7.8h, v2.h[5]\n" "ldr q7, [x15, #0x190]\n" "fmla v8.8h, v6.8h, v0.h[6]\n" "fmla v12.8h, v6.8h, v1.h[6]\n" "fmla v16.8h, v6.8h, v2.h[6]\n" "ldr q6, [x15, #0x1a0]\n" "fmla v9.8h, v7.8h, v0.h[6]\n" "fmla v13.8h, v7.8h, v1.h[6]\n" "fmla v17.8h, v7.8h, v2.h[6]\n" "ldr q7, [x15, #0x1b0]\n" "fmla v10.8h, v6.8h, v0.h[6]\n" "fmla v14.8h, v6.8h, v1.h[6]\n" "fmla v18.8h, v6.8h, v2.h[6]\n" "ldr q6, [x15, #0x1c0]\n" "fmla v11.8h, v7.8h, v0.h[6]\n" "fmla v15.8h, v7.8h, v1.h[6]\n" "fmla v19.8h, v7.8h, v2.h[6]\n" "ldr q7, [x15, #0x1d0]\n" "fmla v8.8h, v6.8h, v0.h[7]\n" "fmla v12.8h, v6.8h, v1.h[7]\n" "fmla v16.8h, v6.8h, v2.h[7]\n" "ldr q6, [x15, #0x1e0]\n" "fmla v9.8h, v7.8h, v0.h[7]\n" "fmla v13.8h, v7.8h, v1.h[7]\n" "fmla v17.8h, v7.8h, v2.h[7]\n" "ldr q7, [x15, #0x1f0]\n" "add x15, x15, #0x200\n" "fmla v10.8h, v6.8h, v0.h[7]\n" "fmla v14.8h, v6.8h, v1.h[7]\n" "fmla v18.8h, v6.8h, v2.h[7]\n" "fmla v11.8h, v7.8h, v0.h[7]\n" "fmla v15.8h, v7.8h, v1.h[7]\n" "fmla v19.8h, v7.8h, v2.h[7]\n" "bge 127b\n" "128:" // Height 3: Multiply loop: Single iteration only "sub x11, x11, #0x8\n" "ldr q0, [x10, #0x0]\n" "ldr q1, [x28, #0x0]\n" "ldr q2, [x26, #0x0]\n" "ldr q6, [x15, #0x0]\n" "fmla v8.8h, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" "fmla v12.8h, v6.8h, v1.h[0]\n" "add x10, x10, #0x10\n" "prfm pldl1keep, [x10, #0x80]\n" "fmla v16.8h, v6.8h, v2.h[0]\n" "ldr q6, [x15, #0x20]\n" "fmla v9.8h, v7.8h, v0.h[0]\n" "add x28, x28, #0x10\n" "prfm pldl1keep, [x28, #0x80]\n" "fmla v13.8h, v7.8h, v1.h[0]\n" "add x26, x26, #0x10\n" "fmla v17.8h, v7.8h, v2.h[0]\n" "prfm pldl1keep, [x26, #0x80]\n" "ldr q7, [x15, #0x30]\n" "fmla v10.8h, v6.8h, v0.h[0]\n" "fmla v14.8h, v6.8h, v1.h[0]\n" "fmla v18.8h, v6.8h, v2.h[0]\n" "ldr q6, [x15, #0x40]\n" "fmla v11.8h, v7.8h, v0.h[0]\n" "fmla v15.8h, v7.8h, v1.h[0]\n" "fmla v19.8h, v7.8h, v2.h[0]\n" "ldr q7, [x15, #0x50]\n" "fmla v8.8h, v6.8h, v0.h[1]\n" "fmla v12.8h, v6.8h, v1.h[1]\n" "fmla v16.8h, v6.8h, v2.h[1]\n" "ldr q6, 
[x15, #0x60]\n" "fmla v9.8h, v7.8h, v0.h[1]\n" "fmla v13.8h, v7.8h, v1.h[1]\n" "fmla v17.8h, v7.8h, v2.h[1]\n" "ldr q7, [x15, #0x70]\n" "fmla v10.8h, v6.8h, v0.h[1]\n" "fmla v14.8h, v6.8h, v1.h[1]\n" "fmla v18.8h, v6.8h, v2.h[1]\n" "ldr q6, [x15, #0x80]\n" "fmla v11.8h, v7.8h, v0.h[1]\n" "fmla v15.8h, v7.8h, v1.h[1]\n" "fmla v19.8h, v7.8h, v2.h[1]\n" "ldr q7, [x15, #0x90]\n" "fmla v8.8h, v6.8h, v0.h[2]\n" "fmla v12.8h, v6.8h, v1.h[2]\n" "fmla v16.8h, v6.8h, v2.h[2]\n" "ldr q6, [x15, #0xa0]\n" "fmla v9.8h, v7.8h, v0.h[2]\n" "fmla v13.8h, v7.8h, v1.h[2]\n" "fmla v17.8h, v7.8h, v2.h[2]\n" "ldr q7, [x15, #0xb0]\n" "fmla v10.8h, v6.8h, v0.h[2]\n" "fmla v14.8h, v6.8h, v1.h[2]\n" "fmla v18.8h, v6.8h, v2.h[2]\n" "ldr q6, [x15, #0xc0]\n" "fmla v11.8h, v7.8h, v0.h[2]\n" "fmla v15.8h, v7.8h, v1.h[2]\n" "fmla v19.8h, v7.8h, v2.h[2]\n" "ldr q7, [x15, #0xd0]\n" "fmla v8.8h, v6.8h, v0.h[3]\n" "fmla v12.8h, v6.8h, v1.h[3]\n" "fmla v16.8h, v6.8h, v2.h[3]\n" "ldr q6, [x15, #0xe0]\n" "fmla v9.8h, v7.8h, v0.h[3]\n" "fmla v13.8h, v7.8h, v1.h[3]\n" "fmla v17.8h, v7.8h, v2.h[3]\n" "ldr q7, [x15, #0xf0]\n" "fmla v10.8h, v6.8h, v0.h[3]\n" "fmla v14.8h, v6.8h, v1.h[3]\n" "fmla v18.8h, v6.8h, v2.h[3]\n" "ldr q6, [x15, #0x100]\n" "fmla v11.8h, v7.8h, v0.h[3]\n" "fmla v15.8h, v7.8h, v1.h[3]\n" "fmla v19.8h, v7.8h, v2.h[3]\n" "ldr q7, [x15, #0x110]\n" "fmla v8.8h, v6.8h, v0.h[4]\n" "fmla v12.8h, v6.8h, v1.h[4]\n" "fmla v16.8h, v6.8h, v2.h[4]\n" "ldr q6, [x15, #0x120]\n" "fmla v9.8h, v7.8h, v0.h[4]\n" "fmla v13.8h, v7.8h, v1.h[4]\n" "fmla v17.8h, v7.8h, v2.h[4]\n" "ldr q7, [x15, #0x130]\n" "fmla v10.8h, v6.8h, v0.h[4]\n" "fmla v14.8h, v6.8h, v1.h[4]\n" "fmla v18.8h, v6.8h, v2.h[4]\n" "ldr q6, [x15, #0x140]\n" "fmla v11.8h, v7.8h, v0.h[4]\n" "fmla v15.8h, v7.8h, v1.h[4]\n" "fmla v19.8h, v7.8h, v2.h[4]\n" "ldr q7, [x15, #0x150]\n" "fmla v8.8h, v6.8h, v0.h[5]\n" "fmla v12.8h, v6.8h, v1.h[5]\n" "fmla v16.8h, v6.8h, v2.h[5]\n" "ldr q6, [x15, #0x160]\n" "fmla v9.8h, v7.8h, v0.h[5]\n" "fmla v13.8h, 
v7.8h, v1.h[5]\n" "fmla v17.8h, v7.8h, v2.h[5]\n" "ldr q7, [x15, #0x170]\n" "fmla v10.8h, v6.8h, v0.h[5]\n" "fmla v14.8h, v6.8h, v1.h[5]\n" "fmla v18.8h, v6.8h, v2.h[5]\n" "ldr q6, [x15, #0x180]\n" "fmla v11.8h, v7.8h, v0.h[5]\n" "fmla v15.8h, v7.8h, v1.h[5]\n" "fmla v19.8h, v7.8h, v2.h[5]\n" "ldr q7, [x15, #0x190]\n" "fmla v8.8h, v6.8h, v0.h[6]\n" "fmla v12.8h, v6.8h, v1.h[6]\n" "fmla v16.8h, v6.8h, v2.h[6]\n" "ldr q6, [x15, #0x1a0]\n" "fmla v9.8h, v7.8h, v0.h[6]\n" "fmla v13.8h, v7.8h, v1.h[6]\n" "fmla v17.8h, v7.8h, v2.h[6]\n" "ldr q7, [x15, #0x1b0]\n" "fmla v10.8h, v6.8h, v0.h[6]\n" "fmla v14.8h, v6.8h, v1.h[6]\n" "fmla v18.8h, v6.8h, v2.h[6]\n" "ldr q6, [x15, #0x1c0]\n" "fmla v11.8h, v7.8h, v0.h[6]\n" "fmla v15.8h, v7.8h, v1.h[6]\n" "fmla v19.8h, v7.8h, v2.h[6]\n" "ldr q7, [x15, #0x1d0]\n" "fmla v8.8h, v6.8h, v0.h[7]\n" "fmla v12.8h, v6.8h, v1.h[7]\n" "fmla v16.8h, v6.8h, v2.h[7]\n" "ldr q6, [x15, #0x1e0]\n" "fmla v9.8h, v7.8h, v0.h[7]\n" "fmla v13.8h, v7.8h, v1.h[7]\n" "fmla v17.8h, v7.8h, v2.h[7]\n" "ldr q7, [x15, #0x1f0]\n" "add x15, x15, #0x200\n" "fmla v10.8h, v6.8h, v0.h[7]\n" "fmla v14.8h, v6.8h, v1.h[7]\n" "fmla v18.8h, v6.8h, v2.h[7]\n" "fmla v11.8h, v7.8h, v0.h[7]\n" "fmla v15.8h, v7.8h, v1.h[7]\n" "fmla v19.8h, v7.8h, v2.h[7]\n" "129:" // Height 3: Multiply loop: Main loop skip "cbz x11, 131f\n" "130:" // Height 3: Multiply loop: Odd block loop "ldr h0, [x10], #0x2\n" "ldr h1, [x28], #0x2\n" "ldr h2, [x26], #0x2\n" "ldr q6, [x15, #0x0]\n" "fmla v8.8h, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" "fmla v12.8h, v6.8h, v1.h[0]\n" "sub x11, x11, #0x1\n" "fmla v16.8h, v6.8h, v2.h[0]\n" "ldr q6, [x15, #0x20]\n" "fmla v9.8h, v7.8h, v0.h[0]\n" "fmla v13.8h, v7.8h, v1.h[0]\n" "fmla v17.8h, v7.8h, v2.h[0]\n" "ldr q7, [x15, #0x30]\n" "fmla v10.8h, v6.8h, v0.h[0]\n" "add x15, x15, #0x40\n" "fmla v14.8h, v6.8h, v1.h[0]\n" "fmla v18.8h, v6.8h, v2.h[0]\n" "fmla v11.8h, v7.8h, v0.h[0]\n" "fmla v15.8h, v7.8h, v1.h[0]\n" "fmla v19.8h, v7.8h, v2.h[0]\n" "cbnz x11, 
130b\n" "131:" // Height 3: Multiply loop: No odd multiplies "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "add x12, x12, #0x1\n" "cmp x12, x19\n" "bne 124b\n" "prfm pstl1keep, [x13, #0x0]\n" "prfm pstl1keep, [x9, #0x0]\n" "prfm pstl1keep, [x27, #0x0]\n" "tbz %x[flags], #1, 132f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1r { v1.8h }, [x19]\n" "add x19, %x[args_ptr], %[offset_max]\n" "ld1r { v0.8h }, [x19]\n" "fmin v8.8h, v8.8h, v0.8h\n" "fmin v9.8h, v9.8h, v0.8h\n" "fmin v10.8h, v10.8h, v0.8h\n" "fmin v11.8h, v11.8h, v0.8h\n" "fmax v8.8h, v8.8h, v1.8h\n" "fmax v9.8h, v9.8h, v1.8h\n" "fmax v10.8h, v10.8h, v1.8h\n" "fmax v11.8h, v11.8h, v1.8h\n" "fmin v12.8h, v12.8h, v0.8h\n" "fmin v13.8h, v13.8h, v0.8h\n" "fmin v14.8h, v14.8h, v0.8h\n" "fmax v12.8h, v12.8h, v1.8h\n" "fmax v13.8h, v13.8h, v1.8h\n" "fmax v14.8h, v14.8h, v1.8h\n" "fmin v15.8h, v15.8h, v0.8h\n" "fmin v16.8h, v16.8h, v0.8h\n" "fmin v17.8h, v17.8h, v0.8h\n" "fmax v15.8h, v15.8h, v1.8h\n" "fmax v16.8h, v16.8h, v1.8h\n" "fmax v17.8h, v17.8h, v1.8h\n" "fmin v18.8h, v18.8h, v0.8h\n" "fmin v19.8h, v19.8h, v0.8h\n" "fmax v18.8h, v18.8h, v1.8h\n" "fmax v19.8h, v19.8h, v1.8h\n" "132:" // Height 3: No activation "cmp x16, #0x20\n" "bge 149f\n" "tbz x16, #4, 140f\n" "st1 { v8.8h }, [x13], #0x10\n" "st1 { v9.8h }, [x13], #0x10\n" "st1 { v12.8h }, [x9], #0x10\n" "st1 { v13.8h }, [x9], #0x10\n" "st1 { v16.8h }, [x27], #0x10\n" "st1 { v17.8h }, [x27], #0x10\n" "tbz x16, #3, 136f\n" "st1 { v10.8h }, [x13], #0x10\n" "st1 { v14.8h }, [x9], #0x10\n" "st1 { v18.8h }, [x27], #0x10\n" "tbz x16, #2, 134f\n" "str d11, [x13], #0x8\n" "str d15, [x9], #0x8\n" "str d19, [x27], #0x8\n" "tbz x16, #1, 133f\n" "st1 { v11.s }[2], [x13], #0x4\n" "st1 { v15.s }[2], [x9], #0x4\n" "st1 { v19.s }[2], [x27], #0x4\n" "tbz x16, #0, 148f\n" "st1 { v11.h }[6], [x13]\n" "st1 { v15.h }[6], [x9]\n" "st1 { v19.h }[6], [x27]\n" "b 148f\n" "133:" // Height 3: Partial direct writeback: partial_1_28 "tbz x16, #0, 148f\n" "st1 { v11.h }[4], 
[x13]\n" "st1 { v15.h }[4], [x9]\n" "st1 { v19.h }[4], [x27]\n" "b 148f\n" "134:" // Height 3: Partial direct writeback: partial_2_24 "tbz x16, #1, 135f\n" "str s11, [x13], #0x4\n" "str s15, [x9], #0x4\n" "str s19, [x27], #0x4\n" "tbz x16, #0, 148f\n" "st1 { v11.h }[2], [x13]\n" "st1 { v15.h }[2], [x9]\n" "st1 { v19.h }[2], [x27]\n" "b 148f\n" "135:" // Height 3: Partial direct writeback: partial_1_24 "tbz x16, #0, 148f\n" "str h11, [x13, #0x0]\n" "str h15, [x9, #0x0]\n" "str h19, [x27, #0x0]\n" "b 148f\n" "136:" // Height 3: Partial direct writeback: partial_4_16 "tbz x16, #2, 138f\n" "str d10, [x13], #0x8\n" "str d14, [x9], #0x8\n" "str d18, [x27], #0x8\n" "tbz x16, #1, 137f\n" "st1 { v10.s }[2], [x13], #0x4\n" "st1 { v14.s }[2], [x9], #0x4\n" "st1 { v18.s }[2], [x27], #0x4\n" "tbz x16, #0, 148f\n" "st1 { v10.h }[6], [x13]\n" "st1 { v14.h }[6], [x9]\n" "st1 { v18.h }[6], [x27]\n" "b 148f\n" "137:" // Height 3: Partial direct writeback: partial_1_20 "tbz x16, #0, 148f\n" "st1 { v10.h }[4], [x13]\n" "st1 { v14.h }[4], [x9]\n" "st1 { v18.h }[4], [x27]\n" "b 148f\n" "138:" // Height 3: Partial direct writeback: partial_2_16 "tbz x16, #1, 139f\n" "str s10, [x13], #0x4\n" "str s14, [x9], #0x4\n" "str s18, [x27], #0x4\n" "tbz x16, #0, 148f\n" "st1 { v10.h }[2], [x13]\n" "st1 { v14.h }[2], [x9]\n" "st1 { v18.h }[2], [x27]\n" "b 148f\n" "139:" // Height 3: Partial direct writeback: partial_1_16 "tbz x16, #0, 148f\n" "str h10, [x13, #0x0]\n" "str h14, [x9, #0x0]\n" "str h18, [x27, #0x0]\n" "b 148f\n" "140:" // Height 3: Partial direct writeback: partial_8_0 "tbz x16, #3, 144f\n" "st1 { v8.8h }, [x13], #0x10\n" "st1 { v12.8h }, [x9], #0x10\n" "st1 { v16.8h }, [x27], #0x10\n" "tbz x16, #2, 142f\n" "str d9, [x13], #0x8\n" "str d13, [x9], #0x8\n" "str d17, [x27], #0x8\n" "tbz x16, #1, 141f\n" "st1 { v9.s }[2], [x13], #0x4\n" "st1 { v13.s }[2], [x9], #0x4\n" "st1 { v17.s }[2], [x27], #0x4\n" "tbz x16, #0, 148f\n" "st1 { v9.h }[6], [x13]\n" "st1 { v13.h }[6], [x9]\n" "st1 { 
v17.h }[6], [x27]\n" "b 148f\n" "141:" // Height 3: Partial direct writeback: partial_1_12 "tbz x16, #0, 148f\n" "st1 { v9.h }[4], [x13]\n" "st1 { v13.h }[4], [x9]\n" "st1 { v17.h }[4], [x27]\n" "b 148f\n" "142:" // Height 3: Partial direct writeback: partial_2_8 "tbz x16, #1, 143f\n" "str s9, [x13], #0x4\n" "str s13, [x9], #0x4\n" "str s17, [x27], #0x4\n" "tbz x16, #0, 148f\n" "st1 { v9.h }[2], [x13]\n" "st1 { v13.h }[2], [x9]\n" "st1 { v17.h }[2], [x27]\n" "b 148f\n" "143:" // Height 3: Partial direct writeback: partial_1_8 "tbz x16, #0, 148f\n" "str h9, [x13, #0x0]\n" "str h13, [x9, #0x0]\n" "str h17, [x27, #0x0]\n" "b 148f\n" "144:" // Height 3: Partial direct writeback: partial_4_0 "tbz x16, #2, 146f\n" "str d8, [x13], #0x8\n" "str d12, [x9], #0x8\n" "str d16, [x27], #0x8\n" "tbz x16, #1, 145f\n" "st1 { v8.s }[2], [x13], #0x4\n" "st1 { v12.s }[2], [x9], #0x4\n" "st1 { v16.s }[2], [x27], #0x4\n" "tbz x16, #0, 148f\n" "st1 { v8.h }[6], [x13]\n" "st1 { v12.h }[6], [x9]\n" "st1 { v16.h }[6], [x27]\n" "b 148f\n" "145:" // Height 3: Partial direct writeback: partial_1_4 "tbz x16, #0, 148f\n" "st1 { v8.h }[4], [x13]\n" "st1 { v12.h }[4], [x9]\n" "st1 { v16.h }[4], [x27]\n" "b 148f\n" "146:" // Height 3: Partial direct writeback: partial_2_0 "tbz x16, #1, 147f\n" "str s8, [x13], #0x4\n" "str s12, [x9], #0x4\n" "str s16, [x27], #0x4\n" "tbz x16, #0, 148f\n" "st1 { v8.h }[2], [x13]\n" "st1 { v12.h }[2], [x9]\n" "st1 { v16.h }[2], [x27]\n" "b 148f\n" "147:" // Height 3: Partial direct writeback: partial_1_0 "str h8, [x13, #0x0]\n" "str h12, [x9, #0x0]\n" "str h16, [x27, #0x0]\n" "148:" // Height 3: Partial direct writeback: Done "b 150f\n" "149:" // Height 3: Full writeback "str q8, [x13, #0x0]\n" "str q9, [x13, #0x10]\n" "str q10, [x13, #0x20]\n" "str q11, [x13, #0x30]\n" "str q12, [x9, #0x0]\n" "str q13, [x9, #0x10]\n" "str q14, [x9, #0x20]\n" "str q15, [x9, #0x30]\n" "str q16, [x27, #0x0]\n" "str q17, [x27, #0x10]\n" "str q18, [x27, #0x20]\n" "str q19, [x27, #0x30]\n" 
"add x13, x13, #0x40\n" "add x9, x9, #0x40\n" "add x27, x27, #0x40\n" "150:" // Height 3: Writeback done "subs x16, x16, #0x20\n" "bgt 103b\n" "b 302f\n" "151:" // Height 4 "ldr x16, [%x[args_ptr], %[offsetof_N]]\n" "mov x14, %x[bias]\n" "ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n" "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n" "tbz %x[flags], #2, 152f\n" "ldr x13, [%x[output_ptr], #0x0]\n" "add x13, x13, x19, LSL #1\n" "ldr x9, [%x[output_ptr], #0x8]\n" "ldr x27, [%x[output_ptr], #0x10]\n" "add x9, x9, x19, LSL #1\n" "ldr x25, [%x[output_ptr], #0x18]\n" "add x27, x27, x19, LSL #1\n" "add x25, x25, x19, LSL #1\n" "b 153f\n" "152:" // Height 4: setup direct output "mov x13, %x[output_ptr]\n" "add x9, x13, x19, LSL #1\n" "add x27, x9, x19, LSL #1\n" "add x25, x27, x19, LSL #1\n" "153:" // Height 4: Column loop "cbz x14, 154f\n" "ldr q8, [x14, #0x0]\n" "mov v12.16b, v8.16b\n" "ldr q9, [x14, #0x10]\n" "mov v16.16b, v8.16b\n" "ldr q10, [x14, #0x20]\n" "mov v20.16b, v8.16b\n" "ldr q11, [x14, #0x30]\n" "add x14, x14, #0x40\n" "mov v13.16b, v9.16b\n" "mov v17.16b, v9.16b\n" "mov v14.16b, v10.16b\n" "mov v15.16b, v11.16b\n" "mov v18.16b, v10.16b\n" "mov v19.16b, v11.16b\n" "mov v21.16b, v9.16b\n" "mov v22.16b, v10.16b\n" "mov v23.16b, v11.16b\n" "b 173f\n" "154:" // Height 4: no bias "tbz %x[flags], #0, 172f\n" "cmp x16, #0x20\n" "bge 171f\n" "tbz x16, #4, 162f\n" "ld1 { v8.8h }, [x13], #0x10\n" "ld1 { v12.8h }, [x9], #0x10\n" "ld1 { v16.8h }, [x27], #0x10\n" "ld1 { v20.8h }, [x25], #0x10\n" "ld1 { v9.8h }, [x13], #0x10\n" "ld1 { v13.8h }, [x9], #0x10\n" "ld1 { v17.8h }, [x27], #0x10\n" "ld1 { v21.8h }, [x25], #0x10\n" "tbz x16, #3, 158f\n" "ld1 { v10.8h }, [x13], #0x10\n" "ld1 { v14.8h }, [x9], #0x10\n" "ld1 { v18.8h }, [x27], #0x10\n" "ld1 { v22.8h }, [x25], #0x10\n" "tbz x16, #2, 156f\n" "ldr d11, [x13], #0x8\n" "ldr d15, [x9], #0x8\n" "ldr d19, [x27], #0x8\n" "ldr d23, [x25], #0x8\n" "tbz x16, #1, 155f\n" "mov x19, #0x3c\n" "ld1 { v11.s }[2], [x13], #0x4\n" 
"ld1 { v15.s }[2], [x9], #0x4\n" "ld1 { v19.s }[2], [x27], #0x4\n" "ld1 { v23.s }[2], [x25], #0x4\n" "tbz x16, #0, 170f\n" "ld1 { v11.h }[6], [x13]\n" "ld1 { v15.h }[6], [x9]\n" "ld1 { v19.h }[6], [x27]\n" "ld1 { v23.h }[6], [x25]\n" "b 170f\n" "155:" // Height 4: Partial accumulate: partial_1_28 "mov x19, #0x38\n" "tbz x16, #0, 170f\n" "ld1 { v11.h }[4], [x13]\n" "ld1 { v15.h }[4], [x9]\n" "ld1 { v19.h }[4], [x27]\n" "ld1 { v23.h }[4], [x25]\n" "b 170f\n" "156:" // Height 4: Partial accumulate: partial_2_24 "tbz x16, #1, 157f\n" "ldr s11, [x13], #0x4\n" "ldr s15, [x9], #0x4\n" "ldr s19, [x27], #0x4\n" "ldr s23, [x25], #0x4\n" "mov x19, #0x34\n" "tbz x16, #0, 170f\n" "ld1 { v11.h }[2], [x13]\n" "ld1 { v15.h }[2], [x9]\n" "ld1 { v19.h }[2], [x27]\n" "ld1 { v23.h }[2], [x25]\n" "b 170f\n" "157:" // Height 4: Partial accumulate: partial_1_24 "mov x19, #0x30\n" "tbz x16, #0, 170f\n" "ldr h11, [x13, #0x0]\n" "ldr h15, [x9, #0x0]\n" "ldr h19, [x27, #0x0]\n" "ldr h23, [x25, #0x0]\n" "b 170f\n" "158:" // Height 4: Partial accumulate: partial_4_16 "tbz x16, #2, 160f\n" "ldr d10, [x13], #0x8\n" "ldr d14, [x9], #0x8\n" "ldr d18, [x27], #0x8\n" "ldr d22, [x25], #0x8\n" "tbz x16, #1, 159f\n" "ld1 { v10.s }[2], [x13], #0x4\n" "ld1 { v14.s }[2], [x9], #0x4\n" "ld1 { v18.s }[2], [x27], #0x4\n" "ld1 { v22.s }[2], [x25], #0x4\n" "mov x19, #0x2c\n" "tbz x16, #0, 170f\n" "ld1 { v10.h }[6], [x13]\n" "ld1 { v14.h }[6], [x9]\n" "ld1 { v18.h }[6], [x27]\n" "ld1 { v22.h }[6], [x25]\n" "b 170f\n" "159:" // Height 4: Partial accumulate: partial_1_20 "mov x19, #0x28\n" "tbz x16, #0, 170f\n" "ld1 { v10.h }[4], [x13]\n" "ld1 { v14.h }[4], [x9]\n" "ld1 { v18.h }[4], [x27]\n" "ld1 { v22.h }[4], [x25]\n" "b 170f\n" "160:" // Height 4: Partial accumulate: partial_2_16 "tbz x16, #1, 161f\n" "ldr s10, [x13], #0x4\n" "ldr s14, [x9], #0x4\n" "ldr s18, [x27], #0x4\n" "ldr s22, [x25], #0x4\n" "mov x19, #0x24\n" "tbz x16, #0, 170f\n" "ld1 { v10.h }[2], [x13]\n" "ld1 { v14.h }[2], [x9]\n" "ld1 { v18.h 
}[2], [x27]\n" "ld1 { v22.h }[2], [x25]\n" "b 170f\n" "161:" // Height 4: Partial accumulate: partial_1_16 "mov x19, #0x20\n" "tbz x16, #0, 170f\n" "ldr h10, [x13, #0x0]\n" "ldr h14, [x9, #0x0]\n" "ldr h18, [x27, #0x0]\n" "ldr h22, [x25, #0x0]\n" "b 170f\n" "162:" // Height 4: Partial accumulate: partial_8_0 "tbz x16, #3, 166f\n" "ld1 { v8.8h }, [x13], #0x10\n" "ld1 { v12.8h }, [x9], #0x10\n" "ld1 { v16.8h }, [x27], #0x10\n" "ld1 { v20.8h }, [x25], #0x10\n" "tbz x16, #2, 164f\n" "ldr d9, [x13], #0x8\n" "ldr d13, [x9], #0x8\n" "ldr d17, [x27], #0x8\n" "ldr d21, [x25], #0x8\n" "tbz x16, #1, 163f\n" "mov x19, #0x1c\n" "ld1 { v9.s }[2], [x13], #0x4\n" "ld1 { v13.s }[2], [x9], #0x4\n" "ld1 { v17.s }[2], [x27], #0x4\n" "ld1 { v21.s }[2], [x25], #0x4\n" "tbz x16, #0, 170f\n" "ld1 { v9.h }[6], [x13]\n" "ld1 { v13.h }[6], [x9]\n" "ld1 { v17.h }[6], [x27]\n" "ld1 { v21.h }[6], [x25]\n" "b 170f\n" "163:" // Height 4: Partial accumulate: partial_1_12 "mov x19, #0x18\n" "tbz x16, #0, 170f\n" "ld1 { v9.h }[4], [x13]\n" "ld1 { v13.h }[4], [x9]\n" "ld1 { v17.h }[4], [x27]\n" "ld1 { v21.h }[4], [x25]\n" "b 170f\n" "164:" // Height 4: Partial accumulate: partial_2_8 "tbz x16, #1, 165f\n" "ldr s9, [x13], #0x4\n" "ldr s13, [x9], #0x4\n" "ldr s17, [x27], #0x4\n" "ldr s21, [x25], #0x4\n" "mov x19, #0x14\n" "tbz x16, #0, 170f\n" "ld1 { v9.h }[2], [x13]\n" "ld1 { v13.h }[2], [x9]\n" "ld1 { v17.h }[2], [x27]\n" "ld1 { v21.h }[2], [x25]\n" "b 170f\n" "165:" // Height 4: Partial accumulate: partial_1_8 "mov x19, #0x10\n" "tbz x16, #0, 170f\n" "ldr h9, [x13, #0x0]\n" "ldr h13, [x9, #0x0]\n" "ldr h17, [x27, #0x0]\n" "ldr h21, [x25, #0x0]\n" "b 170f\n" "166:" // Height 4: Partial accumulate: partial_4_0 "tbz x16, #2, 168f\n" "ldr d8, [x13], #0x8\n" "ldr d12, [x9], #0x8\n" "ldr d16, [x27], #0x8\n" "ldr d20, [x25], #0x8\n" "tbz x16, #1, 167f\n" "ld1 { v8.s }[2], [x13], #0x4\n" "ld1 { v12.s }[2], [x9], #0x4\n" "ld1 { v16.s }[2], [x27], #0x4\n" "ld1 { v20.s }[2], [x25], #0x4\n" "mov x19, #0xc\n" 
"tbz x16, #0, 170f\n" "ld1 { v8.h }[6], [x13]\n" "ld1 { v12.h }[6], [x9]\n" "ld1 { v16.h }[6], [x27]\n" "ld1 { v20.h }[6], [x25]\n" "b 170f\n" "167:" // Height 4: Partial accumulate: partial_1_4 "mov x19, #0x8\n" "tbz x16, #0, 170f\n" "ld1 { v8.h }[4], [x13]\n" "ld1 { v12.h }[4], [x9]\n" "ld1 { v16.h }[4], [x27]\n" "ld1 { v20.h }[4], [x25]\n" "b 170f\n" "168:" // Height 4: Partial accumulate: partial_2_0 "tbz x16, #1, 169f\n" "ldr s8, [x13], #0x4\n" "ldr s12, [x9], #0x4\n" "ldr s16, [x27], #0x4\n" "ldr s20, [x25], #0x4\n" "mov x19, #0x4\n" "tbz x16, #0, 170f\n" "ld1 { v8.h }[2], [x13]\n" "ld1 { v12.h }[2], [x9]\n" "ld1 { v16.h }[2], [x27]\n" "ld1 { v20.h }[2], [x25]\n" "b 170f\n" "169:" // Height 4: Partial accumulate: partial_1_0 "mov x19, #0x0\n" "ldr h8, [x13, #0x0]\n" "ldr h12, [x9, #0x0]\n" "ldr h16, [x27, #0x0]\n" "ldr h20, [x25, #0x0]\n" "170:" // Height 4: Partial accumulate: Done "sub x13, x13, x19\n" "sub x9, x9, x19\n" "sub x27, x27, x19\n" "sub x25, x25, x19\n" "b 173f\n" "171:" // Height 4: full accumulate "ldr q8, [x13, #0x0]\n" "ldr q9, [x13, #0x10]\n" "ldr q10, [x13, #0x20]\n" "ldr q11, [x13, #0x30]\n" "ldr q12, [x9, #0x0]\n" "ldr q13, [x9, #0x10]\n" "ldr q14, [x9, #0x20]\n" "ldr q15, [x9, #0x30]\n" "ldr q16, [x27, #0x0]\n" "ldr q17, [x27, #0x10]\n" "ldr q18, [x27, #0x20]\n" "ldr q19, [x27, #0x30]\n" "ldr q20, [x25, #0x0]\n" "ldr q21, [x25, #0x10]\n" "ldr q22, [x25, #0x20]\n" "ldr q23, [x25, #0x30]\n" "b 173f\n" "172:" // Height 4: no accumulate "movi v8.16b, #0x0\n" "movi v9.16b, #0x0\n" "movi v10.16b, #0x0\n" "movi v11.16b, #0x0\n" "movi v12.16b, #0x0\n" "movi v13.16b, #0x0\n" "movi v14.16b, #0x0\n" "movi v15.16b, #0x0\n" "movi v16.16b, #0x0\n" "movi v17.16b, #0x0\n" "movi v18.16b, #0x0\n" "movi v19.16b, #0x0\n" "movi v20.16b, #0x0\n" "movi v21.16b, #0x0\n" "movi v22.16b, #0x0\n" "movi v23.16b, #0x0\n" "173:" // Height 4: setup done "mov x12, #0x0\n" "174:" // Height 4: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" "ldr x19, 
[%x[args_ptr], %[offsetof_input_offset]]\n" "ldr w11, [x20, x12, LSL #0x2]\n" "tbz %x[flags], #3, 175f\n" "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n" "add x20, x20, x19, LSL #3\n" "ldr x10, [x20, #0x0]\n" "ldr x28, [x20, #0x8]\n" "ldr x26, [x20, #0x10]\n" "ldr x24, [x20, #0x18]\n" "cbnz x12, 176f\n" "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n" "add x10, x10, x19, LSL #1\n" "add x28, x28, x19, LSL #1\n" "add x26, x26, x19, LSL #1\n" "add x24, x24, x19, LSL #1\n" "b 176f\n" "175:" // Height 4: setup direct input "mov x10, %x[input_ptr]\n" "add x28, x10, x19, LSL #1\n" "add x26, x28, x19, LSL #1\n" "add x24, x26, x19, LSL #1\n" "176:" // Height 4: input setup done "cmp x11, #0x8\n" "blt 179f\n" "cmp x11, #0x10\n" "blt 178f\n" "177:" // Height 4: Multiply loop: Main loop head "ldr q0, [x10, #0x0]\n" "ldr q1, [x28, #0x0]\n" "ldr q2, [x26, #0x0]\n" "ldr q3, [x24, #0x0]\n" "ldr q6, [x15, #0x0]\n" "fmla v8.8h, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" "fmla v12.8h, v6.8h, v1.h[0]\n" "add x10, x10, #0x10\n" "prfm pldl1keep, [x10, #0x80]\n" "fmla v16.8h, v6.8h, v2.h[0]\n" "add x28, x28, #0x10\n" "fmla v20.8h, v6.8h, v3.h[0]\n" "prfm pldl1keep, [x28, #0x80]\n" "ldr q6, [x15, #0x20]\n" "fmla v9.8h, v7.8h, v0.h[0]\n" "add x26, x26, #0x10\n" "prfm pldl1keep, [x26, #0x80]\n" "fmla v13.8h, v7.8h, v1.h[0]\n" "add x24, x24, #0x10\n" "fmla v17.8h, v7.8h, v2.h[0]\n" "prfm pldl1keep, [x24, #0x80]\n" "sub x11, x11, #0x8\n" "fmla v21.8h, v7.8h, v3.h[0]\n" "ldr q7, [x15, #0x30]\n" "fmla v10.8h, v6.8h, v0.h[0]\n" "cmp x11, #0x10\n" "fmla v14.8h, v6.8h, v1.h[0]\n" "fmla v18.8h, v6.8h, v2.h[0]\n" "fmla v22.8h, v6.8h, v3.h[0]\n" "ldr q6, [x15, #0x40]\n" "fmla v11.8h, v7.8h, v0.h[0]\n" "fmla v15.8h, v7.8h, v1.h[0]\n" "fmla v19.8h, v7.8h, v2.h[0]\n" "fmla v23.8h, v7.8h, v3.h[0]\n" "ldr q7, [x15, #0x50]\n" "fmla v8.8h, v6.8h, v0.h[1]\n" "fmla v12.8h, v6.8h, v1.h[1]\n" "fmla v16.8h, v6.8h, v2.h[1]\n" "fmla v20.8h, v6.8h, v3.h[1]\n" "ldr q6, [x15, #0x60]\n" "fmla v9.8h, v7.8h, 
v0.h[1]\n" "fmla v13.8h, v7.8h, v1.h[1]\n" "fmla v17.8h, v7.8h, v2.h[1]\n" "fmla v21.8h, v7.8h, v3.h[1]\n" "ldr q7, [x15, #0x70]\n" "fmla v10.8h, v6.8h, v0.h[1]\n" "fmla v14.8h, v6.8h, v1.h[1]\n" "fmla v18.8h, v6.8h, v2.h[1]\n" "fmla v22.8h, v6.8h, v3.h[1]\n" "ldr q6, [x15, #0x80]\n" "fmla v11.8h, v7.8h, v0.h[1]\n" "fmla v15.8h, v7.8h, v1.h[1]\n" "fmla v19.8h, v7.8h, v2.h[1]\n" "fmla v23.8h, v7.8h, v3.h[1]\n" "ldr q7, [x15, #0x90]\n" "fmla v8.8h, v6.8h, v0.h[2]\n" "fmla v12.8h, v6.8h, v1.h[2]\n" "fmla v16.8h, v6.8h, v2.h[2]\n" "fmla v20.8h, v6.8h, v3.h[2]\n" "ldr q6, [x15, #0xa0]\n" "fmla v9.8h, v7.8h, v0.h[2]\n" "fmla v13.8h, v7.8h, v1.h[2]\n" "fmla v17.8h, v7.8h, v2.h[2]\n" "fmla v21.8h, v7.8h, v3.h[2]\n" "ldr q7, [x15, #0xb0]\n" "fmla v10.8h, v6.8h, v0.h[2]\n" "fmla v14.8h, v6.8h, v1.h[2]\n" "fmla v18.8h, v6.8h, v2.h[2]\n" "fmla v22.8h, v6.8h, v3.h[2]\n" "ldr q6, [x15, #0xc0]\n" "fmla v11.8h, v7.8h, v0.h[2]\n" "fmla v15.8h, v7.8h, v1.h[2]\n" "fmla v19.8h, v7.8h, v2.h[2]\n" "fmla v23.8h, v7.8h, v3.h[2]\n" "ldr q7, [x15, #0xd0]\n" "fmla v8.8h, v6.8h, v0.h[3]\n" "fmla v12.8h, v6.8h, v1.h[3]\n" "fmla v16.8h, v6.8h, v2.h[3]\n" "fmla v20.8h, v6.8h, v3.h[3]\n" "ldr q6, [x15, #0xe0]\n" "fmla v9.8h, v7.8h, v0.h[3]\n" "fmla v13.8h, v7.8h, v1.h[3]\n" "fmla v17.8h, v7.8h, v2.h[3]\n" "fmla v21.8h, v7.8h, v3.h[3]\n" "ldr q7, [x15, #0xf0]\n" "fmla v10.8h, v6.8h, v0.h[3]\n" "fmla v14.8h, v6.8h, v1.h[3]\n" "fmla v18.8h, v6.8h, v2.h[3]\n" "fmla v22.8h, v6.8h, v3.h[3]\n" "ldr q6, [x15, #0x100]\n" "fmla v11.8h, v7.8h, v0.h[3]\n" "fmla v15.8h, v7.8h, v1.h[3]\n" "fmla v19.8h, v7.8h, v2.h[3]\n" "fmla v23.8h, v7.8h, v3.h[3]\n" "ldr q7, [x15, #0x110]\n" "fmla v8.8h, v6.8h, v0.h[4]\n" "fmla v12.8h, v6.8h, v1.h[4]\n" "fmla v16.8h, v6.8h, v2.h[4]\n" "fmla v20.8h, v6.8h, v3.h[4]\n" "ldr q6, [x15, #0x120]\n" "fmla v9.8h, v7.8h, v0.h[4]\n" "fmla v13.8h, v7.8h, v1.h[4]\n" "fmla v17.8h, v7.8h, v2.h[4]\n" "fmla v21.8h, v7.8h, v3.h[4]\n" "ldr q7, [x15, #0x130]\n" "fmla v10.8h, v6.8h, v0.h[4]\n" 
"fmla v14.8h, v6.8h, v1.h[4]\n" "fmla v18.8h, v6.8h, v2.h[4]\n" "fmla v22.8h, v6.8h, v3.h[4]\n" "ldr q6, [x15, #0x140]\n" "fmla v11.8h, v7.8h, v0.h[4]\n" "fmla v15.8h, v7.8h, v1.h[4]\n" "fmla v19.8h, v7.8h, v2.h[4]\n" "fmla v23.8h, v7.8h, v3.h[4]\n" "ldr q7, [x15, #0x150]\n" "fmla v8.8h, v6.8h, v0.h[5]\n" "fmla v12.8h, v6.8h, v1.h[5]\n" "fmla v16.8h, v6.8h, v2.h[5]\n" "fmla v20.8h, v6.8h, v3.h[5]\n" "ldr q6, [x15, #0x160]\n" "fmla v9.8h, v7.8h, v0.h[5]\n" "fmla v13.8h, v7.8h, v1.h[5]\n" "fmla v17.8h, v7.8h, v2.h[5]\n" "fmla v21.8h, v7.8h, v3.h[5]\n" "ldr q7, [x15, #0x170]\n" "fmla v10.8h, v6.8h, v0.h[5]\n" "fmla v14.8h, v6.8h, v1.h[5]\n" "fmla v18.8h, v6.8h, v2.h[5]\n" "fmla v22.8h, v6.8h, v3.h[5]\n" "ldr q6, [x15, #0x180]\n" "fmla v11.8h, v7.8h, v0.h[5]\n" "fmla v15.8h, v7.8h, v1.h[5]\n" "fmla v19.8h, v7.8h, v2.h[5]\n" "fmla v23.8h, v7.8h, v3.h[5]\n" "ldr q7, [x15, #0x190]\n" "fmla v8.8h, v6.8h, v0.h[6]\n" "fmla v12.8h, v6.8h, v1.h[6]\n" "fmla v16.8h, v6.8h, v2.h[6]\n" "fmla v20.8h, v6.8h, v3.h[6]\n" "ldr q6, [x15, #0x1a0]\n" "fmla v9.8h, v7.8h, v0.h[6]\n" "fmla v13.8h, v7.8h, v1.h[6]\n" "fmla v17.8h, v7.8h, v2.h[6]\n" "fmla v21.8h, v7.8h, v3.h[6]\n" "ldr q7, [x15, #0x1b0]\n" "fmla v10.8h, v6.8h, v0.h[6]\n" "fmla v14.8h, v6.8h, v1.h[6]\n" "fmla v18.8h, v6.8h, v2.h[6]\n" "fmla v22.8h, v6.8h, v3.h[6]\n" "ldr q6, [x15, #0x1c0]\n" "fmla v11.8h, v7.8h, v0.h[6]\n" "fmla v15.8h, v7.8h, v1.h[6]\n" "fmla v19.8h, v7.8h, v2.h[6]\n" "fmla v23.8h, v7.8h, v3.h[6]\n" "ldr q7, [x15, #0x1d0]\n" "fmla v8.8h, v6.8h, v0.h[7]\n" "fmla v12.8h, v6.8h, v1.h[7]\n" "fmla v16.8h, v6.8h, v2.h[7]\n" "fmla v20.8h, v6.8h, v3.h[7]\n" "ldr q6, [x15, #0x1e0]\n" "fmla v9.8h, v7.8h, v0.h[7]\n" "fmla v13.8h, v7.8h, v1.h[7]\n" "fmla v17.8h, v7.8h, v2.h[7]\n" "fmla v21.8h, v7.8h, v3.h[7]\n" "ldr q7, [x15, #0x1f0]\n" "add x15, x15, #0x200\n" "fmla v10.8h, v6.8h, v0.h[7]\n" "fmla v14.8h, v6.8h, v1.h[7]\n" "fmla v18.8h, v6.8h, v2.h[7]\n" "fmla v22.8h, v6.8h, v3.h[7]\n" "fmla v11.8h, v7.8h, v0.h[7]\n" 
"fmla v15.8h, v7.8h, v1.h[7]\n" "fmla v19.8h, v7.8h, v2.h[7]\n" "fmla v23.8h, v7.8h, v3.h[7]\n" "bge 177b\n" "178:" // Height 4: Multiply loop: Single iteration only "sub x11, x11, #0x8\n" "ldr q0, [x10, #0x0]\n" "ldr q1, [x28, #0x0]\n" "ldr q2, [x26, #0x0]\n" "ldr q3, [x24, #0x0]\n" "ldr q6, [x15, #0x0]\n" "fmla v8.8h, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" "fmla v12.8h, v6.8h, v1.h[0]\n" "add x10, x10, #0x10\n" "prfm pldl1keep, [x10, #0x80]\n" "fmla v16.8h, v6.8h, v2.h[0]\n" "add x28, x28, #0x10\n" "fmla v20.8h, v6.8h, v3.h[0]\n" "prfm pldl1keep, [x28, #0x80]\n" "ldr q6, [x15, #0x20]\n" "fmla v9.8h, v7.8h, v0.h[0]\n" "add x26, x26, #0x10\n" "prfm pldl1keep, [x26, #0x80]\n" "fmla v13.8h, v7.8h, v1.h[0]\n" "add x24, x24, #0x10\n" "fmla v17.8h, v7.8h, v2.h[0]\n" "prfm pldl1keep, [x24, #0x80]\n" "fmla v21.8h, v7.8h, v3.h[0]\n" "ldr q7, [x15, #0x30]\n" "fmla v10.8h, v6.8h, v0.h[0]\n" "fmla v14.8h, v6.8h, v1.h[0]\n" "fmla v18.8h, v6.8h, v2.h[0]\n" "fmla v22.8h, v6.8h, v3.h[0]\n" "ldr q6, [x15, #0x40]\n" "fmla v11.8h, v7.8h, v0.h[0]\n" "fmla v15.8h, v7.8h, v1.h[0]\n" "fmla v19.8h, v7.8h, v2.h[0]\n" "fmla v23.8h, v7.8h, v3.h[0]\n" "ldr q7, [x15, #0x50]\n" "fmla v8.8h, v6.8h, v0.h[1]\n" "fmla v12.8h, v6.8h, v1.h[1]\n" "fmla v16.8h, v6.8h, v2.h[1]\n" "fmla v20.8h, v6.8h, v3.h[1]\n" "ldr q6, [x15, #0x60]\n" "fmla v9.8h, v7.8h, v0.h[1]\n" "fmla v13.8h, v7.8h, v1.h[1]\n" "fmla v17.8h, v7.8h, v2.h[1]\n" "fmla v21.8h, v7.8h, v3.h[1]\n" "ldr q7, [x15, #0x70]\n" "fmla v10.8h, v6.8h, v0.h[1]\n" "fmla v14.8h, v6.8h, v1.h[1]\n" "fmla v18.8h, v6.8h, v2.h[1]\n" "fmla v22.8h, v6.8h, v3.h[1]\n" "ldr q6, [x15, #0x80]\n" "fmla v11.8h, v7.8h, v0.h[1]\n" "fmla v15.8h, v7.8h, v1.h[1]\n" "fmla v19.8h, v7.8h, v2.h[1]\n" "fmla v23.8h, v7.8h, v3.h[1]\n" "ldr q7, [x15, #0x90]\n" "fmla v8.8h, v6.8h, v0.h[2]\n" "fmla v12.8h, v6.8h, v1.h[2]\n" "fmla v16.8h, v6.8h, v2.h[2]\n" "fmla v20.8h, v6.8h, v3.h[2]\n" "ldr q6, [x15, #0xa0]\n" "fmla v9.8h, v7.8h, v0.h[2]\n" "fmla v13.8h, v7.8h, v1.h[2]\n" 
"fmla v17.8h, v7.8h, v2.h[2]\n" "fmla v21.8h, v7.8h, v3.h[2]\n" "ldr q7, [x15, #0xb0]\n" "fmla v10.8h, v6.8h, v0.h[2]\n" "fmla v14.8h, v6.8h, v1.h[2]\n" "fmla v18.8h, v6.8h, v2.h[2]\n" "fmla v22.8h, v6.8h, v3.h[2]\n" "ldr q6, [x15, #0xc0]\n" "fmla v11.8h, v7.8h, v0.h[2]\n" "fmla v15.8h, v7.8h, v1.h[2]\n" "fmla v19.8h, v7.8h, v2.h[2]\n" "fmla v23.8h, v7.8h, v3.h[2]\n" "ldr q7, [x15, #0xd0]\n" "fmla v8.8h, v6.8h, v0.h[3]\n" "fmla v12.8h, v6.8h, v1.h[3]\n" "fmla v16.8h, v6.8h, v2.h[3]\n" "fmla v20.8h, v6.8h, v3.h[3]\n" "ldr q6, [x15, #0xe0]\n" "fmla v9.8h, v7.8h, v0.h[3]\n" "fmla v13.8h, v7.8h, v1.h[3]\n" "fmla v17.8h, v7.8h, v2.h[3]\n" "fmla v21.8h, v7.8h, v3.h[3]\n" "ldr q7, [x15, #0xf0]\n" "fmla v10.8h, v6.8h, v0.h[3]\n" "fmla v14.8h, v6.8h, v1.h[3]\n" "fmla v18.8h, v6.8h, v2.h[3]\n" "fmla v22.8h, v6.8h, v3.h[3]\n" "ldr q6, [x15, #0x100]\n" "fmla v11.8h, v7.8h, v0.h[3]\n" "fmla v15.8h, v7.8h, v1.h[3]\n" "fmla v19.8h, v7.8h, v2.h[3]\n" "fmla v23.8h, v7.8h, v3.h[3]\n" "ldr q7, [x15, #0x110]\n" "fmla v8.8h, v6.8h, v0.h[4]\n" "fmla v12.8h, v6.8h, v1.h[4]\n" "fmla v16.8h, v6.8h, v2.h[4]\n" "fmla v20.8h, v6.8h, v3.h[4]\n" "ldr q6, [x15, #0x120]\n" "fmla v9.8h, v7.8h, v0.h[4]\n" "fmla v13.8h, v7.8h, v1.h[4]\n" "fmla v17.8h, v7.8h, v2.h[4]\n" "fmla v21.8h, v7.8h, v3.h[4]\n" "ldr q7, [x15, #0x130]\n" "fmla v10.8h, v6.8h, v0.h[4]\n" "fmla v14.8h, v6.8h, v1.h[4]\n" "fmla v18.8h, v6.8h, v2.h[4]\n" "fmla v22.8h, v6.8h, v3.h[4]\n" "ldr q6, [x15, #0x140]\n" "fmla v11.8h, v7.8h, v0.h[4]\n" "fmla v15.8h, v7.8h, v1.h[4]\n" "fmla v19.8h, v7.8h, v2.h[4]\n" "fmla v23.8h, v7.8h, v3.h[4]\n" "ldr q7, [x15, #0x150]\n" "fmla v8.8h, v6.8h, v0.h[5]\n" "fmla v12.8h, v6.8h, v1.h[5]\n" "fmla v16.8h, v6.8h, v2.h[5]\n" "fmla v20.8h, v6.8h, v3.h[5]\n" "ldr q6, [x15, #0x160]\n" "fmla v9.8h, v7.8h, v0.h[5]\n" "fmla v13.8h, v7.8h, v1.h[5]\n" "fmla v17.8h, v7.8h, v2.h[5]\n" "fmla v21.8h, v7.8h, v3.h[5]\n" "ldr q7, [x15, #0x170]\n" "fmla v10.8h, v6.8h, v0.h[5]\n" "fmla v14.8h, v6.8h, v1.h[5]\n" "fmla 
v18.8h, v6.8h, v2.h[5]\n" "fmla v22.8h, v6.8h, v3.h[5]\n" "ldr q6, [x15, #0x180]\n" "fmla v11.8h, v7.8h, v0.h[5]\n" "fmla v15.8h, v7.8h, v1.h[5]\n" "fmla v19.8h, v7.8h, v2.h[5]\n" "fmla v23.8h, v7.8h, v3.h[5]\n" "ldr q7, [x15, #0x190]\n" "fmla v8.8h, v6.8h, v0.h[6]\n" "fmla v12.8h, v6.8h, v1.h[6]\n" "fmla v16.8h, v6.8h, v2.h[6]\n" "fmla v20.8h, v6.8h, v3.h[6]\n" "ldr q6, [x15, #0x1a0]\n" "fmla v9.8h, v7.8h, v0.h[6]\n" "fmla v13.8h, v7.8h, v1.h[6]\n" "fmla v17.8h, v7.8h, v2.h[6]\n" "fmla v21.8h, v7.8h, v3.h[6]\n" "ldr q7, [x15, #0x1b0]\n" "fmla v10.8h, v6.8h, v0.h[6]\n" "fmla v14.8h, v6.8h, v1.h[6]\n" "fmla v18.8h, v6.8h, v2.h[6]\n" "fmla v22.8h, v6.8h, v3.h[6]\n" "ldr q6, [x15, #0x1c0]\n" "fmla v11.8h, v7.8h, v0.h[6]\n" "fmla v15.8h, v7.8h, v1.h[6]\n" "fmla v19.8h, v7.8h, v2.h[6]\n" "fmla v23.8h, v7.8h, v3.h[6]\n" "ldr q7, [x15, #0x1d0]\n" "fmla v8.8h, v6.8h, v0.h[7]\n" "fmla v12.8h, v6.8h, v1.h[7]\n" "fmla v16.8h, v6.8h, v2.h[7]\n" "fmla v20.8h, v6.8h, v3.h[7]\n" "ldr q6, [x15, #0x1e0]\n" "fmla v9.8h, v7.8h, v0.h[7]\n" "fmla v13.8h, v7.8h, v1.h[7]\n" "fmla v17.8h, v7.8h, v2.h[7]\n" "fmla v21.8h, v7.8h, v3.h[7]\n" "ldr q7, [x15, #0x1f0]\n" "add x15, x15, #0x200\n" "fmla v10.8h, v6.8h, v0.h[7]\n" "fmla v14.8h, v6.8h, v1.h[7]\n" "fmla v18.8h, v6.8h, v2.h[7]\n" "fmla v22.8h, v6.8h, v3.h[7]\n" "fmla v11.8h, v7.8h, v0.h[7]\n" "fmla v15.8h, v7.8h, v1.h[7]\n" "fmla v19.8h, v7.8h, v2.h[7]\n" "fmla v23.8h, v7.8h, v3.h[7]\n" "179:" // Height 4: Multiply loop: Main loop skip "cbz x11, 181f\n" "180:" // Height 4: Multiply loop: Odd block loop "ldr h0, [x10], #0x2\n" "ldr h1, [x28], #0x2\n" "ldr h2, [x26], #0x2\n" "ldr h3, [x24], #0x2\n" "ldr q6, [x15, #0x0]\n" "fmla v8.8h, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" "fmla v12.8h, v6.8h, v1.h[0]\n" "sub x11, x11, #0x1\n" "fmla v16.8h, v6.8h, v2.h[0]\n" "fmla v20.8h, v6.8h, v3.h[0]\n" "ldr q6, [x15, #0x20]\n" "fmla v9.8h, v7.8h, v0.h[0]\n" "fmla v13.8h, v7.8h, v1.h[0]\n" "fmla v17.8h, v7.8h, v2.h[0]\n" "fmla v21.8h, v7.8h, 
v3.h[0]\n" "ldr q7, [x15, #0x30]\n" "fmla v10.8h, v6.8h, v0.h[0]\n" "add x15, x15, #0x40\n" "fmla v14.8h, v6.8h, v1.h[0]\n" "fmla v18.8h, v6.8h, v2.h[0]\n" "fmla v22.8h, v6.8h, v3.h[0]\n" "fmla v11.8h, v7.8h, v0.h[0]\n" "fmla v15.8h, v7.8h, v1.h[0]\n" "fmla v19.8h, v7.8h, v2.h[0]\n" "fmla v23.8h, v7.8h, v3.h[0]\n" "cbnz x11, 180b\n" "181:" // Height 4: Multiply loop: No odd multiplies "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "add x12, x12, #0x1\n" "cmp x12, x19\n" "bne 174b\n" "prfm pstl1keep, [x13, #0x0]\n" "prfm pstl1keep, [x9, #0x0]\n" "prfm pstl1keep, [x27, #0x0]\n" "prfm pstl1keep, [x25, #0x0]\n" "tbz %x[flags], #1, 182f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1r { v1.8h }, [x19]\n" "add x19, %x[args_ptr], %[offset_max]\n" "ld1r { v0.8h }, [x19]\n" "fmin v8.8h, v8.8h, v0.8h\n" "fmin v9.8h, v9.8h, v0.8h\n" "fmin v10.8h, v10.8h, v0.8h\n" "fmin v11.8h, v11.8h, v0.8h\n" "fmax v8.8h, v8.8h, v1.8h\n" "fmax v9.8h, v9.8h, v1.8h\n" "fmax v10.8h, v10.8h, v1.8h\n" "fmax v11.8h, v11.8h, v1.8h\n" "fmin v12.8h, v12.8h, v0.8h\n" "fmin v13.8h, v13.8h, v0.8h\n" "fmin v14.8h, v14.8h, v0.8h\n" "fmax v12.8h, v12.8h, v1.8h\n" "fmax v13.8h, v13.8h, v1.8h\n" "fmax v14.8h, v14.8h, v1.8h\n" "fmin v15.8h, v15.8h, v0.8h\n" "fmin v16.8h, v16.8h, v0.8h\n" "fmin v17.8h, v17.8h, v0.8h\n" "fmax v15.8h, v15.8h, v1.8h\n" "fmax v16.8h, v16.8h, v1.8h\n" "fmax v17.8h, v17.8h, v1.8h\n" "fmin v18.8h, v18.8h, v0.8h\n" "fmin v19.8h, v19.8h, v0.8h\n" "fmin v20.8h, v20.8h, v0.8h\n" "fmax v18.8h, v18.8h, v1.8h\n" "fmax v19.8h, v19.8h, v1.8h\n" "fmax v20.8h, v20.8h, v1.8h\n" "fmin v21.8h, v21.8h, v0.8h\n" "fmin v22.8h, v22.8h, v0.8h\n" "fmin v23.8h, v23.8h, v0.8h\n" "fmax v21.8h, v21.8h, v1.8h\n" "fmax v22.8h, v22.8h, v1.8h\n" "fmax v23.8h, v23.8h, v1.8h\n" "182:" // Height 4: No activation "cmp x16, #0x20\n" "bge 199f\n" "tbz x16, #4, 190f\n" "st1 { v8.8h }, [x13], #0x10\n" "st1 { v9.8h }, [x13], #0x10\n" "st1 { v12.8h }, [x9], #0x10\n" "st1 { v13.8h }, [x9], #0x10\n" "st1 { v16.8h }, 
[x27], #0x10\n" "st1 { v17.8h }, [x27], #0x10\n" "st1 { v20.8h }, [x25], #0x10\n" "st1 { v21.8h }, [x25], #0x10\n" "tbz x16, #3, 186f\n" "st1 { v10.8h }, [x13], #0x10\n" "st1 { v14.8h }, [x9], #0x10\n" "st1 { v18.8h }, [x27], #0x10\n" "st1 { v22.8h }, [x25], #0x10\n" "tbz x16, #2, 184f\n" "str d11, [x13], #0x8\n" "str d15, [x9], #0x8\n" "str d19, [x27], #0x8\n" "str d23, [x25], #0x8\n" "tbz x16, #1, 183f\n" "st1 { v11.s }[2], [x13], #0x4\n" "st1 { v15.s }[2], [x9], #0x4\n" "st1 { v19.s }[2], [x27], #0x4\n" "st1 { v23.s }[2], [x25], #0x4\n" "tbz x16, #0, 198f\n" "st1 { v11.h }[6], [x13]\n" "st1 { v15.h }[6], [x9]\n" "st1 { v19.h }[6], [x27]\n" "st1 { v23.h }[6], [x25]\n" "b 198f\n" "183:" // Height 4: Partial direct writeback: partial_1_28 "tbz x16, #0, 198f\n" "st1 { v11.h }[4], [x13]\n" "st1 { v15.h }[4], [x9]\n" "st1 { v19.h }[4], [x27]\n" "st1 { v23.h }[4], [x25]\n" "b 198f\n" "184:" // Height 4: Partial direct writeback: partial_2_24 "tbz x16, #1, 185f\n" "str s11, [x13], #0x4\n" "str s15, [x9], #0x4\n" "str s19, [x27], #0x4\n" "str s23, [x25], #0x4\n" "tbz x16, #0, 198f\n" "st1 { v11.h }[2], [x13]\n" "st1 { v15.h }[2], [x9]\n" "st1 { v19.h }[2], [x27]\n" "st1 { v23.h }[2], [x25]\n" "b 198f\n" "185:" // Height 4: Partial direct writeback: partial_1_24 "tbz x16, #0, 198f\n" "str h11, [x13, #0x0]\n" "str h15, [x9, #0x0]\n" "str h19, [x27, #0x0]\n" "str h23, [x25, #0x0]\n" "b 198f\n" "186:" // Height 4: Partial direct writeback: partial_4_16 "tbz x16, #2, 188f\n" "str d10, [x13], #0x8\n" "str d14, [x9], #0x8\n" "str d18, [x27], #0x8\n" "str d22, [x25], #0x8\n" "tbz x16, #1, 187f\n" "st1 { v10.s }[2], [x13], #0x4\n" "st1 { v14.s }[2], [x9], #0x4\n" "st1 { v18.s }[2], [x27], #0x4\n" "st1 { v22.s }[2], [x25], #0x4\n" "tbz x16, #0, 198f\n" "st1 { v10.h }[6], [x13]\n" "st1 { v14.h }[6], [x9]\n" "st1 { v18.h }[6], [x27]\n" "st1 { v22.h }[6], [x25]\n" "b 198f\n" "187:" // Height 4: Partial direct writeback: partial_1_20 "tbz x16, #0, 198f\n" "st1 { v10.h }[4], [x13]\n" 
"st1 { v14.h }[4], [x9]\n" "st1 { v18.h }[4], [x27]\n" "st1 { v22.h }[4], [x25]\n" "b 198f\n" "188:" // Height 4: Partial direct writeback: partial_2_16 "tbz x16, #1, 189f\n" "str s10, [x13], #0x4\n" "str s14, [x9], #0x4\n" "str s18, [x27], #0x4\n" "str s22, [x25], #0x4\n" "tbz x16, #0, 198f\n" "st1 { v10.h }[2], [x13]\n" "st1 { v14.h }[2], [x9]\n" "st1 { v18.h }[2], [x27]\n" "st1 { v22.h }[2], [x25]\n" "b 198f\n" "189:" // Height 4: Partial direct writeback: partial_1_16 "tbz x16, #0, 198f\n" "str h10, [x13, #0x0]\n" "str h14, [x9, #0x0]\n" "str h18, [x27, #0x0]\n" "str h22, [x25, #0x0]\n" "b 198f\n" "190:" // Height 4: Partial direct writeback: partial_8_0 "tbz x16, #3, 194f\n" "st1 { v8.8h }, [x13], #0x10\n" "st1 { v12.8h }, [x9], #0x10\n" "st1 { v16.8h }, [x27], #0x10\n" "st1 { v20.8h }, [x25], #0x10\n" "tbz x16, #2, 192f\n" "str d9, [x13], #0x8\n" "str d13, [x9], #0x8\n" "str d17, [x27], #0x8\n" "str d21, [x25], #0x8\n" "tbz x16, #1, 191f\n" "st1 { v9.s }[2], [x13], #0x4\n" "st1 { v13.s }[2], [x9], #0x4\n" "st1 { v17.s }[2], [x27], #0x4\n" "st1 { v21.s }[2], [x25], #0x4\n" "tbz x16, #0, 198f\n" "st1 { v9.h }[6], [x13]\n" "st1 { v13.h }[6], [x9]\n" "st1 { v17.h }[6], [x27]\n" "st1 { v21.h }[6], [x25]\n" "b 198f\n" "191:" // Height 4: Partial direct writeback: partial_1_12 "tbz x16, #0, 198f\n" "st1 { v9.h }[4], [x13]\n" "st1 { v13.h }[4], [x9]\n" "st1 { v17.h }[4], [x27]\n" "st1 { v21.h }[4], [x25]\n" "b 198f\n" "192:" // Height 4: Partial direct writeback: partial_2_8 "tbz x16, #1, 193f\n" "str s9, [x13], #0x4\n" "str s13, [x9], #0x4\n" "str s17, [x27], #0x4\n" "str s21, [x25], #0x4\n" "tbz x16, #0, 198f\n" "st1 { v9.h }[2], [x13]\n" "st1 { v13.h }[2], [x9]\n" "st1 { v17.h }[2], [x27]\n" "st1 { v21.h }[2], [x25]\n" "b 198f\n" "193:" // Height 4: Partial direct writeback: partial_1_8 "tbz x16, #0, 198f\n" "str h9, [x13, #0x0]\n" "str h13, [x9, #0x0]\n" "str h17, [x27, #0x0]\n" "str h21, [x25, #0x0]\n" "b 198f\n" "194:" // Height 4: Partial direct writeback: 
partial_4_0 "tbz x16, #2, 196f\n" "str d8, [x13], #0x8\n" "str d12, [x9], #0x8\n" "str d16, [x27], #0x8\n" "str d20, [x25], #0x8\n" "tbz x16, #1, 195f\n" "st1 { v8.s }[2], [x13], #0x4\n" "st1 { v12.s }[2], [x9], #0x4\n" "st1 { v16.s }[2], [x27], #0x4\n" "st1 { v20.s }[2], [x25], #0x4\n" "tbz x16, #0, 198f\n" "st1 { v8.h }[6], [x13]\n" "st1 { v12.h }[6], [x9]\n" "st1 { v16.h }[6], [x27]\n" "st1 { v20.h }[6], [x25]\n" "b 198f\n" "195:" // Height 4: Partial direct writeback: partial_1_4 "tbz x16, #0, 198f\n" "st1 { v8.h }[4], [x13]\n" "st1 { v12.h }[4], [x9]\n" "st1 { v16.h }[4], [x27]\n" "st1 { v20.h }[4], [x25]\n" "b 198f\n" "196:" // Height 4: Partial direct writeback: partial_2_0 "tbz x16, #1, 197f\n" "str s8, [x13], #0x4\n" "str s12, [x9], #0x4\n" "str s16, [x27], #0x4\n" "str s20, [x25], #0x4\n" "tbz x16, #0, 198f\n" "st1 { v8.h }[2], [x13]\n" "st1 { v12.h }[2], [x9]\n" "st1 { v16.h }[2], [x27]\n" "st1 { v20.h }[2], [x25]\n" "b 198f\n" "197:" // Height 4: Partial direct writeback: partial_1_0 "str h8, [x13, #0x0]\n" "str h12, [x9, #0x0]\n" "str h16, [x27, #0x0]\n" "str h20, [x25, #0x0]\n" "198:" // Height 4: Partial direct writeback: Done "b 200f\n" "199:" // Height 4: Full writeback "str q8, [x13, #0x0]\n" "str q9, [x13, #0x10]\n" "str q10, [x13, #0x20]\n" "str q11, [x13, #0x30]\n" "str q12, [x9, #0x0]\n" "str q13, [x9, #0x10]\n" "str q14, [x9, #0x20]\n" "str q15, [x9, #0x30]\n" "str q16, [x27, #0x0]\n" "str q17, [x27, #0x10]\n" "str q18, [x27, #0x20]\n" "str q19, [x27, #0x30]\n" "str q20, [x25, #0x0]\n" "str q21, [x25, #0x10]\n" "str q22, [x25, #0x20]\n" "str q23, [x25, #0x30]\n" "add x13, x13, #0x40\n" "add x9, x9, #0x40\n" "add x27, x27, #0x40\n" "add x25, x25, #0x40\n" "200:" // Height 4: Writeback done "subs x16, x16, #0x20\n" "bgt 153b\n" "b 302f\n" "201:" // Height 5 "ldr x16, [%x[args_ptr], %[offsetof_N]]\n" "mov x14, %x[bias]\n" "ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n" "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n" "tbz %x[flags], #2, 
202f\n" "ldr x13, [%x[output_ptr], #0x0]\n" "add x13, x13, x19, LSL #1\n" "ldr x9, [%x[output_ptr], #0x8]\n" "ldr x27, [%x[output_ptr], #0x10]\n" "add x9, x9, x19, LSL #1\n" "ldr x25, [%x[output_ptr], #0x18]\n" "ldr x23, [%x[output_ptr], #0x20]\n" "add x27, x27, x19, LSL #1\n" "add x25, x25, x19, LSL #1\n" "add x23, x23, x19, LSL #1\n" "b 203f\n" "202:" // Height 5: setup direct output "mov x13, %x[output_ptr]\n" "add x9, x13, x19, LSL #1\n" "add x27, x9, x19, LSL #1\n" "add x25, x27, x19, LSL #1\n" "add x23, x25, x19, LSL #1\n" "203:" // Height 5: Column loop "cbz x14, 204f\n" "ldr q8, [x14, #0x0]\n" "mov v12.16b, v8.16b\n" "ldr q9, [x14, #0x10]\n" "mov v16.16b, v8.16b\n" "ldr q10, [x14, #0x20]\n" "mov v20.16b, v8.16b\n" "ldr q11, [x14, #0x30]\n" "mov v24.16b, v8.16b\n" "add x14, x14, #0x40\n" "mov v13.16b, v9.16b\n" "mov v17.16b, v9.16b\n" "mov v14.16b, v10.16b\n" "mov v15.16b, v11.16b\n" "mov v18.16b, v10.16b\n" "mov v19.16b, v11.16b\n" "mov v21.16b, v9.16b\n" "mov v22.16b, v10.16b\n" "mov v23.16b, v11.16b\n" "mov v25.16b, v9.16b\n" "mov v26.16b, v10.16b\n" "mov v27.16b, v11.16b\n" "b 223f\n" "204:" // Height 5: no bias "tbz %x[flags], #0, 222f\n" "cmp x16, #0x20\n" "bge 221f\n" "tbz x16, #4, 212f\n" "ld1 { v8.8h }, [x13], #0x10\n" "ld1 { v12.8h }, [x9], #0x10\n" "ld1 { v16.8h }, [x27], #0x10\n" "ld1 { v20.8h }, [x25], #0x10\n" "ld1 { v24.8h }, [x23], #0x10\n" "ld1 { v9.8h }, [x13], #0x10\n" "ld1 { v13.8h }, [x9], #0x10\n" "ld1 { v17.8h }, [x27], #0x10\n" "ld1 { v21.8h }, [x25], #0x10\n" "ld1 { v25.8h }, [x23], #0x10\n" "tbz x16, #3, 208f\n" "ld1 { v10.8h }, [x13], #0x10\n" "ld1 { v14.8h }, [x9], #0x10\n" "ld1 { v18.8h }, [x27], #0x10\n" "ld1 { v22.8h }, [x25], #0x10\n" "ld1 { v26.8h }, [x23], #0x10\n" "tbz x16, #2, 206f\n" "ldr d11, [x13], #0x8\n" "ldr d15, [x9], #0x8\n" "ldr d19, [x27], #0x8\n" "ldr d23, [x25], #0x8\n" "ldr d27, [x23], #0x8\n" "tbz x16, #1, 205f\n" "ld1 { v11.s }[2], [x13], #0x4\n" "ld1 { v15.s }[2], [x9], #0x4\n" "ld1 { v19.s }[2], [x27], 
#0x4\n" "ld1 { v23.s }[2], [x25], #0x4\n" "ld1 { v27.s }[2], [x23], #0x4\n" "mov x19, #0x3c\n" "tbz x16, #0, 220f\n" "ld1 { v11.h }[6], [x13]\n" "ld1 { v15.h }[6], [x9]\n" "ld1 { v19.h }[6], [x27]\n" "ld1 { v23.h }[6], [x25]\n" "ld1 { v27.h }[6], [x23]\n" "b 220f\n" "205:" // Height 5: Partial accumulate: partial_1_28 "mov x19, #0x38\n" "tbz x16, #0, 220f\n" "ld1 { v11.h }[4], [x13]\n" "ld1 { v15.h }[4], [x9]\n" "ld1 { v19.h }[4], [x27]\n" "ld1 { v23.h }[4], [x25]\n" "ld1 { v27.h }[4], [x23]\n" "b 220f\n" "206:" // Height 5: Partial accumulate: partial_2_24 "tbz x16, #1, 207f\n" "ldr s11, [x13], #0x4\n" "ldr s15, [x9], #0x4\n" "ldr s19, [x27], #0x4\n" "ldr s23, [x25], #0x4\n" "ldr s27, [x23], #0x4\n" "mov x19, #0x34\n" "tbz x16, #0, 220f\n" "ld1 { v11.h }[2], [x13]\n" "ld1 { v15.h }[2], [x9]\n" "ld1 { v19.h }[2], [x27]\n" "ld1 { v23.h }[2], [x25]\n" "ld1 { v27.h }[2], [x23]\n" "b 220f\n" "207:" // Height 5: Partial accumulate: partial_1_24 "mov x19, #0x30\n" "tbz x16, #0, 220f\n" "ldr h11, [x13, #0x0]\n" "ldr h15, [x9, #0x0]\n" "ldr h19, [x27, #0x0]\n" "ldr h23, [x25, #0x0]\n" "ldr h27, [x23, #0x0]\n" "b 220f\n" "208:" // Height 5: Partial accumulate: partial_4_16 "tbz x16, #2, 210f\n" "ldr d10, [x13], #0x8\n" "ldr d14, [x9], #0x8\n" "ldr d18, [x27], #0x8\n" "ldr d22, [x25], #0x8\n" "ldr d26, [x23], #0x8\n" "tbz x16, #1, 209f\n" "ld1 { v10.s }[2], [x13], #0x4\n" "ld1 { v14.s }[2], [x9], #0x4\n" "ld1 { v18.s }[2], [x27], #0x4\n" "ld1 { v22.s }[2], [x25], #0x4\n" "ld1 { v26.s }[2], [x23], #0x4\n" "mov x19, #0x2c\n" "tbz x16, #0, 220f\n" "ld1 { v10.h }[6], [x13]\n" "ld1 { v14.h }[6], [x9]\n" "ld1 { v18.h }[6], [x27]\n" "ld1 { v22.h }[6], [x25]\n" "ld1 { v26.h }[6], [x23]\n" "b 220f\n" "209:" // Height 5: Partial accumulate: partial_1_20 "mov x19, #0x28\n" "tbz x16, #0, 220f\n" "ld1 { v10.h }[4], [x13]\n" "ld1 { v14.h }[4], [x9]\n" "ld1 { v18.h }[4], [x27]\n" "ld1 { v22.h }[4], [x25]\n" "ld1 { v26.h }[4], [x23]\n" "b 220f\n" "210:" // Height 5: Partial accumulate: 
partial_2_16 "tbz x16, #1, 211f\n" "ldr s10, [x13], #0x4\n" "ldr s14, [x9], #0x4\n" "ldr s18, [x27], #0x4\n" "ldr s22, [x25], #0x4\n" "ldr s26, [x23], #0x4\n" "mov x19, #0x24\n" "tbz x16, #0, 220f\n" "ld1 { v10.h }[2], [x13]\n" "ld1 { v14.h }[2], [x9]\n" "ld1 { v18.h }[2], [x27]\n" "ld1 { v22.h }[2], [x25]\n" "ld1 { v26.h }[2], [x23]\n" "b 220f\n" "211:" // Height 5: Partial accumulate: partial_1_16 "mov x19, #0x20\n" "tbz x16, #0, 220f\n" "ldr h10, [x13, #0x0]\n" "ldr h14, [x9, #0x0]\n" "ldr h18, [x27, #0x0]\n" "ldr h22, [x25, #0x0]\n" "ldr h26, [x23, #0x0]\n" "b 220f\n" "212:" // Height 5: Partial accumulate: partial_8_0 "tbz x16, #3, 216f\n" "ld1 { v8.8h }, [x13], #0x10\n" "ld1 { v12.8h }, [x9], #0x10\n" "ld1 { v16.8h }, [x27], #0x10\n" "ld1 { v20.8h }, [x25], #0x10\n" "ld1 { v24.8h }, [x23], #0x10\n" "tbz x16, #2, 214f\n" "ldr d9, [x13], #0x8\n" "ldr d13, [x9], #0x8\n" "ldr d17, [x27], #0x8\n" "ldr d21, [x25], #0x8\n" "ldr d25, [x23], #0x8\n" "tbz x16, #1, 213f\n" "ld1 { v9.s }[2], [x13], #0x4\n" "ld1 { v13.s }[2], [x9], #0x4\n" "ld1 { v17.s }[2], [x27], #0x4\n" "ld1 { v21.s }[2], [x25], #0x4\n" "ld1 { v25.s }[2], [x23], #0x4\n" "mov x19, #0x1c\n" "tbz x16, #0, 220f\n" "ld1 { v9.h }[6], [x13]\n" "ld1 { v13.h }[6], [x9]\n" "ld1 { v17.h }[6], [x27]\n" "ld1 { v21.h }[6], [x25]\n" "ld1 { v25.h }[6], [x23]\n" "b 220f\n" "213:" // Height 5: Partial accumulate: partial_1_12 "mov x19, #0x18\n" "tbz x16, #0, 220f\n" "ld1 { v9.h }[4], [x13]\n" "ld1 { v13.h }[4], [x9]\n" "ld1 { v17.h }[4], [x27]\n" "ld1 { v21.h }[4], [x25]\n" "ld1 { v25.h }[4], [x23]\n" "b 220f\n" "214:" // Height 5: Partial accumulate: partial_2_8 "tbz x16, #1, 215f\n" "ldr s9, [x13], #0x4\n" "ldr s13, [x9], #0x4\n" "ldr s17, [x27], #0x4\n" "ldr s21, [x25], #0x4\n" "ldr s25, [x23], #0x4\n" "mov x19, #0x14\n" "tbz x16, #0, 220f\n" "ld1 { v9.h }[2], [x13]\n" "ld1 { v13.h }[2], [x9]\n" "ld1 { v17.h }[2], [x27]\n" "ld1 { v21.h }[2], [x25]\n" "ld1 { v25.h }[2], [x23]\n" "b 220f\n" "215:" // Height 5: Partial 
accumulate: partial_1_8 "mov x19, #0x10\n" "tbz x16, #0, 220f\n" "ldr h9, [x13, #0x0]\n" "ldr h13, [x9, #0x0]\n" "ldr h17, [x27, #0x0]\n" "ldr h21, [x25, #0x0]\n" "ldr h25, [x23, #0x0]\n" "b 220f\n" "216:" // Height 5: Partial accumulate: partial_4_0 "tbz x16, #2, 218f\n" "ldr d8, [x13], #0x8\n" "ldr d12, [x9], #0x8\n" "ldr d16, [x27], #0x8\n" "ldr d20, [x25], #0x8\n" "ldr d24, [x23], #0x8\n" "tbz x16, #1, 217f\n" "ld1 { v8.s }[2], [x13], #0x4\n" "ld1 { v12.s }[2], [x9], #0x4\n" "ld1 { v16.s }[2], [x27], #0x4\n" "ld1 { v20.s }[2], [x25], #0x4\n" "ld1 { v24.s }[2], [x23], #0x4\n" "mov x19, #0xc\n" "tbz x16, #0, 220f\n" "ld1 { v8.h }[6], [x13]\n" "ld1 { v12.h }[6], [x9]\n" "ld1 { v16.h }[6], [x27]\n" "ld1 { v20.h }[6], [x25]\n" "ld1 { v24.h }[6], [x23]\n" "b 220f\n" "217:" // Height 5: Partial accumulate: partial_1_4 "mov x19, #0x8\n" "tbz x16, #0, 220f\n" "ld1 { v8.h }[4], [x13]\n" "ld1 { v12.h }[4], [x9]\n" "ld1 { v16.h }[4], [x27]\n" "ld1 { v20.h }[4], [x25]\n" "ld1 { v24.h }[4], [x23]\n" "b 220f\n" "218:" // Height 5: Partial accumulate: partial_2_0 "tbz x16, #1, 219f\n" "ldr s8, [x13], #0x4\n" "ldr s12, [x9], #0x4\n" "ldr s16, [x27], #0x4\n" "ldr s20, [x25], #0x4\n" "ldr s24, [x23], #0x4\n" "mov x19, #0x4\n" "tbz x16, #0, 220f\n" "ld1 { v8.h }[2], [x13]\n" "ld1 { v12.h }[2], [x9]\n" "ld1 { v16.h }[2], [x27]\n" "ld1 { v20.h }[2], [x25]\n" "ld1 { v24.h }[2], [x23]\n" "b 220f\n" "219:" // Height 5: Partial accumulate: partial_1_0 "mov x19, #0x0\n" "ldr h8, [x13, #0x0]\n" "ldr h12, [x9, #0x0]\n" "ldr h16, [x27, #0x0]\n" "ldr h20, [x25, #0x0]\n" "ldr h24, [x23, #0x0]\n" "220:" // Height 5: Partial accumulate: Done "sub x13, x13, x19\n" "sub x9, x9, x19\n" "sub x27, x27, x19\n" "sub x25, x25, x19\n" "sub x23, x23, x19\n" "b 223f\n" "221:" // Height 5: full accumulate "ldr q8, [x13, #0x0]\n" "ldr q9, [x13, #0x10]\n" "ldr q10, [x13, #0x20]\n" "ldr q11, [x13, #0x30]\n" "ldr q12, [x9, #0x0]\n" "ldr q13, [x9, #0x10]\n" "ldr q14, [x9, #0x20]\n" "ldr q15, [x9, #0x30]\n" "ldr 
q16, [x27, #0x0]\n" "ldr q17, [x27, #0x10]\n" "ldr q18, [x27, #0x20]\n" "ldr q19, [x27, #0x30]\n" "ldr q20, [x25, #0x0]\n" "ldr q21, [x25, #0x10]\n" "ldr q22, [x25, #0x20]\n" "ldr q23, [x25, #0x30]\n" "ldr q24, [x23, #0x0]\n" "ldr q25, [x23, #0x10]\n" "ldr q26, [x23, #0x20]\n" "ldr q27, [x23, #0x30]\n" "b 223f\n" "222:" // Height 5: no accumulate "movi v8.16b, #0x0\n" "movi v9.16b, #0x0\n" "movi v10.16b, #0x0\n" "movi v11.16b, #0x0\n" "movi v12.16b, #0x0\n" "movi v13.16b, #0x0\n" "movi v14.16b, #0x0\n" "movi v15.16b, #0x0\n" "movi v16.16b, #0x0\n" "movi v17.16b, #0x0\n" "movi v18.16b, #0x0\n" "movi v19.16b, #0x0\n" "movi v20.16b, #0x0\n" "movi v21.16b, #0x0\n" "movi v22.16b, #0x0\n" "movi v23.16b, #0x0\n" "movi v24.16b, #0x0\n" "movi v25.16b, #0x0\n" "movi v26.16b, #0x0\n" "movi v27.16b, #0x0\n" "223:" // Height 5: setup done "mov x12, #0x0\n" "224:" // Height 5: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n" "ldr w11, [x20, x12, LSL #0x2]\n" "tbz %x[flags], #3, 225f\n" "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n" "add x20, x20, x19, LSL #3\n" "ldr x10, [x20, #0x0]\n" "ldr x28, [x20, #0x8]\n" "ldr x26, [x20, #0x10]\n" "ldr x24, [x20, #0x18]\n" "ldr x22, [x20, #0x20]\n" "cbnz x12, 226f\n" "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n" "add x10, x10, x19, LSL #1\n" "add x28, x28, x19, LSL #1\n" "add x26, x26, x19, LSL #1\n" "add x24, x24, x19, LSL #1\n" "add x22, x22, x19, LSL #1\n" "b 226f\n" "225:" // Height 5: setup direct input "mov x10, %x[input_ptr]\n" "add x28, x10, x19, LSL #1\n" "add x26, x28, x19, LSL #1\n" "add x24, x26, x19, LSL #1\n" "add x22, x24, x19, LSL #1\n" "226:" // Height 5: input setup done "cmp x11, #0x8\n" "blt 229f\n" "cmp x11, #0x10\n" "blt 228f\n" "227:" // Height 5: Multiply loop: Main loop head "ldr q0, [x10, #0x0]\n" "ldr q1, [x28, #0x0]\n" "ldr q2, [x26, #0x0]\n" "ldr q3, [x24, #0x0]\n" "ldr q4, [x22, #0x0]\n" "ldr q6, [x15, #0x0]\n" "fmla v8.8h, 
v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" "fmla v12.8h, v6.8h, v1.h[0]\n" "add x10, x10, #0x10\n" "prfm pldl1keep, [x10, #0x80]\n" "fmla v16.8h, v6.8h, v2.h[0]\n" "add x28, x28, #0x10\n" "fmla v20.8h, v6.8h, v3.h[0]\n" "prfm pldl1keep, [x28, #0x80]\n" "add x26, x26, #0x10\n" "fmla v24.8h, v6.8h, v4.h[0]\n" "prfm pldl1keep, [x26, #0x80]\n" "ldr q6, [x15, #0x20]\n" "fmla v9.8h, v7.8h, v0.h[0]\n" "add x24, x24, #0x10\n" "prfm pldl1keep, [x24, #0x80]\n" "fmla v13.8h, v7.8h, v1.h[0]\n" "add x22, x22, #0x10\n" "fmla v17.8h, v7.8h, v2.h[0]\n" "prfm pldl1keep, [x22, #0x80]\n" "sub x11, x11, #0x8\n" "fmla v21.8h, v7.8h, v3.h[0]\n" "cmp x11, #0x10\n" "fmla v25.8h, v7.8h, v4.h[0]\n" "ldr q7, [x15, #0x30]\n" "fmla v10.8h, v6.8h, v0.h[0]\n" "fmla v14.8h, v6.8h, v1.h[0]\n" "fmla v18.8h, v6.8h, v2.h[0]\n" "fmla v22.8h, v6.8h, v3.h[0]\n" "fmla v26.8h, v6.8h, v4.h[0]\n" "ldr q6, [x15, #0x40]\n" "fmla v11.8h, v7.8h, v0.h[0]\n" "fmla v15.8h, v7.8h, v1.h[0]\n" "fmla v19.8h, v7.8h, v2.h[0]\n" "fmla v23.8h, v7.8h, v3.h[0]\n" "fmla v27.8h, v7.8h, v4.h[0]\n" "ldr q7, [x15, #0x50]\n" "fmla v8.8h, v6.8h, v0.h[1]\n" "fmla v12.8h, v6.8h, v1.h[1]\n" "fmla v16.8h, v6.8h, v2.h[1]\n" "fmla v20.8h, v6.8h, v3.h[1]\n" "fmla v24.8h, v6.8h, v4.h[1]\n" "ldr q6, [x15, #0x60]\n" "fmla v9.8h, v7.8h, v0.h[1]\n" "fmla v13.8h, v7.8h, v1.h[1]\n" "fmla v17.8h, v7.8h, v2.h[1]\n" "fmla v21.8h, v7.8h, v3.h[1]\n" "fmla v25.8h, v7.8h, v4.h[1]\n" "ldr q7, [x15, #0x70]\n" "fmla v10.8h, v6.8h, v0.h[1]\n" "fmla v14.8h, v6.8h, v1.h[1]\n" "fmla v18.8h, v6.8h, v2.h[1]\n" "fmla v22.8h, v6.8h, v3.h[1]\n" "fmla v26.8h, v6.8h, v4.h[1]\n" "ldr q6, [x15, #0x80]\n" "fmla v11.8h, v7.8h, v0.h[1]\n" "fmla v15.8h, v7.8h, v1.h[1]\n" "fmla v19.8h, v7.8h, v2.h[1]\n" "fmla v23.8h, v7.8h, v3.h[1]\n" "fmla v27.8h, v7.8h, v4.h[1]\n" "ldr q7, [x15, #0x90]\n" "fmla v8.8h, v6.8h, v0.h[2]\n" "fmla v12.8h, v6.8h, v1.h[2]\n" "fmla v16.8h, v6.8h, v2.h[2]\n" "fmla v20.8h, v6.8h, v3.h[2]\n" "fmla v24.8h, v6.8h, v4.h[2]\n" "ldr q6, [x15, #0xa0]\n" 
"fmla v9.8h, v7.8h, v0.h[2]\n" "fmla v13.8h, v7.8h, v1.h[2]\n" "fmla v17.8h, v7.8h, v2.h[2]\n" "fmla v21.8h, v7.8h, v3.h[2]\n" "fmla v25.8h, v7.8h, v4.h[2]\n" "ldr q7, [x15, #0xb0]\n" "fmla v10.8h, v6.8h, v0.h[2]\n" "fmla v14.8h, v6.8h, v1.h[2]\n" "fmla v18.8h, v6.8h, v2.h[2]\n" "fmla v22.8h, v6.8h, v3.h[2]\n" "fmla v26.8h, v6.8h, v4.h[2]\n" "ldr q6, [x15, #0xc0]\n" "fmla v11.8h, v7.8h, v0.h[2]\n" "fmla v15.8h, v7.8h, v1.h[2]\n" "fmla v19.8h, v7.8h, v2.h[2]\n" "fmla v23.8h, v7.8h, v3.h[2]\n" "fmla v27.8h, v7.8h, v4.h[2]\n" "ldr q7, [x15, #0xd0]\n" "fmla v8.8h, v6.8h, v0.h[3]\n" "fmla v12.8h, v6.8h, v1.h[3]\n" "fmla v16.8h, v6.8h, v2.h[3]\n" "fmla v20.8h, v6.8h, v3.h[3]\n" "fmla v24.8h, v6.8h, v4.h[3]\n" "ldr q6, [x15, #0xe0]\n" "fmla v9.8h, v7.8h, v0.h[3]\n" "fmla v13.8h, v7.8h, v1.h[3]\n" "fmla v17.8h, v7.8h, v2.h[3]\n" "fmla v21.8h, v7.8h, v3.h[3]\n" "fmla v25.8h, v7.8h, v4.h[3]\n" "ldr q7, [x15, #0xf0]\n" "fmla v10.8h, v6.8h, v0.h[3]\n" "fmla v14.8h, v6.8h, v1.h[3]\n" "fmla v18.8h, v6.8h, v2.h[3]\n" "fmla v22.8h, v6.8h, v3.h[3]\n" "fmla v26.8h, v6.8h, v4.h[3]\n" "ldr q6, [x15, #0x100]\n" "fmla v11.8h, v7.8h, v0.h[3]\n" "fmla v15.8h, v7.8h, v1.h[3]\n" "fmla v19.8h, v7.8h, v2.h[3]\n" "fmla v23.8h, v7.8h, v3.h[3]\n" "fmla v27.8h, v7.8h, v4.h[3]\n" "ldr q7, [x15, #0x110]\n" "fmla v8.8h, v6.8h, v0.h[4]\n" "fmla v12.8h, v6.8h, v1.h[4]\n" "fmla v16.8h, v6.8h, v2.h[4]\n" "fmla v20.8h, v6.8h, v3.h[4]\n" "fmla v24.8h, v6.8h, v4.h[4]\n" "ldr q6, [x15, #0x120]\n" "fmla v9.8h, v7.8h, v0.h[4]\n" "fmla v13.8h, v7.8h, v1.h[4]\n" "fmla v17.8h, v7.8h, v2.h[4]\n" "fmla v21.8h, v7.8h, v3.h[4]\n" "fmla v25.8h, v7.8h, v4.h[4]\n" "ldr q7, [x15, #0x130]\n" "fmla v10.8h, v6.8h, v0.h[4]\n" "fmla v14.8h, v6.8h, v1.h[4]\n" "fmla v18.8h, v6.8h, v2.h[4]\n" "fmla v22.8h, v6.8h, v3.h[4]\n" "fmla v26.8h, v6.8h, v4.h[4]\n" "ldr q6, [x15, #0x140]\n" "fmla v11.8h, v7.8h, v0.h[4]\n" "fmla v15.8h, v7.8h, v1.h[4]\n" "fmla v19.8h, v7.8h, v2.h[4]\n" "fmla v23.8h, v7.8h, v3.h[4]\n" "fmla v27.8h, v7.8h, 
v4.h[4]\n" "ldr q7, [x15, #0x150]\n" "fmla v8.8h, v6.8h, v0.h[5]\n" "fmla v12.8h, v6.8h, v1.h[5]\n" "fmla v16.8h, v6.8h, v2.h[5]\n" "fmla v20.8h, v6.8h, v3.h[5]\n" "fmla v24.8h, v6.8h, v4.h[5]\n" "ldr q6, [x15, #0x160]\n" "fmla v9.8h, v7.8h, v0.h[5]\n" "fmla v13.8h, v7.8h, v1.h[5]\n" "fmla v17.8h, v7.8h, v2.h[5]\n" "fmla v21.8h, v7.8h, v3.h[5]\n" "fmla v25.8h, v7.8h, v4.h[5]\n" "ldr q7, [x15, #0x170]\n" "fmla v10.8h, v6.8h, v0.h[5]\n" "fmla v14.8h, v6.8h, v1.h[5]\n" "fmla v18.8h, v6.8h, v2.h[5]\n" "fmla v22.8h, v6.8h, v3.h[5]\n" "fmla v26.8h, v6.8h, v4.h[5]\n" "ldr q6, [x15, #0x180]\n" "fmla v11.8h, v7.8h, v0.h[5]\n" "fmla v15.8h, v7.8h, v1.h[5]\n" "fmla v19.8h, v7.8h, v2.h[5]\n" "fmla v23.8h, v7.8h, v3.h[5]\n" "fmla v27.8h, v7.8h, v4.h[5]\n" "ldr q7, [x15, #0x190]\n" "fmla v8.8h, v6.8h, v0.h[6]\n" "fmla v12.8h, v6.8h, v1.h[6]\n" "fmla v16.8h, v6.8h, v2.h[6]\n" "fmla v20.8h, v6.8h, v3.h[6]\n" "fmla v24.8h, v6.8h, v4.h[6]\n" "ldr q6, [x15, #0x1a0]\n" "fmla v9.8h, v7.8h, v0.h[6]\n" "fmla v13.8h, v7.8h, v1.h[6]\n" "fmla v17.8h, v7.8h, v2.h[6]\n" "fmla v21.8h, v7.8h, v3.h[6]\n" "fmla v25.8h, v7.8h, v4.h[6]\n" "ldr q7, [x15, #0x1b0]\n" "fmla v10.8h, v6.8h, v0.h[6]\n" "fmla v14.8h, v6.8h, v1.h[6]\n" "fmla v18.8h, v6.8h, v2.h[6]\n" "fmla v22.8h, v6.8h, v3.h[6]\n" "fmla v26.8h, v6.8h, v4.h[6]\n" "ldr q6, [x15, #0x1c0]\n" "fmla v11.8h, v7.8h, v0.h[6]\n" "fmla v15.8h, v7.8h, v1.h[6]\n" "fmla v19.8h, v7.8h, v2.h[6]\n" "fmla v23.8h, v7.8h, v3.h[6]\n" "fmla v27.8h, v7.8h, v4.h[6]\n" "ldr q7, [x15, #0x1d0]\n" "fmla v8.8h, v6.8h, v0.h[7]\n" "fmla v12.8h, v6.8h, v1.h[7]\n" "fmla v16.8h, v6.8h, v2.h[7]\n" "fmla v20.8h, v6.8h, v3.h[7]\n" "fmla v24.8h, v6.8h, v4.h[7]\n" "ldr q6, [x15, #0x1e0]\n" "fmla v9.8h, v7.8h, v0.h[7]\n" "fmla v13.8h, v7.8h, v1.h[7]\n" "fmla v17.8h, v7.8h, v2.h[7]\n" "fmla v21.8h, v7.8h, v3.h[7]\n" "fmla v25.8h, v7.8h, v4.h[7]\n" "ldr q7, [x15, #0x1f0]\n" "fmla v10.8h, v6.8h, v0.h[7]\n" "add x15, x15, #0x200\n" "fmla v14.8h, v6.8h, v1.h[7]\n" "fmla v18.8h, 
v6.8h, v2.h[7]\n" "fmla v22.8h, v6.8h, v3.h[7]\n" "fmla v26.8h, v6.8h, v4.h[7]\n" "fmla v11.8h, v7.8h, v0.h[7]\n" "fmla v15.8h, v7.8h, v1.h[7]\n" "fmla v19.8h, v7.8h, v2.h[7]\n" "fmla v23.8h, v7.8h, v3.h[7]\n" "fmla v27.8h, v7.8h, v4.h[7]\n" "bge 227b\n" "228:" // Height 5: Multiply loop: Single iteration only "sub x11, x11, #0x8\n" "ldr q0, [x10, #0x0]\n" "ldr q1, [x28, #0x0]\n" "ldr q2, [x26, #0x0]\n" "ldr q3, [x24, #0x0]\n" "ldr q4, [x22, #0x0]\n" "ldr q6, [x15, #0x0]\n" "fmla v8.8h, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" "fmla v12.8h, v6.8h, v1.h[0]\n" "add x10, x10, #0x10\n" "prfm pldl1keep, [x10, #0x80]\n" "fmla v16.8h, v6.8h, v2.h[0]\n" "add x28, x28, #0x10\n" "fmla v20.8h, v6.8h, v3.h[0]\n" "prfm pldl1keep, [x28, #0x80]\n" "add x26, x26, #0x10\n" "fmla v24.8h, v6.8h, v4.h[0]\n" "prfm pldl1keep, [x26, #0x80]\n" "ldr q6, [x15, #0x20]\n" "fmla v9.8h, v7.8h, v0.h[0]\n" "add x24, x24, #0x10\n" "prfm pldl1keep, [x24, #0x80]\n" "fmla v13.8h, v7.8h, v1.h[0]\n" "add x22, x22, #0x10\n" "fmla v17.8h, v7.8h, v2.h[0]\n" "prfm pldl1keep, [x22, #0x80]\n" "fmla v21.8h, v7.8h, v3.h[0]\n" "fmla v25.8h, v7.8h, v4.h[0]\n" "ldr q7, [x15, #0x30]\n" "fmla v10.8h, v6.8h, v0.h[0]\n" "fmla v14.8h, v6.8h, v1.h[0]\n" "fmla v18.8h, v6.8h, v2.h[0]\n" "fmla v22.8h, v6.8h, v3.h[0]\n" "fmla v26.8h, v6.8h, v4.h[0]\n" "ldr q6, [x15, #0x40]\n" "fmla v11.8h, v7.8h, v0.h[0]\n" "fmla v15.8h, v7.8h, v1.h[0]\n" "fmla v19.8h, v7.8h, v2.h[0]\n" "fmla v23.8h, v7.8h, v3.h[0]\n" "fmla v27.8h, v7.8h, v4.h[0]\n" "ldr q7, [x15, #0x50]\n" "fmla v8.8h, v6.8h, v0.h[1]\n" "fmla v12.8h, v6.8h, v1.h[1]\n" "fmla v16.8h, v6.8h, v2.h[1]\n" "fmla v20.8h, v6.8h, v3.h[1]\n" "fmla v24.8h, v6.8h, v4.h[1]\n" "ldr q6, [x15, #0x60]\n" "fmla v9.8h, v7.8h, v0.h[1]\n" "fmla v13.8h, v7.8h, v1.h[1]\n" "fmla v17.8h, v7.8h, v2.h[1]\n" "fmla v21.8h, v7.8h, v3.h[1]\n" "fmla v25.8h, v7.8h, v4.h[1]\n" "ldr q7, [x15, #0x70]\n" "fmla v10.8h, v6.8h, v0.h[1]\n" "fmla v14.8h, v6.8h, v1.h[1]\n" "fmla v18.8h, v6.8h, v2.h[1]\n" "fmla 
v22.8h, v6.8h, v3.h[1]\n" "fmla v26.8h, v6.8h, v4.h[1]\n" "ldr q6, [x15, #0x80]\n" "fmla v11.8h, v7.8h, v0.h[1]\n" "fmla v15.8h, v7.8h, v1.h[1]\n" "fmla v19.8h, v7.8h, v2.h[1]\n" "fmla v23.8h, v7.8h, v3.h[1]\n" "fmla v27.8h, v7.8h, v4.h[1]\n" "ldr q7, [x15, #0x90]\n" "fmla v8.8h, v6.8h, v0.h[2]\n" "fmla v12.8h, v6.8h, v1.h[2]\n" "fmla v16.8h, v6.8h, v2.h[2]\n" "fmla v20.8h, v6.8h, v3.h[2]\n" "fmla v24.8h, v6.8h, v4.h[2]\n" "ldr q6, [x15, #0xa0]\n" "fmla v9.8h, v7.8h, v0.h[2]\n" "fmla v13.8h, v7.8h, v1.h[2]\n" "fmla v17.8h, v7.8h, v2.h[2]\n" "fmla v21.8h, v7.8h, v3.h[2]\n" "fmla v25.8h, v7.8h, v4.h[2]\n" "ldr q7, [x15, #0xb0]\n" "fmla v10.8h, v6.8h, v0.h[2]\n" "fmla v14.8h, v6.8h, v1.h[2]\n" "fmla v18.8h, v6.8h, v2.h[2]\n" "fmla v22.8h, v6.8h, v3.h[2]\n" "fmla v26.8h, v6.8h, v4.h[2]\n" "ldr q6, [x15, #0xc0]\n" "fmla v11.8h, v7.8h, v0.h[2]\n" "fmla v15.8h, v7.8h, v1.h[2]\n" "fmla v19.8h, v7.8h, v2.h[2]\n" "fmla v23.8h, v7.8h, v3.h[2]\n" "fmla v27.8h, v7.8h, v4.h[2]\n" "ldr q7, [x15, #0xd0]\n" "fmla v8.8h, v6.8h, v0.h[3]\n" "fmla v12.8h, v6.8h, v1.h[3]\n" "fmla v16.8h, v6.8h, v2.h[3]\n" "fmla v20.8h, v6.8h, v3.h[3]\n" "fmla v24.8h, v6.8h, v4.h[3]\n" "ldr q6, [x15, #0xe0]\n" "fmla v9.8h, v7.8h, v0.h[3]\n" "fmla v13.8h, v7.8h, v1.h[3]\n" "fmla v17.8h, v7.8h, v2.h[3]\n" "fmla v21.8h, v7.8h, v3.h[3]\n" "fmla v25.8h, v7.8h, v4.h[3]\n" "ldr q7, [x15, #0xf0]\n" "fmla v10.8h, v6.8h, v0.h[3]\n" "fmla v14.8h, v6.8h, v1.h[3]\n" "fmla v18.8h, v6.8h, v2.h[3]\n" "fmla v22.8h, v6.8h, v3.h[3]\n" "fmla v26.8h, v6.8h, v4.h[3]\n" "ldr q6, [x15, #0x100]\n" "fmla v11.8h, v7.8h, v0.h[3]\n" "fmla v15.8h, v7.8h, v1.h[3]\n" "fmla v19.8h, v7.8h, v2.h[3]\n" "fmla v23.8h, v7.8h, v3.h[3]\n" "fmla v27.8h, v7.8h, v4.h[3]\n" "ldr q7, [x15, #0x110]\n" "fmla v8.8h, v6.8h, v0.h[4]\n" "fmla v12.8h, v6.8h, v1.h[4]\n" "fmla v16.8h, v6.8h, v2.h[4]\n" "fmla v20.8h, v6.8h, v3.h[4]\n" "fmla v24.8h, v6.8h, v4.h[4]\n" "ldr q6, [x15, #0x120]\n" "fmla v9.8h, v7.8h, v0.h[4]\n" "fmla v13.8h, v7.8h, v1.h[4]\n" "fmla 
v17.8h, v7.8h, v2.h[4]\n" "fmla v21.8h, v7.8h, v3.h[4]\n" "fmla v25.8h, v7.8h, v4.h[4]\n" "ldr q7, [x15, #0x130]\n" "fmla v10.8h, v6.8h, v0.h[4]\n" "fmla v14.8h, v6.8h, v1.h[4]\n" "fmla v18.8h, v6.8h, v2.h[4]\n" "fmla v22.8h, v6.8h, v3.h[4]\n" "fmla v26.8h, v6.8h, v4.h[4]\n" "ldr q6, [x15, #0x140]\n" "fmla v11.8h, v7.8h, v0.h[4]\n" "fmla v15.8h, v7.8h, v1.h[4]\n" "fmla v19.8h, v7.8h, v2.h[4]\n" "fmla v23.8h, v7.8h, v3.h[4]\n" "fmla v27.8h, v7.8h, v4.h[4]\n" "ldr q7, [x15, #0x150]\n" "fmla v8.8h, v6.8h, v0.h[5]\n" "fmla v12.8h, v6.8h, v1.h[5]\n" "fmla v16.8h, v6.8h, v2.h[5]\n" "fmla v20.8h, v6.8h, v3.h[5]\n" "fmla v24.8h, v6.8h, v4.h[5]\n" "ldr q6, [x15, #0x160]\n" "fmla v9.8h, v7.8h, v0.h[5]\n" "fmla v13.8h, v7.8h, v1.h[5]\n" "fmla v17.8h, v7.8h, v2.h[5]\n" "fmla v21.8h, v7.8h, v3.h[5]\n" "fmla v25.8h, v7.8h, v4.h[5]\n" "ldr q7, [x15, #0x170]\n" "fmla v10.8h, v6.8h, v0.h[5]\n" "fmla v14.8h, v6.8h, v1.h[5]\n" "fmla v18.8h, v6.8h, v2.h[5]\n" "fmla v22.8h, v6.8h, v3.h[5]\n" "fmla v26.8h, v6.8h, v4.h[5]\n" "ldr q6, [x15, #0x180]\n" "fmla v11.8h, v7.8h, v0.h[5]\n" "fmla v15.8h, v7.8h, v1.h[5]\n" "fmla v19.8h, v7.8h, v2.h[5]\n" "fmla v23.8h, v7.8h, v3.h[5]\n" "fmla v27.8h, v7.8h, v4.h[5]\n" "ldr q7, [x15, #0x190]\n" "fmla v8.8h, v6.8h, v0.h[6]\n" "fmla v12.8h, v6.8h, v1.h[6]\n" "fmla v16.8h, v6.8h, v2.h[6]\n" "fmla v20.8h, v6.8h, v3.h[6]\n" "fmla v24.8h, v6.8h, v4.h[6]\n" "ldr q6, [x15, #0x1a0]\n" "fmla v9.8h, v7.8h, v0.h[6]\n" "fmla v13.8h, v7.8h, v1.h[6]\n" "fmla v17.8h, v7.8h, v2.h[6]\n" "fmla v21.8h, v7.8h, v3.h[6]\n" "fmla v25.8h, v7.8h, v4.h[6]\n" "ldr q7, [x15, #0x1b0]\n" "fmla v10.8h, v6.8h, v0.h[6]\n" "fmla v14.8h, v6.8h, v1.h[6]\n" "fmla v18.8h, v6.8h, v2.h[6]\n" "fmla v22.8h, v6.8h, v3.h[6]\n" "fmla v26.8h, v6.8h, v4.h[6]\n" "ldr q6, [x15, #0x1c0]\n" "fmla v11.8h, v7.8h, v0.h[6]\n" "fmla v15.8h, v7.8h, v1.h[6]\n" "fmla v19.8h, v7.8h, v2.h[6]\n" "fmla v23.8h, v7.8h, v3.h[6]\n" "fmla v27.8h, v7.8h, v4.h[6]\n" "ldr q7, [x15, #0x1d0]\n" "fmla v8.8h, v6.8h, 
v0.h[7]\n" "fmla v12.8h, v6.8h, v1.h[7]\n" "fmla v16.8h, v6.8h, v2.h[7]\n" "fmla v20.8h, v6.8h, v3.h[7]\n" "fmla v24.8h, v6.8h, v4.h[7]\n" "ldr q6, [x15, #0x1e0]\n" "fmla v9.8h, v7.8h, v0.h[7]\n" "fmla v13.8h, v7.8h, v1.h[7]\n" "fmla v17.8h, v7.8h, v2.h[7]\n" "fmla v21.8h, v7.8h, v3.h[7]\n" "fmla v25.8h, v7.8h, v4.h[7]\n" "ldr q7, [x15, #0x1f0]\n" "fmla v10.8h, v6.8h, v0.h[7]\n" "add x15, x15, #0x200\n" "fmla v14.8h, v6.8h, v1.h[7]\n" "fmla v18.8h, v6.8h, v2.h[7]\n" "fmla v22.8h, v6.8h, v3.h[7]\n" "fmla v26.8h, v6.8h, v4.h[7]\n" "fmla v11.8h, v7.8h, v0.h[7]\n" "fmla v15.8h, v7.8h, v1.h[7]\n" "fmla v19.8h, v7.8h, v2.h[7]\n" "fmla v23.8h, v7.8h, v3.h[7]\n" "fmla v27.8h, v7.8h, v4.h[7]\n" "229:" // Height 5: Multiply loop: Main loop skip "cbz x11, 231f\n" "230:" // Height 5: Multiply loop: Odd block loop "ldr h0, [x10], #0x2\n" "ldr h1, [x28], #0x2\n" "ldr h2, [x26], #0x2\n" "ldr h3, [x24], #0x2\n" "ldr h4, [x22], #0x2\n" "ldr q6, [x15, #0x0]\n" "fmla v8.8h, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" "fmla v12.8h, v6.8h, v1.h[0]\n" "sub x11, x11, #0x1\n" "fmla v16.8h, v6.8h, v2.h[0]\n" "fmla v20.8h, v6.8h, v3.h[0]\n" "fmla v24.8h, v6.8h, v4.h[0]\n" "ldr q6, [x15, #0x20]\n" "fmla v9.8h, v7.8h, v0.h[0]\n" "fmla v13.8h, v7.8h, v1.h[0]\n" "fmla v17.8h, v7.8h, v2.h[0]\n" "fmla v21.8h, v7.8h, v3.h[0]\n" "fmla v25.8h, v7.8h, v4.h[0]\n" "ldr q7, [x15, #0x30]\n" "fmla v10.8h, v6.8h, v0.h[0]\n" "add x15, x15, #0x40\n" "fmla v14.8h, v6.8h, v1.h[0]\n" "fmla v18.8h, v6.8h, v2.h[0]\n" "fmla v22.8h, v6.8h, v3.h[0]\n" "fmla v26.8h, v6.8h, v4.h[0]\n" "fmla v11.8h, v7.8h, v0.h[0]\n" "fmla v15.8h, v7.8h, v1.h[0]\n" "fmla v19.8h, v7.8h, v2.h[0]\n" "fmla v23.8h, v7.8h, v3.h[0]\n" "fmla v27.8h, v7.8h, v4.h[0]\n" "cbnz x11, 230b\n" "231:" // Height 5: Multiply loop: No odd multiplies "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "add x12, x12, #0x1\n" "cmp x12, x19\n" "bne 224b\n" "prfm pstl1keep, [x13, #0x0]\n" "prfm pstl1keep, [x9, #0x0]\n" "prfm pstl1keep, [x27, #0x0]\n" "prfm 
pstl1keep, [x25, #0x0]\n" "prfm pstl1keep, [x23, #0x0]\n" "tbz %x[flags], #1, 232f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1r { v1.8h }, [x19]\n" "add x19, %x[args_ptr], %[offset_max]\n" "ld1r { v0.8h }, [x19]\n" "fmin v8.8h, v8.8h, v0.8h\n" "fmin v9.8h, v9.8h, v0.8h\n" "fmin v10.8h, v10.8h, v0.8h\n" "fmin v11.8h, v11.8h, v0.8h\n" "fmax v8.8h, v8.8h, v1.8h\n" "fmax v9.8h, v9.8h, v1.8h\n" "fmax v10.8h, v10.8h, v1.8h\n" "fmax v11.8h, v11.8h, v1.8h\n" "fmin v12.8h, v12.8h, v0.8h\n" "fmin v13.8h, v13.8h, v0.8h\n" "fmin v14.8h, v14.8h, v0.8h\n" "fmax v12.8h, v12.8h, v1.8h\n" "fmax v13.8h, v13.8h, v1.8h\n" "fmax v14.8h, v14.8h, v1.8h\n" "fmin v15.8h, v15.8h, v0.8h\n" "fmin v16.8h, v16.8h, v0.8h\n" "fmin v17.8h, v17.8h, v0.8h\n" "fmax v15.8h, v15.8h, v1.8h\n" "fmax v16.8h, v16.8h, v1.8h\n" "fmax v17.8h, v17.8h, v1.8h\n" "fmin v18.8h, v18.8h, v0.8h\n" "fmin v19.8h, v19.8h, v0.8h\n" "fmin v20.8h, v20.8h, v0.8h\n" "fmax v18.8h, v18.8h, v1.8h\n" "fmax v19.8h, v19.8h, v1.8h\n" "fmax v20.8h, v20.8h, v1.8h\n" "fmin v21.8h, v21.8h, v0.8h\n" "fmin v22.8h, v22.8h, v0.8h\n" "fmin v23.8h, v23.8h, v0.8h\n" "fmax v21.8h, v21.8h, v1.8h\n" "fmax v22.8h, v22.8h, v1.8h\n" "fmax v23.8h, v23.8h, v1.8h\n" "fmin v24.8h, v24.8h, v0.8h\n" "fmin v25.8h, v25.8h, v0.8h\n" "fmin v26.8h, v26.8h, v0.8h\n" "fmax v24.8h, v24.8h, v1.8h\n" "fmax v25.8h, v25.8h, v1.8h\n" "fmax v26.8h, v26.8h, v1.8h\n" "fmin v27.8h, v27.8h, v0.8h\n" "fmax v27.8h, v27.8h, v1.8h\n" "232:" // Height 5: No activation "cmp x16, #0x20\n" "bge 249f\n" "tbz x16, #4, 240f\n" "st1 { v8.8h }, [x13], #0x10\n" "st1 { v9.8h }, [x13], #0x10\n" "st1 { v12.8h }, [x9], #0x10\n" "st1 { v13.8h }, [x9], #0x10\n" "st1 { v16.8h }, [x27], #0x10\n" "st1 { v17.8h }, [x27], #0x10\n" "st1 { v20.8h }, [x25], #0x10\n" "st1 { v21.8h }, [x25], #0x10\n" "st1 { v24.8h }, [x23], #0x10\n" "st1 { v25.8h }, [x23], #0x10\n" "tbz x16, #3, 236f\n" "st1 { v10.8h }, [x13], #0x10\n" "st1 { v14.8h }, [x9], #0x10\n" "st1 { v18.8h }, [x27], #0x10\n" "st1 { v22.8h 
}, [x25], #0x10\n" "st1 { v26.8h }, [x23], #0x10\n" "tbz x16, #2, 234f\n" "str d11, [x13], #0x8\n" "str d15, [x9], #0x8\n" "str d19, [x27], #0x8\n" "str d23, [x25], #0x8\n" "str d27, [x23], #0x8\n" "tbz x16, #1, 233f\n" "st1 { v11.s }[2], [x13], #0x4\n" "st1 { v15.s }[2], [x9], #0x4\n" "st1 { v19.s }[2], [x27], #0x4\n" "st1 { v23.s }[2], [x25], #0x4\n" "st1 { v27.s }[2], [x23], #0x4\n" "tbz x16, #0, 248f\n" "st1 { v11.h }[6], [x13]\n" "st1 { v15.h }[6], [x9]\n" "st1 { v19.h }[6], [x27]\n" "st1 { v23.h }[6], [x25]\n" "st1 { v27.h }[6], [x23]\n" "b 248f\n" "233:" // Height 5: Partial direct writeback: partial_1_28 "tbz x16, #0, 248f\n" "st1 { v11.h }[4], [x13]\n" "st1 { v15.h }[4], [x9]\n" "st1 { v19.h }[4], [x27]\n" "st1 { v23.h }[4], [x25]\n" "st1 { v27.h }[4], [x23]\n" "b 248f\n" "234:" // Height 5: Partial direct writeback: partial_2_24 "tbz x16, #1, 235f\n" "str s11, [x13], #0x4\n" "str s15, [x9], #0x4\n" "str s19, [x27], #0x4\n" "str s23, [x25], #0x4\n" "str s27, [x23], #0x4\n" "tbz x16, #0, 248f\n" "st1 { v11.h }[2], [x13]\n" "st1 { v15.h }[2], [x9]\n" "st1 { v19.h }[2], [x27]\n" "st1 { v23.h }[2], [x25]\n" "st1 { v27.h }[2], [x23]\n" "b 248f\n" "235:" // Height 5: Partial direct writeback: partial_1_24 "tbz x16, #0, 248f\n" "str h11, [x13, #0x0]\n" "str h15, [x9, #0x0]\n" "str h19, [x27, #0x0]\n" "str h23, [x25, #0x0]\n" "str h27, [x23, #0x0]\n" "b 248f\n" "236:" // Height 5: Partial direct writeback: partial_4_16 "tbz x16, #2, 238f\n" "str d10, [x13], #0x8\n" "str d14, [x9], #0x8\n" "str d18, [x27], #0x8\n" "str d22, [x25], #0x8\n" "str d26, [x23], #0x8\n" "tbz x16, #1, 237f\n" "st1 { v10.s }[2], [x13], #0x4\n" "st1 { v14.s }[2], [x9], #0x4\n" "st1 { v18.s }[2], [x27], #0x4\n" "st1 { v22.s }[2], [x25], #0x4\n" "st1 { v26.s }[2], [x23], #0x4\n" "tbz x16, #0, 248f\n" "st1 { v10.h }[6], [x13]\n" "st1 { v14.h }[6], [x9]\n" "st1 { v18.h }[6], [x27]\n" "st1 { v22.h }[6], [x25]\n" "st1 { v26.h }[6], [x23]\n" "b 248f\n" "237:" // Height 5: Partial direct writeback: 
partial_1_20 "tbz x16, #0, 248f\n" "st1 { v10.h }[4], [x13]\n" "st1 { v14.h }[4], [x9]\n" "st1 { v18.h }[4], [x27]\n" "st1 { v22.h }[4], [x25]\n" "st1 { v26.h }[4], [x23]\n" "b 248f\n" "238:" // Height 5: Partial direct writeback: partial_2_16 "tbz x16, #1, 239f\n" "str s10, [x13], #0x4\n" "str s14, [x9], #0x4\n" "str s18, [x27], #0x4\n" "str s22, [x25], #0x4\n" "str s26, [x23], #0x4\n" "tbz x16, #0, 248f\n" "st1 { v10.h }[2], [x13]\n" "st1 { v14.h }[2], [x9]\n" "st1 { v18.h }[2], [x27]\n" "st1 { v22.h }[2], [x25]\n" "st1 { v26.h }[2], [x23]\n" "b 248f\n" "239:" // Height 5: Partial direct writeback: partial_1_16 "tbz x16, #0, 248f\n" "str h10, [x13, #0x0]\n" "str h14, [x9, #0x0]\n" "str h18, [x27, #0x0]\n" "str h22, [x25, #0x0]\n" "str h26, [x23, #0x0]\n" "b 248f\n" "240:" // Height 5: Partial direct writeback: partial_8_0 "tbz x16, #3, 244f\n" "st1 { v8.8h }, [x13], #0x10\n" "st1 { v12.8h }, [x9], #0x10\n" "st1 { v16.8h }, [x27], #0x10\n" "st1 { v20.8h }, [x25], #0x10\n" "st1 { v24.8h }, [x23], #0x10\n" "tbz x16, #2, 242f\n" "str d9, [x13], #0x8\n" "str d13, [x9], #0x8\n" "str d17, [x27], #0x8\n" "str d21, [x25], #0x8\n" "str d25, [x23], #0x8\n" "tbz x16, #1, 241f\n" "st1 { v9.s }[2], [x13], #0x4\n" "st1 { v13.s }[2], [x9], #0x4\n" "st1 { v17.s }[2], [x27], #0x4\n" "st1 { v21.s }[2], [x25], #0x4\n" "st1 { v25.s }[2], [x23], #0x4\n" "tbz x16, #0, 248f\n" "st1 { v9.h }[6], [x13]\n" "st1 { v13.h }[6], [x9]\n" "st1 { v17.h }[6], [x27]\n" "st1 { v21.h }[6], [x25]\n" "st1 { v25.h }[6], [x23]\n" "b 248f\n" "241:" // Height 5: Partial direct writeback: partial_1_12 "tbz x16, #0, 248f\n" "st1 { v9.h }[4], [x13]\n" "st1 { v13.h }[4], [x9]\n" "st1 { v17.h }[4], [x27]\n" "st1 { v21.h }[4], [x25]\n" "st1 { v25.h }[4], [x23]\n" "b 248f\n" "242:" // Height 5: Partial direct writeback: partial_2_8 "tbz x16, #1, 243f\n" "str s9, [x13], #0x4\n" "str s13, [x9], #0x4\n" "str s17, [x27], #0x4\n" "str s21, [x25], #0x4\n" "str s25, [x23], #0x4\n" "tbz x16, #0, 248f\n" "st1 { v9.h }[2], 
[x13]\n" "st1 { v13.h }[2], [x9]\n" "st1 { v17.h }[2], [x27]\n" "st1 { v21.h }[2], [x25]\n" "st1 { v25.h }[2], [x23]\n" "b 248f\n" "243:" // Height 5: Partial direct writeback: partial_1_8 "tbz x16, #0, 248f\n" "str h9, [x13, #0x0]\n" "str h13, [x9, #0x0]\n" "str h17, [x27, #0x0]\n" "str h21, [x25, #0x0]\n" "str h25, [x23, #0x0]\n" "b 248f\n" "244:" // Height 5: Partial direct writeback: partial_4_0 "tbz x16, #2, 246f\n" "str d8, [x13], #0x8\n" "str d12, [x9], #0x8\n" "str d16, [x27], #0x8\n" "str d20, [x25], #0x8\n" "str d24, [x23], #0x8\n" "tbz x16, #1, 245f\n" "st1 { v8.s }[2], [x13], #0x4\n" "st1 { v12.s }[2], [x9], #0x4\n" "st1 { v16.s }[2], [x27], #0x4\n" "st1 { v20.s }[2], [x25], #0x4\n" "st1 { v24.s }[2], [x23], #0x4\n" "tbz x16, #0, 248f\n" "st1 { v8.h }[6], [x13]\n" "st1 { v12.h }[6], [x9]\n" "st1 { v16.h }[6], [x27]\n" "st1 { v20.h }[6], [x25]\n" "st1 { v24.h }[6], [x23]\n" "b 248f\n" "245:" // Height 5: Partial direct writeback: partial_1_4 "tbz x16, #0, 248f\n" "st1 { v8.h }[4], [x13]\n" "st1 { v12.h }[4], [x9]\n" "st1 { v16.h }[4], [x27]\n" "st1 { v20.h }[4], [x25]\n" "st1 { v24.h }[4], [x23]\n" "b 248f\n" "246:" // Height 5: Partial direct writeback: partial_2_0 "tbz x16, #1, 247f\n" "str s8, [x13], #0x4\n" "str s12, [x9], #0x4\n" "str s16, [x27], #0x4\n" "str s20, [x25], #0x4\n" "str s24, [x23], #0x4\n" "tbz x16, #0, 248f\n" "st1 { v8.h }[2], [x13]\n" "st1 { v12.h }[2], [x9]\n" "st1 { v16.h }[2], [x27]\n" "st1 { v20.h }[2], [x25]\n" "st1 { v24.h }[2], [x23]\n" "b 248f\n" "247:" // Height 5: Partial direct writeback: partial_1_0 "str h8, [x13, #0x0]\n" "str h12, [x9, #0x0]\n" "str h16, [x27, #0x0]\n" "str h20, [x25, #0x0]\n" "str h24, [x23, #0x0]\n" "248:" // Height 5: Partial direct writeback: Done "b 250f\n" "249:" // Height 5: Full writeback "str q8, [x13, #0x0]\n" "str q9, [x13, #0x10]\n" "str q10, [x13, #0x20]\n" "str q11, [x13, #0x30]\n" "str q12, [x9, #0x0]\n" "str q13, [x9, #0x10]\n" "str q14, [x9, #0x20]\n" "str q15, [x9, #0x30]\n" "str q16, 
[x27, #0x0]\n" "str q17, [x27, #0x10]\n" "str q18, [x27, #0x20]\n" "str q19, [x27, #0x30]\n" "str q20, [x25, #0x0]\n" "str q21, [x25, #0x10]\n" "str q22, [x25, #0x20]\n" "str q23, [x25, #0x30]\n" "str q24, [x23, #0x0]\n" "str q25, [x23, #0x10]\n" "str q26, [x23, #0x20]\n" "str q27, [x23, #0x30]\n" "add x13, x13, #0x40\n" "add x9, x9, #0x40\n" "add x27, x27, #0x40\n" "add x25, x25, #0x40\n" "add x23, x23, #0x40\n" "250:" // Height 5: Writeback done "subs x16, x16, #0x20\n" "bgt 203b\n" "b 302f\n" "251:" // Height 6 "ldr x16, [%x[args_ptr], %[offsetof_N]]\n" "mov x14, %x[bias]\n" "ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n" "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n" "tbz %x[flags], #2, 252f\n" "ldr x13, [%x[output_ptr], #0x0]\n" "add x13, x13, x19, LSL #1\n" "ldr x9, [%x[output_ptr], #0x8]\n" "ldr x27, [%x[output_ptr], #0x10]\n" "add x9, x9, x19, LSL #1\n" "ldr x25, [%x[output_ptr], #0x18]\n" "ldr x23, [%x[output_ptr], #0x20]\n" "add x27, x27, x19, LSL #1\n" "ldr x21, [%x[output_ptr], #0x28]\n" "add %x[output_ptr], %x[output_ptr], #0x30\n" "add x25, x25, x19, LSL #1\n" "add x23, x23, x19, LSL #1\n" "add x21, x21, x19, LSL #1\n" "b 253f\n" "252:" // Height 6: setup direct output "mov x13, %x[output_ptr]\n" "add x9, x13, x19, LSL #1\n" "add x27, x9, x19, LSL #1\n" "add x25, x27, x19, LSL #1\n" "add x23, x25, x19, LSL #1\n" "add x21, x23, x19, LSL #1\n" "add %x[output_ptr], x21, x19, LSL #1\n" "253:" // Height 6: Column loop "cbz x14, 254f\n" "ldr q8, [x14, #0x0]\n" "mov v12.16b, v8.16b\n" "ldr q9, [x14, #0x10]\n" "mov v16.16b, v8.16b\n" "ldr q10, [x14, #0x20]\n" "mov v20.16b, v8.16b\n" "ldr q11, [x14, #0x30]\n" "mov v24.16b, v8.16b\n" "add x14, x14, #0x40\n" "mov v28.16b, v8.16b\n" "mov v13.16b, v9.16b\n" "mov v17.16b, v9.16b\n" "mov v14.16b, v10.16b\n" "mov v15.16b, v11.16b\n" "mov v18.16b, v10.16b\n" "mov v19.16b, v11.16b\n" "mov v21.16b, v9.16b\n" "mov v22.16b, v10.16b\n" "mov v23.16b, v11.16b\n" "mov v25.16b, v9.16b\n" "mov v26.16b, v10.16b\n" "mov 
v27.16b, v11.16b\n" "mov v29.16b, v9.16b\n" "mov v30.16b, v10.16b\n" "mov v31.16b, v11.16b\n" "b 273f\n" "254:" // Height 6: no bias "tbz %x[flags], #0, 272f\n" "cmp x16, #0x20\n" "bge 271f\n" "tbz x16, #4, 262f\n" "ld1 { v8.8h }, [x13], #0x10\n" "ld1 { v12.8h }, [x9], #0x10\n" "ld1 { v16.8h }, [x27], #0x10\n" "ld1 { v20.8h }, [x25], #0x10\n" "ld1 { v24.8h }, [x23], #0x10\n" "ld1 { v28.8h }, [x21], #0x10\n" "ld1 { v9.8h }, [x13], #0x10\n" "ld1 { v13.8h }, [x9], #0x10\n" "ld1 { v17.8h }, [x27], #0x10\n" "ld1 { v21.8h }, [x25], #0x10\n" "ld1 { v25.8h }, [x23], #0x10\n" "ld1 { v29.8h }, [x21], #0x10\n" "tbz x16, #3, 258f\n" "ld1 { v10.8h }, [x13], #0x10\n" "ld1 { v14.8h }, [x9], #0x10\n" "ld1 { v18.8h }, [x27], #0x10\n" "ld1 { v22.8h }, [x25], #0x10\n" "ld1 { v26.8h }, [x23], #0x10\n" "ld1 { v30.8h }, [x21], #0x10\n" "tbz x16, #2, 256f\n" "ldr d11, [x13], #0x8\n" "ldr d15, [x9], #0x8\n" "ldr d19, [x27], #0x8\n" "ldr d23, [x25], #0x8\n" "ldr d27, [x23], #0x8\n" "ldr d31, [x21], #0x8\n" "tbz x16, #1, 255f\n" "ld1 { v11.s }[2], [x13], #0x4\n" "ld1 { v15.s }[2], [x9], #0x4\n" "ld1 { v19.s }[2], [x27], #0x4\n" "ld1 { v23.s }[2], [x25], #0x4\n" "ld1 { v27.s }[2], [x23], #0x4\n" "ld1 { v31.s }[2], [x21], #0x4\n" "mov x19, #0x3c\n" "tbz x16, #0, 270f\n" "ld1 { v11.h }[6], [x13]\n" "ld1 { v15.h }[6], [x9]\n" "ld1 { v19.h }[6], [x27]\n" "ld1 { v23.h }[6], [x25]\n" "ld1 { v27.h }[6], [x23]\n" "ld1 { v31.h }[6], [x21]\n" "b 270f\n" "255:" // Height 6: Partial accumulate: partial_1_28 "mov x19, #0x38\n" "tbz x16, #0, 270f\n" "ld1 { v11.h }[4], [x13]\n" "ld1 { v15.h }[4], [x9]\n" "ld1 { v19.h }[4], [x27]\n" "ld1 { v23.h }[4], [x25]\n" "ld1 { v27.h }[4], [x23]\n" "ld1 { v31.h }[4], [x21]\n" "b 270f\n" "256:" // Height 6: Partial accumulate: partial_2_24 "tbz x16, #1, 257f\n" "ldr s11, [x13], #0x4\n" "ldr s15, [x9], #0x4\n" "ldr s19, [x27], #0x4\n" "ldr s23, [x25], #0x4\n" "ldr s27, [x23], #0x4\n" "ldr s31, [x21], #0x4\n" "mov x19, #0x34\n" "tbz x16, #0, 270f\n" "ld1 { v11.h }[2], 
[x13]\n" "ld1 { v15.h }[2], [x9]\n" "ld1 { v19.h }[2], [x27]\n" "ld1 { v23.h }[2], [x25]\n" "ld1 { v27.h }[2], [x23]\n" "ld1 { v31.h }[2], [x21]\n" "b 270f\n" "257:" // Height 6: Partial accumulate: partial_1_24 "mov x19, #0x30\n" "tbz x16, #0, 270f\n" "ldr h11, [x13, #0x0]\n" "ldr h15, [x9, #0x0]\n" "ldr h19, [x27, #0x0]\n" "ldr h23, [x25, #0x0]\n" "ldr h27, [x23, #0x0]\n" "ldr h31, [x21, #0x0]\n" "b 270f\n" "258:" // Height 6: Partial accumulate: partial_4_16 "tbz x16, #2, 260f\n" "ldr d10, [x13], #0x8\n" "ldr d14, [x9], #0x8\n" "ldr d18, [x27], #0x8\n" "ldr d22, [x25], #0x8\n" "ldr d26, [x23], #0x8\n" "ldr d30, [x21], #0x8\n" "tbz x16, #1, 259f\n" "ld1 { v10.s }[2], [x13], #0x4\n" "ld1 { v14.s }[2], [x9], #0x4\n" "ld1 { v18.s }[2], [x27], #0x4\n" "ld1 { v22.s }[2], [x25], #0x4\n" "ld1 { v26.s }[2], [x23], #0x4\n" "ld1 { v30.s }[2], [x21], #0x4\n" "mov x19, #0x2c\n" "tbz x16, #0, 270f\n" "ld1 { v10.h }[6], [x13]\n" "ld1 { v14.h }[6], [x9]\n" "ld1 { v18.h }[6], [x27]\n" "ld1 { v22.h }[6], [x25]\n" "ld1 { v26.h }[6], [x23]\n" "ld1 { v30.h }[6], [x21]\n" "b 270f\n" "259:" // Height 6: Partial accumulate: partial_1_20 "mov x19, #0x28\n" "tbz x16, #0, 270f\n" "ld1 { v10.h }[4], [x13]\n" "ld1 { v14.h }[4], [x9]\n" "ld1 { v18.h }[4], [x27]\n" "ld1 { v22.h }[4], [x25]\n" "ld1 { v26.h }[4], [x23]\n" "ld1 { v30.h }[4], [x21]\n" "b 270f\n" "260:" // Height 6: Partial accumulate: partial_2_16 "tbz x16, #1, 261f\n" "ldr s10, [x13], #0x4\n" "ldr s14, [x9], #0x4\n" "ldr s18, [x27], #0x4\n" "ldr s22, [x25], #0x4\n" "ldr s26, [x23], #0x4\n" "ldr s30, [x21], #0x4\n" "mov x19, #0x24\n" "tbz x16, #0, 270f\n" "ld1 { v10.h }[2], [x13]\n" "ld1 { v14.h }[2], [x9]\n" "ld1 { v18.h }[2], [x27]\n" "ld1 { v22.h }[2], [x25]\n" "ld1 { v26.h }[2], [x23]\n" "ld1 { v30.h }[2], [x21]\n" "b 270f\n" "261:" // Height 6: Partial accumulate: partial_1_16 "mov x19, #0x20\n" "tbz x16, #0, 270f\n" "ldr h10, [x13, #0x0]\n" "ldr h14, [x9, #0x0]\n" "ldr h18, [x27, #0x0]\n" "ldr h22, [x25, #0x0]\n" "ldr h26, 
[x23, #0x0]\n" "ldr h30, [x21, #0x0]\n" "b 270f\n" "262:" // Height 6: Partial accumulate: partial_8_0 "tbz x16, #3, 266f\n" "ld1 { v8.8h }, [x13], #0x10\n" "ld1 { v12.8h }, [x9], #0x10\n" "ld1 { v16.8h }, [x27], #0x10\n" "ld1 { v20.8h }, [x25], #0x10\n" "ld1 { v24.8h }, [x23], #0x10\n" "ld1 { v28.8h }, [x21], #0x10\n" "tbz x16, #2, 264f\n" "ldr d9, [x13], #0x8\n" "ldr d13, [x9], #0x8\n" "ldr d17, [x27], #0x8\n" "ldr d21, [x25], #0x8\n" "ldr d25, [x23], #0x8\n" "ldr d29, [x21], #0x8\n" "tbz x16, #1, 263f\n" "ld1 { v9.s }[2], [x13], #0x4\n" "ld1 { v13.s }[2], [x9], #0x4\n" "ld1 { v17.s }[2], [x27], #0x4\n" "ld1 { v21.s }[2], [x25], #0x4\n" "ld1 { v25.s }[2], [x23], #0x4\n" "ld1 { v29.s }[2], [x21], #0x4\n" "mov x19, #0x1c\n" "tbz x16, #0, 270f\n" "ld1 { v9.h }[6], [x13]\n" "ld1 { v13.h }[6], [x9]\n" "ld1 { v17.h }[6], [x27]\n" "ld1 { v21.h }[6], [x25]\n" "ld1 { v25.h }[6], [x23]\n" "ld1 { v29.h }[6], [x21]\n" "b 270f\n" "263:" // Height 6: Partial accumulate: partial_1_12 "mov x19, #0x18\n" "tbz x16, #0, 270f\n" "ld1 { v9.h }[4], [x13]\n" "ld1 { v13.h }[4], [x9]\n" "ld1 { v17.h }[4], [x27]\n" "ld1 { v21.h }[4], [x25]\n" "ld1 { v25.h }[4], [x23]\n" "ld1 { v29.h }[4], [x21]\n" "b 270f\n" "264:" // Height 6: Partial accumulate: partial_2_8 "tbz x16, #1, 265f\n" "ldr s9, [x13], #0x4\n" "ldr s13, [x9], #0x4\n" "ldr s17, [x27], #0x4\n" "ldr s21, [x25], #0x4\n" "ldr s25, [x23], #0x4\n" "ldr s29, [x21], #0x4\n" "mov x19, #0x14\n" "tbz x16, #0, 270f\n" "ld1 { v9.h }[2], [x13]\n" "ld1 { v13.h }[2], [x9]\n" "ld1 { v17.h }[2], [x27]\n" "ld1 { v21.h }[2], [x25]\n" "ld1 { v25.h }[2], [x23]\n" "ld1 { v29.h }[2], [x21]\n" "b 270f\n" "265:" // Height 6: Partial accumulate: partial_1_8 "mov x19, #0x10\n" "tbz x16, #0, 270f\n" "ldr h9, [x13, #0x0]\n" "ldr h13, [x9, #0x0]\n" "ldr h17, [x27, #0x0]\n" "ldr h21, [x25, #0x0]\n" "ldr h25, [x23, #0x0]\n" "ldr h29, [x21, #0x0]\n" "b 270f\n" "266:" // Height 6: Partial accumulate: partial_4_0 "tbz x16, #2, 268f\n" "ldr d8, [x13], #0x8\n" "ldr 
d12, [x9], #0x8\n" "ldr d16, [x27], #0x8\n" "ldr d20, [x25], #0x8\n" "ldr d24, [x23], #0x8\n" "ldr d28, [x21], #0x8\n" "tbz x16, #1, 267f\n" "ld1 { v8.s }[2], [x13], #0x4\n" "ld1 { v12.s }[2], [x9], #0x4\n" "ld1 { v16.s }[2], [x27], #0x4\n" "ld1 { v20.s }[2], [x25], #0x4\n" "ld1 { v24.s }[2], [x23], #0x4\n" "ld1 { v28.s }[2], [x21], #0x4\n" "mov x19, #0xc\n" "tbz x16, #0, 270f\n" "ld1 { v8.h }[6], [x13]\n" "ld1 { v12.h }[6], [x9]\n" "ld1 { v16.h }[6], [x27]\n" "ld1 { v20.h }[6], [x25]\n" "ld1 { v24.h }[6], [x23]\n" "ld1 { v28.h }[6], [x21]\n" "b 270f\n" "267:" // Height 6: Partial accumulate: partial_1_4 "mov x19, #0x8\n" "tbz x16, #0, 270f\n" "ld1 { v8.h }[4], [x13]\n" "ld1 { v12.h }[4], [x9]\n" "ld1 { v16.h }[4], [x27]\n" "ld1 { v20.h }[4], [x25]\n" "ld1 { v24.h }[4], [x23]\n" "ld1 { v28.h }[4], [x21]\n" "b 270f\n" "268:" // Height 6: Partial accumulate: partial_2_0 "tbz x16, #1, 269f\n" "ldr s8, [x13], #0x4\n" "ldr s12, [x9], #0x4\n" "ldr s16, [x27], #0x4\n" "ldr s20, [x25], #0x4\n" "ldr s24, [x23], #0x4\n" "ldr s28, [x21], #0x4\n" "mov x19, #0x4\n" "tbz x16, #0, 270f\n" "ld1 { v8.h }[2], [x13]\n" "ld1 { v12.h }[2], [x9]\n" "ld1 { v16.h }[2], [x27]\n" "ld1 { v20.h }[2], [x25]\n" "ld1 { v24.h }[2], [x23]\n" "ld1 { v28.h }[2], [x21]\n" "b 270f\n" "269:" // Height 6: Partial accumulate: partial_1_0 "mov x19, #0x0\n" "ldr h8, [x13, #0x0]\n" "ldr h12, [x9, #0x0]\n" "ldr h16, [x27, #0x0]\n" "ldr h20, [x25, #0x0]\n" "ldr h24, [x23, #0x0]\n" "ldr h28, [x21, #0x0]\n" "270:" // Height 6: Partial accumulate: Done "sub x13, x13, x19\n" "sub x9, x9, x19\n" "sub x27, x27, x19\n" "sub x25, x25, x19\n" "sub x23, x23, x19\n" "sub x21, x21, x19\n" "b 273f\n" "271:" // Height 6: full accumulate "ldr q8, [x13, #0x0]\n" "ldr q9, [x13, #0x10]\n" "ldr q10, [x13, #0x20]\n" "ldr q11, [x13, #0x30]\n" "ldr q12, [x9, #0x0]\n" "ldr q13, [x9, #0x10]\n" "ldr q14, [x9, #0x20]\n" "ldr q15, [x9, #0x30]\n" "ldr q16, [x27, #0x0]\n" "ldr q17, [x27, #0x10]\n" "ldr q18, [x27, #0x20]\n" "ldr q19, 
[x27, #0x30]\n" "ldr q20, [x25, #0x0]\n" "ldr q21, [x25, #0x10]\n" "ldr q22, [x25, #0x20]\n" "ldr q23, [x25, #0x30]\n" "ldr q24, [x23, #0x0]\n" "ldr q25, [x23, #0x10]\n" "ldr q26, [x23, #0x20]\n" "ldr q27, [x23, #0x30]\n" "ldr q28, [x21, #0x0]\n" "ldr q29, [x21, #0x10]\n" "ldr q30, [x21, #0x20]\n" "ldr q31, [x21, #0x30]\n" "b 273f\n" "272:" // Height 6: no accumulate "movi v8.16b, #0x0\n" "movi v9.16b, #0x0\n" "movi v10.16b, #0x0\n" "movi v11.16b, #0x0\n" "movi v12.16b, #0x0\n" "movi v13.16b, #0x0\n" "movi v14.16b, #0x0\n" "movi v15.16b, #0x0\n" "movi v16.16b, #0x0\n" "movi v17.16b, #0x0\n" "movi v18.16b, #0x0\n" "movi v19.16b, #0x0\n" "movi v20.16b, #0x0\n" "movi v21.16b, #0x0\n" "movi v22.16b, #0x0\n" "movi v23.16b, #0x0\n" "movi v24.16b, #0x0\n" "movi v25.16b, #0x0\n" "movi v26.16b, #0x0\n" "movi v27.16b, #0x0\n" "movi v28.16b, #0x0\n" "movi v29.16b, #0x0\n" "movi v30.16b, #0x0\n" "movi v31.16b, #0x0\n" "273:" // Height 6: setup done "mov x12, #0x0\n" "274:" // Height 6: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n" "ldr w11, [x20, x12, LSL #0x2]\n" "tbz %x[flags], #3, 275f\n" "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n" "add x20, x20, x19, LSL #3\n" "ldr x10, [x20, #0x0]\n" "ldr x28, [x20, #0x8]\n" "ldr x26, [x20, #0x10]\n" "ldr x24, [x20, #0x18]\n" "ldr x22, [x20, #0x20]\n" "ldr x20, [x20, #0x28]\n" "cbnz x12, 276f\n" "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n" "add x10, x10, x19, LSL #1\n" "add x28, x28, x19, LSL #1\n" "add x26, x26, x19, LSL #1\n" "add x24, x24, x19, LSL #1\n" "add x22, x22, x19, LSL #1\n" "add x20, x20, x19, LSL #1\n" "b 276f\n" "275:" // Height 6: setup direct input "mov x10, %x[input_ptr]\n" "add x28, x10, x19, LSL #1\n" "add x26, x28, x19, LSL #1\n" "add x24, x26, x19, LSL #1\n" "add x22, x24, x19, LSL #1\n" "add x20, x22, x19, LSL #1\n" "276:" // Height 6: input setup done "cmp x11, #0x8\n" "blt 279f\n" "cmp x11, #0x10\n" "blt 278f\n" "277:" // 
Height 6: Multiply loop: Main loop head "ldr q0, [x10, #0x0]\n" "ldr q1, [x28, #0x0]\n" "ldr q2, [x26, #0x0]\n" "ldr q3, [x24, #0x0]\n" "ldr q4, [x22, #0x0]\n" "ldr q5, [x20, #0x0]\n" "ldr q6, [x15, #0x0]\n" "fmla v8.8h, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" "fmla v12.8h, v6.8h, v1.h[0]\n" "add x10, x10, #0x10\n" "prfm pldl1keep, [x10, #0x80]\n" "fmla v16.8h, v6.8h, v2.h[0]\n" "add x28, x28, #0x10\n" "fmla v20.8h, v6.8h, v3.h[0]\n" "prfm pldl1keep, [x28, #0x80]\n" "add x26, x26, #0x10\n" "fmla v24.8h, v6.8h, v4.h[0]\n" "prfm pldl1keep, [x26, #0x80]\n" "add x24, x24, #0x10\n" "fmla v28.8h, v6.8h, v5.h[0]\n" "prfm pldl1keep, [x24, #0x80]\n" "ldr q6, [x15, #0x20]\n" "fmla v9.8h, v7.8h, v0.h[0]\n" "add x22, x22, #0x10\n" "prfm pldl1keep, [x22, #0x80]\n" "fmla v13.8h, v7.8h, v1.h[0]\n" "add x20, x20, #0x10\n" "fmla v17.8h, v7.8h, v2.h[0]\n" "prfm pldl1keep, [x20, #0x80]\n" "sub x11, x11, #0x8\n" "fmla v21.8h, v7.8h, v3.h[0]\n" "cmp x11, #0x10\n" "fmla v25.8h, v7.8h, v4.h[0]\n" "fmla v29.8h, v7.8h, v5.h[0]\n" "ldr q7, [x15, #0x30]\n" "fmla v10.8h, v6.8h, v0.h[0]\n" "fmla v14.8h, v6.8h, v1.h[0]\n" "fmla v18.8h, v6.8h, v2.h[0]\n" "fmla v22.8h, v6.8h, v3.h[0]\n" "fmla v26.8h, v6.8h, v4.h[0]\n" "fmla v30.8h, v6.8h, v5.h[0]\n" "ldr q6, [x15, #0x40]\n" "fmla v11.8h, v7.8h, v0.h[0]\n" "fmla v15.8h, v7.8h, v1.h[0]\n" "fmla v19.8h, v7.8h, v2.h[0]\n" "fmla v23.8h, v7.8h, v3.h[0]\n" "fmla v27.8h, v7.8h, v4.h[0]\n" "fmla v31.8h, v7.8h, v5.h[0]\n" "ldr q7, [x15, #0x50]\n" "fmla v8.8h, v6.8h, v0.h[1]\n" "fmla v12.8h, v6.8h, v1.h[1]\n" "fmla v16.8h, v6.8h, v2.h[1]\n" "fmla v20.8h, v6.8h, v3.h[1]\n" "fmla v24.8h, v6.8h, v4.h[1]\n" "fmla v28.8h, v6.8h, v5.h[1]\n" "ldr q6, [x15, #0x60]\n" "fmla v9.8h, v7.8h, v0.h[1]\n" "fmla v13.8h, v7.8h, v1.h[1]\n" "fmla v17.8h, v7.8h, v2.h[1]\n" "fmla v21.8h, v7.8h, v3.h[1]\n" "fmla v25.8h, v7.8h, v4.h[1]\n" "fmla v29.8h, v7.8h, v5.h[1]\n" "ldr q7, [x15, #0x70]\n" "fmla v10.8h, v6.8h, v0.h[1]\n" "fmla v14.8h, v6.8h, v1.h[1]\n" "fmla v18.8h, v6.8h, 
v2.h[1]\n" "fmla v22.8h, v6.8h, v3.h[1]\n" "fmla v26.8h, v6.8h, v4.h[1]\n" "fmla v30.8h, v6.8h, v5.h[1]\n" "ldr q6, [x15, #0x80]\n" "fmla v11.8h, v7.8h, v0.h[1]\n" "fmla v15.8h, v7.8h, v1.h[1]\n" "fmla v19.8h, v7.8h, v2.h[1]\n" "fmla v23.8h, v7.8h, v3.h[1]\n" "fmla v27.8h, v7.8h, v4.h[1]\n" "fmla v31.8h, v7.8h, v5.h[1]\n" "ldr q7, [x15, #0x90]\n" "fmla v8.8h, v6.8h, v0.h[2]\n" "fmla v12.8h, v6.8h, v1.h[2]\n" "fmla v16.8h, v6.8h, v2.h[2]\n" "fmla v20.8h, v6.8h, v3.h[2]\n" "fmla v24.8h, v6.8h, v4.h[2]\n" "fmla v28.8h, v6.8h, v5.h[2]\n" "ldr q6, [x15, #0xa0]\n" "fmla v9.8h, v7.8h, v0.h[2]\n" "fmla v13.8h, v7.8h, v1.h[2]\n" "fmla v17.8h, v7.8h, v2.h[2]\n" "fmla v21.8h, v7.8h, v3.h[2]\n" "fmla v25.8h, v7.8h, v4.h[2]\n" "fmla v29.8h, v7.8h, v5.h[2]\n" "ldr q7, [x15, #0xb0]\n" "fmla v10.8h, v6.8h, v0.h[2]\n" "fmla v14.8h, v6.8h, v1.h[2]\n" "fmla v18.8h, v6.8h, v2.h[2]\n" "fmla v22.8h, v6.8h, v3.h[2]\n" "fmla v26.8h, v6.8h, v4.h[2]\n" "fmla v30.8h, v6.8h, v5.h[2]\n" "ldr q6, [x15, #0xc0]\n" "fmla v11.8h, v7.8h, v0.h[2]\n" "fmla v15.8h, v7.8h, v1.h[2]\n" "fmla v19.8h, v7.8h, v2.h[2]\n" "fmla v23.8h, v7.8h, v3.h[2]\n" "fmla v27.8h, v7.8h, v4.h[2]\n" "fmla v31.8h, v7.8h, v5.h[2]\n" "ldr q7, [x15, #0xd0]\n" "fmla v8.8h, v6.8h, v0.h[3]\n" "fmla v12.8h, v6.8h, v1.h[3]\n" "fmla v16.8h, v6.8h, v2.h[3]\n" "fmla v20.8h, v6.8h, v3.h[3]\n" "fmla v24.8h, v6.8h, v4.h[3]\n" "fmla v28.8h, v6.8h, v5.h[3]\n" "ldr q6, [x15, #0xe0]\n" "fmla v9.8h, v7.8h, v0.h[3]\n" "fmla v13.8h, v7.8h, v1.h[3]\n" "fmla v17.8h, v7.8h, v2.h[3]\n" "fmla v21.8h, v7.8h, v3.h[3]\n" "fmla v25.8h, v7.8h, v4.h[3]\n" "fmla v29.8h, v7.8h, v5.h[3]\n" "ldr q7, [x15, #0xf0]\n" "fmla v10.8h, v6.8h, v0.h[3]\n" "fmla v14.8h, v6.8h, v1.h[3]\n" "fmla v18.8h, v6.8h, v2.h[3]\n" "fmla v22.8h, v6.8h, v3.h[3]\n" "fmla v26.8h, v6.8h, v4.h[3]\n" "fmla v30.8h, v6.8h, v5.h[3]\n" "ldr q6, [x15, #0x100]\n" "fmla v11.8h, v7.8h, v0.h[3]\n" "fmla v15.8h, v7.8h, v1.h[3]\n" "fmla v19.8h, v7.8h, v2.h[3]\n" "fmla v23.8h, v7.8h, v3.h[3]\n" "fmla 
v27.8h, v7.8h, v4.h[3]\n" "fmla v31.8h, v7.8h, v5.h[3]\n" "ldr q7, [x15, #0x110]\n" "fmla v8.8h, v6.8h, v0.h[4]\n" "fmla v12.8h, v6.8h, v1.h[4]\n" "fmla v16.8h, v6.8h, v2.h[4]\n" "fmla v20.8h, v6.8h, v3.h[4]\n" "fmla v24.8h, v6.8h, v4.h[4]\n" "fmla v28.8h, v6.8h, v5.h[4]\n" "ldr q6, [x15, #0x120]\n" "fmla v9.8h, v7.8h, v0.h[4]\n" "fmla v13.8h, v7.8h, v1.h[4]\n" "fmla v17.8h, v7.8h, v2.h[4]\n" "fmla v21.8h, v7.8h, v3.h[4]\n" "fmla v25.8h, v7.8h, v4.h[4]\n" "fmla v29.8h, v7.8h, v5.h[4]\n" "ldr q7, [x15, #0x130]\n" "fmla v10.8h, v6.8h, v0.h[4]\n" "fmla v14.8h, v6.8h, v1.h[4]\n" "fmla v18.8h, v6.8h, v2.h[4]\n" "fmla v22.8h, v6.8h, v3.h[4]\n" "fmla v26.8h, v6.8h, v4.h[4]\n" "fmla v30.8h, v6.8h, v5.h[4]\n" "ldr q6, [x15, #0x140]\n" "fmla v11.8h, v7.8h, v0.h[4]\n" "fmla v15.8h, v7.8h, v1.h[4]\n" "fmla v19.8h, v7.8h, v2.h[4]\n" "fmla v23.8h, v7.8h, v3.h[4]\n" "fmla v27.8h, v7.8h, v4.h[4]\n" "fmla v31.8h, v7.8h, v5.h[4]\n" "ldr q7, [x15, #0x150]\n" "fmla v8.8h, v6.8h, v0.h[5]\n" "fmla v12.8h, v6.8h, v1.h[5]\n" "fmla v16.8h, v6.8h, v2.h[5]\n" "fmla v20.8h, v6.8h, v3.h[5]\n" "fmla v24.8h, v6.8h, v4.h[5]\n" "fmla v28.8h, v6.8h, v5.h[5]\n" "ldr q6, [x15, #0x160]\n" "fmla v9.8h, v7.8h, v0.h[5]\n" "fmla v13.8h, v7.8h, v1.h[5]\n" "fmla v17.8h, v7.8h, v2.h[5]\n" "fmla v21.8h, v7.8h, v3.h[5]\n" "fmla v25.8h, v7.8h, v4.h[5]\n" "fmla v29.8h, v7.8h, v5.h[5]\n" "ldr q7, [x15, #0x170]\n" "fmla v10.8h, v6.8h, v0.h[5]\n" "fmla v14.8h, v6.8h, v1.h[5]\n" "fmla v18.8h, v6.8h, v2.h[5]\n" "fmla v22.8h, v6.8h, v3.h[5]\n" "fmla v26.8h, v6.8h, v4.h[5]\n" "fmla v30.8h, v6.8h, v5.h[5]\n" "ldr q6, [x15, #0x180]\n" "fmla v11.8h, v7.8h, v0.h[5]\n" "fmla v15.8h, v7.8h, v1.h[5]\n" "fmla v19.8h, v7.8h, v2.h[5]\n" "fmla v23.8h, v7.8h, v3.h[5]\n" "fmla v27.8h, v7.8h, v4.h[5]\n" "fmla v31.8h, v7.8h, v5.h[5]\n" "ldr q7, [x15, #0x190]\n" "fmla v8.8h, v6.8h, v0.h[6]\n" "fmla v12.8h, v6.8h, v1.h[6]\n" "fmla v16.8h, v6.8h, v2.h[6]\n" "fmla v20.8h, v6.8h, v3.h[6]\n" "fmla v24.8h, v6.8h, v4.h[6]\n" "fmla v28.8h, 
v6.8h, v5.h[6]\n" "ldr q6, [x15, #0x1a0]\n" "fmla v9.8h, v7.8h, v0.h[6]\n" "fmla v13.8h, v7.8h, v1.h[6]\n" "fmla v17.8h, v7.8h, v2.h[6]\n" "fmla v21.8h, v7.8h, v3.h[6]\n" "fmla v25.8h, v7.8h, v4.h[6]\n" "fmla v29.8h, v7.8h, v5.h[6]\n" "ldr q7, [x15, #0x1b0]\n" "fmla v10.8h, v6.8h, v0.h[6]\n" "fmla v14.8h, v6.8h, v1.h[6]\n" "fmla v18.8h, v6.8h, v2.h[6]\n" "fmla v22.8h, v6.8h, v3.h[6]\n" "fmla v26.8h, v6.8h, v4.h[6]\n" "fmla v30.8h, v6.8h, v5.h[6]\n" "ldr q6, [x15, #0x1c0]\n" "fmla v11.8h, v7.8h, v0.h[6]\n" "fmla v15.8h, v7.8h, v1.h[6]\n" "fmla v19.8h, v7.8h, v2.h[6]\n" "fmla v23.8h, v7.8h, v3.h[6]\n" "fmla v27.8h, v7.8h, v4.h[6]\n" "fmla v31.8h, v7.8h, v5.h[6]\n" "ldr q7, [x15, #0x1d0]\n" "fmla v8.8h, v6.8h, v0.h[7]\n" "fmla v12.8h, v6.8h, v1.h[7]\n" "fmla v16.8h, v6.8h, v2.h[7]\n" "fmla v20.8h, v6.8h, v3.h[7]\n" "fmla v24.8h, v6.8h, v4.h[7]\n" "fmla v28.8h, v6.8h, v5.h[7]\n" "ldr q6, [x15, #0x1e0]\n" "fmla v9.8h, v7.8h, v0.h[7]\n" "fmla v13.8h, v7.8h, v1.h[7]\n" "fmla v17.8h, v7.8h, v2.h[7]\n" "fmla v21.8h, v7.8h, v3.h[7]\n" "fmla v25.8h, v7.8h, v4.h[7]\n" "fmla v29.8h, v7.8h, v5.h[7]\n" "ldr q7, [x15, #0x1f0]\n" "fmla v10.8h, v6.8h, v0.h[7]\n" "add x15, x15, #0x200\n" "fmla v14.8h, v6.8h, v1.h[7]\n" "fmla v18.8h, v6.8h, v2.h[7]\n" "fmla v22.8h, v6.8h, v3.h[7]\n" "fmla v26.8h, v6.8h, v4.h[7]\n" "fmla v30.8h, v6.8h, v5.h[7]\n" "fmla v11.8h, v7.8h, v0.h[7]\n" "fmla v15.8h, v7.8h, v1.h[7]\n" "fmla v19.8h, v7.8h, v2.h[7]\n" "fmla v23.8h, v7.8h, v3.h[7]\n" "fmla v27.8h, v7.8h, v4.h[7]\n" "fmla v31.8h, v7.8h, v5.h[7]\n" "bge 277b\n" "278:" // Height 6: Multiply loop: Single iteration only "sub x11, x11, #0x8\n" "ldr q0, [x10, #0x0]\n" "ldr q1, [x28, #0x0]\n" "ldr q2, [x26, #0x0]\n" "ldr q3, [x24, #0x0]\n" "ldr q4, [x22, #0x0]\n" "ldr q5, [x20, #0x0]\n" "ldr q6, [x15, #0x0]\n" "fmla v8.8h, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" "fmla v12.8h, v6.8h, v1.h[0]\n" "add x10, x10, #0x10\n" "prfm pldl1keep, [x10, #0x80]\n" "fmla v16.8h, v6.8h, v2.h[0]\n" "add x28, x28, 
#0x10\n" "fmla v20.8h, v6.8h, v3.h[0]\n" "prfm pldl1keep, [x28, #0x80]\n" "add x26, x26, #0x10\n" "fmla v24.8h, v6.8h, v4.h[0]\n" "prfm pldl1keep, [x26, #0x80]\n" "add x24, x24, #0x10\n" "fmla v28.8h, v6.8h, v5.h[0]\n" "prfm pldl1keep, [x24, #0x80]\n" "ldr q6, [x15, #0x20]\n" "fmla v9.8h, v7.8h, v0.h[0]\n" "add x22, x22, #0x10\n" "prfm pldl1keep, [x22, #0x80]\n" "fmla v13.8h, v7.8h, v1.h[0]\n" "add x20, x20, #0x10\n" "fmla v17.8h, v7.8h, v2.h[0]\n" "prfm pldl1keep, [x20, #0x80]\n" "fmla v21.8h, v7.8h, v3.h[0]\n" "fmla v25.8h, v7.8h, v4.h[0]\n" "fmla v29.8h, v7.8h, v5.h[0]\n" "ldr q7, [x15, #0x30]\n" "fmla v10.8h, v6.8h, v0.h[0]\n" "fmla v14.8h, v6.8h, v1.h[0]\n" "fmla v18.8h, v6.8h, v2.h[0]\n" "fmla v22.8h, v6.8h, v3.h[0]\n" "fmla v26.8h, v6.8h, v4.h[0]\n" "fmla v30.8h, v6.8h, v5.h[0]\n" "ldr q6, [x15, #0x40]\n" "fmla v11.8h, v7.8h, v0.h[0]\n" "fmla v15.8h, v7.8h, v1.h[0]\n" "fmla v19.8h, v7.8h, v2.h[0]\n" "fmla v23.8h, v7.8h, v3.h[0]\n" "fmla v27.8h, v7.8h, v4.h[0]\n" "fmla v31.8h, v7.8h, v5.h[0]\n" "ldr q7, [x15, #0x50]\n" "fmla v8.8h, v6.8h, v0.h[1]\n" "fmla v12.8h, v6.8h, v1.h[1]\n" "fmla v16.8h, v6.8h, v2.h[1]\n" "fmla v20.8h, v6.8h, v3.h[1]\n" "fmla v24.8h, v6.8h, v4.h[1]\n" "fmla v28.8h, v6.8h, v5.h[1]\n" "ldr q6, [x15, #0x60]\n" "fmla v9.8h, v7.8h, v0.h[1]\n" "fmla v13.8h, v7.8h, v1.h[1]\n" "fmla v17.8h, v7.8h, v2.h[1]\n" "fmla v21.8h, v7.8h, v3.h[1]\n" "fmla v25.8h, v7.8h, v4.h[1]\n" "fmla v29.8h, v7.8h, v5.h[1]\n" "ldr q7, [x15, #0x70]\n" "fmla v10.8h, v6.8h, v0.h[1]\n" "fmla v14.8h, v6.8h, v1.h[1]\n" "fmla v18.8h, v6.8h, v2.h[1]\n" "fmla v22.8h, v6.8h, v3.h[1]\n" "fmla v26.8h, v6.8h, v4.h[1]\n" "fmla v30.8h, v6.8h, v5.h[1]\n" "ldr q6, [x15, #0x80]\n" "fmla v11.8h, v7.8h, v0.h[1]\n" "fmla v15.8h, v7.8h, v1.h[1]\n" "fmla v19.8h, v7.8h, v2.h[1]\n" "fmla v23.8h, v7.8h, v3.h[1]\n" "fmla v27.8h, v7.8h, v4.h[1]\n" "fmla v31.8h, v7.8h, v5.h[1]\n" "ldr q7, [x15, #0x90]\n" "fmla v8.8h, v6.8h, v0.h[2]\n" "fmla v12.8h, v6.8h, v1.h[2]\n" "fmla v16.8h, v6.8h, 
v2.h[2]\n" "fmla v20.8h, v6.8h, v3.h[2]\n" "fmla v24.8h, v6.8h, v4.h[2]\n" "fmla v28.8h, v6.8h, v5.h[2]\n" "ldr q6, [x15, #0xa0]\n" "fmla v9.8h, v7.8h, v0.h[2]\n" "fmla v13.8h, v7.8h, v1.h[2]\n" "fmla v17.8h, v7.8h, v2.h[2]\n" "fmla v21.8h, v7.8h, v3.h[2]\n" "fmla v25.8h, v7.8h, v4.h[2]\n" "fmla v29.8h, v7.8h, v5.h[2]\n" "ldr q7, [x15, #0xb0]\n" "fmla v10.8h, v6.8h, v0.h[2]\n" "fmla v14.8h, v6.8h, v1.h[2]\n" "fmla v18.8h, v6.8h, v2.h[2]\n" "fmla v22.8h, v6.8h, v3.h[2]\n" "fmla v26.8h, v6.8h, v4.h[2]\n" "fmla v30.8h, v6.8h, v5.h[2]\n" "ldr q6, [x15, #0xc0]\n" "fmla v11.8h, v7.8h, v0.h[2]\n" "fmla v15.8h, v7.8h, v1.h[2]\n" "fmla v19.8h, v7.8h, v2.h[2]\n" "fmla v23.8h, v7.8h, v3.h[2]\n" "fmla v27.8h, v7.8h, v4.h[2]\n" "fmla v31.8h, v7.8h, v5.h[2]\n" "ldr q7, [x15, #0xd0]\n" "fmla v8.8h, v6.8h, v0.h[3]\n" "fmla v12.8h, v6.8h, v1.h[3]\n" "fmla v16.8h, v6.8h, v2.h[3]\n" "fmla v20.8h, v6.8h, v3.h[3]\n" "fmla v24.8h, v6.8h, v4.h[3]\n" "fmla v28.8h, v6.8h, v5.h[3]\n" "ldr q6, [x15, #0xe0]\n" "fmla v9.8h, v7.8h, v0.h[3]\n" "fmla v13.8h, v7.8h, v1.h[3]\n" "fmla v17.8h, v7.8h, v2.h[3]\n" "fmla v21.8h, v7.8h, v3.h[3]\n" "fmla v25.8h, v7.8h, v4.h[3]\n" "fmla v29.8h, v7.8h, v5.h[3]\n" "ldr q7, [x15, #0xf0]\n" "fmla v10.8h, v6.8h, v0.h[3]\n" "fmla v14.8h, v6.8h, v1.h[3]\n" "fmla v18.8h, v6.8h, v2.h[3]\n" "fmla v22.8h, v6.8h, v3.h[3]\n" "fmla v26.8h, v6.8h, v4.h[3]\n" "fmla v30.8h, v6.8h, v5.h[3]\n" "ldr q6, [x15, #0x100]\n" "fmla v11.8h, v7.8h, v0.h[3]\n" "fmla v15.8h, v7.8h, v1.h[3]\n" "fmla v19.8h, v7.8h, v2.h[3]\n" "fmla v23.8h, v7.8h, v3.h[3]\n" "fmla v27.8h, v7.8h, v4.h[3]\n" "fmla v31.8h, v7.8h, v5.h[3]\n" "ldr q7, [x15, #0x110]\n" "fmla v8.8h, v6.8h, v0.h[4]\n" "fmla v12.8h, v6.8h, v1.h[4]\n" "fmla v16.8h, v6.8h, v2.h[4]\n" "fmla v20.8h, v6.8h, v3.h[4]\n" "fmla v24.8h, v6.8h, v4.h[4]\n" "fmla v28.8h, v6.8h, v5.h[4]\n" "ldr q6, [x15, #0x120]\n" "fmla v9.8h, v7.8h, v0.h[4]\n" "fmla v13.8h, v7.8h, v1.h[4]\n" "fmla v17.8h, v7.8h, v2.h[4]\n" "fmla v21.8h, v7.8h, v3.h[4]\n" "fmla 
v25.8h, v7.8h, v4.h[4]\n" "fmla v29.8h, v7.8h, v5.h[4]\n" "ldr q7, [x15, #0x130]\n" "fmla v10.8h, v6.8h, v0.h[4]\n" "fmla v14.8h, v6.8h, v1.h[4]\n" "fmla v18.8h, v6.8h, v2.h[4]\n" "fmla v22.8h, v6.8h, v3.h[4]\n" "fmla v26.8h, v6.8h, v4.h[4]\n" "fmla v30.8h, v6.8h, v5.h[4]\n" "ldr q6, [x15, #0x140]\n" "fmla v11.8h, v7.8h, v0.h[4]\n" "fmla v15.8h, v7.8h, v1.h[4]\n" "fmla v19.8h, v7.8h, v2.h[4]\n" "fmla v23.8h, v7.8h, v3.h[4]\n" "fmla v27.8h, v7.8h, v4.h[4]\n" "fmla v31.8h, v7.8h, v5.h[4]\n" "ldr q7, [x15, #0x150]\n" "fmla v8.8h, v6.8h, v0.h[5]\n" "fmla v12.8h, v6.8h, v1.h[5]\n" "fmla v16.8h, v6.8h, v2.h[5]\n" "fmla v20.8h, v6.8h, v3.h[5]\n" "fmla v24.8h, v6.8h, v4.h[5]\n" "fmla v28.8h, v6.8h, v5.h[5]\n" "ldr q6, [x15, #0x160]\n" "fmla v9.8h, v7.8h, v0.h[5]\n" "fmla v13.8h, v7.8h, v1.h[5]\n" "fmla v17.8h, v7.8h, v2.h[5]\n" "fmla v21.8h, v7.8h, v3.h[5]\n" "fmla v25.8h, v7.8h, v4.h[5]\n" "fmla v29.8h, v7.8h, v5.h[5]\n" "ldr q7, [x15, #0x170]\n" "fmla v10.8h, v6.8h, v0.h[5]\n" "fmla v14.8h, v6.8h, v1.h[5]\n" "fmla v18.8h, v6.8h, v2.h[5]\n" "fmla v22.8h, v6.8h, v3.h[5]\n" "fmla v26.8h, v6.8h, v4.h[5]\n" "fmla v30.8h, v6.8h, v5.h[5]\n" "ldr q6, [x15, #0x180]\n" "fmla v11.8h, v7.8h, v0.h[5]\n" "fmla v15.8h, v7.8h, v1.h[5]\n" "fmla v19.8h, v7.8h, v2.h[5]\n" "fmla v23.8h, v7.8h, v3.h[5]\n" "fmla v27.8h, v7.8h, v4.h[5]\n" "fmla v31.8h, v7.8h, v5.h[5]\n" "ldr q7, [x15, #0x190]\n" "fmla v8.8h, v6.8h, v0.h[6]\n" "fmla v12.8h, v6.8h, v1.h[6]\n" "fmla v16.8h, v6.8h, v2.h[6]\n" "fmla v20.8h, v6.8h, v3.h[6]\n" "fmla v24.8h, v6.8h, v4.h[6]\n" "fmla v28.8h, v6.8h, v5.h[6]\n" "ldr q6, [x15, #0x1a0]\n" "fmla v9.8h, v7.8h, v0.h[6]\n" "fmla v13.8h, v7.8h, v1.h[6]\n" "fmla v17.8h, v7.8h, v2.h[6]\n" "fmla v21.8h, v7.8h, v3.h[6]\n" "fmla v25.8h, v7.8h, v4.h[6]\n" "fmla v29.8h, v7.8h, v5.h[6]\n" "ldr q7, [x15, #0x1b0]\n" "fmla v10.8h, v6.8h, v0.h[6]\n" "fmla v14.8h, v6.8h, v1.h[6]\n" "fmla v18.8h, v6.8h, v2.h[6]\n" "fmla v22.8h, v6.8h, v3.h[6]\n" "fmla v26.8h, v6.8h, v4.h[6]\n" "fmla v30.8h, 
v6.8h, v5.h[6]\n" "ldr q6, [x15, #0x1c0]\n" "fmla v11.8h, v7.8h, v0.h[6]\n" "fmla v15.8h, v7.8h, v1.h[6]\n" "fmla v19.8h, v7.8h, v2.h[6]\n" "fmla v23.8h, v7.8h, v3.h[6]\n" "fmla v27.8h, v7.8h, v4.h[6]\n" "fmla v31.8h, v7.8h, v5.h[6]\n" "ldr q7, [x15, #0x1d0]\n" "fmla v8.8h, v6.8h, v0.h[7]\n" "fmla v12.8h, v6.8h, v1.h[7]\n" "fmla v16.8h, v6.8h, v2.h[7]\n" "fmla v20.8h, v6.8h, v3.h[7]\n" "fmla v24.8h, v6.8h, v4.h[7]\n" "fmla v28.8h, v6.8h, v5.h[7]\n" "ldr q6, [x15, #0x1e0]\n" "fmla v9.8h, v7.8h, v0.h[7]\n" "fmla v13.8h, v7.8h, v1.h[7]\n" "fmla v17.8h, v7.8h, v2.h[7]\n" "fmla v21.8h, v7.8h, v3.h[7]\n" "fmla v25.8h, v7.8h, v4.h[7]\n" "fmla v29.8h, v7.8h, v5.h[7]\n" "ldr q7, [x15, #0x1f0]\n" "fmla v10.8h, v6.8h, v0.h[7]\n" "add x15, x15, #0x200\n" "fmla v14.8h, v6.8h, v1.h[7]\n" "fmla v18.8h, v6.8h, v2.h[7]\n" "fmla v22.8h, v6.8h, v3.h[7]\n" "fmla v26.8h, v6.8h, v4.h[7]\n" "fmla v30.8h, v6.8h, v5.h[7]\n" "fmla v11.8h, v7.8h, v0.h[7]\n" "fmla v15.8h, v7.8h, v1.h[7]\n" "fmla v19.8h, v7.8h, v2.h[7]\n" "fmla v23.8h, v7.8h, v3.h[7]\n" "fmla v27.8h, v7.8h, v4.h[7]\n" "fmla v31.8h, v7.8h, v5.h[7]\n" "279:" // Height 6: Multiply loop: Main loop skip "cbz x11, 281f\n" "280:" // Height 6: Multiply loop: Odd block loop "ldr h0, [x10], #0x2\n" "ldr h1, [x28], #0x2\n" "ldr h2, [x26], #0x2\n" "ldr h3, [x24], #0x2\n" "ldr h4, [x22], #0x2\n" "ldr h5, [x20], #0x2\n" "ldr q6, [x15, #0x0]\n" "fmla v8.8h, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" "fmla v12.8h, v6.8h, v1.h[0]\n" "sub x11, x11, #0x1\n" "fmla v16.8h, v6.8h, v2.h[0]\n" "fmla v20.8h, v6.8h, v3.h[0]\n" "fmla v24.8h, v6.8h, v4.h[0]\n" "fmla v28.8h, v6.8h, v5.h[0]\n" "ldr q6, [x15, #0x20]\n" "fmla v9.8h, v7.8h, v0.h[0]\n" "fmla v13.8h, v7.8h, v1.h[0]\n" "fmla v17.8h, v7.8h, v2.h[0]\n" "fmla v21.8h, v7.8h, v3.h[0]\n" "fmla v25.8h, v7.8h, v4.h[0]\n" "fmla v29.8h, v7.8h, v5.h[0]\n" "ldr q7, [x15, #0x30]\n" "fmla v10.8h, v6.8h, v0.h[0]\n" "add x15, x15, #0x40\n" "fmla v14.8h, v6.8h, v1.h[0]\n" "fmla v18.8h, v6.8h, v2.h[0]\n" "fmla 
v22.8h, v6.8h, v3.h[0]\n" "fmla v26.8h, v6.8h, v4.h[0]\n" "fmla v30.8h, v6.8h, v5.h[0]\n" "fmla v11.8h, v7.8h, v0.h[0]\n" "fmla v15.8h, v7.8h, v1.h[0]\n" "fmla v19.8h, v7.8h, v2.h[0]\n" "fmla v23.8h, v7.8h, v3.h[0]\n" "fmla v27.8h, v7.8h, v4.h[0]\n" "fmla v31.8h, v7.8h, v5.h[0]\n" "cbnz x11, 280b\n" "281:" // Height 6: Multiply loop: No odd multiplies "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "add x12, x12, #0x1\n" "cmp x12, x19\n" "bne 274b\n" "prfm pstl1keep, [x13, #0x0]\n" "prfm pstl1keep, [x9, #0x0]\n" "prfm pstl1keep, [x27, #0x0]\n" "prfm pstl1keep, [x25, #0x0]\n" "prfm pstl1keep, [x23, #0x0]\n" "prfm pstl1keep, [x21, #0x0]\n" "tbz %x[flags], #1, 282f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1r { v1.8h }, [x19]\n" "add x19, %x[args_ptr], %[offset_max]\n" "ld1r { v0.8h }, [x19]\n" "fmin v8.8h, v8.8h, v0.8h\n" "fmin v9.8h, v9.8h, v0.8h\n" "fmin v10.8h, v10.8h, v0.8h\n" "fmin v11.8h, v11.8h, v0.8h\n" "fmax v8.8h, v8.8h, v1.8h\n" "fmax v9.8h, v9.8h, v1.8h\n" "fmax v10.8h, v10.8h, v1.8h\n" "fmax v11.8h, v11.8h, v1.8h\n" "fmin v12.8h, v12.8h, v0.8h\n" "fmin v13.8h, v13.8h, v0.8h\n" "fmin v14.8h, v14.8h, v0.8h\n" "fmax v12.8h, v12.8h, v1.8h\n" "fmax v13.8h, v13.8h, v1.8h\n" "fmax v14.8h, v14.8h, v1.8h\n" "fmin v15.8h, v15.8h, v0.8h\n" "fmin v16.8h, v16.8h, v0.8h\n" "fmin v17.8h, v17.8h, v0.8h\n" "fmax v15.8h, v15.8h, v1.8h\n" "fmax v16.8h, v16.8h, v1.8h\n" "fmax v17.8h, v17.8h, v1.8h\n" "fmin v18.8h, v18.8h, v0.8h\n" "fmin v19.8h, v19.8h, v0.8h\n" "fmin v20.8h, v20.8h, v0.8h\n" "fmax v18.8h, v18.8h, v1.8h\n" "fmax v19.8h, v19.8h, v1.8h\n" "fmax v20.8h, v20.8h, v1.8h\n" "fmin v21.8h, v21.8h, v0.8h\n" "fmin v22.8h, v22.8h, v0.8h\n" "fmin v23.8h, v23.8h, v0.8h\n" "fmax v21.8h, v21.8h, v1.8h\n" "fmax v22.8h, v22.8h, v1.8h\n" "fmax v23.8h, v23.8h, v1.8h\n" "fmin v24.8h, v24.8h, v0.8h\n" "fmin v25.8h, v25.8h, v0.8h\n" "fmin v26.8h, v26.8h, v0.8h\n" "fmax v24.8h, v24.8h, v1.8h\n" "fmax v25.8h, v25.8h, v1.8h\n" "fmax v26.8h, v26.8h, v1.8h\n" "fmin v27.8h, 
v27.8h, v0.8h\n" "fmin v28.8h, v28.8h, v0.8h\n" "fmin v29.8h, v29.8h, v0.8h\n" "fmax v27.8h, v27.8h, v1.8h\n" "fmax v28.8h, v28.8h, v1.8h\n" "fmax v29.8h, v29.8h, v1.8h\n" "fmin v30.8h, v30.8h, v0.8h\n" "fmin v31.8h, v31.8h, v0.8h\n" "fmax v30.8h, v30.8h, v1.8h\n" "fmax v31.8h, v31.8h, v1.8h\n" "282:" // Height 6: No activation "cmp x16, #0x20\n" "bge 299f\n" "tbz x16, #4, 290f\n" "st1 { v8.8h }, [x13], #0x10\n" "st1 { v9.8h }, [x13], #0x10\n" "st1 { v12.8h }, [x9], #0x10\n" "st1 { v13.8h }, [x9], #0x10\n" "st1 { v16.8h }, [x27], #0x10\n" "st1 { v17.8h }, [x27], #0x10\n" "st1 { v20.8h }, [x25], #0x10\n" "st1 { v21.8h }, [x25], #0x10\n" "st1 { v24.8h }, [x23], #0x10\n" "st1 { v25.8h }, [x23], #0x10\n" "st1 { v28.8h }, [x21], #0x10\n" "st1 { v29.8h }, [x21], #0x10\n" "tbz x16, #3, 286f\n" "st1 { v10.8h }, [x13], #0x10\n" "st1 { v14.8h }, [x9], #0x10\n" "st1 { v18.8h }, [x27], #0x10\n" "st1 { v22.8h }, [x25], #0x10\n" "st1 { v26.8h }, [x23], #0x10\n" "st1 { v30.8h }, [x21], #0x10\n" "tbz x16, #2, 284f\n" "str d11, [x13], #0x8\n" "str d15, [x9], #0x8\n" "str d19, [x27], #0x8\n" "str d23, [x25], #0x8\n" "str d27, [x23], #0x8\n" "str d31, [x21], #0x8\n" "tbz x16, #1, 283f\n" "st1 { v11.s }[2], [x13], #0x4\n" "st1 { v15.s }[2], [x9], #0x4\n" "st1 { v19.s }[2], [x27], #0x4\n" "st1 { v23.s }[2], [x25], #0x4\n" "st1 { v27.s }[2], [x23], #0x4\n" "st1 { v31.s }[2], [x21], #0x4\n" "tbz x16, #0, 298f\n" "st1 { v11.h }[6], [x13]\n" "st1 { v15.h }[6], [x9]\n" "st1 { v19.h }[6], [x27]\n" "st1 { v23.h }[6], [x25]\n" "st1 { v27.h }[6], [x23]\n" "st1 { v31.h }[6], [x21]\n" "b 298f\n" "283:" // Height 6: Partial direct writeback: partial_1_28 "tbz x16, #0, 298f\n" "st1 { v11.h }[4], [x13]\n" "st1 { v15.h }[4], [x9]\n" "st1 { v19.h }[4], [x27]\n" "st1 { v23.h }[4], [x25]\n" "st1 { v27.h }[4], [x23]\n" "st1 { v31.h }[4], [x21]\n" "b 298f\n" "284:" // Height 6: Partial direct writeback: partial_2_24 "tbz x16, #1, 285f\n" "str s11, [x13], #0x4\n" "str s15, [x9], #0x4\n" "str s19, [x27], 
#0x4\n" "str s23, [x25], #0x4\n" "str s27, [x23], #0x4\n" "str s31, [x21], #0x4\n" "tbz x16, #0, 298f\n" "st1 { v11.h }[2], [x13]\n" "st1 { v15.h }[2], [x9]\n" "st1 { v19.h }[2], [x27]\n" "st1 { v23.h }[2], [x25]\n" "st1 { v27.h }[2], [x23]\n" "st1 { v31.h }[2], [x21]\n" "b 298f\n" "285:" // Height 6: Partial direct writeback: partial_1_24 "tbz x16, #0, 298f\n" "str h11, [x13, #0x0]\n" "str h15, [x9, #0x0]\n" "str h19, [x27, #0x0]\n" "str h23, [x25, #0x0]\n" "str h27, [x23, #0x0]\n" "str h31, [x21, #0x0]\n" "b 298f\n" "286:" // Height 6: Partial direct writeback: partial_4_16 "tbz x16, #2, 288f\n" "str d10, [x13], #0x8\n" "str d14, [x9], #0x8\n" "str d18, [x27], #0x8\n" "str d22, [x25], #0x8\n" "str d26, [x23], #0x8\n" "str d30, [x21], #0x8\n" "tbz x16, #1, 287f\n" "st1 { v10.s }[2], [x13], #0x4\n" "st1 { v14.s }[2], [x9], #0x4\n" "st1 { v18.s }[2], [x27], #0x4\n" "st1 { v22.s }[2], [x25], #0x4\n" "st1 { v26.s }[2], [x23], #0x4\n" "st1 { v30.s }[2], [x21], #0x4\n" "tbz x16, #0, 298f\n" "st1 { v10.h }[6], [x13]\n" "st1 { v14.h }[6], [x9]\n" "st1 { v18.h }[6], [x27]\n" "st1 { v22.h }[6], [x25]\n" "st1 { v26.h }[6], [x23]\n" "st1 { v30.h }[6], [x21]\n" "b 298f\n" "287:" // Height 6: Partial direct writeback: partial_1_20 "tbz x16, #0, 298f\n" "st1 { v10.h }[4], [x13]\n" "st1 { v14.h }[4], [x9]\n" "st1 { v18.h }[4], [x27]\n" "st1 { v22.h }[4], [x25]\n" "st1 { v26.h }[4], [x23]\n" "st1 { v30.h }[4], [x21]\n" "b 298f\n" "288:" // Height 6: Partial direct writeback: partial_2_16 "tbz x16, #1, 289f\n" "str s10, [x13], #0x4\n" "str s14, [x9], #0x4\n" "str s18, [x27], #0x4\n" "str s22, [x25], #0x4\n" "str s26, [x23], #0x4\n" "str s30, [x21], #0x4\n" "tbz x16, #0, 298f\n" "st1 { v10.h }[2], [x13]\n" "st1 { v14.h }[2], [x9]\n" "st1 { v18.h }[2], [x27]\n" "st1 { v22.h }[2], [x25]\n" "st1 { v26.h }[2], [x23]\n" "st1 { v30.h }[2], [x21]\n" "b 298f\n" "289:" // Height 6: Partial direct writeback: partial_1_16 "tbz x16, #0, 298f\n" "str h10, [x13, #0x0]\n" "str h14, [x9, #0x0]\n" 
"str h18, [x27, #0x0]\n" "str h22, [x25, #0x0]\n" "str h26, [x23, #0x0]\n" "str h30, [x21, #0x0]\n" "b 298f\n" "290:" // Height 6: Partial direct writeback: partial_8_0 "tbz x16, #3, 294f\n" "st1 { v8.8h }, [x13], #0x10\n" "st1 { v12.8h }, [x9], #0x10\n" "st1 { v16.8h }, [x27], #0x10\n" "st1 { v20.8h }, [x25], #0x10\n" "st1 { v24.8h }, [x23], #0x10\n" "st1 { v28.8h }, [x21], #0x10\n" "tbz x16, #2, 292f\n" "str d9, [x13], #0x8\n" "str d13, [x9], #0x8\n" "str d17, [x27], #0x8\n" "str d21, [x25], #0x8\n" "str d25, [x23], #0x8\n" "str d29, [x21], #0x8\n" "tbz x16, #1, 291f\n" "st1 { v9.s }[2], [x13], #0x4\n" "st1 { v13.s }[2], [x9], #0x4\n" "st1 { v17.s }[2], [x27], #0x4\n" "st1 { v21.s }[2], [x25], #0x4\n" "st1 { v25.s }[2], [x23], #0x4\n" "st1 { v29.s }[2], [x21], #0x4\n" "tbz x16, #0, 298f\n" "st1 { v9.h }[6], [x13]\n" "st1 { v13.h }[6], [x9]\n" "st1 { v17.h }[6], [x27]\n" "st1 { v21.h }[6], [x25]\n" "st1 { v25.h }[6], [x23]\n" "st1 { v29.h }[6], [x21]\n" "b 298f\n" "291:" // Height 6: Partial direct writeback: partial_1_12 "tbz x16, #0, 298f\n" "st1 { v9.h }[4], [x13]\n" "st1 { v13.h }[4], [x9]\n" "st1 { v17.h }[4], [x27]\n" "st1 { v21.h }[4], [x25]\n" "st1 { v25.h }[4], [x23]\n" "st1 { v29.h }[4], [x21]\n" "b 298f\n" "292:" // Height 6: Partial direct writeback: partial_2_8 "tbz x16, #1, 293f\n" "str s9, [x13], #0x4\n" "str s13, [x9], #0x4\n" "str s17, [x27], #0x4\n" "str s21, [x25], #0x4\n" "str s25, [x23], #0x4\n" "str s29, [x21], #0x4\n" "tbz x16, #0, 298f\n" "st1 { v9.h }[2], [x13]\n" "st1 { v13.h }[2], [x9]\n" "st1 { v17.h }[2], [x27]\n" "st1 { v21.h }[2], [x25]\n" "st1 { v25.h }[2], [x23]\n" "st1 { v29.h }[2], [x21]\n" "b 298f\n" "293:" // Height 6: Partial direct writeback: partial_1_8 "tbz x16, #0, 298f\n" "str h9, [x13, #0x0]\n" "str h13, [x9, #0x0]\n" "str h17, [x27, #0x0]\n" "str h21, [x25, #0x0]\n" "str h25, [x23, #0x0]\n" "str h29, [x21, #0x0]\n" "b 298f\n" "294:" // Height 6: Partial direct writeback: partial_4_0 "tbz x16, #2, 296f\n" "str d8, [x13], 
#0x8\n" "str d12, [x9], #0x8\n" "str d16, [x27], #0x8\n" "str d20, [x25], #0x8\n" "str d24, [x23], #0x8\n" "str d28, [x21], #0x8\n" "tbz x16, #1, 295f\n" "st1 { v8.s }[2], [x13], #0x4\n" "st1 { v12.s }[2], [x9], #0x4\n" "st1 { v16.s }[2], [x27], #0x4\n" "st1 { v20.s }[2], [x25], #0x4\n" "st1 { v24.s }[2], [x23], #0x4\n" "st1 { v28.s }[2], [x21], #0x4\n" "tbz x16, #0, 298f\n" "st1 { v8.h }[6], [x13]\n" "st1 { v12.h }[6], [x9]\n" "st1 { v16.h }[6], [x27]\n" "st1 { v20.h }[6], [x25]\n" "st1 { v24.h }[6], [x23]\n" "st1 { v28.h }[6], [x21]\n" "b 298f\n" "295:" // Height 6: Partial direct writeback: partial_1_4 "tbz x16, #0, 298f\n" "st1 { v8.h }[4], [x13]\n" "st1 { v12.h }[4], [x9]\n" "st1 { v16.h }[4], [x27]\n" "st1 { v20.h }[4], [x25]\n" "st1 { v24.h }[4], [x23]\n" "st1 { v28.h }[4], [x21]\n" "b 298f\n" "296:" // Height 6: Partial direct writeback: partial_2_0 "tbz x16, #1, 297f\n" "str s8, [x13], #0x4\n" "str s12, [x9], #0x4\n" "str s16, [x27], #0x4\n" "str s20, [x25], #0x4\n" "str s24, [x23], #0x4\n" "str s28, [x21], #0x4\n" "tbz x16, #0, 298f\n" "st1 { v8.h }[2], [x13]\n" "st1 { v12.h }[2], [x9]\n" "st1 { v16.h }[2], [x27]\n" "st1 { v20.h }[2], [x25]\n" "st1 { v24.h }[2], [x23]\n" "st1 { v28.h }[2], [x21]\n" "b 298f\n" "297:" // Height 6: Partial direct writeback: partial_1_0 "str h8, [x13, #0x0]\n" "str h12, [x9, #0x0]\n" "str h16, [x27, #0x0]\n" "str h20, [x25, #0x0]\n" "str h24, [x23, #0x0]\n" "str h28, [x21, #0x0]\n" "298:" // Height 6: Partial direct writeback: Done "b 300f\n" "299:" // Height 6: Full writeback "str q8, [x13, #0x0]\n" "str q9, [x13, #0x10]\n" "str q10, [x13, #0x20]\n" "str q11, [x13, #0x30]\n" "str q12, [x9, #0x0]\n" "str q13, [x9, #0x10]\n" "str q14, [x9, #0x20]\n" "str q15, [x9, #0x30]\n" "str q16, [x27, #0x0]\n" "str q17, [x27, #0x10]\n" "str q18, [x27, #0x20]\n" "str q19, [x27, #0x30]\n" "str q20, [x25, #0x0]\n" "str q21, [x25, #0x10]\n" "str q22, [x25, #0x20]\n" "str q23, [x25, #0x30]\n" "str q24, [x23, #0x0]\n" "str q25, [x23, #0x10]\n" 
"str q26, [x23, #0x20]\n" "str q27, [x23, #0x30]\n" "str q28, [x21, #0x0]\n" "str q29, [x21, #0x10]\n" "str q30, [x21, #0x20]\n" "str q31, [x21, #0x30]\n" "add x13, x13, #0x40\n" "add x9, x9, #0x40\n" "add x27, x27, #0x40\n" "add x25, x25, #0x40\n" "add x23, x23, #0x40\n" "add x21, x21, #0x40\n" "300:" // Height 6: Writeback done "subs x16, x16, #0x20\n" "bgt 253b\n" "subs %x[M], %x[M], #0x6\n" "beq 302f\n" "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n" "tbz %x[flags], #3, 301f\n" "add x20, x20, #0x6\n" "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n" "b 1b\n" "301:" // Update direct input "mov x19, #0xc\n" "madd %x[input_ptr], x19, x20, %x[input_ptr]\n" "b 1b\n" "302:" // Exit : [M] "+r" (M), [input_ptr] "+r" (input_ptr), [output_ptr] "+r" (output_ptr) : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" ); } } // namespace arm_gemm #endif // __aarch64__
//===- TestDialect.cpp - MLIR Dialect for Testing -------------------------===// // // Copyright 2019 The MLIR Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // ============================================================================= #include "TestDialect.h" #include "mlir/IR/PatternMatch.h" #include "mlir/IR/TypeUtilities.h" using namespace mlir; //===----------------------------------------------------------------------===// // TestDialect //===----------------------------------------------------------------------===// TestDialect::TestDialect(MLIRContext *context) : Dialect(getDialectName(), context) { addOperations< #define GET_OP_LIST #include "TestOps.cpp.inc" >(); allowUnknownOperations(); } //===----------------------------------------------------------------------===// // Test PolyForOp - parse list of region arguments. //===----------------------------------------------------------------------===// ParseResult parsePolyForOp(OpAsmParser *parser, OperationState *result) { SmallVector<OpAsmParser::OperandType, 4> ivsInfo; // Parse list of region arguments without a delimiter. if (parser->parseRegionArgumentList(ivsInfo)) return failure(); // Parse the body region. 
Region *body = result->addRegion(); auto &builder = parser->getBuilder(); SmallVector<Type, 4> argTypes(ivsInfo.size(), builder.getIndexType()); if (parser->parseRegion(*body, ivsInfo, argTypes)) return failure(); return success(); } // Static initialization for Test dialect registration. static mlir::DialectRegistration<mlir::TestDialect> testDialect; #define GET_OP_CLASSES #include "TestOps.cpp.inc"
//===- LegalizeDAG.cpp - Implement SelectionDAG::Legalize -----------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements the SelectionDAG::Legalize method. // //===----------------------------------------------------------------------===// #include "llvm/ADT/APFloat.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/CodeGen/ISDOpcodes.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineJumpTableInfo.h" #include "llvm/CodeGen/MachineMemOperand.h" #include "llvm/CodeGen/RuntimeLibcalls.h" #include "llvm/CodeGen/SelectionDAG.h" #include "llvm/CodeGen/SelectionDAGNodes.h" #include "llvm/CodeGen/TargetFrameLowering.h" #include "llvm/CodeGen/TargetLowering.h" #include "llvm/CodeGen/TargetSubtargetInfo.h" #include "llvm/CodeGen/ValueTypes.h" #include "llvm/IR/CallingConv.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Function.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/Type.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MachineValueType.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetOptions.h" #include <algorithm> #include <cassert> #include <cstdint> #include <tuple> #include <utility> using namespace llvm; #define DEBUG_TYPE "legalizedag" namespace { /// Keeps track of state when getting 
/// the sign of a floating-point value as an
/// integer.
//
// NOTE(review): populated by getSignAsIntValue() and consumed by
// modifySignAsInt(); only their declarations are visible in this chunk.
struct FloatSignAsInt {
  EVT FloatVT;
  SDValue Chain;
  SDValue FloatPtr;
  SDValue IntPtr;
  MachinePointerInfo IntPointerInfo;
  MachinePointerInfo FloatPointerInfo;
  SDValue IntValue;
  APInt SignMask;
  uint8_t SignBit;
};

//===----------------------------------------------------------------------===//
/// This takes an arbitrary SelectionDAG as input and
/// hacks on it until the target machine can handle it.  This involves
/// eliminating value sizes the machine cannot handle (promoting small sizes to
/// large sizes or splitting up large values into small values) as well as
/// eliminating operations the machine cannot handle.
///
/// This code also does a small amount of optimization and recognition of idioms
/// as part of its processing.  For example, if a target does not support a
/// 'setcc' instruction efficiently, but does support 'brcc' instruction, this
/// will attempt merge setcc and brc instructions into brcc's.
class SelectionDAGLegalize {
  const TargetMachine &TM;
  const TargetLowering &TLI;
  SelectionDAG &DAG;

  /// The set of nodes which have already been legalized. We hold a
  /// reference to it in order to update as necessary on node deletion.
  SmallPtrSetImpl<SDNode *> &LegalizedNodes;

  /// A set of all the nodes updated during legalization.
  SmallSetVector<SDNode *, 16> *UpdatedNodes;

  // SetCC results use the target's preferred boolean-ish type for VT.
  EVT getSetCCResultType(EVT VT) const {
    return TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
  }

  // Libcall insertion helpers.

public:
  // UpdatedNodes is optional: when provided, every node touched during
  // legalization is recorded there for the caller's benefit.
  SelectionDAGLegalize(SelectionDAG &DAG,
                       SmallPtrSetImpl<SDNode *> &LegalizedNodes,
                       SmallSetVector<SDNode *, 16> *UpdatedNodes = nullptr)
      : TM(DAG.getTarget()), TLI(DAG.getTargetLoweringInfo()), DAG(DAG),
        LegalizedNodes(LegalizedNodes), UpdatedNodes(UpdatedNodes) {}

  /// Legalizes the given operation.
  void LegalizeOp(SDNode *Node);

private:
  SDValue OptimizeFloatStore(StoreSDNode *ST);

  void LegalizeLoadOps(SDNode *Node);
  void LegalizeStoreOps(SDNode *Node);

  /// Some targets cannot handle a variable
  /// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it
  /// is necessary to spill the vector being inserted into to memory, perform
  /// the insert there, and then read the result back.
  SDValue PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx,
                                         const SDLoc &dl);
  SDValue ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, SDValue Idx,
                                  const SDLoc &dl);

  /// Return a vector shuffle operation which
  /// performs the same shuffle in terms of order or result bytes, but on a type
  /// whose vector element type is narrower than the original shuffle type.
  /// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
  SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, const SDLoc &dl,
                                     SDValue N1, SDValue N2,
                                     ArrayRef<int> Mask) const;

  bool LegalizeSetCCCondCode(EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC,
                             bool &NeedInvert, const SDLoc &dl, SDValue &Chain,
                             bool IsSignaling = false);

  SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned);

  // Per-FP-type libcall dispatch: the Call_* parameter matching the node's
  // type is selected.
  void ExpandFPLibCall(SDNode *Node, RTLIB::Libcall Call_F32,
                       RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80,
                       RTLIB::Libcall Call_F128, RTLIB::Libcall Call_PPCF128,
                       SmallVectorImpl<SDValue> &Results);
  SDValue ExpandIntLibCall(SDNode *Node, bool isSigned,
                           RTLIB::Libcall Call_I8, RTLIB::Libcall Call_I16,
                           RTLIB::Libcall Call_I32, RTLIB::Libcall Call_I64,
                           RTLIB::Libcall Call_I128);
  void ExpandArgFPLibCall(SDNode *Node, RTLIB::Libcall Call_F32,
                          RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80,
                          RTLIB::Libcall Call_F128, RTLIB::Libcall Call_PPCF128,
                          SmallVectorImpl<SDValue> &Results);
  void ExpandDivRemLibCall(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  void ExpandSinCosLibCall(SDNode *Node, SmallVectorImpl<SDValue> &Results);

  SDValue EmitStackConvert(SDValue SrcOp, EVT SlotVT, EVT DestVT,
                           const SDLoc &dl);
  SDValue EmitStackConvert(SDValue SrcOp, EVT SlotVT, EVT DestVT,
                           const SDLoc &dl, SDValue ChainIn);
  SDValue ExpandBUILD_VECTOR(SDNode *Node);
  SDValue ExpandSPLAT_VECTOR(SDNode *Node);
  SDValue ExpandSCALAR_TO_VECTOR(SDNode *Node);
  void ExpandDYNAMIC_STACKALLOC(SDNode *Node,
                                SmallVectorImpl<SDValue> &Results);
  void getSignAsIntValue(FloatSignAsInt &State, const SDLoc &DL,
                         SDValue Value) const;
  SDValue modifySignAsInt(const FloatSignAsInt &State, const SDLoc &DL,
                          SDValue NewIntValue) const;
  SDValue ExpandFCOPYSIGN(SDNode *Node) const;
  SDValue ExpandFABS(SDNode *Node) const;
  SDValue ExpandLegalINT_TO_FP(SDNode *Node, SDValue &Chain);
  void PromoteLegalINT_TO_FP(SDNode *N, const SDLoc &dl,
                             SmallVectorImpl<SDValue> &Results);
  void PromoteLegalFP_TO_INT(SDNode *N, const SDLoc &dl,
                             SmallVectorImpl<SDValue> &Results);

  SDValue ExpandBITREVERSE(SDValue Op, const SDLoc &dl);
  SDValue ExpandBSWAP(SDValue Op, const SDLoc &dl);

  SDValue ExpandExtractFromVectorThroughStack(SDValue Op);
  SDValue ExpandInsertToVectorThroughStack(SDValue Op);
  SDValue ExpandVectorBuildThroughStack(SDNode* Node);

  SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP);
  SDValue ExpandConstant(ConstantSDNode *CP);

  // if ExpandNode returns false, LegalizeOp falls back to ConvertNodeToLibcall
  bool ExpandNode(SDNode *Node);
  void ConvertNodeToLibcall(SDNode *Node);
  void PromoteNode(SDNode *Node);

public:
  // Node replacement helpers

  // Forget that N was legalized; if tracking, record it as updated.
  void ReplacedNode(SDNode *N) {
    LegalizedNodes.erase(N);
    if (UpdatedNodes)
      UpdatedNodes->insert(N);
  }

  // Replace all uses of Old with New; both nodes must produce the same
  // number of values.
  void ReplaceNode(SDNode *Old, SDNode *New) {
    LLVM_DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG);
               dbgs() << " with: "; New->dump(&DAG));
    assert(Old->getNumValues() == New->getNumValues() &&
           "Replacing one node with another that produces a different number "
           "of values!");
    DAG.ReplaceAllUsesWith(Old, New);
    if (UpdatedNodes)
      UpdatedNodes->insert(New);
    ReplacedNode(Old);
  }

  void ReplaceNode(SDValue Old, SDValue New) {
    LLVM_DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG);
               dbgs() << " with: "; New->dump(&DAG));
    DAG.ReplaceAllUsesWith(Old, New);
    if (UpdatedNodes)
      UpdatedNodes->insert(New.getNode());
    ReplacedNode(Old.getNode());
  }

  // Replace each result value of Old with the corresponding entry of the
  // New array (one SDValue per result).
  void ReplaceNode(SDNode *Old, const SDValue *New) {
    LLVM_DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG));
    DAG.ReplaceAllUsesWith(Old, New);
    for (unsigned i = 0, e = Old->getNumValues(); i != e; ++i) {
      LLVM_DEBUG(dbgs() << (i == 0 ? " with: " : " and: ");
                 New[i]->dump(&DAG));
      if (UpdatedNodes)
        UpdatedNodes->insert(New[i].getNode());
    }
    ReplacedNode(Old);
  }

  // Replace a single result value rather than a whole node.
  void ReplaceNodeWithValue(SDValue Old, SDValue New) {
    LLVM_DEBUG(dbgs() << " ... replacing: "; Old->dump(&DAG);
               dbgs() << " with: "; New->dump(&DAG));
    DAG.ReplaceAllUsesOfValueWith(Old, New);
    if (UpdatedNodes)
      UpdatedNodes->insert(New.getNode());
    ReplacedNode(Old.getNode());
  }
};

} // end anonymous namespace

/// Return a vector shuffle operation which
/// performs the same shuffle in terms of order or result bytes, but on a type
/// whose vector element type is narrower than the original shuffle type.
/// e.g.
/// <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
SDValue SelectionDAGLegalize::ShuffleWithNarrowerEltType(
    EVT NVT, EVT VT, const SDLoc &dl, SDValue N1, SDValue N2,
    ArrayRef<int> Mask) const {
  // Each original mask element expands to NumEltsGrowth consecutive lanes of
  // the narrower-element type.
  unsigned NumMaskElts = VT.getVectorNumElements();
  unsigned NumDestElts = NVT.getVectorNumElements();
  unsigned NumEltsGrowth = NumDestElts / NumMaskElts;

  assert(NumEltsGrowth && "Cannot promote to vector type with fewer elts!");

  // Growth factor of one: the mask is already correct for the new type.
  if (NumEltsGrowth == 1)
    return DAG.getVectorShuffle(NVT, dl, N1, N2, Mask);

  SmallVector<int, 8> NewMask;
  for (unsigned i = 0; i != NumMaskElts; ++i) {
    int Idx = Mask[i];
    for (unsigned j = 0; j != NumEltsGrowth; ++j) {
      if (Idx < 0)
        // Undef lanes stay undef in every expanded position.
        NewMask.push_back(-1);
      else
        NewMask.push_back(Idx * NumEltsGrowth + j);
    }
  }
  assert(NewMask.size() == NumDestElts && "Non-integer NumEltsGrowth?");
  assert(TLI.isShuffleMaskLegal(NewMask, NVT) && "Shuffle not legal?");
  return DAG.getVectorShuffle(NVT, dl, N1, N2, NewMask);
}

/// Expands the ConstantFP node to an integer constant or
/// a load from the constant pool.
SDValue
SelectionDAGLegalize::ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP) {
  bool Extend = false;
  SDLoc dl(CFP);

  // If a FP immediate is precise when represented as a float and if the
  // target can do an extending load from float to double, we put it into
  // the constant pool as a float, even if it is statically typed as a
  // double.  This shrinks FP constants and canonicalizes them for targets
  // where an FP extending load is the same cost as a normal load (such as on
  // the x87 fp stack or PPC FP unit).
  EVT VT = CFP->getValueType(0);
  ConstantFP *LLVMC = const_cast<ConstantFP*>(CFP->getConstantFPValue());
  if (!UseCP) {
    // Caller asked for a plain integer constant instead of a pool load:
    // reinterpret the FP bits as an integer of the same width.
    assert((VT == MVT::f64 || VT == MVT::f32) && "Invalid type expansion");
    return DAG.getConstant(LLVMC->getValueAPF().bitcastToAPInt(), dl,
                           (VT == MVT::f64) ? MVT::i64 : MVT::i32);
  }

  APFloat APF = CFP->getValueAPF();
  EVT OrigVT = VT;
  EVT SVT = VT;

  // We don't want to shrink SNaNs. Converting the SNaN back to its real type
  // can cause it to be changed into a QNaN on some platforms (e.g. on SystemZ).
  if (!APF.isSignaling()) {
    // Walk down the simple-value-type enum to try progressively smaller FP
    // types; each successful step truncates the constant further, so the
    // smallest representable legal type wins.
    while (SVT != MVT::f32 && SVT != MVT::f16) {
      SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1);
      if (ConstantFPSDNode::isValueValidForType(SVT, APF) &&
          // Only do this if the target has a native EXTLOAD instruction from
          // smaller type.
          TLI.isLoadExtLegal(ISD::EXTLOAD, OrigVT, SVT) &&
          TLI.ShouldShrinkFPConstant(OrigVT)) {
        Type *SType = SVT.getTypeForEVT(*DAG.getContext());
        LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType));
        VT = SVT;
        Extend = true;
      }
    }
  }

  SDValue CPIdx =
      DAG.getConstantPool(LLVMC, TLI.getPointerTy(DAG.getDataLayout()));
  Align Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlign();
  if (Extend) {
    // The constant was shrunk to VT; extend back to the original type while
    // loading.
    SDValue Result = DAG.getExtLoad(
        ISD::EXTLOAD, dl, OrigVT, DAG.getEntryNode(), CPIdx,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), VT,
        Alignment);
    return Result;
  }
  SDValue Result = DAG.getLoad(
      OrigVT, dl, DAG.getEntryNode(), CPIdx,
      MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Alignment);
  return Result;
}

/// Expands the Constant node to a load from the constant pool.
SDValue SelectionDAGLegalize::ExpandConstant(ConstantSDNode *CP) {
  SDLoc dl(CP);
  EVT VT = CP->getValueType(0);
  SDValue CPIdx = DAG.getConstantPool(CP->getConstantIntValue(),
                                      TLI.getPointerTy(DAG.getDataLayout()));
  Align Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlign();
  SDValue Result = DAG.getLoad(
      VT, dl, DAG.getEntryNode(), CPIdx,
      MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Alignment);
  return Result;
}

/// Some target cannot handle a variable insertion index for the
/// INSERT_VECTOR_ELT instruction. In this case, it
/// is necessary to spill the vector being inserted into to memory, perform
/// the insert there, and then read the result back.
SDValue SelectionDAGLegalize::PerformInsertVectorEltInMemory(SDValue Vec,
                                                             SDValue Val,
                                                             SDValue Idx,
                                                             const SDLoc &dl) {
  SDValue Tmp1 = Vec;
  SDValue Tmp2 = Val;
  SDValue Tmp3 = Idx;

  // If the target doesn't support this, we have to spill the input vector
  // to a temporary stack slot, update the element, then reload it.  This is
  // badness.  We could also load the value into a vector register (either
  // with a "move to register" or "extload into register" instruction, then
  // permute it into place, if the idx is a constant and if the idx is
  // supported by the target.
  EVT VT    = Tmp1.getValueType();
  EVT EltVT = VT.getVectorElementType();
  SDValue StackPtr = DAG.CreateStackTemporary(VT);

  int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();

  // Store the vector.
  SDValue Ch = DAG.getStore(
      DAG.getEntryNode(), dl, Tmp1, StackPtr,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI));

  // Address of the element within the stack slot.
  SDValue StackPtr2 = TLI.getVectorElementPointer(DAG, StackPtr, VT, Tmp3);

  // Store the scalar value.  A truncating store handles an over-wide
  // integer value.
  Ch = DAG.getTruncStore(
      Ch, dl, Tmp2, StackPtr2,
      MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()), EltVT);
  // Load the updated vector.
  return DAG.getLoad(VT, dl, Ch, StackPtr,
                     MachinePointerInfo::getFixedStack(
                         DAG.getMachineFunction(), SPFI));
}

/// Expand INSERT_VECTOR_ELT: with a constant index, build the result as a
/// shuffle of the vector with a SCALAR_TO_VECTOR of the new value; otherwise
/// fall back to going through a stack slot.
SDValue SelectionDAGLegalize::ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val,
                                                      SDValue Idx,
                                                      const SDLoc &dl) {
  if (ConstantSDNode *InsertPos = dyn_cast<ConstantSDNode>(Idx)) {
    // SCALAR_TO_VECTOR requires that the type of the value being inserted
    // match the element type of the vector being created, except for
    // integers in which case the inserted value can be over width.
    EVT EltVT = Vec.getValueType().getVectorElementType();
    if (Val.getValueType() == EltVT ||
        (EltVT.isInteger() && Val.getValueType().bitsGE(EltVT))) {
      SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                  Vec.getValueType(), Val);

      unsigned NumElts = Vec.getValueType().getVectorNumElements();
      // We generate a shuffle of InVec and ScVec, so the shuffle mask
      // should be 0,1,2,3,4,5... with the appropriate element replaced with
      // elt 0 of the RHS.
      SmallVector<int, 8> ShufOps;
      for (unsigned i = 0; i != NumElts; ++i)
        ShufOps.push_back(i != InsertPos->getZExtValue() ? i : NumElts);

      return DAG.getVectorShuffle(Vec.getValueType(), dl, Vec, ScVec, ShufOps);
    }
  }
  return PerformInsertVectorEltInMemory(Vec, Val, Idx, dl);
}

/// Try to rewrite a normal store of a ConstantFP into an equivalent integer
/// store (one i32, one i64, or a pair of i32 stores).  Returns the new store
/// chain on success, or a null SDValue when no rewrite applies.
SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
  if (!ISD::isNormalStore(ST))
    return SDValue();

  LLVM_DEBUG(dbgs() << "Optimizing float store operations\n");
  // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
  // FIXME: We shouldn't do this for TargetConstantFP's.
  // FIXME: move this to the DAG Combiner!  Note that we can't regress due
  // to phase ordering between legalized code and the dag combiner.  This
  // probably means that we need to integrate dag combiner and legalizer
  // together.
  // We generally can't do this one for long doubles.
  SDValue Chain = ST->getChain();
  SDValue Ptr = ST->getBasePtr();
  MachineMemOperand::Flags MMOFlags = ST->getMemOperand()->getFlags();
  AAMDNodes AAInfo = ST->getAAInfo();
  SDLoc dl(ST);
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(ST->getValue())) {
    if (CFP->getValueType(0) == MVT::f32 &&
        TLI.isTypeLegal(MVT::i32)) {
      // f32 -> store the bit pattern as a single i32.
      SDValue Con = DAG.getConstant(CFP->getValueAPF().
                                      bitcastToAPInt().zextOrTrunc(32),
                                    SDLoc(CFP), MVT::i32);
      return DAG.getStore(Chain, dl, Con, Ptr, ST->getPointerInfo(),
                          ST->getOriginalAlign(), MMOFlags, AAInfo);
    }

    if (CFP->getValueType(0) == MVT::f64) {
      // If this target supports 64-bit registers, do a single 64-bit store.
      if (TLI.isTypeLegal(MVT::i64)) {
        SDValue Con = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
                                        zextOrTrunc(64), SDLoc(CFP), MVT::i64);
        return DAG.getStore(Chain, dl, Con, Ptr, ST->getPointerInfo(),
                            ST->getOriginalAlign(), MMOFlags, AAInfo);
      }

      if (TLI.isTypeLegal(MVT::i32) && !ST->isVolatile()) {
        // Otherwise, if the target supports 32-bit registers, use 2 32-bit
        // stores.  If the target supports neither 32- nor 64-bits, this
        // xform is certainly not worth it.
        const APInt &IntVal = CFP->getValueAPF().bitcastToAPInt();
        SDValue Lo = DAG.getConstant(IntVal.trunc(32), dl, MVT::i32);
        SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), dl, MVT::i32);
        // Lo/Hi refer to the value's halves; swap so the first store hits
        // the lower address on big-endian targets.
        if (DAG.getDataLayout().isBigEndian())
          std::swap(Lo, Hi);

        Lo = DAG.getStore(Chain, dl, Lo, Ptr, ST->getPointerInfo(),
                          ST->getOriginalAlign(), MMOFlags, AAInfo);
        Ptr = DAG.getMemBasePlusOffset(Ptr, 4, dl);
        Hi = DAG.getStore(Chain, dl, Hi, Ptr,
                          ST->getPointerInfo().getWithOffset(4),
                          ST->getOriginalAlign(), MMOFlags, AAInfo);

        return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
      }
    }
  }
  return SDValue(nullptr, 0);
}

/// Legalize a STORE node: replace it in the DAG when the target needs a
/// transformed (bitcast, expanded-unaligned, custom, split or truncating)
/// form of the store.
void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
  StoreSDNode *ST = cast<StoreSDNode>(Node);
  SDValue Chain = ST->getChain();
  SDValue Ptr = ST->getBasePtr();
  SDLoc dl(Node);

  MachineMemOperand::Flags MMOFlags = ST->getMemOperand()->getFlags();
  AAMDNodes AAInfo = ST->getAAInfo();

  if (!ST->isTruncatingStore()) {
    LLVM_DEBUG(dbgs() << "Legalizing store operation\n");
    if (SDNode *OptStore = OptimizeFloatStore(ST).getNode()) {
      ReplaceNode(ST, OptStore);
      return;
    }

    SDValue Value = ST->getValue();
    MVT VT = Value.getSimpleValueType();
    switch (TLI.getOperationAction(ISD::STORE, VT)) {
    default: llvm_unreachable("This action is not supported yet!");
    case TargetLowering::Legal: {
      // If this is an unaligned store and the target doesn't support it,
      // expand it.
      EVT MemVT = ST->getMemoryVT();
      const DataLayout &DL = DAG.getDataLayout();
      if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(), DL, MemVT,
                                              *ST->getMemOperand())) {
        LLVM_DEBUG(dbgs() << "Expanding unsupported unaligned store\n");
        SDValue Result = TLI.expandUnalignedStore(ST, DAG);
        ReplaceNode(SDValue(ST, 0), Result);
      } else
        LLVM_DEBUG(dbgs() << "Legal store\n");
      break;
    }
    case TargetLowering::Custom: {
      LLVM_DEBUG(dbgs() << "Trying custom lowering\n");
      SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
      if (Res && Res != SDValue(Node, 0))
        ReplaceNode(SDValue(Node, 0), Res);
      return;
    }
    case TargetLowering::Promote: {
      // Bitcast the value to the promoted register type and store that.
      MVT NVT = TLI.getTypeToPromoteTo(ISD::STORE, VT);
      assert(NVT.getSizeInBits() == VT.getSizeInBits() &&
             "Can only promote stores to same size type");
      Value = DAG.getNode(ISD::BITCAST, dl, NVT, Value);
      SDValue Result =
          DAG.getStore(Chain, dl, Value, Ptr, ST->getPointerInfo(),
                       ST->getOriginalAlign(), MMOFlags, AAInfo);
      ReplaceNode(SDValue(Node, 0), Result);
      break;
    }
    }
    return;
  }

  LLVM_DEBUG(dbgs() << "Legalizing truncating store operations\n");
  SDValue Value = ST->getValue();
  EVT StVT = ST->getMemoryVT();
  unsigned StWidth = StVT.getSizeInBits();
  auto &DL = DAG.getDataLayout();

  if (StWidth != StVT.getStoreSizeInBits()) {
    // Promote to a byte-sized store with upper bits zero if not
    // storing an integral number of bytes.  For example, promote
    // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
    EVT NVT = EVT::getIntegerVT(*DAG.getContext(),
                                StVT.getStoreSizeInBits());
    Value = DAG.getZeroExtendInReg(Value, dl, StVT);
    SDValue Result =
        DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(), NVT,
                          ST->getOriginalAlign(), MMOFlags, AAInfo);
    ReplaceNode(SDValue(Node, 0), Result);
  } else if (StWidth & (StWidth - 1)) {
    // If not storing a power-of-2 number of bits, expand as two stores.
    assert(!StVT.isVector() && "Unsupported truncstore!");
    unsigned LogStWidth = Log2_32(StWidth);
    assert(LogStWidth < 32);
    // RoundWidth: largest power-of-two chunk; ExtraWidth: the remainder.
    unsigned RoundWidth = 1 << LogStWidth;
    assert(RoundWidth < StWidth);
    unsigned ExtraWidth = StWidth - RoundWidth;
    assert(ExtraWidth < RoundWidth);
    assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
           "Store size not an integral number of bytes!");
    EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
    EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
    SDValue Lo, Hi;
    unsigned IncrementSize;

    if (DL.isLittleEndian()) {
      // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16)
      // Store the bottom RoundWidth bits.
      Lo = DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(),
                             RoundVT, ST->getOriginalAlign(), MMOFlags,
                             AAInfo);

      // Store the remaining ExtraWidth bits.
      IncrementSize = RoundWidth / 8;
      Ptr = DAG.getMemBasePlusOffset(Ptr, IncrementSize, dl);
      Hi = DAG.getNode(
          ISD::SRL, dl, Value.getValueType(), Value,
          DAG.getConstant(RoundWidth, dl,
                          TLI.getShiftAmountTy(Value.getValueType(), DL)));
      Hi = DAG.getTruncStore(Chain, dl, Hi, Ptr,
                             ST->getPointerInfo().getWithOffset(IncrementSize),
                             ExtraVT, ST->getOriginalAlign(), MMOFlags,
                             AAInfo);
    } else {
      // Big endian - avoid unaligned stores.
      // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X
      // Store the top RoundWidth bits.
      Hi = DAG.getNode(
          ISD::SRL, dl, Value.getValueType(), Value,
          DAG.getConstant(ExtraWidth, dl,
                          TLI.getShiftAmountTy(Value.getValueType(), DL)));
      Hi = DAG.getTruncStore(Chain, dl, Hi, Ptr, ST->getPointerInfo(), RoundVT,
                             ST->getOriginalAlign(), MMOFlags, AAInfo);

      // Store the remaining ExtraWidth bits.
      IncrementSize = RoundWidth / 8;
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                        DAG.getConstant(IncrementSize, dl,
                                        Ptr.getValueType()));
      Lo = DAG.getTruncStore(Chain, dl, Value, Ptr,
                             ST->getPointerInfo().getWithOffset(IncrementSize),
                             ExtraVT, ST->getOriginalAlign(), MMOFlags,
                             AAInfo);
    }

    // The order of the stores doesn't matter.
    // Tie the two independent stores together with a token factor.
    SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
    ReplaceNode(SDValue(Node, 0), Result);
  } else {
    switch (TLI.getTruncStoreAction(ST->getValue().getValueType(), StVT)) {
    default: llvm_unreachable("This action is not supported yet!");
    case TargetLowering::Legal: {
      EVT MemVT = ST->getMemoryVT();
      // If this is an unaligned store and the target doesn't support it,
      // expand it.
      if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(), DL, MemVT,
                                              *ST->getMemOperand())) {
        SDValue Result = TLI.expandUnalignedStore(ST, DAG);
        ReplaceNode(SDValue(ST, 0), Result);
      }
      break;
    }
    case TargetLowering::Custom: {
      SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
      if (Res && Res != SDValue(Node, 0))
        ReplaceNode(SDValue(Node, 0), Res);
      return;
    }
    case TargetLowering::Expand:
      assert(!StVT.isVector() &&
             "Vector Stores are handled in LegalizeVectorOps");

      SDValue Result;

      // TRUNCSTORE:i16 i32 -> STORE i16
      if (TLI.isTypeLegal(StVT)) {
        // Explicitly truncate the value and emit a plain store.
        Value = DAG.getNode(ISD::TRUNCATE, dl, StVT, Value);
        Result = DAG.getStore(Chain, dl, Value, Ptr, ST->getPointerInfo(),
                              ST->getOriginalAlign(), MMOFlags, AAInfo);
      } else {
        // The in-memory type isn't legal. Truncate to the type it would
        // promote to, and then do a truncstore.
        Value = DAG.getNode(ISD::TRUNCATE, dl,
                            TLI.getTypeToTransformTo(*DAG.getContext(), StVT),
                            Value);
        Result =
            DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(),
                              StVT, ST->getOriginalAlign(), MMOFlags, AAInfo);
      }

      ReplaceNode(SDValue(Node, 0), Result);
      break;
    }
  }
}

/// Legalize a LOAD node (both non-extending and extending forms), replacing
/// its value and chain results in the DAG when a transformed load sequence
/// is required.
void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
  LoadSDNode *LD = cast<LoadSDNode>(Node);
  SDValue Chain = LD->getChain();  // The chain.
  SDValue Ptr = LD->getBasePtr();  // The base pointer.
  SDValue Value;                   // The value returned by the load op.
  SDLoc dl(Node);

  ISD::LoadExtType ExtType = LD->getExtensionType();
  if (ExtType == ISD::NON_EXTLOAD) {
    LLVM_DEBUG(dbgs() << "Legalizing non-extending load operation\n");
    MVT VT = Node->getSimpleValueType(0);
    // RVal/RChain default to the node's own results; only replaced if a
    // transformation below produces new nodes.
    SDValue RVal = SDValue(Node, 0);
    SDValue RChain = SDValue(Node, 1);

    switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
    default: llvm_unreachable("This action is not supported yet!");
    case TargetLowering::Legal: {
      EVT MemVT = LD->getMemoryVT();
      const DataLayout &DL = DAG.getDataLayout();
      // If this is an unaligned load and the target doesn't support it,
      // expand it.
      if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(), DL, MemVT,
                                              *LD->getMemOperand())) {
        std::tie(RVal, RChain) = TLI.expandUnalignedLoad(LD, DAG);
      }
      break;
    }
    case TargetLowering::Custom:
      if (SDValue Res = TLI.LowerOperation(RVal, DAG)) {
        RVal = Res;
        RChain = Res.getValue(1);
      }
      break;

    case TargetLowering::Promote: {
      // Load in the promoted register type and bitcast back.
      MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
      assert(NVT.getSizeInBits() == VT.getSizeInBits() &&
             "Can only promote loads to same size type");

      SDValue Res = DAG.getLoad(NVT, dl, Chain, Ptr, LD->getMemOperand());
      RVal = DAG.getNode(ISD::BITCAST, dl, VT, Res);
      RChain = Res.getValue(1);
      break;
    }
    }
    if (RChain.getNode() != Node) {
      assert(RVal.getNode() != Node && "Load must be completely replaced");
      DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), RVal);
      DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), RChain);
      if (UpdatedNodes) {
        UpdatedNodes->insert(RVal.getNode());
        UpdatedNodes->insert(RChain.getNode());
      }
      ReplacedNode(Node);
    }
    return;
  }

  LLVM_DEBUG(dbgs() << "Legalizing extending load operation\n");
  EVT SrcVT = LD->getMemoryVT();
  unsigned SrcWidth = SrcVT.getSizeInBits();
  MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags();
  AAMDNodes AAInfo = LD->getAAInfo();

  if (SrcWidth != SrcVT.getStoreSizeInBits() &&
      // Some targets pretend to have an i1 loading operation, and actually
      // load an i8.  This trick is correct for ZEXTLOAD because the top 7
      // bits are guaranteed to be zero; it helps the optimizers understand
      // that these bits are zero.  It is also useful for EXTLOAD, since it
      // tells the optimizers that those bits are undefined.  It would be
      // nice to have an effective generic way of getting these benefits...
      // Until such a way is found, don't insist on promoting i1 here.
      (SrcVT != MVT::i1 ||
       TLI.getLoadExtAction(ExtType, Node->getValueType(0), MVT::i1) ==
         TargetLowering::Promote)) {
    // Promote to a byte-sized load if not loading an integral number of
    // bytes.  For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
    unsigned NewWidth = SrcVT.getStoreSizeInBits();
    EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth);
    SDValue Ch;

    // The extra bits are guaranteed to be zero, since we stored them that
    // way.  A zext load from NVT thus automatically gives zext from SrcVT.

    ISD::LoadExtType NewExtType =
      ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD;

    SDValue Result = DAG.getExtLoad(NewExtType, dl, Node->getValueType(0),
                                    Chain, Ptr, LD->getPointerInfo(), NVT,
                                    LD->getOriginalAlign(), MMOFlags, AAInfo);

    Ch = Result.getValue(1); // The chain.

    if (ExtType == ISD::SEXTLOAD)
      // Having the top bits zero doesn't help when sign extending.
      Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
                           Result.getValueType(),
                           Result, DAG.getValueType(SrcVT));
    else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType())
      // All the top bits are guaranteed to be zero - inform the optimizers.
      Result = DAG.getNode(ISD::AssertZext, dl,
                           Result.getValueType(), Result,
                           DAG.getValueType(SrcVT));

    Value = Result;
    Chain = Ch;
  } else if (SrcWidth & (SrcWidth - 1)) {
    // If not loading a power-of-2 number of bits, expand as two loads.
    assert(!SrcVT.isVector() && "Unsupported extload!");
    unsigned LogSrcWidth = Log2_32(SrcWidth);
    assert(LogSrcWidth < 32);
    // RoundWidth: largest power-of-two chunk; ExtraWidth: the remainder.
    unsigned RoundWidth = 1 << LogSrcWidth;
    assert(RoundWidth < SrcWidth);
    unsigned ExtraWidth = SrcWidth - RoundWidth;
    assert(ExtraWidth < RoundWidth);
    assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
           "Load size not an integral number of bytes!");
    EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
    EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
    SDValue Lo, Hi, Ch;
    unsigned IncrementSize;
    auto &DL = DAG.getDataLayout();

    if (DL.isLittleEndian()) {
      // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
      // Load the bottom RoundWidth bits.
      Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0), Chain, Ptr,
                          LD->getPointerInfo(), RoundVT,
                          LD->getOriginalAlign(), MMOFlags, AAInfo);

      // Load the remaining ExtraWidth bits.
      IncrementSize = RoundWidth / 8;
      Ptr = DAG.getMemBasePlusOffset(Ptr, IncrementSize, dl);
      Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Chain, Ptr,
                          LD->getPointerInfo().getWithOffset(IncrementSize),
                          ExtraVT, LD->getOriginalAlign(), MMOFlags, AAInfo);

      // Build a factor node to remember that this load is independent of
      // the other one.
      Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                       Hi.getValue(1));

      // Move the top bits to the right place.
      Hi = DAG.getNode(
          ISD::SHL, dl, Hi.getValueType(), Hi,
          DAG.getConstant(RoundWidth, dl,
                          TLI.getShiftAmountTy(Hi.getValueType(), DL)));

      // Join the hi and lo parts.
      Value = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
    } else {
      // Big endian - avoid unaligned loads.
      // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8
      // Load the top RoundWidth bits.
      Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Chain, Ptr,
                          LD->getPointerInfo(), RoundVT,
                          LD->getOriginalAlign(), MMOFlags, AAInfo);

      // Load the remaining ExtraWidth bits.
      IncrementSize = RoundWidth / 8;
      Ptr = DAG.getMemBasePlusOffset(Ptr, IncrementSize, dl);
      Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0), Chain, Ptr,
                          LD->getPointerInfo().getWithOffset(IncrementSize),
                          ExtraVT, LD->getOriginalAlign(), MMOFlags, AAInfo);

      // Build a factor node to remember that this load is independent of
      // the other one.
      Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                       Hi.getValue(1));

      // Move the top bits to the right place.
      Hi = DAG.getNode(
          ISD::SHL, dl, Hi.getValueType(), Hi,
          DAG.getConstant(ExtraWidth, dl,
                          TLI.getShiftAmountTy(Hi.getValueType(), DL)));

      // Join the hi and lo parts.
      Value = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
    }

    Chain = Ch;
  } else {
    bool isCustom = false;
    switch (TLI.getLoadExtAction(ExtType, Node->getValueType(0),
                                 SrcVT.getSimpleVT())) {
    default: llvm_unreachable("This action is not supported yet!");
    case TargetLowering::Custom:
      isCustom = true;
      LLVM_FALLTHROUGH;
    case TargetLowering::Legal:
      Value = SDValue(Node, 0);
      Chain = SDValue(Node, 1);

      if (isCustom) {
        if (SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG)) {
          Value = Res;
          Chain = Res.getValue(1);
        }
      } else {
        // If this is an unaligned load and the target doesn't support it,
        // expand it.
        EVT MemVT = LD->getMemoryVT();
        const DataLayout &DL = DAG.getDataLayout();
        if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT,
                                    *LD->getMemOperand())) {
          std::tie(Value, Chain) = TLI.expandUnalignedLoad(LD, DAG);
        }
      }
      break;

    case TargetLowering::Expand: {
      EVT DestVT = Node->getValueType(0);
      if (!TLI.isLoadExtLegal(ISD::EXTLOAD, DestVT, SrcVT)) {
        // If the source type is not legal, see if there is a legal extload to
        // an intermediate type that we can then extend further.
        EVT LoadVT = TLI.getRegisterType(SrcVT.getSimpleVT());
        if (TLI.isTypeLegal(SrcVT) || // Same as SrcVT == LoadVT?
            TLI.isLoadExtLegal(ExtType, LoadVT, SrcVT)) {
          // If we are loading a legal type, this is a non-extload followed by a
          // full extend.
          ISD::LoadExtType MidExtType =
              (LoadVT == SrcVT) ? ISD::NON_EXTLOAD : ExtType;

          SDValue Load = DAG.getExtLoad(MidExtType, dl, LoadVT, Chain, Ptr,
                                        SrcVT, LD->getMemOperand());
          unsigned ExtendOp =
              ISD::getExtForLoadExtType(SrcVT.isFloatingPoint(), ExtType);
          Value = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load);
          Chain = Load.getValue(1);
          break;
        }

        // Handle the special case of fp16 extloads. EXTLOAD doesn't have the
        // normal undefined upper bits behavior to allow using an in-reg extend
        // with the illegal FP type, so load as an integer and do the
        // from-integer conversion.
        if (SrcVT.getScalarType() == MVT::f16) {
          EVT ISrcVT = SrcVT.changeTypeToInteger();
          EVT IDestVT = DestVT.changeTypeToInteger();
          EVT ILoadVT = TLI.getRegisterType(IDestVT.getSimpleVT());

          SDValue Result = DAG.getExtLoad(ISD::ZEXTLOAD, dl, ILoadVT, Chain,
                                          Ptr, ISrcVT, LD->getMemOperand());
          Value = DAG.getNode(ISD::FP16_TO_FP, dl, DestVT, Result);
          Chain = Result.getValue(1);
          break;
        }
      }

      assert(!SrcVT.isVector() &&
             "Vector Loads are handled in LegalizeVectorOps");

      // FIXME: This does not work for vectors on most targets.  Sign-
      // and zero-extend operations are currently folded into extending
      // loads, whether they are legal or not, and then we end up here
      // without any support for legalizing them.
      assert(ExtType != ISD::EXTLOAD &&
             "EXTLOAD should always be supported!");
      // Turn the unsupported load into an EXTLOAD followed by an
      // explicit zero/sign extend inreg.
      SDValue Result = DAG.getExtLoad(ISD::EXTLOAD, dl,
                                      Node->getValueType(0),
                                      Chain, Ptr, SrcVT,
                                      LD->getMemOperand());
      SDValue ValRes;
      if (ExtType == ISD::SEXTLOAD)
        ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
                             Result.getValueType(),
                             Result, DAG.getValueType(SrcVT));
      else
        ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT);
      Value = ValRes;
      Chain = Result.getValue(1);
      break;
    }
    }
  }

  // Since loads produce two values, make sure to remember that we legalized
  // both of them.
  // NOTE(review): tail of the preceding load-legalization routine (its
  // definition starts above this chunk). If legalization produced a new
  // chain, splice the replacement Value/Chain over the original load's two
  // results and retire the old node.
  if (Chain.getNode() != Node) {
    assert(Value.getNode() != Node && "Load must be completely replaced");
    DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Value);
    DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Chain);
    if (UpdatedNodes) {
      UpdatedNodes->insert(Value.getNode());
      UpdatedNodes->insert(Chain.getNode());
    }
    ReplacedNode(Node);
  }
}

/// Return a legal replacement for the given operation, with all legal operands.
void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
  LLVM_DEBUG(dbgs() << "\nLegalizing: "; Node->dump(&DAG));

  // Allow illegal target nodes and illegal registers.
  if (Node->getOpcode() == ISD::TargetConstant ||
      Node->getOpcode() == ISD::Register)
    return;

#ifndef NDEBUG
  // By this point the type legalizer must have run: every result and operand
  // type is expected to already be legal.
  for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
    assert(TLI.getTypeAction(*DAG.getContext(), Node->getValueType(i)) ==
               TargetLowering::TypeLegal &&
           "Unexpected illegal type!");

  for (const SDValue &Op : Node->op_values())
    assert((TLI.getTypeAction(*DAG.getContext(), Op.getValueType()) ==
                TargetLowering::TypeLegal ||
            Op.getOpcode() == ISD::TargetConstant ||
            Op.getOpcode() == ISD::Register) &&
           "Unexpected illegal type!");
#endif

  // Figure out the correct action; the way to query this varies by opcode
  TargetLowering::LegalizeAction Action = TargetLowering::Legal;
  bool SimpleFinishLegalizing = true;
  switch (Node->getOpcode()) {
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_VOID:
  case ISD::STACKSAVE:
    // These are queried with MVT::Other rather than a value type.
    Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other);
    break;
  case ISD::GET_DYNAMIC_AREA_OFFSET:
    Action = TLI.getOperationAction(Node->getOpcode(),
                                    Node->getValueType(0));
    break;
  case ISD::VAARG:
    Action = TLI.getOperationAction(Node->getOpcode(),
                                    Node->getValueType(0));
    if (Action != TargetLowering::Promote)
      Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other);
    break;
  case ISD::FP_TO_FP16:
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
  case ISD::EXTRACT_VECTOR_ELT:
  case ISD::LROUND:
  case ISD::LLROUND:
  case ISD::LRINT:
  case ISD::LLRINT:
    // These are registered with setOperationAction() using the *input* type.
    Action = TLI.getOperationAction(Node->getOpcode(),
                                    Node->getOperand(0).getValueType());
    break;
  case ISD::STRICT_FP_TO_FP16:
  case ISD::STRICT_SINT_TO_FP:
  case ISD::STRICT_UINT_TO_FP:
  case ISD::STRICT_LRINT:
  case ISD::STRICT_LLRINT:
  case ISD::STRICT_LROUND:
  case ISD::STRICT_LLROUND:
    // These pseudo-ops are the same as the other STRICT_ ops except
    // they are registered with setOperationAction() using the input type
    // instead of the output type.
    // (Operand 0 is the chain on strict nodes, so the input is operand 1.)
    Action = TLI.getOperationAction(Node->getOpcode(),
                                    Node->getOperand(1).getValueType());
    break;
  case ISD::SIGN_EXTEND_INREG: {
    // The action is keyed on the type being extended *from*.
    EVT InnerType = cast<VTSDNode>(Node->getOperand(1))->getVT();
    Action = TLI.getOperationAction(Node->getOpcode(), InnerType);
    break;
  }
  case ISD::ATOMIC_STORE:
    Action = TLI.getOperationAction(Node->getOpcode(),
                                    Node->getOperand(2).getValueType());
    break;
  case ISD::SELECT_CC:
  case ISD::STRICT_FSETCC:
  case ISD::STRICT_FSETCCS:
  case ISD::SETCC:
  case ISD::BR_CC: {
    // Each of these opcodes keeps its condition code and compared operands at
    // different operand indices; pick the right ones per opcode.
    unsigned CCOperand = Node->getOpcode() == ISD::SELECT_CC ? 4 :
                         Node->getOpcode() == ISD::STRICT_FSETCC ? 3 :
                         Node->getOpcode() == ISD::STRICT_FSETCCS ? 3 :
                         Node->getOpcode() == ISD::SETCC ? 2 : 1;
    unsigned CompareOperand = Node->getOpcode() == ISD::BR_CC ? 2 :
                              Node->getOpcode() == ISD::STRICT_FSETCC ? 1 :
                              Node->getOpcode() == ISD::STRICT_FSETCCS ? 1 : 0;
    MVT OpVT = Node->getOperand(CompareOperand).getSimpleValueType();
    ISD::CondCode CCCode =
        cast<CondCodeSDNode>(Node->getOperand(CCOperand))->get();
    // The condition code itself must be legal first; only then consult the
    // operation action for the node.
    Action = TLI.getCondCodeAction(CCCode, OpVT);
    if (Action == TargetLowering::Legal) {
      if (Node->getOpcode() == ISD::SELECT_CC)
        Action = TLI.getOperationAction(Node->getOpcode(),
                                        Node->getValueType(0));
      else
        Action = TLI.getOperationAction(Node->getOpcode(), OpVT);
    }
    break;
  }
  case ISD::LOAD:
  case ISD::STORE:
    // FIXME: Model these properly.  LOAD and STORE are complicated, and
    // STORE expects the unlegalized operand in some cases.
    SimpleFinishLegalizing = false;
    break;
  case ISD::CALLSEQ_START:
  case ISD::CALLSEQ_END:
    // FIXME: This shouldn't be necessary.
    // These nodes have special properties
    // dealing with the recursive nature of legalization.  Removing this
    // special case should be done as part of making LegalizeDAG non-recursive.
    SimpleFinishLegalizing = false;
    break;
  case ISD::EXTRACT_ELEMENT:
  case ISD::FLT_ROUNDS_:
  case ISD::MERGE_VALUES:
  case ISD::EH_RETURN:
  case ISD::FRAME_TO_ARGS_OFFSET:
  case ISD::EH_DWARF_CFA:
  case ISD::EH_SJLJ_SETJMP:
  case ISD::EH_SJLJ_LONGJMP:
  case ISD::EH_SJLJ_SETUP_DISPATCH:
    // These operations lie about being legal: when they claim to be legal,
    // they should actually be expanded.
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    if (Action == TargetLowering::Legal)
      Action = TargetLowering::Expand;
    break;
  case ISD::INIT_TRAMPOLINE:
  case ISD::ADJUST_TRAMPOLINE:
  case ISD::FRAMEADDR:
  case ISD::RETURNADDR:
  case ISD::ADDROFRETURNADDR:
  case ISD::SPONENTRY:
    // These operations lie about being legal: when they claim to be legal,
    // they should actually be custom-lowered.
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    if (Action == TargetLowering::Legal)
      Action = TargetLowering::Custom;
    break;
  case ISD::READCYCLECOUNTER:
    // READCYCLECOUNTER returns an i64, even if type legalization might have
    // expanded that to several smaller types.
    Action = TLI.getOperationAction(Node->getOpcode(), MVT::i64);
    break;
  case ISD::READ_REGISTER:
  case ISD::WRITE_REGISTER:
    // Named register is legal in the DAG, but blocked by register name
    // selection if not implemented by target (to chose the correct register)
    // They'll be converted to Copy(To/From)Reg.
    Action = TargetLowering::Legal;
    break;
  case ISD::DEBUGTRAP:
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    if (Action == TargetLowering::Expand) {
      // replace ISD::DEBUGTRAP with ISD::TRAP
      SDValue NewVal;
      NewVal = DAG.getNode(ISD::TRAP, SDLoc(Node), Node->getVTList(),
                           Node->getOperand(0));
      ReplaceNode(Node, NewVal.getNode());
      LegalizeOp(NewVal.getNode());
      return;
    }
    break;
  case ISD::SADDSAT:
  case ISD::UADDSAT:
  case ISD::SSUBSAT:
  case ISD::USUBSAT: {
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    break;
  }
  case ISD::SMULFIX:
  case ISD::SMULFIXSAT:
  case ISD::UMULFIX:
  case ISD::UMULFIXSAT:
  case ISD::SDIVFIX:
  case ISD::SDIVFIXSAT:
  case ISD::UDIVFIX:
  case ISD::UDIVFIXSAT: {
    // Fixed-point ops carry a scale operand; the action depends on it too.
    unsigned Scale = Node->getConstantOperandVal(2);
    Action = TLI.getFixedPointOperationAction(Node->getOpcode(),
                                              Node->getValueType(0), Scale);
    break;
  }
  case ISD::MSCATTER:
    Action = TLI.getOperationAction(Node->getOpcode(),
                    cast<MaskedScatterSDNode>(Node)->getValue().getValueType());
    break;
  case ISD::MSTORE:
    Action = TLI.getOperationAction(Node->getOpcode(),
                     cast<MaskedStoreSDNode>(Node)->getValue().getValueType());
    break;
  case ISD::VECREDUCE_FADD:
  case ISD::VECREDUCE_FMUL:
  case ISD::VECREDUCE_ADD:
  case ISD::VECREDUCE_MUL:
  case ISD::VECREDUCE_AND:
  case ISD::VECREDUCE_OR:
  case ISD::VECREDUCE_XOR:
  case ISD::VECREDUCE_SMAX:
  case ISD::VECREDUCE_SMIN:
  case ISD::VECREDUCE_UMAX:
  case ISD::VECREDUCE_UMIN:
  case ISD::VECREDUCE_FMAX:
  case ISD::VECREDUCE_FMIN:
    // Reductions are keyed on the vector operand type, not the scalar result.
    Action = TLI.getOperationAction(
        Node->getOpcode(), Node->getOperand(0).getValueType());
    break;
  default:
    if (Node->getOpcode() >= ISD::BUILTIN_OP_END) {
      // Target-specific nodes are assumed legal; the target emitted them.
      Action = TargetLowering::Legal;
    } else {
      Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    }
    break;
  }

  if (SimpleFinishLegalizing) {
    SDNode *NewNode = Node;
    switch (Node->getOpcode()) {
    default: break;
    case ISD::SHL:
    case ISD::SRL:
    case ISD::SRA:
    case ISD::ROTL:
    case ISD::ROTR: {
      // Legalizing shifts/rotates requires adjusting the
      // shift amount
      // to the appropriate width.
      SDValue Op0 = Node->getOperand(0);
      SDValue Op1 = Node->getOperand(1);
      if (!Op1.getValueType().isVector()) {
        SDValue SAO = DAG.getShiftAmountOperand(Op0.getValueType(), Op1);

        // The getShiftAmountOperand() may create a new operand node or
        // return the existing one. If new operand is created we need
        // to update the parent node.
        // Do not try to legalize SAO here! It will be automatically legalized
        // in the next round.
        if (SAO != Op1)
          NewNode = DAG.UpdateNodeOperands(Node, Op0, SAO);
      }
    }
    break;
    case ISD::FSHL:
    case ISD::FSHR:
    case ISD::SRL_PARTS:
    case ISD::SRA_PARTS:
    case ISD::SHL_PARTS: {
      // Legalizing shifts/rotates requires adjusting the shift amount
      // to the appropriate width.
      SDValue Op0 = Node->getOperand(0);
      SDValue Op1 = Node->getOperand(1);
      SDValue Op2 = Node->getOperand(2);
      if (!Op2.getValueType().isVector()) {
        SDValue SAO = DAG.getShiftAmountOperand(Op0.getValueType(), Op2);

        // The getShiftAmountOperand() may create a new operand node or
        // return the existing one. If new operand is created we need
        // to update the parent node.
        if (SAO != Op2)
          NewNode = DAG.UpdateNodeOperands(Node, Op0, Op1, SAO);
      }
      break;
    }
    }

    if (NewNode != Node) {
      ReplaceNode(Node, NewNode);
      Node = NewNode;
    }
    switch (Action) {
    case TargetLowering::Legal:
      LLVM_DEBUG(dbgs() << "Legal node: nothing to do\n");
      return;
    case TargetLowering::Custom:
      LLVM_DEBUG(dbgs() << "Trying custom legalization\n");
      // FIXME: The handling for custom lowering with multiple results is
      // a complete mess.
      if (SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG)) {
        // Target returned the node unchanged: treat as legal.
        if (!(Res.getNode() != Node || Res.getResNo() != 0))
          return;

        if (Node->getNumValues() == 1) {
          LLVM_DEBUG(dbgs() << "Successfully custom legalized node\n");
          // We can just directly replace this node with the lowered value.
          ReplaceNode(SDValue(Node, 0), Res);
          return;
        }

        // Multi-result custom lowering: assume the target returned a node
        // with one value per original result and replace them all.
        SmallVector<SDValue, 8> ResultVals;
        for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
          ResultVals.push_back(Res.getValue(i));
        LLVM_DEBUG(dbgs() << "Successfully custom legalized node\n");
        ReplaceNode(Node, ResultVals.data());
        return;
      }
      LLVM_DEBUG(dbgs() << "Could not custom legalize node\n");
      // Custom lowering declined; fall back to generic expansion.
      LLVM_FALLTHROUGH;
    case TargetLowering::Expand:
      if (ExpandNode(Node))
        return;
      // Expansion failed; fall back to a libcall.
      LLVM_FALLTHROUGH;
    case TargetLowering::LibCall:
      ConvertNodeToLibcall(Node);
      return;
    case TargetLowering::Promote:
      PromoteNode(Node);
      return;
    }
  }

  // Non-simple legalization: only call sequences and memory ops reach here.
  switch (Node->getOpcode()) {
  default:
#ifndef NDEBUG
    dbgs() << "NODE: ";
    Node->dump( &DAG);
    dbgs() << "\n";
#endif
    llvm_unreachable("Do not know how to legalize this operator!");

  case ISD::CALLSEQ_START:
  case ISD::CALLSEQ_END:
    break;
  case ISD::LOAD:
    return LegalizeLoadOps(Node);
  case ISD::STORE:
    return LegalizeStoreOps(Node);
  }
}

/// Expand an extract-from-vector node (Op) by storing the vector to a stack
/// slot (reusing an existing store when possible) and loading the requested
/// element/part back out.
SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) {
  SDValue Vec = Op.getOperand(0);
  SDValue Idx = Op.getOperand(1);
  SDLoc dl(Op);

  // Before we generate a new store to a temporary stack slot, see if there is
  // already one that we can use. There often is because when we scalarize
  // vector operations (using SelectionDAG::UnrollVectorOp for example) a whole
  // series of EXTRACT_VECTOR_ELT nodes are generated, one for each element in
  // the vector. If all are expanded here, we don't want one store per vector
  // element.

  // Caches for hasPredecessorHelper
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  Visited.insert(Op.getNode());
  Worklist.push_back(Idx.getNode());
  SDValue StackPtr, Ch;
  // Look through the users of the vector for a plain, non-truncating store of
  // the whole vector that we can piggy-back on.
  for (SDNode::use_iterator UI = Vec.getNode()->use_begin(),
         UE = Vec.getNode()->use_end(); UI != UE; ++UI) {
    SDNode *User = *UI;
    if (StoreSDNode *ST = dyn_cast<StoreSDNode>(User)) {
      if (ST->isIndexed() || ST->isTruncatingStore() ||
          ST->getValue() != Vec)
        continue;

      // Make sure that nothing else could have stored into the destination of
      // this store.
      if (!ST->getChain().reachesChainWithoutSideEffects(DAG.getEntryNode()))
        continue;

      // If the index is dependent on the store we will introduce a cycle when
      // creating the load (the load uses the index, and by replacing the chain
      // we will make the index dependent on the load). Also, the store might be
      // dependent on the extractelement and introduce a cycle when creating
      // the load.
      if (SDNode::hasPredecessorHelper(ST, Visited, Worklist) ||
          ST->hasPredecessor(Op.getNode()))
        continue;

      StackPtr = ST->getBasePtr();
      Ch = SDValue(ST, 0);
      break;
    }
  }

  EVT VecVT = Vec.getValueType();

  if (!Ch.getNode()) {
    // Store the value to a temporary stack slot, then LOAD the returned part.
    StackPtr = DAG.CreateStackTemporary(VecVT);
    Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr,
                      MachinePointerInfo());
  }

  StackPtr = TLI.getVectorElementPointer(DAG, StackPtr, VecVT, Idx);

  SDValue NewLoad;

  // Vector-typed result => extracting a subvector; scalar result may need an
  // any-extending load if the element type was promoted.
  if (Op.getValueType().isVector())
    NewLoad =
        DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, MachinePointerInfo());
  else
    NewLoad = DAG.getExtLoad(ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr,
                             MachinePointerInfo(),
                             VecVT.getVectorElementType());

  // Replace the chain going out of the store, by the one out of the load.
  DAG.ReplaceAllUsesOfValueWith(Ch, SDValue(NewLoad.getNode(), 1));

  // We introduced a cycle though, so update the loads operands, making sure
  // to use the original store's chain as an incoming chain.
SmallVector<SDValue, 6> NewLoadOperands(NewLoad->op_begin(), NewLoad->op_end()); NewLoadOperands[0] = Ch; NewLoad = SDValue(DAG.UpdateNodeOperands(NewLoad.getNode(), NewLoadOperands), 0); return NewLoad; } SDValue SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op) { assert(Op.getValueType().isVector() && "Non-vector insert subvector!"); SDValue Vec = Op.getOperand(0); SDValue Part = Op.getOperand(1); SDValue Idx = Op.getOperand(2); SDLoc dl(Op); // Store the value to a temporary stack slot, then LOAD the returned part. EVT VecVT = Vec.getValueType(); SDValue StackPtr = DAG.CreateStackTemporary(VecVT); int FI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); // First store the whole vector. SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo); // Then store the inserted part. SDValue SubStackPtr = TLI.getVectorElementPointer(DAG, StackPtr, VecVT, Idx); // Store the subvector. Ch = DAG.getStore( Ch, dl, Part, SubStackPtr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction())); // Finally, load the updated vector. return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, PtrInfo); } SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) { // We can't handle this case efficiently. Allocate a sufficiently // aligned object on the stack, store each element into it, then load // the result as a vector. // Create the stack frame object. EVT VT = Node->getValueType(0); EVT EltVT = VT.getVectorElementType(); SDLoc dl(Node); SDValue FIPtr = DAG.CreateStackTemporary(VT); int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex(); MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); // Emit a store of each element to the stack slot. 
SmallVector<SDValue, 8> Stores; unsigned TypeByteSize = EltVT.getSizeInBits() / 8; assert(TypeByteSize > 0 && "Vector element type too small for stack store!"); // Store (in the right endianness) the elements to memory. for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { // Ignore undef elements. if (Node->getOperand(i).isUndef()) continue; unsigned Offset = TypeByteSize*i; SDValue Idx = DAG.getMemBasePlusOffset(FIPtr, Offset, dl); // If the destination vector element type is narrower than the source // element type, only store the bits necessary. if (EltVT.bitsLT(Node->getOperand(i).getValueType().getScalarType())) { Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl, Node->getOperand(i), Idx, PtrInfo.getWithOffset(Offset), EltVT)); } else Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, Node->getOperand(i), Idx, PtrInfo.getWithOffset(Offset))); } SDValue StoreChain; if (!Stores.empty()) // Not all undef elements? StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); else StoreChain = DAG.getEntryNode(); // Result is a load from the stack slot. return DAG.getLoad(VT, dl, StoreChain, FIPtr, PtrInfo); } /// Bitcast a floating-point value to an integer value. Only bitcast the part /// containing the sign bit if the target has no integer value capable of /// holding all bits of the floating-point value. void SelectionDAGLegalize::getSignAsIntValue(FloatSignAsInt &State, const SDLoc &DL, SDValue Value) const { EVT FloatVT = Value.getValueType(); unsigned NumBits = FloatVT.getSizeInBits(); State.FloatVT = FloatVT; EVT IVT = EVT::getIntegerVT(*DAG.getContext(), NumBits); // Convert to an integer of the same size. if (TLI.isTypeLegal(IVT)) { State.IntValue = DAG.getNode(ISD::BITCAST, DL, IVT, Value); State.SignMask = APInt::getSignMask(NumBits); State.SignBit = NumBits - 1; return; } auto &DataLayout = DAG.getDataLayout(); // Store the float to memory, then load the sign part out as an integer. 
MVT LoadTy = TLI.getRegisterType(*DAG.getContext(), MVT::i8); // First create a temporary that is aligned for both the load and store. SDValue StackPtr = DAG.CreateStackTemporary(FloatVT, LoadTy); int FI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); // Then store the float to it. State.FloatPtr = StackPtr; MachineFunction &MF = DAG.getMachineFunction(); State.FloatPointerInfo = MachinePointerInfo::getFixedStack(MF, FI); State.Chain = DAG.getStore(DAG.getEntryNode(), DL, Value, State.FloatPtr, State.FloatPointerInfo); SDValue IntPtr; if (DataLayout.isBigEndian()) { assert(FloatVT.isByteSized() && "Unsupported floating point type!"); // Load out a legal integer with the same sign bit as the float. IntPtr = StackPtr; State.IntPointerInfo = State.FloatPointerInfo; } else { // Advance the pointer so that the loaded byte will contain the sign bit. unsigned ByteOffset = (FloatVT.getSizeInBits() / 8) - 1; IntPtr = DAG.getMemBasePlusOffset(StackPtr, ByteOffset, DL); State.IntPointerInfo = MachinePointerInfo::getFixedStack(MF, FI, ByteOffset); } State.IntPtr = IntPtr; State.IntValue = DAG.getExtLoad(ISD::EXTLOAD, DL, LoadTy, State.Chain, IntPtr, State.IntPointerInfo, MVT::i8); State.SignMask = APInt::getOneBitSet(LoadTy.getSizeInBits(), 7); State.SignBit = 7; } /// Replace the integer value produced by getSignAsIntValue() with a new value /// and cast the result back to a floating-point type. SDValue SelectionDAGLegalize::modifySignAsInt(const FloatSignAsInt &State, const SDLoc &DL, SDValue NewIntValue) const { if (!State.Chain) return DAG.getNode(ISD::BITCAST, DL, State.FloatVT, NewIntValue); // Override the part containing the sign bit in the value stored on the stack. 
  SDValue Chain = DAG.getTruncStore(State.Chain, DL, NewIntValue, State.IntPtr,
                                    State.IntPointerInfo, MVT::i8);
  return DAG.getLoad(State.FloatVT, DL, Chain, State.FloatPtr,
                     State.FloatPointerInfo);
}

/// Expand FCOPYSIGN(Mag, Sign) into integer bit manipulation (or FABS/FNEG
/// plus a select when those are legal).
SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode *Node) const {
  SDLoc DL(Node);
  SDValue Mag = Node->getOperand(0);
  SDValue Sign = Node->getOperand(1);

  // Get sign bit into an integer value.
  FloatSignAsInt SignAsInt;
  getSignAsIntValue(SignAsInt, DL, Sign);
  EVT IntVT = SignAsInt.IntValue.getValueType();
  SDValue SignMask = DAG.getConstant(SignAsInt.SignMask, DL, IntVT);
  SDValue SignBit = DAG.getNode(ISD::AND, DL, IntVT, SignAsInt.IntValue,
                                SignMask);

  // If FABS is legal transform FCOPYSIGN(x, y) => sign(x) ? -FABS(x) : FABS(X)
  EVT FloatVT = Mag.getValueType();
  if (TLI.isOperationLegalOrCustom(ISD::FABS, FloatVT) &&
      TLI.isOperationLegalOrCustom(ISD::FNEG, FloatVT)) {
    SDValue AbsValue = DAG.getNode(ISD::FABS, DL, FloatVT, Mag);
    SDValue NegValue = DAG.getNode(ISD::FNEG, DL, FloatVT, AbsValue);
    SDValue Cond = DAG.getSetCC(DL, getSetCCResultType(IntVT), SignBit,
                                DAG.getConstant(0, DL, IntVT), ISD::SETNE);
    return DAG.getSelect(DL, FloatVT, Cond, NegValue, AbsValue);
  }

  // Transform Mag value to integer, and clear the sign bit.
  FloatSignAsInt MagAsInt;
  getSignAsIntValue(MagAsInt, DL, Mag);
  EVT MagVT = MagAsInt.IntValue.getValueType();
  SDValue ClearSignMask = DAG.getConstant(~MagAsInt.SignMask, DL, MagVT);
  SDValue ClearedSign = DAG.getNode(ISD::AND, DL, MagVT, MagAsInt.IntValue,
                                    ClearSignMask);

  // Get the signbit at the right position for MagAsInt.
  // The two sign-as-int representations may differ in width and bit position,
  // so extend/truncate and shift the extracted sign bit as needed.
  int ShiftAmount = SignAsInt.SignBit - MagAsInt.SignBit;
  EVT ShiftVT = IntVT;
  if (SignBit.getValueSizeInBits() < ClearedSign.getValueSizeInBits()) {
    SignBit = DAG.getNode(ISD::ZERO_EXTEND, DL, MagVT, SignBit);
    ShiftVT = MagVT;
  }
  if (ShiftAmount > 0) {
    SDValue ShiftCnst = DAG.getConstant(ShiftAmount, DL, ShiftVT);
    SignBit = DAG.getNode(ISD::SRL, DL, ShiftVT, SignBit, ShiftCnst);
  } else if (ShiftAmount < 0) {
    SDValue ShiftCnst = DAG.getConstant(-ShiftAmount, DL, ShiftVT);
    SignBit = DAG.getNode(ISD::SHL, DL, ShiftVT, SignBit, ShiftCnst);
  }
  if (SignBit.getValueSizeInBits() > ClearedSign.getValueSizeInBits()) {
    SignBit = DAG.getNode(ISD::TRUNCATE, DL, MagVT, SignBit);
  }

  // Store the part with the modified sign and convert back to float.
  SDValue CopiedSign = DAG.getNode(ISD::OR, DL, MagVT, ClearedSign, SignBit);
  return modifySignAsInt(MagAsInt, DL, CopiedSign);
}

/// Expand FABS by clearing the sign bit (via FCOPYSIGN when legal, otherwise
/// by integer masking of the sign representation).
SDValue SelectionDAGLegalize::ExpandFABS(SDNode *Node) const {
  SDLoc DL(Node);
  SDValue Value = Node->getOperand(0);

  // Transform FABS(x) => FCOPYSIGN(x, 0.0) if FCOPYSIGN is legal.
  EVT FloatVT = Value.getValueType();
  if (TLI.isOperationLegalOrCustom(ISD::FCOPYSIGN, FloatVT)) {
    SDValue Zero = DAG.getConstantFP(0.0, DL, FloatVT);
    return DAG.getNode(ISD::FCOPYSIGN, DL, FloatVT, Value, Zero);
  }

  // Transform value to integer, clear the sign bit and transform back.
FloatSignAsInt ValueAsInt; getSignAsIntValue(ValueAsInt, DL, Value); EVT IntVT = ValueAsInt.IntValue.getValueType(); SDValue ClearSignMask = DAG.getConstant(~ValueAsInt.SignMask, DL, IntVT); SDValue ClearedSign = DAG.getNode(ISD::AND, DL, IntVT, ValueAsInt.IntValue, ClearSignMask); return modifySignAsInt(ValueAsInt, DL, ClearedSign); } void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node, SmallVectorImpl<SDValue> &Results) { unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore(); assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and" " not tell us which reg is the stack pointer!"); SDLoc dl(Node); EVT VT = Node->getValueType(0); SDValue Tmp1 = SDValue(Node, 0); SDValue Tmp2 = SDValue(Node, 1); SDValue Tmp3 = Node->getOperand(2); SDValue Chain = Tmp1.getOperand(0); // Chain the dynamic stack allocation so that it doesn't modify the stack // pointer when other instructions are using the stack. Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl); SDValue Size = Tmp2.getOperand(1); SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT); Chain = SP.getValue(1); Align Alignment = cast<ConstantSDNode>(Tmp3)->getAlignValue(); const TargetFrameLowering *TFL = DAG.getSubtarget().getFrameLowering(); unsigned Opc = TFL->getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp ? ISD::ADD : ISD::SUB; Align StackAlign = TFL->getStackAlign(); Tmp1 = DAG.getNode(Opc, dl, VT, SP, Size); // Value if (Alignment > StackAlign) Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1, DAG.getConstant(-Alignment.value(), dl, VT)); Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true), DAG.getIntPtrConstant(0, dl, true), SDValue(), dl); Results.push_back(Tmp1); Results.push_back(Tmp2); } /// Legalize a SETCC with given LHS and RHS and condition code CC on the current /// target. /// /// If the SETCC has been legalized using AND / OR, then the legalized node /// will be stored in LHS. 
/// RHS and CC will be set to SDValue(). NeedInvert
/// will be set to false.
///
/// If the SETCC has been legalized by using getSetCCSwappedOperands(),
/// then the values of LHS and RHS will be swapped, CC will be set to the
/// new condition, and NeedInvert will be set to false.
///
/// If the SETCC has been legalized using the inverse condcode, then LHS and
/// RHS will be unchanged, CC will set to the inverted condcode, and NeedInvert
/// will be set to true. The caller must invert the result of the SETCC with
/// SelectionDAG::getLogicalNOT() or take equivalent action to swap the effect
/// of a true/false result.
///
/// \returns true if the SetCC has been legalized, false if it hasn't.
bool SelectionDAGLegalize::LegalizeSetCCCondCode(
    EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC, bool &NeedInvert,
    const SDLoc &dl, SDValue &Chain, bool IsSignaling) {
  MVT OpVT = LHS.getSimpleValueType();
  ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get();
  NeedInvert = false;
  switch (TLI.getCondCodeAction(CCCode, OpVT)) {
  default:
    llvm_unreachable("Unknown condition code action!");
  case TargetLowering::Legal:
    // Nothing to do.
    break;
  case TargetLowering::Expand: {
    // Strategy 1: swap operands (uses the operand-swapped condition code).
    ISD::CondCode InvCC = ISD::getSetCCSwappedOperands(CCCode);
    if (TLI.isCondCodeLegalOrCustom(InvCC, OpVT)) {
      std::swap(LHS, RHS);
      CC = DAG.getCondCode(InvCC);
      return true;
    }
    // Swapping operands didn't work. Try inverting the condition.
    bool NeedSwap = false;
    InvCC = getSetCCInverse(CCCode, OpVT);
    if (!TLI.isCondCodeLegalOrCustom(InvCC, OpVT)) {
      // If inverting the condition is not enough, try swapping operands
      // on top of it.
      InvCC = ISD::getSetCCSwappedOperands(InvCC);
      NeedSwap = true;
    }
    if (TLI.isCondCodeLegalOrCustom(InvCC, OpVT)) {
      CC = DAG.getCondCode(InvCC);
      NeedInvert = true;
      if (NeedSwap)
        std::swap(LHS, RHS);
      return true;
    }

    // Strategy 3: decompose an FP comparison into two comparisons combined
    // with AND/OR (e.g. SETOLT => SETLT AND SETO).
    ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID;
    unsigned Opc = 0;
    switch (CCCode) {
    default:
      llvm_unreachable("Don't know how to expand this condition!");
    case ISD::SETO:
      assert(TLI.isCondCodeLegal(ISD::SETOEQ, OpVT) &&
             "If SETO is expanded, SETOEQ must be legal!");
      CC1 = ISD::SETOEQ;
      CC2 = ISD::SETOEQ;
      Opc = ISD::AND;
      break;
    case ISD::SETUO:
      assert(TLI.isCondCodeLegal(ISD::SETUNE, OpVT) &&
             "If SETUO is expanded, SETUNE must be legal!");
      CC1 = ISD::SETUNE;
      CC2 = ISD::SETUNE;
      Opc = ISD::OR;
      break;
    case ISD::SETOEQ:
    case ISD::SETOGT:
    case ISD::SETOGE:
    case ISD::SETOLT:
    case ISD::SETOLE:
    case ISD::SETONE:
    case ISD::SETUEQ:
    case ISD::SETUNE:
    case ISD::SETUGT:
    case ISD::SETUGE:
    case ISD::SETULT:
    case ISD::SETULE:
      // If we are floating point, assign and break, otherwise fall through.
      if (!OpVT.isInteger()) {
        // We can use the 4th bit to tell if we are the unordered
        // or ordered version of the opcode.
        CC2 = ((unsigned)CCCode & 0x8U) ? ISD::SETUO : ISD::SETO;
        Opc = ((unsigned)CCCode & 0x8U) ? ISD::OR : ISD::AND;
        CC1 = (ISD::CondCode)(((int)CCCode & 0x7) | 0x10);
        break;
      }
      // Fallthrough if we are unsigned integer.
      LLVM_FALLTHROUGH;
    case ISD::SETLE:
    case ISD::SETGT:
    case ISD::SETGE:
    case ISD::SETLT:
    case ISD::SETNE:
    case ISD::SETEQ:
      // If all combinations of inverting the condition and swapping operands
      // didn't work then we have no means to expand the condition.
      llvm_unreachable("Don't know how to expand this condition!");
    }

    SDValue SetCC1, SetCC2;
    if (CCCode != ISD::SETO && CCCode != ISD::SETUO) {
      // If we aren't the ordered or unorder operation,
      // then the pattern is (LHS CC1 RHS) Opc (LHS CC2 RHS).
      SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1, Chain, IsSignaling);
      SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2, Chain, IsSignaling);
    } else {
      // Otherwise, the pattern is (LHS CC1 LHS) Opc (RHS CC2 RHS)
      SetCC1 = DAG.getSetCC(dl, VT, LHS, LHS, CC1, Chain, IsSignaling);
      SetCC2 = DAG.getSetCC(dl, VT, RHS, RHS, CC2, Chain, IsSignaling);
    }
    if (Chain)
      // Strict FP comparisons produce chains; merge them.
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, SetCC1.getValue(1),
                          SetCC2.getValue(1));
    LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2);
    RHS = SDValue();
    CC = SDValue();
    return true;
  }
  }
  return false;
}

/// Emit a store/load combination to the stack.  This stores
/// SrcOp to a stack slot of type SlotVT, truncating it if needed.  It then does
/// a load from the stack slot to DestVT, extending it if needed.
/// The resultant code need not be legal.
SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, EVT SlotVT,
                                               EVT DestVT, const SDLoc &dl) {
  return EmitStackConvert(SrcOp, SlotVT, DestVT, dl, DAG.getEntryNode());
}

SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, EVT SlotVT,
                                               EVT DestVT, const SDLoc &dl,
                                               SDValue Chain) {
  // Create the stack frame object.
  unsigned SrcAlign = DAG.getDataLayout().getPrefTypeAlignment(
      SrcOp.getValueType().getTypeForEVT(*DAG.getContext()));
  SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign);

  FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr);
  int SPFI = StackPtrFI->getIndex();
  MachinePointerInfo PtrInfo =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);

  unsigned SrcSize = SrcOp.getValueSizeInBits();
  unsigned SlotSize = SlotVT.getSizeInBits();
  unsigned DestSize = DestVT.getSizeInBits();
  Type *DestType = DestVT.getTypeForEVT(*DAG.getContext());
  unsigned DestAlign = DAG.getDataLayout().getPrefTypeAlignment(DestType);

  // Emit a store to the stack slot.  Use a truncstore if the input value is
  // larger than DestVT.
SDValue Store; if (SrcSize > SlotSize) Store = DAG.getTruncStore(Chain, dl, SrcOp, FIPtr, PtrInfo, SlotVT, SrcAlign); else { assert(SrcSize == SlotSize && "Invalid store"); Store = DAG.getStore(Chain, dl, SrcOp, FIPtr, PtrInfo, SrcAlign); } // Result is a load from the stack slot. if (SlotSize == DestSize) return DAG.getLoad(DestVT, dl, Store, FIPtr, PtrInfo, DestAlign); assert(SlotSize < DestSize && "Unknown extension!"); return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr, PtrInfo, SlotVT, DestAlign); } SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) { SDLoc dl(Node); // Create a vector sized/aligned stack slot, store the value to element #0, // then load the whole vector back out. SDValue StackPtr = DAG.CreateStackTemporary(Node->getValueType(0)); FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr); int SPFI = StackPtrFI->getIndex(); SDValue Ch = DAG.getTruncStore( DAG.getEntryNode(), dl, Node->getOperand(0), StackPtr, MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI), Node->getValueType(0).getVectorElementType()); return DAG.getLoad( Node->getValueType(0), dl, Ch, StackPtr, MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI)); } static bool ExpandBVWithShuffles(SDNode *Node, SelectionDAG &DAG, const TargetLowering &TLI, SDValue &Res) { unsigned NumElems = Node->getNumOperands(); SDLoc dl(Node); EVT VT = Node->getValueType(0); // Try to group the scalars into pairs, shuffle the pairs together, then // shuffle the pairs of pairs together, etc. until the vector has // been built. This will work only if all of the necessary shuffle masks // are legal. // We do this in two phases; first to check the legality of the shuffles, // and next, assuming that all shuffles are legal, to create the new nodes. 
  // Phase 0 only checks mask legality (no nodes built); Phase 1 builds nodes.
  for (int Phase = 0; Phase < 2; ++Phase) {
    SmallVector<std::pair<SDValue, SmallVector<int, 16>>, 16> IntermedVals,
                                                              NewIntermedVals;
    for (unsigned i = 0; i < NumElems; ++i) {
      SDValue V = Node->getOperand(i);
      if (V.isUndef())
        continue;

      SDValue Vec;
      if (Phase)
        Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, V);
      IntermedVals.push_back(std::make_pair(Vec, SmallVector<int, 16>(1, i)));
    }

    while (IntermedVals.size() > 2) {
      NewIntermedVals.clear();
      for (unsigned i = 0, e = (IntermedVals.size() & ~1u); i < e; i += 2) {
        // This vector and the next vector are shuffled together (simply to
        // append the one to the other).
        SmallVector<int, 16> ShuffleVec(NumElems, -1);

        SmallVector<int, 16> FinalIndices;
        FinalIndices.reserve(IntermedVals[i].second.size() +
                             IntermedVals[i+1].second.size());

        int k = 0;
        for (unsigned j = 0, f = IntermedVals[i].second.size(); j != f;
             ++j, ++k) {
          ShuffleVec[k] = j;
          FinalIndices.push_back(IntermedVals[i].second[j]);
        }
        for (unsigned j = 0, f = IntermedVals[i+1].second.size(); j != f;
             ++j, ++k) {
          ShuffleVec[k] = NumElems + j;
          FinalIndices.push_back(IntermedVals[i+1].second[j]);
        }

        SDValue Shuffle;
        if (Phase)
          Shuffle = DAG.getVectorShuffle(VT, dl, IntermedVals[i].first,
                                         IntermedVals[i+1].first,
                                         ShuffleVec);
        else if (!TLI.isShuffleMaskLegal(ShuffleVec, VT))
          return false;
        NewIntermedVals.push_back(
            std::make_pair(Shuffle, std::move(FinalIndices)));
      }

      // If we had an odd number of defined values, then append the last
      // element to the array of new vectors.
      if ((IntermedVals.size() & 1) != 0)
        NewIntermedVals.push_back(IntermedVals.back());

      IntermedVals.swap(NewIntermedVals);
    }

    assert(IntermedVals.size() <= 2 && IntermedVals.size() > 0 &&
           "Invalid number of intermediate vectors");
    SDValue Vec1 = IntermedVals[0].first;
    SDValue Vec2;
    if (IntermedVals.size() > 1)
      Vec2 = IntermedVals[1].first;
    else if (Phase)
      Vec2 = DAG.getUNDEF(VT);

    // Final mask interleaving the two remaining intermediate vectors.
    // NOTE(review): IntermedVals[1] is accessed unconditionally below;
    // this appears to rely on the caller only invoking this expansion with
    // more than two distinct defined values, so that two intermediates always
    // survive — confirm against ExpandBUILD_VECTOR's MoreThanTwoValues gate.
    SmallVector<int, 16> ShuffleVec(NumElems, -1);
    for (unsigned i = 0, e = IntermedVals[0].second.size(); i != e; ++i)
      ShuffleVec[IntermedVals[0].second[i]] = i;
    for (unsigned i = 0, e = IntermedVals[1].second.size(); i != e; ++i)
      ShuffleVec[IntermedVals[1].second[i]] = NumElems + i;

    if (Phase)
      Res = DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec);
    else if (!TLI.isShuffleMaskLegal(ShuffleVec, VT))
      return false;
  }

  return true;
}

/// Expand a BUILD_VECTOR node on targets that don't
/// support the operation, but do support the resultant vector type.
SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
  unsigned NumElems = Node->getNumOperands();
  SDValue Value1, Value2;
  SDLoc dl(Node);
  EVT VT = Node->getValueType(0);
  EVT OpVT = Node->getOperand(0).getValueType();
  EVT EltVT = VT.getVectorElementType();

  // If the only non-undef value is the low element, turn this into a
  // SCALAR_TO_VECTOR node.  If this is { X, X, X, X }, determine X.
bool isOnlyLowElement = true; bool MoreThanTwoValues = false; bool isConstant = true; for (unsigned i = 0; i < NumElems; ++i) { SDValue V = Node->getOperand(i); if (V.isUndef()) continue; if (i > 0) isOnlyLowElement = false; if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) isConstant = false; if (!Value1.getNode()) { Value1 = V; } else if (!Value2.getNode()) { if (V != Value1) Value2 = V; } else if (V != Value1 && V != Value2) { MoreThanTwoValues = true; } } if (!Value1.getNode()) return DAG.getUNDEF(VT); if (isOnlyLowElement) return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Node->getOperand(0)); // If all elements are constants, create a load from the constant pool. if (isConstant) { SmallVector<Constant*, 16> CV; for (unsigned i = 0, e = NumElems; i != e; ++i) { if (ConstantFPSDNode *V = dyn_cast<ConstantFPSDNode>(Node->getOperand(i))) { CV.push_back(const_cast<ConstantFP *>(V->getConstantFPValue())); } else if (ConstantSDNode *V = dyn_cast<ConstantSDNode>(Node->getOperand(i))) { if (OpVT==EltVT) CV.push_back(const_cast<ConstantInt *>(V->getConstantIntValue())); else { // If OpVT and EltVT don't match, EltVT is not legal and the // element values have been promoted/truncated earlier. Undo this; // we don't want a v16i8 to become a v16i32 for example. 
          const ConstantInt *CI = V->getConstantIntValue();
          CV.push_back(ConstantInt::get(EltVT.getTypeForEVT(*DAG.getContext()),
                                        CI->getZExtValue()));
        }
      } else {
        assert(Node->getOperand(i).isUndef());
        Type *OpNTy = EltVT.getTypeForEVT(*DAG.getContext());
        CV.push_back(UndefValue::get(OpNTy));
      }
    }
    Constant *CP = ConstantVector::get(CV);
    SDValue CPIdx =
        DAG.getConstantPool(CP, TLI.getPointerTy(DAG.getDataLayout()));
    Align Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlign();
    return DAG.getLoad(
        VT, dl, DAG.getEntryNode(), CPIdx,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
        Alignment);
  }

  // Count the distinct defined values so the target can decide whether a
  // shuffle-based expansion is worthwhile.
  SmallSet<SDValue, 16> DefinedValues;
  for (unsigned i = 0; i < NumElems; ++i) {
    if (Node->getOperand(i).isUndef())
      continue;
    DefinedValues.insert(Node->getOperand(i));
  }

  if (TLI.shouldExpandBuildVectorWithShuffles(VT, DefinedValues.size())) {
    if (!MoreThanTwoValues) {
      // At most two distinct values: a single two-input shuffle of two
      // SCALAR_TO_VECTOR nodes can produce the result.
      SmallVector<int, 8> ShuffleVec(NumElems, -1);
      for (unsigned i = 0; i < NumElems; ++i) {
        SDValue V = Node->getOperand(i);
        if (V.isUndef())
          continue;
        ShuffleVec[i] = V == Value1 ? 0 : NumElems;
      }
      if (TLI.isShuffleMaskLegal(ShuffleVec, Node->getValueType(0))) {
        // Get the splatted value into the low element of a vector register.
        SDValue Vec1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value1);
        SDValue Vec2;
        if (Value2.getNode())
          Vec2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value2);
        else
          Vec2 = DAG.getUNDEF(VT);

        // Return shuffle(LowValVec, undef, <0,0,0,0>)
        return DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec);
      }
    } else {
      // More than two values: try the generic tree-of-shuffles expansion.
      SDValue Res;
      if (ExpandBVWithShuffles(Node, DAG, TLI, Res))
        return Res;
    }
  }

  // Otherwise, we can't handle this case efficiently.
  return ExpandVectorBuildThroughStack(Node);
}

/// Expand SPLAT_VECTOR by building the equivalent splat BUILD_VECTOR.
SDValue SelectionDAGLegalize::ExpandSPLAT_VECTOR(SDNode *Node) {
  SDLoc DL(Node);
  EVT VT = Node->getValueType(0);
  SDValue SplatVal = Node->getOperand(0);

  return DAG.getSplatBuildVector(VT, DL, SplatVal);
}

// Expand a node into a call to a libcall.  If the result value
// does not fit into a register, return the lo part and set the hi part to the
// by-reg argument.  If it does fit into a single register, return the result
// and leave the Hi part unset.
SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
                                            bool isSigned) {
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  // Every operand of the node becomes a call argument, with the sign/zero
  // extension attribute chosen per-type by the target.
  for (const SDValue &Op : Node->op_values()) {
    EVT ArgVT = Op.getValueType();
    Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Node = Op;
    Entry.Ty = ArgTy;
    Entry.IsSExt = TLI.shouldSignExtendTypeInLibCall(ArgVT, isSigned);
    Entry.IsZExt = !TLI.shouldSignExtendTypeInLibCall(ArgVT, isSigned);
    Args.push_back(Entry);
  }
  SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
                                         TLI.getPointerTy(DAG.getDataLayout()));

  EVT RetVT = Node->getValueType(0);
  Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());

  // By default, the input chain to this libcall is the entry node of the
  // function. If the libcall is going to be emitted as a tail call then
  // TLI.isUsedByReturnOnly will change it to the right chain if the return
  // node which is being folded has a non-entry input chain.
  SDValue InChain = DAG.getEntryNode();

  // isTailCall may be true since the callee does not reference caller stack
  // frame. Check if it's in the right position and that the return types match.
  SDValue TCChain = InChain;
  const Function &F = DAG.getMachineFunction().getFunction();
  // Tail-call only when the node sits in tail position and the libcall's
  // return type matches the caller's (or the caller returns void).
  bool isTailCall =
      TLI.isInTailCallPosition(DAG, Node, TCChain) &&
      (RetTy == F.getReturnType() || F.getReturnType()->isVoidTy());
  if (isTailCall)
    InChain = TCChain;

  TargetLowering::CallLoweringInfo CLI(DAG);
  bool signExtend = TLI.shouldSignExtendTypeInLibCall(RetVT, isSigned);
  CLI.setDebugLoc(SDLoc(Node))
      .setChain(InChain)
      .setLibCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee,
                    std::move(Args))
      .setTailCall(isTailCall)
      .setSExtResult(signExtend)
      .setZExtResult(!signExtend)
      .setIsPostTypeLegalization(true);

  std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);

  if (!CallInfo.second.getNode()) {
    LLVM_DEBUG(dbgs() << "Created tailcall: "; DAG.getRoot().dump(&DAG));
    // It's a tailcall, return the chain (which is the DAG root).
    return DAG.getRoot();
  }

  LLVM_DEBUG(dbgs() << "Created libcall: "; CallInfo.first.dump(&DAG));
  return CallInfo.first;
}

/// Expand a floating-point operation into the libcall variant matching the
/// node's result type (f32/f64/f80/f128/ppcf128), appending the result (and,
/// for strict nodes, the output chain) to \p Results.
void SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node,
                                           RTLIB::Libcall Call_F32,
                                           RTLIB::Libcall Call_F64,
                                           RTLIB::Libcall Call_F80,
                                           RTLIB::Libcall Call_F128,
                                           RTLIB::Libcall Call_PPCF128,
                                           SmallVectorImpl<SDValue> &Results) {
  RTLIB::Libcall LC;
  switch (Node->getSimpleValueType(0).SimpleTy) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case MVT::f32: LC = Call_F32; break;
  case MVT::f64: LC = Call_F64; break;
  case MVT::f80: LC = Call_F80; break;
  case MVT::f128: LC = Call_F128; break;
  case MVT::ppcf128: LC = Call_PPCF128; break;
  }

  if (Node->isStrictFPOpcode()) {
    EVT RetVT = Node->getValueType(0);
    // Skip operand 0: it is the incoming chain, passed separately below.
    SmallVector<SDValue, 4> Ops(Node->op_begin() + 1, Node->op_end());
    TargetLowering::MakeLibCallOptions CallOptions;
    // FIXME: This doesn't support tail calls.
    std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(DAG, LC, RetVT, Ops,
                                                      CallOptions,
                                                      SDLoc(Node),
                                                      Node->getOperand(0));
    Results.push_back(Tmp.first);  // Result value.
    Results.push_back(Tmp.second); // Output chain.
  } else {
    SDValue Tmp = ExpandLibCall(LC, Node, false);
    Results.push_back(Tmp);
  }
}

/// Expand an integer operation into the libcall variant matching the node's
/// result width (i8..i128) and return the call's result value.
SDValue SelectionDAGLegalize::ExpandIntLibCall(SDNode* Node, bool isSigned,
                                               RTLIB::Libcall Call_I8,
                                               RTLIB::Libcall Call_I16,
                                               RTLIB::Libcall Call_I32,
                                               RTLIB::Libcall Call_I64,
                                               RTLIB::Libcall Call_I128) {
  RTLIB::Libcall LC;
  switch (Node->getSimpleValueType(0).SimpleTy) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case MVT::i8:   LC = Call_I8; break;
  case MVT::i16:  LC = Call_I16; break;
  case MVT::i32:  LC = Call_I32; break;
  case MVT::i64:  LC = Call_I64; break;
  case MVT::i128: LC = Call_I128; break;
  }
  return ExpandLibCall(LC, Node, isSigned);
}

/// Expand the node to a libcall based on first argument type (for instance
/// lround and its variant).
void SelectionDAGLegalize::ExpandArgFPLibCall(SDNode* Node,
                                              RTLIB::Libcall Call_F32,
                                              RTLIB::Libcall Call_F64,
                                              RTLIB::Libcall Call_F80,
                                              RTLIB::Libcall Call_F128,
                                              RTLIB::Libcall Call_PPCF128,
                                              SmallVectorImpl<SDValue> &Results) {
  // For strict nodes operand 0 is the chain; the FP argument follows it.
  EVT InVT = Node->getOperand(Node->isStrictFPOpcode() ? 1 : 0).getValueType();

  RTLIB::Libcall LC;
  switch (InVT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case MVT::f32: LC = Call_F32; break;
  case MVT::f64: LC = Call_F64; break;
  case MVT::f80: LC = Call_F80; break;
  case MVT::f128: LC = Call_F128; break;
  case MVT::ppcf128: LC = Call_PPCF128; break;
  }

  if (Node->isStrictFPOpcode()) {
    EVT RetVT = Node->getValueType(0);
    // Skip operand 0 (the chain); it is threaded through makeLibCall below.
    SmallVector<SDValue, 4> Ops(Node->op_begin() + 1, Node->op_end());
    TargetLowering::MakeLibCallOptions CallOptions;
    // FIXME: This doesn't support tail calls.
    std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(DAG, LC, RetVT, Ops,
                                                      CallOptions,
                                                      SDLoc(Node),
                                                      Node->getOperand(0));
    Results.push_back(Tmp.first);  // Result value.
    Results.push_back(Tmp.second); // Output chain.
  } else {
    SDValue Tmp = ExpandLibCall(LC, Node, false);
    Results.push_back(Tmp);
  }
}

/// Issue libcalls to __{u}divmod to compute div / rem pairs.
void
SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node,
                                          SmallVectorImpl<SDValue> &Results) {
  unsigned Opcode = Node->getOpcode();
  bool isSigned = Opcode == ISD::SDIVREM;

  RTLIB::Libcall LC;
  switch (Node->getSimpleValueType(0).SimpleTy) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case MVT::i8:   LC= isSigned ? RTLIB::SDIVREM_I8  : RTLIB::UDIVREM_I8;  break;
  case MVT::i16:  LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
  case MVT::i32:  LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
  case MVT::i64:  LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
  case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break;
  }

  // The input chain to this libcall is the entry node of the function.
  // Legalizing the call will automatically add the previous call to the
  // dependence.
  SDValue InChain = DAG.getEntryNode();

  EVT RetVT = Node->getValueType(0);
  Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());

  // The dividend and divisor become the first two call arguments.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (const SDValue &Op : Node->op_values()) {
    EVT ArgVT = Op.getValueType();
    Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Node = Op;
    Entry.Ty = ArgTy;
    Entry.IsSExt = isSigned;
    Entry.IsZExt = !isSigned;
    Args.push_back(Entry);
  }

  // Also pass the return address of the remainder.
  // Stack slot the callee writes the remainder into.
  SDValue FIPtr = DAG.CreateStackTemporary(RetVT);
  Entry.Node = FIPtr;
  Entry.Ty = RetTy->getPointerTo();
  Entry.IsSExt = isSigned;
  Entry.IsZExt = !isSigned;
  Args.push_back(Entry);

  SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
                                         TLI.getPointerTy(DAG.getDataLayout()));

  SDLoc dl(Node);
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(TLI.getLibcallCallingConv(LC), RetTy, Callee,
                    std::move(Args))
      .setSExtResult(isSigned)
      .setZExtResult(!isSigned);

  std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);

  // Remainder is loaded back from the stack frame.
  SDValue Rem =
      DAG.getLoad(RetVT, dl, CallInfo.second, FIPtr, MachinePointerInfo());
  Results.push_back(CallInfo.first); // Quotient: the call's return value.
  Results.push_back(Rem);
}

/// Return true if sincos libcall is available.
static bool isSinCosLibcallAvailable(SDNode *Node, const TargetLowering &TLI) {
  RTLIB::Libcall LC;
  switch (Node->getSimpleValueType(0).SimpleTy) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case MVT::f32:     LC = RTLIB::SINCOS_F32; break;
  case MVT::f64:     LC = RTLIB::SINCOS_F64; break;
  case MVT::f80:     LC = RTLIB::SINCOS_F80; break;
  case MVT::f128:    LC = RTLIB::SINCOS_F128; break;
  case MVT::ppcf128: LC = RTLIB::SINCOS_PPCF128; break;
  }
  return TLI.getLibcallName(LC) != nullptr;
}

/// Only issue sincos libcall if both sin and cos are needed.
static bool useSinCos(SDNode *Node) {
  unsigned OtherOpcode = Node->getOpcode() == ISD::FSIN
    ? ISD::FCOS : ISD::FSIN;

  // Scan the other users of this node's operand for the complementary
  // opcode (or an existing FSINCOS).
  SDValue Op0 = Node->getOperand(0);
  for (SDNode::use_iterator UI = Op0.getNode()->use_begin(),
       UE = Op0.getNode()->use_end(); UI != UE; ++UI) {
    SDNode *User = *UI;
    if (User == Node)
      continue;
    // The other user might have been turned into sincos already.
    if (User->getOpcode() == OtherOpcode || User->getOpcode() == ISD::FSINCOS)
      return true;
  }
  return false;
}

/// Issue libcalls to sincos to compute sin / cos pairs.
void
SelectionDAGLegalize::ExpandSinCosLibCall(SDNode *Node,
                                          SmallVectorImpl<SDValue> &Results) {
  RTLIB::Libcall LC;
  switch (Node->getSimpleValueType(0).SimpleTy) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case MVT::f32:     LC = RTLIB::SINCOS_F32; break;
  case MVT::f64:     LC = RTLIB::SINCOS_F64; break;
  case MVT::f80:     LC = RTLIB::SINCOS_F80; break;
  case MVT::f128:    LC = RTLIB::SINCOS_F128; break;
  case MVT::ppcf128: LC = RTLIB::SINCOS_PPCF128; break;
  }

  // The input chain to this libcall is the entry node of the function.
  // Legalizing the call will automatically add the previous call to the
  // dependence.
  SDValue InChain = DAG.getEntryNode();

  EVT RetVT = Node->getValueType(0);
  Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  // Pass the argument.
  Entry.Node = Node->getOperand(0);
  Entry.Ty = RetTy;
  Entry.IsSExt = false;
  Entry.IsZExt = false;
  Args.push_back(Entry);

  // Pass the return address of sin.
  SDValue SinPtr = DAG.CreateStackTemporary(RetVT);
  Entry.Node = SinPtr;
  Entry.Ty = RetTy->getPointerTo();
  Entry.IsSExt = false;
  Entry.IsZExt = false;
  Args.push_back(Entry);

  // Also pass the return address of the cos.
  SDValue CosPtr = DAG.CreateStackTemporary(RetVT);
  Entry.Node = CosPtr;
  Entry.Ty = RetTy->getPointerTo();
  Entry.IsSExt = false;
  Entry.IsZExt = false;
  Args.push_back(Entry);

  SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
                                         TLI.getPointerTy(DAG.getDataLayout()));

  SDLoc dl(Node);
  TargetLowering::CallLoweringInfo CLI(DAG);
  // sincos returns void; both results come back through the stack slots.
  CLI.setDebugLoc(dl).setChain(InChain).setLibCallee(
      TLI.getLibcallCallingConv(LC), Type::getVoidTy(*DAG.getContext()), Callee,
      std::move(Args));

  std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);

  Results.push_back(
      DAG.getLoad(RetVT, dl, CallInfo.second, SinPtr, MachinePointerInfo()));
  Results.push_back(
      DAG.getLoad(RetVT, dl, CallInfo.second, CosPtr, MachinePointerInfo()));
}

/// This function is responsible for legalizing a
/// INT_TO_FP operation of the specified operand when the target requests that
/// we expand it.  At this point, we know that the result and operand types are
/// legal for the target.
SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(SDNode *Node,
                                                   SDValue &Chain) {
  bool isSigned = (Node->getOpcode() == ISD::STRICT_SINT_TO_FP ||
                   Node->getOpcode() == ISD::SINT_TO_FP);
  EVT DestVT = Node->getValueType(0);
  SDLoc dl(Node);
  // For strict nodes operand 0 is the chain; the integer operand follows it.
  unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
  SDValue Op0 = Node->getOperand(OpNo);
  EVT SrcVT = Op0.getValueType();

  // TODO: Should any fast-math-flags be set for the created nodes?
  LLVM_DEBUG(dbgs() << "Legalizing INT_TO_FP\n");
  if (SrcVT == MVT::i32 && TLI.isTypeLegal(MVT::f64)) {
    LLVM_DEBUG(dbgs() << "32-bit [signed|unsigned] integer to float/double "
                         "expansion\n");

    // Get the stack frame index of a 8 byte buffer.
    SDValue StackSlot = DAG.CreateStackTemporary(MVT::f64);

    SDValue Lo = Op0;
    // if signed map to unsigned space
    if (isSigned) {
      // Invert sign bit (signed to unsigned mapping).
      Lo = DAG.getNode(ISD::XOR, dl, MVT::i32, Lo,
                       DAG.getConstant(0x80000000u, dl, MVT::i32));
    }

    // Initial hi portion of constructed double.
    SDValue Hi = DAG.getConstant(0x43300000u, dl, MVT::i32);

    // If this a big endian target, swap the lo and high data.
    if (DAG.getDataLayout().isBigEndian())
      std::swap(Lo, Hi);

    SDValue MemChain = DAG.getEntryNode();

    // Store the lo of the constructed double.
    SDValue Store1 = DAG.getStore(MemChain, dl, Lo, StackSlot,
                                  MachinePointerInfo());
    // Store the hi of the constructed double.
    SDValue HiPtr = DAG.getMemBasePlusOffset(StackSlot, 4, dl);
    SDValue Store2 =
        DAG.getStore(MemChain, dl, Hi, HiPtr, MachinePointerInfo());
    // Join both stores so the reload below depends on each of them.
    MemChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);

    // load the constructed double
    SDValue Load =
        DAG.getLoad(MVT::f64, dl, MemChain, StackSlot, MachinePointerInfo());
    // FP constant to bias correct the final result
    SDValue Bias = DAG.getConstantFP(isSigned ?
                                     BitsToDouble(0x4330000080000000ULL) :
                                     BitsToDouble(0x4330000000000000ULL),
                                     dl, MVT::f64);
    // Subtract the bias and get the final result.
    SDValue Sub;
    SDValue Result;
    if (Node->isStrictFPOpcode()) {
      Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::f64, MVT::Other},
                        {Node->getOperand(0), Load, Bias});
      Chain = Sub.getValue(1);
      if (DestVT != Sub.getValueType()) {
        // Round/extend f64 to the requested type, keeping the chain updated.
        std::pair<SDValue, SDValue> ResultPair;
        ResultPair =
            DAG.getStrictFPExtendOrRound(Sub, Chain, dl, DestVT);
        Result = ResultPair.first;
        Chain = ResultPair.second;
      }
      else
        Result = Sub;
    } else {
      Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Load, Bias);
      Result = DAG.getFPExtendOrRound(Sub, dl, DestVT);
    }
    return Result;
  }

  // Code below here assumes !isSigned without checking again.
  assert(!isSigned && "Legalize cannot Expand SINT_TO_FP for i64 yet");

  // TODO: Generalize this for use with other types.
  if ((SrcVT == MVT::i32 || SrcVT == MVT::i64) && DestVT == MVT::f32) {
    LLVM_DEBUG(dbgs() << "Converting unsigned i32/i64 to f32\n");
    // For unsigned conversions, convert them to signed conversions using the
    // algorithm from the x86_64 __floatundisf in compiler_rt. That method
    // should be valid for i32->f32 as well.
// TODO: This really should be implemented using a branch rather than a // select. We happen to get lucky and machinesink does the right // thing most of the time. This would be a good candidate for a // pseudo-op, or, even better, for whole-function isel. EVT SetCCVT = getSetCCResultType(SrcVT); SDValue SignBitTest = DAG.getSetCC( dl, SetCCVT, Op0, DAG.getConstant(0, dl, SrcVT), ISD::SETLT); EVT ShiftVT = TLI.getShiftAmountTy(SrcVT, DAG.getDataLayout()); SDValue ShiftConst = DAG.getConstant(1, dl, ShiftVT); SDValue Shr = DAG.getNode(ISD::SRL, dl, SrcVT, Op0, ShiftConst); SDValue AndConst = DAG.getConstant(1, dl, SrcVT); SDValue And = DAG.getNode(ISD::AND, dl, SrcVT, Op0, AndConst); SDValue Or = DAG.getNode(ISD::OR, dl, SrcVT, And, Shr); SDValue Slow, Fast; if (Node->isStrictFPOpcode()) { // In strict mode, we must avoid spurious exceptions, and therefore // must make sure to only emit a single STRICT_SINT_TO_FP. SDValue InCvt = DAG.getSelect(dl, SrcVT, SignBitTest, Or, Op0); Fast = DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, { DestVT, MVT::Other }, { Node->getOperand(0), InCvt }); Slow = DAG.getNode(ISD::STRICT_FADD, dl, { DestVT, MVT::Other }, { Fast.getValue(1), Fast, Fast }); Chain = Slow.getValue(1); // The STRICT_SINT_TO_FP inherits the exception mode from the // incoming STRICT_UINT_TO_FP node; the STRICT_FADD node can // never raise any exception. SDNodeFlags Flags; Flags.setNoFPExcept(Node->getFlags().hasNoFPExcept()); Fast->setFlags(Flags); Flags.setNoFPExcept(true); Slow->setFlags(Flags); } else { SDValue SignCvt = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Or); Slow = DAG.getNode(ISD::FADD, dl, DestVT, SignCvt, SignCvt); Fast = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Op0); } return DAG.getSelect(dl, DestVT, SignBitTest, Slow, Fast); } // The following optimization is valid only if every value in SrcVT (when // treated as signed) is representable in DestVT. Check that the mantissa // size of DestVT is >= than the number of bits in SrcVT -1. 
  assert(APFloat::semanticsPrecision(DAG.EVTToAPFloatSemantics(DestVT)) >=
         SrcVT.getSizeInBits() - 1 &&
         "Cannot perform lossless SINT_TO_FP!");

  // Do the conversion as *signed*; negative inputs are corrected afterwards
  // by adding a "fudge factor" loaded from the constant pool.
  SDValue Tmp1;
  if (Node->isStrictFPOpcode()) {
    Tmp1 = DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, { DestVT, MVT::Other },
                       { Node->getOperand(0), Op0 });
  } else
    Tmp1 = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Op0);

  SDValue SignSet = DAG.getSetCC(dl, getSetCCResultType(SrcVT), Op0,
                                 DAG.getConstant(0, dl, SrcVT), ISD::SETLT);
  SDValue Zero = DAG.getIntPtrConstant(0, dl),
          Four = DAG.getIntPtrConstant(4, dl);
  SDValue CstOffset = DAG.getSelect(dl, Zero.getValueType(),
                                    SignSet, Four, Zero);

  // If the sign bit of the integer is set, the large number will be treated
  // as a negative number.  To counteract this, the dynamic code adds an
  // offset depending on the data type.
  uint64_t FF;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unsupported integer type!");
  case MVT::i8 : FF = 0x43800000ULL; break;  // 2^8  (as a float)
  case MVT::i16: FF = 0x47800000ULL; break;  // 2^16 (as a float)
  case MVT::i32: FF = 0x4F800000ULL; break;  // 2^32 (as a float)
  case MVT::i64: FF = 0x5F800000ULL; break;  // 2^64 (as a float)
  }
  // The i64 pool entry holds the f32 fudge bits in one half and zero bits in
  // the other, ordered per endianness; CstOffset (0 or 4) selects the right
  // half at run time.
  if (DAG.getDataLayout().isLittleEndian())
    FF <<= 32;
  Constant *FudgeFactor = ConstantInt::get(
                                  Type::getInt64Ty(*DAG.getContext()), FF);

  SDValue CPIdx =
      DAG.getConstantPool(FudgeFactor, TLI.getPointerTy(DAG.getDataLayout()));
  Align Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlign();
  CPIdx = DAG.getNode(ISD::ADD, dl, CPIdx.getValueType(), CPIdx, CstOffset);
  Alignment = commonAlignment(Alignment, 4);
  SDValue FudgeInReg;
  if (DestVT == MVT::f32)
    FudgeInReg = DAG.getLoad(
        MVT::f32, dl, DAG.getEntryNode(), CPIdx,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
        Alignment);
  else {
    // Wider destinations: extending load from the f32 pool entry, then
    // legalize the new load node we just introduced.
    SDValue Load = DAG.getExtLoad(
        ISD::EXTLOAD, dl, DestVT, DAG.getEntryNode(), CPIdx,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
        Alignment);
    HandleSDNode Handle(Load);
    LegalizeOp(Load.getNode());
    FudgeInReg = Handle.getValue();
  }

  // Final result = converted-as-signed value + fudge factor (0.0 or 2^N).
  if (Node->isStrictFPOpcode()) {
    SDValue Result = DAG.getNode(ISD::STRICT_FADD, dl, { DestVT, MVT::Other },
                                 { Tmp1.getValue(1), Tmp1, FudgeInReg });
    Chain = Result.getValue(1);
    return Result;
  }

  return DAG.getNode(ISD::FADD, dl, DestVT, Tmp1, FudgeInReg);
}

/// This function is responsible for legalizing a
/// *INT_TO_FP operation of the specified operand when the target requests that
/// we promote it.  At this point, we know that the result and operand types
/// are legal for the target, and that there is a legal UINT_TO_FP or
/// SINT_TO_FP operation that takes a larger input.
void SelectionDAGLegalize::PromoteLegalINT_TO_FP(
    SDNode *N, const SDLoc &dl, SmallVectorImpl<SDValue> &Results) {
  bool IsStrict = N->isStrictFPOpcode();
  bool IsSigned = N->getOpcode() == ISD::SINT_TO_FP ||
                  N->getOpcode() == ISD::STRICT_SINT_TO_FP;
  EVT DestVT = N->getValueType(0);
  SDValue LegalOp = N->getOperand(IsStrict ? 1 : 0);
  unsigned UIntOp = IsStrict ? ISD::STRICT_UINT_TO_FP : ISD::UINT_TO_FP;
  unsigned SIntOp = IsStrict ? ISD::STRICT_SINT_TO_FP : ISD::SINT_TO_FP;

  // First step, figure out the appropriate *INT_TO_FP operation to use.
  EVT NewInTy = LegalOp.getValueType();

  unsigned OpToUse = 0;

  // Scan for the appropriate larger type to use.
  while (true) {
    // Step to the next wider simple value type.
    NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT().SimpleTy+1);
    assert(NewInTy.isInteger() && "Ran out of possibilities!");

    // If the target supports SINT_TO_FP of this type, use it.
    if (TLI.isOperationLegalOrCustom(SIntOp, NewInTy)) {
      OpToUse = SIntOp;
      break;
    }
    if (IsSigned)
      continue;

    // If the target supports UINT_TO_FP of this type, use it.
    if (TLI.isOperationLegalOrCustom(UIntOp, NewInTy)) {
      OpToUse = UIntOp;
      break;
    }

    // Otherwise, try a larger type.
  }

  // Okay, we found the operation and type to use.  Zero extend our input to the
  // desired type then run the operation on it.
  if (IsStrict) {
    SDValue Res =
        DAG.getNode(OpToUse, dl, {DestVT, MVT::Other},
                    {N->getOperand(0),
                     DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
                                 dl, NewInTy, LegalOp)});
    Results.push_back(Res);
    Results.push_back(Res.getValue(1));
    return;
  }

  Results.push_back(
      DAG.getNode(OpToUse, dl, DestVT,
                  DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
                              dl, NewInTy, LegalOp)));
}

/// This function is responsible for legalizing a
/// FP_TO_*INT operation of the specified operand when the target requests that
/// we promote it.  At this point, we know that the result and operand types
/// are legal for the target, and that there is a legal FP_TO_UINT or
/// FP_TO_SINT operation that returns a larger result.
void SelectionDAGLegalize::PromoteLegalFP_TO_INT(
    SDNode *N, const SDLoc &dl, SmallVectorImpl<SDValue> &Results) {
  bool IsStrict = N->isStrictFPOpcode();
  bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
                  N->getOpcode() == ISD::STRICT_FP_TO_SINT;
  EVT DestVT = N->getValueType(0);
  SDValue LegalOp = N->getOperand(IsStrict ? 1 : 0);

  // First step, figure out the appropriate FP_TO*INT operation to use.
  EVT NewOutTy = DestVT;

  unsigned OpToUse = 0;

  // Scan for the appropriate larger type to use.
  while (true) {
    // Step to the next wider simple value type.
    NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT().SimpleTy+1);
    assert(NewOutTy.isInteger() && "Ran out of possibilities!");

    // A larger signed type can hold all unsigned values of the requested type,
    // so using FP_TO_SINT is valid
    OpToUse = IsStrict ? ISD::STRICT_FP_TO_SINT : ISD::FP_TO_SINT;
    if (TLI.isOperationLegalOrCustom(OpToUse, NewOutTy))
      break;

    // However, if the value may be < 0.0, we *must* use some FP_TO_SINT.
    OpToUse = IsStrict ? ISD::STRICT_FP_TO_UINT : ISD::FP_TO_UINT;
    if (!IsSigned && TLI.isOperationLegalOrCustom(OpToUse, NewOutTy))
      break;

    // Otherwise, try a larger type.
  }

  // Okay, we found the operation and type to use.
  SDValue Operation;
  if (IsStrict) {
    SDVTList VTs = DAG.getVTList(NewOutTy, MVT::Other);
    Operation = DAG.getNode(OpToUse, dl, VTs, N->getOperand(0), LegalOp);
  } else
    Operation = DAG.getNode(OpToUse, dl, NewOutTy, LegalOp);

  // Truncate the result of the extended FP_TO_*INT operation to the desired
  // size.
  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, DestVT, Operation);
  Results.push_back(Trunc);
  if (IsStrict)
    Results.push_back(Operation.getValue(1));
}

/// Legalize a BITREVERSE scalar/vector operation as a series of mask + shifts.
SDValue SelectionDAGLegalize::ExpandBITREVERSE(SDValue Op, const SDLoc &dl) {
  EVT VT = Op.getValueType();
  EVT SHVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
  unsigned Sz = VT.getScalarSizeInBits();

  SDValue Tmp, Tmp2, Tmp3;

  // If we can, perform BSWAP first and then the mask+swap the i4, then i2
  // and finally the i1 pairs.
  // TODO: We can easily support i4/i2 legal types if any target ever does.
  if (Sz >= 8 && isPowerOf2_32(Sz)) {
    // Create the masks - repeating the pattern every byte.
    APInt MaskHi4 = APInt::getSplat(Sz, APInt(8, 0xF0));
    APInt MaskHi2 = APInt::getSplat(Sz, APInt(8, 0xCC));
    APInt MaskHi1 = APInt::getSplat(Sz, APInt(8, 0xAA));
    APInt MaskLo4 = APInt::getSplat(Sz, APInt(8, 0x0F));
    APInt MaskLo2 = APInt::getSplat(Sz, APInt(8, 0x33));
    APInt MaskLo1 = APInt::getSplat(Sz, APInt(8, 0x55));

    // BSWAP if the type is wider than a single byte.
    Tmp = (Sz > 8 ? DAG.getNode(ISD::BSWAP, dl, VT, Op) : Op);

    // swap i4: ((V & 0xF0) >> 4) | ((V & 0x0F) << 4)
    Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp, DAG.getConstant(MaskHi4, dl, VT));
    Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp, DAG.getConstant(MaskLo4, dl, VT));
    Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Tmp2, DAG.getConstant(4, dl, SHVT));
    Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Tmp3, DAG.getConstant(4, dl, SHVT));
    Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);

    // swap i2: ((V & 0xCC) >> 2) | ((V & 0x33) << 2)
    Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp, DAG.getConstant(MaskHi2, dl, VT));
    Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp, DAG.getConstant(MaskLo2, dl, VT));
    Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Tmp2, DAG.getConstant(2, dl, SHVT));
    Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Tmp3, DAG.getConstant(2, dl, SHVT));
    Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);

    // swap i1: ((V & 0xAA) >> 1) | ((V & 0x55) << 1)
    Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp, DAG.getConstant(MaskHi1, dl, VT));
    Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp, DAG.getConstant(MaskLo1, dl, VT));
    Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Tmp2, DAG.getConstant(1, dl, SHVT));
    Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Tmp3, DAG.getConstant(1, dl, SHVT));
    Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
    return Tmp;
  }

  // Fallback: move every bit to its mirrored position individually
  // (emits O(Sz) nodes).
  Tmp = DAG.getConstant(0, dl, VT);
  for (unsigned I = 0, J = Sz-1; I < Sz; ++I, --J) {
    if (I < J)
      Tmp2 =
          DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(J - I, dl, SHVT));
    else
      Tmp2 =
          DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(I - J, dl, SHVT));

    APInt Shift(Sz, 1);
    Shift <<= J;
    Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Shift, dl, VT));
    Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp, Tmp2);
  }

  return Tmp;
}

/// Open code the operations for BSWAP of the specified operation.
SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, const SDLoc &dl) {
  EVT VT = Op.getValueType();
  EVT SHVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
  SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
  switch (VT.getSimpleVT().getScalarType().SimpleTy) {
  default: llvm_unreachable("Unhandled Expand type in BSWAP!");
  case MVT::i16:
    // Use a rotate by 8. This can be further expanded if necessary.
    return DAG.getNode(ISD::ROTL, dl, VT, Op, DAG.getConstant(8, dl, SHVT));
  case MVT::i32:
    // Move each byte to its mirrored position, mask the middle two, and OR
    // the four lanes back together.
    Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, dl, SHVT));
    Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, dl, SHVT));
    Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, dl, SHVT));
    Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, dl, SHVT));
    Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3,
                       DAG.getConstant(0xFF0000, dl, VT));
    Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(0xFF00, dl, VT));
    Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3);
    Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1);
    return DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2);
  case MVT::i64:
    // Same byte-mirroring scheme over eight lanes; the ORs are combined as
    // a balanced tree.
    Tmp8 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(56, dl, SHVT));
    Tmp7 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(40, dl, SHVT));
    Tmp6 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, dl, SHVT));
    Tmp5 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, dl, SHVT));
    Tmp4 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, dl, SHVT));
    Tmp3 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, dl, SHVT));
    Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(40, dl, SHVT));
    Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(56, dl, SHVT));
    Tmp7 = DAG.getNode(ISD::AND, dl, VT, Tmp7,
                       DAG.getConstant(255ULL<<48, dl, VT));
    Tmp6 = DAG.getNode(ISD::AND, dl, VT, Tmp6,
                       DAG.getConstant(255ULL<<40, dl, VT));
    Tmp5 = DAG.getNode(ISD::AND, dl, VT, Tmp5,
                       DAG.getConstant(255ULL<<32, dl, VT));
    Tmp4 = DAG.getNode(ISD::AND, dl, VT, Tmp4,
                       DAG.getConstant(255ULL<<24, dl, VT));
    Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3,
                       DAG.getConstant(255ULL<<16, dl, VT));
    Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2,
                       DAG.getConstant(255ULL<<8 , dl, VT));
    Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp7);
    Tmp6 = DAG.getNode(ISD::OR, dl, VT, Tmp6, Tmp5);
    Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3);
    Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1);
    Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp6);
    Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2);
    return DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp4);
  }
}

/// Try to expand \p Node inline (without a libcall).  Returns true on
/// success; the replacement values, if any, are collected in Results.
bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
  LLVM_DEBUG(dbgs() << "Trying to expand node\n");
  SmallVector<SDValue, 8> Results;
  SDLoc dl(Node);
  SDValue Tmp1, Tmp2, Tmp3, Tmp4;
  bool NeedInvert;
  switch (Node->getOpcode()) {
  case ISD::ABS:
    if (TLI.expandABS(Node, Tmp1, DAG))
      Results.push_back(Tmp1);
    break;
  case ISD::CTPOP:
    if (TLI.expandCTPOP(Node, Tmp1, DAG))
      Results.push_back(Tmp1);
    break;
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
    if (TLI.expandCTLZ(Node, Tmp1, DAG))
      Results.push_back(Tmp1);
    break;
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
    if (TLI.expandCTTZ(Node, Tmp1, DAG))
      Results.push_back(Tmp1);
    break;
  case ISD::BITREVERSE:
    Results.push_back(ExpandBITREVERSE(Node->getOperand(0), dl));
    break;
  case ISD::BSWAP:
    Results.push_back(ExpandBSWAP(Node->getOperand(0), dl));
    break;
  case ISD::FRAMEADDR:
  case ISD::RETURNADDR:
  case ISD::FRAME_TO_ARGS_OFFSET:
    Results.push_back(DAG.getConstant(0, dl, Node->getValueType(0)));
    break;
  case ISD::EH_DWARF_CFA: {
    // CFA = frame address + (frame-to-args offset + argument).
    SDValue CfaArg = DAG.getSExtOrTrunc(Node->getOperand(0), dl,
                                        TLI.getPointerTy(DAG.getDataLayout()));
    SDValue Offset = DAG.getNode(ISD::ADD, dl,
                                 CfaArg.getValueType(),
                                 DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, dl,
                                             CfaArg.getValueType()),
                                 CfaArg);
    SDValue FA = DAG.getNode(
        ISD::FRAMEADDR, dl, TLI.getPointerTy(DAG.getDataLayout()),
        DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout())));
    Results.push_back(DAG.getNode(ISD::ADD, dl, FA.getValueType(), FA, Offset));
    break;
  }
  case ISD::FLT_ROUNDS_:
    // Fall back to reporting round-to-nearest (1) plus the chain.
    Results.push_back(DAG.getConstant(1, dl, Node->getValueType(0)));
    Results.push_back(Node->getOperand(0));
    break;
  case ISD::EH_RETURN:
  case ISD::EH_LABEL:
  case ISD::PREFETCH:
  case ISD::VAEND:
  case ISD::EH_SJLJ_LONGJMP:
    // If the target didn't expand these, there's nothing to do, so just
    // preserve the chain and be done.
    Results.push_back(Node->getOperand(0));
    break;
  case ISD::READCYCLECOUNTER:
    // If the target didn't expand this, just return 'zero' and preserve the
    // chain.
    Results.append(Node->getNumValues() - 1,
                   DAG.getConstant(0, dl, Node->getValueType(0)));
    Results.push_back(Node->getOperand(0));
    break;
  case ISD::EH_SJLJ_SETJMP:
    // If the target didn't expand this, just return 'zero' and preserve the
    // chain.
    Results.push_back(DAG.getConstant(0, dl, MVT::i32));
    Results.push_back(Node->getOperand(0));
    break;
  case ISD::ATOMIC_LOAD: {
    // There is no libcall for atomic load; fake it with ATOMIC_CMP_SWAP.
    SDValue Zero = DAG.getConstant(0, dl, Node->getValueType(0));
    SDVTList VTs = DAG.getVTList(Node->getValueType(0), MVT::Other);
    SDValue Swap = DAG.getAtomicCmpSwap(
        ISD::ATOMIC_CMP_SWAP, dl, cast<AtomicSDNode>(Node)->getMemoryVT(), VTs,
        Node->getOperand(0), Node->getOperand(1), Zero, Zero,
        cast<AtomicSDNode>(Node)->getMemOperand());
    Results.push_back(Swap.getValue(0));
    Results.push_back(Swap.getValue(1));
    break;
  }
  case ISD::ATOMIC_STORE: {
    // There is no libcall for atomic store; fake it with ATOMIC_SWAP.
    SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
                                 cast<AtomicSDNode>(Node)->getMemoryVT(),
                                 Node->getOperand(0),
                                 Node->getOperand(1), Node->getOperand(2),
                                 cast<AtomicSDNode>(Node)->getMemOperand());
    Results.push_back(Swap.getValue(1));
    break;
  }
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
    // Expanding an ATOMIC_CMP_SWAP_WITH_SUCCESS produces an ATOMIC_CMP_SWAP and
    // splits out the success value as a comparison. Expanding the resulting
    // ATOMIC_CMP_SWAP will produce a libcall.
SDVTList VTs = DAG.getVTList(Node->getValueType(0), MVT::Other); SDValue Res = DAG.getAtomicCmpSwap( ISD::ATOMIC_CMP_SWAP, dl, cast<AtomicSDNode>(Node)->getMemoryVT(), VTs, Node->getOperand(0), Node->getOperand(1), Node->getOperand(2), Node->getOperand(3), cast<MemSDNode>(Node)->getMemOperand()); SDValue ExtRes = Res; SDValue LHS = Res; SDValue RHS = Node->getOperand(1); EVT AtomicType = cast<AtomicSDNode>(Node)->getMemoryVT(); EVT OuterType = Node->getValueType(0); switch (TLI.getExtendForAtomicOps()) { case ISD::SIGN_EXTEND: LHS = DAG.getNode(ISD::AssertSext, dl, OuterType, Res, DAG.getValueType(AtomicType)); RHS = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, OuterType, Node->getOperand(2), DAG.getValueType(AtomicType)); ExtRes = LHS; break; case ISD::ZERO_EXTEND: LHS = DAG.getNode(ISD::AssertZext, dl, OuterType, Res, DAG.getValueType(AtomicType)); RHS = DAG.getZeroExtendInReg(Node->getOperand(2), dl, AtomicType); ExtRes = LHS; break; case ISD::ANY_EXTEND: LHS = DAG.getZeroExtendInReg(Res, dl, AtomicType); RHS = DAG.getZeroExtendInReg(Node->getOperand(2), dl, AtomicType); break; default: llvm_unreachable("Invalid atomic op extension"); } SDValue Success = DAG.getSetCC(dl, Node->getValueType(1), LHS, RHS, ISD::SETEQ); Results.push_back(ExtRes.getValue(0)); Results.push_back(Success); Results.push_back(Res.getValue(1)); break; } case ISD::DYNAMIC_STACKALLOC: ExpandDYNAMIC_STACKALLOC(Node, Results); break; case ISD::MERGE_VALUES: for (unsigned i = 0; i < Node->getNumValues(); i++) Results.push_back(Node->getOperand(i)); break; case ISD::UNDEF: { EVT VT = Node->getValueType(0); if (VT.isInteger()) Results.push_back(DAG.getConstant(0, dl, VT)); else { assert(VT.isFloatingPoint() && "Unknown value type!"); Results.push_back(DAG.getConstantFP(0, dl, VT)); } break; } case ISD::STRICT_FP_ROUND: // When strict mode is enforced we can't do expansion because it // does not honor the "strict" properties. Only libcall is allowed. 
if (TLI.isStrictFPEnabled()) break; // We might as well mutate to FP_ROUND when FP_ROUND operation is legal // since this operation is more efficient than stack operation. if (TLI.getStrictFPOperationAction(Node->getOpcode(), Node->getValueType(0)) == TargetLowering::Legal) break; // We fall back to use stack operation when the FP_ROUND operation // isn't available. Tmp1 = EmitStackConvert(Node->getOperand(1), Node->getValueType(0), Node->getValueType(0), dl, Node->getOperand(0)); ReplaceNode(Node, Tmp1.getNode()); LLVM_DEBUG(dbgs() << "Successfully expanded STRICT_FP_ROUND node\n"); return true; case ISD::FP_ROUND: case ISD::BITCAST: Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0), Node->getValueType(0), dl); Results.push_back(Tmp1); break; case ISD::STRICT_FP_EXTEND: // When strict mode is enforced we can't do expansion because it // does not honor the "strict" properties. Only libcall is allowed. if (TLI.isStrictFPEnabled()) break; // We might as well mutate to FP_EXTEND when FP_EXTEND operation is legal // since this operation is more efficient than stack operation. if (TLI.getStrictFPOperationAction(Node->getOpcode(), Node->getValueType(0)) == TargetLowering::Legal) break; // We fall back to use stack operation when the FP_EXTEND operation // isn't available. Tmp1 = EmitStackConvert(Node->getOperand(1), Node->getOperand(1).getValueType(), Node->getValueType(0), dl, Node->getOperand(0)); ReplaceNode(Node, Tmp1.getNode()); LLVM_DEBUG(dbgs() << "Successfully expanded STRICT_FP_EXTEND node\n"); return true; case ISD::FP_EXTEND: Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getOperand(0).getValueType(), Node->getValueType(0), dl); Results.push_back(Tmp1); break; case ISD::SIGN_EXTEND_INREG: { EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); EVT VT = Node->getValueType(0); // An in-register sign-extend of a boolean is a negation: // 'true' (1) sign-extended is -1. // 'false' (0) sign-extended is 0. 
// However, we must mask the high bits of the source operand because the // SIGN_EXTEND_INREG does not guarantee that the high bits are already zero. // TODO: Do this for vectors too? if (ExtraVT.getSizeInBits() == 1) { SDValue One = DAG.getConstant(1, dl, VT); SDValue And = DAG.getNode(ISD::AND, dl, VT, Node->getOperand(0), One); SDValue Zero = DAG.getConstant(0, dl, VT); SDValue Neg = DAG.getNode(ISD::SUB, dl, VT, Zero, And); Results.push_back(Neg); break; } // NOTE: we could fall back on load/store here too for targets without // SRA. However, it is doubtful that any exist. EVT ShiftAmountTy = TLI.getShiftAmountTy(VT, DAG.getDataLayout()); unsigned BitsDiff = VT.getScalarSizeInBits() - ExtraVT.getScalarSizeInBits(); SDValue ShiftCst = DAG.getConstant(BitsDiff, dl, ShiftAmountTy); Tmp1 = DAG.getNode(ISD::SHL, dl, Node->getValueType(0), Node->getOperand(0), ShiftCst); Tmp1 = DAG.getNode(ISD::SRA, dl, Node->getValueType(0), Tmp1, ShiftCst); Results.push_back(Tmp1); break; } case ISD::UINT_TO_FP: case ISD::STRICT_UINT_TO_FP: if (TLI.expandUINT_TO_FP(Node, Tmp1, Tmp2, DAG)) { Results.push_back(Tmp1); if (Node->isStrictFPOpcode()) Results.push_back(Tmp2); break; } LLVM_FALLTHROUGH; case ISD::SINT_TO_FP: case ISD::STRICT_SINT_TO_FP: Tmp1 = ExpandLegalINT_TO_FP(Node, Tmp2); Results.push_back(Tmp1); if (Node->isStrictFPOpcode()) Results.push_back(Tmp2); break; case ISD::FP_TO_SINT: if (TLI.expandFP_TO_SINT(Node, Tmp1, DAG)) Results.push_back(Tmp1); break; case ISD::STRICT_FP_TO_SINT: if (TLI.expandFP_TO_SINT(Node, Tmp1, DAG)) { ReplaceNode(Node, Tmp1.getNode()); LLVM_DEBUG(dbgs() << "Successfully expanded STRICT_FP_TO_SINT node\n"); return true; } break; case ISD::FP_TO_UINT: if (TLI.expandFP_TO_UINT(Node, Tmp1, Tmp2, DAG)) Results.push_back(Tmp1); break; case ISD::STRICT_FP_TO_UINT: if (TLI.expandFP_TO_UINT(Node, Tmp1, Tmp2, DAG)) { // Relink the chain. DAG.ReplaceAllUsesOfValueWith(SDValue(Node,1), Tmp2); // Replace the new UINT result. 
ReplaceNodeWithValue(SDValue(Node, 0), Tmp1); LLVM_DEBUG(dbgs() << "Successfully expanded STRICT_FP_TO_UINT node\n"); return true; } break; case ISD::VAARG: Results.push_back(DAG.expandVAArg(Node)); Results.push_back(Results[0].getValue(1)); break; case ISD::VACOPY: Results.push_back(DAG.expandVACopy(Node)); break; case ISD::EXTRACT_VECTOR_ELT: if (Node->getOperand(0).getValueType().getVectorNumElements() == 1) // This must be an access of the only element. Return it. Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0), Node->getOperand(0)); else Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0)); Results.push_back(Tmp1); break; case ISD::EXTRACT_SUBVECTOR: Results.push_back(ExpandExtractFromVectorThroughStack(SDValue(Node, 0))); break; case ISD::INSERT_SUBVECTOR: Results.push_back(ExpandInsertToVectorThroughStack(SDValue(Node, 0))); break; case ISD::CONCAT_VECTORS: Results.push_back(ExpandVectorBuildThroughStack(Node)); break; case ISD::SCALAR_TO_VECTOR: Results.push_back(ExpandSCALAR_TO_VECTOR(Node)); break; case ISD::INSERT_VECTOR_ELT: Results.push_back(ExpandINSERT_VECTOR_ELT(Node->getOperand(0), Node->getOperand(1), Node->getOperand(2), dl)); break; case ISD::VECTOR_SHUFFLE: { SmallVector<int, 32> NewMask; ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Node)->getMask(); EVT VT = Node->getValueType(0); EVT EltVT = VT.getVectorElementType(); SDValue Op0 = Node->getOperand(0); SDValue Op1 = Node->getOperand(1); if (!TLI.isTypeLegal(EltVT)) { EVT NewEltVT = TLI.getTypeToTransformTo(*DAG.getContext(), EltVT); // BUILD_VECTOR operands are allowed to be wider than the element type. // But if NewEltVT is smaller that EltVT the BUILD_VECTOR does not accept // it. if (NewEltVT.bitsLT(EltVT)) { // Convert shuffle node. // If original node was v4i64 and the new EltVT is i32, // cast operands to v8i32 and re-build the mask. // Calculate new VT, the size of the new VT should be equal to original. 
EVT NewVT = EVT::getVectorVT(*DAG.getContext(), NewEltVT, VT.getSizeInBits() / NewEltVT.getSizeInBits()); assert(NewVT.bitsEq(VT)); // cast operands to new VT Op0 = DAG.getNode(ISD::BITCAST, dl, NewVT, Op0); Op1 = DAG.getNode(ISD::BITCAST, dl, NewVT, Op1); // Convert the shuffle mask unsigned int factor = NewVT.getVectorNumElements()/VT.getVectorNumElements(); // EltVT gets smaller assert(factor > 0); for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { if (Mask[i] < 0) { for (unsigned fi = 0; fi < factor; ++fi) NewMask.push_back(Mask[i]); } else { for (unsigned fi = 0; fi < factor; ++fi) NewMask.push_back(Mask[i]*factor+fi); } } Mask = NewMask; VT = NewVT; } EltVT = NewEltVT; } unsigned NumElems = VT.getVectorNumElements(); SmallVector<SDValue, 16> Ops; for (unsigned i = 0; i != NumElems; ++i) { if (Mask[i] < 0) { Ops.push_back(DAG.getUNDEF(EltVT)); continue; } unsigned Idx = Mask[i]; if (Idx < NumElems) Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0, DAG.getVectorIdxConstant(Idx, dl))); else Ops.push_back( DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op1, DAG.getVectorIdxConstant(Idx - NumElems, dl))); } Tmp1 = DAG.getBuildVector(VT, dl, Ops); // We may have changed the BUILD_VECTOR type. Cast it back to the Node type. 
Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0), Tmp1); Results.push_back(Tmp1); break; } case ISD::EXTRACT_ELEMENT: { EVT OpTy = Node->getOperand(0).getValueType(); if (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue()) { // 1 -> Hi Tmp1 = DAG.getNode(ISD::SRL, dl, OpTy, Node->getOperand(0), DAG.getConstant(OpTy.getSizeInBits() / 2, dl, TLI.getShiftAmountTy( Node->getOperand(0).getValueType(), DAG.getDataLayout()))); Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Tmp1); } else { // 0 -> Lo Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Node->getOperand(0)); } Results.push_back(Tmp1); break; } case ISD::STACKSAVE: // Expand to CopyFromReg if the target set // StackPointerRegisterToSaveRestore. if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) { Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, SP, Node->getValueType(0))); Results.push_back(Results[0].getValue(1)); } else { Results.push_back(DAG.getUNDEF(Node->getValueType(0))); Results.push_back(Node->getOperand(0)); } break; case ISD::STACKRESTORE: // Expand to CopyToReg if the target set // StackPointerRegisterToSaveRestore. if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) { Results.push_back(DAG.getCopyToReg(Node->getOperand(0), dl, SP, Node->getOperand(1))); } else { Results.push_back(Node->getOperand(0)); } break; case ISD::GET_DYNAMIC_AREA_OFFSET: Results.push_back(DAG.getConstant(0, dl, Node->getValueType(0))); Results.push_back(Results[0].getValue(0)); break; case ISD::FCOPYSIGN: Results.push_back(ExpandFCOPYSIGN(Node)); break; case ISD::FNEG: // Expand Y = FNEG(X) -> Y = SUB -0.0, X Tmp1 = DAG.getConstantFP(-0.0, dl, Node->getValueType(0)); // TODO: If FNEG has fast-math-flags, propagate them to the FSUB. 
Tmp1 = DAG.getNode(ISD::FSUB, dl, Node->getValueType(0), Tmp1, Node->getOperand(0)); Results.push_back(Tmp1); break; case ISD::FABS: Results.push_back(ExpandFABS(Node)); break; case ISD::SMIN: case ISD::SMAX: case ISD::UMIN: case ISD::UMAX: { // Expand Y = MAX(A, B) -> Y = (A > B) ? A : B ISD::CondCode Pred; switch (Node->getOpcode()) { default: llvm_unreachable("How did we get here?"); case ISD::SMAX: Pred = ISD::SETGT; break; case ISD::SMIN: Pred = ISD::SETLT; break; case ISD::UMAX: Pred = ISD::SETUGT; break; case ISD::UMIN: Pred = ISD::SETULT; break; } Tmp1 = Node->getOperand(0); Tmp2 = Node->getOperand(1); Tmp1 = DAG.getSelectCC(dl, Tmp1, Tmp2, Tmp1, Tmp2, Pred); Results.push_back(Tmp1); break; } case ISD::FMINNUM: case ISD::FMAXNUM: { if (SDValue Expanded = TLI.expandFMINNUM_FMAXNUM(Node, DAG)) Results.push_back(Expanded); break; } case ISD::FSIN: case ISD::FCOS: { EVT VT = Node->getValueType(0); // Turn fsin / fcos into ISD::FSINCOS node if there are a pair of fsin / // fcos which share the same operand and both are used. if ((TLI.isOperationLegalOrCustom(ISD::FSINCOS, VT) || isSinCosLibcallAvailable(Node, TLI)) && useSinCos(Node)) { SDVTList VTs = DAG.getVTList(VT, VT); Tmp1 = DAG.getNode(ISD::FSINCOS, dl, VTs, Node->getOperand(0)); if (Node->getOpcode() == ISD::FCOS) Tmp1 = Tmp1.getValue(1); Results.push_back(Tmp1); } break; } case ISD::FMAD: llvm_unreachable("Illegal fmad should never be formed"); case ISD::FP16_TO_FP: if (Node->getValueType(0) != MVT::f32) { // We can extend to types bigger than f32 in two steps without changing // the result. Since "f16 -> f32" is much more commonly available, give // CodeGen the option of emitting that before resorting to a libcall. 
SDValue Res = DAG.getNode(ISD::FP16_TO_FP, dl, MVT::f32, Node->getOperand(0)); Results.push_back( DAG.getNode(ISD::FP_EXTEND, dl, Node->getValueType(0), Res)); } break; case ISD::STRICT_FP16_TO_FP: if (Node->getValueType(0) != MVT::f32) { // We can extend to types bigger than f32 in two steps without changing // the result. Since "f16 -> f32" is much more commonly available, give // CodeGen the option of emitting that before resorting to a libcall. SDValue Res = DAG.getNode(ISD::STRICT_FP16_TO_FP, dl, {MVT::f32, MVT::Other}, {Node->getOperand(0), Node->getOperand(1)}); Res = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {Node->getValueType(0), MVT::Other}, {Res.getValue(1), Res}); Results.push_back(Res); Results.push_back(Res.getValue(1)); } break; case ISD::FP_TO_FP16: LLVM_DEBUG(dbgs() << "Legalizing FP_TO_FP16\n"); if (!TLI.useSoftFloat() && TM.Options.UnsafeFPMath) { SDValue Op = Node->getOperand(0); MVT SVT = Op.getSimpleValueType(); if ((SVT == MVT::f64 || SVT == MVT::f80) && TLI.isOperationLegalOrCustom(ISD::FP_TO_FP16, MVT::f32)) { // Under fastmath, we can expand this node into a fround followed by // a float-half conversion. SDValue FloatVal = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Op, DAG.getIntPtrConstant(0, dl)); Results.push_back( DAG.getNode(ISD::FP_TO_FP16, dl, Node->getValueType(0), FloatVal)); } } break; case ISD::ConstantFP: { ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Node); // Check to see if this FP immediate is already legal. // If this is a legal constant, turn it into a TargetConstantFP node. 
if (!TLI.isFPImmLegal(CFP->getValueAPF(), Node->getValueType(0), DAG.getMachineFunction().getFunction().hasOptSize())) Results.push_back(ExpandConstantFP(CFP, true)); break; } case ISD::Constant: { ConstantSDNode *CP = cast<ConstantSDNode>(Node); Results.push_back(ExpandConstant(CP)); break; } case ISD::FSUB: { EVT VT = Node->getValueType(0); if (TLI.isOperationLegalOrCustom(ISD::FADD, VT) && TLI.isOperationLegalOrCustom(ISD::FNEG, VT)) { const SDNodeFlags Flags = Node->getFlags(); Tmp1 = DAG.getNode(ISD::FNEG, dl, VT, Node->getOperand(1)); Tmp1 = DAG.getNode(ISD::FADD, dl, VT, Node->getOperand(0), Tmp1, Flags); Results.push_back(Tmp1); } break; } case ISD::SUB: { EVT VT = Node->getValueType(0); assert(TLI.isOperationLegalOrCustom(ISD::ADD, VT) && TLI.isOperationLegalOrCustom(ISD::XOR, VT) && "Don't know how to expand this subtraction!"); Tmp1 = DAG.getNode(ISD::XOR, dl, VT, Node->getOperand(1), DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl, VT)); Tmp1 = DAG.getNode(ISD::ADD, dl, VT, Tmp1, DAG.getConstant(1, dl, VT)); Results.push_back(DAG.getNode(ISD::ADD, dl, VT, Node->getOperand(0), Tmp1)); break; } case ISD::UREM: case ISD::SREM: if (TLI.expandREM(Node, Tmp1, DAG)) Results.push_back(Tmp1); break; case ISD::UDIV: case ISD::SDIV: { bool isSigned = Node->getOpcode() == ISD::SDIV; unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM; EVT VT = Node->getValueType(0); if (TLI.isOperationLegalOrCustom(DivRemOpc, VT)) { SDVTList VTs = DAG.getVTList(VT, VT); Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Node->getOperand(0), Node->getOperand(1)); Results.push_back(Tmp1); } break; } case ISD::MULHU: case ISD::MULHS: { unsigned ExpandOpcode = Node->getOpcode() == ISD::MULHU ? 
ISD::UMUL_LOHI : ISD::SMUL_LOHI; EVT VT = Node->getValueType(0); SDVTList VTs = DAG.getVTList(VT, VT); Tmp1 = DAG.getNode(ExpandOpcode, dl, VTs, Node->getOperand(0), Node->getOperand(1)); Results.push_back(Tmp1.getValue(1)); break; } case ISD::UMUL_LOHI: case ISD::SMUL_LOHI: { SDValue LHS = Node->getOperand(0); SDValue RHS = Node->getOperand(1); MVT VT = LHS.getSimpleValueType(); unsigned MULHOpcode = Node->getOpcode() == ISD::UMUL_LOHI ? ISD::MULHU : ISD::MULHS; if (TLI.isOperationLegalOrCustom(MULHOpcode, VT)) { Results.push_back(DAG.getNode(ISD::MUL, dl, VT, LHS, RHS)); Results.push_back(DAG.getNode(MULHOpcode, dl, VT, LHS, RHS)); break; } SmallVector<SDValue, 4> Halves; EVT HalfType = EVT(VT).getHalfSizedIntegerVT(*DAG.getContext()); assert(TLI.isTypeLegal(HalfType)); if (TLI.expandMUL_LOHI(Node->getOpcode(), VT, Node, LHS, RHS, Halves, HalfType, DAG, TargetLowering::MulExpansionKind::Always)) { for (unsigned i = 0; i < 2; ++i) { SDValue Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Halves[2 * i]); SDValue Hi = DAG.getNode(ISD::ANY_EXTEND, dl, VT, Halves[2 * i + 1]); SDValue Shift = DAG.getConstant( HalfType.getScalarSizeInBits(), dl, TLI.getShiftAmountTy(HalfType, DAG.getDataLayout())); Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift); Results.push_back(DAG.getNode(ISD::OR, dl, VT, Lo, Hi)); } break; } break; } case ISD::MUL: { EVT VT = Node->getValueType(0); SDVTList VTs = DAG.getVTList(VT, VT); // See if multiply or divide can be lowered using two-result operations. // We just need the low half of the multiply; try both the signed // and unsigned forms. If the target supports both SMUL_LOHI and // UMUL_LOHI, form a preference by checking which forms of plain // MULH it supports. 
bool HasSMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::SMUL_LOHI, VT); bool HasUMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::UMUL_LOHI, VT); bool HasMULHS = TLI.isOperationLegalOrCustom(ISD::MULHS, VT); bool HasMULHU = TLI.isOperationLegalOrCustom(ISD::MULHU, VT); unsigned OpToUse = 0; if (HasSMUL_LOHI && !HasMULHS) { OpToUse = ISD::SMUL_LOHI; } else if (HasUMUL_LOHI && !HasMULHU) { OpToUse = ISD::UMUL_LOHI; } else if (HasSMUL_LOHI) { OpToUse = ISD::SMUL_LOHI; } else if (HasUMUL_LOHI) { OpToUse = ISD::UMUL_LOHI; } if (OpToUse) { Results.push_back(DAG.getNode(OpToUse, dl, VTs, Node->getOperand(0), Node->getOperand(1))); break; } SDValue Lo, Hi; EVT HalfType = VT.getHalfSizedIntegerVT(*DAG.getContext()); if (TLI.isOperationLegalOrCustom(ISD::ZERO_EXTEND, VT) && TLI.isOperationLegalOrCustom(ISD::ANY_EXTEND, VT) && TLI.isOperationLegalOrCustom(ISD::SHL, VT) && TLI.isOperationLegalOrCustom(ISD::OR, VT) && TLI.expandMUL(Node, Lo, Hi, HalfType, DAG, TargetLowering::MulExpansionKind::OnlyLegalOrCustom)) { Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo); Hi = DAG.getNode(ISD::ANY_EXTEND, dl, VT, Hi); SDValue Shift = DAG.getConstant(HalfType.getSizeInBits(), dl, TLI.getShiftAmountTy(HalfType, DAG.getDataLayout())); Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift); Results.push_back(DAG.getNode(ISD::OR, dl, VT, Lo, Hi)); } break; } case ISD::FSHL: case ISD::FSHR: if (TLI.expandFunnelShift(Node, Tmp1, DAG)) Results.push_back(Tmp1); break; case ISD::ROTL: case ISD::ROTR: if (TLI.expandROT(Node, Tmp1, DAG)) Results.push_back(Tmp1); break; case ISD::SADDSAT: case ISD::UADDSAT: case ISD::SSUBSAT: case ISD::USUBSAT: Results.push_back(TLI.expandAddSubSat(Node, DAG)); break; case ISD::SMULFIX: case ISD::SMULFIXSAT: case ISD::UMULFIX: case ISD::UMULFIXSAT: Results.push_back(TLI.expandFixedPointMul(Node, DAG)); break; case ISD::SDIVFIX: case ISD::SDIVFIXSAT: case ISD::UDIVFIX: case ISD::UDIVFIXSAT: if (SDValue V = TLI.expandFixedPointDiv(Node->getOpcode(), SDLoc(Node), Node->getOperand(0), 
Node->getOperand(1), Node->getConstantOperandVal(2), DAG)) { Results.push_back(V); break; } // FIXME: We might want to retry here with a wider type if we fail, if that // type is legal. // FIXME: Technically, so long as we only have sdivfixes where BW+Scale is // <= 128 (which is the case for all of the default Embedded-C types), // we will only get here with types and scales that we could always expand // if we were allowed to generate libcalls to division functions of illegal // type. But we cannot do that. llvm_unreachable("Cannot expand DIVFIX!"); case ISD::ADDCARRY: case ISD::SUBCARRY: { SDValue LHS = Node->getOperand(0); SDValue RHS = Node->getOperand(1); SDValue Carry = Node->getOperand(2); bool IsAdd = Node->getOpcode() == ISD::ADDCARRY; // Initial add of the 2 operands. unsigned Op = IsAdd ? ISD::ADD : ISD::SUB; EVT VT = LHS.getValueType(); SDValue Sum = DAG.getNode(Op, dl, VT, LHS, RHS); // Initial check for overflow. EVT CarryType = Node->getValueType(1); EVT SetCCType = getSetCCResultType(Node->getValueType(0)); ISD::CondCode CC = IsAdd ? ISD::SETULT : ISD::SETUGT; SDValue Overflow = DAG.getSetCC(dl, SetCCType, Sum, LHS, CC); // Add of the sum and the carry. SDValue One = DAG.getConstant(1, dl, VT); SDValue CarryExt = DAG.getNode(ISD::AND, dl, VT, DAG.getZExtOrTrunc(Carry, dl, VT), One); SDValue Sum2 = DAG.getNode(Op, dl, VT, Sum, CarryExt); // Second check for overflow. If we are adding, we can only overflow if the // initial sum is all 1s ang the carry is set, resulting in a new sum of 0. // If we are subtracting, we can only overflow if the initial sum is 0 and // the carry is set, resulting in a new sum of all 1s. SDValue Zero = DAG.getConstant(0, dl, VT); SDValue Overflow2 = IsAdd ? 
DAG.getSetCC(dl, SetCCType, Sum2, Zero, ISD::SETEQ) : DAG.getSetCC(dl, SetCCType, Sum, Zero, ISD::SETEQ); Overflow2 = DAG.getNode(ISD::AND, dl, SetCCType, Overflow2, DAG.getZExtOrTrunc(Carry, dl, SetCCType)); SDValue ResultCarry = DAG.getNode(ISD::OR, dl, SetCCType, Overflow, Overflow2); Results.push_back(Sum2); Results.push_back(DAG.getBoolExtOrTrunc(ResultCarry, dl, CarryType, VT)); break; } case ISD::SADDO: case ISD::SSUBO: { SDValue Result, Overflow; TLI.expandSADDSUBO(Node, Result, Overflow, DAG); Results.push_back(Result); Results.push_back(Overflow); break; } case ISD::UADDO: case ISD::USUBO: { SDValue Result, Overflow; TLI.expandUADDSUBO(Node, Result, Overflow, DAG); Results.push_back(Result); Results.push_back(Overflow); break; } case ISD::UMULO: case ISD::SMULO: { SDValue Result, Overflow; if (TLI.expandMULO(Node, Result, Overflow, DAG)) { Results.push_back(Result); Results.push_back(Overflow); } break; } case ISD::BUILD_PAIR: { EVT PairTy = Node->getValueType(0); Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, PairTy, Node->getOperand(0)); Tmp2 = DAG.getNode(ISD::ANY_EXTEND, dl, PairTy, Node->getOperand(1)); Tmp2 = DAG.getNode( ISD::SHL, dl, PairTy, Tmp2, DAG.getConstant(PairTy.getSizeInBits() / 2, dl, TLI.getShiftAmountTy(PairTy, DAG.getDataLayout()))); Results.push_back(DAG.getNode(ISD::OR, dl, PairTy, Tmp1, Tmp2)); break; } case ISD::SELECT: Tmp1 = Node->getOperand(0); Tmp2 = Node->getOperand(1); Tmp3 = Node->getOperand(2); if (Tmp1.getOpcode() == ISD::SETCC) { Tmp1 = DAG.getSelectCC(dl, Tmp1.getOperand(0), Tmp1.getOperand(1), Tmp2, Tmp3, cast<CondCodeSDNode>(Tmp1.getOperand(2))->get()); } else { Tmp1 = DAG.getSelectCC(dl, Tmp1, DAG.getConstant(0, dl, Tmp1.getValueType()), Tmp2, Tmp3, ISD::SETNE); } Tmp1->setFlags(Node->getFlags()); Results.push_back(Tmp1); break; case ISD::BR_JT: { SDValue Chain = Node->getOperand(0); SDValue Table = Node->getOperand(1); SDValue Index = Node->getOperand(2); const DataLayout &TD = DAG.getDataLayout(); EVT PTy = 
TLI.getPointerTy(TD); unsigned EntrySize = DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD); // For power-of-two jumptable entry sizes convert multiplication to a shift. // This transformation needs to be done here since otherwise the MIPS // backend will end up emitting a three instruction multiply sequence // instead of a single shift and MSP430 will call a runtime function. if (llvm::isPowerOf2_32(EntrySize)) Index = DAG.getNode( ISD::SHL, dl, Index.getValueType(), Index, DAG.getConstant(llvm::Log2_32(EntrySize), dl, Index.getValueType())); else Index = DAG.getNode(ISD::MUL, dl, Index.getValueType(), Index, DAG.getConstant(EntrySize, dl, Index.getValueType())); SDValue Addr = DAG.getNode(ISD::ADD, dl, Index.getValueType(), Index, Table); EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8); SDValue LD = DAG.getExtLoad( ISD::SEXTLOAD, dl, PTy, Chain, Addr, MachinePointerInfo::getJumpTable(DAG.getMachineFunction()), MemVT); Addr = LD; if (TLI.isJumpTableRelative()) { // For PIC, the sequence is: // BRIND(load(Jumptable + index) + RelocBase) // RelocBase can be JumpTable, GOT or some sort of global base. Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, TLI.getPICJumpTableRelocBase(Table, DAG)); } Tmp1 = TLI.expandIndirectJTBranch(dl, LD.getValue(1), Addr, DAG); Results.push_back(Tmp1); break; } case ISD::BRCOND: // Expand brcond's setcc into its constituent parts and create a BR_CC // Node. Tmp1 = Node->getOperand(0); Tmp2 = Node->getOperand(1); if (Tmp2.getOpcode() == ISD::SETCC) { Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, Tmp1, Tmp2.getOperand(2), Tmp2.getOperand(0), Tmp2.getOperand(1), Node->getOperand(2)); } else { // We test only the i1 bit. Skip the AND if UNDEF or another AND. 
if (Tmp2.isUndef() || (Tmp2.getOpcode() == ISD::AND && isa<ConstantSDNode>(Tmp2.getOperand(1)) && cast<ConstantSDNode>(Tmp2.getOperand(1))->getZExtValue() == 1)) Tmp3 = Tmp2; else Tmp3 = DAG.getNode(ISD::AND, dl, Tmp2.getValueType(), Tmp2, DAG.getConstant(1, dl, Tmp2.getValueType())); Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, Tmp1, DAG.getCondCode(ISD::SETNE), Tmp3, DAG.getConstant(0, dl, Tmp3.getValueType()), Node->getOperand(2)); } Results.push_back(Tmp1); break; case ISD::SETCC: case ISD::STRICT_FSETCC: case ISD::STRICT_FSETCCS: { bool IsStrict = Node->getOpcode() != ISD::SETCC; bool IsSignaling = Node->getOpcode() == ISD::STRICT_FSETCCS; SDValue Chain = IsStrict ? Node->getOperand(0) : SDValue(); unsigned Offset = IsStrict ? 1 : 0; Tmp1 = Node->getOperand(0 + Offset); Tmp2 = Node->getOperand(1 + Offset); Tmp3 = Node->getOperand(2 + Offset); bool Legalized = LegalizeSetCCCondCode(Node->getValueType(0), Tmp1, Tmp2, Tmp3, NeedInvert, dl, Chain, IsSignaling); if (Legalized) { // If we expanded the SETCC by swapping LHS and RHS, or by inverting the // condition code, create a new SETCC node. if (Tmp3.getNode()) Tmp1 = DAG.getNode(ISD::SETCC, dl, Node->getValueType(0), Tmp1, Tmp2, Tmp3, Node->getFlags()); // If we expanded the SETCC by inverting the condition code, then wrap // the existing SETCC in a NOT to restore the intended condition. if (NeedInvert) Tmp1 = DAG.getLogicalNOT(dl, Tmp1, Tmp1->getValueType(0)); Results.push_back(Tmp1); if (IsStrict) Results.push_back(Chain); break; } // FIXME: It seems Legalized is false iff CCCode is Legal. I don't // understand if this code is useful for strict nodes. assert(!IsStrict && "Don't know how to expand for strict nodes."); // Otherwise, SETCC for the given comparison type must be completely // illegal; expand it into a SELECT_CC. 
EVT VT = Node->getValueType(0); int TrueValue; switch (TLI.getBooleanContents(Tmp1.getValueType())) { case TargetLowering::ZeroOrOneBooleanContent: case TargetLowering::UndefinedBooleanContent: TrueValue = 1; break; case TargetLowering::ZeroOrNegativeOneBooleanContent: TrueValue = -1; break; } Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, VT, Tmp1, Tmp2, DAG.getConstant(TrueValue, dl, VT), DAG.getConstant(0, dl, VT), Tmp3); Tmp1->setFlags(Node->getFlags()); Results.push_back(Tmp1); break; } case ISD::SELECT_CC: { // TODO: need to add STRICT_SELECT_CC and STRICT_SELECT_CCS Tmp1 = Node->getOperand(0); // LHS Tmp2 = Node->getOperand(1); // RHS Tmp3 = Node->getOperand(2); // True Tmp4 = Node->getOperand(3); // False EVT VT = Node->getValueType(0); SDValue Chain; SDValue CC = Node->getOperand(4); ISD::CondCode CCOp = cast<CondCodeSDNode>(CC)->get(); if (TLI.isCondCodeLegalOrCustom(CCOp, Tmp1.getSimpleValueType())) { // If the condition code is legal, then we need to expand this // node using SETCC and SELECT. EVT CmpVT = Tmp1.getValueType(); assert(!TLI.isOperationExpand(ISD::SELECT, VT) && "Cannot expand ISD::SELECT_CC when ISD::SELECT also needs to be " "expanded."); EVT CCVT = getSetCCResultType(CmpVT); SDValue Cond = DAG.getNode(ISD::SETCC, dl, CCVT, Tmp1, Tmp2, CC, Node->getFlags()); Results.push_back(DAG.getSelect(dl, VT, Cond, Tmp3, Tmp4)); break; } // SELECT_CC is legal, so the condition code must not be. bool Legalized = false; // Try to legalize by inverting the condition. This is for targets that // might support an ordered version of a condition, but not the unordered // version (or vice versa). 
ISD::CondCode InvCC = ISD::getSetCCInverse(CCOp, Tmp1.getValueType()); if (TLI.isCondCodeLegalOrCustom(InvCC, Tmp1.getSimpleValueType())) { // Use the new condition code and swap true and false Legalized = true; Tmp1 = DAG.getSelectCC(dl, Tmp1, Tmp2, Tmp4, Tmp3, InvCC); Tmp1->setFlags(Node->getFlags()); } else { // If The inverse is not legal, then try to swap the arguments using // the inverse condition code. ISD::CondCode SwapInvCC = ISD::getSetCCSwappedOperands(InvCC); if (TLI.isCondCodeLegalOrCustom(SwapInvCC, Tmp1.getSimpleValueType())) { // The swapped inverse condition is legal, so swap true and false, // lhs and rhs. Legalized = true; Tmp1 = DAG.getSelectCC(dl, Tmp2, Tmp1, Tmp4, Tmp3, SwapInvCC); Tmp1->setFlags(Node->getFlags()); } } if (!Legalized) { Legalized = LegalizeSetCCCondCode(getSetCCResultType(Tmp1.getValueType()), Tmp1, Tmp2, CC, NeedInvert, dl, Chain); assert(Legalized && "Can't legalize SELECT_CC with legal condition!"); // If we expanded the SETCC by inverting the condition code, then swap // the True/False operands to match. if (NeedInvert) std::swap(Tmp3, Tmp4); // If we expanded the SETCC by swapping LHS and RHS, or by inverting the // condition code, create a new SELECT_CC node. 
if (CC.getNode()) { Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), Tmp1, Tmp2, Tmp3, Tmp4, CC); } else { Tmp2 = DAG.getConstant(0, dl, Tmp1.getValueType()); CC = DAG.getCondCode(ISD::SETNE); Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), Tmp1, Tmp2, Tmp3, Tmp4, CC); } Tmp1->setFlags(Node->getFlags()); } Results.push_back(Tmp1); break; } case ISD::BR_CC: { // TODO: need to add STRICT_BR_CC and STRICT_BR_CCS SDValue Chain; Tmp1 = Node->getOperand(0); // Chain Tmp2 = Node->getOperand(2); // LHS Tmp3 = Node->getOperand(3); // RHS Tmp4 = Node->getOperand(1); // CC bool Legalized = LegalizeSetCCCondCode(getSetCCResultType(Tmp2.getValueType()), Tmp2, Tmp3, Tmp4, NeedInvert, dl, Chain); (void)Legalized; assert(Legalized && "Can't legalize BR_CC with legal condition!"); assert(!NeedInvert && "Don't know how to invert BR_CC!"); // If we expanded the SETCC by swapping LHS and RHS, create a new BR_CC // node. if (Tmp4.getNode()) { Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1, Tmp4, Tmp2, Tmp3, Node->getOperand(4)); } else { Tmp3 = DAG.getConstant(0, dl, Tmp2.getValueType()); Tmp4 = DAG.getCondCode(ISD::SETNE); Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1, Tmp4, Tmp2, Tmp3, Node->getOperand(4)); } Results.push_back(Tmp1); break; } case ISD::BUILD_VECTOR: Results.push_back(ExpandBUILD_VECTOR(Node)); break; case ISD::SPLAT_VECTOR: Results.push_back(ExpandSPLAT_VECTOR(Node)); break; case ISD::SRA: case ISD::SRL: case ISD::SHL: { // Scalarize vector SRA/SRL/SHL. 
EVT VT = Node->getValueType(0); assert(VT.isVector() && "Unable to legalize non-vector shift"); assert(TLI.isTypeLegal(VT.getScalarType())&& "Element type must be legal"); unsigned NumElem = VT.getVectorNumElements(); SmallVector<SDValue, 8> Scalars; for (unsigned Idx = 0; Idx < NumElem; Idx++) { SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT.getScalarType(), Node->getOperand(0), DAG.getVectorIdxConstant(Idx, dl)); SDValue Sh = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT.getScalarType(), Node->getOperand(1), DAG.getVectorIdxConstant(Idx, dl)); Scalars.push_back(DAG.getNode(Node->getOpcode(), dl, VT.getScalarType(), Ex, Sh)); } SDValue Result = DAG.getBuildVector(Node->getValueType(0), dl, Scalars); Results.push_back(Result); break; } case ISD::VECREDUCE_FADD: case ISD::VECREDUCE_FMUL: case ISD::VECREDUCE_ADD: case ISD::VECREDUCE_MUL: case ISD::VECREDUCE_AND: case ISD::VECREDUCE_OR: case ISD::VECREDUCE_XOR: case ISD::VECREDUCE_SMAX: case ISD::VECREDUCE_SMIN: case ISD::VECREDUCE_UMAX: case ISD::VECREDUCE_UMIN: case ISD::VECREDUCE_FMAX: case ISD::VECREDUCE_FMIN: Results.push_back(TLI.expandVecReduce(Node, DAG)); break; case ISD::GLOBAL_OFFSET_TABLE: case ISD::GlobalAddress: case ISD::GlobalTLSAddress: case ISD::ExternalSymbol: case ISD::ConstantPool: case ISD::JumpTable: case ISD::INTRINSIC_W_CHAIN: case ISD::INTRINSIC_WO_CHAIN: case ISD::INTRINSIC_VOID: // FIXME: Custom lowering for these operations shouldn't return null! // Return true so that we don't call ConvertNodeToLibcall which also won't // do anything. return true; } if (!TLI.isStrictFPEnabled() && Results.empty() && Node->isStrictFPOpcode()) { // FIXME: We were asked to expand a strict floating-point operation, // but there is currently no expansion implemented that would preserve // the "strict" properties. For now, we just fall back to the non-strict // version if that is legal on the target. The actual mutation of the // operation will happen in SelectionDAGISel::DoInstructionSelection. 
// --- tail of SelectionDAGLegalize::ExpandNode() ---
// Fallback for strict floating-point nodes that have no strict-preserving
// expansion: accept the node here when the equivalent non-strict operation is
// Legal on the target; the actual strict->non-strict mutation happens later
// (per the FIXME above, in SelectionDAGISel::DoInstructionSelection).
switch (Node->getOpcode()) {
default:
  // Most strict FP opcodes are registered by their result type.
  if (TLI.getStrictFPOperationAction(Node->getOpcode(),
                                     Node->getValueType(0)) ==
      TargetLowering::Legal)
    return true;
  break;
case ISD::STRICT_LRINT:
case ISD::STRICT_LLRINT:
case ISD::STRICT_LROUND:
case ISD::STRICT_LLROUND:
  // These are registered by the operand type instead of the value
  // type. Reflect that here.
  if (TLI.getStrictFPOperationAction(Node->getOpcode(),
                                     Node->getOperand(1).getValueType()) ==
      TargetLowering::Legal)
    return true;
  break;
}
}

// Replace the original node with the legalized result.
if (Results.empty()) {
  LLVM_DEBUG(dbgs() << "Cannot expand node\n");
  return false;
}

LLVM_DEBUG(dbgs() << "Successfully expanded node\n");
ReplaceNode(Node, Results.data());
return true;
}

/// Lower a node whose operation is not natively supported into a call to a
/// runtime-library routine (RTLIB entry).  On success the original node is
/// replaced by the call results; otherwise the node is left untouched and a
/// debug message is emitted.
void SelectionDAGLegalize::ConvertNodeToLibcall(SDNode *Node) {
  LLVM_DEBUG(dbgs() << "Trying to convert node to libcall\n");
  SmallVector<SDValue, 8> Results;
  SDLoc dl(Node);
  // FIXME: Check flags on the node to see if we can use a finite call.
  unsigned Opc = Node->getOpcode();
  switch (Opc) {
  case ISD::ATOMIC_FENCE: {
    // If the target didn't lower this, lower it to '__sync_synchronize()' call
    // FIXME: handle "fence singlethread" more efficiently.
    TargetLowering::ArgListTy Args;

    TargetLowering::CallLoweringInfo CLI(DAG);
    CLI.setDebugLoc(dl)
        .setChain(Node->getOperand(0))
        .setLibCallee(
            CallingConv::C, Type::getVoidTy(*DAG.getContext()),
            DAG.getExternalSymbol("__sync_synchronize",
                                  TLI.getPointerTy(DAG.getDataLayout())),
            std::move(Args));

    std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);

    // Only the chain result of the call is used (void return).
    Results.push_back(CallResult.second);
    break;
  }
  // By default, atomic intrinsics are marked Legal and lowered. Targets
  // which don't support them directly, however, may want libcalls, in which
  // case they mark them Expand, and we get here.
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_CLR:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_CMP_SWAP: {
    // Pick the __sync_* libcall matching the opcode and memory width.
    MVT VT = cast<AtomicSDNode>(Node)->getMemoryVT().getSimpleVT();
    RTLIB::Libcall LC = RTLIB::getSYNC(Opc, VT);
    assert(LC != RTLIB::UNKNOWN_LIBCALL &&
           "Unexpected atomic op or value type!");

    EVT RetVT = Node->getValueType(0);
    // Operand 0 is the chain; the remaining operands become call arguments.
    SmallVector<SDValue, 4> Ops(Node->op_begin() + 1, Node->op_end());
    TargetLowering::MakeLibCallOptions CallOptions;
    std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(DAG, LC, RetVT, Ops,
                                                      CallOptions,
                                                      SDLoc(Node),
                                                      Node->getOperand(0));
    Results.push_back(Tmp.first);
    Results.push_back(Tmp.second);
    break;
  }
  case ISD::TRAP: {
    // If this operation is not supported, lower it to 'abort()' call
    TargetLowering::ArgListTy Args;
    TargetLowering::CallLoweringInfo CLI(DAG);
    CLI.setDebugLoc(dl)
        .setChain(Node->getOperand(0))
        .setLibCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
                      DAG.getExternalSymbol(
                          "abort", TLI.getPointerTy(DAG.getDataLayout())),
                      std::move(Args));
    std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);

    Results.push_back(CallResult.second);
    break;
  }
  // For each FP operation below, ExpandFPLibCall selects the libcall entry
  // matching the node's floating-point type (f32/f64/f80/f128/ppcf128).
  case ISD::FMINNUM:
  case ISD::STRICT_FMINNUM:
    ExpandFPLibCall(Node, RTLIB::FMIN_F32, RTLIB::FMIN_F64,
                    RTLIB::FMIN_F80, RTLIB::FMIN_F128,
                    RTLIB::FMIN_PPCF128, Results);
    break;
  case ISD::FMAXNUM:
  case ISD::STRICT_FMAXNUM:
    ExpandFPLibCall(Node, RTLIB::FMAX_F32, RTLIB::FMAX_F64,
                    RTLIB::FMAX_F80, RTLIB::FMAX_F128,
                    RTLIB::FMAX_PPCF128, Results);
    break;
  case ISD::FSQRT:
  case ISD::STRICT_FSQRT:
    ExpandFPLibCall(Node, RTLIB::SQRT_F32, RTLIB::SQRT_F64,
                    RTLIB::SQRT_F80, RTLIB::SQRT_F128,
                    RTLIB::SQRT_PPCF128, Results);
    break;
  case ISD::FCBRT:
    ExpandFPLibCall(Node, RTLIB::CBRT_F32, RTLIB::CBRT_F64,
                    RTLIB::CBRT_F80, RTLIB::CBRT_F128,
                    RTLIB::CBRT_PPCF128, Results);
    break;
  case ISD::FSIN:
  case ISD::STRICT_FSIN:
    ExpandFPLibCall(Node, RTLIB::SIN_F32, RTLIB::SIN_F64,
                    RTLIB::SIN_F80, RTLIB::SIN_F128,
                    RTLIB::SIN_PPCF128, Results);
    break;
  case ISD::FCOS:
  case ISD::STRICT_FCOS:
    ExpandFPLibCall(Node, RTLIB::COS_F32, RTLIB::COS_F64,
                    RTLIB::COS_F80, RTLIB::COS_F128,
                    RTLIB::COS_PPCF128, Results);
    break;
  case ISD::FSINCOS:
    // Expand into sincos libcall.
    ExpandSinCosLibCall(Node, Results);
    break;
  case ISD::FLOG:
  case ISD::STRICT_FLOG:
    ExpandFPLibCall(Node, RTLIB::LOG_F32, RTLIB::LOG_F64, RTLIB::LOG_F80,
                    RTLIB::LOG_F128, RTLIB::LOG_PPCF128, Results);
    break;
  case ISD::FLOG2:
  case ISD::STRICT_FLOG2:
    ExpandFPLibCall(Node, RTLIB::LOG2_F32, RTLIB::LOG2_F64, RTLIB::LOG2_F80,
                    RTLIB::LOG2_F128, RTLIB::LOG2_PPCF128, Results);
    break;
  case ISD::FLOG10:
  case ISD::STRICT_FLOG10:
    ExpandFPLibCall(Node, RTLIB::LOG10_F32, RTLIB::LOG10_F64, RTLIB::LOG10_F80,
                    RTLIB::LOG10_F128, RTLIB::LOG10_PPCF128, Results);
    break;
  case ISD::FEXP:
  case ISD::STRICT_FEXP:
    ExpandFPLibCall(Node, RTLIB::EXP_F32, RTLIB::EXP_F64, RTLIB::EXP_F80,
                    RTLIB::EXP_F128, RTLIB::EXP_PPCF128, Results);
    break;
  case ISD::FEXP2:
  case ISD::STRICT_FEXP2:
    ExpandFPLibCall(Node, RTLIB::EXP2_F32, RTLIB::EXP2_F64, RTLIB::EXP2_F80,
                    RTLIB::EXP2_F128, RTLIB::EXP2_PPCF128, Results);
    break;
  case ISD::FTRUNC:
  case ISD::STRICT_FTRUNC:
    ExpandFPLibCall(Node, RTLIB::TRUNC_F32, RTLIB::TRUNC_F64,
                    RTLIB::TRUNC_F80, RTLIB::TRUNC_F128,
                    RTLIB::TRUNC_PPCF128, Results);
    break;
  case ISD::FFLOOR:
  case ISD::STRICT_FFLOOR:
    ExpandFPLibCall(Node, RTLIB::FLOOR_F32, RTLIB::FLOOR_F64,
                    RTLIB::FLOOR_F80, RTLIB::FLOOR_F128,
                    RTLIB::FLOOR_PPCF128, Results);
    break;
  case ISD::FCEIL:
  case ISD::STRICT_FCEIL:
    ExpandFPLibCall(Node, RTLIB::CEIL_F32, RTLIB::CEIL_F64,
                    RTLIB::CEIL_F80, RTLIB::CEIL_F128,
                    RTLIB::CEIL_PPCF128, Results);
    break;
  case ISD::FRINT:
  case ISD::STRICT_FRINT:
    ExpandFPLibCall(Node, RTLIB::RINT_F32, RTLIB::RINT_F64,
                    RTLIB::RINT_F80, RTLIB::RINT_F128,
                    RTLIB::RINT_PPCF128, Results);
    break;
  case ISD::FNEARBYINT:
  case ISD::STRICT_FNEARBYINT:
    ExpandFPLibCall(Node, RTLIB::NEARBYINT_F32,
                    RTLIB::NEARBYINT_F64,
                    RTLIB::NEARBYINT_F80,
                    RTLIB::NEARBYINT_F128,
                    RTLIB::NEARBYINT_PPCF128, Results);
    break;
  case ISD::FROUND:
  case ISD::STRICT_FROUND:
    ExpandFPLibCall(Node, RTLIB::ROUND_F32,
                    RTLIB::ROUND_F64,
                    RTLIB::ROUND_F80,
                    RTLIB::ROUND_F128,
                    RTLIB::ROUND_PPCF128, Results);
    break;
  case ISD::FROUNDEVEN:
  case ISD::STRICT_FROUNDEVEN:
    ExpandFPLibCall(Node, RTLIB::ROUNDEVEN_F32,
                    RTLIB::ROUNDEVEN_F64,
                    RTLIB::ROUNDEVEN_F80,
                    RTLIB::ROUNDEVEN_F128,
                    RTLIB::ROUNDEVEN_PPCF128, Results);
    break;
  case ISD::FPOWI:
  case ISD::STRICT_FPOWI: {
    RTLIB::Libcall LC;
    switch (Node->getSimpleValueType(0).SimpleTy) {
    default: llvm_unreachable("Unexpected request for libcall!");
    case MVT::f32: LC = RTLIB::POWI_F32; break;
    case MVT::f64: LC = RTLIB::POWI_F64; break;
    case MVT::f80: LC = RTLIB::POWI_F80; break;
    case MVT::f128: LC = RTLIB::POWI_F128; break;
    case MVT::ppcf128: LC = RTLIB::POWI_PPCF128; break;
    }
    if (!TLI.getLibcallName(LC)) {
      // Some targets don't have a powi libcall; use pow instead.
      // Convert the integer exponent to FP and emit pow(x, (fp)n).
      SDValue Exponent = DAG.getNode(ISD::SINT_TO_FP, SDLoc(Node),
                                     Node->getValueType(0),
                                     Node->getOperand(1));
      Results.push_back(DAG.getNode(ISD::FPOW, SDLoc(Node),
                                    Node->getValueType(0), Node->getOperand(0),
                                    Exponent));
      break;
    }
    ExpandFPLibCall(Node, RTLIB::POWI_F32, RTLIB::POWI_F64, RTLIB::POWI_F80,
                    RTLIB::POWI_F128, RTLIB::POWI_PPCF128, Results);
    break;
  }
  case ISD::FPOW:
  case ISD::STRICT_FPOW:
    ExpandFPLibCall(Node, RTLIB::POW_F32, RTLIB::POW_F64, RTLIB::POW_F80,
                    RTLIB::POW_F128, RTLIB::POW_PPCF128, Results);
    break;
  // For l*round/l*rint the libcall is selected by the FP *argument* type,
  // hence ExpandArgFPLibCall instead of ExpandFPLibCall.
  case ISD::LROUND:
  case ISD::STRICT_LROUND:
    ExpandArgFPLibCall(Node, RTLIB::LROUND_F32,
                       RTLIB::LROUND_F64, RTLIB::LROUND_F80,
                       RTLIB::LROUND_F128,
                       RTLIB::LROUND_PPCF128, Results);
    break;
  case ISD::LLROUND:
  case ISD::STRICT_LLROUND:
    ExpandArgFPLibCall(Node, RTLIB::LLROUND_F32,
                       RTLIB::LLROUND_F64, RTLIB::LLROUND_F80,
                       RTLIB::LLROUND_F128,
                       RTLIB::LLROUND_PPCF128, Results);
    break;
  case ISD::LRINT:
  case ISD::STRICT_LRINT:
    ExpandArgFPLibCall(Node, RTLIB::LRINT_F32,
                       RTLIB::LRINT_F64, RTLIB::LRINT_F80,
                       RTLIB::LRINT_F128,
                       RTLIB::LRINT_PPCF128, Results);
    break;
  case ISD::LLRINT:
  case ISD::STRICT_LLRINT:
    ExpandArgFPLibCall(Node, RTLIB::LLRINT_F32,
                       RTLIB::LLRINT_F64, RTLIB::LLRINT_F80,
                       RTLIB::LLRINT_F128,
                       RTLIB::LLRINT_PPCF128, Results);
    break;
  case ISD::FDIV:
  case ISD::STRICT_FDIV:
    ExpandFPLibCall(Node, RTLIB::DIV_F32, RTLIB::DIV_F64,
                    RTLIB::DIV_F80, RTLIB::DIV_F128,
                    RTLIB::DIV_PPCF128, Results);
    break;
  case ISD::FREM:
  case ISD::STRICT_FREM:
    ExpandFPLibCall(Node, RTLIB::REM_F32, RTLIB::REM_F64,
                    RTLIB::REM_F80, RTLIB::REM_F128,
                    RTLIB::REM_PPCF128, Results);
    break;
  case ISD::FMA:
  case ISD::STRICT_FMA:
    ExpandFPLibCall(Node, RTLIB::FMA_F32, RTLIB::FMA_F64,
                    RTLIB::FMA_F80, RTLIB::FMA_F128,
                    RTLIB::FMA_PPCF128, Results);
    break;
  case ISD::FADD:
  case ISD::STRICT_FADD:
    ExpandFPLibCall(Node, RTLIB::ADD_F32, RTLIB::ADD_F64,
                    RTLIB::ADD_F80, RTLIB::ADD_F128,
                    RTLIB::ADD_PPCF128, Results);
    break;
  case ISD::FMUL:
  case ISD::STRICT_FMUL:
    ExpandFPLibCall(Node, RTLIB::MUL_F32, RTLIB::MUL_F64,
                    RTLIB::MUL_F80, RTLIB::MUL_F128,
                    RTLIB::MUL_PPCF128, Results);
    break;
  case ISD::FP16_TO_FP:
    // Only the f16 -> f32 extension has a libcall entry here.
    if (Node->getValueType(0) == MVT::f32) {
      Results.push_back(ExpandLibCall(RTLIB::FPEXT_F16_F32, Node, false));
    }
    break;
  case ISD::STRICT_FP16_TO_FP: {
    if (Node->getValueType(0) == MVT::f32) {
      // Strict form threads the incoming chain through the libcall.
      TargetLowering::MakeLibCallOptions CallOptions;
      std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(
          DAG, RTLIB::FPEXT_F16_F32, MVT::f32, Node->getOperand(1), CallOptions,
          SDLoc(Node), Node->getOperand(0));
      Results.push_back(Tmp.first);
      Results.push_back(Tmp.second);
    }
    break;
  }
  case ISD::FP_TO_FP16: {
    RTLIB::Libcall LC =
        RTLIB::getFPROUND(Node->getOperand(0).getValueType(), MVT::f16);
    assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unable to expand fp_to_fp16");
    Results.push_back(ExpandLibCall(LC, Node, false));
    break;
  }
  case ISD::STRICT_FP_TO_FP16: {
    RTLIB::Libcall LC =
        RTLIB::getFPROUND(Node->getOperand(1).getValueType(), MVT::f16);
    assert(LC != RTLIB::UNKNOWN_LIBCALL &&
           "Unable to expand strict_fp_to_fp16");
    TargetLowering::MakeLibCallOptions CallOptions;
    std::pair<SDValue, SDValue> Tmp =
        TLI.makeLibCall(DAG, LC, Node->getValueType(0), Node->getOperand(1),
                        CallOptions, SDLoc(Node), Node->getOperand(0));
    Results.push_back(Tmp.first);
    Results.push_back(Tmp.second);
    break;
  }
  case ISD::FSUB:
  case ISD::STRICT_FSUB:
    ExpandFPLibCall(Node, RTLIB::SUB_F32, RTLIB::SUB_F64,
                    RTLIB::SUB_F80, RTLIB::SUB_F128,
                    RTLIB::SUB_PPCF128, Results);
    break;
  // Integer libcalls: the bool flag marks the operation as signed, which
  // selects between the signed/unsigned RTLIB families.
  case ISD::SREM:
    Results.push_back(ExpandIntLibCall(Node, true,
                                       RTLIB::SREM_I8,
                                       RTLIB::SREM_I16, RTLIB::SREM_I32,
                                       RTLIB::SREM_I64, RTLIB::SREM_I128));
    break;
  case ISD::UREM:
    Results.push_back(ExpandIntLibCall(Node, false,
                                       RTLIB::UREM_I8,
                                       RTLIB::UREM_I16, RTLIB::UREM_I32,
                                       RTLIB::UREM_I64, RTLIB::UREM_I128));
    break;
  case ISD::SDIV:
    Results.push_back(ExpandIntLibCall(Node, true,
                                       RTLIB::SDIV_I8,
                                       RTLIB::SDIV_I16, RTLIB::SDIV_I32,
                                       RTLIB::SDIV_I64, RTLIB::SDIV_I128));
    break;
  case ISD::UDIV:
    Results.push_back(ExpandIntLibCall(Node, false,
                                       RTLIB::UDIV_I8,
                                       RTLIB::UDIV_I16, RTLIB::UDIV_I32,
                                       RTLIB::UDIV_I64, RTLIB::UDIV_I128));
    break;
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    // Expand into divrem libcall
    ExpandDivRemLibCall(Node, Results);
    break;
  case ISD::MUL:
    Results.push_back(ExpandIntLibCall(Node, false,
                                       RTLIB::MUL_I8,
                                       RTLIB::MUL_I16, RTLIB::MUL_I32,
                                       RTLIB::MUL_I64, RTLIB::MUL_I128));
    break;
  case ISD::CTLZ_ZERO_UNDEF:
    switch (Node->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("LibCall explicitly requested, but not available");
    case MVT::i32:
      Results.push_back(ExpandLibCall(RTLIB::CTLZ_I32, Node, false));
      break;
    case MVT::i64:
      Results.push_back(ExpandLibCall(RTLIB::CTLZ_I64, Node, false));
      break;
    case MVT::i128:
      Results.push_back(ExpandLibCall(RTLIB::CTLZ_I128, Node, false));
      break;
    }
    break;
  }

  // Replace the original node with the legalized result.
  if (!Results.empty()) {
    LLVM_DEBUG(dbgs() << "Successfully converted node to libcall\n");
    ReplaceNode(Node, Results.data());
  } else
    LLVM_DEBUG(dbgs() << "Could not convert node to libcall\n");
}

// Determine the vector type to use in place of an original scalar element when
// promoting equally sized vectors.
/// Return the vector type an original scalar element maps to when a vector is
/// promoted to an equally sized vector with narrower elements, e.g. promoting
/// v2i64 -> v4i32 maps each i64 element to a v2i32 "middle" type.
static MVT getPromotedVectorElementType(const TargetLowering &TLI,
                                        MVT EltVT, MVT NewEltVT) {
  // Number of new (narrow) elements that together cover one old element.
  unsigned OldEltsPerNewElt = EltVT.getSizeInBits() / NewEltVT.getSizeInBits();
  MVT MidVT = MVT::getVectorVT(NewEltVT, OldEltsPerNewElt);
  assert(TLI.isTypeLegal(MidVT) && "unexpected");
  return MidVT;
}

/// Legalize a node by performing its operation in a wider ("promoted") type
/// and converting the result back, replacing the original node on success.
void SelectionDAGLegalize::PromoteNode(SDNode *Node) {
  LLVM_DEBUG(dbgs() << "Trying to promote node\n");
  SmallVector<SDValue, 8> Results;
  MVT OVT = Node->getSimpleValueType(0);
  // For these opcodes the type being promoted is an *operand* type, not the
  // result type, so fetch OVT from the relevant operand instead.
  if (Node->getOpcode() == ISD::UINT_TO_FP ||
      Node->getOpcode() == ISD::SINT_TO_FP ||
      Node->getOpcode() == ISD::SETCC ||
      Node->getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
      Node->getOpcode() == ISD::INSERT_VECTOR_ELT) {
    OVT = Node->getOperand(0).getSimpleValueType();
  }
  if (Node->getOpcode() == ISD::STRICT_UINT_TO_FP ||
      Node->getOpcode() == ISD::STRICT_SINT_TO_FP)
    OVT = Node->getOperand(1).getSimpleValueType();
  if (Node->getOpcode() == ISD::BR_CC)
    OVT = Node->getOperand(2).getSimpleValueType();
  MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT);
  SDLoc dl(Node);
  SDValue Tmp1, Tmp2, Tmp3;
  switch (Node->getOpcode()) {
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
  case ISD::CTPOP:
    // Zero extend the argument unless its cttz, then use any_extend.
    if (Node->getOpcode() == ISD::CTTZ ||
        Node->getOpcode() == ISD::CTTZ_ZERO_UNDEF)
      Tmp1 = DAG.getNode(ISD::ANY_EXTEND, dl, NVT, Node->getOperand(0));
    else
      Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));

    if (Node->getOpcode() == ISD::CTTZ) {
      // The count is the same in the promoted type except if the original
      // value was zero.  This can be handled by setting the bit just off
      // the top of the original type.
      auto TopBit = APInt::getOneBitSet(NVT.getSizeInBits(),
                                        OVT.getSizeInBits());
      Tmp1 = DAG.getNode(ISD::OR, dl, NVT, Tmp1,
                         DAG.getConstant(TopBit, dl, NVT));
    }
    // Perform the larger operation. For CTPOP and CTTZ_ZERO_UNDEF, this is
    // already the correct result.
    Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1);
    if (Node->getOpcode() == ISD::CTLZ ||
        Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF) {
      // Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT))
      Tmp1 = DAG.getNode(ISD::SUB, dl, NVT, Tmp1,
                         DAG.getConstant(NVT.getSizeInBits() -
                                         OVT.getSizeInBits(), dl, NVT));
    }
    Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, OVT, Tmp1));
    break;
  case ISD::BITREVERSE:
  case ISD::BSWAP: {
    // Operate in NVT, then shift the result down: the interesting bits end
    // up in the high part after reversing/swapping a zero-extended value.
    unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits();
    Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
    Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1);
    Tmp1 = DAG.getNode(
        ISD::SRL, dl, NVT, Tmp1,
        DAG.getConstant(DiffBits, dl,
                        TLI.getShiftAmountTy(NVT, DAG.getDataLayout())));

    Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, OVT, Tmp1));
    break;
  }
  case ISD::FP_TO_UINT:
  case ISD::STRICT_FP_TO_UINT:
  case ISD::FP_TO_SINT:
  case ISD::STRICT_FP_TO_SINT:
    PromoteLegalFP_TO_INT(Node, dl, Results);
    break;
  case ISD::UINT_TO_FP:
  case ISD::STRICT_UINT_TO_FP:
  case ISD::SINT_TO_FP:
  case ISD::STRICT_SINT_TO_FP:
    PromoteLegalINT_TO_FP(Node, dl, Results);
    break;
  case ISD::VAARG: {
    SDValue Chain = Node->getOperand(0); // Get the chain.
    SDValue Ptr = Node->getOperand(1); // Get the pointer.

    unsigned TruncOp;
    if (OVT.isVector()) {
      TruncOp = ISD::BITCAST;
    } else {
      assert(OVT.isInteger()
        && "VAARG promotion is supported only for vectors or integer types");
      TruncOp = ISD::TRUNCATE;
    }

    // Perform the larger operation, then convert back
    Tmp1 = DAG.getVAArg(NVT, dl, Chain, Ptr, Node->getOperand(2),
             Node->getConstantOperandVal(3));
    Chain = Tmp1.getValue(1);

    Tmp2 = DAG.getNode(TruncOp, dl, OVT, Tmp1);

    // Modified the chain result - switch anything that used the old chain to
    // use the new one.
    DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Tmp2);
    DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Chain);
    if (UpdatedNodes) {
      UpdatedNodes->insert(Tmp2.getNode());
      UpdatedNodes->insert(Chain.getNode());
    }
    ReplacedNode(Node);
    break;
  }
  case ISD::MUL:
  case ISD::SDIV:
  case ISD::SREM:
  case ISD::UDIV:
  case ISD::UREM:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    unsigned ExtOp, TruncOp;
    if (OVT.isVector()) {
      ExtOp   = ISD::BITCAST;
      TruncOp = ISD::BITCAST;
    } else {
      assert(OVT.isInteger() && "Cannot promote logic operation");

      // Signedness of the extension must match the operation: divisions and
      // remainders care about the top bits.
      switch (Node->getOpcode()) {
      default:
        ExtOp = ISD::ANY_EXTEND;
        break;
      case ISD::SDIV:
      case ISD::SREM:
        ExtOp = ISD::SIGN_EXTEND;
        break;
      case ISD::UDIV:
      case ISD::UREM:
        ExtOp = ISD::ZERO_EXTEND;
        break;
      }
      TruncOp = ISD::TRUNCATE;
    }
    // Promote each of the values to the new type.
    Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    // Perform the larger operation, then convert back
    Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2);
    Results.push_back(DAG.getNode(TruncOp, dl, OVT, Tmp1));
    break;
  }
  case ISD::UMUL_LOHI:
  case ISD::SMUL_LOHI: {
    // Promote to a multiply in a wider integer type.
    unsigned ExtOp = Node->getOpcode() == ISD::UMUL_LOHI ? ISD::ZERO_EXTEND :
                                                           ISD::SIGN_EXTEND;
    Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    Tmp1 = DAG.getNode(ISD::MUL, dl, NVT, Tmp1, Tmp2);

    // Low half is the product truncated; high half is the product shifted
    // down by the original bit width, then truncated.
    auto &DL = DAG.getDataLayout();
    unsigned OriginalSize = OVT.getScalarSizeInBits();
    Tmp2 = DAG.getNode(
        ISD::SRL, dl, NVT, Tmp1,
        DAG.getConstant(OriginalSize, dl, TLI.getScalarShiftAmountTy(DL, NVT)));
    Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, OVT, Tmp1));
    Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, OVT, Tmp2));
    break;
  }
  case ISD::SELECT: {
    unsigned ExtOp, TruncOp;
    if (Node->getValueType(0).isVector() ||
        Node->getValueType(0).getSizeInBits() == NVT.getSizeInBits()) {
      ExtOp   = ISD::BITCAST;
      TruncOp = ISD::BITCAST;
    } else if (Node->getValueType(0).isInteger()) {
      ExtOp   = ISD::ANY_EXTEND;
      TruncOp = ISD::TRUNCATE;
    } else {
      ExtOp   = ISD::FP_EXTEND;
      TruncOp = ISD::FP_ROUND;
    }
    Tmp1 = Node->getOperand(0);
    // Promote each of the values to the new type.
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    Tmp3 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(2));
    // Perform the larger operation, then round down.
    Tmp1 = DAG.getSelect(dl, NVT, Tmp1, Tmp2, Tmp3);
    Tmp1->setFlags(Node->getFlags());
    if (TruncOp != ISD::FP_ROUND)
      Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1);
    else
      Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1,
                         DAG.getIntPtrConstant(0, dl));
    Results.push_back(Tmp1);
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Node)->getMask();

    // Cast the two input vectors.
    Tmp1 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(1));

    // Convert the shuffle mask to the right # elements.
    Tmp1 = ShuffleWithNarrowerEltType(NVT, OVT, dl, Tmp1, Tmp2, Mask);
    Tmp1 = DAG.getNode(ISD::BITCAST, dl, OVT, Tmp1);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SETCC: {
    // Integer compares extend with the signedness implied by the condition
    // code; FP compares extend the operands.
    unsigned ExtOp = ISD::FP_EXTEND;
    if (NVT.isInteger()) {
      ISD::CondCode CCCode = cast<CondCodeSDNode>(Node->getOperand(2))->get();
      ExtOp = isSignedIntSetCC(CCCode) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    }
    Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    Results.push_back(DAG.getNode(ISD::SETCC, dl, Node->getValueType(0), Tmp1,
                                  Tmp2, Node->getOperand(2),
                                  Node->getFlags()));
    break;
  }
  case ISD::BR_CC: {
    unsigned ExtOp = ISD::FP_EXTEND;
    if (NVT.isInteger()) {
      ISD::CondCode CCCode = cast<CondCodeSDNode>(Node->getOperand(1))->get();
      ExtOp = isSignedIntSetCC(CCCode) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    }
    Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(2));
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(3));
    Results.push_back(DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0),
                                  Node->getOperand(0), Node->getOperand(1),
                                  Tmp1, Tmp2, Node->getOperand(4)));
    break;
  }
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FPOW:
    Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(1));
    Tmp3 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2,
                       Node->getFlags());
    Results.push_back(DAG.getNode(ISD::FP_ROUND, dl, OVT,
                                  Tmp3, DAG.getIntPtrConstant(0, dl)));
    break;
  case ISD::STRICT_FREM:
  case ISD::STRICT_FPOW:
    // Strict forms: extend both operands, join their chains with a
    // TokenFactor, run the op, then round the result back on the op's chain.
    Tmp1 = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {NVT, MVT::Other},
                       {Node->getOperand(0), Node->getOperand(1)});
    Tmp2 = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {NVT, MVT::Other},
                       {Node->getOperand(0), Node->getOperand(2)});
    Tmp3 = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                       Tmp1.getValue(1), Tmp2.getValue(1));
    Tmp1 = DAG.getNode(Node->getOpcode(), dl, {NVT, MVT::Other},
                       {Tmp3, Tmp1, Tmp2});
    Tmp1 = DAG.getNode(ISD::STRICT_FP_ROUND, dl, {OVT, MVT::Other},
                       {Tmp1.getValue(1), Tmp1, DAG.getIntPtrConstant(0, dl)});
    Results.push_back(Tmp1);
    Results.push_back(Tmp1.getValue(1));
    break;
  case ISD::FMA:
    Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(1));
    Tmp3 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(2));
    Results.push_back(
        DAG.getNode(ISD::FP_ROUND, dl, OVT,
                    DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2, Tmp3),
                    DAG.getIntPtrConstant(0, dl)));
    break;
  case ISD::FCOPYSIGN:
  case ISD::FPOWI: {
    // Only the first operand is FP-extended; the second operand (sign source
    // or integer exponent) is passed through unchanged.
    Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0));
    Tmp2 = Node->getOperand(1);
    Tmp3 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2);

    // fcopysign doesn't change anything but the sign bit, so
    //   (fp_round (fcopysign (fpext a), b))
    // is as precise as
    //   (fp_round (fpext a))
    // which is a no-op. Mark it as a TRUNCating FP_ROUND.
    const bool isTrunc = (Node->getOpcode() == ISD::FCOPYSIGN);
    Results.push_back(DAG.getNode(ISD::FP_ROUND, dl, OVT,
                                  Tmp3, DAG.getIntPtrConstant(isTrunc, dl)));
    break;
  }
  case ISD::FFLOOR:
  case ISD::FCEIL:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case ISD::FROUND:
  case ISD::FROUNDEVEN:
  case ISD::FTRUNC:
  case ISD::FNEG:
  case ISD::FSQRT:
  case ISD::FSIN:
  case ISD::FCOS:
  case ISD::FLOG:
  case ISD::FLOG2:
  case ISD::FLOG10:
  case ISD::FABS:
  case ISD::FEXP:
  case ISD::FEXP2:
    // Unary FP ops: extend, operate in NVT, round back.
    Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1);
    Results.push_back(DAG.getNode(ISD::FP_ROUND, dl, OVT,
                                  Tmp2, DAG.getIntPtrConstant(0, dl)));
    break;
  case ISD::STRICT_FFLOOR:
  case ISD::STRICT_FCEIL:
  case ISD::STRICT_FSIN:
  case ISD::STRICT_FCOS:
  case ISD::STRICT_FLOG:
  case ISD::STRICT_FLOG10:
  case ISD::STRICT_FEXP:
    // Strict unary FP ops: same shape, threading the chain value through.
    Tmp1 = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {NVT, MVT::Other},
                       {Node->getOperand(0), Node->getOperand(1)});
    Tmp2 = DAG.getNode(Node->getOpcode(), dl, {NVT, MVT::Other},
                       {Tmp1.getValue(1), Tmp1});
    Tmp3 = DAG.getNode(ISD::STRICT_FP_ROUND, dl, {OVT, MVT::Other},
                       {Tmp2.getValue(1), Tmp2, DAG.getIntPtrConstant(0, dl)});
    Results.push_back(Tmp3);
    Results.push_back(Tmp3.getValue(1));
    break;
  case ISD::BUILD_VECTOR: {
    MVT EltVT = OVT.getVectorElementType();
    MVT NewEltVT = NVT.getVectorElementType();

    // Handle bitcasts to a different vector type with the same total bit size
    //
    // e.g. v2i64 = build_vector i64:x, i64:y => v4i32
    //  =>
    //  v4i32 = concat_vectors (v2i32 (bitcast i64:x)), (v2i32 (bitcast i64:y))

    assert(NVT.isVector() && OVT.getSizeInBits() == NVT.getSizeInBits() &&
           "Invalid promote type for build_vector");
    assert(NewEltVT.bitsLT(EltVT) && "not handled");

    MVT MidVT = getPromotedVectorElementType(TLI, EltVT, NewEltVT);

    SmallVector<SDValue, 8> NewOps;
    for (unsigned I = 0, E = Node->getNumOperands(); I != E; ++I) {
      SDValue Op = Node->getOperand(I);
      NewOps.push_back(DAG.getNode(ISD::BITCAST, SDLoc(Op), MidVT, Op));
    }

    SDLoc SL(Node);
    SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SL, NVT, NewOps);
    SDValue CvtVec = DAG.getNode(ISD::BITCAST, SL, OVT, Concat);
    Results.push_back(CvtVec);
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    MVT EltVT = OVT.getVectorElementType();
    MVT NewEltVT = NVT.getVectorElementType();

    // Handle bitcasts to a different vector type with the same total bit size.
    //
    // e.g. v2i64 = extract_vector_elt x:v2i64, y:i32
    //  =>
    //  v4i32:castx = bitcast x:v2i64
    //
    // i64 = bitcast
    //   (v2i32 build_vector (i32 (extract_vector_elt castx, (2 * y))),
    //                       (i32 (extract_vector_elt castx, (2 * y + 1)))
    //

    assert(NVT.isVector() && OVT.getSizeInBits() == NVT.getSizeInBits() &&
           "Invalid promote type for extract_vector_elt");
    assert(NewEltVT.bitsLT(EltVT) && "not handled");

    MVT MidVT = getPromotedVectorElementType(TLI, EltVT, NewEltVT);
    unsigned NewEltsPerOldElt = MidVT.getVectorNumElements();

    SDValue Idx = Node->getOperand(1);
    EVT IdxVT = Idx.getValueType();
    SDLoc SL(Node);
    // Index into the narrow vector: old index scaled by the split factor.
    SDValue Factor = DAG.getConstant(NewEltsPerOldElt, SL, IdxVT);
    SDValue NewBaseIdx = DAG.getNode(ISD::MUL, SL, IdxVT, Idx, Factor);

    SDValue CastVec = DAG.getNode(ISD::BITCAST, SL, NVT, Node->getOperand(0));

    SmallVector<SDValue, 8> NewOps;
    for (unsigned I = 0; I < NewEltsPerOldElt; ++I) {
      SDValue IdxOffset = DAG.getConstant(I, SL, IdxVT);
      SDValue TmpIdx = DAG.getNode(ISD::ADD, SL, IdxVT, NewBaseIdx, IdxOffset);

      SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, NewEltVT,
                                CastVec, TmpIdx);
      NewOps.push_back(Elt);
    }

    SDValue NewVec = DAG.getBuildVector(MidVT, SL, NewOps);
    Results.push_back(DAG.getNode(ISD::BITCAST, SL, EltVT, NewVec));
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    MVT EltVT = OVT.getVectorElementType();
    MVT NewEltVT = NVT.getVectorElementType();

    // Handle bitcasts to a different vector type with the same total bit size
    //
    // e.g. v2i64 = insert_vector_elt x:v2i64, y:i64, z:i32
    //  =>
    //  v4i32:castx = bitcast x:v2i64
    //  v2i32:casty = bitcast y:i64
    //
    // v2i64 = bitcast
    //   (v4i32 insert_vector_elt
    //       (v4i32 insert_vector_elt v4i32:castx,
    //                                (extract_vector_elt casty, 0), 2 * z),
    //        (extract_vector_elt casty, 1), (2 * z + 1))

    assert(NVT.isVector() && OVT.getSizeInBits() == NVT.getSizeInBits() &&
           "Invalid promote type for insert_vector_elt");
    assert(NewEltVT.bitsLT(EltVT) && "not handled");

    MVT MidVT = getPromotedVectorElementType(TLI, EltVT, NewEltVT);
    unsigned NewEltsPerOldElt = MidVT.getVectorNumElements();

    SDValue Val = Node->getOperand(1);
    SDValue Idx = Node->getOperand(2);
    EVT IdxVT = Idx.getValueType();
    SDLoc SL(Node);

    SDValue Factor = DAG.getConstant(NewEltsPerOldElt, SDLoc(), IdxVT);
    SDValue NewBaseIdx = DAG.getNode(ISD::MUL, SL, IdxVT, Idx, Factor);

    SDValue CastVec = DAG.getNode(ISD::BITCAST, SL, NVT, Node->getOperand(0));
    SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, MidVT, Val);

    // Insert each narrow piece of the cast value at consecutive positions.
    SDValue NewVec = CastVec;
    for (unsigned I = 0; I < NewEltsPerOldElt; ++I) {
      SDValue IdxOffset = DAG.getConstant(I, SL, IdxVT);
      SDValue InEltIdx = DAG.getNode(ISD::ADD, SL, IdxVT, NewBaseIdx, IdxOffset);

      SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, NewEltVT,
                                CastVal, IdxOffset);

      NewVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, NVT,
                           NewVec, Elt, InEltIdx);
    }

    Results.push_back(DAG.getNode(ISD::BITCAST, SL, OVT, NewVec));
    break;
  }
  case ISD::SCALAR_TO_VECTOR: {
    MVT EltVT = OVT.getVectorElementType();
    MVT NewEltVT = NVT.getVectorElementType();

    // Handle bitcasts to different vector type with the same total bit size.
    //
    // e.g. v2i64 = scalar_to_vector x:i64
    //   =>
    //  concat_vectors (v2i32 bitcast x:i64), (v2i32 undef)
    //

    MVT MidVT = getPromotedVectorElementType(TLI, EltVT, NewEltVT);
    SDValue Val = Node->getOperand(0);
    SDLoc SL(Node);

    SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, MidVT, Val);
    SDValue Undef = DAG.getUNDEF(MidVT);

    // First lane is the cast scalar; remaining lanes are undef.
    SmallVector<SDValue, 8> NewElts;
    NewElts.push_back(CastVal);
    for (unsigned I = 1, NElts = OVT.getVectorNumElements(); I != NElts; ++I)
      NewElts.push_back(Undef);

    SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SL, NVT, NewElts);
    SDValue CvtVec = DAG.getNode(ISD::BITCAST, SL, OVT, Concat);
    Results.push_back(CvtVec);
    break;
  }
  case ISD::ATOMIC_SWAP: {
    AtomicSDNode *AM = cast<AtomicSDNode>(Node);
    SDLoc SL(Node);
    SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, NVT, AM->getVal());
    assert(NVT.getSizeInBits() == OVT.getSizeInBits() &&
           "unexpected promotion type");
    assert(AM->getMemoryVT().getSizeInBits() == NVT.getSizeInBits() &&
           "unexpected atomic_swap with illegal type");

    SDValue NewAtomic
      = DAG.getAtomic(ISD::ATOMIC_SWAP, SL, NVT,
                      DAG.getVTList(NVT, MVT::Other),
                      { AM->getChain(), AM->getBasePtr(), CastVal },
                      AM->getMemOperand());
    Results.push_back(DAG.getNode(ISD::BITCAST, SL, OVT, NewAtomic));
    Results.push_back(NewAtomic.getValue(1));
    break;
  }
  }

  // Replace the original node with the legalized result.
  if (!Results.empty()) {
    LLVM_DEBUG(dbgs() << "Successfully promoted node\n");
    ReplaceNode(Node, Results.data());
  } else
    LLVM_DEBUG(dbgs() << "Could not promote node\n");
}

/// This is the entry point for the file.
void SelectionDAG::Legalize() {
  AssignTopologicalOrder();

  SmallPtrSet<SDNode *, 16> LegalizedNodes;
  // Use a delete listener to remove nodes which were deleted during
  // legalization from LegalizeNodes. This is needed to handle the situation
  // where a new node is allocated by the object pool to the same address of a
  // previously deleted node.
DAGNodeDeletedListener DeleteListener(
    *this,
    [&LegalizedNodes](SDNode *N, SDNode *E) { LegalizedNodes.erase(N); });

SelectionDAGLegalize Legalizer(*this, LegalizedNodes);

// Visit all the nodes. We start in topological order, so that we see
// nodes with their original operands intact. Legalization can produce
// new nodes which may themselves need to be legalized. Iterate until all
// nodes have been legalized.
while (true) {
  bool AnyLegalized = false;
  // Walk backwards (from last to first in topological order).
  for (auto NI = allnodes_end(); NI != allnodes_begin();) {
    --NI;

    SDNode *N = &*NI;
    if (N->use_empty() && N != getRoot().getNode()) {
      // Dead node: step the iterator off N before deleting it so the
      // iterator stays valid for the next --NI.
      ++NI;
      DeleteNode(N);
      continue;
    }

    // insert() returns true only the first time N is seen, so each node is
    // legalized at most once per fixpoint iteration.
    if (LegalizedNodes.insert(N).second) {
      AnyLegalized = true;
      Legalizer.LegalizeOp(N);

      if (N->use_empty() && N != getRoot().getNode()) {
        // Legalization left N dead; same iterator-advance trick as above.
        ++NI;
        DeleteNode(N);
      }
    }
  }
  // Fixpoint reached: a full pass legalized nothing new.
  if (!AnyLegalized)
    break;

}

// Remove dead nodes now.
RemoveDeadNodes();
}

// Legalize a single node and any nodes it recursively creates/visits,
// recording every node touched in UpdatedNodes.  The return value appears to
// indicate whether N itself survived (it is still present in the legalized
// set) rather than having been erased — TODO confirm against
// SelectionDAGLegalize::ReplaceNode's interaction with LegalizedNodes.
bool SelectionDAG::LegalizeOp(SDNode *N,
                              SmallSetVector<SDNode *, 16> &UpdatedNodes) {
  SmallPtrSet<SDNode *, 16> LegalizedNodes;
  SelectionDAGLegalize Legalizer(*this, LegalizedNodes, &UpdatedNodes);

  // Directly insert the node in question, and legalize it. This will recurse
  // as needed through operands.
  LegalizedNodes.insert(N);
  Legalizer.LegalizeOp(N);

  return LegalizedNodes.count(N);
}
// Copyright (c) 2011-2013 The Bitcoin developers // Distributed under the MIT/X11 software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) #include "config/localtrade-config.h" #endif #include "optionsdialog.h" #include "ui_optionsdialog.h" #include "bitcoinunits.h" #include "guiutil.h" #include "obfuscation.h" #include "optionsmodel.h" #include "main.h" // for MAX_SCRIPTCHECK_THREADS #include "netbase.h" #include "txdb.h" // for -dbcache defaults #ifdef ENABLE_WALLET #include "wallet.h" // for CWallet::minTxFee #endif #include <boost/thread.hpp> #include <QDataWidgetMapper> #include <QDir> #include <QIntValidator> #include <QLocale> #include <QMessageBox> #include <QTimer> OptionsDialog::OptionsDialog(QWidget* parent, bool enableWallet) : QDialog(parent), ui(new Ui::OptionsDialog), model(0), mapper(0), fProxyIpValid(true) { ui->setupUi(this); GUIUtil::restoreWindowGeometry("nOptionsDialogWindow", this->size(), this); /* Main elements init */ ui->databaseCache->setMinimum(nMinDbCache); ui->databaseCache->setMaximum(nMaxDbCache); ui->threadsScriptVerif->setMinimum(-(int)boost::thread::hardware_concurrency()); ui->threadsScriptVerif->setMaximum(MAX_SCRIPTCHECK_THREADS); /* Network elements init */ #ifndef USE_UPNP ui->mapPortUpnp->setEnabled(false); #endif ui->proxyIp->setEnabled(false); ui->proxyPort->setEnabled(false); ui->proxyPort->setValidator(new QIntValidator(1, 65535, this)); connect(ui->connectSocks, SIGNAL(toggled(bool)), ui->proxyIp, SLOT(setEnabled(bool))); connect(ui->connectSocks, SIGNAL(toggled(bool)), ui->proxyPort, SLOT(setEnabled(bool))); ui->proxyIp->installEventFilter(this); /* Window elements init */ #ifdef Q_OS_MAC /* remove Window tab on Mac */ ui->tabWidget->removeTab(ui->tabWidget->indexOf(ui->tabWindow)); #endif /* remove Wallet tab in case of -disablewallet */ if (!enableWallet) { ui->tabWidget->removeTab(ui->tabWidget->indexOf(ui->tabWallet)); } /* Display 
elements init */ /* Number of displayed decimal digits selector */ QString digits; for (int index = 2; index <= 8; index++) { digits.setNum(index); ui->digits->addItem(digits, digits); } /* Theme selector static themes */ ui->theme->addItem(QString("Default"), QVariant("default")); /* Preferred Zerocoin Denominations */ ui->preferredDenom->addItem(QString(tr("I don't care")), QVariant("0")); ui->preferredDenom->addItem(QString("1"), QVariant("1")); ui->preferredDenom->addItem(QString("5"), QVariant("5")); ui->preferredDenom->addItem(QString("10"), QVariant("10")); ui->preferredDenom->addItem(QString("50"), QVariant("50")); ui->preferredDenom->addItem(QString("100"), QVariant("100")); ui->preferredDenom->addItem(QString("500"), QVariant("500")); ui->preferredDenom->addItem(QString("1000"), QVariant("1000")); ui->preferredDenom->addItem(QString("5000"), QVariant("5000")); /* Theme selector external themes */ boost::filesystem::path pathAddr = GetDataDir() / "themes"; QDir dir(pathAddr.string().c_str()); dir.setFilter(QDir::Dirs | QDir::NoSymLinks | QDir::NoDotAndDotDot); QFileInfoList list = dir.entryInfoList(); for (int i = 0; i < list.size(); ++i) { QFileInfo fileInfo = list.at(i); ui->theme->addItem(fileInfo.fileName(), QVariant(fileInfo.fileName())); } /* Language selector */ QDir translations(":translations"); ui->lang->addItem(QString("(") + tr("default") + QString(")"), QVariant("")); foreach (const QString& langStr, translations.entryList()) { QLocale locale(langStr); /** check if the locale name consists of 2 parts (language_country) */ if (langStr.contains("_")) { #if QT_VERSION >= 0x040800 /** display language strings as "native language - native country (locale name)", e.g. "Deutsch - Deutschland (de)" */ ui->lang->addItem(locale.nativeLanguageName() + QString(" - ") + locale.nativeCountryName() + QString(" (") + langStr + QString(")"), QVariant(langStr)); #else /** display language strings as "language - country (locale name)", e.g. 
"German - Germany (de)" */ ui->lang->addItem(QLocale::languageToString(locale.language()) + QString(" - ") + QLocale::countryToString(locale.country()) + QString(" (") + langStr + QString(")"), QVariant(langStr)); #endif } else { #if QT_VERSION >= 0x040800 /** display language strings as "native language (locale name)", e.g. "Deutsch (de)" */ ui->lang->addItem(locale.nativeLanguageName() + QString(" (") + langStr + QString(")"), QVariant(langStr)); #else /** display language strings as "language (locale name)", e.g. "German (de)" */ ui->lang->addItem(QLocale::languageToString(locale.language()) + QString(" (") + langStr + QString(")"), QVariant(langStr)); #endif } } #if QT_VERSION >= 0x040700 ui->thirdPartyTxUrls->setPlaceholderText("https://example.com/tx/%s"); #endif ui->unit->setModel(new BitcoinUnits(this)); /* Widget-to-option mapper */ mapper = new QDataWidgetMapper(this); mapper->setSubmitPolicy(QDataWidgetMapper::ManualSubmit); mapper->setOrientation(Qt::Vertical); /* setup/change UI elements when proxy IP is invalid/valid */ connect(this, SIGNAL(proxyIpChecks(QValidatedLineEdit*, int)), this, SLOT(doProxyIpChecks(QValidatedLineEdit*, int))); } OptionsDialog::~OptionsDialog() { GUIUtil::saveWindowGeometry("nOptionsDialogWindow", this); delete ui; } void OptionsDialog::setModel(OptionsModel* model) { this->model = model; if (model) { /* check if client restart is needed and show persistent message */ if (model->isRestartRequired()) showRestartWarning(true); QString strLabel = model->getOverriddenByCommandLine(); if (strLabel.isEmpty()) strLabel = tr("none"); ui->overriddenByCommandLineLabel->setText(strLabel); mapper->setModel(model); setMapper(); mapper->toFirst(); } /* warn when one of the following settings changes by user action (placed here so init via mapper doesn't trigger them) */ /* Main */ connect(ui->databaseCache, SIGNAL(valueChanged(int)), this, SLOT(showRestartWarning())); connect(ui->threadsScriptVerif, SIGNAL(valueChanged(int)), this, 
SLOT(showRestartWarning())); /* Wallet */ connect(ui->spendZeroConfChange, SIGNAL(clicked(bool)), this, SLOT(showRestartWarning())); /* Network */ connect(ui->allowIncoming, SIGNAL(clicked(bool)), this, SLOT(showRestartWarning())); connect(ui->connectSocks, SIGNAL(clicked(bool)), this, SLOT(showRestartWarning())); /* Display */ connect(ui->digits, SIGNAL(valueChanged()), this, SLOT(showRestartWarning())); connect(ui->theme, SIGNAL(valueChanged()), this, SLOT(showRestartWarning())); connect(ui->lang, SIGNAL(valueChanged()), this, SLOT(showRestartWarning())); connect(ui->thirdPartyTxUrls, SIGNAL(textChanged(const QString&)), this, SLOT(showRestartWarning())); connect(ui->showMasternodesTab, SIGNAL(clicked(bool)), this, SLOT(showRestartWarning())); } void OptionsDialog::setMapper() { /* Main */ mapper->addMapping(ui->bitcoinAtStartup, OptionsModel::StartAtStartup); mapper->addMapping(ui->threadsScriptVerif, OptionsModel::ThreadsScriptVerif); mapper->addMapping(ui->databaseCache, OptionsModel::DatabaseCache); // Zerocoin mint percentage mapper->addMapping(ui->zeromintPercentage, OptionsModel::ZeromintPercentage); // Zerocoin preferred denomination mapper->addMapping(ui->preferredDenom, OptionsModel::ZeromintPrefDenom); /* Wallet */ mapper->addMapping(ui->spendZeroConfChange, OptionsModel::SpendZeroConfChange); mapper->addMapping(ui->coinControlFeatures, OptionsModel::CoinControlFeatures); /* Network */ mapper->addMapping(ui->mapPortUpnp, OptionsModel::MapPortUPnP); mapper->addMapping(ui->allowIncoming, OptionsModel::Listen); mapper->addMapping(ui->connectSocks, OptionsModel::ProxyUse); mapper->addMapping(ui->proxyIp, OptionsModel::ProxyIP); mapper->addMapping(ui->proxyPort, OptionsModel::ProxyPort); /* Window */ #ifndef Q_OS_MAC mapper->addMapping(ui->minimizeToTray, OptionsModel::MinimizeToTray); mapper->addMapping(ui->minimizeOnClose, OptionsModel::MinimizeOnClose); #endif /* Display */ mapper->addMapping(ui->digits, OptionsModel::Digits); 
mapper->addMapping(ui->theme, OptionsModel::Theme); mapper->addMapping(ui->theme, OptionsModel::Theme); mapper->addMapping(ui->lang, OptionsModel::Language); mapper->addMapping(ui->unit, OptionsModel::DisplayUnit); mapper->addMapping(ui->thirdPartyTxUrls, OptionsModel::ThirdPartyTxUrls); /* Masternode Tab */ mapper->addMapping(ui->showMasternodesTab, OptionsModel::ShowMasternodesTab); } void OptionsDialog::enableOkButton() { /* prevent enabling of the OK button when data modified, if there is an invalid proxy address present */ if (fProxyIpValid) setOkButtonState(true); } void OptionsDialog::disableOkButton() { setOkButtonState(false); } void OptionsDialog::setOkButtonState(bool fState) { ui->okButton->setEnabled(fState); } void OptionsDialog::on_resetButton_clicked() { if (model) { // confirmation dialog QMessageBox::StandardButton btnRetVal = QMessageBox::question(this, tr("Confirm options reset"), tr("Client restart required to activate changes.") + "<br><br>" + tr("Client will be shutdown, do you want to proceed?"), QMessageBox::Yes | QMessageBox::Cancel, QMessageBox::Cancel); if (btnRetVal == QMessageBox::Cancel) return; /* reset all options and close GUI */ model->Reset(); QApplication::quit(); } } void OptionsDialog::on_okButton_clicked() { mapper->submit(); obfuScationPool.cachedNumBlocks = std::numeric_limits<int>::max(); pwalletMain->MarkDirty(); accept(); } void OptionsDialog::on_cancelButton_clicked() { reject(); } void OptionsDialog::showRestartWarning(bool fPersistent) { ui->statusLabel->setStyleSheet("QLabel { color: red; }"); if (fPersistent) { ui->statusLabel->setText(tr("Client restart required to activate changes.")); } else { ui->statusLabel->setText(tr("This change would require a client restart.")); // clear non-persistent status label after 10 seconds // Todo: should perhaps be a class attribute, if we extend the use of statusLabel QTimer::singleShot(10000, this, SLOT(clearStatusLabel())); } } void OptionsDialog::clearStatusLabel() { 
ui->statusLabel->clear(); } void OptionsDialog::doProxyIpChecks(QValidatedLineEdit* pUiProxyIp, int nProxyPort) { Q_UNUSED(nProxyPort); const std::string strAddrProxy = pUiProxyIp->text().toStdString(); CService addrProxy; /* Check for a valid IPv4 / IPv6 address */ if (!(fProxyIpValid = LookupNumeric(strAddrProxy.c_str(), addrProxy))) { disableOkButton(); pUiProxyIp->setValid(false); ui->statusLabel->setStyleSheet("QLabel { color: red; }"); ui->statusLabel->setText(tr("The supplied proxy address is invalid.")); } else { enableOkButton(); ui->statusLabel->clear(); } } bool OptionsDialog::eventFilter(QObject* object, QEvent* event) { if (event->type() == QEvent::FocusOut) { if (object == ui->proxyIp) { emit proxyIpChecks(ui->proxyIp, ui->proxyPort->text().toInt()); } } return QDialog::eventFilter(object, event); }
// Copyright (c) Microsoft Corporation. All rights reserved.
// SPDX-License-Identifier: MIT

// Unit tests for Azure::Core::Context: key/value propagation through child
// contexts, cancellation (manual and deadline-based), and Key identity.

#include <gtest/gtest.h>

#include <azure/core/context.hpp>

#include <chrono>
#include <memory>
#include <string>
#include <thread>
#include <vector>

using namespace Azure::Core;

// A fresh context holds no values: TryGetValue fails and leaves the out
// parameter untouched.
TEST(Context, Basic)
{
  Context context;
  Context::Key const key;

  int placeholder = -15;
  EXPECT_FALSE(context.TryGetValue(key, placeholder));
  EXPECT_EQ(placeholder, -15);
}

// Values added via WithValue are visible only on the derived context, not on
// its ancestors.
TEST(Context, BasicBool)
{
  Context context;
  Context::Key const key;

  // New context from previous
  auto c2 = context.WithValue(key, true);
  bool value;
  EXPECT_TRUE(c2.TryGetValue<bool>(key, value));
  EXPECT_TRUE(value == true);

  Context::Key const anotherKey;
  auto c3 = c2.WithValue(anotherKey, std::make_shared<bool>(true));
  std::shared_ptr<bool> sharedPtrBool;
  EXPECT_FALSE(c2.TryGetValue<std::shared_ptr<bool>>(anotherKey, sharedPtrBool));
  EXPECT_FALSE(sharedPtrBool);

  EXPECT_TRUE(c3.TryGetValue(anotherKey, sharedPtrBool));
  EXPECT_TRUE(sharedPtrBool);
  EXPECT_TRUE(*sharedPtrBool);
}

// Integer values round-trip through a child context.
TEST(Context, BasicInt)
{
  Context context;
  Context::Key const key;

  // New context from previous
  auto c2 = context.WithValue(key, 123);
  int value;
  EXPECT_TRUE(c2.TryGetValue<int>(key, value));
  EXPECT_TRUE(value == 123);
}

// std::string values round-trip through a child context.
TEST(Context, BasicStdString)
{
  std::string s("Test String");

  Context context;
  Context::Key const key;

  // New context from previous
  auto c2 = context.WithValue(key, s);
  std::string value;
  EXPECT_TRUE(c2.TryGetValue<std::string>(key, value));
  EXPECT_TRUE(value == s);
}

// const char* values are stored by pointer: the retrieved pointer is the
// exact pointer that was stored.
TEST(Context, BasicChar)
{
  const char* str = "Test String";
  std::string s(str);

  Context context;
  Context::Key const key;

  // New context from previous
  auto c2 = context.WithValue(key, str);
  const char* value;
  EXPECT_TRUE(c2.TryGetValue<const char*>(key, value));
  EXPECT_TRUE(value == s);
  EXPECT_TRUE(value == str);
}

// A context with a deadline reports cancelled once the deadline has passed.
TEST(Context, IsCancelled)
{
  auto duration = std::chrono::milliseconds(250);
  auto deadline = std::chrono::system_clock::now() + duration;

  Context context;
  auto c2 = context.WithDeadline(deadline);
  EXPECT_FALSE(c2.IsCancelled());
  std::this_thread::sleep_for(duration);
  EXPECT_TRUE(c2.IsCancelled());
}

// Cancellation via a deadline on one branch does not affect sibling/parent
// contexts, and values remain readable independent of cancellation state.
TEST(Context, NestedIsCancelled)
{
  auto duration = std::chrono::milliseconds(250);
  auto deadline = std::chrono::system_clock::now() + duration;

  Context context;
  Context::Key const key;

  std::string actualValue = "Value";
  auto c2 = context.WithValue(key, actualValue);
  EXPECT_FALSE(c2.IsCancelled());

  std::string value = "a";
  EXPECT_TRUE(c2.TryGetValue<std::string>(key, value));
  EXPECT_EQ(value, "Value");

  value = "temp";
  EXPECT_FALSE(context.TryGetValue<std::string>(key, value));
  EXPECT_EQ(value, "temp");

  auto c3 = context.WithDeadline(deadline);
  EXPECT_FALSE(context.IsCancelled());
  EXPECT_FALSE(c2.IsCancelled());
  EXPECT_FALSE(c3.IsCancelled());

  std::this_thread::sleep_for(duration);

  EXPECT_FALSE(context.IsCancelled());
  EXPECT_FALSE(c2.IsCancelled());
  EXPECT_TRUE(c3.IsCancelled());

  value = "b";
  EXPECT_TRUE(c2.TryGetValue<std::string>(key, value));
  EXPECT_EQ(value, "Value");
  EXPECT_FALSE(context.TryGetValue<std::string>(key, value));
  EXPECT_FALSE(c3.TryGetValue<std::string>(key, value));
}

// Explicit Cancel() affects only the cancelled context; values it holds stay
// retrievable afterwards.
TEST(Context, CancelWithValue)
{
  Context context;
  Context::Key const key;

  std::string actualValue = "Value";
  auto c2 = context.WithValue(key, actualValue);
  EXPECT_FALSE(context.IsCancelled());
  EXPECT_FALSE(c2.IsCancelled());

  std::string value = "a";
  EXPECT_TRUE(c2.TryGetValue<std::string>(key, value));
  EXPECT_EQ(value, "Value");
  EXPECT_FALSE(context.TryGetValue<std::string>(key, value));

  c2.Cancel();
  EXPECT_TRUE(c2.IsCancelled());
  EXPECT_FALSE(context.IsCancelled());

  EXPECT_TRUE(c2.TryGetValue<std::string>(key, value));
  EXPECT_EQ(value, "Value");
  EXPECT_FALSE(context.TryGetValue<std::string>(key, value));
}

// ThrowIfCancelled is a no-op before the deadline and throws
// OperationCancelledException after it.
TEST(Context, ThrowIfCancelled)
{
  auto duration = std::chrono::milliseconds(250);
  auto deadline = std::chrono::system_clock::now() + duration;

  Context context;
  auto c2 = context.WithDeadline(deadline);
  EXPECT_NO_THROW(c2.ThrowIfCancelled());
  std::this_thread::sleep_for(duration);
  EXPECT_THROW(c2.ThrowIfCancelled(), Azure::Core::OperationCancelledException);
}

// Every ancestor's value stays visible from the deepest descendant of a long
// WithValue chain.
TEST(Context, Chain)
{
  Context context;
  Context::Key const key2;
  Context::Key const key3;
  Context::Key const key4;
  Context::Key const key5;
  Context::Key const key6;
  Context::Key const key7;
  Context::Key const keyFinal;

  // New context from previous
  auto c2 = context.WithValue(key2, 123);
  auto c3 = c2.WithValue(key3, 456);
  auto c4 = c3.WithValue(key4, 789);
  auto c5 = c4.WithValue(key5, "5");
  auto c6 = c5.WithValue(key6, "6");
  auto c7 = c6.WithValue(key7, "7");
  auto finalContext = c7.WithValue(keyFinal, "Final");

  int valueT2;
  EXPECT_TRUE(finalContext.TryGetValue<int>(key2, valueT2));
  int valueT3;
  EXPECT_TRUE(finalContext.TryGetValue<int>(key3, valueT3));
  int valueT4;
  EXPECT_TRUE(finalContext.TryGetValue<int>(key4, valueT4));
  const char* valueT5;
  EXPECT_TRUE(finalContext.TryGetValue<const char*>(key5, valueT5));
  const char* valueT6;
  EXPECT_TRUE(finalContext.TryGetValue<const char*>(key6, valueT6));
  const char* valueT7;
  EXPECT_TRUE(finalContext.TryGetValue<const char*>(key7, valueT7));
  const char* valueT8;
  EXPECT_TRUE(finalContext.TryGetValue<const char*>(keyFinal, valueT8));

  EXPECT_TRUE(valueT2 == 123);
  EXPECT_TRUE(valueT3 == 456);
  EXPECT_TRUE(valueT4 == 789);
  EXPECT_TRUE(valueT5 == std::string("5"));
  EXPECT_TRUE(valueT6 == std::string("6"));
  EXPECT_TRUE(valueT7 == std::string("7"));
  EXPECT_TRUE(valueT8 == std::string("Final"));
}

// Re-adding the same Key in a child shadows the parent's value without
// modifying the parent context.
TEST(Context, MatchingKeys)
{
  Context context;
  Context::Key const key;

  // New context from previous
  auto c2 = context.WithValue(key, 123);
  auto c3 = c2.WithValue(key, 456);

  int valueT2;
  EXPECT_TRUE(c2.TryGetValue<int>(key, valueT2));
  int valueT3;
  EXPECT_TRUE(c3.TryGetValue<int>(key, valueT3));

  EXPECT_TRUE(valueT2 == 123);
  EXPECT_TRUE(valueT3 == 456);
}

// Simple aggregate used to exercise storage of user-defined types.
struct SomeStructForContext
{
  int someField = 12345;
};

// User-defined struct values are stored and retrieved by copy.
TEST(Context, InstanceValue)
{
  Context::Key const key;
  auto contextP = Context::GetApplicationContext().WithValue(key, SomeStructForContext());
  SomeStructForContext contextValueRef;
  EXPECT_TRUE(contextP.TryGetValue<SomeStructForContext>(key, contextValueRef));
  EXPECT_EQ(contextValueRef.someField, 12345);
}

// Raw pointers are stored as-is: the retrieved pointer aliases the original
// object.
TEST(Context, Ptr)
{
  Context::Key const key;
  SomeStructForContext value;
  auto contextP = Context::GetApplicationContext().WithValue(key, &value);
  SomeStructForContext* contextValueRef;
  EXPECT_TRUE(contextP.TryGetValue<SomeStructForContext*>(key, contextValueRef));
  EXPECT_EQ(contextValueRef->someField, 12345);
  EXPECT_EQ(&value, contextValueRef);
}

// A shared_ptr stored in a context participates in ownership (use_count goes
// up) and is released when the context is destroyed.
TEST(Context, NestedClassPtr)
{
  // Counts live instances via an external counter so the test can observe
  // construction/destruction.
  class TestClass {
  private:
    int* m_instanceCount;

  public:
    TestClass(int* instanceCount) : m_instanceCount(instanceCount) { ++(*m_instanceCount); }

    ~TestClass() { --(*m_instanceCount); }
  };

  int instanceCount = 0;
  {
    auto sharedPtr = std::make_shared<TestClass>(&instanceCount);
    EXPECT_EQ(sharedPtr.use_count(), 1);

    Context::Key const key;
    auto context = Context::GetApplicationContext().WithValue(key, sharedPtr);
    EXPECT_EQ(sharedPtr.use_count(), 2);

    std::shared_ptr<TestClass> foundPtr;
    EXPECT_TRUE(context.TryGetValue(key, foundPtr));
    EXPECT_EQ(foundPtr.get(), sharedPtr.get());
    EXPECT_EQ(instanceCount, 1);
    EXPECT_EQ(sharedPtr.use_count(), 3);
  }
  // Verify that context calls the destructor of shared_ptr it is holding
  EXPECT_EQ(instanceCount, 0);
}

// Ancestor contexts are kept alive (shared ownership) as long as any
// descendant exists, even after the ancestor variables go out of scope.
TEST(Context, HeapLinkIntegrity)
{
  std::string value = "z";
  Context::Key const a;
  Context::Key const b;
  Context::Key const c;
  Context::Key const d;
  Context::Key const e;

  Context thirdGeneration; // To be used at the end
  {
    Context root;
    auto firstGeneration = root.WithValue(a, std::string("a"));
    EXPECT_TRUE(firstGeneration.TryGetValue<std::string>(a, value));
    EXPECT_EQ(value, "a");

    auto secondGeneration = firstGeneration.WithValue(b, std::string("b"));
    EXPECT_TRUE(secondGeneration.TryGetValue<std::string>(a, value));
    EXPECT_EQ(value, "a");
    EXPECT_TRUE(secondGeneration.TryGetValue<std::string>(b, value));
    EXPECT_EQ(value, "b");

    // Now overide the generation
    secondGeneration = secondGeneration.WithValue(c, std::string("c"));
    EXPECT_TRUE(secondGeneration.TryGetValue<std::string>(a, value));
    EXPECT_EQ(value, "a"); // Still know about first gen - The link is still in heap
    EXPECT_TRUE(secondGeneration.TryGetValue<std::string>(b, value));
    EXPECT_EQ(
        value, "b"); // Still knows about the initial second gen, as a shared_ptr, it is still on heap
    EXPECT_TRUE(secondGeneration.TryGetValue<std::string>(c, value));
    EXPECT_EQ(value, "c"); // Check new value

    // One more override
    secondGeneration = secondGeneration.WithValue(d, std::string("d"));
    EXPECT_TRUE(secondGeneration.TryGetValue<std::string>(a, value));
    EXPECT_EQ(value, "a");
    EXPECT_TRUE(secondGeneration.TryGetValue<std::string>(b, value));
    EXPECT_EQ(value, "b");
    EXPECT_TRUE(secondGeneration.TryGetValue<std::string>(c, value));
    EXPECT_EQ(value, "c");
    EXPECT_TRUE(secondGeneration.TryGetValue<std::string>(d, value));
    EXPECT_EQ(value, "d");

    // New Gen
    thirdGeneration = secondGeneration.WithValue(e, std::string("e"));
  }
  // Went out of scope, root and secondGeneration are destroyed. but should remain in heap for the
  // third-generation since the previous geneations are still alive inside his heart <3.
  EXPECT_TRUE(thirdGeneration.TryGetValue<std::string>(a, value));
  EXPECT_EQ(value, "a");
  EXPECT_TRUE(thirdGeneration.TryGetValue<std::string>(b, value));
  EXPECT_EQ(value, "b");
  EXPECT_TRUE(thirdGeneration.TryGetValue<std::string>(c, value));
  EXPECT_EQ(value, "c");
  EXPECT_TRUE(thirdGeneration.TryGetValue<std::string>(d, value));
  EXPECT_EQ(value, "d");
  EXPECT_TRUE(thirdGeneration.TryGetValue<std::string>(e, value));
  EXPECT_EQ(value, "e");
}

// Keys at namespace scope, used to verify identity semantics below.
Context::Key const GlobalKey1;
Context::Key const GlobalKey2;

namespace {
Context::Key const UnnamedNamespaceKey1;
Context::Key const UnnamedNamespaceKey2;
} // namespace

// Keys compare by identity: each distinct Key object (global, anonymous
// namespace, or local) is unique, and copies compare equal to the original.
TEST(Context, KeyComparison)
{
  EXPECT_EQ(GlobalKey1, GlobalKey1);
  EXPECT_EQ(GlobalKey2, GlobalKey2);
  EXPECT_NE(GlobalKey1, GlobalKey2);
  EXPECT_NE(GlobalKey2, GlobalKey1);

  EXPECT_EQ(UnnamedNamespaceKey1, UnnamedNamespaceKey1);
  EXPECT_EQ(UnnamedNamespaceKey2, UnnamedNamespaceKey2);
  EXPECT_NE(UnnamedNamespaceKey1, UnnamedNamespaceKey2);
  EXPECT_NE(UnnamedNamespaceKey2, UnnamedNamespaceKey1);

  Context::Key const localKey1;
  Context::Key const localKey2;

  EXPECT_EQ(localKey1, localKey1);
  EXPECT_EQ(localKey2, localKey2);
  EXPECT_NE(localKey1, localKey2);
  EXPECT_NE(localKey2, localKey1);

  Context::Key const localKey1Copy = localKey1;
  Context::Key const localKey2Copy = localKey2;

  EXPECT_EQ(localKey1Copy, localKey1Copy);
  EXPECT_EQ(localKey2Copy, localKey2Copy);
  EXPECT_NE(localKey1Copy, localKey2Copy);
  EXPECT_NE(localKey2Copy, localKey1Copy);

  EXPECT_EQ(localKey1, localKey1Copy);
  EXPECT_EQ(localKey2, localKey2Copy);
  EXPECT_EQ(localKey1Copy, localKey1);
  EXPECT_EQ(localKey2Copy, localKey2);

  EXPECT_NE(localKey1, localKey2Copy);
  EXPECT_NE(localKey2, localKey1Copy);
  EXPECT_NE(localKey1Copy, localKey2);
  EXPECT_NE(localKey2Copy, localKey1);
}

// GetDeadline: max() when none set, min() when cancelled, the configured
// deadline otherwise; the deadline is inherited through WithValue children,
// and cancellation of an ancestor wins over a later deadline.
TEST(Context, Deadline)
{
  auto const deadline = Azure::DateTime(2021, 4, 1, 23, 45, 15);
  Context::Key const key1;
  Context::Key const key2;

  {
    Context ctx;
    EXPECT_EQ(ctx.GetDeadline(), Azure::DateTime::max());

    ctx.Cancel();
    EXPECT_EQ(ctx.GetDeadline(), Azure::DateTime::min());
  }

  {
    Context ctx;
    ctx = ctx.WithDeadline(deadline);
    EXPECT_EQ(ctx.GetDeadline(), deadline);
  }

  {
    Context ctx;
    auto childCtx = ctx.WithDeadline(deadline).WithValue(key1, "val").WithValue(key2, "val2");
    EXPECT_EQ(childCtx.GetDeadline(), deadline);
  }

  {
    Context ctx;
    ctx.Cancel();
    auto childCtx = ctx.WithDeadline(deadline).WithValue(key1, "val").WithValue(key2, "val2");
    EXPECT_EQ(childCtx.GetDeadline(), Azure::DateTime::min());
  }
}
// // Copyright (c) 2019 Marat Abrarov (abrarov@gmail.com) // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #if defined(WIN32) #include <tchar.h> #endif #include <cstdlib> #include <string> #include <iostream> #include <brackets_gen_lib.hpp> #if defined(WIN32) int _tmain(int /*argc*/, _TCHAR* /*argv*/[]) #else int main(int /*argc*/, char* /*argv*/[]) #endif { try { std::size_t pair_number; std::cin >> pair_number; yatest::gen_brackets(pair_number, [](const std::string& s) { std::cout << s << '\n'; }); return EXIT_SUCCESS; } catch (const std::exception& e) { std::cerr << "Error: " << e.what() << std::endl; } catch (...) { std::cerr << "Unknown error" << std::endl; } return EXIT_FAILURE; } // main
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
*  http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/

// Generated-style JSON (de)serialization for the MediaLive
// RtmpGroupSettings model: each field carries a companion "HasBeenSet"
// flag so that only explicitly provided fields are round-tripped.

#include <aws/medialive/model/RtmpGroupSettings.h>
#include <aws/core/utils/json/JsonSerializer.h>

#include <utility>

using namespace Aws::Utils::Json;
using namespace Aws::Utils;

namespace Aws
{
namespace MediaLive
{
namespace Model
{

// Default constructor: all enums NOT_SET, all numerics 0, no field marked set.
RtmpGroupSettings::RtmpGroupSettings() :
    m_authenticationScheme(AuthenticationScheme::NOT_SET),
    m_authenticationSchemeHasBeenSet(false),
    m_cacheFullBehavior(RtmpCacheFullBehavior::NOT_SET),
    m_cacheFullBehaviorHasBeenSet(false),
    m_cacheLength(0),
    m_cacheLengthHasBeenSet(false),
    m_captionData(RtmpCaptionData::NOT_SET),
    m_captionDataHasBeenSet(false),
    m_restartDelay(0),
    m_restartDelayHasBeenSet(false)
{
}

// JSON constructor: initializes to defaults, then delegates to operator=
// for field extraction.
RtmpGroupSettings::RtmpGroupSettings(JsonView jsonValue) :
    m_authenticationScheme(AuthenticationScheme::NOT_SET),
    m_authenticationSchemeHasBeenSet(false),
    m_cacheFullBehavior(RtmpCacheFullBehavior::NOT_SET),
    m_cacheFullBehaviorHasBeenSet(false),
    m_cacheLength(0),
    m_cacheLengthHasBeenSet(false),
    m_captionData(RtmpCaptionData::NOT_SET),
    m_captionDataHasBeenSet(false),
    m_restartDelay(0),
    m_restartDelayHasBeenSet(false)
{
  *this = jsonValue;
}

// Populates each member from the corresponding JSON key when present and
// flips its HasBeenSet flag; absent keys leave the member (and flag) alone.
RtmpGroupSettings& RtmpGroupSettings::operator =(JsonView jsonValue)
{
  if(jsonValue.ValueExists("authenticationScheme"))
  {
    // Enum fields are transported as strings and mapped via the generated
    // *Mapper helpers.
    m_authenticationScheme = AuthenticationSchemeMapper::GetAuthenticationSchemeForName(jsonValue.GetString("authenticationScheme"));

    m_authenticationSchemeHasBeenSet = true;
  }

  if(jsonValue.ValueExists("cacheFullBehavior"))
  {
    m_cacheFullBehavior = RtmpCacheFullBehaviorMapper::GetRtmpCacheFullBehaviorForName(jsonValue.GetString("cacheFullBehavior"));

    m_cacheFullBehaviorHasBeenSet = true;
  }

  if(jsonValue.ValueExists("cacheLength"))
  {
    m_cacheLength = jsonValue.GetInteger("cacheLength");

    m_cacheLengthHasBeenSet = true;
  }

  if(jsonValue.ValueExists("captionData"))
  {
    m_captionData = RtmpCaptionDataMapper::GetRtmpCaptionDataForName(jsonValue.GetString("captionData"));

    m_captionDataHasBeenSet = true;
  }

  if(jsonValue.ValueExists("restartDelay"))
  {
    m_restartDelay = jsonValue.GetInteger("restartDelay");

    m_restartDelayHasBeenSet = true;
  }

  return *this;
}

// Serializes only the fields whose HasBeenSet flag is true, mirroring the
// extraction logic in operator= above.
JsonValue RtmpGroupSettings::Jsonize() const
{
  JsonValue payload;

  if(m_authenticationSchemeHasBeenSet)
  {
   payload.WithString("authenticationScheme", AuthenticationSchemeMapper::GetNameForAuthenticationScheme(m_authenticationScheme));
  }

  if(m_cacheFullBehaviorHasBeenSet)
  {
   payload.WithString("cacheFullBehavior", RtmpCacheFullBehaviorMapper::GetNameForRtmpCacheFullBehavior(m_cacheFullBehavior));
  }

  if(m_cacheLengthHasBeenSet)
  {
   payload.WithInteger("cacheLength", m_cacheLength);
  }

  if(m_captionDataHasBeenSet)
  {
   payload.WithString("captionData", RtmpCaptionDataMapper::GetNameForRtmpCaptionData(m_captionData));
  }

  if(m_restartDelayHasBeenSet)
  {
   payload.WithInteger("restartDelay", m_restartDelay);
  }

  return payload;
}

} // namespace Model
} // namespace MediaLive
} // namespace Aws
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 *
 */

// Tests for quic state data structures: LossEvent bookkeeping and the
// PendingPathRateLimiter credit window.

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <quic/common/test/TestUtils.h>
#include <quic/state/StateData.h>

// NOTE(review): quoted include while the sibling quic headers above use
// angle brackets — consider normalizing the include style.
#include "quic/state/LossState.h"

using namespace quic;
using namespace testing;

constexpr QuicVersion kVersion = static_cast<QuicVersion>(0);

namespace quic {
namespace test {

class StateDataTest : public Test {};

// A default-constructed LossEvent carries no bytes and no packet number.
TEST_F(StateDataTest, EmptyLossEvent) {
  CongestionController::LossEvent loss;
  EXPECT_EQ(0, loss.lostBytes);
  EXPECT_FALSE(loss.largestLostPacketNum);
}

// Adding one lost packet records its size and packet number.
TEST_F(StateDataTest, SingleLostPacketEvent) {
  RegularQuicWritePacket packet(LongHeader(
      LongHeader::Types::Initial,
      getTestConnectionId(1),
      getTestConnectionId(),
      100,
      kVersion));
  OutstandingPacket outstandingPacket(
      packet, Clock::now(), 1234, 0, false, 1234, 0, 0, 0, LossState());
  CongestionController::LossEvent loss;
  loss.addLostPacket(outstandingPacket);
  EXPECT_EQ(1234, loss.lostBytes);
  EXPECT_EQ(100, *loss.largestLostPacketNum);
}

// Multiple lost packets accumulate bytes; the largest packet number wins.
TEST_F(StateDataTest, MultipleLostPacketsEvent) {
  RegularQuicWritePacket packet1(LongHeader(
      LongHeader::Types::Initial,
      getTestConnectionId(1),
      getTestConnectionId(),
      100,
      kVersion));
  OutstandingPacket outstandingPacket1(
      packet1, Clock::now(), 1234, 0, false, 1234, 0, 0, 0, LossState());
  RegularQuicWritePacket packet2(LongHeader(
      LongHeader::Types::Initial,
      getTestConnectionId(1),
      getTestConnectionId(),
      110,
      kVersion));
  OutstandingPacket outstandingPacket2(
      packet2, Clock::now(), 1357, 0, false, 1357, 0, 0, 0, LossState());
  CongestionController::LossEvent loss;
  loss.addLostPacket(outstandingPacket1);
  loss.addLostPacket(outstandingPacket2);
  EXPECT_EQ(1234 + 1357, loss.lostBytes);
  EXPECT_EQ(110, *loss.largestLostPacketNum);
}

// RTT used by the rate-limiter tests, in microseconds.
constexpr size_t kRtt{100000};

class PendingPathRateLimiterTest : public Test {
 public:
  void SetUp() override {
    now = std::chrono::steady_clock::now();
  }

 protected:
  QuicConnectionStateBase conn_{QuicNodeType::Server};
  PendingPathRateLimiter limiter_{conn_.udpSendPacketLen};
  // Full credit window: kMinCwndInMss packets of udpSendPacketLen bytes.
  size_t maxWindowBytes{kMinCwndInMss * conn_.udpSendPacketLen};
  TimePoint now;
};

// Initial credit equals the full window, scaled by the packet length the
// limiter was constructed with.
TEST_F(PendingPathRateLimiterTest, TestSetInitialCredit) {
  EXPECT_EQ(
      limiter_.currentCredit(now, std::chrono::microseconds{kRtt}),
      maxWindowBytes);
  conn_.udpSendPacketLen = 2000;
  PendingPathRateLimiter limiter2{conn_.udpSendPacketLen};
  EXPECT_EQ(
      limiter2.currentCredit(now, std::chrono::microseconds{kRtt}),
      kMinCwndInMss * 2000);
}

// Credit is not refreshed if no time has elapsed since the last send.
TEST_F(PendingPathRateLimiterTest, TestNoImmediateCreditRefresh) {
  EXPECT_EQ(
      limiter_.currentCredit(now, std::chrono::microseconds{kRtt}),
      maxWindowBytes);
  limiter_.onPacketSent(420);
  EXPECT_EQ(
      limiter_.currentCredit(now, std::chrono::microseconds{kRtt}),
      maxWindowBytes - 420);
}

// Credit refreshes only once strictly more than one RTT has passed.
TEST_F(PendingPathRateLimiterTest, TestBoundaryRttPassedCreditRefresh) {
  EXPECT_EQ(
      limiter_.currentCredit(now, std::chrono::microseconds{kRtt}),
      maxWindowBytes);
  limiter_.onPacketSent(420);
  auto halfRtt = std::chrono::microseconds(kRtt / 2);
  now += halfRtt;
  EXPECT_EQ(
      limiter_.currentCredit(now, std::chrono::microseconds{kRtt}),
      maxWindowBytes - 420);
  limiter_.onPacketSent(420);
  now += halfRtt;
  // Exactly one RTT after the first send: still not refreshed.
  EXPECT_EQ(
      limiter_.currentCredit(now, std::chrono::microseconds{kRtt}),
      maxWindowBytes - 840);
  now += std::chrono::microseconds(10);
  EXPECT_EQ(
      limiter_.currentCredit(now, std::chrono::microseconds{kRtt}),
      maxWindowBytes);
}

// Sends spaced more than an RTT apart always see a fully refreshed window.
TEST_F(PendingPathRateLimiterTest, TestCreditRefreshOnInfrequentSends) {
  auto delta = std::chrono::microseconds{kRtt} + std::chrono::microseconds{1000};
  EXPECT_EQ(
      limiter_.currentCredit(now, std::chrono::microseconds{kRtt}),
      maxWindowBytes);
  limiter_.onPacketSent(420);
  now += delta;
  EXPECT_EQ(
      limiter_.currentCredit(now, std::chrono::microseconds{kRtt}),
      maxWindowBytes);
  limiter_.onPacketSent(420);
  now += delta;
  EXPECT_EQ(
      limiter_.currentCredit(now, std::chrono::microseconds{kRtt}),
      maxWindowBytes);
}

} // namespace test
} // namespace quic
#include <bits/stdc++.h>
using namespace std;

// Reads t test cases; for each, reads an integer g and prints g/2
// (integer division, truncating toward zero) on its own line.
int main()
{
    // Untie C++ streams from C stdio for faster bulk I/O.
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);

    int t;
    // Fix: bail out instead of looping on an uninitialized count when the
    // first read fails.
    if (!(cin >> t))
        return 0;
    while (t--) {
        int g;
        cin >> g;
        // Fix: '\n' instead of endl — same output, no per-line flush.
        cout << g / 2 << '\n';
    }
    return 0;
}
#include <iostream>
#include <random>
#include <utility>
#include <vector>

#include "baldr/rapidjson_utils.h"
#include <boost/property_tree/ptree.hpp>

#include "baldr/json.h"
#include "loki/worker.h"
#include "midgard/distanceapproximator.h"
#include "midgard/encoded.h"
#include "midgard/logging.h"
#include "midgard/util.h"
#include "odin/worker.h"
#include "thor/worker.h"
#include "tyr/actor.h"
#include "worker.h"

#include "test.h"

using namespace valhalla;
using namespace valhalla::midgard;

namespace std {
// Stringify a PointLL as "[lon,lat]" (first == lon, second == lat) so the
// generic print() helper below can call std::to_string on shape points.
std::string to_string(const midgard::PointLL& p) {
  return "[" + to_string(p.first) + "," + to_string(p.second) + "]";
}
} // namespace std

namespace {

// Join the elements of any container into a comma-separated string with no
// trailing comma (empty container yields an empty string).
template <class container_t> std::string print(const container_t& container) {
  std::string output;
  for (const auto& e : container)
    output += std::to_string(e) + ",";
  if (container.size())
    output.pop_back();
  return output;
}

// Round val up to the next multiple of `multiple` (result returned as float,
// computed through integer truncation).
float round_up(float val, int multiple) {
  return int((val + multiple - 1) / multiple) * multiple;
}

// Serialize a shape plus per-point accuracies into a JSON "locations" array.
// When frequency > 0 each point also gets a "time" stamp spaced `frequency`
// seconds apart starting from start_time (default 08:00:00).
std::string to_locations(const std::vector<PointLL>& shape,
                         const std::vector<float>& accuracies,
                         int frequency = 0,
                         int start_time = 8 * 60 * 60) {
  std::string locations = "[";
  for (size_t i = 0; i < shape.size(); ++i) {
    // round accuracy up to nearest 5m
    int accuracy = round_up(accuracies[i] + 1.f, 5);
    std::string acc = R"(,"accuracy":)" + std::to_string(accuracy);
    // add this point on
    const auto& p = shape[i];
    locations += R"({"lat":)" + std::to_string(p.second) + R"(,"lon":)" +
                 std::to_string(p.first) + acc;
    // get the time component
    start_time += frequency;
    if (frequency > 0)
      locations += R"(,"time":)" + std::to_string(start_time);
    locations += "},";
  }
  // replace the trailing comma with the closing bracket
  locations.back() = ']';
  return locations;
}

// Parse a JSON string into a boost property tree via rapidjson.
boost::property_tree::ptree json_to_pt(const std::string& json) {
  std::stringstream ss;
  ss << json;
  boost::property_tree::ptree pt;
  rapidjson::read_json(ss, pt);
  return pt;
}

// fake config
const auto conf = json_to_pt(R"({ "mjolnir":{"tile_dir":"test/data/utrecht_tiles", "concurrency": 1}, "loki":{ 
"actions":["locate","route","sources_to_targets","optimized_route","isochrone","trace_route","trace_attributes"], "logging":{"long_request": 100}, "service_defaults":{"minimum_reachability": 50,"radius": 0,"search_cutoff": 35000, "node_snap_tolerance": 5, "street_side_tolerance": 5, "heading_tolerance": 60} }, "thor":{"logging":{"long_request": 110}}, "skadi":{"actons":["height"],"logging":{"long_request": 5}}, "meili":{"customizable": ["turn_penalty_factor","max_route_distance_factor","max_route_time_factor","search_radius"], "mode":"auto","grid":{"cache_size":100240,"size":500}, "default":{"beta":3,"breakage_distance":2000,"geometry":false,"gps_accuracy":5.0,"interpolation_distance":10, "max_route_distance_factor":5,"max_route_time_factor":5,"max_search_radius":200,"route":true, "search_radius":15.0,"sigma_z":4.07,"turn_penalty_factor":200}}, "service_limits": { "auto": {"max_distance": 5000000.0, "max_locations": 20,"max_matrix_distance": 400000.0,"max_matrix_locations": 50}, "auto_shorter": {"max_distance": 5000000.0,"max_locations": 20,"max_matrix_distance": 400000.0,"max_matrix_locations": 50}, "bicycle": {"max_distance": 500000.0,"max_locations": 50,"max_matrix_distance": 200000.0,"max_matrix_locations": 50}, "bus": {"max_distance": 5000000.0,"max_locations": 50,"max_matrix_distance": 400000.0,"max_matrix_locations": 50}, "hov": {"max_distance": 5000000.0,"max_locations": 20,"max_matrix_distance": 400000.0,"max_matrix_locations": 50}, "taxi": {"max_distance": 5000000.0,"max_locations": 20,"max_matrix_distance": 400000.0,"max_matrix_locations": 50}, "isochrone": {"max_contours": 4,"max_distance": 25000.0,"max_locations": 1,"max_time": 120}, "max_avoid_locations": 50,"max_radius": 200,"max_reachability": 100,"max_alternates":2, "multimodal": {"max_distance": 500000.0,"max_locations": 50,"max_matrix_distance": 0.0,"max_matrix_locations": 0}, "pedestrian": {"max_distance": 250000.0,"max_locations": 50,"max_matrix_distance": 200000.0,"max_matrix_locations": 
50,"max_transit_walking_distance": 10000,"min_transit_walking_distance": 1}, "skadi": {"max_shape": 750000,"min_resample": 10.0}, "trace": {"max_distance": 200000.0,"max_gps_accuracy": 100.0,"max_search_radius": 100,"max_shape": 16000,"max_best_paths":4,"max_best_paths_shape":100}, "transit": {"max_distance": 500000.0,"max_locations": 50,"max_matrix_distance": 200000.0,"max_matrix_locations": 50}, "truck": {"max_distance": 5000000.0,"max_locations": 20,"max_matrix_distance": 400000.0,"max_matrix_locations": 50} } })");
// NOTE(review): "actons" inside the "skadi" block above looks like a typo for
// "actions" -- it is a runtime string, so confirm whether skadi is exercised
// by these tests before changing it.

// Drives the full pipeline (loki -> thor -> odin) directly, without going
// through an actor, so tests can inspect the protobuf Api result.
struct api_tester {
  api_tester()
      : conf_(conf), reader(new valhalla::baldr::GraphReader(conf.get_child("mjolnir"))),
        loki_worker(conf, reader), thor_worker(conf, reader), odin_worker(conf) {
  }
  // Run a trace_route (map matching) request through all three workers.
  Api match(const std::string& request_json) {
    Api request;
    ParseApi(request_json, Options::trace_route, request);
    loki_worker.trace(request);
    thor_worker.trace_route(request);
    odin_worker.narrate(request);
    loki_worker.cleanup();
    thor_worker.cleanup();
    odin_worker.cleanup();
    return request;
  }
  // Run a plain routing request through all three workers.
  Api route(const std::string& request_json) {
    Api request;
    ParseApi(request_json, Options::route, request);
    loki_worker.route(request);
    thor_worker.route(request);
    odin_worker.narrate(request);
    loki_worker.cleanup();
    thor_worker.cleanup();
    odin_worker.cleanup();
    return request;
  }
  boost::property_tree::ptree conf_;
  std::shared_ptr<valhalla::baldr::GraphReader> reader;
  valhalla::loki::loki_worker_t loki_worker;
  valhalla::thor::thor_worker_t thor_worker;
  valhalla::odin::odin_worker_t odin_worker;
};

// JSON-escape a string and strip the surrounding quotes the serializer adds,
// so the result can be spliced into a hand-built JSON request.
std::string json_escape(const std::string& unescaped) {
  std::stringstream ss;
  baldr::json::OstreamVisitor v(ss);
  v(unescaped);
  std::string escaped = ss.str().substr(1);
  escaped.pop_back();
  return escaped;
}

int seed = 521; // fixed RNG seed so generated test cases are reproducible
int bound = 81; // number of generated cases test_matcher runs

// Build a random auto-route request with endpoints in/around Utrecht that are
// between 1km and 2km apart; endpoints are also returned via the out-params.
std::string make_test_case(PointLL& start, PointLL& end) {
  static std::mt19937 generator(seed);
  static std::uniform_real_distribution<float> distribution(0, 1);
  float distance = 0;
  do {
    // get two points in and around utrecht
    start = PointLL(5.0819f
// (continuation of make_test_case: finish picking the two random endpoints)
                    + .053f * distribution(generator),
                    52.0698f + .0334f * distribution(generator));
    end = PointLL(5.0819f + .053f * distribution(generator),
                  52.0698f + .0334f * distribution(generator));
    distance = start.Distance(end);
    // try again if they are too close or too far apart
  } while (distance < 1000 || distance > 2000);
  return R"({"costing":"auto","locations":[{"lat":)" + std::to_string(start.second) +
         R"(,"lon":)" + std::to_string(start.first) + R"(},{"lat":)" +
         std::to_string(end.second) + R"(,"lon":)" + std::to_string(end.first) + "}]}";
}

// End-to-end fuzz test: route between random points, edge-walk the resulting
// shape, simulate noisy GPS along it, map-match the simulation and require
// the matched edge ids to appear (trimmed by one at each end, to tolerate
// noise) as a contiguous run inside the edge-walked ids.
TEST(Mapmatch, test_matcher) {
  // generate a bunch of tests
  tyr::actor_t actor(conf, true);
  int tested = 0;
  while (tested < bound) {
    // get a route shape
    PointLL start, end;
    auto test_case = make_test_case(start, end);
    std::cout << test_case << std::endl;
    boost::property_tree::ptree route;
    std::string route_json;
    try {
      route_json = actor.route(test_case);
      route = json_to_pt(route_json);
    } catch (...) {
      // best-effort: some random pairs are unroutable, just draw a new pair
      std::cout << "route failed" << std::endl;
      continue;
    }
    auto encoded_shape = route.get_child("trip.legs").front().second.get<std::string>("shape");
    auto shape = midgard::decode<std::vector<midgard::PointLL>>(encoded_shape);
    // skip any routes that have loops in them as edge walk fails in that case...
    // TODO: fix edge walk
    std::unordered_set<std::string> names;
    bool looped = false;
    const auto& maneuvers = route.get_child("trip.legs").front().second.get_child("maneuvers");
    for (const auto& maneuver : maneuvers) {
      if (maneuver.second.find("street_names") == maneuver.second.not_found())
        continue;
      // a repeated street name across maneuvers is treated as a loop
      for (const auto& name : maneuver.second.get_child("street_names"))
        looped = looped || !names.insert(name.second.get_value<std::string>()).second;
    }
    // get the edges along that route shape
    boost::property_tree::ptree walked;
    std::string walked_json;
    try {
      walked_json = actor.trace_attributes(
          R"({"date_time":{"type":1,"value":"2019-10-31T18:30"},"costing":"auto","shape_match":"edge_walk","encoded_polyline":")" +
          json_escape(encoded_shape) + "\"}");
      walked = json_to_pt(walked_json);
    } catch (...) {
      std::cout << test_case << std::endl;
      std::cout << R"({"costing":"auto","shape_match":"edge_walk","encoded_polyline":")" +
                       json_escape(encoded_shape) + "\"}"
                << std::endl;
      FAIL() << "Edge walk failed with exact shape";
    }
    // check the shape makes sense
    auto walked_encoded_shape = walked.get<std::string>("shape");
    auto walked_shape = midgard::decode<std::vector<midgard::PointLL>>(walked_encoded_shape);
    /*EXPECT_EQ(walked_shape.size() , shape.size()) << "Differing shape lengths " + std::to_string(shape.size()) + " != " + std::to_string(walked_shape.size()) + "\n" + encoded_shape + "\n" + walked_encoded_shape;*/
    // build up some gps segments for simulation from the real shape
    std::vector<uint64_t> walked_edges;
    std::vector<gps_segment_t> segments;
    for (const auto& edge : walked.get_child("edges")) {
      walked_edges.push_back(edge.second.get<uint64_t>("id"));
      auto b = edge.second.get<size_t>("begin_shape_index");
      auto e = edge.second.get<size_t>("end_shape_index") + 1;
      // speed arrives in km/h; convert to m/s for the simulator
      segments.emplace_back(
          gps_segment_t{std::vector<PointLL>(walked_shape.cbegin() + b, walked_shape.cbegin() + e),
                        static_cast<float>(edge.second.get<float>("speed") * 1e3) / 3600.f});
    }
    // simulate gps from the route shape
    std::vector<float> accuracies;
    auto simulation = simulate_gps(segments, accuracies, 50, 75.f, 1);
    auto locations = to_locations(simulation, accuracies, 1);
    // get a trace-attributes from the simulated gps
    auto matched = json_to_pt(actor.trace_attributes(
        R"({"costing":"auto","shape_match":"map_snap","shape":)" + locations + "}"));
    std::vector<uint64_t> matched_edges;
    for (const auto& edge : matched.get_child("edges"))
      matched_edges.push_back(edge.second.get<uint64_t>("id"));
    // because of noise we can have off by 1 happen at the beginning or end so we trim to make sure
    auto walked_it = std::search(walked_edges.begin(), walked_edges.end(),
                                 matched_edges.begin() + 1, matched_edges.end() - 1);
    if (walked_it == walked_edges.end()) {
      if (looped) {
        std::cout << "route had a possible loop" << std::endl;
        continue;
      }
      // dump route (green), simulation (blue), match (red) and the endpoints
      // as geojson to make the failure debuggable
      auto decoded_match = midgard::decode<std::vector<PointLL>>(matched.get<std::string>("shape"));
      std::string geojson =
          R"({"type":"FeatureCollection","features":[{"geometry":{"type":"LineString","coordinates":[)";
      geojson += print(shape);
      geojson +=
          R"(]},"type":"Feature","properties":{"stroke":"#00ff00","stroke-width":2}},{"geometry":{"type":"LineString","coordinates":[)";
      geojson += print(simulation);
      geojson +=
          R"(]},"type":"Feature","properties":{"stroke":"#0000ff","stroke-width":2}},{"geometry":{"type":"LineString","coordinates":[)";
      geojson += print(decoded_match);
      geojson +=
          R"(]},"type":"Feature","properties":{"stroke":"#ff0000","stroke-width":2}},{"geometry":{"type":"MultiPoint","coordinates":[)";
      geojson += print(std::vector<PointLL>{start, end});
      geojson += R"(]},"type":"Feature","properties":{}}]})";
      std::cout << geojson << std::endl;
      FAIL() << "The match did not match the walk";
    }
    std::cout << "Iteration " << tested << " complete" << std::endl;
    ++tested;
  }
}

// With time factors neutralized, matching is driven by distance only and
// should take the small geometric detour.
TEST(Mapmatch, test_distance_only) {
  tyr::actor_t actor(conf, true);
  auto matched = json_to_pt(actor.trace_attributes(
// (continuation: the trace_attributes request for test_distance_only)
      R"({"trace_options":{"max_route_distance_factor":10,"max_route_time_factor":1,"turn_penalty_factor":0}, "costing":"auto","shape_match":"map_snap","shape":[ {"lat":52.09110,"lon":5.09806,"accuracy":10}, {"lat":52.09050,"lon":5.09769,"accuracy":100}, {"lat":52.09098,"lon":5.09679,"accuracy":10}]})"));
  std::unordered_set<std::string> names;
  for (const auto& edge : matched.get_child("edges"))
    for (const auto& name : edge.second.get_child("names"))
      names.insert(name.second.get_value<std::string>());
  EXPECT_NE(names.find("Jan Pieterszoon Coenstraat"), names.end())
      << "Using distance only it should have taken a small detour";
}

// Leg splitting in trace_route: "break" points start a new leg, "via" and
// untyped points do not; the last case feeds an encoded_polyline instead of
// discrete shape points.
TEST(Mapmatch, test_trace_route_breaks) {
  std::vector<std::string> test_cases = {
      R"({"costing":"auto","shape_match":"map_snap","shape":[ {"lat":52.09110,"lon":5.09806,"type":"break"}, {"lat":52.09050,"lon":5.09769,"type":"break"}, {"lat":52.09098,"lon":5.09679,"type":"break"}]})",
      R"({"costing":"auto","shape_match":"map_snap","shape":[ {"lat":52.09110,"lon":5.09806,"type":"break"}, {"lat":52.09050,"lon":5.09769,"type":"via"}, {"lat":52.09098,"lon":5.09679,"type":"break"}]})",
      R"({"costing":"auto","shape_match":"map_snap","shape":[ {"lat":52.09110,"lon":5.09806}, {"lat":52.09050,"lon":5.09769}, {"lat":52.09098,"lon":5.09679}]})",
      R"({"costing":"auto","shape_match":"map_snap","shape":[ {"lat":52.09110,"lon":5.09806,"type":"break","radius":5}, {"lat":52.0911006,"lon":5.0972905,"type":"break","radius":5}, {"lat":52.0909933,"lon":5.0969919,"type":"break","radius":5}, {"lat":52.0909707,"lon":5.0967710,"type":"break","radius":5}]})",
      R"({"costing":"auto","shape_match":"map_snap","encoded_polyline":"quijbBqpnwHfJxc@bBdJrDfSdAzFX|AHd@bG~[|AnIdArGbAo@z@m@`EuClO}MjE}E~NkPaAuC"})"};
  // expected number of legs per case above
  std::vector<size_t> test_answers = {2, 1, 1, 1, 1};
  tyr::actor_t actor(conf, true);
  for (size_t i = 0; i < test_cases.size(); ++i) {
    auto matched = json_to_pt(actor.trace_route(test_cases[i]));
    const auto& legs = matched.get_child("trip.legs");
    EXPECT_EQ(legs.size(), test_answers[i]);
    // decode each leg's shape to make sure it is well formed
    for (const auto& leg : legs) {
      auto decoded_match = midgard::decode<std::vector<PointLL>>(leg.second.get<std::string>("shape"));
    }
  }
}

// Discontinuities in the match split the result into multiple routes; also
// checks which date_time types put a start time on the legs.
TEST(Mapmatch, test_edges_discontinuity_with_multi_routes) {
  // here everything is a leg and the discontinuities are the routes
  // we have to use osrm format because valhalla format doesnt support multi route
  std::vector<std::string> test_cases = {
      R"({"date_time":{"type":1,"value":"2019-11-10T09:00"}, "costing":"auto","format":"osrm","shape_match":"map_snap","shape":[ {"lat":52.0609632,"lon":5.0917676,"type":"break"}, {"lat":52.0607180,"lon":5.0950566,"type":"break"}, {"lat":52.0797372,"lon":5.1293068,"type":"break"}, {"lat":52.0792731,"lon":5.1343818,"type":"break"}, {"lat":52.0763011,"lon":5.1574637,"type":"break"}, {"lat":52.0782167,"lon":5.1592370,"type":"break"}]})",
      R"({"date_time":{"type":0}, "costing":"auto","format":"osrm","shape_match":"map_snap","shape":[ {"lat":52.0609632,"lon":5.0917676,"type":"break"}, {"lat":52.0607180,"lon":5.0950566,"type":"via"}, {"lat":52.0797372,"lon":5.1293068,"type":"via"}, {"lat":52.0792731,"lon":5.1343818,"type":"via"}, {"lat":52.0763011,"lon":5.1574637,"type":"via"}, {"lat":52.0782167,"lon":5.1592370,"type":"break"}]})",
      R"({"costing":"auto","format":"osrm","shape_match":"map_snap","shape":[ {"lat":52.0609632,"lon":5.0917676,"type":"break","time":7}, {"lat":52.0607185,"lon":5.0940566,"type":"break_through","time":11}, {"lat":52.0607180,"lon":5.0950566,"type":"break_through","time":15}, {"lat":52.0797372,"lon":5.1293068,"type":"break_through","time":19}, {"lat":52.0792731,"lon":5.1343818,"type":"break_through","time":23}, {"lat":52.0763011,"lon":5.1574637,"type":"break_through","time":27}, {"lat":52.0782167,"lon":5.1592370,"type":"break","time":13}]})",
      R"({"date_time":{"type":2,"value":"2019-11-10T09:00"}, "costing":"auto","format":"osrm","shape_match":"map_snap","shape":[ {"lat":52.0609632,"lon":5.0917676,"type":"break"}, {"lat":52.0607185,"lon":5.0940566,"type":"via"}, {"lat":52.0607180,"lon":5.0950566,"type":"via"}, {"lat":52.0797372,"lon":5.1293068,"type":"via"}, {"lat":52.0792731,"lon":5.1343818,"type":"via"}, {"lat":52.0763011,"lon":5.1574637,"type":"via"}, {"lat":52.0782167,"lon":5.1592370,"type":"break"}]})",
      R"({"date_time":{"type":1,"value":"2019-11-10T09:00"}, "costing":"auto","format":"osrm","shape_match":"map_snap","shape":[ {"lat": 52.068882, "lon": 5.120852, "type": "break"}, {"lat": 52.069671, "lon": 5.121185, "type": "break"}, {"lat": 52.070380, "lon": 5.121523, "type": "break"}, {"lat": 52.070947, "lon": 5.121828, "type": "break"}, {"lat": 52.071827, "lon": 5.122220, "type": "break"}, {"lat": 52.072526, "lon": 5.122553, "type": "break"}, {"lat": 52.073489, "lon": 5.122880, "type": "break"}, {"lat": 52.074554, "lon": 5.122955, "type": "break"}, {"lat": 52.075190, "lon": 5.123067, "type": "break"}, {"lat": 52.075718, "lon": 5.123121, "type": "break"}]})",
      R"({"date_time":{"type":0}, "costing":"auto","format":"osrm","shape_match":"map_snap","shape":[ {"lat": 52.068882, "lon": 5.120852, "type": "break"}, {"lat": 52.069671, "lon": 5.121185, "type": "through"}, {"lat": 52.070380, "lon": 5.121523, "type": "through"}, {"lat": 52.070947, "lon": 5.121828, "type": "through"}, {"lat": 52.071827, "lon": 5.122220, "type": "through"}, {"lat": 52.072526, "lon": 5.122553, "type": "through"}, {"lat": 52.073489, "lon": 5.122880, "type": "through"}, {"lat": 52.074554, "lon": 5.122955, "type": "through"}, {"lat": 52.075190, "lon": 5.123067, "type": "through"}, {"lat": 52.075718, "lon": 5.123121, "type": "break"}]})",
      R"({"costing":"auto","format":"osrm","shape_match":"map_snap","shape":[ {"lat": 52.068882, "lon": 5.120852, "type": "break","time":7}, {"lat": 52.069671, "lon": 5.121185, "type": "break","time":9}, {"lat": 52.070380, "lon": 5.121523, "type": "break","time":11}, {"lat": 52.070947, "lon": 5.121828, "type": "break","time":13}, {"lat": 52.071827, "lon": 5.1227, "type": "break", "radius":1,"time":15}, {"lat": 52.072526, "lon": 5.122553, "type": "break","time":17}, {"lat": 52.073489, "lon": 5.122880, "type": "break","time":19}, {"lat": 52.074554, "lon": 5.122955, "type": "break","time":21}, {"lat": 52.075190, "lon": 5.123067, "type": "break","time":23}, {"lat": 52.075718, "lon": 5.123121, "type": "break","time":25}]})",
      R"({"date_time":{"type":2,"value":"2019-11-10T09:00"}, "costing":"auto","format":"osrm","shape_match":"map_snap","shape":[ {"lat": 52.068882, "lon": 5.120852, "type": "break"}, {"lat": 52.069671, "lon": 5.121185, "type": "through"}, {"lat": 52.070380, "lon": 5.121523, "type": "through"}, {"lat": 52.070947, "lon": 5.121828, "type": "through"}, {"lat": 52.071827, "lon": 5.1227, "type": "through", "radius": 1}, {"lat": 52.072526, "lon": 5.122553, "type": "through"}, {"lat": 52.073489, "lon": 5.122880, "type": "through"}, {"lat": 52.074554, "lon": 5.122955, "type": "through"}, {"lat": 52.075190, "lon": 5.123067, "type": "through"}, {"lat": 52.075718, "lon": 5.123121, "type": "break"}]})"};
  // per case: {expected route count, expected total leg count, legs carry a start time}
  using a_t = std::tuple<size_t, size_t, bool>;
  std::vector<a_t> test_answers = {a_t{3, 3, true},  a_t{3, 3, true}, a_t{2, 3, true},
                                   a_t{3, 3, false}, a_t{1, 9, true}, a_t{1, 1, true},
                                   a_t{2, 7, true},  a_t{2, 2, false}};
  // for type 2, we currently do not support them, thus we are not expecting any time stamp
  for (size_t i = 0; i < test_cases.size(); ++i) {
    api_tester tester;
    auto response = tester.match(test_cases[i]);
    EXPECT_EQ(response.trip().routes_size(), std::get<0>(test_answers[i]));
    size_t leg_count = 0;
    for (const auto& route : response.trip().routes()) {
      leg_count += route.legs_size();
      for (const auto& leg : route.legs()) {
        if (leg.location(0).has_date_time()) {
          EXPECT_TRUE(std::get<2>(test_answers[i]))
              << "Found a leg with a start time when it shouldnt have had one";
        } else {
          EXPECT_FALSE(std::get<2>(test_answers[i]))
              << "Found a leg without a start time when it should have had one";
        }
      }
    }
    EXPECT_EQ(leg_count, std::get<1>(test_answers[i]));
  }
}

// A shape spanning disconnected edges must not produce a route.
TEST(Mapmatch,
     test_disconnected_edges_expect_no_route) {
  std::vector<std::string> test_cases = {R"({"costing":"auto","shape_match":"map_snap","shape":[ {"lat":52.0630834,"lon":5.1037227,"type":"break"}, {"lat":52.0633099,"lon":5.1047193,"type":"break"}, {"lat":52.0640117,"lon":5.1040429,"type":"break"}, {"lat":52.0644313,"lon":5.1041697,"type":"break"}]})"};
  std::vector<size_t> test_answers = {0};
  size_t illegal_path = 0;
  tyr::actor_t actor(conf, true);
  for (size_t i = 0; i < test_cases.size(); ++i) {
    try {
      auto matched = json_to_pt(actor.trace_route(test_cases[i]));
    } catch (...) {
      // every case is expected to throw; count them in order
      EXPECT_EQ(illegal_path++, i) << "Expected no route but got one";
    }
  }
}

// osrm tracepoints must report the matching (route) index and the waypoint
// index within that matching; "null" marks non-waypoints.
TEST(Mapmatch, test_matching_indices_and_waypoint_indices) {
  std::vector<std::string> test_cases = {
      R"({"costing":"auto","format":"osrm","shape_match":"map_snap","shape":[ {"lat": 52.068882, "lon": 5.120852, "type": "break"}, {"lat": 52.069671, "lon": 5.121185, "type": "via"}, {"lat": 52.070380, "lon": 5.121523, "type": "via"}, {"lat": 52.070947, "lon": 5.121828, "type": "via"}, {"lat": 52.071827, "lon": 5.1227, "type": "via"}, {"lat": 52.072526, "lon": 5.122553, "type": "via"}, {"lat": 52.073489, "lon": 5.122880, "type": "via"}, {"lat": 52.074554, "lon": 5.122955, "type": "via"}, {"lat": 52.075190, "lon": 5.123067, "type": "via"}, {"lat": 52.075718, "lon": 5.123121, "type": "break"}]})",
      R"({"costing":"auto","format":"osrm","shape_match":"map_snap","shape":[ {"lat": 52.0609632, "lon": 5.0917676, "type": "break"}, {"lat": 52.0607180, "lon": 5.0950566, "type": "break"}, {"lat": 52.0797372, "lon": 5.1293068, "type": "break"}, {"lat": 52.0792731, "lon": 5.1343818, "type": "break"}, {"lat": 52.0763011, "lon": 5.1574637, "type": "break"}, {"lat": 52.0782167, "lon": 5.1592370, "type": "via"}, {"lat": 52.0801357, "lon": 5.1605372, "type": "break"}]})"};
  // expected (matchings_index, waypoint_index) pairs per tracepoint
  std::vector<std::vector<std::pair<std::string, std::string>>> answers{{{"0", "0"},
                                                                         {"0", "null"},
                                                                         {"0", "null"},
                                                                         {"0", "1"},
                                                                         {"1", "0"},
                                                                         {"1", "null"},
                                                                         {"1", "null"},
                                                                         {"1", "null"},
                                                                         {"1", "1"}},
                                                                        {
                                                                            {"0", "0"},
                                                                            {"0", "1"},
                                                                            {"1", "0"},
                                                                            {"1", "1"},
                                                                            {"2", "0"},
                                                                            {"2", "null"},
                                                                            {"2", "1"},
                                                                        }};
  tyr::actor_t actor(conf, true);
  for (size_t i = 0; i < test_cases.size(); ++i) {
    auto matched = json_to_pt(actor.trace_route(test_cases[i]));
    const auto& tracepoints = matched.get_child("tracepoints");
    int j = 0;
    for (const auto& tracepoint : tracepoints) {
      std::pair<std::string, std::string> result;
      try {
        result = {tracepoint.second.get<std::string>("matchings_index"),
                  tracepoint.second.get<std::string>("waypoint_index")};
      } catch (...) {
        // handle the tracepoint null case
        continue;
      }
      EXPECT_EQ(result, answers[i][j]) << "expect matching_index and waypoint_index: (" +
                                              answers[i][j].first + "," + answers[i][j].second +
                                              "), " + "but got: (" + result.first + "," +
                                              result.second + ")";
      ++j;
    }
  }
}

// With time factors enabled, timestamps should prevent the detour that the
// distance-only variant of this test takes.
TEST(Mapmatch, test_time_rejection) {
  tyr::actor_t actor(conf, true);
  auto matched = json_to_pt(actor.trace_attributes(
      R"({"trace_options":{"max_route_distance_factor":10,"max_route_time_factor":3,"turn_penalty_factor":0}, "costing":"auto","shape_match":"map_snap","shape":[ {"lat":52.09110,"lon":5.09806,"accuracy":10,"time":2}, {"lat":52.09050,"lon":5.09769,"accuracy":100,"time":4}, {"lat":52.09098,"lon":5.09679,"accuracy":10,"time":6}]})"));
  std::unordered_set<std::string> names;
  for (const auto& edge : matched.get_child("edges"))
    for (const auto& name : edge.second.get_child("names"))
      names.insert(name.second.get_value<std::string>());
  EXPECT_EQ(names.find("Jan Pieterszoon Coenstraat"), names.end())
      << "Using time it should not take a small detour";
}

// Smoke test: a route request exercising every location attribute parses and
// routes without throwing.
TEST(Mapmatch, test32bit) {
  tyr::actor_t actor(conf, true);
  std::string test_case =
      R"({"costing":"auto","locations":[{"lat":52.096672,"lon":5.110825}, {"lat":52.081371,"lon":5.125671,"name":"foo","street":"bar","city":"baz", "state":"qux","postal_code":"corge","country":"quux","url":"zoinks", "date_time":"2001-06-07T15:17","heading_tolerance":90,"node_snap_tolerance":5, "way_id":1234,"minimum_reachability":50,"rank_candidates":false, "preferred_side":"neither","search_cutoff":100,"street_side_tolerance":5}],"date_time":{"type":0}})";
  actor.route(test_case);
}

TEST(Mapmatch, test_trace_route_edge_walk_expected_error_code) {
  // tests expected error_code for trace_route edge_walk
  auto expected_error_code = 443;
  tyr::actor_t actor(conf, true);
  try {
    auto response = json_to_pt(actor.trace_route(
        R"({"costing":"auto","shape_match":"edge_walk","shape":[ {"lat":52.088548,"lon":5.15357,"accuracy":30,"time":2}, {"lat":52.088627,"lon":5.153269,"accuracy":30,"time":4}, {"lat":52.08864,"lon":5.15298,"accuracy":30,"time":6}, {"lat":52.08861,"lon":5.15272,"accuracy":30,"time":8}, {"lat":52.08863,"lon":5.15253,"accuracy":30,"time":10}, {"lat":52.08851,"lon":5.15249,"accuracy":30,"time":12}]})"));
  } catch (const valhalla_exception_t& e) {
    EXPECT_EQ(e.code, expected_error_code);
    // If we get here then all good - return
    return;
  }
  // If we get here then fail the test!
  FAIL() << "Expected trace_route edge_walk exception was not found";
}

TEST(Mapmatch, test_trace_route_map_snap_expected_error_code) {
  // tests expected error_code for trace_route map_snap
  auto expected_error_code = 442;
  tyr::actor_t actor(conf, true);
  try {
    auto response = json_to_pt(actor.trace_route(
        R"({"costing":"auto","shape_match":"map_snap","shape":[ {"lat":52.088548,"lon":5.15357,"radius":5,"time":2}, {"lat":52.088627,"lon":5.153269,"radius":5,"time":4}, {"lat":52.08864,"lon":5.15298,"radius":5,"time":6}, {"lat":52.08861,"lon":5.15272,"radius":5,"time":8}, {"lat":52.08863,"lon":5.15253,"radius":5,"time":10}, {"lat":52.08851,"lon":5.15249,"radius":5,"time":12}]})"));
  } catch (const valhalla_exception_t& e) {
    EXPECT_EQ(e.code, expected_error_code);
    // If we get here then all good - return
    return;
  }
  // If we get here then fail the test!
  FAIL() << "Expected trace_route map_snap exception was not found";
}

TEST(Mapmatch, test_trace_attributes_edge_walk_expected_error_code) {
  // tests expected error_code for trace_attributes edge_walk
  auto expected_error_code = 443;
  tyr::actor_t actor(conf, true);
  try {
    auto response = json_to_pt(actor.trace_attributes(
        R"({"costing":"auto","shape_match":"edge_walk","shape":[ {"lat":52.088548,"lon":5.15357,"accuracy":30,"time":2}, {"lat":52.088627,"lon":5.153269,"accuracy":30,"time":4}, {"lat":52.08864,"lon":5.15298,"accuracy":30,"time":6}, {"lat":52.08861,"lon":5.15272,"accuracy":30,"time":8}, {"lat":52.08863,"lon":5.15253,"accuracy":30,"time":10}, {"lat":52.08851,"lon":5.15249,"accuracy":30,"time":12}]})"));
  } catch (const valhalla_exception_t& e) {
    EXPECT_EQ(e.code, expected_error_code);
    // If we get here then all good - return
    return;
  }
  // If we get here then fail the test!
  FAIL() << "Expected trace_attributes edge_walk exception was not found";
}

TEST(Mapmatch, test_trace_attributes_map_snap_expected_error_code) {
  // tests expected error_code for trace_attributes map_snap
  auto expected_error_code = 444;
  tyr::actor_t actor(conf, true);
  try {
    auto response = json_to_pt(actor.trace_attributes(
        R"({"costing":"auto","shape_match":"map_snap","shape":[ {"lat":52.088548,"lon":5.15357,"radius":5,"time":2}, {"lat":52.088627,"lon":5.153269,"radius":5,"time":4}, {"lat":52.08864,"lon":5.15298,"radius":5,"time":6}, {"lat":52.08861,"lon":5.15272,"radius":5,"time":8}, {"lat":52.08863,"lon":5.15253,"radius":5,"time":10}, {"lat":52.08851,"lon":5.15249,"radius":5,"time":12}]})"));
  } catch (const valhalla_exception_t& e) {
    EXPECT_EQ(e.code, expected_error_code);
    // If we get here then all good - return
    return;
  }
  // If we get here then fail the test!
  FAIL() << "Expected trace_attributes map_snap exception was not found";
}

TEST(Mapmatch, test_topk_validate) {
  // tests a fork in the road
  tyr::actor_t actor(conf, true);
  // tests a previous segfault due to using a claimed state
  auto matched = json_to_pt(actor.trace_attributes(
      R"({"costing":"auto","best_paths":2,"shape_match":"map_snap","shape":[ {"lat":52.088548,"lon":5.15357,"accuracy":30,"time":2}, {"lat":52.088627,"lon":5.153269,"accuracy":30,"time":4}, {"lat":52.08864,"lon":5.15298,"accuracy":30,"time":6}, {"lat":52.08861,"lon":5.15272,"accuracy":30,"time":8}, {"lat":52.08863,"lon":5.15253,"accuracy":30,"time":10}, {"lat":52.08851,"lon":5.15249,"accuracy":30,"time":12}]})"));
  // this tests a fix for an infinite loop because there is only 1 result and we ask for 4
  matched = json_to_pt(actor.trace_attributes(
      R"({"costing":"auto","best_paths":4,"shape_match":"map_snap","shape":[ {"lat":52.09579,"lon":5.13137,"accuracy":5,"time":2}, {"lat":52.09652,"lon":5.13184,"accuracy":5,"time":4}]})"));
  EXPECT_EQ(matched.get_child("alternate_paths").size(), 0) << "There should be only one result";
}

// best_paths=2 at a fork: the primary result stays left, the alternate goes
// right, and confidence/raw scores must be ordered accordingly.
TEST(Mapmatch, test_topk_fork_alternate) {
  // tests a fork in the road
  tyr::actor_t actor(conf, true);
  auto matched = json_to_pt(actor.trace_attributes(
      R"({"trace_options":{"search_radius":0},"costing":"auto","best_paths":2,"shape_match":"map_snap","shape":[ {"lat":52.08511,"lon":5.15085,"accuracy":10,"time":2}, {"lat":52.08533,"lon":5.15109,"accuracy":20,"time":4}, {"lat":52.08539,"lon":5.15100,"accuracy":20,"time":6}]})"));
  /*** Primary path - left at the fork (geojson of trace + matched points for debugging)
  {"type":"FeatureCollection","features":[
  {"type":"Feature","geometry":{"type":"Point","coordinates":[5.150850,52.085110]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":0}},
  {"type":"Feature","geometry":{"type":"Point","coordinates":[5.151090,52.085331]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":1}},
  {"type":"Feature","geometry":{"type":"Point","coordinates":[5.151000,52.085388]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":2}},
  {"type":"Feature","geometry":{"type":"Point","coordinates":[5.150851,52.085110]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":0,"matched_point_type":"matched","edge_index":0,"distance_along_edge":0.295,"distance_from_trace_point":0.097}},
  {"type":"Feature","geometry":{"type":"Point","coordinates":[5.151000,52.085323]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":1,"matched_point_type":"matched","edge_index":1,"distance_along_edge":0.152,"distance_from_trace_point":6.149}},
  {"type":"Feature","geometry":{"type":"Point","coordinates":[5.150990,52.085388]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":2,"matched_point_type":"matched","edge_index":1,"distance_along_edge":0.296,"distance_from_trace_point":0.713}}
  ]} */
  std::vector<std::string> names;
  for (const auto& edge : matched.get_child("edges"))
    for (const auto& name : edge.second.get_child("names"))
      names.push_back(name.second.get_value<std::string>());
  if (names != std::vector<std::string>{"Louis Saalbornlaan", "Cor Ruyslaan"}) {
    std::string streets;
    for (const auto& n : names)
      streets += n + ", ";
    FAIL() << "The most obvious result is stay left but got: " + streets;
  }
  EXPECT_EQ(matched.get<float>("confidence_score"), 1.0f)
      << "Confidence of the first result is always 1";
  /*** Alternate path - right at the fork (geojson of trace + matched points for debugging)
  {"type":"FeatureCollection","features":[
  {"type":"Feature","geometry":{"type":"Point","coordinates":[5.150850,52.085110]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":0}},
  {"type":"Feature","geometry":{"type":"Point","coordinates":[5.151090,52.085331]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":1}},
  {"type":"Feature","geometry":{"type":"Point","coordinates":[5.151000,52.085388]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":2}},
  {"type":"Feature","geometry":{"type":"Point","coordinates":[5.150851,52.085110]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":0,"matched_point_type":"matched","edge_index":0,"distance_along_edge":0.295,"distance_from_trace_point":0.097}},
  {"type":"Feature","geometry":{"type":"Point","coordinates":[5.151095,52.085327]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":1,"matched_point_type":"matched","edge_index":1,"distance_along_edge":0.254,"distance_from_trace_point":0.532}},
  {"type":"Feature","geometry":{"type":"Point","coordinates":[5.151106,52.085339]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":2,"matched_point_type":"matched","edge_index":1,"distance_along_edge":0.293,"distance_from_trace_point":9.044}}
  ]} */
  names.clear();
  auto alternate = matched.get_child("alternate_paths").front().second;
  for (const auto& edge : alternate.get_child("edges"))
    for (const auto& name : edge.second.get_child("names"))
      names.push_back(name.second.get_value<std::string>());
  if (names != std::vector<std::string>{"Louis Saalbornlaan", "Louis Saalbornlaan"}) {
    std::string streets;
    for (const auto& n : names)
      streets += n + ", ";
    FAIL() << "The second most obvious result is stay right but got: " + streets;
  }
  EXPECT_LT(alternate.get<float>("confidence_score"), 1.0f)
      << "Confidence of the second result is always less than 1";
  EXPECT_LT(matched.get<float>("raw_score"), alternate.get<float>("raw_score"))
      << "The raw score of the first result is always less than that of the second";
}

TEST(Mapmatch, test_topk_loop_alternate) {
  // tests a loop in the road
  tyr::actor_t actor(conf, true);
  auto matched = json_to_pt(actor.trace_attributes(
      R"({"costing":"auto","best_paths":2,"shape_match":"map_snap","shape":[ 
{"lat":52.0886,"lon":5.1535,"accuracy":10}, {"lat":52.088619,"lon":5.15315,"accuracy":20}, {"lat":52.08855,"lon":5.152652,"accuracy":25}, {"lat":52.0883,"lon":5.152183,"accuracy":20}, {"lat":52.088062,"lon":5.151963,"accuracy":20}]})")); /*** Primary path - stay left on the same road {"type":"FeatureCollection","features":[ {"type":"Feature","geometry":{"type":"Point","coordinates":[5.153500,52.088600]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":0}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.153150,52.088619]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":1}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.152652,52.088551]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":2}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.152183,52.088299]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":3}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.151963,52.088062]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":4}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.153483,52.088573]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":0,"matched_point_type":"matched","edge_index":0,"distance_along_edge":0.698,"distance_from_trace_point":3.174}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.153239,52.088531]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":1,"matched_point_type":"matched","edge_index":2,"distance_along_edge":0.049,"distance_from_trace_point":11.437}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.152822,52.088379]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":2,"matched_point_type":"matched","edge_index":4,"distance_along_edge":0.136,"distance_from_trace_point":22.209}}, 
{"type":"Feature","geometry":{"type":"Point","coordinates":[5.152305,52.088184]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":3,"matched_point_type":"matched","edge_index":5,"distance_along_edge":0.118,"distance_from_trace_point":15.111}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.151974,52.088051]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":4,"matched_point_type":"matched","edge_index":5,"distance_along_edge":0.801,"distance_from_trace_point":1.468}} ]} */ std::vector<std::string> names; for (const auto& edge : matched.get_child("edges")) for (const auto& name : edge.second.get_child("names")) names.push_back(name.second.get_value<std::string>()); if (names != std::vector<std::string>{"Louis Bouwmeesterlaan", "Louis Bouwmeesterlaan", "Louis Bouwmeesterlaan", "Louis Bouwmeesterlaan", "Louis Bouwmeesterlaan", "Louis Bouwmeesterlaan"}) { std::string streets; for (const auto& n : names) streets += n + ", "; FAIL() << "The most obvious result is stay left on the same road - but got: " + streets; } EXPECT_EQ(matched.get<float>("confidence_score"), 1.0f) << "Confidence of the first result is always 1"; /*** Alternate path - loop around to the right {"type":"FeatureCollection","features":[ {"type":"Feature","geometry":{"type":"Point","coordinates":[5.153500,52.088600]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":0}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.153150,52.088619]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":1}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.152652,52.088551]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":2}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.152183,52.088299]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":3}}, 
{"type":"Feature","geometry":{"type":"Point","coordinates":[5.151963,52.088062]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":4}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.153483,52.088573]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":0,"matched_point_type":"matched","edge_index":0,"distance_along_edge":0.698,"distance_from_trace_point":3.174}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.153173,52.088627]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":1,"matched_point_type":"matched","edge_index":2,"distance_along_edge":0.308,"distance_from_trace_point":1.769}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.152542,52.088661]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":2,"matched_point_type":"matched","edge_index":4,"distance_along_edge":0.183,"distance_from_trace_point":14.339}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.152305,52.088184]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":3,"matched_point_type":"matched","edge_index":6,"distance_along_edge":0.118,"distance_from_trace_point":15.111}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.151974,52.088051]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":4,"matched_point_type":"matched","edge_index":6,"distance_along_edge":0.801,"distance_from_trace_point":1.468}} ]} */ names.clear(); auto alternate = matched.get_child("alternate_paths").front().second; for (const auto& edge : alternate.get_child("edges")) for (const auto& name : edge.second.get_child("names")) names.push_back(name.second.get_value<std::string>()); if (names != std::vector<std::string>{"Louis Bouwmeesterlaan", "Louis Bouwmeesterlaan", "Eduard Verkadelaan", "Eduard Verkadelaan", "Eduard Verkadelaan", "Eduard Verkadelaan", "Louis 
Bouwmeesterlaan"}) { std::string streets; for (const auto& n : names) streets += n + ", "; FAIL() << "The second most obvious result is loop around to the right - but got: " + streets; } EXPECT_LT(alternate.get<float>("confidence_score"), 1.0f) << "Confidence of the second result is always less than 1"; EXPECT_LT(matched.get<float>("raw_score"), alternate.get<float>("raw_score")) << "The raw score of the first result is always less than that of the second"; } TEST(Mapmatch, test_topk_frontage_alternate) { // tests a parallel frontage road tyr::actor_t actor(conf, true); auto matched = json_to_pt(actor.trace_attributes( R"({"costing":"auto","best_paths":2,"shape_match":"map_snap","shape":[ {"lat":52.07956040090567,"lon":5.138160288333893,"accuracy":10,"time":2}, {"lat":52.07957358807355,"lon":5.138508975505829,"accuracy":10,"time":4}, {"lat":52.07959666560798,"lon":5.138905942440034,"accuracy":10,"time":6}, {"lat":52.0796213915245,"lon":5.139262676239015,"accuracy":10,"time":8}, {"lat":52.079637875461195,"lon":5.139581859111787,"accuracy":10,"time":10}, {"lat":52.07964776582031,"lon":5.139828622341157,"accuracy":10,"time":12}, {"lat":52.07985600778458,"lon":5.1404121782302865,"accuracy":10,"time":14}]})")); /*** Primary path - use main road {"type":"FeatureCollection","features":[ {"type":"Feature","geometry":{"type":"Point","coordinates":[5.138160,52.079559]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":0}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.138509,52.079575]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":1}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.138906,52.079597]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":2}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.139263,52.079620]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":3}}, 
{"type":"Feature","geometry":{"type":"Point","coordinates":[5.139582,52.079639]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":4}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.139829,52.079647]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":5}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.140212,52.079655]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":6}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.138153,52.079605]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":0,"matched_point_type":"matched","edge_index":0,"distance_along_edge":0.254,"distance_from_trace_point":5.085}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.138501,52.079624]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":1,"matched_point_type":"matched","edge_index":0,"distance_along_edge":0.385,"distance_from_trace_point":5.511}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.138898,52.079647]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":2,"matched_point_type":"matched","edge_index":0,"distance_along_edge":0.534,"distance_from_trace_point":5.511}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.139255,52.079670]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":3,"matched_point_type":"matched","edge_index":0,"distance_along_edge":0.668,"distance_from_trace_point":5.511}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.139574,52.079689]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":4,"matched_point_type":"matched","edge_index":0,"distance_along_edge":0.788,"distance_from_trace_point":5.511}}, 
{"type":"Feature","geometry":{"type":"Point","coordinates":[5.139820,52.079704]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":5,"matched_point_type":"matched","edge_index":0,"distance_along_edge":0.881,"distance_from_trace_point":6.357}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.140204,52.079727]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":6,"matched_point_type":"matched","edge_index":1,"distance_along_edge":0.070,"distance_from_trace_point":8.033}} ]} */ std::vector<std::string> names; for (const auto& edge : matched.get_child("edges")) for (const auto& name : edge.second.get_child("names")) names.push_back(name.second.get_value<std::string>()); if (names != std::vector<std::string>{"Rubenslaan", "Rubenslaan"}) { std::string streets; for (const auto& n : names) streets += n + ", "; FAIL() << "The most obvious result is stay straight on the same road - but got: " + streets; } EXPECT_EQ(matched.get<float>("confidence_score"), 1.0f) << "Confidence of the first result is always 1"; /*** Alternate path - use one way frontage road {"type":"FeatureCollection","features":[ {"type":"Feature","geometry":{"type":"Point","coordinates":[5.138160,52.079559]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":0}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.138509,52.079575]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":1}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.138906,52.079597]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":2}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.139263,52.079620]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":3}}, 
{"type":"Feature","geometry":{"type":"Point","coordinates":[5.139582,52.079639]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":4}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.139829,52.079647]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":5}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.140212,52.079655]},"properties":{"marker-color":"#abd9e9","marker-size":"small","trace_point_index":6}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.138169,52.079498]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":0,"matched_point_type":"matched","edge_index":0,"distance_along_edge":0.223,"distance_from_trace_point":6.774}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.138517,52.079517]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":1,"matched_point_type":"matched","edge_index":0,"distance_along_edge":0.369,"distance_from_trace_point":6.351}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.138914,52.079540]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":2,"matched_point_type":"matched","edge_index":0,"distance_along_edge":0.536,"distance_from_trace_point":6.351}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.139271,52.079559]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":3,"matched_point_type":"matched","edge_index":0,"distance_along_edge":0.686,"distance_from_trace_point":6.774}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.139591,52.079575]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":4,"matched_point_type":"matched","edge_index":0,"distance_along_edge":0.821,"distance_from_trace_point":7.197}}, 
{"type":"Feature","geometry":{"type":"Point","coordinates":[5.139837,52.079586]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":5,"matched_point_type":"matched","edge_index":0,"distance_along_edge":0.924,"distance_from_trace_point":6.771}}, {"type":"Feature","geometry":{"type":"Point","coordinates":[5.140018,52.079597]},"properties":{"marker-color":"#2c7bb6","marker-size":"medium","matched_point_index":6,"matched_point_type":"matched","edge_index":0,"distance_along_edge":1.000,"distance_from_trace_point":14.626}} ]} */ names.clear(); auto alternate = matched.get_child("alternate_paths").front().second; for (const auto& edge : alternate.get_child("edges")) { const auto json_names = edge.second.get_child_optional("names"); if (json_names) { for (const auto& name : json_names.get()) names.push_back(name.second.get_value<std::string>()); } else { names.push_back("<empty>"); } } if (names != std::vector<std::string>{"Rubenslaan", "Rubenslaan", "Rubenslaan"}) { std::string streets; for (const auto& n : names) streets += n + ", "; FAIL() << "The second most obvious result is frontage road to the right - but got: " + streets; } EXPECT_LT(alternate.get<float>("confidence_score"), 1.0f) << "Confidence of the second result is always less than 1"; EXPECT_LT(matched.get<float>("raw_score"), alternate.get<float>("raw_score")) << "The raw score of the first result is always less than that of the second"; } TEST(Mapmatch, test_now_matches) { tyr::actor_t actor(conf, true); // once with map matching std::string test_case = R"({"date_time":{"type":0},"shape_match":"map_snap","costing":"auto", "encoded_polyline":"oeyjbBqfjwHeO~M}x@`u@wDmh@oCcd@sAiVcAaKe@cBaNe[u^qg@qH`u@cL{Tmr@c{AtTu_@xVsd@"})"; auto route_json = actor.trace_route(test_case); // again with walking auto route = json_to_pt(route_json); auto encoded_shape = route.get_child("trip.legs").front().second.get<std::string>("shape"); test_case = 
R"({"date_time":{"type":0},"shape_match":"edge_walk","costing":"auto","encoded_polyline":")" + json_escape(encoded_shape) + "\"}"; actor.trace_route(test_case); } TEST(Mapmatch, test_leg_duration_trimming) { std::vector<std::vector<std::string>> test_cases = { // 2 routes, one leg per route {R"([{"lat": 52.0865058, "lon": 5.1201, "type": "break", "node_snap_tolerance":0}, {"lat": 52.0865512, "lon": 5.1201, "type": "via", "node_snap_tolerance":0}, {"lat": 52.0867449, "lon": 5.12, "type": "break", "node_snap_tolerance":0}])", R"([{"lat": 52.12705182, "lon": 5.0892165, "type": "break", "node_snap_tolerance":0}, {"lat": 52.1267117, "lon": 5.0898420, "type": "via", "node_snap_tolerance":0}, {"lat": 52.1261379, "lon": 5.0907894, "type": "break", "node_snap_tolerance":0}])"}, // 2 routes, multiple legs per route {R"([{"lat": 52.0865058, "lon": 5.1201, "type": "break", "node_snap_tolerance":0}, {"lat": 52.0865512, "lon": 5.1201, "type": "break", "node_snap_tolerance":0}, {"lat": 52.0867449, "lon": 5.12, "type": "break", "node_snap_tolerance":0}])", R"([{"lat": 52.12705182, "lon": 5.0892165, "type": "break", "node_snap_tolerance":0}, {"lat": 52.1267117, "lon": 5.0898420, "type": "break", "node_snap_tolerance":0}, {"lat": 52.1261379, "lon": 5.0907894, "type": "break", "node_snap_tolerance":0}])"}, // 2 routes close together in space {R"([{"lat": 52.0940283, "lon": 5.1133286, "type": "break", "node_snap_tolerance":0}, {"lat": 52.0938604, "lon": 5.1133609, "type": "break", "node_snap_tolerance":0}, {"lat": 52.0935827, "lon": 5.1133893, "type": "break", "node_snap_tolerance":0}])", R"([{"lat": 52.0939489, "lon": 5.1136976, "type": "break", "node_snap_tolerance":0}, {"lat": 52.0939837, "lon": 5.1139511, "type": "break", "node_snap_tolerance":0}])"}, // 1 route with 1 leg across 2 edges and then another leg on the same second edge {R"([{"lat": 52.0957652, "lon": 5.1101366, "type": "break", "node_snap_tolerance":0}, {"lat": 52.0959457, "lon": 5.1106847, "type": "break", 
"node_snap_tolerance":0}, {"lat": 52.0962535, "lon": 5.1116988, "type": "break", "node_snap_tolerance":0}])"}, // 1 route with 1 leg across 2 edges {R"([{"lat": 52.0957652, "lon": 5.1101366, "type": "break", "node_snap_tolerance":0}, {"lat": 52.0959457, "lon": 5.1106847, "type": "break", "node_snap_tolerance":0}])"}, // 1 route with 1 leg on 1 edge {R"([{"lat": 52.0959457, "lon": 5.1106847, "type": "break", "node_snap_tolerance":0}, {"lat": 52.0962535, "lon": 5.1116988, "type": "break", "node_snap_tolerance":0}])"}, // 1 routes, lots of legs on same edge {R"([{"lat": 52.108781, "lon": 5.1057369, "type": "break", "node_snap_tolerance":0}, {"lat": 52.108982, "lon": 5.1054472, "type": "break", "node_snap_tolerance":0}, {"lat": 52.109180, "lon": 5.1051449, "type": "break", "node_snap_tolerance":0}, {"lat": 52.109407, "lon": 5.1047962, "type": "break", "node_snap_tolerance":0}, {"lat": 52.109589, "lon": 5.1044422, "type": "break", "node_snap_tolerance":0}, {"lat": 52.109806, "lon": 5.1041525, "type": "break", "node_snap_tolerance":0}, {"lat": 52.110030, "lon": 5.1038306, "type": "break", "node_snap_tolerance":0}, {"lat": 52.110244, "lon": 5.1034766, "type": "break", "node_snap_tolerance":0}, {"lat": 52.110521, "lon": 5.1031029, "type": "break", "node_snap_tolerance":0}, {"lat": 52.110735, "lon": 5.1027846, "type": "break", "node_snap_tolerance":0}])"}, {R"([{"lat": 52.0826293, "lon": 5.1267623, "type": "break", "node_snap_tolerance":0}, {"lat": 52.0835867, "lon": 5.1276355, "type": "break", "node_snap_tolerance":0}, {"lat": 52.0837127, "lon": 5.1277763, "type": "break", "node_snap_tolerance":0}, {"lat": 52.0839615, "lon": 5.1280204, "type": "break", "node_snap_tolerance":0}, {"lat": 52.0841756, "lon": 5.1282906, "type": "break", "node_snap_tolerance":0}])"}, {R"([{"lat": 52.0609108, "lon": 5.0924059, "type": "break", "node_snap_tolerance":0}, {"lat": 52.0605926, "lon": 5.0962937, "type": "break", "node_snap_tolerance":0}, {"lat": 52.0604866, "lon": 5.0975675, "type": 
"break", "node_snap_tolerance":0}, {"lat": 52.0601766, "lon": 5.1005663, "type": "break", "node_snap_tolerance":0}])"}, }; api_tester tester; for (const auto& test_case : test_cases) { // for routing we need to do each route separately, and we manually mash them into one object valhalla::Api route_api; for (const auto& locations : test_case) { auto route_test_case = R"({"costing":"auto","locations":)" + locations + '}'; auto single_route_api = tester.route(route_test_case); route_api.mutable_trip()->mutable_routes()->MergeFrom(single_route_api.trip().routes()); } // for map matching we do it all in one shot by mashing all the locations together // NOTE: if want a test case with a discontinuity make sure a map match is not possible at it std::string match_test_case = R"({"costing":"auto", "shape_match":"map_snap","shape":)"; for (const auto& locations : test_case) { if (match_test_case.back() == ']') match_test_case.back() = ','; match_test_case += locations.substr(match_test_case.back() == ','); } match_test_case += '}'; auto match_api = tester.match(match_test_case); printf("\n%s\n", match_test_case.c_str()); // they should not disagree (unless the map match is very vague) EXPECT_EQ(route_api.trip().routes_size(), match_api.trip().routes_size()) << "Number of routes differs"; for (size_t i = 0; i < route_api.trip().routes_size(); ++i) { const auto& rlegs = route_api.trip().routes(i).legs(); const auto& mlegs = match_api.trip().routes(i).legs(); EXPECT_EQ(rlegs.size(), mlegs.size()) << "Number of legs differs"; printf("Route %zu\n", i); for (size_t j = 0; j < rlegs.size(); ++j) { auto rtime = rlegs.Get(j).node().rbegin()->elapsed_time(); auto mtime = mlegs.Get(j).node().rbegin()->elapsed_time(); printf("r: %.2f %s\n", rtime, rlegs.Get(j).shape().c_str()); printf("m: %.2f %s\n", mtime, mlegs.Get(j).shape().c_str()); EXPECT_TRUE(valhalla::midgard::equal(rtime, mtime, 0.1)) << "Leg time differs"; } } } } TEST(Mapmatch, test_intersection_matching) { 
  // Each test case is an OSRM-format, map-snap trace_route request whose
  // shape points lie on short segments around intersections.
  std::vector<std::string> test_cases = {
      R"({"costing":"auto","format":"osrm","shape_match":"map_snap","shape":[ {"lat": 52.0981267, "lon": 5.1296180, "type": "break"}, {"lat": 52.0981280, "lon": 5.1297250, "type": "break"}]})",
      R"({"costing":"auto","format":"osrm","shape_match":"map_snap","shape":[ {"lat": 52.0981346, "lon": 5.1300437, "type": "break"}, {"lat": 52.0981145, "lon": 5.1309431, "type": "break"}, {"lat": 52.0980642, "lon": 5.1314993, "type": "break"}, {"lat": 52.0971149, "lon": 5.1311002, "type": "break"}]})",
      R"({"costing":"auto","format":"osrm","shape_match":"map_snap","shape":[ {"lat": 52.0951641, "lon": 5.1285609, "type": "break"}, {"lat": 52.0952055, "lon": 5.1292756, "type": "break"}, {"lat": 52.0952580, "lon": 5.1301359, "type": "break"}, {"lat": 52.0952939, "lon": 5.1309020, "type": "break"}, {"lat": 52.0944788, "lon": 5.1304066, "type": "break"}]})"};

  // Expected result per test case: leg count and each leg's distance
  std::vector<std::pair<int, std::vector<float>>> test_answers = {
      {1, {7.3}},
      {3, {61.7, 41.6, 109.4}},
      {4, {49.3, 61, 52.6, 99}}};

  tyr::actor_t actor(conf, true);
  for (size_t i = 0; i < test_cases.size(); ++i) {
    auto matched = json_to_pt(actor.trace_route(test_cases[i]));
    const auto& routes = matched.get_child("matchings");
    for (const auto& route : routes) {
      const auto& legs = route.second.get_child("legs");
      // Leg count must match the expectation exactly
      ASSERT_EQ(legs.size(), test_answers[i].first)
          << "Expected " + std::to_string(test_answers[i].first) + " legs but got " +
                 std::to_string(legs.size());
      int j = 0;
      for (const auto& leg : legs) {
        float distance = leg.second.get<float>("distance");
        // NOTE(review): exact float equality on a parsed "distance" — brittle
        // if tile data or rounding changes; confirm the values are pinned.
        // j++ advances inside the macro, so the message uses j - 1.
        ASSERT_EQ(distance, test_answers[i].second[j++])
            << "Expected legs with distance" + std::to_string(test_answers[i].second[j - 1]) +
                   " but got " + std::to_string(distance);
      }
    }
  }
}

} // namespace

// Program entry point: optional argv[1]/argv[2] override the file-level
// `seed` and `bound` globals used by the randomized tests, then run GTest.
int main(int argc, char* argv[]) {
  midgard::logging::Configure({{"type", ""}}); // silence logs
  if (argc > 1)
    seed = std::stoi(argv[1]);
  if (argc > 2)
    bound = std::stoi(argv[2]);
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
/*ckwg +29 * Copyright 2016-2018 by Kitware, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name Kitware, Inc. nor the names of any contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "MainWindow.h" #include "GuiCommon.h" #include "ui_MainWindow.h" #include "am_MainWindow.h" #include "tools/BundleAdjustTool.h" #include "tools/CanonicalTransformTool.h" #include "tools/ComputeDepthTool.h" #include "tools/InitCamerasLandmarksTool.h" #include "tools/NeckerReversalTool.h" #include "tools/SaveFrameTool.h" #include "tools/SaveKeyFrameTool.h" #include "tools/TrackFeaturesSprokitTool.h" #include "tools/TrackFeaturesTool.h" #include "tools/TrackFilterTool.h" #include "tools/TriangulateTool.h" #include "AboutDialog.h" #include "MatchMatrixWindow.h" #include "Project.h" #include "vtkMaptkImageDataGeometryFilter.h" #include "vtkMaptkImageUnprojectDepth.h" #include "vtkMaptkCamera.h" #include <maptk/version.h> #include <maptk/local_geo_cs.h> #include <vital/algo/video_input.h> #include <vital/io/camera_io.h> #include <vital/io/landmark_map_io.h> #include <vital/io/track_set_io.h> #include <vital/types/camera_perspective.h> #include <vital/types/metadata_map.h> #include <arrows/core/match_matrix.h> #include <kwiversys/SystemTools.hxx> #include <vtkXMLImageDataWriter.h> #include <vtkImageData.h> #include <vtkImageFlip.h> #include <vtkImageImport.h> #include <vtkImageReader2.h> #include <vtkImageReader2Collection.h> #include <vtkImageReader2Factory.h> #include <vtkNew.h> #include <vtkPolyData.h> #include <vtkSmartPointer.h> #include <vtkXMLImageDataReader.h> #include <qtEnumerate.h> #include <qtIndexRange.h> #include <qtMath.h> #include <qtStlUtil.h> #include <qtUiState.h> #include <qtUiStateItem.h> #include <QtGui/QApplication> #include <QtGui/QColorDialog> #include <QtGui/QDesktopServices> #include <QtGui/QFileDialog> #include <QtGui/QMessageBox> #include <QtGui/qpushbutton.h> #include <QtCore/QDebug> #include <QtCore/QQueue> #include <QtCore/QSignalMapper> #include <QtCore/QTimer> #include <QtCore/QUrl> /////////////////////////////////////////////////////////////////////////////// //BEGIN miscellaneous helpers namespace // anonymous { 
//----------------------------------------------------------------------------- kwiver::vital::path_t kvPath(QString const& s) { return stdString(s); } QString findUserManual() { static auto const name = "telesculptor.html"; static auto const product = "maptk"; static auto const version = MAPTK_VERSION; auto const& prefix = QFileInfo(QApplication::applicationFilePath()).dir().absoluteFilePath(".."); auto locations = QStringList(); // Install location locations.append(QString("%1/share/doc/%2-%3").arg(prefix, product, version)); // Build location locations.append(QString("%1/doc").arg(prefix)); foreach (auto const& path, locations) { auto const fi = QFileInfo(QString("%1/user/%2").arg(path, name)); if (fi.exists()) { // Found manual return fi.canonicalFilePath(); } } // Manual not found return QString(); } //----------------------------------------------------------------------------- QSet<QString> supportedImageExtensions() { QSet<QString> result; auto const whitespace = QRegExp("\\s"); // Get registered readers vtkNew<vtkImageReader2Collection> readers; vtkImageReader2Factory::GetRegisteredReaders(readers.GetPointer()); // Extract extensions for each reader readers->InitTraversal(); while (auto const reader = readers->GetNextItem()) { auto const extensionList = QString::fromLocal8Bit(reader->GetFileExtensions()); auto const& extensions = extensionList.split(whitespace, QString::SkipEmptyParts); foreach (auto const& ext, extensions) { result.insert(ext.mid(1).toLower()); } } return result; } QSet<QString> supportedVideoExtensions() { QSet<QString> result; // For now just add some common extensions we expect to encounter result.insert("mpeg"); result.insert("mpg"); result.insert("mp4"); result.insert("avi"); result.insert("wmw"); result.insert("mov"); return result; } //----------------------------------------------------------------------------- QString makeFilters(QStringList extensions) { auto result = QStringList(); foreach (auto const& extension, extensions) { 
result.append("*." + extension); } return result.join(" "); } //----------------------------------------------------------------------------- template <typename T> class StateValue : public qtUiState::AbstractItem { public: StateValue(T const& defaultValue = T{}) : data{defaultValue} {} operator T() const { return this->data; } StateValue& operator=(T const& newValue) { this->data = newValue; return *this; } virtual QVariant value() const QTE_OVERRIDE { return QVariant::fromValue(this->data); } virtual void setValue(QVariant const& newValue) QTE_OVERRIDE { this->data = newValue.value<T>(); } protected: T data; }; } // namespace <anonymous> //END miscellaneous helpers /////////////////////////////////////////////////////////////////////////////// //BEGIN MainWindowPrivate //----------------------------------------------------------------------------- class MainWindowPrivate { public: // Data structures struct FrameData { int id; vtkSmartPointer<vtkMaptkCamera> camera; QString depthMapPath; // Full path to depth map data }; // Methods MainWindowPrivate() : activeTool(0) , toolUpdateActiveFrame(-1) , framesToSkip(1) , activeDepthFrame(-1) , activeCameraIndex(-1) {} void addTool(AbstractTool* tool, MainWindow* mainWindow); void addCamera(kwiver::vital::camera_perspective_sptr const& camera); void addImage(QString const& imagePath); void addVideoSource(kwiver::vital::config_block_sptr const& config, QString const& videoPath); void addFrame(kwiver::vital::camera_perspective_sptr const& camera, int id); kwiver::vital::camera_map_sptr cameraMap() const; void updateCameras(kwiver::vital::camera_map_sptr const&); bool updateCamera(kwiver::vital::frame_id_t frame, kwiver::vital::camera_perspective_sptr cam); void setActiveCamera(int); void updateCameraView(); vtkSmartPointer<vtkImageData> vitalToVtkImage(kwiver::vital::image& img); std::string getFrameName(kwiver::vital::frame_id_t frame); void loadImage(FrameData frame); void loadEmptyImage(vtkMaptkCamera* camera); void 
loadDepthMap(QString const& imagePath); void setActiveTool(AbstractTool* tool); // Member variables Ui::MainWindow UI; Am::MainWindow AM; qtUiState uiState; StateValue<QColor>* viewBackgroundColor; QTimer slideTimer; QSignalMapper toolDispatcher; QAction* toolSeparator; QMenu* toolMenu; AbstractTool* activeTool; QList<AbstractTool*> tools; int toolUpdateActiveFrame; kwiver::vital::camera_map_sptr toolUpdateCameras; kwiver::vital::landmark_map_sptr toolUpdateLandmarks; kwiver::vital::feature_track_set_sptr toolUpdateTracks; vtkSmartPointer<vtkImageData> toolUpdateDepth; QString videoPath; kwiver::vital::algo::video_input_sptr videoSource; kwiver::vital::timestamp currentVideoTimestamp; kwiver::vital::metadata_map::map_metadata_t videoMetadataMap; kwiver::vital::frame_id_t framesToSkip; QList<FrameData> frames; kwiver::vital::feature_track_set_sptr tracks; kwiver::vital::landmark_map_sptr landmarks; vtkSmartPointer<vtkImageData> activeDepth; int activeDepthFrame; kwiver::maptk::local_geo_cs localGeoCs; int activeCameraIndex; // Frames without a camera QQueue<int> orphanFrames; vtkNew<vtkXMLImageDataReader> depthReader; vtkNew<vtkMaptkImageUnprojectDepth> depthFilter; vtkNew<vtkMaptkImageDataGeometryFilter> depthGeometryFilter; // Current project std::shared_ptr<Project> currProject; }; QTE_IMPLEMENT_D_FUNC(MainWindow) //----------------------------------------------------------------------------- void MainWindowPrivate::addTool(AbstractTool* tool, MainWindow* mainWindow) { this->toolMenu->insertAction(this->toolSeparator, tool); this->toolDispatcher.setMapping(tool, tool); QObject::connect(tool, SIGNAL(triggered()), &this->toolDispatcher, SLOT(map())); QObject::connect(tool, SIGNAL(updated(std::shared_ptr<ToolData>)), mainWindow, SLOT(acceptToolResults(std::shared_ptr<ToolData>))); QObject::connect(tool, SIGNAL(completed()), mainWindow, SLOT(acceptToolFinalResults())); tool->setEnabled(false); this->tools.append(tool); } 
//-----------------------------------------------------------------------------
// Attach a camera to the oldest frame that is still missing one; if there
// are no orphan frames, append a brand-new frame at the end instead.
void MainWindowPrivate::addCamera(kwiver::vital::camera_perspective_sptr const& camera)
{
  if (this->orphanFrames.isEmpty())
  {
    // No camera-less frame to fill; append as a new frame (ids are 1-based)
    this->addFrame(camera, this->frames.count() + 1);
    return;
  }

  auto& fd = this->frames[this->orphanFrames.dequeue()];

  fd.camera = vtkSmartPointer<vtkMaptkCamera>::New();
  fd.camera->SetCamera(camera);
  fd.camera->Update();

  this->UI.worldView->addCamera(fd.id, fd.camera);
  if (fd.id == this->activeCameraIndex)
  {
    // The newly-filled frame is the one being displayed; refresh views
    this->UI.worldView->setActiveCamera(fd.id);
    this->updateCameraView();
  }
}

//-----------------------------------------------------------------------------
void MainWindowPrivate::addImage(QString const& imagePath)
{
  // TODO: Create/manage image list video source
}

//-----------------------------------------------------------------------------
// Build (or rebuild) the video source from the given config block, read its
// metadata, optionally initialize per-frame cameras from that metadata, and
// append a FrameData entry for every video frame not yet in the list.
void MainWindowPrivate::addVideoSource(kwiver::vital::config_block_sptr const& config,
                                       QString const& videoPath)
{
  // Save the configuration so independent video sources can be created for tools
  if (this->currProject)
  {
    this->currProject->projectConfig = config;
  }
  this->videoPath = videoPath;
  // Close the existing video source if it exists
  if(this->videoSource)
  {
    this->videoSource->close();
  }
  kwiver::vital::algo::video_input::
    set_nested_algo_configuration("video_reader", config, this->videoSource);

  using kwiver::vital::vector_2d;

  // Base intrinsics: values from config, falling back to vital defaults
  kwiver::vital::simple_camera_intrinsics K_def;
  const std::string bc = "video_reader:base_camera:";
  auto K = std::make_shared<kwiver::vital::simple_camera_intrinsics>(
    config->get_value<double>(bc + "focal_length", K_def.focal_length()),
    config->get_value<vector_2d>(bc + "principal_point", K_def.principal_point()),
    config->get_value<double>(bc + "aspect_ratio", K_def.aspect_ratio()),
    config->get_value<double>(bc + "skew", K_def.skew()));

  try
  {
    if (this->videoSource)
    {
      this->videoSource->open(videoPath.toStdString());
    }

    // Set the skip value if present
    if (this->videoSource &&
        config->has_value("video_reader:vidl_ffmpeg:output_nth_frame"))
    {
      this->framesToSkip =
        config->get_value<int>("video_reader:vidl_ffmpeg:output_nth_frame");
    }

    // Get the video metadata
    if (this->videoSource)
    {
      videoMetadataMap = this->videoSource->metadata_map()->metadata();
    }

    // If we have metadata try and initialize cameras
    std::map<kwiver::vital::frame_id_t, kwiver::vital::camera_sptr> camMap;
    if (videoMetadataMap.size() > 0)
    {
      std::map<kwiver::vital::frame_id_t, kwiver::vital::metadata_sptr> mdMap;

      for (auto const& mdIter: this->videoMetadataMap)
      {
        // TODO: just using first element of metadata vector for now
        mdMap[mdIter.first] = mdIter.second[0];
      }

      bool init_cams_with_metadata =
        config->get_value<bool>("initialize_cameras_with_metadata", true);

      if (init_cams_with_metadata)
      {
        auto im = this->videoSource->frame_image();
        auto baseCamera = kwiver::vital::simple_camera_perspective();
        baseCamera.set_intrinsics(K);

        bool init_intrinsics_with_metadata =
          config->get_value<bool>("initialize_intrinsics_with_metadata", true);
        if (init_intrinsics_with_metadata)
        {
          kwiver::maptk::set_intrinsics_from_metadata(baseCamera, mdMap, im);
        }

        camMap = kwiver::maptk::initialize_cameras_with_metadata(
          mdMap, baseCamera, this->localGeoCs);
      }
    }

    // Add frames for video if needed
    auto numFrames = this->videoSource->num_frames();
    for (unsigned int i = this->frames.count(); i < numFrames; ++i)
    {
      // frames start at 1, list index starts at 0
      auto frameIdx = i + 1;
      if (camMap.find(frameIdx) != camMap.end())
      {
        using kwiver::vital::simple_camera_perspective;
        auto cam_ptr =
          std::dynamic_pointer_cast<simple_camera_perspective>(camMap[frameIdx]);
        this->addFrame(cam_ptr, frameIdx);
      }
      else
      {
        // No metadata-derived camera: remember the list index so a later
        // addCamera() call can fill it in
        this->orphanFrames.enqueue(i);
        this->addFrame(nullptr, frameIdx);
      }
    }
  }
  catch (kwiver::vital::file_not_found_exception const& e)
  {
    // Video file missing: warn and tear down the half-opened source
    qWarning() << e.what();
    this->videoSource->close();
    this->videoSource.reset();
  }
}

//-----------------------------------------------------------------------------
void
MainWindowPrivate::addFrame( kwiver::vital::camera_perspective_sptr const& camera, int id) { FrameData cd; cd.id = id; if (camera) { this->orphanFrames.clear(); cd.camera = vtkSmartPointer<vtkMaptkCamera>::New(); cd.camera->SetCamera(camera); cd.camera->Update(); this->UI.worldView->addCamera(cd.id, cd.camera); this->UI.actionExportCameras->setEnabled(true); } this->frames.append(cd); this->UI.camera->setRange(1, this->frames.count()); this->UI.cameraSpin->setRange(1, this->frames.count()); // When the first camera is added, show it immediately and reset the camera // view, and enable slideshow controls if (this->frames.count() == 1) { this->UI.actionSlideshowPlay->setEnabled(true); this->UI.camera->setEnabled(true); this->UI.cameraSpin->setEnabled(true); this->setActiveCamera(1); this->UI.cameraView->resetView(); } } //----------------------------------------------------------------------------- kwiver::vital::camera_map_sptr MainWindowPrivate::cameraMap() const { kwiver::vital::camera_map::map_camera_t map; foreach (auto i, qtIndexRange(this->frames.count())) { auto const& cd = this->frames[i]; if (cd.camera) { map.insert(std::make_pair(static_cast<kwiver::vital::frame_id_t>(cd.id), cd.camera->GetCamera())); } } return std::make_shared<kwiver::vital::simple_camera_map>(map); } //----------------------------------------------------------------------------- void MainWindowPrivate::updateCameras( kwiver::vital::camera_map_sptr const& cameras) { auto allowExport = false; foreach (auto const& iter, cameras->cameras()) { using kwiver::vital::camera_perspective; auto cam_ptr = std::dynamic_pointer_cast<camera_perspective>(iter.second); if (updateCamera(iter.first, cam_ptr)) { allowExport = allowExport || iter.second; } } this->UI.actionExportCameras->setEnabled(allowExport); } //----------------------------------------------------------------------------- bool MainWindowPrivate::updateCamera(kwiver::vital::frame_id_t frame, kwiver::vital::camera_perspective_sptr cam) { 
if (frame > 0 && frame <= this->frames.count() && cam) { auto& cd = this->frames[frame - 1]; if (!cd.camera) { cd.camera = vtkSmartPointer<vtkMaptkCamera>::New(); this->UI.worldView->addCamera(cd.id, cd.camera); } cd.camera->SetCamera(cam); cd.camera->Update(); if (cd.id == this->activeCameraIndex) { this->UI.worldView->setActiveCamera(cd.id); this->updateCameraView(); } return true; } return false; } //----------------------------------------------------------------------------- void MainWindowPrivate::setActiveCamera(int id) { //if only keyframes are to be displayed in the camera view bool only_keyframes = this->UI.actionKeyframesOnly->isChecked(); bool next_frame_found = false; if (id >= this->activeCameraIndex) { //positive movement in sequence //find the next keyframe in the sequence while (id <= this->frames.size()) { if (only_keyframes) { auto fd = std::dynamic_pointer_cast<kwiver::vital::feature_track_set_frame_data>( tracks->frame_data(id)); if (fd && fd->is_keyframe) { next_frame_found = true; break; } } else { if (id == 1 || (id - 1)%this->framesToSkip == 0) { next_frame_found = true; break; } } ++id; } } else { //going backward in sequence //find the previous keyframe in the sequence while (id >= 1) { if (only_keyframes) { auto fd = std::dynamic_pointer_cast<kwiver::vital::feature_track_set_frame_data>( tracks->frame_data(id)); if (fd && fd->is_keyframe) { next_frame_found = true; break; } } else { if (id == 1 || (id - 1)%this->framesToSkip ==0) { next_frame_found = true; break; } } --id; } } if (!next_frame_found) { // There was not a keyframe to move to in the direction we're going. // So set the active camera back to what it was. 
this->UI.camera->setValue(this->activeCameraIndex); this->UI.cameraSpin->setValue(this->activeCameraIndex); return; } auto oldSignalState = this->UI.camera->blockSignals(true); this->UI.camera->setValue(id); this->UI.camera->blockSignals(oldSignalState); this->UI.cameraSpin->setValue(id); this->activeCameraIndex = id; this->UI.worldView->setActiveCamera(id); this->updateCameraView(); //load from memory if cached if (id == this->activeDepthFrame) { this->depthReader->SetFileName(""); this->depthFilter->RemoveAllInputConnections(0); this->depthFilter->SetInputData(this->activeDepth); this->UI.depthMapView->setValidDepthInput(true); this->UI.worldView->setValidDepthInput(true); this->depthFilter->SetCamera(this->frames[id - 1].camera); this->UI.worldView->updateDepthMap(); this->UI.depthMapView->updateView(true); this->UI.actionExportDepthPoints->setEnabled(true); } else // load from file { auto& cd = this->frames[id - 1]; if (!cd.depthMapPath.isEmpty()) { this->loadDepthMap(cd.depthMapPath); } } // TODO: Uncomment once MeshColoration is working directly off video frames // UI.worldView->setVolumeCurrentFramePath(cd.imagePath); } //----------------------------------------------------------------------------- void MainWindowPrivate::updateCameraView() { if (this->activeCameraIndex < 1) { this->loadEmptyImage(0); this->UI.cameraView->setActiveFrame(static_cast<unsigned>(-1)); this->UI.cameraView->clearLandmarks(); return; } this->UI.cameraView->setActiveFrame( static_cast<unsigned>(this->activeCameraIndex)); QHash<kwiver::vital::track_id_t, kwiver::vital::vector_2d> landmarkPoints; auto const& frame = this->frames[this->activeCameraIndex-1]; // Show camera image this->loadImage(frame); if (!frame.camera) { // Can't show landmarks or residuals with no camera this->UI.cameraView->clearLandmarks(); this->UI.cameraView->clearResiduals(); return; } // Show landmarks this->UI.cameraView->clearLandmarks(); if (this->landmarks) { // Map landmarks to camera space auto const& 
landmarks = this->landmarks->landmarks(); foreach (auto const& lm, landmarks) { double pp[2]; if (frame.camera->ProjectPoint(lm.second->loc(), pp)) { // Add projected landmark to camera view auto const id = lm.first; this->UI.cameraView->addLandmark(id, pp[0], pp[1]); landmarkPoints.insert(id, kwiver::vital::vector_2d(pp[0], pp[1])); } } } // Show residuals this->UI.cameraView->clearResiduals(); if (this->tracks) { auto const& tracks = this->tracks->tracks(); foreach (auto const& track, tracks) { auto const& state = track->find(this->activeCameraIndex); if ( state == track->end() ) { continue; } auto fts = std::dynamic_pointer_cast<kwiver::vital::feature_track_state>(*state); if ( fts && fts->feature) { auto const id = track->id(); if (landmarkPoints.contains(id)) { auto const& fp = fts->feature->loc(); auto const& lp = landmarkPoints[id]; this->UI.cameraView->addResidual(id, fp[0], fp[1], lp[0], lp[1]); } } } } } //----------------------------------------------------------------------------- // TODO: move this method to a new implementation of image_container in a new // vtk arrow vtkSmartPointer<vtkImageData> MainWindowPrivate::vitalToVtkImage(kwiver::vital::image& img) { auto imgTraits = img.pixel_traits(); // Get the image type int imageType = VTK_VOID; switch (imgTraits.type) { case kwiver::vital::image_pixel_traits::UNSIGNED: imageType = VTK_UNSIGNED_CHAR; break; case kwiver::vital::image_pixel_traits::SIGNED: imageType = VTK_SIGNED_CHAR; break; case kwiver::vital::image_pixel_traits::FLOAT: imageType = VTK_FLOAT; break; default: imageType = VTK_VOID; break; // TODO: exception or error/warning message? 
} // convert to vtkFrameData vtkSmartPointer<vtkImageImport> imageImport = vtkSmartPointer<vtkImageImport>::New(); imageImport->SetDataScalarType(imageType); imageImport->SetNumberOfScalarComponents(img.depth()); imageImport->SetWholeExtent(0, img.width()-1, 0, img.height()-1, 0, 0); imageImport->SetDataExtentToWholeExtent(); imageImport->SetImportVoidPointer(img.first_pixel()); imageImport->Update(); // Flip image so it has the correct axis for VTK vtkSmartPointer<vtkImageFlip> flipFilter = vtkSmartPointer<vtkImageFlip>::New(); flipFilter->SetFilteredAxis(1); // flip x axis flipFilter->SetInputConnection(imageImport->GetOutputPort()); flipFilter->Update(); return flipFilter->GetOutput(); } std::string MainWindowPrivate::getFrameName(kwiver::vital::frame_id_t frameId) { return frameName(frameId, this->videoMetadataMap); } void MainWindowPrivate::loadEmptyImage(vtkMaptkCamera* camera) { auto imageDimensions = QSize(1, 1); if (camera) { int w, h; camera->GetImageDimensions(w, h); imageDimensions = QSize(w, h); } this->UI.cameraView->setImageData(0, imageDimensions); this->UI.worldView->setImageData(0, imageDimensions); } //----------------------------------------------------------------------------- void MainWindowPrivate::loadImage(FrameData frame) { // TODO: check if seek vs next_frame is needed if (frame.id != this->currentVideoTimestamp.get_frame()) { if (!this->videoSource || !videoSource->seek_frame(this->currentVideoTimestamp, frame.id)) { this->loadEmptyImage(frame.camera); } } // Get frame from video source if (this->videoSource) { // Advance video source if it hasn't been advanced if (!videoSource->good()) { videoSource->next_frame(this->currentVideoTimestamp); } kwiver::vital::image frameImg; auto sourceImg = videoSource->frame_image()->get_image(); // If image is interlaced it is already compatible with VTK if (sourceImg.d_step() == 1) { frameImg = sourceImg; } // Otherwise we need a deep copy to get it to be interlaced else { frameImg = 
kwiver::vital::image(sourceImg.width(), sourceImg.height(), sourceImg.depth(), true); frameImg.copy_from(sourceImg); } auto imageData = this->vitalToVtkImage(frameImg); int dimensions[3]; imageData->GetDimensions(dimensions); // Test for errors if (dimensions[0] < 2 || dimensions[1] < 2) { qWarning() << "Failed to read image for frame " << frame.id; this->loadEmptyImage(frame.camera); } else { // If successful, update camera image dimensions if (frame.camera) { frame.camera->SetImageDimensions(dimensions); } // Set frame name in camera view std::string frameName = this->getFrameName(frame.id); this->UI.cameraView->setImagePath(QString::fromStdString(frameName)); // Set image on views auto const size = QSize(dimensions[0], dimensions[1]); this->UI.cameraView->setImageData(imageData, size); this->UI.worldView->setImageData(imageData, size); } } else { this->loadEmptyImage(frame.camera); } } //----------------------------------------------------------------------------- void MainWindowPrivate::loadDepthMap(QString const& imagePath) { if (!kwiversys::SystemTools::FileExists(qPrintable(imagePath), true)) { qWarning() << "File doesn't exist: " << imagePath; return; } if (this->depthReader->GetFileName() && !strcmp(this->depthReader->GetFileName(), qPrintable(imagePath))) { // No change to reader input... 
return without any update return; } this->depthFilter->RemoveAllInputs(); this->depthFilter->SetInputConnection(this->depthReader->GetOutputPort()); this->depthReader->SetFileName(qPrintable(imagePath)); this->UI.depthMapView->setValidDepthInput(true); this->UI.worldView->setValidDepthInput(true); this->depthFilter->SetCamera(this->frames[this->activeCameraIndex-1].camera); this->UI.worldView->updateDepthMap(); this->UI.depthMapView->updateView(true); this->UI.actionExportDepthPoints->setEnabled(true); } //----------------------------------------------------------------------------- void MainWindowPrivate::setActiveTool(AbstractTool* tool) { // Disconnect cancel action QObject::disconnect(this->UI.actionCancelComputation, 0, this->activeTool, 0); // Update current tool this->activeTool = tool; // Connect cancel action if (tool) { QObject::connect(this->UI.actionCancelComputation, SIGNAL(triggered()), tool, SLOT(cancel())); QObject::connect(this->UI.actionCancelComputation, SIGNAL(triggered()), currProject.get(), SLOT(write())); QObject::connect(tool, SIGNAL(completed()), currProject.get(), SLOT(write())); } auto const enableTools = !tool; auto const enableCancel = tool && tool->isCancelable(); foreach (auto const& tool, this->tools) { tool->setEnabled(enableTools); } this->UI.actionCancelComputation->setEnabled(enableCancel); this->UI.actionOpen->setEnabled(enableTools); } //END MainWindowPrivate /////////////////////////////////////////////////////////////////////////////// //BEGIN MainWindow //----------------------------------------------------------------------------- MainWindow::MainWindow(QWidget* parent, Qt::WindowFlags flags) : QMainWindow(parent, flags), d_ptr(new MainWindowPrivate) { QTE_D(); // Set up UI d->UI.setupUi(this); d->AM.setupActions(d->UI, this); d->toolMenu = d->UI.menuCompute; d->toolSeparator = d->UI.menuCompute->insertSeparator(d->UI.actionCancelComputation); d->addTool(new TrackFeaturesTool(this), this); d->addTool(new 
TriangulateTool(this), this); d->addTool(new BundleAdjustTool(this), this); d->addTool(new SaveFrameTool(this), this); d->addTool(new ComputeDepthTool(this), this); d->toolMenu = d->UI.menuAdvanced; d->toolSeparator = d->UI.menuAdvanced->addSeparator(); d->addTool(new TrackFeaturesSprokitTool(this), this); d->addTool(new NeckerReversalTool(this), this); d->addTool(new TrackFilterTool(this), this); d->addTool(new InitCamerasLandmarksTool(this), this); d->addTool(new SaveKeyFrameTool(this), this); d->addTool(new CanonicalTransformTool(this), this); d->UI.menuView->addSeparator(); d->UI.menuView->addAction(d->UI.cameraViewDock->toggleViewAction()); d->UI.menuView->addAction(d->UI.cameraSelectorDock->toggleViewAction()); d->UI.menuView->addAction(d->UI.depthMapViewDock->toggleViewAction()); d->UI.playSlideshowButton->setDefaultAction(d->UI.actionSlideshowPlay); d->UI.loopSlideshowButton->setDefaultAction(d->UI.actionSlideshowLoop); connect(d->UI.actionOpen, SIGNAL(triggered()), this, SLOT(openFile())); connect(d->UI.actionQuit, SIGNAL(triggered()), qApp, SLOT(quit())); connect(d->UI.actionNewProject, SIGNAL(triggered()), this, SLOT(newProject())); connect(d->UI.actionShowWorldAxes, SIGNAL(toggled(bool)), d->UI.worldView, SLOT(setAxesVisible(bool))); connect(d->UI.actionExportCameras, SIGNAL(triggered()), this, SLOT(saveCameras())); connect(d->UI.actionExportLandmarks, SIGNAL(triggered()), this, SLOT(saveLandmarks())); connect(d->UI.actionExportVolume, SIGNAL(triggered()), this, SLOT(saveVolume())); connect(d->UI.actionExportMesh, SIGNAL(triggered()), this, SLOT(saveMesh())); connect(d->UI.actionExportColoredMesh, SIGNAL(triggered()), this, SLOT(saveColoredMesh())); connect(d->UI.actionExportDepthPoints, SIGNAL(triggered()), this, SLOT(saveDepthPoints())); connect(d->UI.actionExportTracks, SIGNAL(triggered()), this, SLOT(saveTracks())); connect(d->UI.worldView, SIGNAL(depthMapEnabled(bool)), this, SLOT(enableSaveDepthPoints(bool))); connect(d->UI.actionShowMatchMatrix, 
SIGNAL(triggered()), this, SLOT(showMatchMatrix())); connect(&d->toolDispatcher, SIGNAL(mapped(QObject*)), this, SLOT(executeTool(QObject*))); connect(d->UI.actionSetBackgroundColor, SIGNAL(triggered()), this, SLOT(setViewBackroundColor())); connect(d->UI.actionAbout, SIGNAL(triggered()), this, SLOT(showAboutDialog())); connect(d->UI.actionShowManual, SIGNAL(triggered()), this, SLOT(showUserManual())); connect(&d->slideTimer, SIGNAL(timeout()), this, SLOT(nextSlide())); connect(d->UI.actionSlideshowPlay, SIGNAL(toggled(bool)), this, SLOT(setSlideshowPlaying(bool))); connect(d->UI.slideDelay, SIGNAL(valueChanged(int)), this, SLOT(setSlideDelay(int))); connect(d->UI.camera, SIGNAL(valueChanged(int)), this, SLOT(setActiveCamera(int))); connect(d->UI.worldView, SIGNAL(meshEnabled(bool)), this, SLOT(enableSaveMesh(bool))); connect(d->UI.worldView, SIGNAL(coloredMeshEnabled(bool)), this, SLOT(enableSaveColoredMesh(bool))); connect(d->UI.worldView, SIGNAL(depthMapThresholdsChanged()), d->UI.depthMapView, SLOT(updateThresholds())); connect(d->UI.depthMapViewDock, SIGNAL(visibilityChanged(bool)), d->UI.depthMapView, SLOT(updateView(bool))); this->setSlideDelay(d->UI.slideDelay->value()); #ifdef VTKWEBGLEXPORTER d->UI.actionWebGLScene->setVisible(true); connect(d->UI.actionWebGLScene, SIGNAL(triggered(bool)), this, SLOT(saveWebGLScene())); #endif // Set up UI persistence and restore previous state auto const sdItem = new qtUiState::Item<int, QSlider>( d->UI.slideDelay, &QSlider::value, &QSlider::setValue); d->uiState.map("SlideDelay", sdItem); d->viewBackgroundColor = new StateValue<QColor>{Qt::black}, d->uiState.map("ViewBackground", d->viewBackgroundColor); d->uiState.mapChecked("WorldView/Axes", d->UI.actionShowWorldAxes); d->uiState.mapState("Window/state", this); d->uiState.mapGeometry("Window/geometry", this); d->uiState.restore(); d->UI.worldView->setBackgroundColor(*d->viewBackgroundColor); d->UI.cameraView->setBackgroundColor(*d->viewBackgroundColor); 
d->UI.depthMapView->setBackgroundColor(*d->viewBackgroundColor); // Hookup basic depth pipeline and pass geometry filter to relevant views d->depthFilter->SetInputConnection(d->depthReader->GetOutputPort()); d->depthGeometryFilter->SetInputConnection(d->depthFilter->GetOutputPort()); d->UI.worldView->setDepthGeometryFilter(d->depthGeometryFilter.GetPointer()); d->UI.depthMapView->setDepthGeometryFilter(d->depthGeometryFilter.GetPointer()); d->UI.worldView->resetView(); } //----------------------------------------------------------------------------- MainWindow::~MainWindow() { QTE_D(); d->uiState.save(); } //----------------------------------------------------------------------------- void MainWindow::openFile() { static auto const imageFilters = makeFilters(supportedImageExtensions().toList()); static auto const videoFilters = makeFilters(supportedVideoExtensions().toList()); // TODO: Add image filters back once that is supported again. auto const paths = QFileDialog::getOpenFileNames( this, "Open File", QString(), "All Supported Files (*.conf *.txt *.ply *.krtd " + videoFilters + ");;" "Project configuration file (*.conf);;" "Video file (" + videoFilters + ");;" "Track file (*.txt);;" "Landmark file (*.ply);;" "Camera file (*.krtd);;" "All Files (*)"); if (!paths.isEmpty()) { this->openFiles(paths); } } //----------------------------------------------------------------------------- void MainWindow::openFile(QString const& path) { static auto const imageExtensions = supportedImageExtensions(); static auto const videoExtensions = supportedVideoExtensions(); auto const fi = QFileInfo(path); if (fi.suffix().toLower() == "conf") { this->loadProject(path); } else if (fi.suffix().toLower() == "txt") { this->loadTracks(path); } else if (fi.suffix().toLower() == "ply") { this->loadLandmarks(path); } else if (fi.suffix().toLower() == "krtd") { this->loadCamera(path); } else if (imageExtensions.contains(fi.suffix().toLower())) { this->loadImage(path); } else if 
(videoExtensions.contains(fi.suffix().toLower())) { this->loadVideo(path); } else { qWarning() << "Don't know how to read file" << path << "(unrecognized extension)"; } } //----------------------------------------------------------------------------- void MainWindow::openFiles(QStringList const& paths) { foreach (auto const& path, paths) { this->openFile(path); } } //----------------------------------------------------------------------------- void MainWindow::newProject() { QTE_D(); auto const dirname = QFileDialog::getExistingDirectory( this, "Select Project Directory"); if (!dirname.isEmpty()) { // Set the current working directory to the project directory if (!QDir::setCurrent(dirname)) { qWarning() << "Unable to set current working directory to " << "project directory: " << dirname; } d->currProject.reset(); d->currProject = std::shared_ptr<Project>(new Project(dirname)); if (d->videoSource) { d->currProject->videoPath = d->videoPath; auto config = readConfig("gui_video_reader.conf"); d->currProject->projectConfig->merge_config(config); } saveCameras(d->currProject->cameraPath); d->currProject->projectConfig->set_value("output_krtd_dir", kvPath( d->currProject->getContingentRelativePath(d->currProject->cameraPath))); if (!d->localGeoCs.origin().is_empty() && !d->currProject->geoOriginFile.isEmpty()) { saveGeoOrigin(d->currProject->geoOriginFile); } d->currProject->write(); } foreach (auto const& tool, d->tools) { tool->setEnabled(true); } } //----------------------------------------------------------------------------- void MainWindow::loadProject(QString const& path) { QTE_D(); d->currProject = std::make_shared<Project>(); if (!d->currProject->read(path)) { qWarning() << "Failed to load project from" << path; // TODO dialog? 
d->currProject.reset(); return; } // Set the current working directory to the project directory if (!QDir::setCurrent(d->currProject->workingDir.absolutePath())) { qWarning() << "Unable to set current working directory to " << "project directory: " << d->currProject->workingDir.absolutePath(); } // Get the video source if (d->currProject->projectConfig->has_value("video_reader:type")) { d->addVideoSource(d->currProject->projectConfig, d->currProject->videoPath); } // Load tracks if (d->currProject->projectConfig->has_value("input_track_file") || d->currProject->projectConfig->has_value("output_tracks_file")) { this->loadTracks(d->currProject->tracksPath); } // Load landmarks if (d->currProject->projectConfig->has_value("output_ply_file")) { this->loadLandmarks(d->currProject->landmarksPath); } // Load cameras if (d->currProject->projectConfig->has_value("output_krtd_dir")) { foreach (auto const& frame, d->frames) { auto frameName = QString::fromStdString(d->getFrameName(frame.id) + ".krtd"); try { auto const& camera = kwiver::vital::read_krtd_file( kvPath(frameName), kvPath(d->currProject->cameraPath)); // Add camera to scene d->updateCamera(frame.id, camera); } catch (...) 
{ qWarning() << "failed to read camera file " << frameName << " from " << d->currProject->cameraPath; } } } //find depth map paths if (d->currProject->projectConfig->has_value("output_depth_dir")) { foreach(auto & frame, d->frames) { auto depthName = QString::fromStdString(d->getFrameName(frame.id) + ".vti"); QString depthMapPath = QString::fromStdString(kvPath(d->currProject->depthPath) + '/' + kvPath(depthName)); QFileInfo check_file(depthMapPath); if (check_file.exists() && check_file.isFile()) { frame.depthMapPath = depthMapPath; } } } #ifdef VTKWEBGLEXPORTER d->UI.actionWebGLScene->setEnabled(true); #endif // Load volume if (d->currProject->projectConfig->has_value("volume_file")) { d->UI.worldView->loadVolume(d->currProject->volumePath, d->frames.size(), d->currProject->cameraPath, d->currProject->videoPath); } if (d->currProject->projectConfig->has_value("geo_origin_file")) { if (kwiversys::SystemTools::FileExists( d->currProject->geoOriginFile.toStdString(), true)) { kwiver::maptk::read_local_geo_cs_from_file( d->localGeoCs, d->currProject->geoOriginFile.toStdString()); } else { qWarning() << "Failed to open geo origin file " << d->currProject->geoOriginFile << ". 
File does not exist."; } } d->UI.worldView->queueResetView(); foreach (auto const& tool, d->tools) { tool->setEnabled(true); } d->setActiveCamera(d->activeCameraIndex); } //----------------------------------------------------------------------------- void MainWindow::loadImage(QString const& path) { QTE_D(); d->addImage(path); } void MainWindow::loadVideo(QString const& path) { QTE_D(); auto config = readConfig("gui_video_reader.conf"); if (d->currProject) { d->currProject->projectConfig->merge_config(config); d->currProject->videoPath = path; } try { d->addVideoSource(config, path); } catch (std::exception const& e) { QMessageBox::critical( this, "Error loading video\n", e.what()); } if (d->currProject) { saveCameras(d->currProject->cameraPath); d->currProject->projectConfig->set_value("output_krtd_dir", kvPath( d->currProject->getContingentRelativePath(d->currProject->cameraPath))); if (!d->localGeoCs.origin().is_empty() && !d->currProject->geoOriginFile.isEmpty()) { saveGeoOrigin(d->currProject->geoOriginFile); } d->currProject->write(); } d->UI.worldView->queueResetView(); } //----------------------------------------------------------------------------- void MainWindow::loadCamera(QString const& path) { QTE_D(); try { auto const& camera = kwiver::vital::read_krtd_file(kvPath(path)); d->addCamera(camera); } catch (...) 
{ qWarning() << "failed to read camera from" << path; } } //----------------------------------------------------------------------------- void MainWindow::loadTracks(QString const& path) { QTE_D(); try { using namespace kwiver::vital; auto tracks = read_feature_track_file(kvPath(path)); if (tracks) { // check for older zero-based track files if (tracks->first_frame() == 0) { qWarning() << "Loaded tracks have zero-based indexing, " "shifting to one-based indexing"; // shift tracks to start with frame one std::vector<track_sptr> new_tracks; for (auto track : tracks->tracks()) { auto new_track = track::create(track->data()); new_track->set_id(track->id()); for (auto ts : *track) { auto fts = std::dynamic_pointer_cast<feature_track_state>(ts); auto new_fts = std::make_shared<feature_track_state>(ts->frame()+1, fts->feature, fts->descriptor); new_track->append(new_fts); } new_tracks.push_back(new_track); } tracks = std::make_shared<feature_track_set>(new_tracks); } d->tracks = tracks; d->updateCameraView(); for (auto const& track : tracks->tracks()) { d->UI.cameraView->addFeatureTrack(*track); } d->UI.actionExportTracks->setEnabled( d->tracks && d->tracks->size()); d->UI.actionShowMatchMatrix->setEnabled(!tracks->tracks().empty()); d->UI.actionKeyframesOnly->setEnabled(!tracks->tracks().empty()); } } catch (std::exception const& e) { qWarning() << "failed to read tracks from" << path << " with error: " << e.what(); } } //----------------------------------------------------------------------------- void MainWindow::loadLandmarks(QString const& path) { QTE_D(); try { auto const& landmarks = kwiver::vital::read_ply_file(kvPath(path)); if (landmarks) { d->landmarks = landmarks; d->UI.worldView->setLandmarks(*landmarks); d->UI.cameraView->setLandmarksData(*landmarks); d->UI.actionExportLandmarks->setEnabled( d->landmarks && d->landmarks->size()); d->updateCameraView(); } } catch (...) 
{ qWarning() << "failed to read landmarks from" << path; } } //----------------------------------------------------------------------------- void MainWindow::saveLandmarks() { auto const path = QFileDialog::getSaveFileName( this, "Export Landmarks", QString(), "Landmark file (*.ply);;" "All Files (*)"); if (!path.isEmpty()) { this->saveLandmarks(path, false); } } //----------------------------------------------------------------------------- void MainWindow::saveLandmarks(QString const& path, bool writeToProject) { QTE_D(); try { kwiver::vital::write_ply_file(d->landmarks, kvPath(path)); if (writeToProject && d->currProject) { d->currProject->projectConfig->set_value("output_ply_file", kvPath( d->currProject->getContingentRelativePath(path))); } } catch (...) { auto const msg = QString("An error occurred while exporting landmarks to \"%1\". " "The output file may not have been written correctly."); QMessageBox::critical(this, "Export error", msg.arg(d->currProject->landmarksPath)); } } //----------------------------------------------------------------------------- void MainWindow::saveTracks() { auto const path = QFileDialog::getSaveFileName( this, "Export Tracks", QString(), "Track file (*.txt);;" "All Files (*)"); if (!path.isEmpty()) { this->saveTracks(path, false); } } //----------------------------------------------------------------------------- void MainWindow::saveTracks(QString const& path, bool writeToProject) { QTE_D(); try { kwiver::vital::write_feature_track_file(d->tracks, kvPath(path)); if (writeToProject && d->currProject) { d->currProject->projectConfig->set_value("output_tracks_file", kvPath( d->currProject->getContingentRelativePath(path))); } } catch (...) { auto const msg = QString("An error occurred while exporting tracks to \"%1\". 
" "The output file may not have been written correctly."); QMessageBox::critical(this, "Export error", msg.arg(path)); } } //----------------------------------------------------------------------------- void MainWindow::saveCameras() { auto const path = QFileDialog::getExistingDirectory(this, "Export Cameras"); if (!path.isEmpty()) { this->saveCameras(path, false); } } //----------------------------------------------------------------------------- void MainWindow::saveCameras(QString const& path, bool writeToProject) { QTE_D(); auto out = QHash<QString, kwiver::vital::camera_perspective_sptr>(); auto willOverwrite = QStringList(); foreach (auto i, qtIndexRange(d->frames.count())) { auto const& cd = d->frames[i]; if (cd.camera) { auto const camera = cd.camera->GetCamera(); if (camera) { auto cameraName = QString::fromStdString(d->getFrameName(cd.id) + ".krtd"); auto const filepath = d->currProject->cameraPath + "/" + cameraName; out.insert(filepath, camera); if (QFileInfo(filepath).exists()) { willOverwrite.append(filepath); } } } } // warn about overwriting files only if not auto-saving to the project if (!writeToProject && !willOverwrite.isEmpty()) { QMessageBox mb(QMessageBox::Warning, "Confirm overwrite", "One or more files will be overwritten by this operation. " "Do you wish to continue?", QMessageBox::Cancel, this); QAbstractButton* myOverwrite = mb.addButton("&Overwrite", QMessageBox::AcceptRole); mb.setDetailedText("The following file(s) will be overwritten:\n " + willOverwrite.join(" \n")); mb.exec(); if (mb.clickedButton() != myOverwrite) { // User canceled operation return; } } auto errors = QStringList(); foreach (auto const& iter, qtEnumerate(out)) { try { auto cam_ptr = std::dynamic_pointer_cast<kwiver::vital::camera_perspective>(iter.value()); kwiver::vital::write_krtd_file(*cam_ptr, kvPath(iter.key())); } catch (...) 
{ errors.append(iter.key()); } } if (writeToProject && d->currProject) { d->currProject->projectConfig->set_value("output_krtd_dir", kvPath( d->currProject->getContingentRelativePath(path))); } if (!errors.isEmpty()) { auto const msg = QString("Error(s) occurred while exporting cameras to \"%1\". " "One or more output files may not have been written correctly."); QMessageBox mb(QMessageBox::Critical, "Export error", msg.arg(d->currProject->cameraPath), QMessageBox::Ok, this); mb.setDetailedText("Error writing the following file(s):\n " + errors.join(" \n")); mb.exec(); } } //----------------------------------------------------------------------------- void MainWindow::saveDepthImage(QString const& path) { QTE_D(); if (!d->activeDepth || d->activeDepthFrame < 1) { return; } QString filename = QString::fromStdString(d->getFrameName(d->activeDepthFrame) + ".vti"); if (!QDir(path).exists()) { QDir().mkdir(path); } vtkNew<vtkXMLImageDataWriter> writerI; auto const filepath = path + "/" + filename; writerI->SetFileName(stdString(filepath).c_str()); writerI->AddInputDataObject(d->activeDepth.Get()); writerI->SetDataModeToBinary(); writerI->Write(); d->frames[d->activeDepthFrame].depthMapPath = filepath; } //----------------------------------------------------------------------------- void MainWindow::enableSaveDepthPoints(bool state) { QTE_D(); if (state && d->depthGeometryFilter->GetOutput()->GetNumberOfVerts() <= 0) { state = false; } d->UI.actionExportDepthPoints->setEnabled(state); } //----------------------------------------------------------------------------- void MainWindow::saveDepthPoints() { auto const path = QFileDialog::getSaveFileName( this, "Export Depth Point Cloud", QString(), "PLY file (*.ply);;" "All Files (*)"); if (!path.isEmpty()) { this->saveDepthPoints(path); } } //----------------------------------------------------------------------------- void MainWindow::saveDepthPoints(QString const& path) { QTE_D(); try { 
d->UI.worldView->saveDepthPoints(path); d->currProject->projectConfig->set_value("depthmaps_images_file", kvPath(d->currProject->getContingentRelativePath(path))); d->currProject->write(); } catch (...) { auto const msg = QString("An error occurred while exporting depth points to \"%1\". " "The output file may not have been written correctly."); QMessageBox::critical(this, "Export error", msg.arg(path)); } } void MainWindow::saveGeoOrigin(QString const& path) { QTE_D(); d->currProject->projectConfig->set_value("geo_origin_file", kvPath( d->currProject->getContingentRelativePath(path))); kwiver::maptk::write_local_geo_cs_to_file(d->localGeoCs, path.toStdString()); } //----------------------------------------------------------------------------- void MainWindow::saveWebGLScene() { #ifdef VTKWEBGLEXPORTER QTE_D(); auto const path = QFileDialog::getSaveFileName( this, "Export Scene to WebGL", QString(), "WebGL scene file (*.html);;" "All Files (*)"); if (!path.isEmpty()) { d->UI.worldView->exportWebGLScene(path); } #endif } //----------------------------------------------------------------------------- void MainWindow::enableSaveMesh(bool state) { QTE_D(); d->UI.actionExportVolume->setEnabled(state); d->UI.actionExportMesh->setEnabled(state); } //----------------------------------------------------------------------------- void MainWindow::enableSaveColoredMesh(bool state) { QTE_D(); d->UI.actionExportColoredMesh->setEnabled(state); } //----------------------------------------------------------------------------- void MainWindow::saveMesh() { QTE_D(); auto const path = QFileDialog::getSaveFileName( this, "Export Mesh", QString("mesh.vtp"), "Mesh file (*.vtp);;" "All Files (*)"); if (!path.isEmpty()) { d->UI.worldView->saveMesh(path); } } //----------------------------------------------------------------------------- void MainWindow::saveVolume() { QTE_D(); auto const path = QFileDialog::getSaveFileName( this, "Export Volume", QString("volume.vts"), "Mesh file 
(*.vts);;" "All Files (*)"); if (!path.isEmpty()) { d->UI.worldView->saveVolume(path); d->currProject->volumePath = d->currProject->getContingentRelativePath(path); d->currProject->projectConfig->set_value("volume_file", kvPath(d->currProject->volumePath)); d->currProject->write(); } } //----------------------------------------------------------------------------- void MainWindow::saveColoredMesh() { QTE_D(); auto const path = QFileDialog::getSaveFileName( this, "Export Colored Mesh", QString("colored_mesh.vtp"), "VTK Polydata (*.vtp);;" "PLY File (*.ply);;" "All Files (*)"); if (!path.isEmpty()) { d->UI.worldView->saveColoredMesh(path); } } //----------------------------------------------------------------------------- void MainWindow::setSlideDelay(int delayExp) { QTE_D(); static auto const ttFormat = QString("%1 (%2)").arg(d->UI.slideDelay->toolTip()); auto const de = static_cast<double>(delayExp) * 0.1; auto const delay = qRound(qPow(10.0, de)); d->slideTimer.setInterval(delay); if (delay < 1000) { auto const fps = 1e3 / delay; auto const dt = QString("%1 / sec").arg(fps, 0, 'f', 1); d->UI.slideDelay->setToolTip(ttFormat.arg(dt)); } else { auto const dt = QString("%1 sec").arg(delay / 1e3, 0, 'f', 1); d->UI.slideDelay->setToolTip(ttFormat.arg(dt)); } } //----------------------------------------------------------------------------- void MainWindow::setSlideshowPlaying(bool playing) { QTE_D(); if (playing) { if (d->UI.camera->value() == d->UI.camera->maximum()) { d->UI.camera->triggerAction(QAbstractSlider::SliderToMinimum); } d->slideTimer.start(); } else { d->slideTimer.stop(); } d->UI.camera->setEnabled(!playing); } //----------------------------------------------------------------------------- void MainWindow::nextSlide() { QTE_D(); if (d->UI.camera->value() == d->UI.camera->maximum()) { if (d->UI.actionSlideshowLoop->isChecked()) { d->UI.camera->triggerAction(QAbstractSlider::SliderToMinimum); } else { d->UI.actionSlideshowPlay->setChecked(false); } } else { 
d->UI.camera->triggerAction(QAbstractSlider::SliderSingleStepAdd); } } //----------------------------------------------------------------------------- void MainWindow::setActiveCamera(int id) { QTE_D(); if (id < 1 || id > d->frames.count()) { qDebug() << "MainWindow::setActiveCamera:" << " requested ID" << id << "is invalid"; return; } d->setActiveCamera(id); } //----------------------------------------------------------------------------- void MainWindow::executeTool(QObject* object) { QTE_D(); try { auto const tool = qobject_cast<AbstractTool*>(object); if (tool && !d->activeTool) { d->setActiveTool(tool); tool->setActiveFrame(d->activeCameraIndex); tool->setTracks(d->tracks); tool->setCameras(d->cameraMap()); tool->setLandmarks(d->landmarks); tool->setVideoPath(d->videoPath.toStdString()); tool->setConfig(d->currProject->projectConfig); if (!tool->execute()) { d->setActiveTool(0); } } } catch (std::exception const& e) { QString message("The tool failed with the following error:\n"); message += e.what(); QMessageBox::critical( this, "Error in Tool", message); } } //----------------------------------------------------------------------------- void MainWindow::acceptToolFinalResults() { QTE_D(); if (d->activeTool) { acceptToolResults(d->activeTool->data(), true); saveToolResults(); } d->setActiveTool(0); } //----------------------------------------------------------------------------- void MainWindow::acceptToolResults(std::shared_ptr<ToolData> data, bool isFinal) { QTE_D(); // if all the update variables are Null then trigger a GUI update after // extracting the data otherwise we've already triggered an update that // hasn't happened yet, so don't trigger another bool updateNeeded = !d->toolUpdateCameras && !d->toolUpdateLandmarks && !d->toolUpdateTracks && !d->toolUpdateDepth && d->toolUpdateActiveFrame < 0; if (d->activeTool) { auto const outputs = d->activeTool->outputs(); d->toolUpdateCameras = NULL; d->toolUpdateLandmarks = NULL; d->toolUpdateTracks = NULL; 
d->toolUpdateActiveFrame = -1; d->toolUpdateDepth = NULL; if (outputs.testFlag(AbstractTool::Cameras)) { d->toolUpdateCameras = data->cameras; } if (outputs.testFlag(AbstractTool::Landmarks)) { d->toolUpdateLandmarks = data->landmarks; } if (outputs.testFlag(AbstractTool::Tracks)) { d->toolUpdateTracks = data->tracks; } if (outputs.testFlag(AbstractTool::Depth)) { d->toolUpdateDepth = data->active_depth; } if (outputs.testFlag(AbstractTool::ActiveFrame)) { d->toolUpdateActiveFrame = static_cast<int>(data->activeFrame); } } if (isFinal) { updateToolResults(); //force immediate update on tool finish so we ensure update before saving } else if(updateNeeded) { QTimer::singleShot(1000, this, SLOT(updateToolResults())); } } //----------------------------------------------------------------------------- void MainWindow::saveToolResults() { QTE_D(); if (d->activeTool) { auto const outputs = d->activeTool->outputs(); if (outputs.testFlag(AbstractTool::Cameras)) { saveCameras(d->currProject->cameraPath); } if (outputs.testFlag(AbstractTool::Landmarks)) { saveLandmarks(d->currProject->landmarksPath); } if (outputs.testFlag(AbstractTool::Tracks)) { saveTracks(d->currProject->tracksPath); } if (!d->localGeoCs.origin().is_empty() && !d->currProject->geoOriginFile.isEmpty()) { saveGeoOrigin(d->currProject->geoOriginFile); } if (outputs.testFlag(AbstractTool::Depth)) { saveDepthImage(d->currProject->depthPath); d->currProject->projectConfig->set_value("output_depth_dir", kvPath( d->currProject->getContingentRelativePath(d->currProject->depthPath))); } d->currProject->write(); } } //----------------------------------------------------------------------------- void MainWindow::updateToolResults() { QTE_D(); if (d->toolUpdateCameras) { d->updateCameras(d->toolUpdateCameras); d->toolUpdateCameras = NULL; } if (d->toolUpdateLandmarks) { d->landmarks = d->toolUpdateLandmarks; d->UI.worldView->setLandmarks(*d->landmarks); d->UI.actionExportLandmarks->setEnabled( d->landmarks && 
d->landmarks->size()); d->toolUpdateLandmarks = NULL; } if (d->toolUpdateTracks) { d->tracks = d->toolUpdateTracks; d->UI.cameraView->clearFeatureTracks(); d->updateCameraView(); foreach (auto const& track, d->tracks->tracks()) { d->UI.cameraView->addFeatureTrack(*track); } d->UI.actionExportTracks->setEnabled( d->tracks && d->tracks->size()); d->UI.actionShowMatchMatrix->setEnabled(!d->tracks->tracks().empty()); d->UI.actionKeyframesOnly->setEnabled(!d->tracks->tracks().empty()); d->toolUpdateTracks = NULL; } if (d->toolUpdateDepth) { d->activeDepth = d->toolUpdateDepth; d->activeDepthFrame = d->toolUpdateActiveFrame; d->toolUpdateDepth = NULL; } if (d->toolUpdateActiveFrame >= 0) { d->UI.camera->setValue(d->toolUpdateActiveFrame); this->setActiveCamera(d->toolUpdateActiveFrame); d->toolUpdateActiveFrame = -1; } if (!d->frames.isEmpty()) { d->setActiveCamera(d->activeCameraIndex); } } //----------------------------------------------------------------------------- void MainWindow::showMatchMatrix() { QTE_D(); if (d->tracks) { // Get matrix auto frames = std::vector<kwiver::vital::frame_id_t>(); auto const mm = kwiver::arrows::match_matrix(d->tracks, frames); // Show window auto window = new MatchMatrixWindow(); window->setMatrix(mm, frames); window->show(); } } //----------------------------------------------------------------------------- void MainWindow::setViewBackroundColor() { QTE_D(); QColorDialog dlg; dlg.setCurrentColor(*d->viewBackgroundColor); if (dlg.exec() == QDialog::Accepted) { *d->viewBackgroundColor = dlg.currentColor(); d->UI.worldView->setBackgroundColor(*d->viewBackgroundColor); d->UI.cameraView->setBackgroundColor(*d->viewBackgroundColor); d->UI.depthMapView->setBackgroundColor(*d->viewBackgroundColor); } } //----------------------------------------------------------------------------- void MainWindow::showAboutDialog() { AboutDialog dlg(this); dlg.exec(); } //----------------------------------------------------------------------------- void 
MainWindow::showUserManual() { auto const path = findUserManual(); if (!path.isEmpty()) { auto const& uri = QUrl::fromLocalFile(path); QDesktopServices::openUrl(uri); } else { QMessageBox::information( this, "Not found", "The user manual could not be located. Please check your installation."); } } //END MainWindow
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/chromeos/login/enrollment/auto_enrollment_controller.h"

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/logging.h"
#include "base/strings/string_number_conversions.h"
#include "chrome/browser/browser_process.h"
#include "chrome/browser/chromeos/policy/device_cloud_policy_manager_chromeos.h"
#include "chromeos/chromeos_switches.h"
#include "components/policy/core/browser/browser_policy_connector.h"
#include "components/policy/core/common/cloud/device_management_service.h"
#include "net/url_request/url_request_context_getter.h"

namespace chromeos {

namespace {

// Returns the int value of the |switch_name| argument, clamped to the [0, 62]
// interval. Returns 0 if the argument doesn't exist or isn't an int value.
// NOTE(review): the upper clamp is actually
// policy::AutoEnrollmentClient::kMaximumPower, which the "[0, 62]" in this
// comment presumably matches — confirm against the client's declaration.
int GetSanitizedArg(const std::string& switch_name) {
  CommandLine* command_line = CommandLine::ForCurrentProcess();
  if (!command_line->HasSwitch(switch_name))
    return 0;
  std::string value = command_line->GetSwitchValueASCII(switch_name);
  int int_value;
  if (!base::StringToInt(value, &int_value)) {
    LOG(ERROR) << "Switch \"" << switch_name << "\" is not a valid int. "
               << "Defaulting to 0.";
    return 0;
  }
  if (int_value < 0) {
    LOG(ERROR) << "Switch \"" << switch_name << "\" can't be negative. "
               << "Using 0";
    return 0;
  }
  if (int_value > policy::AutoEnrollmentClient::kMaximumPower) {
    LOG(ERROR) << "Switch \"" << switch_name << "\" can't be greater than "
               << policy::AutoEnrollmentClient::kMaximumPower << ". Using "
               << policy::AutoEnrollmentClient::kMaximumPower;
    return policy::AutoEnrollmentClient::kMaximumPower;
  }
  return int_value;
}

}  // namespace

// Accepted values for the forced re-enrollment command-line switch.
const char AutoEnrollmentController::kForcedReEnrollmentAlways[] = "always";
const char AutoEnrollmentController::kForcedReEnrollmentLegacy[] = "legacy";
const char AutoEnrollmentController::kForcedReEnrollmentNever[] = "never";
const char AutoEnrollmentController::kForcedReEnrollmentOfficialBuild[] =
    "official";

// Maps the forced re-enrollment command-line switch (if present) to an
// operating mode. Without the switch, legacy auto-enrollment is used; an
// empty or "official" value enables forced re-enrollment only on official
// builds.
AutoEnrollmentController::Mode AutoEnrollmentController::GetMode() {
  CommandLine* command_line = CommandLine::ForCurrentProcess();
  if (!command_line->HasSwitch(switches::kEnterpriseEnableForcedReEnrollment))
    return MODE_LEGACY_AUTO_ENROLLMENT;
  std::string command_line_mode = command_line->GetSwitchValueASCII(
      switches::kEnterpriseEnableForcedReEnrollment);
  if (command_line_mode == kForcedReEnrollmentAlways) {
    return MODE_FORCED_RE_ENROLLMENT;
  } else if (command_line_mode.empty() ||
             command_line_mode == kForcedReEnrollmentOfficialBuild) {
#if defined(OFFICIAL_BUILD)
    return MODE_FORCED_RE_ENROLLMENT;
#else
    return MODE_NONE;
#endif
  } else if (command_line_mode == kForcedReEnrollmentLegacy) {
    return MODE_LEGACY_AUTO_ENROLLMENT;
  }
  // Any other value (including kForcedReEnrollmentNever) disables the check.
  return MODE_NONE;
}

AutoEnrollmentController::AutoEnrollmentController()
    : state_(policy::AUTO_ENROLLMENT_STATE_IDLE),
      weak_factory_(this) {}

AutoEnrollmentController::~AutoEnrollmentController() {}

void AutoEnrollmentController::Start() {
  // This method is called at the point in the OOBE/login flow at which the
  // auto-enrollment check can start. This happens either after the EULA is
  // accepted, or right after a reboot if the EULA has already been accepted.

  // Do not communicate auto-enrollment data to the server if
  //   1. we are running integration or perf tests with telemetry.
  //   2. modulus configuration is not present.
  //   3. Auto-enrollment is disabled via the command line.
  CommandLine* command_line = CommandLine::ForCurrentProcess();
  if (command_line->HasSwitch(chromeos::switches::kOobeSkipPostLogin) ||
      (!command_line->HasSwitch(
           chromeos::switches::kEnterpriseEnrollmentInitialModulus) &&
       !command_line->HasSwitch(
           chromeos::switches::kEnterpriseEnrollmentModulusLimit)) ||
      GetMode() == MODE_NONE) {
    VLOG(1) << "Auto-enrollment disabled.";
    UpdateState(policy::AUTO_ENROLLMENT_STATE_NO_ENROLLMENT);
    return;
  }

  // If there already is a client, bail out.
  if (client_)
    return;

  // Start by checking if the device has already been owned.
  UpdateState(policy::AUTO_ENROLLMENT_STATE_PENDING);
  // Invalidate any pending ownership-status callback from a previous Start()
  // so only the latest request can complete.
  weak_factory_.InvalidateWeakPtrs();
  DeviceSettingsService::Get()->GetOwnershipStatusAsync(
      base::Bind(&AutoEnrollmentController::OnOwnershipStatusCheckDone,
                 weak_factory_.GetWeakPtr()));
}

void AutoEnrollmentController::Cancel() {
  if (client_) {
    // Cancelling the |client_| allows it to determine whether
    // its protocol finished before login was complete.
    // Ownership is released here; the client deletes itself asynchronously.
    client_.release()->CancelAndDeleteSoon();
  }
}

// Asks the active client (if any) to retry the auto-enrollment protocol.
void AutoEnrollmentController::Retry() {
  if (client_)
    client_->Retry();
}

// Registers |callback| to be invoked on every state change; the returned
// subscription unregisters the callback when destroyed.
scoped_ptr<AutoEnrollmentController::ProgressCallbackList::Subscription>
AutoEnrollmentController::RegisterProgressCallback(
    const ProgressCallbackList::CallbackType& callback) {
  return progress_callbacks_.Add(callback);
}

// Silent enrollment only applies to the legacy flow once the server has
// asked for enrollment.
bool AutoEnrollmentController::ShouldEnrollSilently() {
  return state_ == policy::AUTO_ENROLLMENT_STATE_TRIGGER_ENROLLMENT &&
         GetMode() == MODE_LEGACY_AUTO_ENROLLMENT;
}

// Continuation of Start(): runs once device ownership status is known and,
// for unowned devices, kicks off the AutoEnrollmentClient.
void AutoEnrollmentController::OnOwnershipStatusCheckDone(
    DeviceSettingsService::OwnershipStatus status) {
  if (status != DeviceSettingsService::OWNERSHIP_NONE) {
    // The device is already owned. No need for auto-enrollment checks.
    VLOG(1) << "Device already owned, skipping auto-enrollment check";
    UpdateState(policy::AUTO_ENROLLMENT_STATE_NO_ENROLLMENT);
    return;
  }

  policy::BrowserPolicyConnector* connector =
      g_browser_process->browser_policy_connector();
  policy::DeviceManagementService* service =
      connector->device_management_service();
  service->ScheduleInitialization(0);

  // Sanitize the modulus powers from the command line; the initial power may
  // never exceed the limit.
  int power_initial = GetSanitizedArg(
      chromeos::switches::kEnterpriseEnrollmentInitialModulus);
  int power_limit = GetSanitizedArg(
      chromeos::switches::kEnterpriseEnrollmentModulusLimit);
  if (power_initial > power_limit) {
    LOG(ERROR) << "Initial auto-enrollment modulus is larger than the limit, "
               << "clamping to the limit.";
    power_initial = power_limit;
  }

  // Forced re-enrollment identifies the device by its state key; the legacy
  // flow uses the machine ID instead.
  bool retrieve_device_state = false;
  std::string device_id;
  if (GetMode() == MODE_FORCED_RE_ENROLLMENT) {
    retrieve_device_state = true;
    device_id =
        policy::DeviceCloudPolicyManagerChromeOS::GetCurrentDeviceStateKey();
  } else {
    device_id = policy::DeviceCloudPolicyManagerChromeOS::GetMachineID();
  }

  // base::Unretained is used because |this| owns |client_|; the client is
  // cancelled/released before |this| is destroyed.
  client_.reset(new policy::AutoEnrollmentClient(
      base::Bind(&AutoEnrollmentController::UpdateState,
                 base::Unretained(this)),
      service,
      g_browser_process->local_state(),
      g_browser_process->system_request_context(),
      device_id,
      retrieve_device_state,
      power_initial,
      power_limit));

  VLOG(1) << "Starting auto-enrollment client.";
  client_->Start();
}

// Records the new protocol state and notifies all registered progress
// callbacks.
void AutoEnrollmentController::UpdateState(
    policy::AutoEnrollmentState new_state) {
  state_ = new_state;
  progress_callbacks_.Notify(state_);
}

}  // namespace chromeos
// ----------------------------------------------------------------------------------------------------- // Copyright (c) 2006-2019, Knut Reinert & Freie Universität Berlin // Copyright (c) 2016-2019, Knut Reinert & MPI für molekulare Genetik // This file may be used, modified and/or redistributed under the terms of the 3-clause BSD-License // shipped with this file and also available at: https://github.com/seqan/seqan3/blob/master/LICENSE // ----------------------------------------------------------------------------------------------------- #include <gtest/gtest.h> #include <seqan3/alignment/band/static_band.hpp> #include <seqan3/alignment/matrix/alignment_coordinate.hpp> #include <seqan3/alignment/matrix/alignment_optimum.hpp> #include <seqan3/alignment/pairwise/policy/affine_gap_banded_init_policy.hpp> #include <seqan3/alignment/scoring/gap_scheme.hpp> class affine_gap_banded_init_policy_mock : public seqan3::detail::affine_gap_banded_init_policy<affine_gap_banded_init_policy_mock> { public: using base_t = seqan3::detail::affine_gap_banded_init_policy<affine_gap_banded_init_policy_mock>; using base_t::init_origin_cell; using base_t::init_column_cell; using base_t::init_row_cell; using base_t::balance_leading_gaps; }; TEST(affine_gap_banded_init_policy, construction) { EXPECT_TRUE(std::is_default_constructible_v<affine_gap_banded_init_policy_mock>); EXPECT_TRUE(std::is_copy_constructible_v<affine_gap_banded_init_policy_mock>); EXPECT_TRUE(std::is_move_constructible_v<affine_gap_banded_init_policy_mock>); EXPECT_TRUE(std::is_copy_assignable_v<affine_gap_banded_init_policy_mock>); EXPECT_TRUE(std::is_move_assignable_v<affine_gap_banded_init_policy_mock>); EXPECT_TRUE(std::is_destructible_v<affine_gap_banded_init_policy_mock>); } TEST(affine_gap_banded_init_policy, init_origin_cell) { std::tuple cell{std::tuple{0, 0, std::ignore}, std::tuple{0, 0, std::ignore}}; std::tuple cache{std::tuple{0, 0, std::ignore}, -10, -1}; affine_gap_banded_init_policy_mock mock{}; 
mock.init_origin_cell(std::make_tuple(std::ref(cell), seqan3::alignment_coordinate{}, std::ignore), cache); int first; int second; std::tie(first, second, std::ignore) = std::get<0>(cell); EXPECT_EQ((std::tie(first, second)), (std::tuple{0, -10})); std::tie(first, second, std::ignore) = std::get<1>(cell); EXPECT_EQ((std::tie(first, second)), (std::tuple{0, 0})); std::tie(first, second, std::ignore) = std::get<0>(cache); EXPECT_EQ((std::tie(first, second)), (std::tuple{0, -10})); EXPECT_EQ(std::get<1>(cache), -10); EXPECT_EQ(std::get<2>(cache), -1); } TEST(affine_gap_banded_init_policy, init_column_cell) { std::tuple cell{std::tuple{0, -10, std::ignore}, std::tuple{0, 0, std::ignore}}; std::tuple cache{std::tuple{0, -10, std::ignore}, -10, -1}; affine_gap_banded_init_policy_mock mock{}; mock.init_column_cell(std::make_tuple(std::ref(cell), seqan3::alignment_coordinate{}, std::ignore), cache); int first; int second; std::tie(first, second, std::ignore) = std::get<0>(cell); EXPECT_EQ((std::tie(first, second)), (std::tuple{-10, -20})); std::tie(first, second, std::ignore) = std::get<1>(cell); EXPECT_EQ((std::tie(first, second)), (std::tuple{0, 0})); std::tie(first, second, std::ignore) = std::get<0>(cache); EXPECT_EQ((std::tie(first, second)), (std::tuple{0, -11})); EXPECT_EQ(std::get<1>(cache), -10); EXPECT_EQ(std::get<2>(cache), -1); } TEST(affine_gap_banded_init_policy, init_row_cell) { std::tuple cell{std::tuple{0, 0, std::ignore}, std::tuple{0, -10, std::ignore}}; std::tuple cache{std::tuple{0, 0, std::ignore}, -10, -1}; affine_gap_banded_init_policy_mock mock{}; mock.init_row_cell(std::make_tuple(std::ref(cell), seqan3::alignment_coordinate{}, std::ignore), cache); int first; int second; std::tie(first, second, std::ignore) = std::get<0>(cell); EXPECT_EQ((std::tie(first, second)), (std::tuple{-10, -11})); std::tie(first, second, std::ignore) = std::get<1>(cell); EXPECT_EQ((std::tie(first, second)), (std::tuple{0, -10})); std::tie(first, second, std::ignore) = 
std::get<0>(cache); EXPECT_EQ((std::tie(first, second)), (std::tuple{0, -20})); EXPECT_EQ(std::get<1>(cache), -10); EXPECT_EQ(std::get<2>(cache), -1); } // TODO Templatize TEST(affine_gap_banded_init_policy, balance_leading_gaps) { using namespace seqan3; // static_band band{lower_bound{-3}, upper_bound{3}}; gap_scheme scheme{gap_score{-1}, gap_open_score{-10}}; affine_gap_banded_init_policy_mock mock{}; detail::alignment_optimum<int> total; total.score = 0; mock.balance_leading_gaps(total, band, scheme); EXPECT_EQ(total.score, 0); band.lower_bound = -4; band.upper_bound = -3; mock.balance_leading_gaps(total, band, scheme); EXPECT_EQ(total.score, -13); band.lower_bound = 4; band.upper_bound = 10; mock.balance_leading_gaps(total, band, scheme); EXPECT_EQ(total.score, -27); }
//
//  TargetImplementation.hpp
//  Clock Signal
//
//  Created by Thomas Harte on 19/08/2019.
//  Copyright © 2019 Thomas Harte. All rights reserved.
//

// Registers this target as a device and observer on @c bus; @c scsi_id selects
// the single data-bus bit that identifies this target during selection.
template <typename Executor> Target<Executor>::Target(Bus &bus, int scsi_id) :
	bus_(bus),
	scsi_id_mask_(BusState(1 << scsi_id)),
	scsi_bus_device_id_(bus.add_device()) {
	bus.add_observer(this);
}

// Bus-observer callback: advances the target's phase state machine in
// response to every bus transition.
template <typename Executor> void Target<Executor>::scsi_bus_did_change(Bus *, BusState new_state, double time_since_change) {
	/*
		"The target determines that it is selected when the SEL# signal and its SCSI ID
		bit are active and the BSY# and I#/O signals are false. It then asserts the
		signal within a selection abort time."
	*/

	// Wait for deskew, at the very least.
	if(time_since_change < SCSI::DeskewDelay) return;

	// A reset always takes precedence over anything else ongoing.
	if(new_state & Line::Reset) {
		phase_ = Phase::AwaitingSelection;
		bus_state_ = DefaultBusState;
		set_device_output(bus_state_);
		return;
	}

	switch(phase_) {
		/*
			While awaiting selection the SCSI target is passively watching the bus waiting
			for its ID to be set during a target selection. It will segue automatically
			from there to the command phase regardless of its executor.
		*/
		case Phase::AwaitingSelection:
			// Selected when: our ID bit is set, SEL is asserted, and BSY and I/O are clear.
			if(
				(new_state & scsi_id_mask_) &&
				((new_state & (Line::SelectTarget | Line::Busy | Line::Input)) == Line::SelectTarget)
			) {
				phase_ = Phase::Command;
				command_.resize(0);
				command_pointer_ = 0;
				bus_state_ |= Line::Busy;	// Initiate the command phase: request a command byte.
				set_device_output(bus_state_);
			}
		break;

		/*
			In the command phase, the target will stream an appropriate number of bytes for the
			command it is being offered, before giving the executor a chance to handle the command.
			If the target supports this command, it becomes responsible for the appropriate next
			phase transition. If it reports that it doesn't support that command, a suitable
			response is automatically dispatched.
		*/
		case Phase::Command:
			// Wait for select to be disabled before beginning the control phase proper.
			if((new_state & Line::SelectTarget)) return;

			bus_state_ |= Line::Control;

			switch(new_state & (Line::Request | Line::Acknowledge)) {
				// If request and acknowledge are both enabled, grab a byte and cancel the request.
				case Line::Request | Line::Acknowledge:
					bus_state_ &= ~Line::Request;

					if(command_.empty()) {
						// First byte determines the command group and hence total length.
						begin_command(uint8_t(new_state));

						// TODO: if(command_.empty()) signal_error_somehow();
					} else {
						command_[command_pointer_] = uint8_t(new_state);
						++command_pointer_;
						if(command_pointer_ == command_.size()) {
							if(!dispatch_command()) {
								// This is just a guess for now; I don't know how SCSI
								// devices are supposed to respond if they don't support
								// a command.
								terminate_command(Responder::Status::TaskAborted);
							}
						}
					}
				break;

				// The reset of request has caused the initiator to reset acknowledge, so it is now
				// safe to request the next byte.
				case 0:
					bus_state_ |= Line::Request;
				break;

				default: break;
			}
			set_device_output(bus_state_);
		break;

		case Phase::ReceivingData:
			switch(new_state & (Line::Request | Line::Acknowledge)) {
				// REQ/ACK handshake complete: latch the data byte and drop the request.
				case Line::Request | Line::Acknowledge:
					bus_state_ &= ~Line::Request;
					data_[data_pointer_] = uint8_t(new_state);
					++data_pointer_;
				break;

				// Initiator released ACK: either continue the transfer or, once the
				// expected length has arrived, hand off to the stored continuation.
				case 0:
					if(data_pointer_ == data_.size()) {
						next_function_(CommandState(command_, data_), *this);
					} else {
						bus_state_ |= Line::Request;
					}
				break;
			}
			set_device_output(bus_state_);
		break;

		// The three outbound phases share one handshake; they differ only in
		// which byte is placed on the bus and when the transfer is complete.
		case Phase::SendingData:
		case Phase::SendingStatus:
		case Phase::SendingMessage:
			switch(new_state & (Line::Request | Line::Acknowledge)) {
				case Line::Request | Line::Acknowledge:
					// Also clears the low eight data lines along with REQ.
					bus_state_ &= ~(Line::Request | 0xff);
					++data_pointer_;
				break;

				case 0:
					// Status and message phases transfer exactly one byte; data
					// transfers run to the end of data_.
					if(
						(phase_ == Phase::SendingMessage && data_pointer_ == 1) ||
						(phase_ == Phase::SendingStatus && data_pointer_ == 1) ||
						(phase_ == Phase::SendingData && data_pointer_ == data_.size())
					) {
						next_function_(CommandState(command_, data_), *this);
					} else {
						bus_state_ |= Line::Request;

						// Place the next byte on the data lines before raising REQ output.
						bus_state_ &= ~0xff;
						switch(phase_) {
							case Phase::SendingData:	bus_state_ |= data_[data_pointer_];	break;
							case Phase::SendingStatus:	bus_state_ |= BusState(status_);	break;
							default:
							case Phase::SendingMessage:	bus_state_ |= BusState(message_);	break;
						}
					}
				break;
			}
			set_device_output(bus_state_);
		break;
	}
}

// Sizes command_ according to the command group encoded in the top three bits
// of the first command byte; leaves command_ empty for unrecognised groups.
template <typename Executor> void Target<Executor>::begin_command(uint8_t first_byte) {
	// The logic below is valid for SCSI-1. TODO: other SCSIs.
	switch(first_byte >> 5) {
		default: break;
		case 0:	command_.resize(6);		break;	// Group 0 commands: 6 bytes long.
		case 1:	command_.resize(10);	break;	// Group 1 commands: 10 bytes long.
		case 5:	command_.resize(12);	break;	// Group 5 commands: 12 bytes long.
	}

	// Store the first byte if it was recognised.
	if(!command_.empty()) {
		command_[0] = first_byte;
		command_pointer_ = 1;
	}
}

// Routes a fully-received command to the corresponding executor method;
// returns false for commands the executor does not implement.
template <typename Executor> bool Target<Executor>::dispatch_command() {

	CommandState arguments(command_, data_);

	// Opcode constructors for command groups 0, 1 and 5.
#define G0(x)	x
#define G1(x)	(0x20|x)
#define G5(x)	(0xa0|x)

	LOG("---Command " << PADHEX(2) << int(command_[0]) << "---");

	switch(command_[0]) {
		default:		return false;

		case G0(0x00):	return executor_.test_unit_ready(arguments, *this);
		case G0(0x01):	return executor_.rezero_unit(arguments, *this);
		case G0(0x03):	return executor_.request_sense(arguments, *this);
		case G0(0x04):	return executor_.format_unit(arguments, *this);
		case G0(0x08):	return executor_.read(arguments, *this);
		case G0(0x0a):	return executor_.write(arguments, *this);
		case G0(0x0b):	return executor_.seek(arguments, *this);
		case G0(0x12):	return executor_.inquiry(arguments, *this);
		case G0(0x15):	return executor_.mode_select(arguments, *this);
		case G0(0x16):	return executor_.reserve_unit(arguments, *this);
		case G0(0x17):	return executor_.release_unit(arguments, *this);
		case G0(0x1a):	return executor_.mode_sense(arguments, *this);
		case G0(0x1c):	return executor_.read_diagnostic(arguments, *this);
		case G0(0x1d):	return executor_.write_diagnostic(arguments, *this);

		case G1(0x05):	return executor_.read_capacity(arguments, *this);
		case G1(0x08):	return executor_.read(arguments, *this);
		case G1(0x0a):	return executor_.write(arguments, *this);
		case G1(0x0e):	return executor_.write_and_verify(arguments, *this);
		case G1(0x0f):	return executor_.verify(arguments, *this);
		case G1(0x11):	return executor_.search_data_equal(arguments, *this);
		case G1(0x10):	return executor_.search_data_high(arguments, *this);
		case G1(0x12):	return executor_.search_data_low(arguments, *this);
		case G1(0x1c):	return executor_.read_buffer(arguments, *this);
		case G1(0x15):	return executor_.mode_select(arguments, *this);

		case G5(0x09):	return executor_.set_block_limits(arguments, *this);
	}

#undef G0
#undef G1
#undef G5

	return false;
}

// Enters the data-in phase, taking ownership of @c data and arranging for
// @c next to be called when the whole buffer has been transferred.
template <typename Executor> void Target<Executor>::send_data(std::vector<uint8_t> &&data, continuation next) {
	// Data out phase: control and message all reset, input set.
	// NOTE(review): the comment above says "data out" but Input is set, which
	// marks a target-to-initiator (data in) transfer — confirm intent.
	bus_state_ &= ~(Line::Control | Line::Input | Line::Message);
	bus_state_ |= Line::Input;

	phase_ = Phase::SendingData;
	next_function_ = next;
	data_ = std::move(data);
	data_pointer_ = 0;
	set_device_output(bus_state_);
}

// Enters the data-out phase, expecting @c length bytes from the initiator
// before @c next is called.
template <typename Executor> void Target<Executor>::receive_data(size_t length, continuation next) {
	// Data out phase: control, input and message all reset.
	bus_state_ &= ~(Line::Control | Line::Input | Line::Message);

	phase_ = Phase::ReceivingData;
	next_function_ = next;
	data_.resize(length);
	data_pointer_ = 0;
	set_device_output(bus_state_);
}

// Enters the status phase to deliver the single byte @c status, then @c next.
template <typename Executor> void Target<Executor>::send_status(Status status, continuation next) {
	// Status phase: message reset, control and input set.
	bus_state_ &= ~(Line::Control | Line::Input | Line::Message);
	bus_state_ |= Line::Input | Line::Control;

	status_ = status;
	phase_ = Phase::SendingStatus;
	next_function_ = next;
	data_pointer_ = 0;
	set_device_output(bus_state_);
}

// Enters the message-in phase to deliver the single byte @c message, then @c next.
template <typename Executor> void Target<Executor>::send_message(Message message, continuation next) {
	// Message in phase: message, control and input set.
	bus_state_ |= Line::Message | Line::Control | Line::Input;

	message_ = message;
	phase_ = Phase::SendingMessage;
	next_function_ = next;
	data_pointer_ = 0;
	set_device_output(bus_state_);
}

// Completes the current command: releases the bus and returns to passively
// awaiting the next selection.
template <typename Executor> void Target<Executor>::end_command() {
	// TODO: was this a linked command?

	// Release all bus lines and return to awaiting selection.
	phase_ = Phase::AwaitingSelection;
	bus_state_ = DefaultBusState;
	set_device_output(bus_state_);

	LOG("---Done---");
}
/******************************************************************* Author: David Ge (dge893@gmail.com, aka Wei Ge) Last modified: 03/31/2018 Allrights reserved by David Ge ********************************************************************/ //#include "stdafx.h" #include "randomTools.h" #include <malloc.h> #include <math.h> #define _USE_MATH_DEFINES // for C++ #include <cmath> #include <stdio.h> #include <stdlib.h> #include <time.h> double rand_double() { return rand()/(double)RAND_MAX; } double rand_double2(double x, double y) { return (y - x)*rand_double() + x; } int rand_int(int a, int b) { return (int)((b - a + 1)*rand_double()) + a; }
#include <frstd/option.hpp>
// g2o - General Graph Optimization // Copyright (C) 2011 R. Kuemmerle, G. Grisetti, W. Burgard // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED // TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "sparse_optimizer.h"

#include <iostream>
#include <iomanip>
#include <algorithm>
#include <iterator>
#include <cassert>
#include <algorithm>  // NOTE(review): duplicate include of <algorithm>

#include "estimate_propagator.h"
#include "optimization_algorithm.h"
#include "batch_stats.h"
#include "hyper_graph_action.h"
#include "robust_kernel.h"
#include "g2o/stuff/timeutil.h"
#include "g2o/stuff/macros.h"
#include "g2o/stuff/misc.h"
#include "g2o/config.h"
#include "g2o/core/ownership.h"

namespace g2o{
  using namespace std;

  SparseOptimizer::SparseOptimizer() : _forceStopFlag(0), _verbose(false), _algorithm(nullptr), _computeBatchStatistics(false)
  {
    // Pre-size the per-event action containers (one set per action type).
    _graphActions.resize(AT_NUM_ELEMENTS);
  }

  SparseOptimizer::~SparseOptimizer(){
    // The optimizer owns its algorithm; release it and detach global stats.
    release(_algorithm);
    G2OBatchStatistics::setGlobalStats(0);
  }

  // Recomputes the error vector of every active edge (optionally in parallel).
  void SparseOptimizer::computeActiveErrors()
  {
    // call the callbacks in case there is something registered
    HyperGraphActionSet& actions = _graphActions[AT_COMPUTEACTIVERROR];
    if (actions.size() > 0) {
      for (HyperGraphActionSet::iterator it = actions.begin(); it != actions.end(); ++it)
        (*(*it))(this);
    }

#   ifdef G2O_OPENMP
#   pragma omp parallel for default (shared) if (_activeEdges.size() > 50)
#   endif
    for (int k = 0; k < static_cast<int>(_activeEdges.size()); ++k) {
      OptimizableGraph::Edge* e = _activeEdges[k];
      e->computeError();
    }

#   ifndef NDEBUG
    // Debug-only sweep: report edges whose error vector contains NaNs.
    for (int k = 0; k < static_cast<int>(_activeEdges.size()); ++k) {
      OptimizableGraph::Edge* e = _activeEdges[k];
      bool hasNan = arrayHasNaN(e->errorData(), e->dimension());
      if (hasNan) {
        cerr << "computeActiveErrors(): found NaN in error for edge " << e << endl;
      }
    }
#   endif
  }

  // Sum of raw (un-robustified) chi2 over all active edges.
  number_t SparseOptimizer::activeChi2( ) const
  {
    number_t chi = 0.0;
    for (EdgeContainer::const_iterator it = _activeEdges.begin(); it != _activeEdges.end(); ++it) {
      const OptimizableGraph::Edge* e = *it;
      chi += e->chi2();
    }
    return chi;
  }

  // Like activeChi2(), but applies each edge's robust kernel where present.
  number_t SparseOptimizer::activeRobustChi2() const
  {
    Vector3 rho;
    number_t chi = 0.0;
    for (EdgeContainer::const_iterator it = _activeEdges.begin(); it != _activeEdges.end(); ++it) {
      const OptimizableGraph::Edge* e = *it;
      if (e->robustKernel()) {
        e->robustKernel()->robustify(e->chi2(), rho);
        chi += rho[0];  // rho[0] is the robustified error value
      }
      else
        chi += e->chi2();
    }
    return chi;
  }

  // Returns the first vertex of maximal dimension, or nullptr if the graph is
  // empty; used as the gauge-fixing candidate.
  OptimizableGraph::Vertex* SparseOptimizer::findGauge(){
    if (vertices().empty())
      return nullptr;

    int maxDim=0;
    for (HyperGraph::VertexIDMap::iterator it=vertices().begin(); it!=vertices().end(); ++it){
      OptimizableGraph::Vertex* v=static_cast<OptimizableGraph::Vertex*>(it->second);
      maxDim=std::max(maxDim,v->dimension());
    }

    OptimizableGraph::Vertex* rut=0;
    for (HyperGraph::VertexIDMap::iterator it=vertices().begin(); it!=vertices().end(); ++it){
      OptimizableGraph::Vertex* v=static_cast<OptimizableGraph::Vertex*>(it->second);
      if (v->dimension()==maxDim){
        rut=v;
        break;
      }
    }
    return rut;
  }

  // True if no maximal-dimension vertex is fixed (directly or via a
  // full-dimension unary prior), i.e. the problem still has gauge freedom.
  bool SparseOptimizer::gaugeFreedom()
  {
    if (vertices().empty())
      return false;

    int maxDim=0;
    for (HyperGraph::VertexIDMap::iterator it=vertices().begin(); it!=vertices().end(); ++it){
      OptimizableGraph::Vertex* v=static_cast<OptimizableGraph::Vertex*>(it->second);
      maxDim = std::max(maxDim,v->dimension());
    }

    for (HyperGraph::VertexIDMap::iterator it=vertices().begin(); it!=vertices().end(); ++it){
      OptimizableGraph::Vertex* v=static_cast<OptimizableGraph::Vertex*>(it->second);
      if (v->dimension() == maxDim) {
        // test for fixed vertex
        if (v->fixed()) {
          return false;
        }
        // test for full dimension prior
        for (HyperGraph::EdgeSet::const_iterator eit = v->edges().begin(); eit != v->edges().end(); ++eit) {
          OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*eit);
          if (e->vertices().size() == 1 && e->dimension() == maxDim)
            return false;
        }
      }
    }
    return true;
  }

  // Assigns a dense Hessian index to every non-fixed vertex in vlist.
  // Two passes (k = 0/1) so that non-marginalized vertices come first and
  // marginalized ones afterwards; fixed vertices get index -1.
  bool SparseOptimizer::buildIndexMapping(SparseOptimizer::VertexContainer& vlist){
    if (! vlist.size()){
      _ivMap.clear();
      return false;
    }

    _ivMap.resize(vlist.size());
    size_t i = 0;
    for (int k=0; k<2; k++)
      for (VertexContainer::iterator it=vlist.begin(); it!=vlist.end(); ++it){
        OptimizableGraph::Vertex* v = *it;
        if (! v->fixed()){
          if (static_cast<int>(v->marginalized()) == k){
            v->setHessianIndex(i);
            _ivMap[i]=v;
            i++;
          }
        }
        else {
          v->setHessianIndex(-1);
        }
      }
    _ivMap.resize(i);
    return true;
  }

  // Resets the Hessian index of every mapped vertex and empties the map.
  void SparseOptimizer::clearIndexMapping(){
    for (size_t i=0; i<_ivMap.size(); ++i){
      _ivMap[i]->setHessianIndex(-1);
      _ivMap[i]=0;
    }
  }

  // Convenience overload: initialize over the whole vertex set.
  bool SparseOptimizer::initializeOptimization(int level){
    HyperGraph::VertexSet vset;
    for (VertexIDMap::iterator it=vertices().begin(); it!=vertices().end(); ++it)
      vset.insert(it->second);
    return initializeOptimization(vset,level);
  }

  // Builds _activeVertices/_activeEdges from vset, restricted to `level`
  // (level < 0 means all levels), then rebuilds the index mapping.
  bool SparseOptimizer::initializeOptimization(HyperGraph::VertexSet& vset, int level){
    if (edges().size() == 0) {
      cerr << __PRETTY_FUNCTION__ << ": Attempt to initialize an empty graph" << endl;
      return false;
    }
    preIteration(-1);
    bool workspaceAllocated = _jacobianWorkspace.allocate(); (void) workspaceAllocated;
    assert(workspaceAllocated && "Error while allocating memory for the Jacobians");
    clearIndexMapping();
    _activeVertices.clear();
    _activeVertices.reserve(vset.size());
    _activeEdges.clear();
    set<Edge*> auxEdgeSet; // temporary structure to avoid duplicates
    for (HyperGraph::VertexSet::iterator it=vset.begin(); it!=vset.end(); ++it){
      OptimizableGraph::Vertex* v= (OptimizableGraph::Vertex*) *it;
      const OptimizableGraph::EdgeSet& vEdges=v->edges();
      // count if there are edges in that level. If not remove from the pool
      int levelEdges=0;
      for (OptimizableGraph::EdgeSet::const_iterator it=vEdges.begin(); it!=vEdges.end(); ++it){
        OptimizableGraph::Edge* e=reinterpret_cast<OptimizableGraph::Edge*>(*it);
        if (level < 0 || e->level() == level) {
          // an edge is active only if all of its vertices are in vset
          bool allVerticesOK = true;
          for (vector<HyperGraph::Vertex*>::const_iterator vit = e->vertices().begin(); vit != e->vertices().end(); ++vit) {
            if (vset.find(*vit) == vset.end()) {
              allVerticesOK = false;
              break;
            }
          }
          if (allVerticesOK && !e->allVerticesFixed()) {
            auxEdgeSet.insert(e);
            levelEdges++;
          }
        }
      }
      if (levelEdges){
        _activeVertices.push_back(v);

        // test for NANs in the current estimate if we are debugging
#       ifndef NDEBUG
        int estimateDim = v->estimateDimension();
        if (estimateDim > 0) {
          VectorX estimateData(estimateDim);
          if (v->getEstimateData(estimateData.data()) == true) {
            int k;
            bool hasNan = arrayHasNaN(estimateData.data(), estimateDim, &k);
            if (hasNan)
              cerr << __PRETTY_FUNCTION__ << ": Vertex " << v->id() << " contains a nan entry at index " << k << endl;
          }
        }
#       endif

      }
    }

    _activeEdges.reserve(auxEdgeSet.size());
    for (set<Edge*>::iterator it = auxEdgeSet.begin(); it != auxEdgeSet.end(); ++it)
      _activeEdges.push_back(*it);

    sortVectorContainers();
    bool indexMappingStatus = buildIndexMapping(_activeVertices);
    postIteration(-1);
    return indexMappingStatus;
  }

  // Edge-driven initialization: activates the given edges and every vertex
  // they reference (skipping edges with undefined vertices).
  bool SparseOptimizer::initializeOptimization(HyperGraph::EdgeSet& eset){
    preIteration(-1);
    bool workspaceAllocated = _jacobianWorkspace.allocate(); (void) workspaceAllocated;
    assert(workspaceAllocated && "Error while allocating memory for the Jacobians");
    clearIndexMapping();
    _activeVertices.clear();
    _activeEdges.clear();
    _activeEdges.reserve(eset.size());
    set<Vertex*> auxVertexSet; // temporary structure to avoid duplicates
    for (HyperGraph::EdgeSet::iterator it=eset.begin(); it!=eset.end(); ++it){
      OptimizableGraph::Edge* e=(OptimizableGraph::Edge*)(*it);
      if (e->numUndefinedVertices()) continue;
      for (vector<HyperGraph::Vertex*>::const_iterator vit = e->vertices().begin(); vit != e->vertices().end(); ++vit) {
        auxVertexSet.insert(static_cast<OptimizableGraph::Vertex*>(*vit));
      }
      _activeEdges.push_back(reinterpret_cast<OptimizableGraph::Edge*>(*it));
    }

    _activeVertices.reserve(auxVertexSet.size());
    for (set<Vertex*>::iterator it = auxVertexSet.begin(); it != auxVertexSet.end(); ++it)
      _activeVertices.push_back(*it);

    sortVectorContainers();
    bool indexMappingStatus = buildIndexMapping(_activeVertices);
    postIteration(-1);
    return indexMappingStatus;
  }

  // Resets every vertex estimate to its origin.
  void SparseOptimizer::setToOrigin(){
    for (VertexIDMap::iterator it=vertices().begin(); it!=vertices().end(); ++it) {
      OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second);
      v->setToOrigin();
    }
  }

  void SparseOptimizer::computeInitialGuess()
  {
    EstimatePropagator::PropagateCost costFunction(this);
    computeInitialGuess(costFunction);
  }

  // Spreads an initial estimate from fixed vertices (and vertices with a
  // unary prior) through the graph using the given propagation cost; vertices
  // not reached keep their previous estimate (push/pop below).
  void SparseOptimizer::computeInitialGuess(EstimatePropagatorCost& costFunction)
  {
    OptimizableGraph::VertexSet emptySet;
    std::set<Vertex*> backupVertices;
    HyperGraph::VertexSet fixedVertices; // these are the root nodes where to start the initialization
    for (EdgeContainer::iterator it = _activeEdges.begin(); it != _activeEdges.end(); ++it) {
      OptimizableGraph::Edge* e = *it;
      for (size_t i = 0; i < e->vertices().size(); ++i) {
        OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(e->vertex(i));
        if (!v) continue;
        if (v->fixed())
          fixedVertices.insert(v);
        else { // check for having a prior which is able to fully initialize a vertex
          for (EdgeSet::const_iterator vedgeIt = v->edges().begin(); vedgeIt != v->edges().end(); ++vedgeIt) {
            OptimizableGraph::Edge* vedge = static_cast<OptimizableGraph::Edge*>(*vedgeIt);
            if (vedge->vertices().size() == 1 && vedge->initialEstimatePossible(emptySet, v) > 0.) {
              //cerr << "Initialize with prior for " << v->id() << endl;
              vedge->initialEstimate(emptySet, v);
              fixedVertices.insert(v);
            }
          }
        }
        if (v->hessianIndex() == -1) {
          // not part of the optimization: save the estimate so we can restore it
          std::set<Vertex*>::const_iterator foundIt = backupVertices.find(v);
          if (foundIt == backupVertices.end()) {
            v->push();
            backupVertices.insert(v);
          }
        }
      }
    }

    EstimatePropagator estimatePropagator(this);
    estimatePropagator.propagate(fixedVertices, costFunction);

    // restoring the vertices that should not be initialized
    for (std::set<Vertex*>::iterator it = backupVertices.begin(); it != backupVertices.end(); ++it) {
      Vertex* v = *it;
      v->pop();
    }
    if (verbose()) {
      computeActiveErrors();
      cerr << "iteration= -1\t chi2= " << activeChi2()
          << "\t time= 0.0"
          << "\t cumTime= 0.0"
          << "\t (using initial guess from " << costFunction.name() << ")" << endl;
    }
  }

  // Runs up to `iterations` solver iterations; returns the number performed,
  // -1 on setup failure, or 0 if the algorithm reported Fail.
  int SparseOptimizer::optimize(int iterations, bool online)
  {
    if (_ivMap.size() == 0) {
      cerr << __PRETTY_FUNCTION__ << ": 0 vertices to optimize, maybe forgot to call initializeOptimization()" << endl;
      return -1;
    }

    int cjIterations=0;
    number_t cumTime=0;
    bool ok=true;

    ok = _algorithm->init(online);
    if (! ok) {
      cerr << __PRETTY_FUNCTION__ << " Error while initializing" << endl;
      return -1;
    }

    _batchStatistics.clear();
    if (_computeBatchStatistics)
      _batchStatistics.resize(iterations);

    OptimizationAlgorithm::SolverResult result = OptimizationAlgorithm::OK;
    for (int i=0; i<iterations && ! terminate() && ok; i++){
      preIteration(i);

      if (_computeBatchStatistics) {
        G2OBatchStatistics& cstat = _batchStatistics[i];
        G2OBatchStatistics::setGlobalStats(&cstat);
        cstat.iteration = i;
        cstat.numEdges = _activeEdges.size();
        cstat.numVertices = _activeVertices.size();
      }

      number_t ts = get_monotonic_time();
      result = _algorithm->solve(i, online);
      ok = ( result == OptimizationAlgorithm::OK );

      // avoid recomputing the errors twice when both stats and verbose need them
      bool errorComputed = false;
      if (_computeBatchStatistics) {
        computeActiveErrors();
        errorComputed = true;
        _batchStatistics[i].chi2 = activeRobustChi2();
        _batchStatistics[i].timeIteration = get_monotonic_time()-ts;
      }

      if (verbose()){
        number_t dts = get_monotonic_time()-ts;
        cumTime += dts;
        if (! errorComputed)
          computeActiveErrors();
        cerr << "iteration= " << i
          << "\t chi2= " << FIXED(activeRobustChi2())
          << "\t time= " << dts
          << "\t cumTime= " << cumTime
          << "\t edges= " << _activeEdges.size();
        _algorithm->printVerbose(cerr);
        cerr << endl;
      }
      ++cjIterations;
      postIteration(i);
    }
    if (result == OptimizationAlgorithm::Fail) {
      return 0;
    }
    return cjIterations;
  }

  // Applies the stacked increment vector: each vertex consumes its own
  // `dimension()` entries via oplus, in Hessian-index order.
  void SparseOptimizer::update(const number_t* update)
  {
    // update the graph by calling oplus on the vertices
    for (size_t i=0; i < _ivMap.size(); ++i) {
      OptimizableGraph::Vertex* v= _ivMap[i];
#ifndef NDEBUG
      bool hasNan = arrayHasNaN(update, v->dimension());
      if (hasNan)
        cerr << __PRETTY_FUNCTION__ << ": Update contains a nan for vertex " << v->id() << endl;
#endif
      v->oplus(update);
      update += v->dimension();
    }
  }

  // Toggles per-iteration statistics; clears them when switching off.
  void SparseOptimizer::setComputeBatchStatistics(bool computeBatchStatistics)
  {
    if ((_computeBatchStatistics == true) && (computeBatchStatistics == false)) {
      G2OBatchStatistics::setGlobalStats(0);
      _batchStatistics.clear();
    }
    _computeBatchStatistics = computeBatchStatistics;
  }

  // Incrementally extends an existing initialization with new vertices/edges
  // (online operation); marginalized vertices are not supported here.
  bool SparseOptimizer::updateInitialization(HyperGraph::VertexSet& vset, HyperGraph::EdgeSet& eset)
  {
    std::vector<HyperGraph::Vertex*> newVertices;
    newVertices.reserve(vset.size());
    _activeVertices.reserve(_activeVertices.size() + vset.size());
    _activeEdges.reserve(_activeEdges.size() + eset.size());
    for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      if (!e->allVerticesFixed()) _activeEdges.push_back(e);
    }

    // update the index mapping
    size_t next = _ivMap.size();
    for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
      OptimizableGraph::Vertex* v=static_cast<OptimizableGraph::Vertex*>(*it);
      if (! v->fixed()){
        if (! v->marginalized()){
          v->setHessianIndex(next);
          _ivMap.push_back(v);
          newVertices.push_back(v);
          _activeVertices.push_back(v);
          next++;
        }
        else // not supported right now
          abort();
      }
      else {
        v->setHessianIndex(-1);
      }
    }

    //if (newVertices.size() != vset.size())
    //cerr << __PRETTY_FUNCTION__ << ": something went wrong " << PVAR(vset.size()) << " " << PVAR(newVertices.size()) << endl;
    return _algorithm->updateStructure(newVertices, eset);
  }

  void SparseOptimizer::sortVectorContainers()
  {
    // sort vector structures to get deterministic ordering based on IDs
    sort(_activeVertices.begin(), _activeVertices.end(), VertexIDCompare());
    sort(_activeEdges.begin(), _activeEdges.end(), EdgeIDCompare());
  }

  void SparseOptimizer::clear() {
    _ivMap.clear();
    _activeVertices.clear();
    _activeEdges.clear();
    OptimizableGraph::clear();
  }

  // Binary search over the ID-sorted active-vertex container; end() if absent.
  SparseOptimizer::VertexContainer::const_iterator SparseOptimizer::findActiveVertex(const OptimizableGraph::Vertex* v) const
  {
    VertexContainer::const_iterator lower = lower_bound(_activeVertices.begin(), _activeVertices.end(), v, VertexIDCompare());
    if (lower == _activeVertices.end())
      return _activeVertices.end();
    if ((*lower) == v)
      return lower;
    return _activeVertices.end();
  }

  // Binary search over the ID-sorted active-edge container; end() if absent.
  SparseOptimizer::EdgeContainer::const_iterator SparseOptimizer::findActiveEdge(const OptimizableGraph::Edge* e) const
  {
    EdgeContainer::const_iterator lower = lower_bound(_activeEdges.begin(), _activeEdges.end(), e, EdgeIDCompare());
    if (lower == _activeEdges.end())
      return _activeEdges.end();
    if ((*lower) == e)
      return lower;
    return _activeEdges.end();
  }

  // Saves the current estimate of every vertex in vlist.
  void SparseOptimizer::push(SparseOptimizer::VertexContainer& vlist)
  {
    for (VertexContainer::iterator it = vlist.begin(); it != vlist.end(); ++it)
      (*it)->push();
  }

  // Restores the previously saved estimate of every vertex in vlist.
  void SparseOptimizer::pop(SparseOptimizer::VertexContainer& vlist)
  {
    for (VertexContainer::iterator it = vlist.begin(); it != vlist.end(); ++it)
      (*it)->pop();
  }

  void SparseOptimizer::push(HyperGraph::VertexSet& vlist)
  {
    for (HyperGraph::VertexSet::iterator it = vlist.begin(); it != vlist.end(); ++it) {
      OptimizableGraph::Vertex* v = dynamic_cast<OptimizableGraph::Vertex*>(*it);
      if (v)
        v->push();
      else
        cerr << __FUNCTION__ << ": FATAL PUSH SET" << endl;
    }
  }

  void SparseOptimizer::pop(HyperGraph::VertexSet& vlist)
  {
    for (HyperGraph::VertexSet::iterator it = vlist.begin(); it != vlist.end(); ++it){
      OptimizableGraph::Vertex* v = dynamic_cast<OptimizableGraph::Vertex*> (*it);
      if (v)
        v->pop();
      else
        cerr << __FUNCTION__ << ": FATAL POP SET" << endl;
    }
  }

  // Drops the most recently saved estimate of every vertex in vlist.
  void SparseOptimizer::discardTop(SparseOptimizer::VertexContainer& vlist)
  {
    for (VertexContainer::iterator it = vlist.begin(); it != vlist.end(); ++it)
      (*it)->discardTop();
  }

  void SparseOptimizer::setVerbose(bool verbose)
  {
    _verbose = verbose;
  }

  // Installs a new solver; keeps the optimizer back-pointers consistent.
  void SparseOptimizer::setAlgorithm(OptimizationAlgorithm* algorithm)
  {
    if (_algorithm) // reset the optimizer for the formerly used solver
      _algorithm->setOptimizer(nullptr);
    _algorithm = algorithm;
    if (_algorithm)
      _algorithm->setOptimizer(this);
  }

  bool SparseOptimizer::computeMarginals(SparseBlockMatrix<MatrixX>& spinv, const std::vector<std::pair<int, int> >& blockIndices){
    return _algorithm->computeMarginals(spinv, blockIndices);
  }

  void SparseOptimizer::setForceStopFlag(bool* flag)
  {
    _forceStopFlag=flag;
  }

  // Removes a vertex; if it was part of the optimization the index mapping
  // becomes stale and is cleared.
  bool SparseOptimizer::removeVertex(HyperGraph::Vertex* v, bool detach)
  {
    OptimizableGraph::Vertex* vv = static_cast<OptimizableGraph::Vertex*>(v);
    if (vv->hessianIndex() >= 0) {
      clearIndexMapping();
      _ivMap.clear();
    }
    return HyperGraph::removeVertex(v, detach);
  }
bool SparseOptimizer::addComputeErrorAction(HyperGraphAction* action) { std::pair<HyperGraphActionSet::iterator, bool> insertResult = _graphActions[AT_COMPUTEACTIVERROR].insert(action); return insertResult.second; } bool SparseOptimizer::removeComputeErrorAction(HyperGraphAction* action) { return _graphActions[AT_COMPUTEACTIVERROR].erase(action) > 0; } void SparseOptimizer::push() { push(_activeVertices); } void SparseOptimizer::pop() { pop(_activeVertices); } void SparseOptimizer::discardTop() { discardTop(_activeVertices); } } // end namespace
#include "Runtime/Weapon/CFlameInfo.hpp"

namespace metaforce {

// Direct-value constructor: stores the raw property fields as given.
CFlameInfo::CFlameInfo(s32 w1, s32 w2, CAssetId flameFxId, s32 w3, float f1, float f2, float f3)
: x0_propertyCount(w1), x4_attributes(w2), x8_flameFxId(flameFxId), xc_length(w3), x10_(f1), x18_(f2), x1c_(f3) {}

// Stream constructor: deserializes the fields in order.
// NOTE: members are initialized in declaration order, and each initializer
// advances the stream — do not reorder the member declarations or this list.
CFlameInfo::CFlameInfo(CInputStream& in)
: x0_propertyCount(in.readUint32Big())
, x4_attributes(in.readUint32Big())
, x8_flameFxId(in)
, xc_length(in.readUint32Big())
, x10_(in.readFloatBig())
, x18_(in.readFloatBig())
, x1c_(in.readFloatBig()) {}

} // namespace metaforce
// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/compiler/backend/il.h"

#include "vm/compiler/backend/il_test_helper.h"
#include "vm/unit_test.h"

namespace dart {

// Basic sanity checks on the IL instruction class hierarchy predicates.
ISOLATE_UNIT_TEST_CASE(InstructionTests) {
  TargetEntryInstr* target_instr =
      new TargetEntryInstr(1, kInvalidTryIndex, DeoptId::kNone);
  EXPECT(target_instr->IsBlockEntry());
  EXPECT(!target_instr->IsDefinition());
  SpecialParameterInstr* context = new SpecialParameterInstr(
      SpecialParameterInstr::kContext, DeoptId::kNone, target_instr);
  EXPECT(context->IsDefinition());
  EXPECT(!context->IsBlockEntry());
  EXPECT(context->GetBlock() == target_instr);
}

// Checks Value/ConstantInstr structural equality used by the optimizer.
ISOLATE_UNIT_TEST_CASE(OptimizationTests) {
  JoinEntryInstr* join =
      new JoinEntryInstr(1, kInvalidTryIndex, DeoptId::kNone);

  Definition* def1 = new PhiInstr(join, 0);
  Definition* def2 = new PhiInstr(join, 0);
  Value* use1a = new Value(def1);
  Value* use1b = new Value(def1);
  // Values are equal iff they refer to the same definition.
  EXPECT(use1a->Equals(use1b));
  Value* use2 = new Value(def2);
  EXPECT(!use2->Equals(use1a));

  ConstantInstr* c1 = new ConstantInstr(Bool::True());
  ConstantInstr* c2 = new ConstantInstr(Bool::True());
  EXPECT(c1->Equals(c2));
  ConstantInstr* c3 = new ConstantInstr(Object::ZoneHandle());
  ConstantInstr* c4 = new ConstantInstr(Object::ZoneHandle());
  EXPECT(c3->Equals(c4));
  EXPECT(!c3->Equals(c1));
}

// Compiles a store into a typed container and checks the JIT pipeline
// eliminated the write barrier on the StoreIndexed instruction.
ISOLATE_UNIT_TEST_CASE(IRTest_EliminateWriteBarrier) {
  const char* kScript = R"(
      class Container<T> {
        operator []=(var index, var value) {
          return data[index] = value;
        }

        List<T> data = new List<T>()..length = 10;
      }

      Container<int> x = new Container<int>();

      foo() {
        for (int i = 0; i < 10; ++i) {
          x[i] = i;
        }
      }
    )";

  const auto& root_library = Library::Handle(LoadTestScript(kScript));
  const auto& function = Function::Handle(GetFunction(root_library, "foo"));
  Invoke(root_library, "foo");

  TestPipeline pipeline(function, CompilerPass::kJIT);
  FlowGraph* flow_graph = pipeline.RunPasses({});

  auto entry = flow_graph->graph_entry()->normal_entry();
  EXPECT(entry != nullptr);
  StoreIndexedInstr* store_indexed = nullptr;

  ILMatcher cursor(flow_graph, entry, true);
  RELEASE_ASSERT(cursor.TryMatch({
      kMoveGlob,
      kMatchAndMoveBranchTrue,
      kMoveGlob,
      {kMatchStoreIndexed, &store_indexed},
  }));

  EXPECT(!store_indexed->value()->NeedsWriteBarrier());
}

}  // namespace dart
#include<bits/stdc++.h>
using namespace std;

const int N = 1e5 + 5;

// substree_sum[v]: sum of vertex labels in the subtree rooted at v.
int substree_sum[N];
// evencnt[v]: number of even-labelled vertices in the subtree rooted at v.
int evencnt[N];
vector<int> graph[N];

// Post-order DFS from `vertex` (skipping `parent`), accumulating the
// per-subtree label sums and even-vertex counts into the global arrays.
void dfs(int vertex, int parent = 0) {
    if (vertex % 2 == 0) {
        evencnt[vertex]++;
    }
    substree_sum[vertex] += vertex;
    for (int child : graph[vertex]) {
        if (child == parent) continue;
        dfs(child, vertex);
        substree_sum[vertex] += substree_sum[child];
        evencnt[vertex] += evencnt[child];
    }
}

int main() {
    int n;
    cin >> n;
    // Bug fix: a tree on n vertices has exactly n-1 edges. The original loop
    // read n edges, which either stalled waiting for a non-existent edge or
    // introduced a cycle that made the parent-skip DFS recurse forever.
    for (int i = 0; i < n - 1; i++) {
        int u, v;
        cin >> u >> v;
        graph[u].push_back(v);
        graph[v].push_back(u);
    }
    dfs(1);
    for (int i = 1; i <= n; i++) {
        // '\n' instead of endl: avoid flushing the stream on every line.
        cout << substree_sum[i] << " " << evencnt[i] << '\n';
    }
    /*
    int q;
    cin>>q;
    while(q--){
        int v;
        cin>>v;
        cout<<substree[v]<<" "<<evencnt[v]<<endl;
    }
    */
}
//===- SymbolManager.h - Management of Symbolic Values --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file defines SymbolManager, a class that manages symbolic values
//  created for use by ExprEngine and related classes.
//
//===----------------------------------------------------------------------===//

#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Basic/LLVM.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>

using namespace clang;
using namespace ento;

void SymExpr::anchor() {}

LLVM_DUMP_METHOD void SymExpr::dump() const { dumpToStream(llvm::errs()); }

// Prints "(sym) op int", with a 'U' suffix for unsigned constants.
void SymIntExpr::dumpToStream(raw_ostream &os) const {
  os << '(';
  getLHS()->dumpToStream(os);
  os << ") " << BinaryOperator::getOpcodeStr(getOpcode()) << ' ';
  if (getRHS().isUnsigned())
    os << getRHS().getZExtValue();
  else
    os << getRHS().getSExtValue();
  if (getRHS().isUnsigned())
    os << 'U';
}

// Prints "int op (sym)", with a 'U' suffix for unsigned constants.
void IntSymExpr::dumpToStream(raw_ostream &os) const {
  if (getLHS().isUnsigned())
    os << getLHS().getZExtValue();
  else
    os << getLHS().getSExtValue();
  if (getLHS().isUnsigned())
    os << 'U';
  os << ' ' << BinaryOperator::getOpcodeStr(getOpcode()) << " (";
  getRHS()->dumpToStream(os);
  os << ')';
}

void SymSymExpr::dumpToStream(raw_ostream &os) const {
  os << '(';
  getLHS()->dumpToStream(os);
  os << ") " << BinaryOperator::getOpcodeStr(getOpcode()) << " (";
  getRHS()->dumpToStream(os);
  os << ')';
}

void SymbolCast::dumpToStream(raw_ostream &os) const {
  os << '(' << ToTy.getAsString() << ") (";
  Operand->dumpToStream(os);
  os << ')';
}

void SymbolConjured::dumpToStream(raw_ostream &os) const {
  os << "conj_$" << getSymbolID() << '{' << T.getAsString() << ", LC"
     << LCtx->getID();
  if (S)
    os << ", S" << S->getID(LCtx->getDecl()->getASTContext());
  else
    os << ", no stmt";
  os << ", #" << Count << '}';
}

void SymbolDerived::dumpToStream(raw_ostream &os) const {
  os << "derived_$" << getSymbolID() << '{' << getParentSymbol() << ','
     << getRegion() << '}';
}

void SymbolExtent::dumpToStream(raw_ostream &os) const {
  os << "extent_$" << getSymbolID() << '{' << getRegion() << '}';
}

void SymbolMetadata::dumpToStream(raw_ostream &os) const {
  os << "meta_$" << getSymbolID() << '{' << getRegion() << ','
     << T.getAsString() << '}';
}

void SymbolData::anchor() {}

void SymbolRegionValue::dumpToStream(raw_ostream &os) const {
  os << "reg_$" << getSymbolID() << '<' << getType().getAsString() << ' ' << R
     << '>';
}

bool SymExpr::symbol_iterator::operator==(const symbol_iterator &X) const {
  return itr == X.itr;
}

bool SymExpr::symbol_iterator::operator!=(const symbol_iterator &X) const {
  return itr != X.itr;
}

SymExpr::symbol_iterator::symbol_iterator(const SymExpr *SE) {
  itr.push_back(SE);
}

SymExpr::symbol_iterator &SymExpr::symbol_iterator::operator++() {
  assert(!itr.empty() && "attempting to iterate on an 'end' iterator");
  expand();
  return *this;
}

SymbolRef SymExpr::symbol_iterator::operator*() {
  assert(!itr.empty() && "attempting to dereference an 'end' iterator");
  return itr.back();
}

// Pops the current expression and pushes its sub-expressions, giving a
// depth-first traversal over the symbol tree.
void SymExpr::symbol_iterator::expand() {
  const SymExpr *SE = itr.pop_back_val();

  switch (SE->getKind()) {
    // Leaf symbols: nothing to expand.
    case SymExpr::SymbolRegionValueKind:
    case SymExpr::SymbolConjuredKind:
    case SymExpr::SymbolDerivedKind:
    case SymExpr::SymbolExtentKind:
    case SymExpr::SymbolMetadataKind:
      return;
    case SymExpr::SymbolCastKind:
      itr.push_back(cast<SymbolCast>(SE)->getOperand());
      return;
    case SymExpr::SymIntExprKind:
      itr.push_back(cast<SymIntExpr>(SE)->getLHS());
      return;
    case SymExpr::IntSymExprKind:
      itr.push_back(cast<IntSymExpr>(SE)->getRHS());
      return;
    case SymExpr::SymSymExprKind: {
      const auto *x = cast<SymSymExpr>(SE);
      itr.push_back(x->getLHS());
      itr.push_back(x->getRHS());
      return;
    }
  }
  llvm_unreachable("unhandled expansion case");
}

// All factory methods below follow the same uniquing pattern: profile the
// would-be symbol into a FoldingSet, and only allocate (from BPAlloc) and
// insert it if an identical symbol does not already exist.
const SymbolRegionValue*
SymbolManager::getRegionValueSymbol(const TypedValueRegion* R) {
  llvm::FoldingSetNodeID profile;
  SymbolRegionValue::Profile(profile, R);
  void *InsertPos;
  SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
  if (!SD) {
    SD = (SymExpr*) BPAlloc.Allocate<SymbolRegionValue>();
    new (SD) SymbolRegionValue(SymbolCounter, R);
    DataSet.InsertNode(SD, InsertPos);
    ++SymbolCounter;
  }

  return cast<SymbolRegionValue>(SD);
}

const SymbolConjured* SymbolManager::conjureSymbol(const Stmt *E,
                                                   const LocationContext *LCtx,
                                                   QualType T,
                                                   unsigned Count,
                                                   const void *SymbolTag) {
  llvm::FoldingSetNodeID profile;
  SymbolConjured::Profile(profile, E, T, Count, LCtx, SymbolTag);
  void *InsertPos;
  SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
  if (!SD) {
    SD = (SymExpr*) BPAlloc.Allocate<SymbolConjured>();
    new (SD) SymbolConjured(SymbolCounter, E, LCtx, T, Count, SymbolTag);
    DataSet.InsertNode(SD, InsertPos);
    ++SymbolCounter;
  }

  return cast<SymbolConjured>(SD);
}

const SymbolDerived*
SymbolManager::getDerivedSymbol(SymbolRef parentSymbol,
                                const TypedValueRegion *R) {
  llvm::FoldingSetNodeID profile;
  SymbolDerived::Profile(profile, parentSymbol, R);
  void *InsertPos;
  SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
  if (!SD) {
    SD = (SymExpr*) BPAlloc.Allocate<SymbolDerived>();
    new (SD) SymbolDerived(SymbolCounter, parentSymbol, R);
    DataSet.InsertNode(SD, InsertPos);
    ++SymbolCounter;
  }

  return cast<SymbolDerived>(SD);
}

const SymbolExtent*
SymbolManager::getExtentSymbol(const SubRegion *R) {
  llvm::FoldingSetNodeID profile;
  SymbolExtent::Profile(profile, R);
  void *InsertPos;
  SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
  if (!SD) {
    SD = (SymExpr*) BPAlloc.Allocate<SymbolExtent>();
    new (SD) SymbolExtent(SymbolCounter, R);
    DataSet.InsertNode(SD, InsertPos);
    ++SymbolCounter;
  }

  return cast<SymbolExtent>(SD);
}

const SymbolMetadata *
SymbolManager::getMetadataSymbol(const MemRegion* R, const Stmt *S, QualType T,
                                 const LocationContext *LCtx,
                                 unsigned Count, const void *SymbolTag) {
  llvm::FoldingSetNodeID profile;
  SymbolMetadata::Profile(profile, R, S, T, LCtx, Count, SymbolTag);
  void *InsertPos;
  SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
  if (!SD) {
    SD = (SymExpr*) BPAlloc.Allocate<SymbolMetadata>();
    new (SD) SymbolMetadata(SymbolCounter, R, S, T, LCtx, Count, SymbolTag);
    DataSet.InsertNode(SD, InsertPos);
    ++SymbolCounter;
  }

  return cast<SymbolMetadata>(SD);
}

const SymbolCast*
SymbolManager::getCastSymbol(const SymExpr *Op,
                             QualType From, QualType To) {
  llvm::FoldingSetNodeID ID;
  SymbolCast::Profile(ID, Op, From, To);
  void *InsertPos;
  SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
  if (!data) {
    data = (SymbolCast*) BPAlloc.Allocate<SymbolCast>();
    new (data) SymbolCast(Op, From, To);
    DataSet.InsertNode(data, InsertPos);
  }

  return cast<SymbolCast>(data);
}

const SymIntExpr *SymbolManager::getSymIntExpr(const SymExpr *lhs,
                                               BinaryOperator::Opcode op,
                                               const llvm::APSInt& v,
                                               QualType t) {
  llvm::FoldingSetNodeID ID;
  SymIntExpr::Profile(ID, lhs, op, v, t);
  void *InsertPos;
  SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
  if (!data) {
    data = (SymIntExpr*) BPAlloc.Allocate<SymIntExpr>();
    new (data) SymIntExpr(lhs, op, v, t);
    DataSet.InsertNode(data, InsertPos);
  }

  return cast<SymIntExpr>(data);
}

const IntSymExpr *SymbolManager::getIntSymExpr(const llvm::APSInt& lhs,
                                               BinaryOperator::Opcode op,
                                               const SymExpr *rhs,
                                               QualType t) {
  llvm::FoldingSetNodeID ID;
  IntSymExpr::Profile(ID, lhs, op, rhs, t);
  void *InsertPos;
  SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
  if (!data) {
    data = (IntSymExpr*) BPAlloc.Allocate<IntSymExpr>();
    new (data) IntSymExpr(lhs, op, rhs, t);
    DataSet.InsertNode(data, InsertPos);
  }

  return cast<IntSymExpr>(data);
}

const SymSymExpr *SymbolManager::getSymSymExpr(const SymExpr *lhs,
                                               BinaryOperator::Opcode op,
                                               const SymExpr *rhs,
                                               QualType t) {
  llvm::FoldingSetNodeID ID;
  SymSymExpr::Profile(ID, lhs, op, rhs, t);
  void *InsertPos;
  SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
  if (!data) {
    data = (SymSymExpr*) BPAlloc.Allocate<SymSymExpr>();
    new (data) SymSymExpr(lhs, op, rhs, t);
    DataSet.InsertNode(data, InsertPos);
  }

  return cast<SymSymExpr>(data);
}

QualType SymbolConjured::getType() const {
  return T;
}

QualType SymbolDerived::getType() const {
  return R->getValueType();
}

QualType SymbolExtent::getType() const {
  ASTContext &Ctx = R->getMemRegionManager().getContext();
  return Ctx.getSizeType();
}

QualType SymbolMetadata::getType() const {
  return T;
}

QualType SymbolRegionValue::getType() const {
  return R->getValueType();
}

// A type can be symbolicated if it is a location, an integral/enum type, or a
// non-union record type.
bool SymbolManager::canSymbolicate(QualType T) {
  T = T.getCanonicalType();

  if (Loc::isLocType(T))
    return true;

  if (T->isIntegralOrEnumerationType())
    return true;

  if (T->isRecordType() && !T->isUnionType())
    return true;

  return false;
}

// Records that `Dependent` must be kept alive as long as `Primary` is live.
void SymbolManager::addSymbolDependency(const SymbolRef Primary,
                                        const SymbolRef Dependent) {
  auto &dependencies = SymbolDependencies[Primary];
  if (!dependencies) {
    dependencies = std::make_unique<SymbolRefSmallVectorTy>();
  }
  dependencies->push_back(Dependent);
}

const SymbolRefSmallVectorTy *SymbolManager::getDependentSymbols(
    const SymbolRef Primary) {
  SymbolDependTy::const_iterator I = SymbolDependencies.find(Primary);
  if (I == SymbolDependencies.end())
    return nullptr;
  return I->second.get();
}

// Marks every symbol registered as dependent on `sym` live; guarded so the
// dependents of each symbol are processed at most once.
void SymbolReaper::markDependentsLive(SymbolRef sym) {
  // Do not mark dependents more than once.
  SymbolMapTy::iterator LI = TheLiving.find(sym);
  assert(LI != TheLiving.end() && "The primary symbol is not live.");
  if (LI->second == HaveMarkedDependents)
    return;
  LI->second = HaveMarkedDependents;

  if (const SymbolRefSmallVectorTy *Deps = SymMgr.getDependentSymbols(sym)) {
    for (const auto I : *Deps) {
      if (TheLiving.find(I) != TheLiving.end())
        continue;
      markLive(I);
    }
  }
}

void SymbolReaper::markLive(SymbolRef sym) {
  TheLiving[sym] = NotProcessed;
  markDependentsLive(sym);
}

void SymbolReaper::markLive(const MemRegion *region) {
  RegionRoots.insert(region->getBaseRegion());
  markElementIndicesLive(region);
}

// Walks the super-region chain, keeping alive the index symbols of every
// ElementRegion encountered on the way.
void SymbolReaper::markElementIndicesLive(const MemRegion *region) {
  for (auto SR = dyn_cast<SubRegion>(region); SR;
       SR = dyn_cast<SubRegion>(SR->getSuperRegion())) {
    if (const auto ER = dyn_cast<ElementRegion>(SR)) {
      SVal Idx = ER->getIndex();
      for (auto SI = Idx.symbol_begin(), SE = Idx.symbol_end(); SI != SE; ++SI)
        markLive(*SI);
    }
  }
}

void SymbolReaper::markInUse(SymbolRef sym) {
  if (isa<SymbolMetadata>(sym))
    MetadataInUse.insert(sym);
}

bool SymbolReaper::isLiveRegion(const MemRegion *MR) {
  // TODO: For now, liveness of a memory region is equivalent to liveness of its
  // base region. In fact we can do a bit better: say, if a particular FieldDecl
  // is not used later in the path, we can diagnose a leak of a value within
  // that field earlier than, say, the variable that contains the field dies.
  MR = MR->getBaseRegion();

  if (RegionRoots.count(MR))
    return true;

  if (const auto *SR = dyn_cast<SymbolicRegion>(MR))
    return isLive(SR->getSymbol());

  if (const auto *VR = dyn_cast<VarRegion>(MR))
    return isLive(VR, true);

  // FIXME: This is a gross over-approximation. What we really need is a way to
  // tell if anything still refers to this region. Unlike SymbolicRegions,
  // AllocaRegions don't have associated symbols, though, so we don't actually
  // have a way to track their liveness.
  if (isa<AllocaRegion>(MR))
    return true;

  if (isa<CXXThisRegion>(MR))
    return true;

  if (isa<MemSpaceRegion>(MR))
    return true;

  if (isa<CodeTextRegion>(MR))
    return true;

  return false;
}

// Determines liveness of `sym`: already-live symbols are confirmed (and their
// dependents marked); otherwise liveness is derived recursively from the
// symbol's kind (its region or sub-symbols).
bool SymbolReaper::isLive(SymbolRef sym) {
  if (TheLiving.count(sym)) {
    markDependentsLive(sym);
    return true;
  }

  bool KnownLive;

  switch (sym->getKind()) {
  case SymExpr::SymbolRegionValueKind:
    KnownLive = isLiveRegion(cast<SymbolRegionValue>(sym)->getRegion());
    break;
  case SymExpr::SymbolConjuredKind:
    KnownLive = false;
    break;
  case SymExpr::SymbolDerivedKind:
    KnownLive = isLive(cast<SymbolDerived>(sym)->getParentSymbol());
    break;
  case SymExpr::SymbolExtentKind:
    KnownLive = isLiveRegion(cast<SymbolExtent>(sym)->getRegion());
    break;
  case SymExpr::SymbolMetadataKind:
    KnownLive = MetadataInUse.count(sym) &&
                isLiveRegion(cast<SymbolMetadata>(sym)->getRegion());
    if (KnownLive)
      MetadataInUse.erase(sym);
    break;
  case SymExpr::SymIntExprKind:
    KnownLive = isLive(cast<SymIntExpr>(sym)->getLHS());
    break;
  case SymExpr::IntSymExprKind:
    KnownLive = isLive(cast<IntSymExpr>(sym)->getRHS());
    break;
  case SymExpr::SymSymExprKind:
    KnownLive = isLive(cast<SymSymExpr>(sym)->getLHS()) &&
                isLive(cast<SymSymExpr>(sym)->getRHS());
    break;
  case SymExpr::SymbolCastKind:
    KnownLive = isLive(cast<SymbolCast>(sym)->getOperand());
    break;
  }

  if (KnownLive)
    markLive(sym);

  return KnownLive;
}

bool
SymbolReaper::isLive(const Stmt *ExprVal, const LocationContext *ELCtx) const {
  if (LCtx == nullptr)
    return false;

  if (LCtx != ELCtx) {
    // If the reaper's location context is a parent of the expression's
    // location context, then the expression value is now "out of scope".
    if (LCtx->isParentOf(ELCtx))
      return false;
    return true;
  }

  // If no statement is provided, everything is this and parent contexts is live.
if (!Loc) return true; return LCtx->getAnalysis<RelaxedLiveVariables>()->isLive(Loc, ExprVal); } bool SymbolReaper::isLive(const VarRegion *VR, bool includeStoreBindings) const{ const StackFrameContext *VarContext = VR->getStackFrame(); if (!VarContext) return true; if (!LCtx) return false; const StackFrameContext *CurrentContext = LCtx->getStackFrame(); if (VarContext == CurrentContext) { // If no statement is provided, everything is live. if (!Loc) return true; // Anonymous parameters of an inheriting constructor are live for the entire // duration of the constructor. if (isa<CXXInheritedCtorInitExpr>(Loc)) return true; if (LCtx->getAnalysis<RelaxedLiveVariables>()->isLive(Loc, VR->getDecl())) return true; if (!includeStoreBindings) return false; unsigned &cachedQuery = const_cast<SymbolReaper *>(this)->includedRegionCache[VR]; if (cachedQuery) { return cachedQuery == 1; } // Query the store to see if the region occurs in any live bindings. if (Store store = reapedStore.getStore()) { bool hasRegion = reapedStore.getStoreManager().includedInBindings(store, VR); cachedQuery = hasRegion ? 1 : 2; return hasRegion; } return false; } return VarContext->isParentOf(CurrentContext); }
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <fstream>

#include "ngraph/env_util.hpp"
#include "ngraph/file_util.hpp"
#include "ngraph/function.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/log.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/experimental/compiled_kernel.hpp"
#include "ngraph/op/parameter.hpp"
#include "ngraph/pass/pass.hpp"
#include "ngraph/pass/visualize_tree.hpp"
#include "ngraph/util.hpp"

using namespace ngraph;
using namespace std;

//
// As we are visualizing the graph, we will make some tweaks to the generated dot file to make
// routing more tractable for Graphviz as well as (hopefully) more legible for the user.
//
// NOTE: It's possible, even likely, that better algorithms are available here. I just tried a
// few different things without doing much research, and this seemed to work well. Please feel
// free to improve on this. --amprocte
//
// -----------------
//
// The first tweak is to trim edges that, intuitively speaking, have long "skip distance". For
// example:
//
//   [Actual Graph Structure]      [Visualization]
//     n0                            n0
//     | \                           |  \
//     n1 \                          n1  [to n50]
//     |   |                         |
//     n2  |                         n2
//     |   |                         |
//     n3  |                         n3
//     |   |                         |
//    ...  |                        ...  [from n0]
//     |  /                          |  /
//     n50                           n50
//
// This is useful for training graphs especially, which tend to have very long feed-forward edges
// for intermediate values from fprop being stored for later reuse in the bprop phase.
//
// Efficiently detecting a "long skip" is a bit tricky. We want to come up with a metric that is
// reasonably fast to compute, but does not result in cuts that will split the graph into multiple
// components. The heuristic we are using for the jump distance between n and m is the maximum
// difference in maximum path length from n and m to any result node that is reachable from both
// n and m (or 0, if no such result node exists). Not sure if this is mathematically *guaranteed*
// not to split graph components, but it seems to work well in practice.
//
// Formally:
//
// Compute-Heights-Above-Each-Parameter(N):
//    Inputs: nodes N; define R={n in N | n is a Result node}
//    Output: height_maps: map from N to (map from R to int)
//
//    height_maps is initially empty
//
//    for each r in R:
//        Insert into height_map the map {r -> 1}
//
//    for each n in N in reverse topological ("results-first") order:
//        for each user m of n:
//            for each r in height_maps[m].keys:
//                height_maps[n][r] := max(height_maps[n][r], height_maps[m][r]+1)
//
// Jump-Distance(n,m,height_maps):
//    Inputs: n (source node), m (destination node), height_maps (pre-computed above)
//    Output: jump_distance: int
//
//    jump_distance := 0
//
//    for each r in height_maps[n].keys:
//        if r is in height_maps[m].keys:
//            jump_distance := max(jump_distance, abs(height_maps[n][r] - height_maps[m][r]))
//
// Later on, if E is an edge from n to m, and Jump-Distance(n,m,height_map) > K (where K is kind
// of arbitrary but currently set to 20), we will "cut" the edge as illustrated above.
//
// -----------------
//
// The second tweak aims to eliminate routing pressure from nodes that have large outdegree and
// are connected to many otherwise-distant places in the graph. For this, the only thing we are
// doing at the moment is to "float" Parameter and Constant nodes. This means that rather than
// visualizing them as a single node (which might have very large outdegree as in, e.g., a
// learning rate parameter being fed to many different places), we make a "copy" of the node at
// each occurrence site (with a dashed outline).
//
// NOTE: This tweak could probably be extended to float other kinds of nodes with high out-degree.
// (This situation is likely to arise after constant subexpression elimination.) Here one has to
// be careful to avoid splitting the components. I have some rough ideas on how this could be
// dealt with, but have not had time to implement them yet. --amprocte
//

// Threshold K from the "long skip" heuristic described above.
const int ngraph::pass::VisualizeTree::max_jump_distance = 20;

// Per-node map from reachable Result nodes to the longest path length to each;
// implements the "height_maps" structure from the comment above.
class HeightMap
{
public:
    HeightMap() {}
    HeightMap(std::set<Node*> initials)
    {
        for (auto& n : initials)
        {
            m_heights[n] = 0;
        }
    }
    // Merge a successor's heights into this node's map, one step further away.
    void absorb(const HeightMap& other)
    {
        for (auto& p : other.m_heights)
        {
            auto k = p.first;
            auto v = p.second;
            m_heights[k] = std::max(m_heights[k], v + 1);
        }
    }
    // Jump-Distance(this, target) from the comment above: max |height delta|
    // over Result nodes reachable from both.
    int64_t max_jump_to(const HeightMap& target)
    {
        int64_t result = 0;
        for (auto& p : m_heights)
        {
            auto k = p.first;
            auto v = p.second;
            if (target.m_heights.count(k) != 0)
            {
                result = std::max(result, std::abs(target.m_heights.at(k) - v));
            }
        }
        return result;
    }

private:
    std::unordered_map<Node*, int64_t> m_heights;
};

// Build the dot edge-label attribute for the output->dst edge: the
// "<output index>-<input index>" pair, plus the jump distance when the
// NGRAPH_VISUALIZE_EDGE_JUMP_DISTANCE env switch is enabled.
static std::string label_edge(const Output<Node>& output,
                              const std::shared_ptr<Node>& dst,
                              int64_t jump_distance)
{
    std::stringstream ss;
    for (Input<Node> input : output.get_target_inputs())
    {
        if (input.get_node() == dst.get())
        {
            stringstream label;
            label << "[label=\" " << output.get_index() << "-" << input.get_index() << " \"]";
            ss << label.str();
        }
    }
    if (getenv_bool("NGRAPH_VISUALIZE_EDGE_JUMP_DISTANCE"))
    {
        if (jump_distance > 1)
        {
            stringstream label;
            label << "[label=\"jump=" << jump_distance << "\"]";
            ss << label.str();
        }
    }
    return ss.str();
}

// Emit the dot description of every function: compute height maps (reverse
// topological sweep), then write nodes/edges, then render the file.
bool pass::VisualizeTree::run_on_module(vector<shared_ptr<Function>>& functions)
{
    for (shared_ptr<Function> f : functions)
    {
        unordered_map<Node*, HeightMap> height_maps;
        for (auto& node : f->get_ops())
        {
            if (is_type<op::v0::Result>(node))
            {
                height_maps[node.get()] = HeightMap({node.get()});
            }
            else
            {
                height_maps[node.get()] = HeightMap();
            }
        }
        auto nodes = topological_sort(f->get_ops());
        // Reverse topological order: results first, so each node absorbs the
        // (already final) heights of its users.
        for (auto it = nodes.rbegin(); it != nodes.rend(); ++it)
        {
            auto& node = *it;
            for (auto& output : node->outputs())
            {
                for (auto& input : output.get_target_inputs())
                {
                    auto target_node = input.get_node();
                    height_maps[node.get()].absorb(height_maps[target_node]);
                }
            }
        }

        // TODO(amprocte): Maybe find a way to make this tunable.

        size_t fake_node_ctr = 0;

        traverse_nodes(f, [&](shared_ptr<Node> node) {
            if (auto ck = as_type_ptr<ngraph::op::CompiledKernel>(node))
            {
                // print sub-graph
                auto nodes_list = ck->get_node_list(); // all nodes inside the CK sub-graph
                for (auto& ck_node : nodes_list)
                {
                    m_ss << add_attributes(ck_node);
                }
                // all edges to each node in the sub-graph
                for (auto& subgraph_node : nodes_list)
                {
                    add_node_arguments(subgraph_node, height_maps, fake_node_ctr);
                }
            }
            add_node_arguments(node, height_maps, fake_node_ctr);
        });
    }

    render();

    return false;
}

pass::VisualizeTree::VisualizeTree(const string& file_name, node_modifiers_t nm, bool dot_only)
    : m_name{file_name}
    , m_node_modifiers{nm}
    , m_dot_only(dot_only)
{
}

// Write the edges from each argument of 'node' into the dot stream, applying
// the two tweaks documented at the top of the file: Parameter/Constant
// arguments are "floated" as per-use dashed clones, and long-jump edges are
// cut into SEND/RECV pseudo-node pairs.
void pass::VisualizeTree::add_node_arguments(shared_ptr<Node> node,
                                             unordered_map<Node*, HeightMap>& height_maps,
                                             size_t& fake_node_ctr)
{
    for (auto input_value : node->input_values())
    {
        auto arg = input_value.get_node_shared_ptr();
        size_t jump_distance = height_maps[arg.get()].max_jump_to(height_maps[node.get()]);
        if (is_type<ngraph::op::Constant>(arg) || is_type<ngraph::op::Parameter>(arg))
        {
            auto clone_name = "CLONE_" + to_string(fake_node_ctr);
            auto color = (is_type<op::v0::Parameter>(arg) ? "blue" : "black");
            m_ss << " " << clone_name << "[shape=\"box\" style=\"dashed,filled\" color=\""
                 << color << "\" fillcolor=\"white\" label=\"" << get_node_name(arg) << "\"]\n";
            m_ss << " " << clone_name << " -> " << node->get_name()
                 << label_edge(input_value, node, jump_distance) << "\n";
            fake_node_ctr++;
        }
        else if (jump_distance > max_jump_distance)
        {
            m_ss << add_attributes(arg);
            m_ss << add_attributes(node);
            auto recv_node_name = "RECV_" + to_string(fake_node_ctr);
            auto send_node_name = "SEND_" + to_string(fake_node_ctr);
            m_ss << " " << recv_node_name << "[shape=\"box\" style=\"solid,filled\" "
                    "fillcolor=\"#ffcccc\" label=\"Receive["
                 << arg->get_name() << "]\"]\n";
            m_ss << " " << send_node_name << "[shape=\"box\" style=\"solid,filled\" "
                    "fillcolor=\"#ccffcc\" label=\"Send["
                 << node->get_name() << "]\"]\n";
            m_ss << " " << arg->get_name() << " -> " << send_node_name
                 << label_edge(input_value, node, jump_distance) << "\n";
            m_ss << " " << recv_node_name << " -> " << node->get_name()
                 << label_edge(input_value, node, jump_distance) << "\n";
            fake_node_ctr++;
        }
        else
        {
            m_ss << add_attributes(arg);
            m_ss << add_attributes(node);
            m_ss << " " << arg->get_name() << " -> " << node->get_name()
                 << label_edge(input_value, node, jump_distance) << "\n";
        }
    }
}

// Emit a node's attribute string at most once; subsequent calls return "".
string pass::VisualizeTree::add_attributes(shared_ptr<Node> node)
{
    string rc;
    if (m_nodes_with_attributes.find(node) == m_nodes_with_attributes.end())
    {
        m_nodes_with_attributes.insert(node);
        rc = get_attributes(node);
    }
    return rc;
}

// Format a PartialShape as e.g. "[1,?,3]", or "?" for dynamic rank.
static std::string pretty_partial_shape(const PartialShape& shape)
{
    std::stringstream ss;
    if (shape.rank().is_dynamic())
    {
        ss << "?";
    }
    else
    {
        bool first = true;
        ss << "[";
        for (size_t i = 0; i < shape.rank().get_length(); i++)
        {
            if (!first)
            {
                ss << ",";
            }
            if (shape[i].is_dynamic())
            {
                ss << "?";
            }
            else
            {
                ss << shape[i].get_length();
            }
            first = false;
        }
        ss << "]";
    }
    return ss.str();
}

// Build the full dot attribute list for a node, including an HTML-like table
// label of its input/output types and shapes (runs of identical rows are
// collapsed into "...").
string pass::VisualizeTree::get_attributes(shared_ptr<Node> node)
{
    vector<string> attributes;
    attributes.push_back("shape=box");

    if (node->is_output())
    {
        attributes.push_back("color=crimson");
        attributes.push_back("penwidth=1.5");
    }
    else
    {
        attributes.push_back("color=black");
    }

    // Construct the label attribute
    {
        stringstream label;
        label << "label=<<table border=\"0\" cellborder=\"0\" cellpadding=\"0\" "
                 "style=\"\"><tr><td align=\"center\" colspan=\"5\">"
              << node->get_name() << "</td></tr>";

        size_t index = 0;
        const string td_start = "<td><font point-size=\"10\" face=\"courier\">";
        const string td_end = "</font></td>";
        vector<string> rows;
        // row_compare holds type+shape only (no index) so identical
        // consecutive rows can be detected for collapsing.
        vector<string> row_compare;
        for (auto input : node->inputs())
        {
            stringstream row_ss;
            stringstream row_compare_ss;
            row_ss << "<tr>";
            row_ss << td_start << "I[" << index++ << "]" << td_end;
            row_compare_ss << td_start << input.get_element_type().get_type_name() << td_end;
            row_compare_ss << td_start << pretty_partial_shape(input.get_shape()) << td_end;
            row_ss << row_compare_ss.str() << "</tr>";
            rows.push_back(row_ss.str());
            row_compare.push_back("I" + row_compare_ss.str());
        }
        index = 0;
        for (auto output : node->outputs())
        {
            stringstream row_ss;
            stringstream row_compare_ss;
            row_ss << "<tr>";
            row_ss << td_start << "O[" << index++ << "]" << td_end;
            row_compare_ss << td_start << output.get_element_type().get_type_name() << td_end;
            row_compare_ss << td_start << pretty_partial_shape(output.get_shape()) << td_end;
            row_ss << row_compare_ss.str() << "</tr>";
            rows.push_back(row_ss.str());
            row_compare.push_back("O" + row_compare_ss.str());
        }

        // Collapse duplicate rows
        vector<int64_t> remove_list;
        for (size_t i = 1; i < row_compare.size() - 1; i++)
        {
            string s1 = row_compare[i - 1];
            string s2 = row_compare[i];
            string s3 = row_compare[i + 1];
            if (s1 == s2 && s2 == s3)
            {
                remove_list.push_back(i);
            }
        }
        if (remove_list.size() > 3)
        {
            // Go backwards through the list to make removal easier
            int64_t start = remove_list[remove_list.size() - 1];
            int64_t end = start;
            int64_t count = 0;
            for (int64_t i = remove_list.size() - 2; i >= 0; --i)
            {
                int64_t row = remove_list[i];
                if (row == start - 1)
                {
                    // continue
                    start = row;
                    count++;
                }
                else
                {
                    rows.erase(rows.begin() + start, rows.begin() + end + 1);
                    string str = "<tr><td align=\"center\" colspan=\"5\">...</td></tr>";
                    rows.insert(rows.begin() + start, str);
                    end = row;
                    start = row;
                }
            }
            if (start != end)
            {
                rows.erase(rows.begin() + start, rows.begin() + end + 1);
                string str = "<tr><td align=\"center\" colspan=\"5\">...</td></tr>";
                rows.insert(rows.begin() + start, str);
            }
        }
        // if (get_provenance_enabled())
        // {
        //     for (auto tag : node->get_provenance_tags())
        //     {
        //         string str = "<tr><td align=\"left\" colspan=\"5\">tag=" + tag + "</td></tr>";
        //         rows.push_back(str);
        //     }
        // }
        for (const string& s : rows)
        {
            label << s;
        }
        label << "</table>>";
        attributes.push_back(label.str());
    }

    if (m_node_modifiers)
    {
        m_node_modifiers(*node, attributes);
    }
    stringstream ss;
    ss << " " << node->get_name() << " [" << join(attributes, " ") << "]\n";

    return ss.str();
}

// Display name for a node: friendly name (plus real name when they differ);
// CompiledKernel nodes also list their sub-graph members.
string pass::VisualizeTree::get_node_name(shared_ptr<Node> node)
{
    string rc = node->get_friendly_name();
    if (node->get_friendly_name() != node->get_name())
    {
        rc += "\\n" + node->get_name();
    }
    if (auto ck = as_type_ptr<ngraph::op::CompiledKernel>(node))
    {
        rc += "\\n{";
        // add sub-graph node names
        for (auto& ck_node : ck->get_node_list())
        {
            rc += ck_node->get_name();
            rc += ", ";
        }
        rc += "}\\n";
    }
    return rc;
}

// Write the accumulated dot text to <m_name>.dot and, unless m_dot_only or on
// Windows, shell out to Graphviz "dot" to produce the requested format.
void pass::VisualizeTree::render() const
{
    string ext = file_util::get_file_ext(m_name);
    string output_format = ext.substr(1);
    string dot_file = m_name;
    if (to_lower(ext) != ".dot")
    {
        dot_file += ".dot";
    }
    ofstream out(dot_file);
    if (out)
    {
        out << "digraph ngraph\n{\n";
        out << m_ss.str();
        out << "}\n";
        out.close();

        if (!m_dot_only && to_lower(ext) != ".dot")
        {
#ifndef _WIN32
            stringstream ss;
            ss << "dot -T" << output_format << " " << dot_file << " -o" << m_name;
            auto cmd = ss.str();
            auto stream = popen(cmd.c_str(), "r");
            if (stream)
            {
                pclose(stream);
            }
#endif
        }
    }
}
/***********************************************************************
    created:    Tue Apr 30 2013
    authors:    Paul D Turner <paul@cegui.org.uk>
                Lukas E Meindl
*************************************************************************/
/***************************************************************************
 *   Copyright (C) 2004 - 2013 Paul D Turner & The CEGUI Development Team
 *
 *   Permission is hereby granted, free of charge, to any person obtaining
 *   a copy of this software and associated documentation files (the
 *   "Software"), to deal in the Software without restriction, including
 *   without limitation the rights to use, copy, modify, merge, publish,
 *   distribute, sublicense, and/or sell copies of the Software, and to
 *   permit persons to whom the Software is furnished to do so, subject to
 *   the following conditions:
 *
 *   The above copyright notice and this permission notice shall be
 *   included in all copies or substantial portions of the Software.
 *
 *   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 *   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 *   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 *   IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 *   OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 *   ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 *   OTHER DEALINGS IN THE SOFTWARE.
 ***************************************************************************/
#include "CEGUI/RendererModules/OpenGL/GL.h"
#include "CEGUI/RendererModules/OpenGL/RendererBase.h"
#include "CEGUI/RendererModules/OpenGL/Texture.h"
#include "CEGUI/RendererModules/OpenGL/TextureTarget.h"
#include "CEGUI/RendererModules/OpenGL/ViewportTarget.h"
#include "CEGUI/RendererModules/OpenGL/GeometryBufferBase.h"
#include "CEGUI/RendererModules/OpenGL/GlmPimpl.h"
#include "CEGUI/Exceptions.h"
#include "CEGUI/ImageCodec.h"
#include "CEGUI/DynamicModule.h"
#include "CEGUI/System.h"
#include "CEGUI/Logger.h"

#include <sstream>
#include <algorithm>

namespace CEGUI
{
//----------------------------------------------------------------------------//
// Renderer identification string; concrete subclasses are expected to set it.
String OpenGLRendererBase::d_rendererID("--- subclass did not set ID: Fix this!");

//----------------------------------------------------------------------------//
// Default construction: derive the display size from the current GL viewport.
OpenGLRendererBase::OpenGLRendererBase() :
    d_viewProjectionMatrix(0),
    d_activeRenderTarget(0)
{
    init();
    initialiseDisplaySizeWithViewportSize();
    d_defaultTarget = CEGUI_NEW_AO OpenGLViewportTarget(*this);
}

//----------------------------------------------------------------------------//
// Construction with a caller-supplied display size.
OpenGLRendererBase::OpenGLRendererBase(const Sizef& display_size) :
    d_displaySize(display_size),
    d_viewProjectionMatrix(0),
    d_activeRenderTarget(0)
{
    init();
    d_defaultTarget = CEGUI_NEW_AO OpenGLViewportTarget(*this);
}

//----------------------------------------------------------------------------//
// Construction that also runs GLEW initialisation (optionally "experimental").
OpenGLRendererBase::OpenGLRendererBase(bool set_glew_experimental) :
    d_viewProjectionMatrix(0),
    d_activeRenderTarget(0)
{
    init(true, set_glew_experimental);
    initialiseDisplaySizeWithViewportSize();
    d_defaultTarget = CEGUI_NEW_AO OpenGLViewportTarget(*this);
}

//----------------------------------------------------------------------------//
OpenGLRendererBase::OpenGLRendererBase(const Sizef& display_size,
                                       bool set_glew_experimental) :
    d_displaySize(display_size),
    d_viewProjectionMatrix(0),
    d_activeRenderTarget(0)
{
    init(true, set_glew_experimental);
    d_defaultTarget = CEGUI_NEW_AO OpenGLViewportTarget(*this);
}

//----------------------------------------------------------------------------//
// Shared constructor body: defaults, optional GLEW init, GL info and limits.
void OpenGLRendererBase::init(bool init_glew, bool set_glew_experimental)
{
    d_displayDPI.d_x = d_displayDPI.d_y = 96;
    d_initExtraStates = false;
    d_activeBlendMode = BM_INVALID;
    d_viewProjectionMatrix = new mat4Pimpl();
#if defined CEGUI_USE_GLEW
    if (init_glew)
    {
        if (set_glew_experimental)
            glewExperimental = GL_TRUE;
        GLenum err = glewInit();
        if(err != GLEW_OK)
        {
            std::ostringstream err_string;
            //Problem: glewInit failed, something is seriously wrong.
            err_string << "failed to initialise the GLEW library. "
                << glewGetErrorString(err);

            CEGUI_THROW(RendererException(err_string.str().c_str()));
        }
        //Clear the useless error glew produces as of version 1.7.0, when using OGL3.2 Core Profile
        glGetError();
    }
#else
    CEGUI_UNUSED(init_glew);
    CEGUI_UNUSED(set_glew_experimental);
#endif
    OpenGLInfo::getSingleton().init();
    initialiseMaxTextureSize();
}

//----------------------------------------------------------------------------//
// Tear down owned GL resources, the default target, and the matrix pimpl.
OpenGLRendererBase::~OpenGLRendererBase()
{
    destroyAllGeometryBuffers();
    destroyAllTextureTargets();
    destroyAllTextures();

    CEGUI_DELETE_AO d_defaultTarget;
    delete d_viewProjectionMatrix;
}

//----------------------------------------------------------------------------//
// Query the current GL viewport and adopt its width/height as display size.
void OpenGLRendererBase::initialiseDisplaySizeWithViewportSize()
{
    GLint vp[4];
    glGetIntegerv(GL_VIEWPORT, vp);
    d_displaySize = Sizef(static_cast<float>(vp[2]),
                          static_cast<float>(vp[3]));
}

//----------------------------------------------------------------------------//
void OpenGLRendererBase::initialiseMaxTextureSize()
{
    GLint max_tex_size;
    glGetIntegerv(GL_MAX_TEXTURE_SIZE, &max_tex_size);
    d_maxTextureSize = max_tex_size;
}

//----------------------------------------------------------------------------//
RenderTarget& OpenGLRendererBase::getDefaultRenderTarget()
{
    return *d_defaultTarget;
}

//----------------------------------------------------------------------------//
// Create (via subclass factory) and track a geometry buffer.
GeometryBuffer& OpenGLRendererBase::createGeometryBuffer()
{
    OpenGLGeometryBufferBase* b = createGeometryBuffer_impl();
    d_geometryBuffers.push_back(b);
    return *b;
}

//----------------------------------------------------------------------------//
void OpenGLRendererBase::destroyGeometryBuffer(const GeometryBuffer& buffer)
{
    GeometryBufferList::iterator i = std::find(d_geometryBuffers.begin(),
                                               d_geometryBuffers.end(),
                                               &buffer);

    if (d_geometryBuffers.end() != i)
    {
        d_geometryBuffers.erase(i);
        CEGUI_DELETE_AO &buffer;
    }
}

//----------------------------------------------------------------------------//
void OpenGLRendererBase::destroyAllGeometryBuffers()
{
    while (!d_geometryBuffers.empty())
        destroyGeometryBuffer(**d_geometryBuffers.begin());
}

//----------------------------------------------------------------------------//
// May return null when the subclass cannot create texture targets.
TextureTarget* OpenGLRendererBase::createTextureTarget()
{
    TextureTarget* t = createTextureTarget_impl();

    if (t)
        d_textureTargets.push_back(t);

    return t;
}

//----------------------------------------------------------------------------//
void OpenGLRendererBase::destroyTextureTarget(TextureTarget* target)
{
    TextureTargetList::iterator i = std::find(d_textureTargets.begin(),
                                              d_textureTargets.end(),
                                              target);

    if (d_textureTargets.end() != i)
    {
        d_textureTargets.erase(i);
        CEGUI_DELETE_AO target;
    }
}

//----------------------------------------------------------------------------//
void OpenGLRendererBase::destroyAllTextureTargets()
{
    while (!d_textureTargets.empty())
        destroyTextureTarget(*d_textureTargets.begin());
}

//----------------------------------------------------------------------------//
// Create an empty named texture; throws AlreadyExistsException on name clash.
Texture& OpenGLRendererBase::createTexture(const String& name)
{
    if (d_textures.find(name) != d_textures.end())
        CEGUI_THROW(AlreadyExistsException(
            "A texture named '" + name + "' already exists."));

    OpenGLTexture* tex = CEGUI_NEW_AO OpenGLTexture(*this, name);
    d_textures[name] = tex;

    logTextureCreation(name);

    return *tex;
}

//----------------------------------------------------------------------------//
// Create a named texture loaded from a file in a resource group.
Texture& OpenGLRendererBase::createTexture(const String& name,
                                           const String& filename,
                                           const String& resourceGroup)
{
    if (d_textures.find(name) != d_textures.end())
        CEGUI_THROW(AlreadyExistsException(
            "A texture named '" + name + "' already exists."));

    OpenGLTexture* tex = CEGUI_NEW_AO OpenGLTexture(*this, name, filename,
                                                    resourceGroup);
    d_textures[name] = tex;

    logTextureCreation(name);

    return *tex;
}

//----------------------------------------------------------------------------//
// Create a named texture with the given dimensions.
Texture& OpenGLRendererBase::createTexture(const String& name, const Sizef& size)
{
    if (d_textures.find(name) != d_textures.end())
        CEGUI_THROW(AlreadyExistsException(
            "A texture named '" + name + "' already exists."));

    OpenGLTexture* tex = CEGUI_NEW_AO OpenGLTexture(*this, name, size);
    d_textures[name] = tex;

    logTextureCreation(name);

    return *tex;
}

//----------------------------------------------------------------------------//
void OpenGLRendererBase::logTextureCreation(const String& name)
{
    Logger* logger = Logger::getSingletonPtr();
    if (logger)
        logger->logEvent("[OpenGLRenderer] Created texture: " + name);
}

//----------------------------------------------------------------------------//
void OpenGLRendererBase::destroyTexture(Texture& texture)
{
    destroyTexture(texture.getName());
}

//----------------------------------------------------------------------------//
void OpenGLRendererBase::destroyTexture(const String& name)
{
    TextureMap::iterator i = d_textures.find(name);

    if (d_textures.end() != i)
    {
        logTextureDestruction(name);
        CEGUI_DELETE_AO i->second;
        d_textures.erase(i);
    }
}

//----------------------------------------------------------------------------//
void OpenGLRendererBase::logTextureDestruction(const String& name)
{
    Logger* logger = Logger::getSingletonPtr();
    if (logger)
        logger->logEvent("[OpenGLRenderer] Destroyed texture: " + name);
}

//----------------------------------------------------------------------------//
void OpenGLRendererBase::destroyAllTextures()
{
    while (!d_textures.empty())
        destroyTexture(d_textures.begin()->first);
}

//----------------------------------------------------------------------------//
// Look up a named texture; throws UnknownObjectException when absent.
Texture& OpenGLRendererBase::getTexture(const String& name) const
{
    TextureMap::const_iterator i = d_textures.find(name);

    if (i == d_textures.end())
        CEGUI_THROW(UnknownObjectException(
            "No texture named '" + name + "' is available."));

    return *i->second;
}

//----------------------------------------------------------------------------//
bool OpenGLRendererBase::isTextureDefined(const String& name) const
{
    return d_textures.find(name) != d_textures.end();
}

//----------------------------------------------------------------------------//
const Sizef& OpenGLRendererBase::getDisplaySize() const
{
    return d_displaySize;
}

//----------------------------------------------------------------------------//
const Vector2f& OpenGLRendererBase::getDisplayDPI() const
{
    return d_displayDPI;
}

//----------------------------------------------------------------------------//
uint OpenGLRendererBase::getMaxTextureSize() const
{
    return d_maxTextureSize;
}

//----------------------------------------------------------------------------//
const String& OpenGLRendererBase::getIdentifierString() const
{
    return d_rendererID;
}

//----------------------------------------------------------------------------//
// Wrap an externally-created GL texture object under a CEGUI texture name.
Texture& OpenGLRendererBase::createTexture(const String& name, GLuint tex,
                                           const Sizef& sz)
{
    if (d_textures.find(name) != d_textures.end())
        CEGUI_THROW(AlreadyExistsException(
            "A texture named '" + name + "' already exists."));

    OpenGLTexture* t = CEGUI_NEW_AO OpenGLTexture(*this, name, tex, sz);
    d_textures[name] = t;

    logTextureCreation(name);

    return *t;
}

//----------------------------------------------------------------------------//
// Adopt a new display size and keep the default render target's area in sync.
void OpenGLRendererBase::setDisplaySize(const Sizef& sz)
{
    if (sz != d_displaySize)
    {
        d_displaySize = sz;

        // update the default target's area
        Rectf area(d_defaultTarget->getArea());
        area.setSize(sz);
        d_defaultTarget->setArea(area);
    }
}

//----------------------------------------------------------------------------//
void OpenGLRendererBase::enableExtraStateSettings(bool setting)
{
    d_initExtraStates = setting;
}

//----------------------------------------------------------------------------//
// Pull texture data back to system memory ahead of a GL context change.
void OpenGLRendererBase::grabTextures()
{
    // perform grab operations for texture targets
    TextureTargetList::iterator target_iterator = d_textureTargets.begin();
    for (; target_iterator != d_textureTargets.end(); ++target_iterator)
        static_cast<OpenGLTextureTarget*>(*target_iterator)->grabTexture();

    // perform grab on regular textures
    TextureMap::iterator texture_iterator = d_textures.begin();
    for (; texture_iterator != d_textures.end(); ++texture_iterator)
        texture_iterator->second->grabTexture();
}

//----------------------------------------------------------------------------//
// Re-upload previously grabbed texture data after the context is restored.
void OpenGLRendererBase::restoreTextures()
{
    // perform restore on textures
    TextureMap::iterator texture_iterator = d_textures.begin();
    for (; texture_iterator != d_textures.end(); ++texture_iterator)
        texture_iterator->second->restoreTexture();

    // perform restore operations for texture targets
    TextureTargetList::iterator target_iterator = d_textureTargets.begin();
    for (; target_iterator != d_textureTargets.end(); ++target_iterator)
        static_cast<OpenGLTextureTarget*>(*target_iterator)->restoreTexture();
}

//----------------------------------------------------------------------------//
Sizef OpenGLRendererBase::getAdjustedTextureSize(const Sizef& sz) const
{
    Sizef out(sz);

    // if we can't support non power of two sizes, get appropriate POT values.
    if (!OpenGLInfo::getSingleton().isNpotTextureSupported())
    {
        out.d_width = getNextPOTSize(out.d_width);
        out.d_height = getNextPOTSize(out.d_height);
    }

    return out;
}

//----------------------------------------------------------------------------//
// Round f up to the next power-of-two size (f itself when already a POT).
float OpenGLRendererBase::getNextPOTSize(const float f)
{
    uint size = static_cast<uint>(f);

    // if not power of 2
    if ((size & (size - 1)) || !size)
    {
        int log = 0;

        // get integer log of 'size' to base 2
        while (size >>= 1)
            ++log;

        // use log to calculate value to use as size.
        size = (2 << log);
    }

    return static_cast<float>(size);
}

//----------------------------------------------------------------------------//
const mat4Pimpl* OpenGLRendererBase::getViewProjectionMatrix()
{
    return d_viewProjectionMatrix;
}

//----------------------------------------------------------------------------//
void OpenGLRendererBase::setViewProjectionMatrix(const mat4Pimpl* viewProjectionMatrix)
{
    *d_viewProjectionMatrix = *viewProjectionMatrix;
}

//----------------------------------------------------------------------------//
const CEGUI::Rectf& OpenGLRendererBase::getActiveViewPort()
{
    return d_activeRenderTarget->getArea();
}

//----------------------------------------------------------------------------//
void OpenGLRendererBase::setActiveRenderTarget(RenderTarget* renderTarget)
{
    d_activeRenderTarget = renderTarget;
}

//----------------------------------------------------------------------------//
RenderTarget* OpenGLRendererBase::getActiveRenderTarget()
{
    return d_activeRenderTarget;
}

//----------------------------------------------------------------------------//

}
#include <hpx/config.hpp>
#include <hpx/hpx_init.hpp>

#if defined(BOOST_MSVC)
#pragma warning(disable: 4786)  // identifier truncated in debug info
#pragma warning(disable: 4710)  // function not inlined
#pragma warning(disable: 4711)  // function selected for automatic inline expansion
#pragma warning(disable: 4514)  // unreferenced inline removed
#endif

// Taken from the Boost.Bind library
//
// mem_fn_derived_test.cpp - tests mem_fn.hpp with derived objects
//
// Copyright (c) 2001, 2002 Peter Dimov and Multi Media Ltd.
// Copyright (c) 2013 Agustin Berge
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//

#include <hpx/util/mem_fn.hpp>
#include <boost/shared_ptr.hpp>

#if defined(BOOST_MSVC) && (BOOST_MSVC < 1300)
#pragma warning(push, 3)
#endif

#include <iostream>

#if defined(BOOST_MSVC) && (BOOST_MSVC < 1300)
#pragma warning(pop)
#endif

#include <hpx/util/lightweight_test.hpp>

// Base class whose member functions fold their arguments into a running
// hash.  fN takes N int arguments; gN is the const counterpart (it mixes
// each argument doubled, so the two families produce distinct hashes).
// 'hash' is mutable so the const gN overloads can update it.
struct B
{
    mutable unsigned int hash;

    B(): hash(0) {}

    int f0() { f1(17); return 0; }
    int g0() const { g1(17); return 0; }
    // f1/g1 are the primitive mixing steps; all higher-arity members
    // decompose into calls to them, so the final hash encodes the exact
    // sequence of invocations made in main().
    int f1(int a1) { hash = (hash * 17041 + a1) % 32768; return 0; }
    int g1(int a1) const { hash = (hash * 17041 + a1 * 2) % 32768; return 0; }
    int f2(int a1, int a2) { f1(a1); f1(a2); return 0; }
    int g2(int a1, int a2) const { g1(a1); g1(a2); return 0; }
    int f3(int a1, int a2, int a3) { f2(a1, a2); f1(a3); return 0; }
    int g3(int a1, int a2, int a3) const { g2(a1, a2); g1(a3); return 0; }
    int f4(int a1, int a2, int a3, int a4) { f3(a1, a2, a3); f1(a4); return 0; }
    int g4(int a1, int a2, int a3, int a4) const { g3(a1, a2, a3); g1(a4); return 0; }
    int f5(int a1, int a2, int a3, int a4, int a5) { f4(a1, a2, a3, a4); f1(a5); return 0; }
    int g5(int a1, int a2, int a3, int a4, int a5) const { g4(a1, a2, a3, a4); g1(a5); return 0; }
    int f6(int a1, int a2, int a3, int a4, int a5, int a6) { f5(a1, a2, a3, a4, a5); f1(a6); return 0; }
    int g6(int a1, int a2, int a3, int a4, int a5, int a6) const { g5(a1, a2, a3, a4, a5); g1(a6); return 0; }
    int f7(int a1, int a2, int a3, int a4, int a5, int a6, int a7) { f6(a1, a2, a3, a4, a5, a6); f1(a7); return 0; }
    int g7(int a1, int a2, int a3, int a4, int a5, int a6, int a7) const { g6(a1, a2, a3, a4, a5, a6); g1(a7); return 0; }
    int f8(int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8) { f7(a1, a2, a3, a4, a5, a6, a7); f1(a8); return 0; }
    int g8(int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8) const { g7(a1, a2, a3, a4, a5, a6, a7); g1(a8); return 0; }
};

// Derived class: the test invokes mem_fn with &X::fN / &X::gN (pointers to
// inherited members) against X objects to verify derived-object dispatch.
struct X: public B
{
};

// Exercises hpx::util::mem_fn for arities 0..8 against: an object, a
// const reference, a raw pointer, a pointer-to-const, and a shared_ptr.
// The expected hash constants at the end pin the exact call sequence;
// do not reorder any of the calls below.
int main()
{
    X x;

    X const & rcx = x;
    X const * pcx = &x;

    boost::shared_ptr<X> sp(new X);

    hpx::util::mem_fn(&X::f0)(x);
    hpx::util::mem_fn(&X::f0)(&x);
    hpx::util::mem_fn(&X::f0)(sp);

    hpx::util::mem_fn(&X::g0)(x);
    hpx::util::mem_fn(&X::g0)(rcx);
    hpx::util::mem_fn(&X::g0)(&x);
    hpx::util::mem_fn(&X::g0)(pcx);
    hpx::util::mem_fn(&X::g0)(sp);

    hpx::util::mem_fn(&X::f1)(x, 1);
    hpx::util::mem_fn(&X::f1)(&x, 1);
    hpx::util::mem_fn(&X::f1)(sp, 1);

    hpx::util::mem_fn(&X::g1)(x, 1);
    hpx::util::mem_fn(&X::g1)(rcx, 1);
    hpx::util::mem_fn(&X::g1)(&x, 1);
    hpx::util::mem_fn(&X::g1)(pcx, 1);
    hpx::util::mem_fn(&X::g1)(sp, 1);

    hpx::util::mem_fn(&X::f2)(x, 1, 2);
    hpx::util::mem_fn(&X::f2)(&x, 1, 2);
    hpx::util::mem_fn(&X::f2)(sp, 1, 2);

    hpx::util::mem_fn(&X::g2)(x, 1, 2);
    hpx::util::mem_fn(&X::g2)(rcx, 1, 2);
    hpx::util::mem_fn(&X::g2)(&x, 1, 2);
    hpx::util::mem_fn(&X::g2)(pcx, 1, 2);
    hpx::util::mem_fn(&X::g2)(sp, 1, 2);

    hpx::util::mem_fn(&X::f3)(x, 1, 2, 3);
    hpx::util::mem_fn(&X::f3)(&x, 1, 2, 3);
    hpx::util::mem_fn(&X::f3)(sp, 1, 2, 3);

    hpx::util::mem_fn(&X::g3)(x, 1, 2, 3);
    hpx::util::mem_fn(&X::g3)(rcx, 1, 2, 3);
    hpx::util::mem_fn(&X::g3)(&x, 1, 2, 3);
    hpx::util::mem_fn(&X::g3)(pcx, 1, 2, 3);
    hpx::util::mem_fn(&X::g3)(sp, 1, 2, 3);

    hpx::util::mem_fn(&X::f4)(x, 1, 2, 3, 4);
    hpx::util::mem_fn(&X::f4)(&x, 1, 2, 3, 4);
    hpx::util::mem_fn(&X::f4)(sp, 1, 2, 3, 4);

    hpx::util::mem_fn(&X::g4)(x, 1, 2, 3, 4);
    hpx::util::mem_fn(&X::g4)(rcx, 1, 2, 3, 4);
    hpx::util::mem_fn(&X::g4)(&x, 1, 2, 3, 4);
    hpx::util::mem_fn(&X::g4)(pcx, 1, 2, 3, 4);
    hpx::util::mem_fn(&X::g4)(sp, 1, 2, 3, 4);

    hpx::util::mem_fn(&X::f5)(x, 1, 2, 3, 4, 5);
    hpx::util::mem_fn(&X::f5)(&x, 1, 2, 3, 4, 5);
    hpx::util::mem_fn(&X::f5)(sp, 1, 2, 3, 4, 5);

    hpx::util::mem_fn(&X::g5)(x, 1, 2, 3, 4, 5);
    hpx::util::mem_fn(&X::g5)(rcx, 1, 2, 3, 4, 5);
    hpx::util::mem_fn(&X::g5)(&x, 1, 2, 3, 4, 5);
    hpx::util::mem_fn(&X::g5)(pcx, 1, 2, 3, 4, 5);
    hpx::util::mem_fn(&X::g5)(sp, 1, 2, 3, 4, 5);

    hpx::util::mem_fn(&X::f6)(x, 1, 2, 3, 4, 5, 6);
    hpx::util::mem_fn(&X::f6)(&x, 1, 2, 3, 4, 5, 6);
    hpx::util::mem_fn(&X::f6)(sp, 1, 2, 3, 4, 5, 6);

    hpx::util::mem_fn(&X::g6)(x, 1, 2, 3, 4, 5, 6);
    hpx::util::mem_fn(&X::g6)(rcx, 1, 2, 3, 4, 5, 6);
    hpx::util::mem_fn(&X::g6)(&x, 1, 2, 3, 4, 5, 6);
    hpx::util::mem_fn(&X::g6)(pcx, 1, 2, 3, 4, 5, 6);
    hpx::util::mem_fn(&X::g6)(sp, 1, 2, 3, 4, 5, 6);

    hpx::util::mem_fn(&X::f7)(x, 1, 2, 3, 4, 5, 6, 7);
    hpx::util::mem_fn(&X::f7)(&x, 1, 2, 3, 4, 5, 6, 7);
    hpx::util::mem_fn(&X::f7)(sp, 1, 2, 3, 4, 5, 6, 7);

    hpx::util::mem_fn(&X::g7)(x, 1, 2, 3, 4, 5, 6, 7);
    hpx::util::mem_fn(&X::g7)(rcx, 1, 2, 3, 4, 5, 6, 7);
    hpx::util::mem_fn(&X::g7)(&x, 1, 2, 3, 4, 5, 6, 7);
    hpx::util::mem_fn(&X::g7)(pcx, 1, 2, 3, 4, 5, 6, 7);
    hpx::util::mem_fn(&X::g7)(sp, 1, 2, 3, 4, 5, 6, 7);

    hpx::util::mem_fn(&X::f8)(x, 1, 2, 3, 4, 5, 6, 7, 8);
    hpx::util::mem_fn(&X::f8)(&x, 1, 2, 3, 4, 5, 6, 7, 8);
    hpx::util::mem_fn(&X::f8)(sp, 1, 2, 3, 4, 5, 6, 7, 8);

    hpx::util::mem_fn(&X::g8)(x, 1, 2, 3, 4, 5, 6, 7, 8);
    hpx::util::mem_fn(&X::g8)(rcx, 1, 2, 3, 4, 5, 6, 7, 8);
    hpx::util::mem_fn(&X::g8)(&x, 1, 2, 3, 4, 5, 6, 7, 8);
    hpx::util::mem_fn(&X::g8)(pcx, 1, 2, 3, 4, 5, 6, 7, 8);
    hpx::util::mem_fn(&X::g8)(sp, 1, 2, 3, 4, 5, 6, 7, 8);

    // mem_fn on a data member (&X::hash) yields the accumulated hashes:
    // x saw both the fN and gN call chains, sp (a separate object) only
    // the subset invoked through the shared_ptr overloads.
    HPX_TEST(hpx::util::mem_fn(&X::hash)(x) == 17610);
    HPX_TEST(hpx::util::mem_fn(&X::hash)(sp) == 2155);

    return hpx::util::report_errors();
}
/*
Copyright (C) 2013, Basis Technology Corp.
Copyright (C) 2007-2011, Golden G. Richard III and Vico Marziale.
Copyright (C) 2005-2007, Golden G. Richard III.
*
Written by Golden G. Richard III and Vico Marziale.

Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at

*       http://www.apache.org/licenses/LICENSE-2.0
*
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.

Thanks to Kris Kendall, Jesse Kornblum, et al for their work on Foremost.
Foremost 0.69 was used as the starting point for Scalpel, in 2005.
*/

#include "scalpel.h"

// globals defined in scalpel.h

// current wildcard character
char wildcard;

// signal has been caught by signal handler
int signal_caught;

// width of tty, for progress bar
int ttywidth;

// propagate verbose flag to reader layer
int inputReaderVerbose;

// Parse one tokenized config-file line into a SearchSpecLine: allocates the
// suffix/header/footer buffers, parses min:max carve size, picks the search
// direction, and either compiles a regex or builds a Boyer-Moore table for
// the header and footer needles.
// Returns SCALPEL_OK, or SCALPEL_ERROR_BAD_{HEADER,FOOTER}_REGEX on a regex
// compile failure (earlier allocations are NOT freed on that path —
// freeSearchSpec() reclaims them later).
int extractSearchSpecData(struct scalpelState *state,
                          struct SearchSpecLine *s, char **tokenarray) {

  int err = 0;

  // process one line from config file:
  //     token[0] = suffix
  //     token[1] = case sensitive?
  //     token[2] = maximum carve size
  //     token[3] = begintag
  //     token[4] = endtag
  //     token[5] = search type (optional)

  s->suffix = (char *) malloc(MAX_SUFFIX_LENGTH * sizeof(char));
  checkMemoryAllocation(state, s->suffix, __LINE__, __FILE__, "s->suffix");
  s->begin = (char *) malloc(MAX_STRING_LENGTH * sizeof(char));
  checkMemoryAllocation(state, s->begin, __LINE__, __FILE__, "s->begin");
  s->end = (char *) malloc(MAX_STRING_LENGTH * sizeof(char));
  checkMemoryAllocation(state, s->end, __LINE__, __FILE__, "s->end");
  s->begintext = (char *) malloc(MAX_STRING_LENGTH * sizeof(char));
  checkMemoryAllocation(state, s->begintext, __LINE__, __FILE__, "s->begintext");
  s->endtext = (char *) malloc(MAX_STRING_LENGTH * sizeof(char));
  checkMemoryAllocation(state, s->endtext, __LINE__, __FILE__, "s->endtext");

  if (!strncasecmp(tokenarray[0], SCALPEL_NOEXTENSION_SUFFIX,
                   strlen(SCALPEL_NOEXTENSION_SUFFIX))) {
    s->suffix[0] = SCALPEL_NOEXTENSION;
    s->suffix[1] = 0;
  }
  else {
    // NOTE(review): copies a fixed MAX_SUFFIX_LENGTH bytes regardless of the
    // token's actual length — may read past the end of tokenarray[0] if the
    // token buffer is shorter than MAX_SUFFIX_LENGTH; confirm the token
    // storage is at least that large.
    memcpy(s->suffix, tokenarray[0], MAX_SUFFIX_LENGTH);
  }

  // case sensitivity check
  s->casesensitive = (!strncasecmp(tokenarray[1], "y", 1)
                      || !strncasecmp(tokenarray[1], "yes", 3));

  //#ifdef _WIN32
  //  s->length = _atoi64(tokenarray[2]);
  //#else
  //  s->length = atoull(tokenarray[2]);
  //#endif

  char split[MAX_STRING_LENGTH];
  char *maxcarvelength;

  // token[2] is either "max" or "min:max"
  strcpy(split, tokenarray[2]);
  maxcarvelength = strchr(split, ':');
  if (!maxcarvelength) {
    s->minlength = 0;
    s->length = strtoull(split, 0, 10);
  }
  else {
    *maxcarvelength = 0;
    maxcarvelength++;
    s->minlength = strtoull(split, 0, 10);
    s->length = strtoull(maxcarvelength, 0, 10);
  }

  // determine search type for this needle
  // (token[5] is "" when absent — padded by processSearchSpecLine)
  s->searchtype = SEARCHTYPE_FORWARD;
  if (!strncasecmp(tokenarray[5], "REVERSE", strlen("REVERSE"))) {
    s->searchtype = SEARCHTYPE_REVERSE;
  }
  else if (!strncasecmp(tokenarray[5], "NEXT", strlen("NEXT"))) {
    s->searchtype = SEARCHTYPE_FORWARD_NEXT;
  }
  // FORWARD is the default, but OK if the user defines it explicitly
  else if (!strncasecmp(tokenarray[5], "FORWARD", strlen("FORWARD"))) {
    s->searchtype = SEARCHTYPE_FORWARD;
  }

  // regular expressions must be handled separately
  if (isRegularExpression(tokenarray[3])) {
#ifdef GPU_THREADING
    // GPU execution does not support regex needles.
    std::stringstream ss;
    ss << "ERROR: GPU search for regex headers is not supported!\n";
    ss << "Please modify the config file for non-regex headers only.\n";
    std::string msg = ss.str();
    fprintf(stderr, "%s", msg.c_str());
    throw std::runtime_error(msg);
#endif
    // copy RE, zap leading/training '/' and prepare for regular expression compilation
    s->beginisRE = 1;
    strcpy(s->begin, tokenarray[3]);
    strcpy(s->begintext, tokenarray[3]);
    s->beginlength = strlen(tokenarray[3]);
    s->begin[s->beginlength] = 0;

    // compile regular expression, skipping the '/' delimiters at both ends
    err = regncomp(&(s->beginstate.re), s->begin + 1, s->beginlength - 2,
                   REG_EXTENDED | (REG_ICASE * (!s->casesensitive)));
    if (err) {
      return SCALPEL_ERROR_BAD_HEADER_REGEX;
    }
  }
  else {
    // non-regular expression header
    s->beginisRE = 0;
    strcpy(s->begintext, tokenarray[3]);
    // translate() decodes escapes in place and returns the byte length
    // (the needle may contain embedded NULs after this, hence memcpy)
    s->beginlength = translate(tokenarray[3]);
    memcpy(s->begin, tokenarray[3], s->beginlength);
    init_bm_table(s->begin, s->beginstate.bm_table, s->beginlength,
                  s->casesensitive);
  }

  if (isRegularExpression(tokenarray[4])) {
#ifdef GPU_THREADING
    // GPU execution does not support regex needles.
    std::stringstream ss;
    ss << "ERROR: GPU search for regex footers is not supported!\n";
    ss << "Please modify the config file for non-regex footers only.\n";
    std::string msg = ss.str();
    fprintf(stderr, "%s", msg.c_str());
    throw std::runtime_error(msg);
#endif
    // copy RE, zap leading/training '/' and prepare for for regular expression compilation
    s->endisRE = 1;
    strcpy(s->end, tokenarray[4]);
    strcpy(s->endtext, tokenarray[4]);
    s->endlength = strlen(tokenarray[4]);
    s->end[s->endlength] = 0;

    // compile regular expression
    err = regncomp(&(s->endstate.re), s->end + 1, s->endlength - 2,
                   REG_EXTENDED | (REG_ICASE * (!s->casesensitive)));
    if (err) {
      return SCALPEL_ERROR_BAD_FOOTER_REGEX;
    }
  }
  else {
    s->endisRE = 0;
    strcpy(s->endtext, tokenarray[4]);
    s->endlength = translate(tokenarray[4]);
    memcpy(s->end, tokenarray[4], s->endlength);
    init_bm_table(s->end, s->endstate.bm_table, s->endlength, s->casesensitive);
  }

  return SCALPEL_OK;
}

// Process a single raw line from the configuration file: strip CR/LF,
// skip comments/blank lines, handle the "wildcard" directive, tokenize the
// remaining fields (padding missing optional trailing tokens with ""), and
// hand them to extractSearchSpecData().  Increments state->specLines on a
// parsed spec.  Returns SCALPEL_OK or SCALPEL_ERROR_NO_SEARCH_SPEC.
// NOTE(review): errors returned by extractSearchSpecData are printed but
// then discarded — the function still returns SCALPEL_OK and counts the
// (partially-initialized) spec line; confirm this is intentional.
int processSearchSpecLine(struct scalpelState *state, char *buffer,
                          int lineNumber) {

  char *buf = buffer;
  char *token;
  int i = 0, err = 0, len = strlen(buffer);

  // murder CTRL-M (0x0d) characters
  // if(buffer[len - 2] == 0x0d && buffer[len - 1] == 0x0a) {
  if (len >= 2 && buffer[len - 2] == 0x0d && buffer[len - 1] == 0x0a) {
    // shift "\r\n" left one byte; buffer[len] is the NUL terminator
    buffer[len - 2] = buffer[len - 1];
    buffer[len - 1] = buffer[len];
  }

  buf = (char *) skipWhiteSpace(buf);
  token = strtok(buf, " \t\n");

  // lines beginning with # are comments
  if (token == NULL || token[0] == '#') {
    return SCALPEL_OK;
  }

  // allow wildcard to be changed
  if (!strncasecmp(token, "wildcard", 9)) {
    if ((token = strtok(NULL, " \t\n")) != NULL ) {
      translate(token);
    }
    else {
      fprintf(stdout, "Warning: Empty wildcard in configuration file line %d. Ignoring.\n",
              lineNumber);
      return SCALPEL_OK;
    }

    if(strlen(token) > 1) {
      fprintf(stderr, "Warning: Wildcard can only be one character,"
              " but you specified %d characters.\n"
              " Using the first character, \"%c\", as the wildcard.\n",
              (int)strlen(token), token[0]);
    }

    wildcard = token[0];
    return SCALPEL_OK;
  }

  char **tokenarray = (char **) malloc(
      6 * sizeof(char[MAX_STRING_LENGTH + 1]));
  checkMemoryAllocation(state, tokenarray, __LINE__, __FILE__, "tokenarray");

  while (token && (i < NUM_SEARCH_SPEC_ELEMENTS)) {
    tokenarray[i] = token;
    i++;
    token = strtok(NULL, " \t\n");
  }

  // pad the optional trailing fields (endtag, search type) with ""
  switch (NUM_SEARCH_SPEC_ELEMENTS - i) {
  case 2:
    tokenarray[NUM_SEARCH_SPEC_ELEMENTS - 1] = (char *) "";
    tokenarray[NUM_SEARCH_SPEC_ELEMENTS - 2] = (char *) "";
    break;
  case 1:
    tokenarray[NUM_SEARCH_SPEC_ELEMENTS - 1] = (char *) "";
    break;
  case 0:
    break;
  default:
    fprintf(stderr,
            "\nERROR: In line %d of the configuration file, expected %d tokens,\n"
            " but instead found only %d.\n",
            lineNumber, NUM_SEARCH_SPEC_ELEMENTS, i);
    free(tokenarray);
    return SCALPEL_ERROR_NO_SEARCH_SPEC;
    break;
  }

  if ((err = extractSearchSpecData(state, &(state->SearchSpec[state->specLines]), tokenarray))) {
    switch (err) {
    case SCALPEL_ERROR_BAD_HEADER_REGEX:
      fprintf(stderr,
              "\nERROR: In line %d of the configuration file, bad regular expression for header.\n",
              lineNumber) ;
      break;
    case SCALPEL_ERROR_BAD_FOOTER_REGEX:
      fprintf(stderr,
              "\nERROR: In line %d of the configuration file, bad regular expression for footer.\n",
              lineNumber);
      break;
    default:
      fprintf(stderr,
              "\nERROR: Unknown error on line %d of the configuration file.\n",
              lineNumber);
    }
  }

  state->specLines++;
  free(tokenarray);
  return SCALPEL_OK;
}

// process configuration file
// Reads state->conffile line by line via processSearchSpecLine(), then
// appends a NULL-suffix sentinel entry marking the end of SearchSpec.
// Returns SCALPEL_OK, SCALPEL_ERROR_FATAL_READ (open failure),
// SCALPEL_ERROR_TOO_MANY_TYPES, or the first per-line error.
int readSearchSpecFile(struct scalpelState *state) {

  int lineNumber = 0, status;
  FILE *f;

  char *buffer = (char *) malloc(
      (NUM_SEARCH_SPEC_ELEMENTS * MAX_STRING_LENGTH + 1) * sizeof(char));
  checkMemoryAllocation(state, buffer, __LINE__, __FILE__, "buffer");

  f = fopen(state->conffile, "r");
  if (f == NULL ) {
    fprintf(stderr,
            "ERROR: Couldn't open configuration file:\n%s -- %s\n",
            state->conffile, strerror(errno));
    free(buffer);
    buffer = NULL;
    return SCALPEL_ERROR_TOO_MANY_TYPES == 0 ? 0 : SCALPEL_ERROR_FATAL_READ;
  }

  while (fgets(buffer, NUM_SEARCH_SPEC_ELEMENTS * MAX_STRING_LENGTH, f)) {
    lineNumber++;

    if (state->specLines > MAX_FILE_TYPES) {
      fprintf(stderr, "Your conf file contains too many file types.\n");
      fprintf(stderr,
              "This version was compiled with MAX_FILE_TYPES == %d.\n",
              MAX_FILE_TYPES);
      fprintf(stderr, "Increase MAX_FILE_TYPES, recompile, and try again.\n");
      free(buffer);
      buffer = NULL;
      return SCALPEL_ERROR_TOO_MANY_TYPES;
    }

    if ((status = processSearchSpecLine(state, buffer, lineNumber)) != SCALPEL_OK) {
      free(buffer);
      buffer = NULL;
      return status;
    }
  }

  // add an empty object to the end of the list as a marker
  state->SearchSpec[state->specLines].suffix = NULL;
  state->SearchSpec[state->specLines].casesensitive = 0;
  state->SearchSpec[state->specLines].length = 0;
  state->SearchSpec[state->specLines].begin = NULL;
  state->SearchSpec[state->specLines].beginlength = 0;
  state->SearchSpec[state->specLines].end = NULL;
  state->SearchSpec[state->specLines].endlength = 0;

  // GGRIII: offsets field is uninitialized--it doesn't
  // matter, since we won't use this entry.

  fclose(f);
  free(buffer);
  buffer = NULL;
  return SCALPEL_OK;
}

// initialize state variable and copy command line arguments if passed in (argv can be NULL)
// Allocates all string buffers in 'state', zeroes the SearchSpec table and
// per-type offset bookkeeping, installs default option values, and builds
// the invocation string from argv.
// NOTE(review): despite the comment, the do/while at the bottom dereferences
// *argvcopy unconditionally — passing argv == NULL would crash; confirm all
// callers pass a NULL-terminated, non-empty argv.
void initializeState(char ** argv, struct scalpelState *state) {

  char **argvcopy = argv;
  int sss;
  int i;

  state->inReader = NULL;

  // Allocate memory for state
  state->inputFileList = (char *) malloc(MAX_STRING_LENGTH * sizeof(char));
  checkMemoryAllocation(state, state->inputFileList, __LINE__, __FILE__,
                        "state->inputFileList");
  state->conffile = (char *) malloc(MAX_STRING_LENGTH * sizeof(char));
  checkMemoryAllocation(state, state->conffile, __LINE__, __FILE__,
                        "state->conffile");
  state->outputdirectory = (char *) malloc(MAX_STRING_LENGTH * sizeof(char));
  // NOTE(review): copy/paste bug — this checks state->conffile (already
  // validated above) instead of the freshly allocated
  // state->outputdirectory; an allocation failure here goes undetected.
  checkMemoryAllocation(state, state->conffile, __LINE__, __FILE__,
                        "state->outputdirectory");
  state->invocation = (char *) malloc(MAX_STRING_LENGTH * sizeof(char));
  checkMemoryAllocation(state, state->invocation, __LINE__, __FILE__,
                        "state->invocation");

  // GGRIII: memory allocation made more sane, because we're storing
  // more information in Scalpel than foremost had to, for each file
  // type.
  sss = (MAX_FILE_TYPES + 1) * sizeof(struct SearchSpecLine);
  state->SearchSpec = (struct SearchSpecLine *) malloc(sss);
  // NOTE(review): memset runs before the allocation check — if malloc
  // returned NULL this memset is UB; the check should come first.
  memset(state->SearchSpec, 0, sss);
  checkMemoryAllocation(state, state->SearchSpec, __LINE__, __FILE__,
                        "state->SearchSpec");
  state->specLines = 0;

  // GGRIII: initialize header/footer offset data, carved file count,
  // et al.  The header/footer database is re-initialized in "dig.c"
  // after each image file is processed (numfilestocarve and
  // organizeDirNum are not).  Storage for the header/footer offsets
  // will be reallocated as needed.

  for (i = 0; i < MAX_FILE_TYPES; i++) {
    state->SearchSpec[i].offsets.headers = 0;
    state->SearchSpec[i].offsets.headerlens = 0;
    state->SearchSpec[i].offsets.footers = 0;
    state->SearchSpec[i].offsets.footerlens = 0;
    state->SearchSpec[i].offsets.numheaders = 0;
    state->SearchSpec[i].offsets.numfooters = 0;
    state->SearchSpec[i].offsets.headerstorage = 0;
    state->SearchSpec[i].offsets.footerstorage = 0;
    state->SearchSpec[i].numfilestocarve = 0;
    state->SearchSpec[i].organizeDirNum = 0;
  }

  state->fileswritten = 0;
  state->skip = 0;
  state->organizeMaxFilesPerSub = MAX_FILES_PER_SUBDIRECTORY;
  state->modeVerbose = FALSE;
  state->modeNoSuffix = FALSE;
  state->useInputFileList = FALSE;
  state->carveWithMissingFooters = FALSE;
  state->noSearchOverlap = FALSE;
  state->generateHeaderFooterDatabase = FALSE;
  state->updateCoverageBlockmap = FALSE;
  state->useCoverageBlockmap = FALSE;
  state->coverageblocksize = 0;
  state->blockAlignedOnly = FALSE;
  state->organizeSubdirectories = TRUE;
  state->previewMode = FALSE;
  state->handleEmbedded = FALSE;
  state->auditFile = NULL;

  inputReaderVerbose = FALSE;

  // default values for output directory, config file, wildcard character,
  // coverage blockmap directory
  // NOTE(review): strncpy with count == strlen(src) copies no NUL
  // terminator into the freshly malloc'd (uninitialized) buffer, so
  // outputdirectory is not guaranteed to be NUL-terminated here; confirm a
  // later assignment always overwrites it before use.
  strncpy(state->outputdirectory, SCALPEL_DEFAULT_OUTPUT_DIR,
          strlen(SCALPEL_DEFAULT_OUTPUT_DIR));
  strncpy(state->conffile, SCALPEL_DEFAULT_CONFIG_FILE, MAX_STRING_LENGTH);
  // NOTE(review): coveragefile aliases outputdirectory; freeState() frees
  // only outputdirectory, leaving coveragefile dangling — do not free both.
  state->coveragefile = state->outputdirectory;
  wildcard = SCALPEL_DEFAULT_WILDCARD;
  signal_caught = 0;
  state->invocation[0] = 0;

  // copy the invocation string into the state
  do {
    strncat(state->invocation, *argvcopy,
            MAX_STRING_LENGTH - strlen(state->invocation));
    strncat(state->invocation, " ",
            MAX_STRING_LENGTH - strlen(state->invocation));
    ++argvcopy;
  }
  while (*argvcopy);
}

// Release the dynamically grown header/footer offset arrays for one spec
// entry, NULLing each pointer so a double free is harmless.
static void freeOffsets(SearchSpecOffsets * offsets) {
  if (offsets->footers) {
    free(offsets->footers);
    offsets->footers = NULL;
  }
  if (offsets->headers) {
    free(offsets->headers);
    offsets->headers = NULL;
  }
  if (offsets->headerlens) {
    free(offsets->headerlens);
    offsets->headerlens = NULL;
  }
  if (offsets->footerlens) {
    free(offsets->footerlens);
    offsets->footerlens = NULL;
  }
}

// Free every per-type buffer in the SearchSpec table (all MAX_FILE_TYPES
// slots, including never-populated ones — safe because initializeState
// zeroed the table), then the table itself.
static void freeSearchSpec(struct SearchSpecLine *s) {
  for (int i = 0; i < MAX_FILE_TYPES; i++) {
    if (s[i].suffix) {
      free(s[i].suffix);
      s[i].suffix = NULL;
    }
    if (s[i].begin) {
      free(s[i].begin);
      s[i].begin = NULL;
    }
    if (s[i].end) {
      free(s[i].end);
      s[i].end = NULL;
    }
    if (s[i].begintext) {
      free(s[i].begintext);
      s[i].begintext = NULL;
    }
    if (s[i].endtext) {
      free(s[i].endtext);
      s[i].endtext = NULL;
    }
    freeOffsets(&(s[i].offsets) );
  }
  free(s);
}

// Release everything initializeState() allocated.  Counterpart of
// initializeState; does NOT free state itself.
void freeState(struct scalpelState *state) {
  //return; //TODO @@@ validate freeState is correct then reenable

  if (state->inputFileList) {
    free(state->inputFileList);
    state->inputFileList = NULL;
  }
  if (state->conffile) {
    free(state->conffile);
    state->conffile = NULL;
  }
  if (state->outputdirectory) {
    free(state->outputdirectory);
    state->outputdirectory = NULL;
  }
  if (state->invocation) {
    free(state->invocation);
    state->invocation = NULL;
  }

  if (state->SearchSpec) {
    freeSearchSpec(state->SearchSpec);
    state->SearchSpec = NULL;
  }
}

// full pathnames for all files used
// Canonicalizes outputdirectory and conffile in place via realpath();
// failures are silently ignored (paths are left as given).
void convertFileNames(struct scalpelState *state) {

  char fn[MAX_STRING_LENGTH]; // should be [PATH_MAX +1] from limits.h

  if (realpath(state->outputdirectory, fn)) {
    strncpy(state->outputdirectory, fn, MAX_STRING_LENGTH);
  }
  else {
    //		perror("realpath");
  }

  if (realpath(state->conffile, fn)) {
    strncpy(state->conffile, fn, MAX_STRING_LENGTH);
  }
  else {
    //		perror("realpath");
  }
}

// Allocate and fully initialize a scalpelState for the libscalpel API:
// validates arguments, copies options, reads the config file, opens the
// audit file, and initializes the store and threading model.  On success
// *state receives ownership of the new object (release it via
// libscalpel_finalize).  Throws std::runtime_error on any failure.
// NOTE(review): the new scalpelState leaks if a later step throws.
int libscalpel_initialize(scalpelState ** state, char * confFilePath,
                          char * outDir, const scalpelState & options) {
  std::string funcname("libscalpel_initialize");

  if (state == NULL)
    throw std::runtime_error(funcname + ": state argument must not be NULL.");

  if (*state != NULL)
    throw std::runtime_error(funcname + ": state has already been allocated.");

  if (outDir == NULL || strlen(outDir) == 0)
    throw std::runtime_error(funcname + ": no output directory provided.");

  if (confFilePath == NULL || strlen(confFilePath) == 0)
    throw std::runtime_error(funcname + ": no configuration file path provided.");

  scalpelState * pState = new scalpelState(options);

  char * argv[3];
  argv[0] = confFilePath;
  argv[1] = outDir;
  argv[2] = NULL;

  initializeState(&argv[0], pState);

  const size_t outDirLen = strlen(outDir);
  strncpy(pState->outputdirectory, outDir, outDirLen + 1);
  // NOTE(review): off-by-one index — strncpy with count outDirLen + 1
  // already copied the NUL at [outDirLen]; this writes one byte past it
  // (harmless while outDirLen + 1 < MAX_STRING_LENGTH, but should be
  // [outDirLen]).
  pState->outputdirectory[outDirLen + 1] = 0;

  const size_t confFilePathLen = strlen(confFilePath);
  strncpy(pState->conffile, confFilePath, confFilePathLen + 1);
  // NOTE(review): same off-by-one as above.
  pState->conffile[confFilePathLen + 1] = 0;

  convertFileNames(pState);

  int err = 0;

  // prepare audit file and make sure output directory is empty.
  if ((err = openAuditFile(pState))) {
    handleError(pState, err); //can throw
    std::stringstream ss;
    ss << ": Error opening audit file, error code: " << err;
    throw std::runtime_error(funcname + ss.str());
  }

  // read configuration file
  if ((err = readSearchSpecFile(pState))) {
    // problem with config file
    handleError(pState, err); //can throw
    std::stringstream ss;
    ss << ": Error reading spec file, error code: " << err;
    throw std::runtime_error(funcname + ss.str());
  }

  // Initialize the backing store of buffer to read-in, process image data.
  init_store();

  // Initialize threading model for cpu or gpu search.
  init_threading_model(pState);

  *state = pState;

  return SCALPEL_OK;
}

// Run the dig and carve phases against one input reader using an already
// initialized state (see libscalpel_initialize).  Returns SCALPEL_OK;
// throws std::runtime_error on invalid arguments or phase failure.
int libscalpel_carve_input(scalpelState * state, ScalpelInputReader * const reader) {
  std::string funcname("libscalpel_carve_input");

  if (state == NULL)
    throw std::runtime_error(funcname + ": NULL pointer provided for state.");

  if (reader == NULL)
    throw std::runtime_error(funcname + ": NULL pointer provided for Reader.");

  if (!reader->dataSource || !reader->id) {
    throw std::runtime_error(funcname + ": Reader datasource or id not set.");
  }

  if (!reader->open || !reader->read || !reader->seeko || !reader->tello
      || !reader->close || !reader->getError || !reader->getSize) {
    throw std::runtime_error(funcname + ": Reader callbacks not setup");
  }

  state->inReader = reader;

  int err = 0;

  if ((err = digImageFile(state))) {
    handleError(state, err); //can throw
    std::stringstream ss;
    ss << ": Error digging file, error code: " << err;
    throw std::runtime_error(funcname + ss.str());
  }

  if ((err = carveImageFile(state))) {
    handleError(state, err); //can throw
    std::stringstream ss;
    ss << ": Error carving file, error code: " << err;
    throw std::runtime_error(funcname + ss.str());
  }

  return SCALPEL_OK;
}

// Tear down a state created by libscalpel_initialize: closes the audit
// file, destroys the threading model and store, and frees the state's
// internals.  NOTE(review): freeState() releases the members but the
// scalpelState object allocated with new is never deleted and *state is
// not reset to NULL — confirm whether the caller is expected to delete it.
int libscalpel_finalize(scalpelState ** state) {
  std::string funcname("libscalpel_finalize");

  if (state == NULL)
    throw std::runtime_error(funcname + ": state argument must not be NULL.");

  if (*state == NULL)
    throw std::runtime_error(funcname + ": state has not been allocated.");

  closeAuditFile((*state)->auditFile);
  destroy_threading_model(*state);
  destroyStore();
  freeState(*state);

  return SCALPEL_OK;
}

// the exposed libscalpel API
// NOTE: This function is deprecated and will be removed. Use the
// libscalpel_* functions instead.
// TODO make the driver in scalpel_exec.c use this (minor refactoring needed)
// TODO add support for the remaining options avail from cmd-line
// returns SCALPEL_OK on no error, can throw runtime_error exception on errors
//
// One-shot convenience entry point: initializes a stack-local scalpelState,
// reads the config file, opens the audit file, runs the dig and carve
// phases against 'reader', then tears everything down.  Deprecated in
// favor of libscalpel_initialize / libscalpel_carve_input /
// libscalpel_finalize.
// NOTE(review): the dynamic exception specification `throw(std::runtime_error)`
// is deprecated in C++11 and removed in C++17; it must stay in sync with
// the declaration in the header if ever changed.
int scalpel_carveSingleInput(ScalpelInputReader * const reader,
                             const char * const confFilePath,
                             const char * const outDir,
                             const unsigned char generateFooterDb,
                             const unsigned char handleEmbedded,
                             const unsigned char organizeSubdirs,
                             const unsigned char previewMode,
                             const unsigned char carveWithMissingFooters,
                             const unsigned char noSearchOverlap) throw (std::runtime_error) {

  if (!reader || ! confFilePath || ! outDir) {
    //invalid args
    throw std::runtime_error("Invalid empty arguments");
  }
  if (!reader->dataSource || !reader->id) {
    throw std::runtime_error("Invalid empty input reader arguments");
  }

  //check fns
  if (!reader->open || !reader->read || !reader->seeko || !reader->tello
      || !reader->close || !reader->getError || !reader->getSize) {
    throw std::runtime_error("Reader callbacks not setup");
  }

  struct scalpelState state;

  std::string processorName ("scalpel_carveSingleInput()");

  // fake an argv for initializeState's invocation-string builder
  char * args[5];
  args[0] = const_cast<char*> ( processorName.c_str());
  args[1] = reader->id;
  args[2] = const_cast<char*> (confFilePath);
  args[3] = const_cast<char*> (outDir);
  args[4] = 0;

  initializeState(args, &state);

  //setup input
  state.inReader = reader;

  //setup options
  // NOTE(review): strncpy with count == strlen copies no NUL, but the
  // explicit [len] = 0 assignments below terminate the buffers correctly.
  const size_t outDirLen = strlen(outDir);
  strncpy(state.outputdirectory, outDir, outDirLen);
  state.outputdirectory[outDirLen] = 0;

  const size_t confFilePathLen = strlen(confFilePath);
  strncpy(state.conffile, confFilePath, confFilePathLen);
  state.conffile[confFilePathLen] = 0;

  state.generateHeaderFooterDatabase = generateFooterDb;
  state.handleEmbedded = handleEmbedded;
  state.organizeSubdirectories = organizeSubdirs;
  state.previewMode = previewMode;
  state.carveWithMissingFooters = carveWithMissingFooters;
  state.noSearchOverlap = noSearchOverlap;

  convertFileNames(&state);

  // read configuration file
  int err;
  if ((err = readSearchSpecFile(&state))) {
    // problem with config file
    handleError(&state, err); //can throw
    freeState(&state);
    std::stringstream ss;
    ss << "Error reading spec file, error code: " << err;
    throw std::runtime_error(ss.str());
  }

  // prepare audit file and make sure output directory is empty.
  if ((err = openAuditFile(&state))) {
    handleError(&state, err); //can throw
    freeState(&state);
    std::stringstream ss;
    ss << "Error opening audit file, error code: " << err;
    throw std::runtime_error(ss.str());
  }

  // Initialize the backing store of buffer to read-in, process image data.
  init_store();

  // Initialize threading model for cpu or gpu search.
  init_threading_model(&state);

  if ((err = digImageFile(&state))) {
    handleError(&state, err); //can throw
    closeAuditFile(state.auditFile);
    destroyStore();
    freeState(&state);
    std::stringstream ss;
    ss << "Error digging file, error code: " << err;
    throw std::runtime_error(ss.str());
  }

  if ((err = carveImageFile(&state))) {
    handleError(&state, err); //can throw
    closeAuditFile(state.auditFile);
    destroy_threading_model(&state);
    destroyStore();
    freeState(&state);
    std::stringstream ss;
    ss << "Error carving file, error code: " << err;
    throw std::runtime_error(ss.str());
  }

  // normal teardown mirrors the error paths above
  closeAuditFile(state.auditFile);
  destroy_threading_model(&state);
  destroyStore();
  freeState(&state);

  return SCALPEL_OK;
}
/* =========FOR INTERNAL USE ONLY. NO DISTRIBUTION PLEASE ========== */ /********************************************************************* Copyright 1999-2007, University of Cincinnati. All rights reserved. By using this software the USER indicates that he or she has read, understood and will comply with the following: --- University of Cincinnati hereby grants USER nonexclusive permission to use, copy and/or modify this software for internal, noncommercial, research purposes only. Any distribution, including commercial sale or license, of this software, copies of the software, its associated documentation and/or modifications of either is strictly prohibited without the prior consent of University of Cincinnati. Title to copyright to this software and its associated documentation shall at all times remain with University of Cincinnati. Appropriate copyright notice shall be placed on all software copies, and a complete copy of this notice shall be included in all copies of the associated documentation. No right is granted to use in advertising, publicity or otherwise any trademark, service mark, or the name of University of Cincinnati. --- This software and any associated documentation is provided "as is" UNIVERSITY OF CINCINNATI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, INCLUDING THOSE OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, OR THAT USE OF THE SOFTWARE, MODIFICATIONS, OR ASSOCIATED DOCUMENTATION WILL NOT INFRINGE ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER INTELLECTUAL PROPERTY RIGHTS OF A THIRD PARTY. University of Cincinnati shall not be liable under any circumstances for any direct, indirect, special, incidental, or consequential damages with respect to any claim by USER or any third party on account of or arising from the use, or inability to use, this software or its associated documentation, even if University of Cincinnati has been advised of the possibility of those damages. 
*********************************************************************/

#include "sbsat.h"
#include "sbsat_preproc.h"

int Split_Large();

// Scratch array of newly created equivalence BDDs produced while splitting;
// grown in chunks of 25 via ite_recalloc.
int max_bdds;
int num_bdds;
BDDNode **BDDFuncs;

// progress-message buffer and the next candidate number for a fresh variable
char p[100];
int res_var;

// Preprocessing driver: split large BDD functions into smaller ones joined
// by fresh equivalence variables.  Returns a PREP_* status from Split_Large.
int Do_Split() {
   dX_printf(3, "SPLITTING LARGE FUNCTIONS - ");
   int ret = PREP_NO_CHANGE;
   sprintf(p, "{0:0/%d}", nmbrFunctions);
   str_length = dX_printf(3, p);
   ret = Split_Large ();
   dX_printf(3, "\n");
   d2e_printf1("\r ");
   return ret;
}

// Recursively replace heavily shared subtrees of f with fresh variables.
// Uses the node 'flag' field as multi-purpose state:
//   flag == 0   : node already processed / not a candidate
//   flag <  0   : node was split; -flag is the replacement variable
//   0 < flag<10 : too few references to be worth splitting — recurse
//   flag >= 10  : split here: introduce new_var, emit new_var <=> f
// Each split appends an equivalence BDD to BDDFuncs and bumps 'affected'.
BDDNode *splitBDD(BDDNode *f) {
   //	fprintf(stderr, "flag = %d, ", f->flag);
   //	printBDDerr(f);
   //	fprintf(stderr, "\n");
   if(IS_TRUE_FALSE(f)) return f;
   if(IS_TRUE_FALSE(f->thenCase) && IS_TRUE_FALSE(f->elseCase)) return f;
   if(f->flag == 0) return f;
   if(f->flag < 0) {
      //node has previously been split.
      //		fprintf(stderr, "Returning 0, ");
      //		printBDDerr(ite_var(-(f->flag)));
      //		fprintf(stderr, "\n");
      return ite_var(-(f->flag));
   } else if(f->notCase->flag < 0) {
      //notCase has previously been split.
      //		fprintf(stderr, "Returning 1, ");
      //		printBDDerr(ite_var(f->notCase->flag));
      //		fprintf(stderr, "\n");
      // NOTE(review): returns ite_var(flag) with flag < 0 (not negated as in
      // the branch above) — presumably the negative variable encodes the
      // complemented node; confirm against ite_var's handling of negatives.
      return ite_var(f->notCase->flag);
   } else if(f->flag < 10) {
      //Node not referenced enough to be interesting
      BDDNode *tmp = ite(ite_var(f->variable), splitBDD(f->thenCase), splitBDD(f->elseCase));
      tmp->flag = 0;
      //		fprintf(stderr, "Returning 2, ");
      //		printBDDerr(g);
      //		fprintf(stderr, "\n");
      return tmp;
   }
   affected++;
   // find the next unused symbol number for the fresh variable
   while(getsym_i(++res_var)!=NULL);
   int new_var = i_getsym_int(res_var, SYM_VAR);
   f->flag = -new_var;
   // record new_var <=> f as an additional function
   BDDFuncs[num_bdds] = ite_equ(ite_var(new_var), f);
   assert(BDDFuncs[num_bdds]->inferences == NULL);
   BDDFuncs[num_bdds++]->flag = 0;
   if(num_bdds >= max_bdds) {
      BDDFuncs = (BDDNode **)ite_recalloc(BDDFuncs, max_bdds, max_bdds+25, sizeof(BDDNode *), 9, "BDDFuncs");
      max_bdds += 25;
   }
   //	fprintf(stderr, "Creating, ");
   //	printBDDerr(BDDFuncs[num_bdds-1]);
   //	fprintf(stderr, "\n");
   BDDNode *tmp = ite_var(new_var);
   tmp->flag = 0;
   return tmp;
}

//Assumes BDDFuncs has been allocated
// Mark shared nodes of all oversized UNSURE functions with rough reference
// counts, then repeatedly run splitBDD over them until a fixpoint, rebuild
// the affected BDDs, and register the newly created equivalence functions.
// Returns PREP_CHANGED / PREP_NO_CHANGE, or TRIV_SAT / TRIV_UNSAT /
// PREP_ERROR propagated from rebuilding.
int Split_ref_counts() {
   clear_all_bdd_flags();
   for(int j = 0; j < nmbrFunctions; j++) {
      if (functionType[j] == UNSURE && length[j] > do_split_max_vars) {
	 //dX_printf(3, "\n%d: ", j);
	 //printBDD(functions[j]);
	 //dX_printf(3, "\n");
	 marknodes(functions[j]);
	 functions[j]->flag = 0;
	 //			fprintf(stderr, "Marked %d\n", j);
      }
   }
   //Nodes marked w/ rough reference counts

   int ret = PREP_CHANGED;
   res_var = numinp;
   while(ret == PREP_CHANGED) {
      ret = PREP_NO_CHANGE;
      for(int j = 0; j < nmbrFunctions; j++) {
	 if (j % 100 == 0) {
	    // refresh the textual progress indicator
	    for(int iter = 0; iter<str_length; iter++)
	      dX_printf(3, "\b");
	    sprintf(p, "***{%ld:%d/%d}", affected, j, nmbrFunctions);
	    str_length = dX_printf(3, p);
	 }
	 if (functionType[j] == UNSURE && length[j] > do_split_max_vars) {
	    //Split the BDD based on the reference counts.
	    //				fprintf(stderr, "Splitting %d, ", j);
	    //				printBDDerr(functions[j]);
	    //				fprintf(stderr, "\n");
	    BDDNode *tmp = ite(ite_var(functions[j]->variable), splitBDD(functions[j]->thenCase), splitBDD(functions[j]->elseCase));
	    tmp->flag = 0;
	    if(functions[j]!=tmp) {
	       functions[j] = tmp;
	       ret = PREP_CHANGED;
	    }
	    //				fprintf(stderr, "Finish with, ");
	    //				printBDDerr(functions[j]);
	    //				fprintf(stderr, "\n");
	    assert(functions[j]->inferences == NULL);
	    // make room for the fresh variables introduced so far
	    vars_alloc(numinp+num_bdds);
	 }
      }
   }

   if(num_bdds != 0) {
      ret = PREP_CHANGED;
      // suppress inference generation while rebuilding split functions
      bool OLD_DO_INFERENCES = DO_INFERENCES;
      DO_INFERENCES = 0;
      for(int j = 0; j < nmbrFunctions; j++) {
	 if (functionType[j] == UNSURE && length[j] > do_split_max_vars) {
	    switch (int r=Rebuild_BDDx(j)) {
	       //This should not be causing any inferences.
	     case TRIV_UNSAT:
	     case TRIV_SAT:
	     case PREP_ERROR: ret=r; return ret;
	     default: break;
	    }
	 }
      }
      DO_INFERENCES = OLD_DO_INFERENCES;

      int *new_bdds;
      switch (int r = add_newFunctions(BDDFuncs, num_bdds, &new_bdds)) {
       case TRIV_UNSAT:
       case TRIV_SAT:
       case PREP_ERROR: ret=r; return ret;
       default: break;
      }
      ite_free((void**)&new_bdds);
      num_bdds = 0; //BDDFuncs is not free'd here. Can continue to use the same memory in this loop
      numinp = getNuminp();
   }
   return ret;
}

// Binomial coefficient C(n, k) via Pascal's rule (exponential recursion —
// fine only for the small n/k used here).
int nCk(int n, int k) {
   if(n == k) return 1;
   if(k == 0) return 1;
   return nCk(n-1, k) + nCk(n-1, k-1);
}

// Enumerate all k-subsets of the first n variables in 'vars' (by existentially
// quantifying away the n-k excluded ones) and, for each projection that still
// constrains the problem, prune it against the original function and record
// it in BDDFuncs.  *whereat tracks how many projections were kept; recursion
// stops early once functions[orig_bdd] shrinks to target_k variables.
void nCk_Sets(int n, int k, int *vars, int *whereat, int n_orig, BDDNode *bdd, int orig_bdd, int target_k) {
   if(length[orig_bdd] <= target_k) return;
   if (n==0 && k==0) {
      //printBDD(tempBDD); //dX_printf(3, "\n");
      // prune against all projections kept so far
      for(int i = 0; i < (*whereat) && bdd != true_ptr; i++)
	bdd = pruning(bdd, BDDFuncs[i]);
      if(bdd != true_ptr) {
	 BDDNode *tempBDD;
	 tempBDD = pruning(functions[orig_bdd], bdd);
	 if(tempBDD == functions[orig_bdd]) return;
	 functions[orig_bdd] = tempBDD;
	 Rebuild_BDDx(orig_bdd);
	 //dX_printf(3, "whereat = %d: \n", (*whereat));
	 BDDFuncs[(*whereat)] = bdd;
	 (*whereat)++;
	 if((*whereat) >= max_bdds) {
	    BDDFuncs = (BDDNode **)ite_recalloc(BDDFuncs, max_bdds, max_bdds+25, sizeof(BDDNode *), 9, "BDDFuncs");
	    max_bdds += 25;
	    num_bdds = (*whereat)-1;
	 }
      }
   } else {
      if (k>0) {
	 // include vars[n_orig-n] in the subset
	 nCk_Sets(n-1,k-1, vars, whereat, n_orig, bdd, orig_bdd, target_k);
      }
      if (n>k) {
	 // exclude vars[n_orig-n]: existentially quantify it away
	 BDDNode *bdd_0 = xquantify(bdd, vars[n_orig-n]);
	 if(bdd_0 == true_ptr) return;
	 nCk_Sets(n-1,k, vars, whereat, n_orig, bdd_0, orig_bdd, target_k);
      }
   }
}

// Truncate f below 'level' levels, replacing deeper structure by the BDD
// implied by each node's cached inferences.
BDDNode *level_x_decomp(BDDNode *f, int level) {
   if(f == true_ptr) return f;
   if(f->inferences != NULL) return Build_BDD_From_Inferences(f);
   //if(level == 0) return Build_BDD_From_Inferences(f);
   int v = f->variable;
   BDDNode *r = level_x_decomp(f->thenCase, level-1);
   BDDNode *e = level_x_decomp(f->elseCase, level-1);
   if(r == e) return r;
   return ite_xvar_y_z(ite_var(v), r, e);
}

// Top-level splitting pass over all functions (definition continues beyond
// this chunk).
int Split_Large () {
   int ret = PREP_NO_CHANGE;
   max_bdds = 0;
   num_bdds = 0;
   int k_size = do_split_max_vars;
   BDDFuncs = (BDDNode **)ite_recalloc(NULL, max_bdds, max_bdds+10, sizeof(BDDNode *), 9, "BDDFuncs");
   max_bdds += 10;

   if(0) { //SEAN
      switch (int r = Split_ref_counts()) {
       case TRIV_UNSAT:
       case TRIV_SAT:
       case PREP_ERROR: ret=r; return r;
       default: break;
      }
   }

   affected = 0;
   int old_nmbrFunctions = nmbrFunctions;
   for(int j
= 0; j < old_nmbrFunctions; j++) { if (j % 100 == 0) { for(int iter = 0; iter<str_length; iter++) dX_printf(3, "\b"); sprintf(p, "{%ld:%d/%d}", affected, j, old_nmbrFunctions); str_length = dX_printf(3, p); } if (j % 100 == 0) { if (nCtrlC) { dX_printf(3, "\nBreaking out of Splitting"); //for(; j < nmbrFunctions; j++) SPLIT_REPEATS[x] = 0; ?? nCtrlC = 0; break; } d2e_printf3("\rPreprocessing Sp %d/%d", j, old_nmbrFunctions); } if (functionType[j] == UNSURE && length[j] > k_size) { bool OLD_DO_INFERENCES = DO_INFERENCES; DO_INFERENCES = 0; //dX_printf(3, "\n%d: ", j); //printBDD(functions[j]); //dX_printf(3, "\n"); //Maximum Split Size: //int num_splits = nCk(length[j], k_size); //dX_printf(3, "%d C %d = %d\n", length[j], k_size, num_splits); int whereat = 0; if(findandset_fnType(j) == 1) continue; //dX_printf(3, "false paths:%d\n", countFalses (functions[j])); int *vars_copy = new int[length[j]]; //This is necessary because variables[j].num is modified inside nCk_Sets for(int i = 0; i < length[j]; i++) vars_copy[i] = variables[j].num[i]; nCk_Sets(length[j], k_size, vars_copy, &whereat, length[j], functions[j], j, k_size); delete [] vars_copy; //dX_printf(3, "whereat = %d: \n", whereat); //add BDDFuncs to functions; if(whereat > 0) { affected++; ret = PREP_CHANGED; int *new_bdds; switch (int r = add_newFunctions(BDDFuncs, whereat, &new_bdds)) { case TRIV_UNSAT: case TRIV_SAT: case PREP_ERROR: ret=r; goto sp_bailout; default: break; } ite_free((void**)&new_bdds); if(length[j] > k_size) { BDDNode *Conjunction = BDDFuncs[0]; for(int x = 1; x < whereat; x++) Conjunction = ite_and(Conjunction, BDDFuncs[x]); functions[j] = ite_or(functions[j], ite_not(Conjunction)); } switch (int r=Rebuild_BDDx(j)) { case TRIV_UNSAT: case TRIV_SAT: case PREP_ERROR: ret=r; goto sp_bailout; default: break; } } DO_INFERENCES = OLD_DO_INFERENCES; } if (functionType[j] == UNSURE && length[j] > k_size) { //Function is still too large, must use alternative method (Break into CNF clauses). 
//Count the clauses bool OLD_DO_INFERENCES = DO_INFERENCES; DO_INFERENCES = 0; int num = countFalses (functions[j]); if(num == 1) functionType[j] = PLAINOR; else if(num > 1) { affected++; ret = PREP_CHANGED; intlist *list = new intlist[num]; int listx = 0; int tempint_max = 0; int *tempint = NULL; findPathsToFalse (functions[j], &tempint_max, &tempint, list, &listx); ite_free((void**)&tempint); tempint_max = 0; if(listx >= max_bdds) { BDDFuncs = (BDDNode **)ite_recalloc(BDDFuncs, max_bdds, listx+1, sizeof(BDDNode *), 9, "BDDFuncs"); max_bdds = listx+1; num_bdds = listx; } for (int i = 0; i < listx; i++) { BDDFuncs[i] = false_ptr; for(int x = list[i].length-1; x >= 0; x--) BDDFuncs[i] = ite_or(BDDFuncs[i], ite_var(-list[i].num[x])); //Need to negate all literals to get CNF! delete [] list[i].num; } delete [] list; int *new_bdds; switch (int r = add_newFunctions(BDDFuncs, listx, &new_bdds)) { case TRIV_UNSAT: case TRIV_SAT: case PREP_ERROR: ret=r; goto sp_bailout; default: break; } ite_free((void**)&new_bdds); equalityVble[j] = 0; functionType[j] = UNSURE; functions[j] = true_ptr; switch (int r=Rebuild_BDDx(j)) { case TRIV_UNSAT: case TRIV_SAT: case PREP_ERROR: ret=r; goto sp_bailout; default: break; } } DO_INFERENCES = OLD_DO_INFERENCES; } //dX_printf(3, "\n%d: ", j); //printBDD(functions[j]); //dX_printf(3, "\n"); } sp_bailout:; ite_free((void **)&BDDFuncs); return ret; }
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/kernels/mlir_generated/base_gpu_op.h"

namespace tensorflow {

// Registers the MLIR-generated elementwise binary Mul GPU kernel for each
// supported dtype. Each macro invocation instantiates and registers one
// kernel specialization.
GENERATE_AND_REGISTER_BINARY_GPU_KERNEL(Mul, DT_HALF);
GENERATE_AND_REGISTER_BINARY_GPU_KERNEL(Mul, DT_FLOAT);
GENERATE_AND_REGISTER_BINARY_GPU_KERNEL(Mul, DT_DOUBLE);
GENERATE_AND_REGISTER_BINARY_GPU_KERNEL(Mul, DT_COMPLEX64);
GENERATE_AND_REGISTER_BINARY_GPU_KERNEL(Mul, DT_COMPLEX128);
GENERATE_AND_REGISTER_BINARY_GPU_KERNEL(Mul, DT_INT8);
// TODO(b/25387198): We cannot use a regular GPU kernel for int32.
GENERATE_AND_REGISTER_BINARY_GPU_KERNEL(Mul, DT_INT16);
GENERATE_AND_REGISTER_BINARY_GPU_KERNEL(Mul, DT_INT64);

}  // namespace tensorflow
/*
 * @Descripttion: https://blog.csdn.net/chenyutingdaima/article/details/81978251
 * @version: 1.0
 * @Author: Geeks_Z
 * @Date: 2021-05-31 09:42:01
 * @LastEditors: Geeks_Z
 * @LastEditTime: 2021-05-31 09:43:14
 */
// 1. Store all records in a struct array and sort: females first (gender 'F'
//    sorts before 'M'), then by grade descending. After sorting, the first
//    element is the top-scoring female and the last is the lowest-scoring male.
// 2. On output, check that the first/last elements really have the expected
//    gender; if either is missing, print "Absent" for it and "NA" at the end.
#include <cstdio>
#include <cstring>
#include <algorithm>
using namespace std;
// One student record: name, id, gender ('F'/'M'), and grade.
struct Node {
    char name[15], id[15], gender;
    int grade;
} node[110];
// Sort key: gender ascending ('F' < 'M'), then grade descending.
// Takes const references to avoid copying the whole struct per comparison.
bool cmp(const Node& a, const Node& b) {
    if (a.gender != b.gender)
        return a.gender < b.gender;
    else
        return a.grade > b.grade;
}
int main() {
    int n;
    bool flag = true;
    scanf("%d", &n);
    for (int i = 0; i < n; i++) {
        // Field widths (%14s) guard the 15-byte buffers against overflow
        // on over-long input tokens.
        scanf("%14s %c %14s %d", node[i].name, &node[i].gender, node[i].id,
              &node[i].grade);
    }
    sort(node, node + n, cmp);
    // Highest-scoring female, if any female exists.
    if (node[0].gender == 'F') {
        printf("%s %s\n", node[0].name, node[0].id);
    } else {
        printf("Absent\n");
        flag = false;
    }
    // Lowest-scoring male, if any male exists.
    if (node[n - 1].gender == 'M') {
        printf("%s %s\n", node[n - 1].name, node[n - 1].id);
    } else {
        printf("Absent\n");
        flag = false;
    }
    // Grade difference only meaningful when both genders are present.
    if (flag == true) {
        printf("%d\n", node[0].grade - node[n - 1].grade);
    } else {
        printf("NA\n");
    }
    return 0;
}
/*
 * Copyright (c) Contributors to the Open 3D Engine Project.
 * For complete copyright and license terms please see the LICENSE at the root of this distribution.
 *
 * SPDX-License-Identifier: Apache-2.0 OR MIT
 *
 */

#include <QATLControlsTreeModel.h>

#include <ACEEnums.h>
#include <AudioControl.h>
#include <AudioControlsEditorUndo.h>
#include <IEditor.h>
#include <QAudioControlTreeWidget.h>

#include <QStandardItem>
#include <QMessageBox>

namespace AudioControls
{
    //-------------------------------------------------------------------------------------------//
    // Tree model over the ATL controls; starts detached from any controls model.
    QATLTreeModel::QATLTreeModel()
        : m_pControlsModel(nullptr)
    {
    }

    //-------------------------------------------------------------------------------------------//
    // Unregister from the controls model so it stops notifying a dead listener.
    QATLTreeModel::~QATLTreeModel()
    {
        if (m_pControlsModel)
        {
            m_pControlsModel->RemoveListener(this);
        }
    }

    //-------------------------------------------------------------------------------------------//
    // Attach this tree model to the given controls model and subscribe to its
    // change notifications (e.g. OnControlModified).
    void QATLTreeModel::Initialize(CATLControlsModel* pControlsModel)
    {
        m_pControlsModel = pControlsModel;
        m_pControlsModel->AddListener(this);
    }

    //-------------------------------------------------------------------------------------------//
    // Find the tree item whose eDR_ID role matches nID (recursive search);
    // returns nullptr when no item carries that control id.
    QStandardItem* QATLTreeModel::GetItemFromControlID(CID nID)
    {
        QModelIndexList indexes = match(index(0, 0, QModelIndex()), eDR_ID, nID, 1, Qt::MatchRecursive);
        if (!indexes.empty())
        {
            return itemFromIndex(indexes.at(0));
        }
        return nullptr;
    }

    //-------------------------------------------------------------------------------------------//
    // Insert a tree item for pControl under pParent at row nRow and mark the
    // new item (and its ancestors) dirty. Returns the created item, or
    // nullptr when pControl/pParent is null.
    QStandardItem* QATLTreeModel::AddControl(CATLControl* pControl, QStandardItem* pParent, int nRow)
    {
        if (pControl && pParent)
        {
            QStandardItem* pItem = new QAudioControlItem(QString(pControl->GetName().c_str()), pControl);
            if (pItem)
            {
                pParent->insertRow(nRow, pItem);
                SetItemAsDirty(pItem);
            }
            return pItem;
        }
        return nullptr;
    }

    //-------------------------------------------------------------------------------------------//
    // Create a folder item named sName under pParent at row nRow, recording
    // an undo entry unless undo is suspended. Returns the folder item, or
    // nullptr on failure.
    QStandardItem* QATLTreeModel::CreateFolder(QStandardItem* pParent, const AZStd::string_view sName, int nRow)
    {
        if (pParent)
        {
            // Make a valid name for the folder (avoid having folders with the same name under same root):
            // append "_1", "_2", ... until the name is unique among sibling folders.
            QString sRootName(sName.data());
            QString sFolderName(sRootName);
            int number = 1;
            bool bFoundName = false;
            while (!bFoundName)
            {
                bFoundName = true;
                const int size = pParent->rowCount();
                for (int i = 0; i < size; ++i)
                {
                    QStandardItem* pItem = pParent->child(i);
                    if (pItem && (pItem->data(eDR_TYPE) == eIT_FOLDER) && (QString::compare(sFolderName, pItem->text(), Qt::CaseInsensitive) == 0))
                    {
                        bFoundName = false;
                        sFolderName = sRootName + "_" + QString::number(number);
                        ++number;
                        break;
                    }
                }
            }

            if (!sFolderName.isEmpty())
            {
                if (QStandardItem* pFolderItem = new QFolderItem(sFolderName))
                {
                    SetItemAsDirty(pFolderItem);
                    pParent->insertRow(nRow, pFolderItem);
                    if (!CUndo::IsSuspended())
                    {
                        CUndo undo("Audio Folder Created");
                        CUndo::Record(new CUndoFolderAdd(pFolderItem));
                    }
                    return pFolderItem;
                }
            }
        }
        return nullptr;
    }

    //-------------------------------------------------------------------------------------------//
    // Convenience wrapper: remove a single item via the multi-item path.
    void QATLTreeModel::RemoveItem(QModelIndex index)
    {
        QModelIndexList indexList;
        indexList.push_back(index);
        RemoveItems(indexList);
    }

    //-------------------------------------------------------------------------------------------//
    void QATLTreeModel::RemoveItems(QModelIndexList indexList)
    {
        // Sort the controls by the level they are inside the tree
        // (the deepest in the tree first) and then by their row number.
        // This way we guarantee we don't delete the parent of a
        // control before its children
        struct STreeIndex
        {
            QPersistentModelIndex m_index; // persistent: survives row removals during deletion
            int m_level;
            STreeIndex(QPersistentModelIndex index, int level)
                : m_index(index)
                , m_level(level) {}

            bool operator< (const STreeIndex& index) const
            {
                if (m_level == index.m_level)
                {
                    return m_index.row() > index.m_index.row();
                }
                return m_level > index.m_level;
            }
        };
        AZStd::vector<STreeIndex> sortedIndexList;

        const int size = indexList.length();
        for (int i = 0; i < size; ++i)
        {
            // Depth = number of parent hops to the (invalid) root.
            int level = 0;
            QModelIndex index = indexList[i];
            while (index.isValid())
            {
                ++level;
                index = index.parent();
            }
            sortedIndexList.push_back(STreeIndex(indexList[i], level));
        }
        std::sort(sortedIndexList.begin(), sortedIndexList.end());

        for (int i = 0; i < size; ++i)
        {
            QModelIndex index = sortedIndexList[i].m_index;
            if (index.isValid())
            {
                DeleteInternalData(index);

                // Mark parent as modified
                QModelIndex parent = index.parent();
                if (parent.isValid())
                {
                    SetItemAsDirty(itemFromIndex(parent));
                }

                removeRow(index.row(), index.parent());
            }
        }
    }

    //-------------------------------------------------------------------------------------------//
    void QATLTreeModel::DeleteInternalData(QModelIndex root)
    {
        // Delete children first and in reverse order
        // of their row (so that we can undo them in the same order)
        AZStd::vector<QModelIndex> childs;
        QModelIndex child = index(0, 0, root);
        for (int i = 1; child.isValid(); ++i)
        {
            childs.push_back(child);
            child = index(i, 0, root);
        }
        const size_t size = childs.size();
        for (size_t i = 0; i < size; ++i)
        {
            DeleteInternalData(childs[(size - 1) - i]);
        }

        // Controls are removed from the controls model; folders get an undo record.
        if (root.data(eDR_TYPE) == eIT_AUDIO_CONTROL)
        {
            m_pControlsModel->RemoveControl(root.data(eDR_ID).toUInt());
        }
        else
        {
            if (!CUndo::IsSuspended())
            {
                CUndo::Record(new CUndoFolderRemove(itemFromIndex(root)));
            }
        }
    }

    //-------------------------------------------------------------------------------------------//
    // Resolve a model index to its CATLControl (nullptr for folders or when
    // no controls model is attached).
    CATLControl* QATLTreeModel::GetControlFromIndex(QModelIndex index)
    {
        if (m_pControlsModel && index.isValid() && (index.data(eDR_TYPE) == eIT_AUDIO_CONTROL))
        {
            return m_pControlsModel->GetControlByID(index.data(eDR_ID).toUInt());
        }
        return nullptr;
    }

    //-------------------------------------------------------------------------------------------//
    // Controls-model listener callback: sync the tree item's text with the
    // control's (possibly renamed) name and mark it dirty.
    void QATLTreeModel::OnControlModified(CATLControl* pControl)
    {
        if (pControl)
        {
            if (QStandardItem* pItem = GetItemFromControlID(pControl->GetId()))
            {
                QString sNewName(pControl->GetName().c_str());
                if (pItem->text() != sNewName)
                {
                    pItem->setText(sNewName);
                }
                SetItemAsDirty(pItem);
            }
        }
    }

    //-------------------------------------------------------------------------------------------//
    // Set the eDR_MODIFIED flag on pItem and all of its ancestors. Signals
    // are blocked so this bookkeeping does not re-trigger model listeners.
    void QATLTreeModel::SetItemAsDirty(QStandardItem* pItem)
    {
        if (pItem)
        {
            blockSignals(true);
            pItem->setData(true, eDR_MODIFIED);
            blockSignals(false);

            SetItemAsDirty(pItem->parent());
        }
    }

    //-------------------------------------------------------------------------------------------//
    // Create a new control through the controls model, first generating a
    // name unique within the parent's scope.
    CATLControl* QATLTreeModel::CreateControl(EACEControlType eControlType, const AZStd::string_view sName, CATLControl* pParent)
    {
        AZStd::string sFinalName = m_pControlsModel->GenerateUniqueName(sName, eControlType, pParent ? pParent->GetScope() : "", pParent);
        return m_pControlsModel->CreateControl(sFinalName, eControlType, pParent);
    }

    //-------------------------------------------------------------------------------------------//
    // Drop handler: rejects dropping a folder into a target that already has
    // a same-named folder (with a message box), records an undo entry for
    // moves, then defers to the base implementation.
    bool QATLTreeModel::dropMimeData(const QMimeData* mimeData, Qt::DropAction action, int row, int column, const QModelIndex& parent)
    {
        // LY-17684
        QStandardItem* rootItem = invisibleRootItem();
        QStandardItem* targetItem = rootItem;
        if (parent.isValid())
        {
            targetItem = itemFromIndex(parent);
        }
        if (targetItem)
        {
            if (targetItem && (targetItem->data(eDR_TYPE) == eIT_FOLDER || targetItem == rootItem))
            {
                const QString format = "application/x-qabstractitemmodeldatalist";
                if (mimeData->hasFormat(format))
                {
                    QByteArray encoded = mimeData->data(format);
                    QDataStream stream(&encoded, QIODevice::ReadOnly);
                    while (!stream.atEnd())
                    {
                        int streamRow, streamCol;
                        QMap<int, QVariant> roleDataMap;
                        stream >> streamRow >> streamCol >> roleDataMap;
                        if (!roleDataMap.isEmpty())
                        {
                            // If dropping a folder, make sure that folder name doesn't already exist where it is being dropped
                            if (roleDataMap[eDR_TYPE] == eIT_FOLDER)
                            {
                                // Make sure the target folder doesn't have a folder with the same name
                                QString droppedFolderName = roleDataMap[Qt::DisplayRole].toString();
                                const int size = targetItem->rowCount();
                                for (int i = 0; i < size; ++i)
                                {
                                    QStandardItem* pItem = targetItem->child(i);
                                    if (pItem && (pItem->data(eDR_TYPE) == eIT_FOLDER) && (QString::compare(droppedFolderName, pItem->text(), Qt::CaseInsensitive) == 0))
                                    {
                                        QMessageBox messageBox;
                                        messageBox.setStandardButtons(QMessageBox::Ok);
                                        messageBox.setWindowTitle("Audio Controls Editor");
                                        messageBox.setText("This destination already contains a folder named '" + droppedFolderName + "'.");
                                        messageBox.exec();
                                        return false;
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }

        if (mimeData && action == Qt::MoveAction)
        {
            if (!CUndo::IsSuspended())
            {
                CUndo undo("Audio Control Moved");
                CUndo::Record(new CUndoItemMove());
            }
        }

        return QStandardItemModel::dropMimeData(mimeData, action, row, column, parent);
    }

    //-------------------------------------------------------------------------------------------//
    // Drop validation: forbids dropping non-folder items at the root and
    // dropping anything onto switch/switch-state controls.
    bool QATLTreeModel::canDropMimeData(const QMimeData* mimeData, Qt::DropAction action, int row, int column, const QModelIndex& parent) const
    {
        if (!parent.isValid())
        {
            // Prevent moving controls to the root (outside a folder)
            const QString format = "application/x-qabstractitemmodeldatalist";
            if (mimeData->hasFormat(format))
            {
                QByteArray data = mimeData->data(format);
                QDataStream stream(&data, QIODevice::ReadOnly);
                int streamRow, streamCol;
                QMap<int, QVariant> roleDataMap;
                stream >> streamRow >> streamCol >> roleDataMap;
                if (!roleDataMap.isEmpty() && roleDataMap[eDR_TYPE] != eIT_FOLDER)
                {
                    return false;
                }
            }
        }
        else if (parent.data(eDR_TYPE) == eIT_AUDIO_CONTROL)
        {
            // Prevent dropping on switches
            CID nID = parent.data(eDR_ID).toUInt();
            if (CATLControl* pControl = m_pControlsModel->GetControlByID(nID))
            {
                EACEControlType eType = pControl->GetType();
                if (eType == eACET_SWITCH || eType == eACET_SWITCH_STATE)
                {
                    return false;
                }
            }
        }
        return QStandardItemModel::canDropMimeData(mimeData, action, row, column, parent);
    }
} // namespace AudioControls
#include "graphics.h"
#include <fstream>
using namespace std;

#if defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || \
    defined(__TOS_WIN__)
#include <conio.h>  //for getch(), needed in wait_for_key()
#endif

// Launch gnuplot as a child process with a write pipe and draw an initial
// empty 2D plot. `path` is the gnuplot invocation command.
Graphics::Graphics(string path) {
  gnuplot = path;
#ifdef _WIN32
  gnuplotPipe = _popen(gnuplot.c_str(), "w");
#else
  gnuplotPipe = popen(gnuplot.c_str(), "w");
#endif
  // popen may fail (gnuplot missing); all writers below guard against a
  // null pipe, so a failed launch degrades to no-ops instead of crashing.
  emptyPlot("", 0, 1, 0, 1);
}

// Close the pipe so the gnuplot child is reaped and the FILE* is not leaked
// (the original destructor left the popen'd pipe open).
Graphics::~Graphics() {
  if (gnuplotPipe) {
#ifdef _WIN32
    _pclose(gnuplotPipe);
#else
    pclose(gnuplotPipe);
#endif
    gnuplotPipe = nullptr;
  }
}

// Issue an empty 3D plot (splot) over the given ranges and set axis labels.
// angleX/angleY are accepted for interface compatibility but not used here.
void Graphics::emptyPlot(string title, double xmin, double xmax, double ymin,
                         double ymax, double zmin, double zmax, int angleX,
                         int angleY, string xlabel, string ylabel,
                         string zlabel) {
  if (!gnuplotPipe) {
    return;
  }
  stringstream cmdstr;
  cmdstr << "splot [" << xmin << ":" << xmax << "][" << ymin << ":" << ymax
         << "][" << zmin << ":" << zmax
         << "] 0 with linespoints lt \"white\" pt 0.01";
  cmdstr << " title \"" << title << "\" \n";
  cmdstr << "set xlabel \"$" << xlabel << "$\" \n";
  cmdstr << "set ylabel \"$" << ylabel << "$\" \n";
  cmdstr << "set zlabel \"$" << zlabel << "$\" \n";
  // "%s" keeps any '%' in titles/labels from being treated as a fprintf
  // format specifier (format-string bug in the original).
  fprintf(gnuplotPipe, "%s", cmdstr.str().c_str());
  fflush(gnuplotPipe);
}

// Issue an empty 2D plot over the given ranges.
void Graphics::emptyPlot(string title, double xmin, double xmax, double ymin,
                         double ymax) {
  if (!gnuplotPipe) {
    return;
  }
  stringstream cmdstr;
  cmdstr << "plot [" << xmin << ":" << xmax << "][" << ymin << ":" << ymax
         << "] 0 with linespoints lt \"white\" pt 0.01";
  cmdstr << " title \"" << title << "\" \n";
  fprintf(gnuplotPipe, "%s", cmdstr.str().c_str());
  fflush(gnuplotPipe);
}

// Block until the user presses a key (Windows: any key via _getch;
// Unix/macOS: ENTER on stdin). Lets plots stay on screen.
void Graphics::waitForKey() {
#if defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || \
    defined(__TOS_WIN__)
  // every keypress registered, also arrow keys
  cout << endl << "Press any key to continue..." << endl;
  // FlushConsoleInputBuffer(GetStdHandle(STD_INPUT_HANDLE));
  _getch();
#elif defined(unix) || defined(__unix) || defined(__unix__) || \
    defined(__APPLE__)
  cout << endl << "Press ENTER to continue..." << endl;
  std::cin.clear();
  std::cin.ignore(std::cin.rdbuf()->in_avail());
  std::cin.get();
#endif
  return;
}

// Re-render the current plot to an image file at `path`.
// NOTE(review): despite the name, the terminal is set to png, so the output
// is a PNG image, not a PDF — confirm intended format with callers.
void Graphics::saveToPdf(string path) {
  if (!gnuplotPipe) {
    return;
  }
  stringstream cmdstr;
  cmdstr << " set terminal png size 400,300 enhanced font \"Helvetica,20\" \n";
  cmdstr << " set output '" << path << "' \n";
  cmdstr << " replot \n";
  fprintf(gnuplotPipe, "%s", cmdstr.str().c_str());
  fflush(gnuplotPipe);
}

// Send a raw gnuplot command string, then replot so its effect is visible.
void Graphics::execute(string str) {
  if (!gnuplotPipe) {
    return;
  }
  fprintf(gnuplotPipe, "%s", str.c_str());
  fflush(gnuplotPipe);
  string buffer = "replot\n";
  fprintf(gnuplotPipe, "%s", buffer.c_str());
  fflush(gnuplotPipe);
}
/**
 * Definition for singly-linked list.
 * struct ListNode {
 *     int val;
 *     ListNode *next;
 *     ListNode(int x) : val(x), next(NULL) {}
 * };
 */
class Solution {
public:
    // Merge two sorted lists into one sorted list by splicing the existing
    // nodes together (no new nodes are allocated except the stack dummy).
    // Stable: on equal values the node from l2 is taken, matching the
    // original recursive formulation (`l1->val < l2->val` prefers l1 only
    // on strict inequality).
    ListNode* mergeTwoLists(ListNode* l1, ListNode* l2) {
        ListNode dummy(0);          // sentinel head; result starts at dummy.next
        ListNode* tail = &dummy;    // last node of the merged prefix

        // Walk both lists, always appending the smaller head.
        while (l1 != NULL && l2 != NULL) {
            if (l1->val < l2->val) {
                tail->next = l1;
                l1 = l1->next;
            } else {
                tail->next = l2;
                l2 = l2->next;
            }
            tail = tail->next;
        }

        // At most one list is non-empty now; append its remainder wholesale.
        tail->next = (l1 != NULL) ? l1 : l2;
        return dummy.next;
    }
};
/****************************************************************************** * The MIT License (MIT) * * Copyright (c) 2019-2020 Baldur Karlsson * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
******************************************************************************/

#include "d3d11_test.h"

// Renders a handful of primitives (quad, points, zero-stride draw) so the
// RenderDoc mesh viewer can be exercised against known geometry.
RD_TEST(D3D11_Mesh_Zoo, D3D11GraphicsTest)
{
  static constexpr const char *Description = "Draws some primitives for testing the mesh view.";

  std::string vertex = R"EOSHADER(

struct vertin
{
  float3 pos : POSITION;
  float4 col : COLOR0;
};

struct v2f
{
  float4 pos : SV_POSITION;
  float2 col2 : COLOR0;
  float4 col : COLOR1;
};

cbuffer consts : register(b0)
{
  float4 scale;
  float4 offset;
};

v2f main(vertin IN, uint vid : SV_VertexID, uint inst : SV_InstanceID)
{
  v2f OUT = (v2f)0;

  OUT.pos = float4(IN.pos.xy * scale.xy + offset.xy, IN.pos.z, 1.0f);
  OUT.col = IN.col;

  if(inst > 0)
  {
    OUT.pos *= 0.3f;
    OUT.pos.xy += 0.1f;
    OUT.col.x = 1.0f;
  }

  OUT.col2 = OUT.pos.xy;

  return OUT;
}

)EOSHADER";

  std::string pixel = R"EOSHADER(

struct v2f
{
  float4 pos : SV_POSITION;
  float2 col2 : COLOR0;
  float4 col : COLOR1;
};

float4 main(v2f IN) : SV_Target0
{
  return IN.col + 1.0e-20 * IN.col2.xyxy;
}

)EOSHADER";

  int main()
  {
    // initialise, create window, create device, etc
    if(!Init())
      return 3;

    ID3DBlobPtr vsblob = Compile(vertex, "main", "vs_4_0");
    ID3DBlobPtr psblob = Compile(pixel, "main", "ps_4_0");

    CreateDefaultInputLayout(vsblob);

    ID3D11VertexShaderPtr vs = CreateVS(vsblob);
    ID3D11PixelShaderPtr ps = CreatePS(psblob);

    // Vertex data, in pixel coordinates (mapped to clip space by the
    // scale/offset constant buffer below).
    const DefaultA2V test[] = {
        // single color quad
        {Vec3f(50.0f, 250.0f, 0.2f), Vec4f(0.0f, 1.0f, 0.0f, 1.0f), Vec2f(0.0f, 0.0f)},
        {Vec3f(250.0f, 250.0f, 0.2f), Vec4f(0.0f, 1.0f, 0.0f, 1.0f), Vec2f(0.0f, 0.0f)},
        {Vec3f(50.0f, 50.0f, 0.2f), Vec4f(0.0f, 1.0f, 0.0f, 1.0f), Vec2f(0.0f, 0.0f)},

        {Vec3f(250.0f, 250.0f, 0.2f), Vec4f(0.0f, 1.0f, 0.0f, 1.0f), Vec2f(0.0f, 0.0f)},
        {Vec3f(250.0f, 50.0f, 0.2f), Vec4f(0.0f, 1.0f, 0.0f, 1.0f), Vec2f(0.0f, 0.0f)},
        {Vec3f(50.0f, 50.0f, 0.2f), Vec4f(0.0f, 1.0f, 0.0f, 1.0f), Vec2f(0.0f, 0.0f)},

        // points, to test vertex picking
        {Vec3f(50.0f, 250.0f, 0.2f), Vec4f(0.0f, 1.0f, 0.0f, 1.0f), Vec2f(0.0f, 0.0f)},
        {Vec3f(250.0f, 250.0f, 0.2f), Vec4f(0.0f, 1.0f, 0.0f, 1.0f), Vec2f(0.0f, 0.0f)},
        {Vec3f(250.0f, 50.0f, 0.2f), Vec4f(0.0f, 1.0f, 0.0f, 1.0f), Vec2f(0.0f, 0.0f)},
        {Vec3f(50.0f, 50.0f, 0.2f), Vec4f(0.0f, 1.0f, 0.0f, 1.0f), Vec2f(0.0f, 0.0f)},

        {Vec3f(70.0f, 170.0f, 0.1f), Vec4f(1.0f, 0.0f, 1.0f, 1.0f), Vec2f(0.0f, 0.0f)},
        {Vec3f(170.0f, 170.0f, 0.1f), Vec4f(1.0f, 0.0f, 1.0f, 1.0f), Vec2f(0.0f, 0.0f)},
        {Vec3f(70.0f, 70.0f, 0.1f), Vec4f(1.0f, 0.0f, 1.0f, 1.0f), Vec2f(0.0f, 0.0f)},
    };

    ID3D11BufferPtr vb = MakeBuffer().Vertex().Data(test);

    // Pixel-to-clip-space transform consumed by the vertex shader's
    // `consts` cbuffer: scale then offset.
    Vec4f cbufferdata[] = {
        Vec4f(2.0f / (float)screenWidth, 2.0f / (float)screenHeight, 1.0f, 1.0f),
        Vec4f(-1.0f, -1.0f, 0.0f, 0.0f),
    };

    ID3D11BufferPtr cb = MakeBuffer().Constant().Data(cbufferdata);

    // Depth buffer with standard less-than testing so the overlapping
    // primitives resolve deterministically.
    ID3D11Texture2DPtr bbDepth =
        MakeTexture(DXGI_FORMAT_D32_FLOAT_S8X24_UINT, screenWidth, screenHeight).DSV();
    ID3D11DepthStencilViewPtr bbDSV = MakeDSV(bbDepth);

    CD3D11_DEPTH_STENCIL_DESC dd = CD3D11_DEPTH_STENCIL_DESC(CD3D11_DEFAULT());
    dd.DepthEnable = TRUE;
    dd.DepthWriteMask = D3D11_DEPTH_WRITE_MASK_ALL;
    dd.DepthFunc = D3D11_COMPARISON_LESS;
    dd.StencilEnable = FALSE;
    dd.StencilWriteMask = dd.StencilReadMask = 0xff;

    ID3D11DepthStencilStatePtr ds;
    CHECK_HR(dev->CreateDepthStencilState(&dd, &ds));

    while(Running())
    {
      ClearRenderTargetView(bbRTV, {0.2f, 0.2f, 0.2f, 1.0f});
      ctx->ClearDepthStencilView(bbDSV, D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1.0f, 0);

      IASetVertexBuffer(vb, sizeof(DefaultA2V), 0);
      ctx->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
      ctx->IASetInputLayout(defaultLayout);

      ctx->OMSetDepthStencilState(ds, 0);

      ctx->VSSetShader(vs, NULL, 0);
      ctx->PSSetShader(ps, NULL, 0);

      ctx->VSSetConstantBuffers(0, 1, &cb.GetInterfacePtr());

      RSSetViewport({0.0f, 0.0f, (float)screenWidth, (float)screenHeight, 0.0f, 1.0f});

      ctx->OMSetRenderTargets(1, &bbRTV.GetInterfacePtr(), bbDSV);

      // a previous draw for testing 'whole pass' rendering
      ctx->Draw(3, 10);

      setMarker("Quad");
      // draw two instances so we can test rendering other instances
      ctx->DrawInstanced(6, 2, 0, 0);

      setMarker("Points");
      ctx->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_POINTLIST);
      ctx->Draw(4, 6);

      setMarker("Stride 0");
      // stride of 0 makes every vertex fetch the same (first) element
      IASetVertexBuffer(vb, 0, 0);
      ctx->Draw(1, 0);

      Present();
    }

    return 0;
  }
};

REGISTER_TEST();
/*
 * Distributed under the OSI-approved Apache License, Version 2.0. See
 * accompanying file Copyright.txt for details.
 *
 * Write a global array from multiple processors.
 *
 * A global array is an N-dimensional array. A process can write a sub-array
 * into the global array by stating the N-dimensional offset and the size of
 * the sub-array. At reading, one can read back any portion of the array
 * regardless of how many processors wrote that data.
 *
 * Processes are NOT required
 * - to stay in the boundaries of the global dimensions. However, one will not
 * be able to read back data outside of the boundaries.
 * - to fill the whole global array, i.e. one can leave holes in it. At reading,
 * one will get the fill-value set for the array for those coordinates that
 * are not written by any process.
 *
 * The global dimensions of a global array MUST NOT change over time.
 * If they are, then the array should be handled as a local array. Of course, if
 * only a single output step is written to a file, that still shows up at
 * reading as a global array.
 *
 * The decomposition of the array across the processes, however, can change
 * between output steps.
 *
 * Created on: Jun 2, 2017
 *      Author: pnorbert
 */

#include <iostream>
#include <vector>

#include <adios2.h>
#if ADIOS2_USE_MPI
#include <mpi.h>
#endif

int main(int argc, char *argv[])
{
    int rank = 0, nproc = 1;
#if ADIOS2_USE_MPI
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
#endif
    const int NSTEPS = 5; // number of output steps written to the file

#if ADIOS2_USE_MPI
    adios2::ADIOS adios(MPI_COMM_WORLD);
#else
    adios2::ADIOS adios;
#endif

    // Application variables for output
    const unsigned int Nx = 10;
    // Global 2D array, size of nproc x Nx, with 1D decomposition
    // Each process writes one "row" of the 2D matrix.
    std::vector<double> row(Nx);

    try
    {
        // Get io settings from the config file or
        // create one with default settings here
        adios2::IO io = adios.DeclareIO("Output");

        /*
         * Define global array: type, name, global dimensions
         * The local process' part (start, count) can be defined now or later
         * before Write().
         */
        adios2::Variable<double> varGlobalArray =
            io.DefineVariable<double>("GlobalArray", {(unsigned int)nproc, Nx});

        // Open file. "w" means we overwrite any existing file on disk,
        // but Advance() will append steps to the same file.
        adios2::Engine writer = io.Open("globalArray.bp", adios2::Mode::Write);

        for (size_t step = 0; step < NSTEPS; step++)
        {
            writer.BeginStep();

            // Fill this rank's row with values unique per (step, rank, i)
            // so the decomposition is visible when reading back.
            for (size_t i = 0; i < Nx; i++)
            {
                row[i] = step * Nx * nproc * 1.0 + rank * Nx * 1.0 + (double)i;
            }

            // Make a 2D selection to describe the local dimensions of the
            // variable we write and its offsets in the global spaces
            // adios2::SelectionBoundingBox sel();
            varGlobalArray.SetSelection(adios2::Box<adios2::Dims>(
                {static_cast<size_t>(rank), 0}, {1, static_cast<size_t>(Nx)}));
            writer.Put<double>(varGlobalArray, row.data());

            // Indicate we are done for this step.
            // Disk I/O will be performed during this call unless
            // time aggregation postpones all of that to some later step
            writer.EndStep();
        }

        // Called once: indicate that we are done with this output for the run
        writer.Close();
    }
    catch (std::invalid_argument &e)
    {
        if (rank == 0)
        {
            std::cout << "Invalid argument exception, STOPPING PROGRAM\n";
            std::cout << e.what() << "\n";
        }
    }
    catch (std::ios_base::failure &e)
    {
        if (rank == 0)
        {
            std::cout << "System exception, STOPPING PROGRAM\n";
            std::cout << e.what() << "\n";
        }
    }
    catch (std::exception &e)
    {
        if (rank == 0)
        {
            std::cout << "Exception, STOPPING PROGRAM\n";
            std::cout << e.what() << "\n";
        }
    }

#if ADIOS2_USE_MPI
    MPI_Finalize();
#endif

    return 0;
}
//===--- TypeCheckCompletionCallback.cpp ----------------------------------===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// #include "swift/IDE/TypeCheckCompletionCallback.h" #include "swift/IDE/CompletionLookup.h" #include "swift/Sema/CompletionContextFinder.h" #include "swift/Sema/ConstraintSystem.h" #include "swift/Sema/IDETypeChecking.h" using namespace swift; using namespace swift::ide; using namespace swift::constraints; void TypeCheckCompletionCallback::fallbackTypeCheck(DeclContext *DC) { assert(!GotCallback); CompletionContextFinder finder(DC); if (!finder.hasCompletionExpr()) return; auto fallback = finder.getFallbackCompletionExpr(); if (!fallback) return; SolutionApplicationTarget completionTarget(fallback->E, fallback->DC, CTP_Unused, Type(), /*isDiscared=*/true); typeCheckForCodeCompletion(completionTarget, /*needsPrecheck=*/true, [&](const Solution &S) { sawSolution(S); }); } // MARK: - Utility functions for subclasses of TypeCheckCompletionCallback Type swift::ide::getTypeForCompletion(const constraints::Solution &S, ASTNode Node) { if (!S.hasType(Node)) { assert(false && "Expression wasn't type checked?"); return nullptr; } auto &CS = S.getConstraintSystem(); Type Result; if (isExpr<CodeCompletionExpr>(Node)) { Result = S.simplifyTypeForCodeCompletion(S.getType(Node)); } else { Result = S.getResolvedType(Node); } if (!Result || Result->is<UnresolvedType>()) { Result = CS.getContextualType(Node, /*forConstraint=*/false); } if (Result && Result->is<UnresolvedType>()) { Result = Type(); } return Result; } /// If the code completion expression \p E occurs in a 
/// position, we have an AST that looks like this.
/// \code
/// (binary_expr implicit type='$T3'
///   (overloaded_decl_ref_expr function_ref=compound decls=[
///     Swift.(file).~=,
///     Swift.(file).Optional extension.~=])
///   (argument_list implicit
///     (argument
///       (code_completion_expr implicit type='$T1'))
///     (argument
///       (declref_expr implicit decl=swift_ide_test.(file).foo(x:).$match))))
/// \endcode
/// If the code completion expression occurs in such an AST, return the
/// declaration of the \c $match variable, otherwise return \c nullptr.
static VarDecl *getMatchVarIfInPatternMatch(Expr *E, ConstraintSystem &CS) {
  auto &Context = CS.getASTContext();

  // The completion expression must be the LHS of an implicit binary operator.
  auto *Binary = dyn_cast_or_null<BinaryExpr>(CS.getParentExpr(E));
  if (!Binary || !Binary->isImplicit() || Binary->getLHS() != E) {
    return nullptr;
  }

  // That operator must be the pattern-match operator (~=).
  auto CalledOperator = Binary->getFn();
  if (!isPatternMatchingOperator(CalledOperator)) {
    return nullptr;
  }

  // The RHS must be an implicit reference to the compiler-generated $match.
  auto MatchArg = dyn_cast_or_null<DeclRefExpr>(Binary->getRHS());
  if (!MatchArg || !MatchArg->isImplicit()) {
    return nullptr;
  }

  auto MatchVar = MatchArg->getDecl();
  if (MatchVar && MatchVar->isImplicit() &&
      MatchVar->getBaseName() == Context.Id_PatternMatchVar) {
    return dyn_cast<VarDecl>(MatchVar);
  } else {
    return nullptr;
  }
}

/// If \p E sits in a pattern-match position (see
/// getMatchVarIfInPatternMatch), return the type of the implicit \c $match
/// variable from solution \p S; otherwise return a null Type.
Type swift::ide::getPatternMatchType(const constraints::Solution &S, Expr *E) {
  if (auto MatchVar = getMatchVarIfInPatternMatch(E, S.getConstraintSystem())) {
    Type MatchVarType;
    // If the MatchVar has an explicit type, it's not part of the solution.
    // But we can look it up in the constraint system directly.
    if (auto T = S.getConstraintSystem().getVarType(MatchVar)) {
      MatchVarType = T;
    } else {
      MatchVarType = getTypeForCompletion(S, MatchVar);
    }
    if (MatchVarType) {
      return MatchVarType;
    }
  }
  return nullptr;
}

/// Collect, into \p Result, the solution-specific type of every VarDecl that
/// has an entry in \p S's node-type map. \p Result must be empty on entry.
void swift::ide::getSolutionSpecificVarTypes(
    const constraints::Solution &S,
    llvm::SmallDenseMap<const VarDecl *, Type> &Result) {
  assert(Result.empty());
  for (auto NT : S.nodeTypes) {
    if (auto VD = dyn_cast_or_null<VarDecl>(NT.first.dyn_cast<Decl *>())) {
      Result[VD] = S.simplifyType(NT.second);
    }
  }
}

/// Whether \p CompletionExpr is the implicitly-returned single expression of
/// a closure/function body (i.e. no explicit 'return' was written).
bool swift::ide::isImplicitSingleExpressionReturn(ConstraintSystem &CS,
                                                 Expr *CompletionExpr) {
  Expr *ParentExpr = CS.getParentExpr(CompletionExpr);
  if (!ParentExpr)
    return CS.getContextualTypePurpose(CompletionExpr) == CTP_ReturnSingleExpr;

  if (auto *ParentCE = dyn_cast<ClosureExpr>(ParentExpr)) {
    if (ParentCE->hasSingleExpressionBody() &&
        ParentCE->getSingleExpressionBody() == CompletionExpr) {
      // Implicit return: either there is no return statement at all, or the
      // return statement itself was synthesized by the compiler.
      ASTNode Last = ParentCE->getBody()->getLastElement();
      return !Last.isStmt(StmtKind::Return) || Last.isImplicit();
    }
  }
  return false;
}

/// Whether code completion inside \p DC should be treated as running in an
/// async context under solution \p S.
bool swift::ide::isContextAsync(const constraints::Solution &S,
                                DeclContext *DC) {
  // We are in an async context if
  //  - the decl context is async
  if (S.getConstraintSystem().isAsynchronousContext(DC)) {
    return true;
  }

  //  - the decl context is sync but it's used in a context that expects an
  //    async function. This happens if the code completion token is in a
  //    closure that doesn't contain any async calls. Thus the closure is
  //    type-checked as non-async, but it might get converted to an async
  //    closure based on its contextual type
  auto target = S.solutionApplicationTargets.find(dyn_cast<ClosureExpr>(DC));
  if (target != S.solutionApplicationTargets.end()) {
    if (auto ContextTy = target->second.getClosureContextualType()) {
      if (auto ContextFuncTy =
              S.simplifyType(ContextTy)->getAs<AnyFunctionType>()) {
        return ContextFuncTy->isAsync();
      }
    }
  }

  //  - we did not record any information about async-ness of the context in
  //    the solution, but the type information recorded AST declares the
  //    context as async.
  return canDeclContextHandleAsync(DC);
}

/// Equality on possibly-null Types: two nulls are equal, a null and a
/// non-null are not, otherwise defer to Type::isEqual.
bool swift::ide::nullableTypesEqual(Type LHS, Type RHS) {
  if (LHS.isNull() && RHS.isNull()) {
    return true;
  } else if (LHS.isNull() || RHS.isNull()) {
    // One type is null but the other is not.
    return false;
  } else {
    return LHS->isEqual(RHS);
  }
}
// Generated by Haxe 4.1.5
// NOTE(review): machine-generated hxcpp output for
// lime._internal.graphics._ImageDataUtil.ImageDataView. Do not hand-edit;
// regenerate from lime/_internal/graphics/ImageDataUtil.hx instead. The
// HXLINE/HX_STACKFRAME markers map back to the original .hx source lines and
// the HX_("name",..) hash literals must stay in sync with the Haxe compiler.
#include <hxcpp.h>

#ifndef INCLUDED_95f339a1d026d52c
#define INCLUDED_95f339a1d026d52c
#include "hxMath.h"
#endif
#ifndef INCLUDED_lime__internal_graphics__ImageDataUtil_ImageDataView
#include <lime/_internal/graphics/_ImageDataUtil/ImageDataView.h>
#endif
#ifndef INCLUDED_lime_graphics_Image
#include <lime/graphics/Image.h>
#endif
#ifndef INCLUDED_lime_graphics_ImageBuffer
#include <lime/graphics/ImageBuffer.h>
#endif
#ifndef INCLUDED_lime_math_Rectangle
#include <lime/math/Rectangle.h>
#endif

HX_DEFINE_STACK_FRAME(_hx_pos_0f70c5927802d56d_1672_new,"lime._internal.graphics._ImageDataUtil.ImageDataView","new",0x36089ace,"lime._internal.graphics._ImageDataUtil.ImageDataView.new","lime/_internal/graphics/ImageDataUtil.hx",1672,0x59dddace)
HX_LOCAL_STACK_FRAME(_hx_pos_0f70c5927802d56d_1696_clip,"lime._internal.graphics._ImageDataUtil.ImageDataView","clip",0x0a3ec4a2,"lime._internal.graphics._ImageDataUtil.ImageDataView.clip","lime/_internal/graphics/ImageDataUtil.hx",1696,0x59dddace)
HX_LOCAL_STACK_FRAME(_hx_pos_0f70c5927802d56d_1742___update,"lime._internal.graphics._ImageDataUtil.ImageDataView","__update",0xdf2597fb,"lime._internal.graphics._ImageDataUtil.ImageDataView.__update","lime/_internal/graphics/ImageDataUtil.hx",1742,0x59dddace)

namespace lime{ namespace _internal{ namespace graphics{ namespace _ImageDataUtil{

// Haxe constructor body: views `rect` within `image`, clamping the rectangle
// to the image bounds (a null rect means "whole image").
void ImageDataView_obj::__construct( ::lime::graphics::Image image, ::lime::math::Rectangle rect){
	HX_STACKFRAME(&_hx_pos_0f70c5927802d56d_1672_new)
HXLINE(1673)	this->image = image;
HXLINE(1675)	if (::hx::IsNull( rect )) {
HXLINE(1677)		this->rect = image->get_rect();
	}
	else {
		// Clamp the caller-supplied rectangle in place to the image bounds.
HXLINE(1681)		if ((rect->x < 0)) {
HXLINE(1681)			rect->x = ( (Float)(0) );
		}
HXLINE(1682)		if ((rect->y < 0)) {
HXLINE(1682)			rect->y = ( (Float)(0) );
		}
HXLINE(1683)		if (((rect->x + rect->width) > image->width)) {
HXLINE(1683)			rect->width = (( (Float)(image->width) ) - rect->x);
		}
HXLINE(1684)		if (((rect->y + rect->height) > image->height)) {
HXLINE(1684)			rect->height = (( (Float)(image->height) ) - rect->y);
		}
HXLINE(1685)		if ((rect->width < 0)) {
HXLINE(1685)			rect->width = ( (Float)(0) );
		}
HXLINE(1686)		if ((rect->height < 0)) {
HXLINE(1686)			rect->height = ( (Float)(0) );
		}
HXLINE(1687)		this->rect = rect;
	}
HXLINE(1690)	this->stride = image->buffer->get_stride();
HXLINE(1692)	this->_hx___update();
	}

Dynamic ImageDataView_obj::__CreateEmpty() { return new ImageDataView_obj; }

void *ImageDataView_obj::_hx_vtable = 0;

// Reflection factory used by the hxcpp runtime (Type.createInstance).
Dynamic ImageDataView_obj::__Create(::hx::DynamicArray inArgs)
{
	::hx::ObjectPtr< ImageDataView_obj > _hx_result = new ImageDataView_obj();
	_hx_result->__construct(inArgs[0],inArgs[1]);
	return _hx_result;
}

bool ImageDataView_obj::_hx_isInstanceOf(int inClassId) {
	return inClassId==(int)0x00000001 || inClassId==(int)0x178fc370;
}

// Shrink this view to its intersection with the given rectangle.
void ImageDataView_obj::clip(int x,int y,int width,int height){
	HX_GC_STACKFRAME(&_hx_pos_0f70c5927802d56d_1696_clip)
HXLINE(1697)	if (::hx::IsNull( this->tempRect )) {
HXLINE(1697)		this->tempRect = ::lime::math::Rectangle_obj::__alloc( HX_CTX ,null(),null(),null(),null());
	}
HXLINE(1698)	this->tempRect->setTo(( (Float)(x) ),( (Float)(y) ),( (Float)(width) ),( (Float)(height) ));
HXLINE(1700)	this->rect->intersection(this->tempRect,this->rect);
HXLINE(1701)	this->_hx___update();
	}

HX_DEFINE_DYNAMIC_FUNC4(ImageDataView_obj,clip,(void))

// Recompute the cached integer bounds and byte offset from `rect`.
// byteOffset assumes 4 bytes per pixel (the `* 4` below).
void ImageDataView_obj::_hx___update(){
	HX_STACKFRAME(&_hx_pos_0f70c5927802d56d_1742___update)
HXLINE(1743)	this->x = ::Math_obj::ceil(this->rect->x);
HXLINE(1744)	this->y = ::Math_obj::ceil(this->rect->y);
HXLINE(1745)	this->width = ::Math_obj::floor(this->rect->width);
HXLINE(1746)	this->height = ::Math_obj::floor(this->rect->height);
HXLINE(1747)	this->byteOffset = ((this->stride * (this->y + this->image->offsetY)) + ((this->x + this->image->offsetX) * 4));
	}

HX_DEFINE_DYNAMIC_FUNC0(ImageDataView_obj,_hx___update,(void))

::hx::ObjectPtr< ImageDataView_obj > ImageDataView_obj::__new( ::lime::graphics::Image image, ::lime::math::Rectangle rect)
{
	::hx::ObjectPtr< ImageDataView_obj > __this = new ImageDataView_obj();
	__this->__construct(image,rect);
	return __this;
}

// GC-context-aware allocation used by generated callers.
::hx::ObjectPtr< ImageDataView_obj > ImageDataView_obj::__alloc(::hx::Ctx *_hx_ctx, ::lime::graphics::Image image, ::lime::math::Rectangle rect)
{
	ImageDataView_obj *__this = (ImageDataView_obj*)(::hx::Ctx::alloc(_hx_ctx, sizeof(ImageDataView_obj), true, "lime._internal.graphics._ImageDataUtil.ImageDataView"));
	*(void **)__this = ImageDataView_obj::_hx_vtable;
	__this->__construct(image,rect);
	return __this;
}

ImageDataView_obj::ImageDataView_obj()
{
}

// GC mark pass over all reference members.
void ImageDataView_obj::__Mark(HX_MARK_PARAMS)
{
	HX_MARK_BEGIN_CLASS(ImageDataView);
	HX_MARK_MEMBER_NAME(x,"x");
	HX_MARK_MEMBER_NAME(y,"y");
	HX_MARK_MEMBER_NAME(height,"height");
	HX_MARK_MEMBER_NAME(width,"width");
	HX_MARK_MEMBER_NAME(byteOffset,"byteOffset");
	HX_MARK_MEMBER_NAME(image,"image");
	HX_MARK_MEMBER_NAME(rect,"rect");
	HX_MARK_MEMBER_NAME(stride,"stride");
	HX_MARK_MEMBER_NAME(tempRect,"tempRect");
	HX_MARK_END_CLASS();
}

// GC visit pass (moving-collector support) over the same members.
void ImageDataView_obj::__Visit(HX_VISIT_PARAMS)
{
	HX_VISIT_MEMBER_NAME(x,"x");
	HX_VISIT_MEMBER_NAME(y,"y");
	HX_VISIT_MEMBER_NAME(height,"height");
	HX_VISIT_MEMBER_NAME(width,"width");
	HX_VISIT_MEMBER_NAME(byteOffset,"byteOffset");
	HX_VISIT_MEMBER_NAME(image,"image");
	HX_VISIT_MEMBER_NAME(rect,"rect");
	HX_VISIT_MEMBER_NAME(stride,"stride");
	HX_VISIT_MEMBER_NAME(tempRect,"tempRect");
}

// Dynamic field read, dispatched on field-name length then exact name.
::hx::Val ImageDataView_obj::__Field(const ::String &inName,::hx::PropertyAccess inCallProp)
{
	switch(inName.length) {
	case 1:
		if (HX_FIELD_EQ(inName,"x") ) { return ::hx::Val( x ); }
		if (HX_FIELD_EQ(inName,"y") ) { return ::hx::Val( y ); }
		break;
	case 4:
		if (HX_FIELD_EQ(inName,"rect") ) { return ::hx::Val( rect ); }
		if (HX_FIELD_EQ(inName,"clip") ) { return ::hx::Val( clip_dyn() ); }
		break;
	case 5:
		if (HX_FIELD_EQ(inName,"width") ) { return ::hx::Val( width ); }
		if (HX_FIELD_EQ(inName,"image") ) { return ::hx::Val( image ); }
		break;
	case 6:
		if (HX_FIELD_EQ(inName,"height") ) { return ::hx::Val( height ); }
		if (HX_FIELD_EQ(inName,"stride") ) { return ::hx::Val( stride ); }
		break;
	case 8:
		if (HX_FIELD_EQ(inName,"tempRect") ) { return ::hx::Val( tempRect ); }
		if (HX_FIELD_EQ(inName,"__update") ) { return ::hx::Val( _hx___update_dyn() ); }
		break;
	case 10:
		if (HX_FIELD_EQ(inName,"byteOffset") ) { return ::hx::Val( byteOffset ); }
	}
	return super::__Field(inName,inCallProp);
}

// Dynamic field write, mirroring __Field's dispatch.
::hx::Val ImageDataView_obj::__SetField(const ::String &inName,const ::hx::Val &inValue,::hx::PropertyAccess inCallProp)
{
	switch(inName.length) {
	case 1:
		if (HX_FIELD_EQ(inName,"x") ) { x=inValue.Cast< int >(); return inValue; }
		if (HX_FIELD_EQ(inName,"y") ) { y=inValue.Cast< int >(); return inValue; }
		break;
	case 4:
		if (HX_FIELD_EQ(inName,"rect") ) { rect=inValue.Cast< ::lime::math::Rectangle >(); return inValue; }
		break;
	case 5:
		if (HX_FIELD_EQ(inName,"width") ) { width=inValue.Cast< int >(); return inValue; }
		if (HX_FIELD_EQ(inName,"image") ) { image=inValue.Cast< ::lime::graphics::Image >(); return inValue; }
		break;
	case 6:
		if (HX_FIELD_EQ(inName,"height") ) { height=inValue.Cast< int >(); return inValue; }
		if (HX_FIELD_EQ(inName,"stride") ) { stride=inValue.Cast< int >(); return inValue; }
		break;
	case 8:
		if (HX_FIELD_EQ(inName,"tempRect") ) { tempRect=inValue.Cast< ::lime::math::Rectangle >(); return inValue; }
		break;
	case 10:
		if (HX_FIELD_EQ(inName,"byteOffset") ) { byteOffset=inValue.Cast< int >(); return inValue; }
	}
	return super::__SetField(inName,inValue,inCallProp);
}

// Enumerate reflectable instance fields (name + precomputed hash).
void ImageDataView_obj::__GetFields(Array< ::String> &outFields)
{
	outFields->push(HX_("x",78,00,00,00));
	outFields->push(HX_("y",79,00,00,00));
	outFields->push(HX_("height",e7,07,4c,02));
	outFields->push(HX_("width",06,b6,62,ca));
	outFields->push(HX_("byteOffset",bb,20,44,38));
	outFields->push(HX_("image",5b,1f,69,bd));
	outFields->push(HX_("rect",24,4d,a7,4b));
	outFields->push(HX_("stride",19,20,30,11));
	outFields->push(HX_("tempRect",58,56,1b,9b));
	super::__GetFields(outFields);
};

#ifdef HXCPP_SCRIPTABLE
// Member layout table for the cppia scripting runtime.
static ::hx::StorageInfo ImageDataView_obj_sMemberStorageInfo[] = {
	{::hx::fsInt,(int)offsetof(ImageDataView_obj,x),HX_("x",78,00,00,00)},
	{::hx::fsInt,(int)offsetof(ImageDataView_obj,y),HX_("y",79,00,00,00)},
	{::hx::fsInt,(int)offsetof(ImageDataView_obj,height),HX_("height",e7,07,4c,02)},
	{::hx::fsInt,(int)offsetof(ImageDataView_obj,width),HX_("width",06,b6,62,ca)},
	{::hx::fsInt,(int)offsetof(ImageDataView_obj,byteOffset),HX_("byteOffset",bb,20,44,38)},
	{::hx::fsObject /* ::lime::graphics::Image */ ,(int)offsetof(ImageDataView_obj,image),HX_("image",5b,1f,69,bd)},
	{::hx::fsObject /* ::lime::math::Rectangle */ ,(int)offsetof(ImageDataView_obj,rect),HX_("rect",24,4d,a7,4b)},
	{::hx::fsInt,(int)offsetof(ImageDataView_obj,stride),HX_("stride",19,20,30,11)},
	{::hx::fsObject /* ::lime::math::Rectangle */ ,(int)offsetof(ImageDataView_obj,tempRect),HX_("tempRect",58,56,1b,9b)},
	{ ::hx::fsUnknown, 0, null()}
};
static ::hx::StaticInfo *ImageDataView_obj_sStaticStorageInfo = 0;
#endif

static ::String ImageDataView_obj_sMemberFields[] = {
	HX_("x",78,00,00,00),
	HX_("y",79,00,00,00),
	HX_("height",e7,07,4c,02),
	HX_("width",06,b6,62,ca),
	HX_("byteOffset",bb,20,44,38),
	HX_("image",5b,1f,69,bd),
	HX_("rect",24,4d,a7,4b),
	HX_("stride",19,20,30,11),
	HX_("tempRect",58,56,1b,9b),
	HX_("clip",d0,6e,c2,41),
	HX_("__update",29,f1,34,2f),
	::String(null()) };

::hx::Class ImageDataView_obj::__mClass;

// Register the class (name, factories, reflection tables) with the runtime.
void ImageDataView_obj::__register()
{
	ImageDataView_obj _hx_dummy;
	ImageDataView_obj::_hx_vtable = *(void **)&_hx_dummy;
	::hx::Static(__mClass) = new ::hx::Class_obj();
	__mClass->mName = HX_("lime._internal.graphics._ImageDataUtil.ImageDataView",dc,db,9b,c0);
	__mClass->mSuper = &super::__SGetClass();
	__mClass->mConstructEmpty = &__CreateEmpty;
	__mClass->mConstructArgs = &__Create;
	__mClass->mGetStaticField = &::hx::Class_obj::GetNoStaticField;
	__mClass->mSetStaticField = &::hx::Class_obj::SetNoStaticField;
	__mClass->mStatics = ::hx::Class_obj::dupFunctions(0 /* sStaticFields */);
	__mClass->mMembers = ::hx::Class_obj::dupFunctions(ImageDataView_obj_sMemberFields);
	__mClass->mCanCast = ::hx::TCanCast< ImageDataView_obj >;
#ifdef HXCPP_SCRIPTABLE
	__mClass->mMemberStorageInfo = ImageDataView_obj_sMemberStorageInfo;
#endif
#ifdef HXCPP_SCRIPTABLE
	__mClass->mStaticStorageInfo = ImageDataView_obj_sStaticStorageInfo;
#endif
	::hx::_hx_RegisterClass(__mClass->mName, __mClass);
}

} // end namespace _ImageDataUtil
} // end namespace graphics
} // end namespace _internal
} // end namespace lime
#pragma once // This file is generated from the Game's Reflection data #include <cstdint> #include <RED4ext/Common.hpp> #include <RED4ext/REDhash.hpp> #include <RED4ext/DynArray.hpp> #include <RED4ext/Handle.hpp> namespace RED4ext { namespace anim { struct AdditionalTransformEntry; } namespace anim { struct AdditionalTransformContainer { static constexpr const char* NAME = "animAdditionalTransformContainer"; static constexpr const char* ALIAS = NAME; DynArray<Handle<anim::AdditionalTransformEntry>> entries; // 00 }; RED4EXT_ASSERT_SIZE(AdditionalTransformContainer, 0x10); } // namespace anim } // namespace RED4ext
/* * This file is part of the DUDS project. It is subject to the BSD-style * license terms in the LICENSE file found in the top-level directory of this * distribution and at https://github.com/jjackowski/duds/blob/master/LICENSE. * No part of DUDS, including this file, may be copied, modified, propagated, * or distributed except according to the terms contained in the LICENSE file. * * Copyright (C) 2020 Jeff Jackowski */ #ifndef INPUTHANDLERS_HPP #define INPUTHANDLERS_HPP #include <unordered_map> #include <boost/signals2/signal.hpp> #include <duds/os/linux/EventTypeCode.hpp> #ifdef linux // !@?!#?!#? #undef linux #endif namespace duds { namespace os { namespace linux { /** * The signal type that will handle input events. * @param etc The event type and event code of the input event to handle. * @param value The value of the input. */ typedef boost::signals2::signal<void(EventTypeCode etc, std::int32_t value)> InputSignal; /** * Maintains a set of InputSignal objects to respond to input events. * These are held separately from the input device so that the input handlers * can be applied to multiple input devices. * @author Jeff Jackowski */ class InputHandlers { /** * A type that relates events to signal handlers. */ typedef std::unordered_map<EventTypeCode, InputSignal> InputMap; /** * Relates events to signal handlers. */ InputMap receivers; /** * Handles input for events that are not listed in the @a receivers InputMap. */ InputSignal defReceiver; public: /** * Dispatches the provided input event to the appropriate InputSignal. * @param etc The event type and code of the input event to handle. * @param value The value of the input. * @throw object Anything thrown by the functions invoked by the event's * input signal. */ void handleEvent(EventTypeCode etc, std::int32_t value); /** * Removes all input handlers. */ void clear(); /** * Make a connection to an input event signal. 
* See the [Boost reference documentation](https://www.boost.org/doc/libs/1_67_0/doc/html/boost/signals2/signal.html#idp182137616-bb) * for more details, or the [tutorial](https://www.boost.org/doc/libs/1_67_0/doc/html/signals2/tutorial.html) * for an overview of the whole boost::singals2 system. * @param etc The event type and code that will be forwarded to the * provided slot function. */ boost::signals2::connection connect( EventTypeCode etc, const InputSignal::slot_type &slot, boost::signals2::connect_position at = boost::signals2::at_back ) { return receivers[etc].connect(slot, at); } /** * Make a connection to an input event signal. * See the [Boost reference documentation](https://www.boost.org/doc/libs/1_67_0/doc/html/boost/signals2/signal.html#idp182137616-bb) * for more details, or the [tutorial](https://www.boost.org/doc/libs/1_67_0/doc/html/signals2/tutorial.html) * for an overview of the whole boost::singals2 system. * @param etc The event type and code that will be forwarded to the * provided slot function. */ boost::signals2::connection connect( EventTypeCode etc, const InputSignal::group_type &group, const InputSignal::slot_type &slot, boost::signals2::connect_position at = boost::signals2::at_back ) { return receivers[etc].connect(group, slot, at); } /** * Make a connection to an input event signal. * See the [Boost reference documentation](https://www.boost.org/doc/libs/1_67_0/doc/html/boost/signals2/signal.html#idp182137616-bb) * for more details, or the [tutorial](https://www.boost.org/doc/libs/1_67_0/doc/html/signals2/tutorial.html) * for an overview of the whole boost::singals2 system. * @param etc The event type and code that will be forwarded to the * provided slot function. 
*/ boost::signals2::connection connectExtended( EventTypeCode etc, const InputSignal::extended_slot_type &slot, boost::signals2::connect_position at = boost::signals2::at_back ) { return receivers[etc].connect_extended(slot, at); } /** * Make a connection to an input event signal. * See the [Boost reference documentation](https://www.boost.org/doc/libs/1_67_0/doc/html/boost/signals2/signal.html#idp182137616-bb) * for more details, or the [tutorial](https://www.boost.org/doc/libs/1_67_0/doc/html/signals2/tutorial.html) * for an overview of the whole boost::singals2 system. * @param etc The event type and code that will be forwarded to the * provided slot function. */ boost::signals2::connection connectExtended( EventTypeCode etc, const InputSignal::group_type &group, const InputSignal::extended_slot_type &slot, boost::signals2::connect_position at = boost::signals2::at_back ) { return receivers[etc].connect_extended(group, slot, at); } /** * Disconnect a group from an input event signal. * See the [Boost reference documentation](https://www.boost.org/doc/libs/1_67_0/doc/html/boost/signals2/signal.html#idp182137616-bb) * for more details, or the [tutorial](https://www.boost.org/doc/libs/1_67_0/doc/html/signals2/tutorial.html) * for an overview of the whole boost::singals2 system. * @param etc The event type and code of the items to remove. */ void disconnect( EventTypeCode etc, const InputSignal::group_type &group ) { receivers[etc].disconnect(group); } /** * Disconnect a slot from an input event signal. * See the [Boost reference documentation](https://www.boost.org/doc/libs/1_67_0/doc/html/boost/signals2/signal.html#idp182137616-bb) * for more details, or the [tutorial](https://www.boost.org/doc/libs/1_67_0/doc/html/signals2/tutorial.html) * for an overview of the whole boost::singals2 system. * @param etc The event type and code of the items to remove. 
*/ template<typename Slot> void disconnect(EventTypeCode etc, const Slot &slotFunc) { receivers[etc].disconnect(slotFunc); } /** * Disconnects all slots from an input event signal. * This actually destructs the signal object for the given event. * @param etc The event type and code of the items to remove. */ void disconnectAll(EventTypeCode etc) { receivers.erase(etc); } /** * Make a connection to the default input event signal. * See the [Boost reference documentation](https://www.boost.org/doc/libs/1_67_0/doc/html/boost/signals2/signal.html#idp182137616-bb) * for more details, or the [tutorial](https://www.boost.org/doc/libs/1_67_0/doc/html/signals2/tutorial.html) * for an overview of the whole boost::singals2 system. */ boost::signals2::connection connect( const InputSignal::slot_type &slot, boost::signals2::connect_position at = boost::signals2::at_back ) { return defReceiver.connect(slot, at); } /** * Make a connection to the default input event signal. * See the [Boost reference documentation](https://www.boost.org/doc/libs/1_67_0/doc/html/boost/signals2/signal.html#idp182137616-bb) * for more details, or the [tutorial](https://www.boost.org/doc/libs/1_67_0/doc/html/signals2/tutorial.html) * for an overview of the whole boost::singals2 system. */ boost::signals2::connection connect( const InputSignal::group_type &group, const InputSignal::slot_type &slot, boost::signals2::connect_position at = boost::signals2::at_back ) { return defReceiver.connect(group, slot, at); } /** * Make a connection to the default input event signal. * See the [Boost reference documentation](https://www.boost.org/doc/libs/1_67_0/doc/html/boost/signals2/signal.html#idp182137616-bb) * for more details, or the [tutorial](https://www.boost.org/doc/libs/1_67_0/doc/html/signals2/tutorial.html) * for an overview of the whole boost::singals2 system. 
*/ boost::signals2::connection connectExtended( const InputSignal::extended_slot_type &slot, boost::signals2::connect_position at = boost::signals2::at_back ) { return defReceiver.connect_extended(slot, at); } /** * Make a connection to the default input event signal. * See the [Boost reference documentation](https://www.boost.org/doc/libs/1_67_0/doc/html/boost/signals2/signal.html#idp182137616-bb) * for more details, or the [tutorial](https://www.boost.org/doc/libs/1_67_0/doc/html/signals2/tutorial.html) * for an overview of the whole boost::singals2 system. */ boost::signals2::connection connectExtended( const InputSignal::group_type &group, const InputSignal::extended_slot_type &slot, boost::signals2::connect_position at = boost::signals2::at_back ) { return defReceiver.connect_extended(group, slot, at); } /** * Disconnect a group from the default input event signal. * See the [Boost reference documentation](https://www.boost.org/doc/libs/1_67_0/doc/html/boost/signals2/signal.html#idp182137616-bb) * for more details, or the [tutorial](https://www.boost.org/doc/libs/1_67_0/doc/html/signals2/tutorial.html) * for an overview of the whole boost::singals2 system. */ void disconnect( const InputSignal::group_type &group ) { defReceiver.disconnect(group); } /** * Disconnect a slot from the default input event signal. * See the [Boost reference documentation](https://www.boost.org/doc/libs/1_67_0/doc/html/boost/signals2/signal.html#idp182137616-bb) * for more details, or the [tutorial](https://www.boost.org/doc/libs/1_67_0/doc/html/signals2/tutorial.html) * for an overview of the whole boost::singals2 system. */ template<typename Slot> void disconnect(const Slot &slotFunc) { defReceiver.disconnect(slotFunc); } /** * Disconnects all slots from the default input event signal. */ void disconnectAll() { defReceiver.disconnect_all_slots(); } }; /** * Shared pointer to a InputHandlers class. 
*/ typedef std::shared_ptr<InputHandlers> InputHandlersSptr; } } } #endif // #ifndef INPUTHANDLERS_HPP
//===----------------------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // UNSUPPORTED: c++98, c++03, c++11, c++14, c++17 // XFAIL: * // <chrono> // class year_month_weekday; // template<class charT, class traits> // basic_ostream<charT, traits>& // operator<<(basic_ostream<charT, traits>& os, const year_month_weekday& ym); // // Returns: os << ym.year() << '/' << ym.month(). // // // template<class charT, class traits> // basic_ostream<charT, traits>& // to_stream(basic_ostream<charT, traits>& os, const charT* fmt, const year_month_weekday& ym); // // Effects: Streams ym into os using the format specified by the NTCTS fmt. fmt encoding follows the rules specified in 25.11. // // template<class charT, class traits, class Alloc = allocator<charT>> // basic_istream<charT, traits>& // from_stream(basic_istream<charT, traits>& is, const charT* fmt, // year_month_weekday& ym, basic_string<charT, traits, Alloc>* abbrev = nullptr, // minutes* offset = nullptr); // // Effects: Attempts to parse the input stream is into the year_month_weekday ym using the format // flags given in the NTCTS fmt as specified in 25.12. If the parse fails to decode // a valid year_month_weekday, is.setstate(ios_- base::failbit) shall be called and ym shall // not be modified. If %Z is used and successfully parsed, that value will be assigned // to *abbrev if abbrev is non-null. If %z (or a modified variant) is used and // successfully parsed, that value will be assigned to *offset if offset is non-null. 
#include <chrono> #include <type_traits> #include <cassert> #include <iostream> #include "test_macros.h" int main() { using year_month_weekday = std::chrono::year_month_weekday; using year = std::chrono::year; using month = std::chrono::month; using weekday = std::chrono::weekday; std::cout << year_month_weekday{year{2018}, month{3}, weekday{4}}; }
#include "BinaryData.h"

#include <algorithm>
#include <sstream>

namespace carto {

    // Immutable blob of bytes. The payload is kept in a shared vector so that
    // copies of a BinaryData object alias the same underlying storage.

    BinaryData::BinaryData() :
        _dataPtr(std::make_shared<std::vector<unsigned char> >())
    {
    }

    BinaryData::BinaryData(std::vector<unsigned char> data) :
        _dataPtr(std::make_shared<std::vector<unsigned char> >(std::move(data)))
    {
    }

    BinaryData::BinaryData(const unsigned char* data, std::size_t size) :
        _dataPtr(std::make_shared<std::vector<unsigned char> >(data, data + size))
    {
    }

    // True when the buffer holds no bytes.
    bool BinaryData::empty() const {
        return _dataPtr->empty();
    }

    // Number of bytes stored.
    std::size_t BinaryData::size() const {
        return _dataPtr->size();
    }

    // Raw pointer to the first byte; valid while the shared vector lives.
    const unsigned char* BinaryData::data() const {
        return _dataPtr->data();
    }

    // Expose the shared storage itself.
    std::shared_ptr<std::vector<unsigned char> > BinaryData::getDataPtr() const {
        return _dataPtr;
    }

    bool BinaryData::operator ==(const BinaryData& data) const {
        // std::vector's own operator== performs the size check followed by an
        // element-wise comparison, matching the previous manual version.
        return *_dataPtr == *data._dataPtr;
    }

    bool BinaryData::operator !=(const BinaryData& data) const {
        return !(*this == data);
    }

    // Hash of the raw bytes. Routing through std::hash<std::string> keeps the
    // historical hash values unchanged.
    int BinaryData::hash() const {
        const std::string bytes(reinterpret_cast<const char*>(_dataPtr->data()), _dataPtr->size());
        return static_cast<int>(std::hash<std::string>()(bytes));
    }

    // Short human-readable summary, e.g. "BinaryData [size=12]".
    std::string BinaryData::toString() const {
        std::ostringstream stream;
        stream << "BinaryData [size=" << _dataPtr->size() << "]";
        return stream.str();
    }

}
#ifndef MAVE_AVX512F_VECTOR3_DOUBLE_HPP #define MAVE_AVX512F_VECTOR3_DOUBLE_HPP #ifndef __AVX512F__ #error "mave/avx512f/vector3d.hpp requires avx512f support but __AVX512F__ is not defined." #endif #ifndef MAVE_VECTOR_HPP #error "do not use mave/avx/vector3d.hpp alone. please include mave/vector.hpp." #endif #ifdef MAVE_VECTOR3_DOUBLE_IMPLEMENTATION #error "specialization of vector for 3x double is already defined" #endif #define MAVE_VECTOR3_DOUBLE_IMPLEMENTATION "avx512f" #include <x86intrin.h> // for *nix #include <type_traits> #include <array> #include <cmath> namespace mave { template<> struct alignas(32) matrix<double, 3, 1> { static constexpr std::size_t row_size = 3; static constexpr std::size_t column_size = 1; static constexpr std::size_t total_size = 3; using value_type = double; using storage_type = std::array<double, 4>; using pointer = value_type*; using const_pointer = value_type const*; using reference = value_type&; using const_reference = value_type const&; using size_type = std::size_t; template<typename T1, typename T2, typename T3> matrix(T1&& v1, T2&& v2, T3&& v3) noexcept : vs_{{static_cast<double>(v1), static_cast<double>(v2), static_cast<double>(v3), 0.0}} {} matrix(const std::array<double, 3>& arg) noexcept : vs_{{arg[0], arg[1], arg[2], 0.0}} {} matrix(__m256d pack) noexcept { _mm256_store_pd(this->data(), pack); } matrix& operator=(__m256d pack) noexcept { _mm256_store_pd(this->data(), pack); return *this; } matrix(__m128 pack) noexcept { _mm256_store_pd(this->data(), _mm256_cvtps_pd(pack)); } matrix& operator=(__m128 pack) noexcept { _mm256_store_pd(this->data(), _mm256_cvtps_pd(pack)); return *this; } matrix(__m128i pack) noexcept { _mm256_store_pd(this->data(), _mm256_cvtepi32_pd(pack)); } matrix& operator=(__m128i pack) noexcept { _mm256_store_pd(this->data(), _mm256_cvtepi32_pd(pack)); return *this; } matrix(): vs_{{0.0, 0.0, 0.0, 0.0}}{}; ~matrix() = default; matrix(const matrix&) = default; matrix(matrix&&) = default; matrix& 
operator=(const matrix&) = default; matrix& operator=(matrix&&) = default; template<typename T> matrix& operator=(const matrix<T, 3, 1>& rhs) noexcept { vs_[0] = static_cast<double>(rhs[0]); vs_[1] = static_cast<double>(rhs[1]); vs_[2] = static_cast<double>(rhs[2]); vs_[3] = 0.0; return *this; } matrix& operator+=(const matrix<double, 3, 1>& other) noexcept { const __m256d v1 = _mm256_load_pd(this->data()); const __m256d v2 = _mm256_load_pd(other.data()); _mm256_store_pd(this->data(), _mm256_add_pd(v1, v2)); return *this; } matrix& operator-=(const matrix<double, 3, 1>& other) noexcept { const __m256d v1 = _mm256_load_pd(this->data()); const __m256d v2 = _mm256_load_pd(other.data()); _mm256_store_pd(this->data(), _mm256_sub_pd(v1, v2)); return *this; } matrix& operator*=(const double other) noexcept { const __m256d v1 = _mm256_load_pd(this->data()); const __m256d v2 = _mm256_set1_pd(other); _mm256_store_pd(this->data(), _mm256_mul_pd(v1, v2)); return *this; } matrix& operator/=(const double other) noexcept { const __m256d v1 = _mm256_load_pd(this->data()); const __m256d v2 = _mm256_set1_pd(other); _mm256_store_pd(this->data(), _mm256_div_pd(v1, v2)); return *this; } size_type size() const noexcept {return total_size;} pointer data() noexcept {return vs_.data();} const_pointer data() const noexcept {return vs_.data();} reference at(size_type i) {return vs_.at(i);} const_reference at(size_type i) const {return vs_.at(i);} reference operator[](size_type i) noexcept {return vs_[i];} const_reference operator[](size_type i) const noexcept {return vs_[i];} reference at(size_type i, size_type) {return vs_.at(i);} const_reference at(size_type i, size_type) const {return vs_.at(i);} reference operator()(size_type i, size_type) noexcept {return vs_[i];} const_reference operator()(size_type i, size_type) const noexcept {return vs_[i];} bool diagnosis() const noexcept {return vs_[3] == 0.0;} void zero() noexcept {_mm256_store_pd(this->data(), _mm256_setzero_pd());} private: 
alignas(32) storage_type vs_; }; // --------------------------------------------------------------------------- // negation operator- // --------------------------------------------------------------------------- template<> MAVE_INLINE matrix<double, 3, 1> operator-(const matrix<double, 3, 1>& v) noexcept { return _mm256_sub_pd(_mm256_setzero_pd(), _mm256_load_pd(v.data())); } template<> MAVE_INLINE std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>> operator-(std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&> vs ) noexcept { const __m512d v12 = _mm512_sub_pd(_mm512_setzero_pd(), _mm512_insertf64x4( _mm512_castpd256_pd512(_mm256_load_pd(std::get<0>(vs).data())), _mm256_load_pd(std::get<1>(vs).data()), 1)); return std::make_tuple(matrix<double, 3, 1>(_mm512_castpd512_pd256(v12)), matrix<double, 3, 1>(_mm512_extractf64x4_pd(v12, 1))); } template<> MAVE_INLINE std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>, matrix<double, 3, 1>> operator-(std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&> vs) noexcept { const auto v12 = -std::tie(std::get<0>(vs), std::get<1>(vs)); return std::make_tuple(std::get<0>(v12), std::get<1>(v12), -std::get<2>(vs)); } template<> MAVE_INLINE std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>, matrix<double, 3, 1>, matrix<double, 3, 1>> operator-(std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&> vs ) noexcept { const auto v12 = -std::tie(std::get<0>(vs), std::get<1>(vs)); const auto v34 = -std::tie(std::get<2>(vs), std::get<3>(vs)); return std::make_tuple(std::get<0>(v12), std::get<1>(v12), std::get<0>(v34), std::get<1>(v34)); } // --------------------------------------------------------------------------- // addition operator+ // --------------------------------------------------------------------------- template<> MAVE_INLINE matrix<double, 3, 1> operator+( const matrix<double, 3, 1>& v1, const matrix<double, 
3, 1>& v2) noexcept { return _mm256_add_pd(_mm256_load_pd(v1.data()), _mm256_load_pd(v2.data())); } template<> MAVE_INLINE std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>> operator+(std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&> v1, std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&> v2 ) noexcept { const __m512d v11 = _mm512_insertf64x4( _mm512_castpd256_pd512(_mm256_load_pd(std::get<0>(v1).data())), _mm256_load_pd(std::get<1>(v1).data()), 1); const __m512d v22 = _mm512_insertf64x4( _mm512_castpd256_pd512(_mm256_load_pd(std::get<0>(v2).data())), _mm256_load_pd(std::get<1>(v2).data()), 1); const __m512d rslt = _mm512_add_pd(v11, v22); return std::make_tuple(matrix<double, 3, 1>(_mm512_castpd512_pd256(rslt)), matrix<double, 3, 1>(_mm512_extractf64x4_pd(rslt, 1))); } template<> MAVE_INLINE std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>, matrix<double, 3, 1>> operator+(std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&> v1, std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&> v2) noexcept { const auto r12 = std::tie(std::get<0>(v1), std::get<1>(v1)) + std::tie(std::get<0>(v2), std::get<1>(v2)); return std::make_tuple(std::get<0>(r12), std::get<1>(r12), std::get<2>(v1) + std::get<2>(v2)); } template<> MAVE_INLINE std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>, matrix<double, 3, 1>, matrix<double, 3, 1>> operator+(std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&> v1, std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&> v2 ) noexcept { const auto r12 = std::tie(std::get<0>(v1), std::get<1>(v1)) + std::tie(std::get<0>(v2), std::get<1>(v2)); const auto r34 = std::tie(std::get<2>(v1), std::get<3>(v1)) + std::tie(std::get<2>(v2), std::get<3>(v2)); return std::make_tuple(std::get<0>(r12), std::get<1>(r12), 
std::get<0>(r34), std::get<1>(r34)); } // assignment ---------------------------------------------------------------- template<> MAVE_INLINE void operator+=( std::tuple< matrix<double,3,1>&, matrix<double,3,1>&> v1, std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&> v2 ) noexcept { const __m512d v11 = _mm512_insertf64x4( _mm512_castpd256_pd512(_mm256_load_pd(std::get<0>(v1).data())), _mm256_load_pd(std::get<1>(v1).data()), 1); const __m512d v22 = _mm512_insertf64x4( _mm512_castpd256_pd512(_mm256_load_pd(std::get<0>(v2).data())), _mm256_load_pd(std::get<1>(v2).data()), 1); const __m512d rslt = _mm512_add_pd(v11, v22); _mm256_store_pd(std::get<0>(v1).data(), _mm512_castpd512_pd256(rslt)); _mm256_store_pd(std::get<1>(v1).data(), _mm512_extractf64x4_pd(rslt, 1)); return ; } template<> MAVE_INLINE void operator+=( std::tuple< matrix<double,3,1>&, matrix<double,3,1>&, matrix<double,3,1>&> v1, std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&> v2) noexcept { std::tie(std::get<0>(v1), std::get<1>(v1)) += std::tie(std::get<0>(v2), std::get<1>(v2)); std::get<2>(v1) += std::get<2>(v2); return ; } template<> MAVE_INLINE void operator+=( std::tuple< matrix<double,3,1>&, matrix<double,3,1>&, matrix<double,3,1>&, matrix<double,3,1>&> v1, std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&> v2 ) noexcept { std::tie(std::get<0>(v1), std::get<1>(v1)) += std::tie(std::get<0>(v2), std::get<1>(v2)); std::tie(std::get<2>(v1), std::get<3>(v1)) += std::tie(std::get<2>(v2), std::get<3>(v2)); return ; } // --------------------------------------------------------------------------- // subtraction // --------------------------------------------------------------------------- template<> MAVE_INLINE matrix<double, 3, 1> operator-( const matrix<double, 3, 1>& v1, const matrix<double, 3, 1>& v2) noexcept { return _mm256_sub_pd(_mm256_load_pd(v1.data()), 
_mm256_load_pd(v2.data())); } template<> MAVE_INLINE std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>> operator-(std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&> v1, std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&> v2 ) noexcept { const __m512d v11 = _mm512_insertf64x4( _mm512_castpd256_pd512(_mm256_load_pd(std::get<0>(v1).data())), _mm256_load_pd(std::get<1>(v1).data()), 1); const __m512d v22 = _mm512_insertf64x4( _mm512_castpd256_pd512(_mm256_load_pd(std::get<0>(v2).data())), _mm256_load_pd(std::get<1>(v2).data()), 1); const __m512d rslt = _mm512_sub_pd(v11, v22); return std::make_tuple(matrix<double, 3, 1>(_mm512_castpd512_pd256(rslt)), matrix<double, 3, 1>(_mm512_extractf64x4_pd(rslt, 1))); } template<> MAVE_INLINE std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>, matrix<double, 3, 1>> operator-(std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&> v1, std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&> v2) noexcept { const auto r12 = std::tie(std::get<0>(v1), std::get<1>(v1)) - std::tie(std::get<0>(v2), std::get<1>(v2)); return std::make_tuple(std::get<0>(r12), std::get<1>(r12), std::get<2>(v1) - std::get<2>(v2)); } template<> MAVE_INLINE std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>, matrix<double, 3, 1>, matrix<double, 3, 1>> operator-(std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&> v1, std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&> v2 ) noexcept { const auto r12 = std::tie(std::get<0>(v1), std::get<1>(v1)) - std::tie(std::get<0>(v2), std::get<1>(v2)); const auto r34 = std::tie(std::get<2>(v1), std::get<3>(v1)) - std::tie(std::get<2>(v2), std::get<3>(v2)); return std::make_tuple(std::get<0>(r12), std::get<1>(r12), std::get<0>(r34), std::get<1>(r34)); } // assignment 
---------------------------------------------------------------- template<> MAVE_INLINE void operator-=( std::tuple< matrix<double,3,1>&, matrix<double,3,1>&> v1, std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&> v2 ) noexcept { const __m512d v11 = _mm512_insertf64x4( _mm512_castpd256_pd512(_mm256_load_pd(std::get<0>(v1).data())), _mm256_load_pd(std::get<1>(v1).data()), 1); const __m512d v22 = _mm512_insertf64x4( _mm512_castpd256_pd512(_mm256_load_pd(std::get<0>(v2).data())), _mm256_load_pd(std::get<1>(v2).data()), 1); const __m512d rslt = _mm512_sub_pd(v11, v22); _mm256_store_pd(std::get<0>(v1).data(), _mm512_castpd512_pd256(rslt)); _mm256_store_pd(std::get<1>(v1).data(), _mm512_extractf64x4_pd(rslt, 1)); } template<> MAVE_INLINE void operator-=( std::tuple< matrix<double,3,1>&, matrix<double,3,1>&, matrix<double,3,1>&> v1, std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&> v2) noexcept { std::tie(std::get<0>(v1), std::get<1>(v1)) -= std::tie(std::get<0>(v2), std::get<1>(v2)); std::get<2>(v1) -= std::get<2>(v2); return ; } template<> MAVE_INLINE void operator-=( std::tuple< matrix<double,3,1>&, matrix<double,3,1>&, matrix<double,3,1>&, matrix<double,3,1>&> v1, std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&> v2 ) noexcept { std::tie(std::get<0>(v1), std::get<1>(v1)) -= std::tie(std::get<0>(v2), std::get<1>(v2)); std::tie(std::get<2>(v1), std::get<3>(v1)) -= std::tie(std::get<2>(v2), std::get<3>(v2)); return ; } // --------------------------------------------------------------------------- // multiplication // --------------------------------------------------------------------------- // scalar * vector ----------------------------------------------------------- template<> MAVE_INLINE matrix<double, 3, 1> operator*( const double v1, const matrix<double, 3, 1>& v2) noexcept { return _mm256_mul_pd(_mm256_set1_pd(v1), 
_mm256_load_pd(v2.data())); } template<> MAVE_INLINE std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>> operator*(std::tuple<double, double> v1, std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&> v2 ) noexcept { const __m512d v11 = _mm512_insertf64x4( _mm512_castpd256_pd512(_mm256_set1_pd(std::get<0>(v1))), _mm256_set1_pd(std::get<1>(v1)), 1); const __m512d v22 = _mm512_insertf64x4( _mm512_castpd256_pd512(_mm256_load_pd(std::get<0>(v2).data())), _mm256_load_pd(std::get<1>(v2).data()), 1); const __m512d rslt = _mm512_mul_pd(v11, v22); return std::make_tuple(matrix<double, 3, 1>(_mm512_castpd512_pd256(rslt)), matrix<double, 3, 1>(_mm512_extractf64x4_pd(rslt, 1))); } template<> MAVE_INLINE std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>, matrix<double, 3, 1>> operator*(std::tuple<double, double, double> v1, std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&> v2) noexcept { const auto r12 = std::tuple<double, double>(std::get<0>(v1), std::get<1>(v1)) * std::tie(std::get<0>(v2), std::get<1>(v2)); return std::make_tuple(std::get<0>(r12), std::get<1>(r12), std::get<2>(v1) * std::get<2>(v2)); } template<> MAVE_INLINE std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>, matrix<double, 3, 1>, matrix<double, 3, 1>> operator*(std::tuple<double, double, double, double> v1, std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&> v2 ) noexcept { const auto r12 = std::tuple<double, double>(std::get<0>(v1), std::get<1>(v1)) * std::tie(std::get<0>(v2), std::get<1>(v2)); const auto r34 = std::tuple<double, double>(std::get<2>(v1), std::get<3>(v1)) * std::tie(std::get<2>(v2), std::get<3>(v2)); return std::make_tuple(std::get<0>(r12), std::get<1>(r12), std::get<0>(r34), std::get<1>(r34)); } // vector * scalar ----------------------------------------------------------- template<> MAVE_INLINE matrix<double, 3, 1> operator*( const matrix<double, 3, 1>& v1, const 
double v2) noexcept { return _mm256_mul_pd(_mm256_load_pd(v1.data()), _mm256_set1_pd(v2)); } template<> MAVE_INLINE std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>> operator*(std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&> v1, std::tuple<double, double> v2) noexcept { const __m512d v11 = _mm512_insertf64x4( _mm512_castpd256_pd512(_mm256_load_pd(std::get<0>(v1).data())), _mm256_load_pd(std::get<1>(v1).data()), 1); const __m512d v22 = _mm512_insertf64x4( _mm512_castpd256_pd512(_mm256_set1_pd(std::get<0>(v2))), _mm256_set1_pd(std::get<1>(v2)), 1); const __m512d rslt = _mm512_mul_pd(v11, v22); return std::make_tuple(matrix<double, 3, 1>(_mm512_castpd512_pd256(rslt)), matrix<double, 3, 1>(_mm512_extractf64x4_pd(rslt, 1))); } template<> MAVE_INLINE std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>, matrix<double, 3, 1>> operator*(std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&> v1, std::tuple<double, double, double> v2) noexcept { const auto r12 = std::tie(std::get<0>(v1), std::get<1>(v1)) * std::tuple<double, double>(std::get<0>(v2), std::get<1>(v2)); return std::make_tuple(std::get<0>(r12), std::get<1>(r12), std::get<2>(v1) * std::get<2>(v2)); } template<> MAVE_INLINE std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>, matrix<double, 3, 1>, matrix<double, 3, 1>> operator*(std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&> v1, std::tuple<double, double, double, double> v2) noexcept { const auto r12 = std::tie(std::get<0>(v1), std::get<1>(v1)) * std::tuple<double, double>(std::get<0>(v2), std::get<1>(v2)); const auto r34 = std::tie(std::get<2>(v1), std::get<3>(v1)) * std::tuple<double, double>(std::get<2>(v2), std::get<3>(v2)); return std::make_tuple(std::get<0>(r12), std::get<1>(r12), std::get<0>(r34), std::get<1>(r34)); } // assignment ---------------------------------------------------------------- template<> MAVE_INLINE void 
operator*=( std::tuple<matrix<double,3,1>&, matrix<double,3,1>&> v1, std::tuple<double, double> v2) noexcept { const __m512d v11 = _mm512_insertf64x4( _mm512_castpd256_pd512(_mm256_load_pd(std::get<0>(v1).data())), _mm256_load_pd(std::get<1>(v1).data()), 1); const __m512d v22 = _mm512_insertf64x4( _mm512_castpd256_pd512(_mm256_set1_pd(std::get<0>(v2))), _mm256_set1_pd(std::get<1>(v2)), 1); const __m512d rslt = _mm512_mul_pd(v11, v22); _mm256_store_pd(std::get<0>(v1).data(), _mm512_castpd512_pd256(rslt)); _mm256_store_pd(std::get<1>(v1).data(), _mm512_extractf64x4_pd(rslt, 1)); return ; } template<> MAVE_INLINE void operator*=( std::tuple<matrix<double,3,1>&, matrix<double,3,1>&, matrix<double,3,1>&> v1, std::tuple<double, double, double> v2) noexcept { std::tie(std::get<0>(v1), std::get<1>(v1)) *= std::tuple<double, double>(std::get<0>(v2), std::get<1>(v2)); std::get<2>(v1) *= std::get<2>(v2); return ; } template<> MAVE_INLINE void operator*=( std::tuple<matrix<double,3,1>&, matrix<double,3,1>&, matrix<double,3,1>&, matrix<double,3,1>&> v1, std::tuple<double, double, double, double> v2 ) noexcept { std::tie(std::get<0>(v1), std::get<1>(v1)) *= std::tuple<double, double>(std::get<0>(v2), std::get<1>(v2)); std::tie(std::get<2>(v1), std::get<3>(v1)) *= std::tuple<double, double>(std::get<2>(v2), std::get<3>(v2)); return ; } // --------------------------------------------------------------------------- // division operator/ // --------------------------------------------------------------------------- template<> MAVE_INLINE matrix<double, 3, 1> operator/( const matrix<double, 3, 1>& v1, const double v2) noexcept { return _mm256_div_pd(_mm256_load_pd(v1.data()), _mm256_set1_pd(v2)); } template<> MAVE_INLINE std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>> operator/(std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&> v1, std::tuple<double, double> v2) noexcept { const __m512d v11 = _mm512_insertf64x4( 
_mm512_castpd256_pd512(_mm256_load_pd(std::get<0>(v1).data())), _mm256_load_pd(std::get<1>(v1).data()), 1); const __m512d v22 = _mm512_insertf64x4( _mm512_castpd256_pd512(_mm256_set1_pd(std::get<0>(v2))), _mm256_set1_pd(std::get<1>(v2)), 1); const __m512d rslt = _mm512_div_pd(v11, v22); return std::make_tuple(matrix<double, 3, 1>(_mm512_castpd512_pd256(rslt)), matrix<double, 3, 1>(_mm512_extractf64x4_pd(rslt, 1))); } template<> MAVE_INLINE std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>, matrix<double, 3, 1>> operator/(std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&> v1, std::tuple<double, double, double> v2) noexcept { const auto r12 = std::tie(std::get<0>(v1), std::get<1>(v1)) / std::tuple<double, double>(std::get<0>(v2), std::get<1>(v2)); return std::make_tuple(std::get<0>(r12), std::get<1>(r12), std::get<2>(v1) / std::get<2>(v2)); } template<> MAVE_INLINE std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>, matrix<double, 3, 1>, matrix<double, 3, 1>> operator/(std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&, const matrix<double,3,1>&> v1, std::tuple<double, double, double, double> v2) noexcept { const auto r12 = std::tie(std::get<0>(v1), std::get<1>(v1)) / std::tuple<double, double>(std::get<0>(v2), std::get<1>(v2)); const auto r34 = std::tie(std::get<2>(v1), std::get<3>(v1)) / std::tuple<double, double>(std::get<2>(v2), std::get<3>(v2)); return std::make_tuple(std::get<0>(r12), std::get<1>(r12), std::get<0>(r34), std::get<1>(r34)); } // assignment ---------------------------------------------------------------- template<> MAVE_INLINE void operator/=( std::tuple<matrix<double,3,1>&, matrix<double,3,1>&> v1, std::tuple<double, double> v2) noexcept { const __m512d v11 = _mm512_insertf64x4( _mm512_castpd256_pd512(_mm256_load_pd(std::get<0>(v1).data())), _mm256_load_pd(std::get<1>(v1).data()), 1); const __m512d v22 = _mm512_insertf64x4( 
_mm512_castpd256_pd512(_mm256_set1_pd(std::get<0>(v2))), _mm256_set1_pd(std::get<1>(v2)), 1); const __m512d rslt = _mm512_div_pd(v11, v22); _mm256_store_pd(std::get<0>(v1).data(), _mm512_castpd512_pd256(rslt)); _mm256_store_pd(std::get<1>(v1).data(), _mm512_extractf64x4_pd(rslt, 1)); } template<> MAVE_INLINE void operator/=( std::tuple<matrix<double,3,1>&, matrix<double,3,1>&, matrix<double,3,1>&> v1, std::tuple<double, double, double> v2) noexcept { std::tie(std::get<0>(v1), std::get<1>(v1)) /= std::tuple<double, double>(std::get<0>(v2), std::get<1>(v2)); std::get<2>(v1) /= std::get<2>(v2); return ; } template<> MAVE_INLINE void operator/=( std::tuple<matrix<double,3,1>&, matrix<double,3,1>&, matrix<double,3,1>&, matrix<double,3,1>&> v1, std::tuple<double, double, double, double> v2) noexcept { std::tie(std::get<0>(v1), std::get<1>(v1)) /= std::tuple<double, double>(std::get<0>(v2), std::get<1>(v2)); std::tie(std::get<2>(v1), std::get<3>(v1)) /= std::tuple<double, double>(std::get<2>(v2), std::get<3>(v2)); return ; } // --------------------------------------------------------------------------- // length // --------------------------------------------------------------------------- // length_sq ----------------------------------------------------------------- template<> MAVE_INLINE double length_sq(const matrix<double, 3, 1>& v) noexcept { const __m256d arg = _mm256_load_pd(v.data()); alignas(32) double pack[4]; _mm256_store_pd(pack, _mm256_mul_pd(arg, arg)); return pack[0] + pack[1] + pack[2]; } template<> MAVE_INLINE std::tuple<double, double> length_sq( const matrix<double, 3, 1>& v1, const matrix<double, 3, 1>& v2) noexcept { alignas(16) double pack[2]; const __m256d arg1 = _mm256_load_pd(v1.data()); const __m256d arg2 = _mm256_load_pd(v2.data()); // |a1|a2|a3|00| |b1|b2|b3|00| // +--' +--' +--' | | // | .--|-------' | | hadd // | | | .----------+--' // |aa|bb|a3|b3| hadd // | | | | // v v | | // |aa|bb| | | extractf128_pd // |a3|b3|<+--+ const __m256d hadd = 
_mm256_hadd_pd( _mm256_mul_pd(arg1, arg1), _mm256_mul_pd(arg2, arg2)); _mm_store_pd(pack, _mm_add_pd(_mm256_extractf128_pd(hadd, 0), _mm256_extractf128_pd(hadd, 1))); return std::make_tuple(pack[0], pack[1]); } template<> MAVE_INLINE std::tuple<double, double, double> length_sq( const matrix<double, 3, 1>& v1, const matrix<double, 3, 1>& v2, const matrix<double, 3, 1>& v3) noexcept { alignas(32) double pack[4]; const __m256d arg1 = _mm256_load_pd(v1.data()); const __m256d arg2 = _mm256_load_pd(v2.data()); const __m256d arg3 = _mm256_load_pd(v3.data()); const __m512d arg12 = _mm512_insertf64x4(_mm512_castpd256_pd512(arg1), arg2, 1); const __m512d mul12 = _mm512_mul_pd(arg12, arg12); const __m512d mul3x = _mm512_castpd256_pd512(_mm256_mul_pd(arg3, arg3)); // |a1|a2|a3|00|b1|b2|b3|00| |c1|c2|c3|00|xx|xx|xx|xx| // 0 1 2 3 4 5 6 7 8 9 A B C D E F // // 0 4 8 3 1 5 9 7 _mm512_permutex2var_pd // |a1|b1|c1|00|a2|b2|c2|00| // 2 6 A 3 2 6 A 3 _mm512_permutex2var_pd // |a3|b3|c3|00|a3|b3|c3|00| const __m512d abc12 = _mm512_permutex2var_pd(mul12,_mm512_set_epi64( 0x07, 0x09, 0x05, 0x01, 0x03, 0x08, 0x04, 0x00), mul3x); const __m512d abc3x = _mm512_permutex2var_pd(mul12,_mm512_set_epi64( 0x03, 0x0A, 0x06, 0x02, 0x03, 0x0A, 0x06, 0x02), mul3x); _mm256_store_pd(pack, _mm256_add_pd(_mm256_add_pd( _mm512_castpd512_pd256(abc12), _mm512_extractf64x4_pd(abc12, 1) ), _mm512_castpd512_pd256(abc3x))); return std::make_tuple(pack[0], pack[1], pack[2]); } template<> MAVE_INLINE std::tuple<double, double, double, double> length_sq( const matrix<double, 3, 1>& v1, const matrix<double, 3, 1>& v2, const matrix<double, 3, 1>& v3, const matrix<double, 3, 1>& v4) noexcept { alignas(32) double pack[4]; const __m256d arg1 = _mm256_load_pd(v1.data()); const __m256d arg2 = _mm256_load_pd(v2.data()); const __m256d arg3 = _mm256_load_pd(v3.data()); const __m256d arg4 = _mm256_load_pd(v4.data()); const __m512d arg12 = _mm512_insertf64x4(_mm512_castpd256_pd512(arg1), arg2, 1); const __m512d arg34 = 
_mm512_insertf64x4(_mm512_castpd256_pd512(arg3), arg4, 1); const __m512d mul12 = _mm512_mul_pd(arg12, arg12); const __m512d mul34 = _mm512_mul_pd(arg34, arg34); // |a1|a2|a3|00|b1|b2|b3|00| |c1|c2|c3|00|d1|d2|d3|00| // 0 1 2 3 4 5 6 7 8 9 A B C D E F // // 0 4 8 C 1 5 9 D _mm512_permutex2var_pd // |a1|b1|c1|d1|a2|b2|c2|d2| // 2 6 A E 2 6 A E _mm512_permutex2var_pd // |a3|b3|c3|d3|a3|b3|c3|d3| const __m512d abc12 = _mm512_permutex2var_pd(mul12,_mm512_set_epi64( 0x0D, 0x09, 0x05, 0x01, 0x0C, 0x08, 0x04, 0x00), mul34); const __m512d abc3x = _mm512_permutex2var_pd(mul12,_mm512_set_epi64( 0x0E, 0x0A, 0x06, 0x02, 0x0E, 0x0A, 0x06, 0x02), mul34); _mm256_store_pd(pack, _mm256_add_pd(_mm256_add_pd( _mm512_castpd512_pd256(abc12), _mm512_extractf64x4_pd(abc12, 1) ), _mm512_castpd512_pd256(abc3x))); return std::make_tuple(pack[0], pack[1], pack[2], pack[3]); } // length -------------------------------------------------------------------- template<> MAVE_INLINE double length(const matrix<double, 3, 1>& v) noexcept { return std::sqrt(length_sq(v)); } template<> MAVE_INLINE std::tuple<double, double> length( const matrix<double, 3, 1>& v1, const matrix<double, 3, 1>& v2) noexcept { alignas(16) double pack[2]; const __m256d arg1 = _mm256_load_pd(v1.data()); const __m256d arg2 = _mm256_load_pd(v2.data()); // |a1|a2|a3|00| |b1|b2|b3|00| // +--' | | +--' | | // | +--' | | | hadd // | .--|--.----+-----+--' // |aa|bb|a3|b3| pack // | | | | // v v | | // |aa|bb| | | extractf128_pd // |a3|b3|<+--+ const __m256d hadd = _mm256_hadd_pd( _mm256_mul_pd(arg1, arg1), _mm256_mul_pd(arg2, arg2)); _mm_store_pd(pack, _mm_sqrt_pd(_mm_add_pd( _mm256_extractf128_pd(hadd, 0), _mm256_extractf128_pd(hadd, 1)))); return std::make_tuple(pack[0], pack[1]); } template<> MAVE_INLINE std::tuple<double, double, double> length( const matrix<double, 3, 1>& v1, const matrix<double, 3, 1>& v2, const matrix<double, 3, 1>& v3) noexcept { alignas(32) double pack[4]; const __m256d arg1 = _mm256_load_pd(v1.data()); 
const __m256d arg2 = _mm256_load_pd(v2.data());
    const __m256d arg3 = _mm256_load_pd(v3.data());
    const __m512d arg12 =
        _mm512_insertf64x4(_mm512_castpd256_pd512(arg1), arg2, 1);
    const __m512d mul12 = _mm512_mul_pd(arg12, arg12);
    const __m512d mul3x = _mm512_castpd256_pd512(_mm256_mul_pd(arg3, arg3));

    // |a1|a2|a3|00|b1|b2|b3|00| |c1|c2|c3|00|xx|xx|xx|xx|
    //   0  1  2  3  4  5  6  7   8  9  A  B  C  D  E  F
    //
    //   0  4  8  3  1  5  9  7   _mm512_permutex2var_pd
    // |a1|b1|c1|00|a2|b2|c2|00|
    //   2  6  A  3  2  6  A  3   _mm512_permutex2var_pd
    // |a3|b3|c3|00|a3|b3|c3|00|
    const __m512d abc12 = _mm512_permutex2var_pd(mul12,_mm512_set_epi64(
        0x07, 0x09, 0x05, 0x01, 0x03, 0x08, 0x04, 0x00), mul3x);
    const __m512d abc3x = _mm512_permutex2var_pd(mul12,_mm512_set_epi64(
        0x03, 0x0A, 0x06, 0x02, 0x03, 0x0A, 0x06, 0x02), mul3x);

    _mm256_store_pd(pack, _mm256_sqrt_pd(_mm256_add_pd(_mm256_add_pd(
        _mm512_castpd512_pd256(abc12), _mm512_extractf64x4_pd(abc12, 1)
        ), _mm512_castpd512_pd256(abc3x))));
    return std::make_tuple(pack[0], pack[1], pack[2]);
}

// Euclidean norms of four 3D vectors in one pass (AVX-512).
template<>
MAVE_INLINE std::tuple<double, double, double, double> length(
    const matrix<double, 3, 1>& v1, const matrix<double, 3, 1>& v2,
    const matrix<double, 3, 1>& v3, const matrix<double, 3, 1>& v4) noexcept
{
    alignas(32) double pack[4];
    const __m256d arg1 = _mm256_load_pd(v1.data());
    const __m256d arg2 = _mm256_load_pd(v2.data());
    const __m256d arg3 = _mm256_load_pd(v3.data());
    const __m256d arg4 = _mm256_load_pd(v4.data());
    const __m512d arg12 =
        _mm512_insertf64x4(_mm512_castpd256_pd512(arg1), arg2, 1);
    const __m512d arg34 =
        _mm512_insertf64x4(_mm512_castpd256_pd512(arg3), arg4, 1);
    const __m512d mul12 = _mm512_mul_pd(arg12, arg12);
    const __m512d mul34 = _mm512_mul_pd(arg34, arg34);

    // |a1|a2|a3|00|b1|b2|b3|00| |c1|c2|c3|00|d1|d2|d3|00|
    //   0  1  2  3  4  5  6  7   8  9  A  B  C  D  E  F
    //
    //   0  4  8  C  1  5  9  D   _mm512_permutex2var_pd
    // |a1|b1|c1|d1|a2|b2|c2|d2|
    //   2  6  A  E  2  6  A  E   _mm512_permutex2var_pd
    // |a3|b3|c3|d3|a3|b3|c3|d3|
    const __m512d abc12 =
        _mm512_permutex2var_pd(mul12,_mm512_set_epi64(
            0x0D, 0x09, 0x05, 0x01, 0x0C, 0x08, 0x04, 0x00), mul34);
    const __m512d abc3x =
        _mm512_permutex2var_pd(mul12,_mm512_set_epi64(
            0x0E, 0x0A, 0x06, 0x02, 0x0E, 0x0A, 0x06, 0x02), mul34);

    _mm256_store_pd(pack, _mm256_sqrt_pd(_mm256_add_pd(_mm256_add_pd(
        _mm512_castpd512_pd256(abc12), _mm512_extractf64x4_pd(abc12, 1)
        ), _mm512_castpd512_pd256(abc3x))));
    return std::make_tuple(pack[0], pack[1], pack[2], pack[3]);
}

// rlength -------------------------------------------------------------------

// Reciprocal length 1/|v| of a single 3D vector.
template<>
MAVE_INLINE double rlength(const matrix<double, 3, 1>& v) noexcept
{
    return 1.0 / std::sqrt(length_sq(v));
}

// Reciprocal lengths of two 3D vectors; one packed sqrt + div for both.
template<>
MAVE_INLINE std::tuple<double, double>
rlength(const matrix<double, 3, 1>& v1, const matrix<double, 3, 1>& v2) noexcept
{
    alignas(16) double pack[2];
    const __m256d arg1 = _mm256_load_pd(v1.data());
    const __m256d arg2 = _mm256_load_pd(v2.data());

    // |a1|a2|a3|00| |b1|b2|b3|00|
    //  +--' +--'     +--'  |  |
    //  | .--|-------'      |  |    hadd
    //  | |  |  .----------+--'
    // |aa|bb|a3|b3|  hadd
    //  |  |  |  |
    //  v  v  |  |
    // |aa|bb| |  |   extractf128_pd
    // |a3|b3|<+--+
    const __m256d hadd = _mm256_hadd_pd(
        _mm256_mul_pd(arg1, arg1), _mm256_mul_pd(arg2, arg2));
    _mm_store_pd(pack, _mm_div_pd(_mm_set1_pd(1.0), _mm_sqrt_pd(
        _mm_add_pd(_mm256_extractf128_pd(hadd, 0),
                   _mm256_extractf128_pd(hadd, 1)))));
    return std::make_tuple(pack[0], pack[1]);
}

// Reciprocal lengths of three 3D vectors in one pass (AVX-512).
template<>
MAVE_INLINE std::tuple<double, double, double>
rlength(const matrix<double, 3, 1>& v1, const matrix<double, 3, 1>& v2,
        const matrix<double, 3, 1>& v3) noexcept
{
    alignas(32) double pack[4];
    const __m256d arg1 = _mm256_load_pd(v1.data());
    const __m256d arg2 = _mm256_load_pd(v2.data());
    const __m256d arg3 = _mm256_load_pd(v3.data());
    const __m512d arg12 =
        _mm512_insertf64x4(_mm512_castpd256_pd512(arg1), arg2, 1);
    const __m512d mul12 = _mm512_mul_pd(arg12, arg12);
    const __m512d mul3x = _mm512_castpd256_pd512(_mm256_mul_pd(arg3, arg3));

    // |a1|a2|a3|00|b1|b2|b3|00| |c1|c2|c3|00|xx|xx|xx|xx|
    //   0  1  2  3  4  5  6  7   8  9  A  B  C  D  E  F
    //
    //   0  4  8  3  1  5  9  7   _mm512_permutex2var_pd
    // |a1|b1|c1|00|a2|b2|c2|00|
    //   2  6  A  3  2  6  A  3   _mm512_permutex2var_pd
    // |a3|b3|c3|00|a3|b3|c3|00|
    const __m512d abc12 = _mm512_permutex2var_pd(mul12,_mm512_set_epi64(
        0x07, 0x09, 0x05, 0x01, 0x03, 0x08, 0x04, 0x00), mul3x);
    const __m512d abc3x = _mm512_permutex2var_pd(mul12,_mm512_set_epi64(
        0x03, 0x0A, 0x06, 0x02, 0x03, 0x0A, 0x06, 0x02), mul3x);

    _mm256_store_pd(pack, _mm256_div_pd(_mm256_set1_pd(1.0), _mm256_sqrt_pd(
        _mm256_add_pd(_mm256_add_pd(
        _mm512_castpd512_pd256(abc12), _mm512_extractf64x4_pd(abc12, 1)
        ), _mm512_castpd512_pd256(abc3x)))));
    return std::make_tuple(pack[0], pack[1], pack[2]);
}

// Reciprocal lengths of four 3D vectors in one pass (AVX-512).
template<>
MAVE_INLINE std::tuple<double, double, double, double>
rlength(const matrix<double, 3, 1>& v1, const matrix<double, 3, 1>& v2,
        const matrix<double, 3, 1>& v3, const matrix<double, 3, 1>& v4) noexcept
{
    alignas(32) double pack[4];
    const __m256d arg1 = _mm256_load_pd(v1.data());
    const __m256d arg2 = _mm256_load_pd(v2.data());
    const __m256d arg3 = _mm256_load_pd(v3.data());
    const __m256d arg4 = _mm256_load_pd(v4.data());
    const __m512d arg12 =
        _mm512_insertf64x4(_mm512_castpd256_pd512(arg1), arg2, 1);
    const __m512d arg34 =
        _mm512_insertf64x4(_mm512_castpd256_pd512(arg3), arg4, 1);
    const __m512d mul12 = _mm512_mul_pd(arg12, arg12);
    const __m512d mul34 = _mm512_mul_pd(arg34, arg34);

    // |a1|a2|a3|00|b1|b2|b3|00| |c1|c2|c3|00|d1|d2|d3|00|
    //   0  1  2  3  4  5  6  7   8  9  A  B  C  D  E  F
    //
    //   0  4  8  C  1  5  9  D   _mm512_permutex2var_pd
    // |a1|b1|c1|d1|a2|b2|c2|d2|
    //   2  6  A  E  2  6  A  E   _mm512_permutex2var_pd
    // |a3|b3|c3|d3|a3|b3|c3|d3|
    const __m512d abc12 = _mm512_permutex2var_pd(mul12,_mm512_set_epi64(
        0x0D, 0x09, 0x05, 0x01, 0x0C, 0x08, 0x04, 0x00), mul34);
    const __m512d abc3x = _mm512_permutex2var_pd(mul12,_mm512_set_epi64(
        0x0E, 0x0A, 0x06, 0x02, 0x0E, 0x0A, 0x06, 0x02), mul34);

    _mm256_store_pd(pack, _mm256_div_pd(_mm256_set1_pd(1.0), _mm256_sqrt_pd(
        _mm256_add_pd(_mm256_add_pd(
        _mm512_castpd512_pd256(abc12),
_mm512_extractf64x4_pd(abc12, 1)
        ), _mm512_castpd512_pd256(abc3x)))));
    return std::make_tuple(pack[0], pack[1], pack[2], pack[3]);
}

// regularize ----------------------------------------------------------------

// Returns {unit vector, length} for a single 3D vector.
template<>
MAVE_INLINE std::pair<matrix<double, 3, 1>, double>
regularize(const matrix<double, 3, 1>& v) noexcept
{
    const auto l = length(v);
    return std::make_pair(v * (1.0 / l), l);
}

// Normalizes two 3D vectors at once; returns {unit vector, length} pairs.
// `pack` is reused: first for the reciprocal lengths, then for the lengths.
template<> MAVE_INLINE
std::tuple<std::pair<matrix<double, 3, 1>, double>,
           std::pair<matrix<double, 3, 1>, double>>
regularize(const matrix<double, 3, 1>& v1, const matrix<double, 3, 1>& v2
           ) noexcept
{
    alignas(16) double pack[2];
    const __m256d arg1 = _mm256_load_pd(v1.data());
    const __m256d arg2 = _mm256_load_pd(v2.data());

    // |a1|a2|a3|00| |b1|b2|b3|00|
    //  +--'  |       +--'  |
    //  |  +--'   |   |     |      hadd
    //  |  .--|--.----+-----+--'
    // |aa|bb|a3|b3|  pack
    //  |  |  |  |
    //  v  v  |  |
    // |aa|bb| |  |   extractf128_pd
    // |a3|b3|<+--+
    const __m256d hadd = _mm256_hadd_pd(
        _mm256_mul_pd(arg1, arg1), _mm256_mul_pd(arg2, arg2));
    const __m128d len = _mm_sqrt_pd(_mm_add_pd(
        _mm256_extractf128_pd(hadd, 0), _mm256_extractf128_pd(hadd, 1)));
    _mm_store_pd(pack, _mm_div_pd(_mm_set1_pd(1.0), len));
    const __m256d rv1 = _mm256_mul_pd(arg1, _mm256_set1_pd(pack[0]));
    const __m256d rv2 = _mm256_mul_pd(arg2, _mm256_set1_pd(pack[1]));
    _mm_store_pd(pack, len);
    return std::make_tuple(std::make_pair(matrix<double, 3, 1>(rv1), pack[0]),
                           std::make_pair(matrix<double, 3, 1>(rv2), pack[1]));
}

// Normalizes three 3D vectors at once (AVX-512).
template<> MAVE_INLINE
std::tuple<std::pair<matrix<double, 3, 1>, double>,
           std::pair<matrix<double, 3, 1>, double>,
           std::pair<matrix<double, 3, 1>, double>>
regularize(const matrix<double, 3, 1>& v1, const matrix<double, 3, 1>& v2,
           const matrix<double, 3, 1>& v3) noexcept
{
    alignas(32) double pack[4];
    const __m256d arg1 = _mm256_load_pd(v1.data());
    const __m256d arg2 = _mm256_load_pd(v2.data());
    const __m256d arg3 = _mm256_load_pd(v3.data());
    const __m512d arg12 =
        _mm512_insertf64x4(_mm512_castpd256_pd512(arg1), arg2, 1);
    const __m512d mul12 = _mm512_mul_pd(arg12, arg12);
    const __m512d mul3x = _mm512_castpd256_pd512(_mm256_mul_pd(arg3, arg3));

    // |a1|a2|a3|00|b1|b2|b3|00| |c1|c2|c3|00|xx|xx|xx|xx|
    //   0  1  2  3  4  5  6  7   8  9  A  B  C  D  E  F
    //
    //   0  4  8  3  1  5  9  7   _mm512_permutex2var_pd
    // |a1|b1|c1|00|a2|b2|c2|00|
    //   2  6  A  3  2  6  A  3   _mm512_permutex2var_pd
    // |a3|b3|c3|00|a3|b3|c3|00|
    const __m512d abc12 = _mm512_permutex2var_pd(mul12,_mm512_set_epi64(
        0x07, 0x09, 0x05, 0x01, 0x03, 0x08, 0x04, 0x00), mul3x);
    const __m512d abc3x = _mm512_permutex2var_pd(mul12,_mm512_set_epi64(
        0x03, 0x0A, 0x06, 0x02, 0x03, 0x0A, 0x06, 0x02), mul3x);

    const __m256d len = _mm256_sqrt_pd(_mm256_add_pd(_mm256_add_pd(
        _mm512_castpd512_pd256(abc12), _mm512_extractf64x4_pd(abc12, 1)
        ), _mm512_castpd512_pd256(abc3x)));
    _mm256_store_pd(pack, _mm256_div_pd(_mm256_set1_pd(1.0), len));
    const __m256d rv1 = _mm256_mul_pd(arg1, _mm256_set1_pd(pack[0]));
    const __m256d rv2 = _mm256_mul_pd(arg2, _mm256_set1_pd(pack[1]));
    const __m256d rv3 = _mm256_mul_pd(arg3, _mm256_set1_pd(pack[2]));
    _mm256_store_pd(pack, len);
    return std::make_tuple(std::make_pair(matrix<double, 3, 1>(rv1), pack[0]),
                           std::make_pair(matrix<double, 3, 1>(rv2), pack[1]),
                           std::make_pair(matrix<double, 3, 1>(rv3), pack[2]));
}

// Normalizes four 3D vectors at once (AVX-512).
template<> MAVE_INLINE
std::tuple<std::pair<matrix<double, 3, 1>, double>,
           std::pair<matrix<double, 3, 1>, double>,
           std::pair<matrix<double, 3, 1>, double>,
           std::pair<matrix<double, 3, 1>, double>>
regularize(const matrix<double, 3, 1>& v1, const matrix<double, 3, 1>& v2,
           const matrix<double, 3, 1>& v3, const matrix<double, 3, 1>& v4
           ) noexcept
{
    alignas(32) double pack[4];
    const __m256d arg1 = _mm256_load_pd(v1.data());
    const __m256d arg2 = _mm256_load_pd(v2.data());
    const __m256d arg3 = _mm256_load_pd(v3.data());
    const __m256d arg4 = _mm256_load_pd(v4.data());
    const __m512d arg12 =
        _mm512_insertf64x4(_mm512_castpd256_pd512(arg1), arg2, 1);
    const __m512d arg34 =
        _mm512_insertf64x4(_mm512_castpd256_pd512(arg3), arg4, 1);
    const __m512d mul12 = _mm512_mul_pd(arg12, arg12);
    const __m512d mul34 = _mm512_mul_pd(arg34, arg34);

    // |a1|a2|a3|00|b1|b2|b3|00| |c1|c2|c3|00|d1|d2|d3|00|
    //   0  1  2  3  4  5  6  7   8  9  A  B  C  D  E  F
    //
    //   0  4  8  C  1  5  9  D   _mm512_permutex2var_pd
    // |a1|b1|c1|d1|a2|b2|c2|d2|
    //   2  6  A  E  2  6  A  E   _mm512_permutex2var_pd
    // |a3|b3|c3|d3|a3|b3|c3|d3|
    const __m512d abc12 = _mm512_permutex2var_pd(mul12,_mm512_set_epi64(
        0x0D, 0x09, 0x05, 0x01, 0x0C, 0x08, 0x04, 0x00), mul34);
    const __m512d abc3x = _mm512_permutex2var_pd(mul12,_mm512_set_epi64(
        0x0E, 0x0A, 0x06, 0x02, 0x0E, 0x0A, 0x06, 0x02), mul34);

    const __m256d len = _mm256_sqrt_pd(_mm256_add_pd(_mm256_add_pd(
        _mm512_castpd512_pd256(abc12), _mm512_extractf64x4_pd(abc12, 1)
        ), _mm512_castpd512_pd256(abc3x)));
    _mm256_store_pd(pack, _mm256_div_pd(_mm256_set1_pd(1.0), len));
    const __m256d rv1 = _mm256_mul_pd(arg1, _mm256_set1_pd(pack[0]));
    const __m256d rv2 = _mm256_mul_pd(arg2, _mm256_set1_pd(pack[1]));
    const __m256d rv3 = _mm256_mul_pd(arg3, _mm256_set1_pd(pack[2]));
    const __m256d rv4 = _mm256_mul_pd(arg4, _mm256_set1_pd(pack[3]));
    _mm256_store_pd(pack, len);
    return std::make_tuple(std::make_pair(matrix<double, 3, 1>(rv1), pack[0]),
                           std::make_pair(matrix<double, 3, 1>(rv2), pack[1]),
                           std::make_pair(matrix<double, 3, 1>(rv3), pack[2]),
                           std::make_pair(matrix<double, 3, 1>(rv4), pack[3]));
}

// ---------------------------------------------------------------------------
// math functions
// ---------------------------------------------------------------------------

// Element-wise maximum of two 3D vectors.
template<>
MAVE_INLINE matrix<double, 3, 1> max(
    const matrix<double, 3, 1>& v1, const matrix<double, 3, 1>& v2) noexcept
{
    return _mm256_max_pd(_mm256_load_pd(v1.data()), _mm256_load_pd(v2.data()));
}

// Element-wise maxima of two vector pairs, packed into one 512-bit op.
template<> MAVE_INLINE
std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>>
max(std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&> v1,
    std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&> v2
    ) noexcept
{
    const __m512d v11 = _mm512_insertf64x4(
_mm512_castpd256_pd512(_mm256_load_pd(std::get<0>(v1).data())),
        _mm256_load_pd(std::get<1>(v1).data()), 1);
    const __m512d v22 = _mm512_insertf64x4(
        _mm512_castpd256_pd512(_mm256_load_pd(std::get<0>(v2).data())),
        _mm256_load_pd(std::get<1>(v2).data()), 1);
    const __m512d rslt = _mm512_max_pd(v11, v22);
    return std::make_tuple(
        matrix<double, 3, 1>(_mm512_castpd512_pd256(rslt)),
        matrix<double, 3, 1>(_mm512_extractf64x4_pd(rslt, 1)));
}

// Three pairs: reuse the 2-pair kernel, handle the remainder scalar-wise.
template<> MAVE_INLINE
std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>, matrix<double, 3, 1>>
max(std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&,
               const matrix<double,3,1>&> v1,
    std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&,
               const matrix<double,3,1>&> v2) noexcept
{
    const auto r12 = max(std::tie(std::get<0>(v1), std::get<1>(v1)),
                         std::tie(std::get<0>(v2), std::get<1>(v2)));
    return std::make_tuple(std::get<0>(r12), std::get<1>(r12),
                           max(std::get<2>(v1), std::get<2>(v2)));
}

// Four pairs: two invocations of the 2-pair kernel.
template<> MAVE_INLINE
std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>,
           matrix<double, 3, 1>, matrix<double, 3, 1>>
max(std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&,
               const matrix<double,3,1>&, const matrix<double,3,1>&> v1,
    std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&,
               const matrix<double,3,1>&, const matrix<double,3,1>&> v2
    ) noexcept
{
    const auto r12 = max(std::tie(std::get<0>(v1), std::get<1>(v1)),
                         std::tie(std::get<0>(v2), std::get<1>(v2)));
    const auto r34 = max(std::tie(std::get<2>(v1), std::get<3>(v1)),
                         std::tie(std::get<2>(v2), std::get<3>(v2)));
    return std::make_tuple(std::get<0>(r12), std::get<1>(r12),
                           std::get<0>(r34), std::get<1>(r34));
}

// Element-wise minimum of two 3D vectors.
template<>
MAVE_INLINE matrix<double, 3, 1> min(
    const matrix<double, 3, 1>& v1, const matrix<double, 3, 1>& v2) noexcept
{
    return _mm256_min_pd(_mm256_load_pd(v1.data()), _mm256_load_pd(v2.data()));
}

// Element-wise minima of two vector pairs, packed into one 512-bit op.
template<> MAVE_INLINE
std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>>
min(std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&> v1,
    std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&> v2
    ) noexcept
{
    const __m512d v11 = _mm512_insertf64x4(
        _mm512_castpd256_pd512(_mm256_load_pd(std::get<0>(v1).data())),
        _mm256_load_pd(std::get<1>(v1).data()), 1);
    const __m512d v22 = _mm512_insertf64x4(
        _mm512_castpd256_pd512(_mm256_load_pd(std::get<0>(v2).data())),
        _mm256_load_pd(std::get<1>(v2).data()), 1);
    const __m512d rslt = _mm512_min_pd(v11, v22);
    return std::make_tuple(
        matrix<double, 3, 1>(_mm512_castpd512_pd256(rslt)),
        matrix<double, 3, 1>(_mm512_extractf64x4_pd(rslt, 1)));
}

// Three pairs: reuse the 2-pair kernel, handle the remainder scalar-wise.
template<> MAVE_INLINE
std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>, matrix<double, 3, 1>>
min(std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&,
               const matrix<double,3,1>&> v1,
    std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&,
               const matrix<double,3,1>&> v2) noexcept
{
    const auto r12 = min(std::tie(std::get<0>(v1), std::get<1>(v1)),
                         std::tie(std::get<0>(v2), std::get<1>(v2)));
    return std::make_tuple(std::get<0>(r12), std::get<1>(r12),
                           min(std::get<2>(v1), std::get<2>(v2)));
}

// Four pairs: two invocations of the 2-pair kernel.
template<> MAVE_INLINE
std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>,
           matrix<double, 3, 1>, matrix<double, 3, 1>>
min(std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&,
               const matrix<double,3,1>&, const matrix<double,3,1>&> v1,
    std::tuple<const matrix<double,3,1>&, const matrix<double,3,1>&,
               const matrix<double,3,1>&, const matrix<double,3,1>&> v2
    ) noexcept
{
    const auto r12 = min(std::tie(std::get<0>(v1), std::get<1>(v1)),
                         std::tie(std::get<0>(v2), std::get<1>(v2)));
    const auto r34 = min(std::tie(std::get<2>(v1), std::get<3>(v1)),
                         std::tie(std::get<2>(v2), std::get<3>(v2)));
    return std::make_tuple(std::get<0>(r12), std::get<1>(r12),
                           std::get<0>(r34), std::get<1>(r34));
}

// floor ---------------------------------------------------------------------

// Element-wise floor; 2/3/4-argument forms simply fan out to the unary one.
template<>
MAVE_INLINE matrix<double, 3, 1> floor(const matrix<double, 3, 1>& v) noexcept
{
    return _mm256_floor_pd(_mm256_load_pd(v.data()));
}
template<> MAVE_INLINE
std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>>
floor(const matrix<double, 3, 1>& v1, const matrix<double, 3, 1>& v2) noexcept
{
    return std::make_tuple(floor(v1), floor(v2));
}
template<> MAVE_INLINE
std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>, matrix<double, 3, 1>>
floor(const matrix<double, 3, 1>& v1, const matrix<double, 3, 1>& v2,
      const matrix<double, 3, 1>& v3) noexcept
{
    return std::make_tuple(floor(v1), floor(v2), floor(v3));
}
template<> MAVE_INLINE
std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>,
           matrix<double, 3, 1>, matrix<double, 3, 1>>
floor(const matrix<double, 3, 1>& v1, const matrix<double, 3, 1>& v2,
      const matrix<double, 3, 1>& v3, const matrix<double, 3, 1>& v4) noexcept
{
    return std::make_tuple(floor(v1), floor(v2), floor(v3), floor(v4));
}

// ceil ----------------------------------------------------------------------

// Element-wise ceil; multi-argument forms fan out to the unary one.
template<>
MAVE_INLINE matrix<double, 3, 1> ceil(const matrix<double, 3, 1>& v) noexcept
{
    return _mm256_ceil_pd(_mm256_load_pd(v.data()));
}
template<> MAVE_INLINE
std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>>
ceil(const matrix<double, 3, 1>& v1, const matrix<double, 3, 1>& v2) noexcept
{
    return std::make_tuple(ceil(v1), ceil(v2));
}
template<> MAVE_INLINE
std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>, matrix<double, 3, 1>>
ceil(const matrix<double, 3, 1>& v1, const matrix<double, 3, 1>& v2,
     const matrix<double, 3, 1>& v3) noexcept
{
    return std::make_tuple(ceil(v1), ceil(v2), ceil(v3));
}
template<> MAVE_INLINE
std::tuple<matrix<double, 3, 1>, matrix<double, 3, 1>,
           matrix<double, 3, 1>, matrix<double, 3, 1>>
ceil(const matrix<double, 3, 1>& v1, const matrix<double, 3, 1>& v2,
     const matrix<double, 3, 1>& v3, const matrix<double, 3, 1>& v4) noexcept
{
    return std::make_tuple(ceil(v1), ceil(v2), ceil(v3), ceil(v4));
}

// ---------------------------------------------------------------------------
// dot_product
// ---------------------------------------------------------------------------

// Inner product of two 3D vectors; only lanes 0..2 of the product are summed.
template<>
MAVE_INLINE double dot_product(
    const matrix<double, 3, 1>& lhs, const matrix<double, 3, 1>& rhs) noexcept
{
    alignas(32) double pack[4];
    _mm256_store_pd(pack, _mm256_mul_pd(
        _mm256_load_pd(lhs.data()), _mm256_load_pd(rhs.data())));
    return pack[0] + pack[1] + pack[2];
}

// Two dot products at once via horizontal add.
template<>
MAVE_INLINE std::tuple<double, double> dot_product(
    std::tuple<const matrix<double, 3, 1>&, const matrix<double, 3, 1>&> lhs,
    std::tuple<const matrix<double, 3, 1>&, const matrix<double, 3, 1>&> rhs
    ) noexcept
{
    alignas(16) double pack[2];
    // |a1|a2|a3|00| |b1|b2|b3|00|
    //  +--' +--'     +--'  |  |
    //  | .--|-------'      |  |   hadd
    //  | |  |  .----------+--'
    // |aa|bb|a3|b3|  hadd
    //  |  |  |  |
    //  v  v  |  |
    // |aa|bb| |  |   extractf128_pd
    // |a3|b3|<+--+
    const __m256d hadd = _mm256_hadd_pd(
        _mm256_mul_pd(_mm256_load_pd(std::get<0>(lhs).data()),
                      _mm256_load_pd(std::get<0>(rhs).data())),
        _mm256_mul_pd(_mm256_load_pd(std::get<1>(lhs).data()),
                      _mm256_load_pd(std::get<1>(rhs).data())));
    _mm_store_pd(pack, _mm_add_pd(_mm256_extractf128_pd(hadd, 0),
                                  _mm256_extractf128_pd(hadd, 1)));
    return std::make_tuple(pack[0], pack[1]);
}

// Three dot products at once (AVX-512); same permute scheme as length_sq.
template<>
MAVE_INLINE std::tuple<double, double, double> dot_product(
    std::tuple<const matrix<double, 3, 1>&, const matrix<double, 3, 1>&,
               const matrix<double, 3, 1>&> lhs,
    std::tuple<const matrix<double, 3, 1>&, const matrix<double, 3, 1>&,
               const matrix<double, 3, 1>&> rhs) noexcept
{
    alignas(32) double pack[4];
    const __m512d argl12 = _mm512_insertf64x4(_mm512_castpd256_pd512(
        _mm256_load_pd(std::get<0>(lhs).data())),
        _mm256_load_pd(std::get<1>(lhs).data()), 1);
    const __m512d argr12 = _mm512_insertf64x4(_mm512_castpd256_pd512(
        _mm256_load_pd(std::get<0>(rhs).data())),
        _mm256_load_pd(std::get<1>(rhs).data()), 1);
    const __m512d mul12 = _mm512_mul_pd(argl12, argr12);
    const __m512d mul3x = _mm512_castpd256_pd512(_mm256_mul_pd(
        _mm256_load_pd(std::get<2>(lhs).data()),
        _mm256_load_pd(std::get<2>(rhs).data())));

    // |a1|a2|a3|00|b1|b2|b3|00| |c1|c2|c3|00|xx|xx|xx|xx|
    //   0  1  2  3  4  5  6  7   8  9  A  B  C  D  E  F
    //
    //   0  4  8  3  1  5  9  7   _mm512_permutex2var_pd
    // |a1|b1|c1|00|a2|b2|c2|00|
    //   2  6  A  3  2  6  A  3   _mm512_permutex2var_pd
    // |a3|b3|c3|00|a3|b3|c3|00|
    const __m512d abc12 = _mm512_permutex2var_pd(mul12,_mm512_set_epi64(
        0x07, 0x09, 0x05, 0x01, 0x03, 0x08, 0x04, 0x00), mul3x);
    const __m512d abc3x = _mm512_permutex2var_pd(mul12,_mm512_set_epi64(
        0x03, 0x0A, 0x06, 0x02, 0x03, 0x0A, 0x06, 0x02), mul3x);

    _mm256_store_pd(pack, _mm256_add_pd(_mm256_add_pd(
        _mm512_castpd512_pd256(abc12), _mm512_extractf64x4_pd(abc12, 1)
        ), _mm512_castpd512_pd256(abc3x)));
    return std::make_tuple(pack[0], pack[1], pack[2]);
}

// Four dot products at once (AVX-512).
template<>
MAVE_INLINE std::tuple<double, double, double, double> dot_product(
    std::tuple<const matrix<double, 3, 1>&, const matrix<double, 3, 1>&,
               const matrix<double, 3, 1>&, const matrix<double, 3, 1>&> lhs,
    std::tuple<const matrix<double, 3, 1>&, const matrix<double, 3, 1>&,
               const matrix<double, 3, 1>&, const matrix<double, 3, 1>&> rhs
    ) noexcept
{
    alignas(32) double pack[4];
    const __m512d argl12 = _mm512_insertf64x4(_mm512_castpd256_pd512(
        _mm256_load_pd(std::get<0>(lhs).data())),
        _mm256_load_pd(std::get<1>(lhs).data()), 1);
    const __m512d argl34 = _mm512_insertf64x4(_mm512_castpd256_pd512(
        _mm256_load_pd(std::get<2>(lhs).data())),
        _mm256_load_pd(std::get<3>(lhs).data()), 1);
    const __m512d argr12 = _mm512_insertf64x4(_mm512_castpd256_pd512(
        _mm256_load_pd(std::get<0>(rhs).data())),
        _mm256_load_pd(std::get<1>(rhs).data()), 1);
    const __m512d argr34 = _mm512_insertf64x4(_mm512_castpd256_pd512(
        _mm256_load_pd(std::get<2>(rhs).data())),
        _mm256_load_pd(std::get<3>(rhs).data()), 1);
    const __m512d mul12 = _mm512_mul_pd(argl12, argr12);
    const __m512d mul34 = _mm512_mul_pd(argl34, argr34);

    // |a1|a2|a3|00|b1|b2|b3|00| |c1|c2|c3|00|d1|d2|d3|00|
    //   0  1  2  3  4  5  6  7   8  9  A  B  C  D  E  F
    //
    //   0  4  8  C  1  5  9  D   _mm512_permutex2var_pd
    // |a1|b1|c1|d1|a2|b2|c2|d2|
    //   2  6  A  E  2  6  A  E   _mm512_permutex2var_pd
    // |a3|b3|c3|d3|a3|b3|c3|d3|
    const __m512d abc12 = _mm512_permutex2var_pd(mul12,_mm512_set_epi64(
        0x0D, 0x09, 0x05, 0x01, 0x0C, 0x08, 0x04, 0x00), mul34);
    const __m512d abc3x = _mm512_permutex2var_pd(mul12,_mm512_set_epi64(
        0x0E, 0x0A, 0x06, 0x02, 0x0E, 0x0A, 0x06, 0x02), mul34);

    _mm256_store_pd(pack, _mm256_add_pd(_mm256_add_pd(
        _mm512_castpd512_pd256(abc12), _mm512_extractf64x4_pd(abc12, 1)
        ), _mm512_castpd512_pd256(abc3x)));
    return std::make_tuple(pack[0], pack[1], pack[2], pack[3]);
}

// ---------------------------------------------------------------------------
// cross_product
// ---------------------------------------------------------------------------

// 3D cross product via the "rotate both operands" trick: rotate each input by
// one lane, multiply crosswise, subtract, then rotate the result back.
template<>
MAVE_INLINE matrix<double, 3, 1> cross_product(
    const matrix<double, 3, 1>& x, const matrix<double, 3, 1>& y) noexcept
{
    const __m256d arg1 = _mm256_load_pd(x.data());
    const __m256d arg2 = _mm256_load_pd(y.data());

    // lane order 3 0 2 1 --> 0b 11 00 10 01 == 201
    const __m256d x_ = _mm256_permute4x64_pd(arg1, 201u);
    const __m256d y_ = _mm256_permute4x64_pd(arg2, 201u);

    const __m256d z =
#ifdef __FMA__
        // fused multiply-subtract saves one rounding step when FMA is enabled
        _mm256_fmsub_pd(arg1, y_, _mm256_mul_pd(arg2, x_));
#else
        _mm256_sub_pd(_mm256_mul_pd(arg1, y_), _mm256_mul_pd(arg2, x_));
#endif
    return _mm256_permute4x64_pd(z, 201u);
}

} // mave
#endif // MAVE_MATH_MATRIX_HPP
// Copyright (c) 2019 The Crown Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <clientversion.h>
#include <flatfile.h>
#include <streams.h>
#include <test/util/setup_common.h>
#include <util/system.h>

#include <boost/test/unit_test.hpp>

BOOST_FIXTURE_TEST_SUITE(flatfile_tests, BasicTestingSetup)

// FileName() must produce "<prefix><5-digit file number>.dat" under the
// sequence's base directory; the byte offset inside the pos does not
// participate in the name.
BOOST_AUTO_TEST_CASE(flatfile_filename)
{
    const auto data_dir = GetDataDir();

    FlatFilePos pos(456, 789);

    FlatFileSeq seq1(data_dir, "a", 16 * 1024);
    BOOST_CHECK_EQUAL(seq1.FileName(pos), data_dir / "a00456.dat");

    // Nested base directory: the prefix is appended inside it.
    FlatFileSeq seq2(data_dir / "a", "b", 16 * 1024);
    BOOST_CHECK_EQUAL(seq2.FileName(pos), data_dir / "a" / "b00456.dat");
}

// Exercises Open(): sequential writes, rejection of writes through a
// read-only handle, and reads both from the start and from an offset.
BOOST_AUTO_TEST_CASE(flatfile_open)
{
    const auto data_dir = GetDataDir();
    FlatFileSeq seq(data_dir, "a", 16 * 1024);

    std::string line1("A purely peer-to-peer version of electronic cash would allow online "
                      "payments to be sent directly from one party to another without going "
                      "through a financial institution.");
    std::string line2("Digital signatures provide part of the solution, but the main benefits are "
                      "lost if a trusted third party is still required to prevent double-spending.");

    // pos2 is the serialized size of line1, i.e. where line2 will start.
    size_t pos1 = 0;
    size_t pos2 = pos1 + GetSerializeSize(line1, CLIENT_VERSION);

    // Write first line to file.
    {
        CAutoFile file(seq.Open(FlatFilePos(0, pos1)), SER_DISK, CLIENT_VERSION);
        file << LIMITED_STRING(line1, 256);
    }

    // Attempt to append to file opened in read-only mode.
    {
        CAutoFile file(seq.Open(FlatFilePos(0, pos2), true), SER_DISK, CLIENT_VERSION);
        BOOST_CHECK_THROW(file << LIMITED_STRING(line2, 256), std::ios_base::failure);
    }

    // Append second line to file.
    {
        CAutoFile file(seq.Open(FlatFilePos(0, pos2)), SER_DISK, CLIENT_VERSION);
        file << LIMITED_STRING(line2, 256);
    }

    // Read text from file in read-only mode.
    {
        std::string text;
        CAutoFile file(seq.Open(FlatFilePos(0, pos1), true), SER_DISK, CLIENT_VERSION);

        file >> LIMITED_STRING(text, 256);
        BOOST_CHECK_EQUAL(text, line1);

        file >> LIMITED_STRING(text, 256);
        BOOST_CHECK_EQUAL(text, line2);
    }

    // Read text from file with position offset.
    {
        std::string text;
        CAutoFile file(seq.Open(FlatFilePos(0, pos2)), SER_DISK, CLIENT_VERSION);

        file >> LIMITED_STRING(text, 256);
        BOOST_CHECK_EQUAL(text, line2);
    }

    // Ensure another file in the sequence has no data.
    {
        std::string text;
        CAutoFile file(seq.Open(FlatFilePos(1, pos2)), SER_DISK, CLIENT_VERSION);
        BOOST_CHECK_THROW(file >> LIMITED_STRING(text, 256), std::ios_base::failure);
    }
}

// Allocate() grows the file in whole chunks (100 bytes here): the return value
// is the number of bytes newly allocated, 0 when the request already fits.
BOOST_AUTO_TEST_CASE(flatfile_allocate)
{
    const auto data_dir = GetDataDir();
    FlatFileSeq seq(data_dir, "a", 100);

    bool out_of_space;

    BOOST_CHECK_EQUAL(seq.Allocate(FlatFilePos(0, 0), 1, out_of_space), 100U);
    BOOST_CHECK_EQUAL(fs::file_size(seq.FileName(FlatFilePos(0, 0))), 100U);
    BOOST_CHECK(!out_of_space);

    BOOST_CHECK_EQUAL(seq.Allocate(FlatFilePos(0, 99), 1, out_of_space), 0U);
    BOOST_CHECK_EQUAL(fs::file_size(seq.FileName(FlatFilePos(0, 99))), 100U);
    BOOST_CHECK(!out_of_space);

    // Crossing the chunk boundary allocates the next full chunk.
    BOOST_CHECK_EQUAL(seq.Allocate(FlatFilePos(0, 99), 2, out_of_space), 101U);
    BOOST_CHECK_EQUAL(fs::file_size(seq.FileName(FlatFilePos(0, 99))), 200U);
    BOOST_CHECK(!out_of_space);
}

// Flush() keeps preallocated space unless `finalize` is set, in which case
// the file is truncated down to the flushed position.
BOOST_AUTO_TEST_CASE(flatfile_flush)
{
    const auto data_dir = GetDataDir();
    FlatFileSeq seq(data_dir, "a", 100);

    bool out_of_space;
    seq.Allocate(FlatFilePos(0, 0), 1, out_of_space);

    // Flush without finalize should not truncate file.
    seq.Flush(FlatFilePos(0, 1));
    BOOST_CHECK_EQUAL(fs::file_size(seq.FileName(FlatFilePos(0, 1))), 100U);

    // Flush with finalize should truncate file.
    seq.Flush(FlatFilePos(0, 1), true);
    BOOST_CHECK_EQUAL(fs::file_size(seq.FileName(FlatFilePos(0, 1))), 1U);
}

BOOST_AUTO_TEST_SUITE_END()
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s // RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s // RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s // RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s // SIMD-ONLY0-NOT: {{__kmpc|__tgt}} // expected-no-diagnostics #ifndef HEADER #define HEADER enum omp_allocator_handle_t { omp_null_allocator = 0, omp_default_mem_alloc = 1, omp_large_cap_mem_alloc = 2, omp_const_mem_alloc = 3, omp_high_bw_mem_alloc = 4, omp_low_lat_mem_alloc = 5, omp_cgroup_mem_alloc = 6, omp_pteam_mem_alloc = 7, omp_thread_mem_alloc = 8, KMP_ALLOCATOR_MAX_HANDLE = __UINTPTR_MAX__ }; volatile double g, g_orig; volatile double &g1 = g_orig; struct BaseS { int x; }; struct BaseS1 { float y; }; template <class T> struct S : public BaseS, public BaseS1 { T f; S(T a) : f(a + g) {} S() : f(g) {} S& operator=(const S&); ~S() {} }; void red(BaseS1&, const BaseS1&); void red_plus(BaseS1&, const BaseS1&); void init(BaseS1&, const BaseS1&); void init1(BaseS1&, const BaseS1&); void init2(BaseS1&, const BaseS1&); void init_plus(BaseS1&, const BaseS1&); #pragma omp declare reduction(operator& : BaseS1 : red(omp_out, omp_in)) initializer(init(omp_priv, omp_orig)) #pragma omp declare reduction(+ : BaseS1 : red_plus(omp_out, omp_in)) initializer(init_plus(omp_priv, omp_orig)) #pragma omp declare reduction(&& : S<float>, S<int> : omp_out.f *= omp_in.f) initializer(init1(omp_priv, omp_orig)) // CHECK-DAG: [[S_FLOAT_TY:%.+]] = type { 
%{{[^,]+}}, %{{[^,]+}}, float } // CHECK-DAG: [[S_INT_TY:%.+]] = type { %{{[^,]+}}, %{{[^,]+}}, i{{[0-9]+}} } // CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 66, i32 0, i32 0, i8* // CHECK-DAG: [[REDUCTION_LOC:@.+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 18, i32 0, i32 0, i8* // CHECK-DAG: [[REDUCTION_LOCK:@.+]] = common global [8 x i32] zeroinitializer #pragma omp declare reduction(operator* : S<int> : omp_out.f = 17 * omp_in.f) initializer(omp_priv = S<int>()) // CHECK-LABEL: bazz void bazz() { S<int> s; // CHECK: [[S_ADDR:%.+]] = alloca [[S_INT_TY]], // CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[S_ADDR]]) // CHECK: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @{{.+}}, i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [[S_INT_TY]]*)* [[BAZZ_OUTLINE:@.+]] to void (i32*, i32*, ...)*), [[S_INT_TY]]* [[S_ADDR]]) // CHECK: call {{.*}} [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]* // CHECK: ret void #pragma omp parallel #pragma omp simd reduction(*: s) for (int I = 0; I < 10; ++I) ; } // CHECK: define internal void [[BAZZ_OUTLINE]](i32* {{.+}}, i32* {{.+}}, [[S_INT_TY]]* {{.+}}) // CHECK: [[S_PRIV_ADDR:%.+]] = alloca [[S_INT_TY]], // CHECK: call void [[BAZZ_INIT:@.+]]([[S_INT_TY]]* [[S_PRIV_ADDR]], [[S_INT_TY]]* [[S_ORIG_ADDR:%.+]]) // CHECK: call void @{{.+}}([[S_INT_TY]]* [[S_ORIG_ADDR]], [[S_INT_TY]]* [[S_PRIV_ADDR]]) // CHECK-NEXT: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[S_PRIV_ADDR]]) // CHECK-NEXT: ret void // CHECK: define internal void [[BAZZ_INIT]]([[S_INT_TY]]* {{.*}}[[S_PRIV_ADDR:%.+]], [[S_INT_TY]]* {{.*}}[[S_ORIG_ADDR:%.+]]) // CHECK: store [[S_INT_TY]]* [[S_PRIV_ADDR]], [[S_INT_TY]]** [[S_PRIV_ADDR_REF:%.+]], // CHECK: [[S_PRIV_ADDR:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[S_PRIV_ADDR_REF]], // CHECK: call void [[S_INT_TY_CONSTR]]([[S_INT_TY]]* [[S_PRIV_ADDR]]) // CHECK-NEXT: ret void #pragma omp 
declare reduction(operator&& : int : omp_out = 111 & omp_in) template <typename T, int length> T tmain() { T t; S<T> test; T t_var = T(), t_var1; T vec[] = {1, 2}; S<T> s_arr[] = {1, 2}; S<T> &var = test; S<T> var1; S<T> arr[length]; #pragma omp declare reduction(operator& : T : omp_out = 15 + omp_in) #pragma omp declare reduction(operator+ : T : omp_out = 1513 + omp_in) initializer(omp_priv = 321) #pragma omp declare reduction(min : T : omp_out = 47 - omp_in) initializer(omp_priv = 432 / omp_orig) #pragma omp declare reduction(operator&& : S<T> : omp_out.f = 17 * omp_in.f) initializer(init2(omp_priv, omp_orig)) #pragma omp declare reduction(operator&& : T : omp_out = 17 * omp_in) #pragma omp parallel #pragma omp for reduction(+ : t_var) reduction(& : var) reduction(&& : var1) reduction(min : t_var1) nowait for (int i = 0; i < 2; ++i) { vec[i] = t_var; s_arr[i] = var; } #pragma omp parallel #pragma omp for reduction(&& : t_var) for (int i = 0; i < 2; ++i) { vec[i] = t_var; s_arr[i] = var; } #pragma omp parallel #pragma omp for reduction(+ : arr[1:length-2]) for (int i = 0; i < 2; ++i) { vec[i] = t_var; s_arr[i] = var; } return T(); } extern S<float> **foo(); #pragma omp declare reduction(operator- : float, double : omp_out = 333 + omp_in) #pragma omp declare reduction(min : float, double : omp_out = 555 + omp_in) int main() { #pragma omp declare reduction(operator+ : float, double : omp_out = 222 - omp_in) initializer(omp_priv = -1) S<float> test; float t_var = 0, t_var1; int vec[] = {1, 2}; S<float> s_arr[] = {1, 2, 3, 4}; S<float> &var = test; S<float> var1, arrs[10][4]; S<float> **var2 = foo(); S<float> vvar2[5]; S<float>(&var3)[4] = s_arr; #pragma omp declare reduction(operator+ : int : omp_out = 555 * omp_in) initializer(omp_priv = 888) #pragma omp parallel #pragma omp for reduction(+ : t_var) reduction(& : var) reduction(&& : var1) reduction(min : t_var1) for (int i = 0; i < 2; ++i) { vec[i] = t_var; s_arr[i] = var; } int arr[10][vec[1]]; #pragma omp parallel 
for reduction(+ : arr[1][ : vec[1]]) reduction(& : arrs[1 : vec[1]][1 : 2]) for (int i = 0; i < 10; ++i) ++arr[1][i]; #pragma omp parallel #pragma omp for reduction(+ : arr) reduction(& : arrs) for (int i = 0; i < 10; ++i) ++arr[1][i]; #pragma omp parallel #pragma omp for reduction(& : var2[0 : 5][1 : 6]) for (int i = 0; i < 10; ++i) ; #pragma omp parallel #pragma omp for reduction(& : vvar2[0 : 5]) for (int i = 0; i < 10; ++i) ; #pragma omp parallel #pragma omp for reduction(& : var3[1 : 2]) for (int i = 0; i < 10; ++i) ; #pragma omp parallel #pragma omp for reduction(& : var3) allocate(omp_cgroup_mem_alloc: var3) for (int i = 0; i < 10; ++i) ; return tmain<int, 42>(); } // CHECK: define {{.*}}i{{[0-9]+}} @main() // CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]], // CHECK: call {{.*}} [[S_FLOAT_TY_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]]) // CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 6, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, float*, [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]*, float*, [2 x i32]*, [4 x [[S_FLOAT_TY]]]*)* [[MAIN_MICROTASK:@.+]] to void // CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 5, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i64, i64, i32*, [2 x i32]*, [10 x [4 x [[S_FLOAT_TY]]]]*)* [[MAIN_MICROTASK1:@.+]] to void // CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 4, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i64, i64, i32*, [10 x [4 x [[S_FLOAT_TY]]]]*)* [[MAIN_MICROTASK2:@.+]] to void // CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) 
@__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[S_FLOAT_TY]]***)* [[MAIN_MICROTASK3:@.+]] to void // CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [5 x [[S_FLOAT_TY]]]*)* [[MAIN_MICROTASK4:@.+]] to void // CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [4 x [[S_FLOAT_TY]]]*)* [[MAIN_MICROTASK5:@.+]] to void // CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [4 x [[S_FLOAT_TY]]]*)* [[MAIN_MICROTASK6:@.+]] to void // CHECK: = call {{.*}}i{{.+}} [[TMAIN_INT_42:@.+]]() // CHECK: call {{.*}} [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]* // CHECK: ret // // CHECK: define internal void [[MAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, float* nonnull align 4 dereferenceable(4) %{{.+}}, [[S_FLOAT_TY]]* nonnull align 4 dereferenceable(12) %{{.+}}, [[S_FLOAT_TY]]* nonnull align 4 dereferenceable(12) %{{.+}}, float* nonnull align 4 dereferenceable(4) %{{.+}}, [2 x i32]* nonnull align 4 dereferenceable(8) %vec, [4 x [[S_FLOAT_TY]]]* nonnull align 4 dereferenceable(48) %{{.+}}) // CHECK: [[T_VAR_PRIV:%.+]] = alloca float, // CHECK: [[VAR_PRIV:%.+]] = alloca [[S_FLOAT_TY]], // CHECK: [[VAR1_PRIV:%.+]] = alloca [[S_FLOAT_TY]], // CHECK: [[T_VAR1_PRIV:%.+]] = alloca float, // Reduction list for runtime. 
// CHECK: [[RED_LIST:%.+]] = alloca [4 x i8*],
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
// CHECK: [[T_VAR_REF:%.+]] = load float*, float** %
// CHECK: [[VAR1_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** %
// CHECK: [[T_VAR1_REF:%.+]] = load float*, float** %
// For + reduction operation initial value of private variable is -1.
// CHECK: call void [[RED_INIT1:@.+]](float* %{{.+}}, float* %{{.+}})
// CHECK: call void @_ZN1SIfEC1Ev([[S_FLOAT_TY]]* [[VAR_PRIV]]
// For & reduction operation initial value of private variable is defined by call of 'init()' function.
// CHECK: call void [[RED_INIT2:@.+]](
// CHECK: call void @_ZN1SIfEC1Ev([[S_FLOAT_TY]]* [[VAR1_PRIV]]
// For && reduction operation initial value of private variable is 1.0.
// CHECK: call void [[RED_INIT3:@.+]](
// For min reduction operation initial value of private variable is largest representable value.
// CHECK: [[INIT:%.+]] = load float, float* @
// CHECK: store float [[INIT]], float* [[T_VAR1_PRIV]],
// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_for_static_init_4(
// Skip checks for internal operations.
// CHECK: call void @__kmpc_for_static_fini( // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]}; // CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 0 // CHECK: [[BITCAST:%.+]] = bitcast float* [[T_VAR_PRIV]] to i8* // CHECK: store i8* [[BITCAST]], i8** [[T_VAR_PRIV_REF]], // CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 1 // CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR_PRIV]] to i8* // CHECK: store i8* [[BITCAST]], i8** [[VAR_PRIV_REF]], // CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 2 // CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR1_PRIV]] to i8* // CHECK: store i8* [[BITCAST]], i8** [[VAR1_PRIV_REF]], // CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 3 // CHECK: [[BITCAST:%.+]] = bitcast float* [[T_VAR1_PRIV]] to i8* // CHECK: store i8* [[BITCAST]], i8** [[T_VAR1_PRIV_REF]], // res = __kmpc_reduce(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>); // CHECK: [[BITCAST:%.+]] = bitcast [4 x i8*]* [[RED_LIST]] to i8* // CHECK: [[RES:%.+]] = call i32 @__kmpc_reduce(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], i32 4, i64 32, i8* [[BITCAST]], void (i8*, i8*)* [[REDUCTION_FUNC:@.+]], [8 x i32]* [[REDUCTION_LOCK]]) // switch(res) // CHECK: switch i32 [[RES]], label %[[RED_DONE:.+]] [ // CHECK: i32 1, label %[[CASE1:.+]] // CHECK: i32 2, label %[[CASE2:.+]] // CHECK: ] // case 1: // t_var += t_var_reduction; // CHECK: call void [[RED_COMB1:@.+]](float* %{{.+}}, float* %{{.+}}) // var = var.operator &(var_reduction); // CHECK: call void [[RED_COMB2:@.+]]( // var1 = var1.operator &&(var1_reduction); // CHECK: call void [[RED_COMB3:@.+]]( // t_var1 = min(t_var1, t_var1_reduction); // CHECK: call void [[RED_COMB4:@.+]]( // __kmpc_end_reduce(<loc>, <gtid>, &<lock>); // CHECK: call void 
@__kmpc_end_reduce(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]]) // break; // CHECK: br label %[[RED_DONE]] // case 2: // t_var += t_var_reduction; // CHECK: call void @__kmpc_critical( // CHECK: call void [[RED_COMB1]](float* %{{.+}}, float* %{{.+}}) // CHECK: call void @__kmpc_end_critical( // var = var.operator &(var_reduction); // CHECK: call void @__kmpc_critical( // CHECK: call void [[RED_COMB2]] // CHECK: call void @__kmpc_end_critical( // var1 = var1.operator &&(var1_reduction); // CHECK: call void @__kmpc_critical( // CHECK: call void [[RED_COMB3]] // CHECK: call void @__kmpc_end_critical( // t_var1 = min(t_var1, t_var1_reduction); // CHECK: call void @__kmpc_critical( // CHECK: call void [[RED_COMB4]] // CHECK: call void @__kmpc_end_critical( // __kmpc_end_reduce(<loc>, <gtid>, &<lock>); // CHECK: call void @__kmpc_end_reduce(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]]) // break; // CHECK: br label %[[RED_DONE]] // CHECK: [[RED_DONE]] // CHECK-DAG: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]]) // CHECK-DAG: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]* // CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]]) // CHECK: ret void // CHECK: define internal void [[RED_COMB1]](float* noalias %0, float* noalias %1) // CHECK: fsub float 2.220000e+02, % // CHECK: define internal void [[RED_INIT1]](float* noalias %0, float* noalias %1) // CHECK: store float -1.0{{.+}}, float* // CHECK: define internal void [[RED_COMB2]]( // CHECK: call {{.*}}void @_Z3redR6BaseS1RKS_( // CHECK: define internal void [[RED_INIT2]]( // CHECK: call {{.*}}void @_Z4initR6BaseS1RKS_( // CHECK: define internal void [[RED_COMB3]]( // CHECK: fmul float // CHECK: define internal void [[RED_INIT3]]( // CHECK: call {{.*}}void @_Z5init1R6BaseS1RKS_( // void reduce_func(void *lhs[<n>], void *rhs[<n>]) { // *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]); // ... 
// *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1], // *(Type<n>-1*)rhs[<n>-1]); // } // CHECK: define internal void [[REDUCTION_FUNC]](i8* %0, i8* %1) // t_var_lhs = (float*)lhs[0]; // CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0 // CHECK: [[T_VAR_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR_RHS_REF]], // CHECK: [[T_VAR_RHS:%.+]] = bitcast i8* [[T_VAR_RHS_VOID]] to float* // t_var_rhs = (float*)rhs[0]; // CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0 // CHECK: [[T_VAR_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR_LHS_REF]], // CHECK: [[T_VAR_LHS:%.+]] = bitcast i8* [[T_VAR_LHS_VOID]] to float* // var_lhs = (S<float>*)lhs[1]; // CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 1 // CHECK: [[VAR_RHS_VOID:%.+]] = load i8*, i8** [[VAR_RHS_REF]], // CHECK: [[VAR_RHS:%.+]] = bitcast i8* [[VAR_RHS_VOID]] to [[S_FLOAT_TY]]* // var_rhs = (S<float>*)rhs[1]; // CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1 // CHECK: [[VAR_LHS_VOID:%.+]] = load i8*, i8** [[VAR_LHS_REF]], // CHECK: [[VAR_LHS:%.+]] = bitcast i8* [[VAR_LHS_VOID]] to [[S_FLOAT_TY]]* // var1_lhs = (S<float>*)lhs[2]; // CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2 // CHECK: [[VAR1_RHS_VOID:%.+]] = load i8*, i8** [[VAR1_RHS_REF]], // CHECK: [[VAR1_RHS:%.+]] = bitcast i8* [[VAR1_RHS_VOID]] to [[S_FLOAT_TY]]* // var1_rhs = (S<float>*)rhs[2]; // CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2 // CHECK: [[VAR1_LHS_VOID:%.+]] = load i8*, i8** [[VAR1_LHS_REF]], // CHECK: [[VAR1_LHS:%.+]] = bitcast i8* [[VAR1_LHS_VOID]] to [[S_FLOAT_TY]]* // t_var1_lhs = (float*)lhs[3]; // CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[RED_LIST_RHS]], i64 0, i64 3 // CHECK: [[T_VAR1_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_RHS_REF]], // CHECK: [[T_VAR1_RHS:%.+]] = bitcast i8* [[T_VAR1_RHS_VOID]] to float* // t_var1_rhs = (float*)rhs[3]; // CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 3 // CHECK: [[T_VAR1_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_LHS_REF]], // CHECK: [[T_VAR1_LHS:%.+]] = bitcast i8* [[T_VAR1_LHS_VOID]] to float* // t_var_lhs += t_var_rhs; // CHECK: call void [[RED_COMB1]](float* %{{.+}}, float* %{{.+}}) // var_lhs = var_lhs.operator &(var_rhs); // CHECK: call void [[RED_COMB2]]( // var1_lhs = var1_lhs.operator &&(var1_rhs); // CHECK: call void [[RED_COMB3]]( // t_var1_lhs = min(t_var1_lhs, t_var1_rhs); // CHECK: call void [[RED_COMB4]]( // CHECK: ret void // CHECK: define internal void [[RED_COMB4]]( // CHECK: fadd float 5.550000e+02, % // CHECK: define internal void [[MAIN_MICROTASK1]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i64 %{{.+}}, i64 %{{.+}}, i32* {{.+}} %{{.+}}, [2 x i32]* nonnull align 4 dereferenceable(8) %{{.+}}, [10 x [4 x [[S_FLOAT_TY]]]]* nonnull align 4 dereferenceable(480) %{{.+}}) // Reduction list for runtime. 
// CHECK: [[RED_LIST:%.+]] = alloca [4 x i8*], // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]], // CHECK: store i{{[0-9]+}}* %{{.+}}, i{{[0-9]+}}** // CHECK: store i{{[0-9]+}}* %{{.+}}, i{{[0-9]+}}** [[ARR_ADDR:%.+]], // CHECK: [[ARR:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[ARR_ADDR]], // CHECK: [[UB_CAST:%.+]] = ptrtoint i32* [[UB1_UP:%.+]] to i64 // CHECK: [[LB_CAST:%.+]] = ptrtoint i32* [[LB1_0:%.+]] to i64 // CHECK: [[DIFF:%.+]] = sub i64 [[UB_CAST]], [[LB_CAST]] // CHECK: [[SIZE_1:%.+]] = sdiv exact i64 [[DIFF]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64) // CHECK: [[ARR_SIZE:%.+]] = add nuw i64 [[SIZE_1]], 1 // CHECK: call i8* @llvm.stacksave() // CHECK: [[ARR_PRIV:%.+]] = alloca i32, i64 [[ARR_SIZE]], // Check initialization of private copy. // CHECK: [[END:%.+]] = getelementptr i32, i32* [[ARR_PRIV]], i64 [[ARR_SIZE]] // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[ARR_PRIV]], [[END]] // CHECK: br i1 [[ISEMPTY]], // CHECK: phi i32* // CHECK: call void [[RED_INIT5:@.+]](i32* %{{.+}}, i32* %{{.+}}) // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]] // CHECK: br i1 [[DONE]], // CHECK: [[ARRS_PRIV:%.+]] = alloca [[S_FLOAT_TY]], i64 [[ARRS_SIZE:%.+]], // Check initialization of private copy. // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[ARRS_PRIV]], i64 [[ARRS_SIZE]] // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[ARRS_PRIV]], [[END]] // CHECK: br i1 [[ISEMPTY]], // CHECK: phi [[S_FLOAT_TY]]* // CHECK: call void [[RED_INIT2]]( // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]] // CHECK: br i1 [[DONE]], // CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]] // CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]] // CHECK: call void @__kmpc_for_static_init_4( // Skip checks for internal operations. 
// CHECK: call void @__kmpc_for_static_fini( // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]}; // CHECK: [[ARR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 0 // CHECK: [[BITCAST:%.+]] = bitcast i32* [[ARR_PRIV]] to i8* // CHECK: store i8* [[BITCAST]], i8** [[ARR_PRIV_REF]], // CHECK: [[ARR_SIZE_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 1 // CHECK: [[BITCAST:%.+]] = inttoptr i64 [[ARR_SIZE]] to i8* // CHECK: store i8* [[BITCAST]], i8** [[ARR_SIZE_REF]], // CHECK: [[ARRS_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 2 // CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[ARRS_PRIV]] to i8* // CHECK: store i8* [[BITCAST]], i8** [[ARRS_PRIV_REF]], // CHECK: [[ARRS_SIZE_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 3 // CHECK: [[BITCAST:%.+]] = inttoptr i64 [[ARRS_SIZE]] to i8* // CHECK: store i8* [[BITCAST]], i8** [[ARRS_SIZE_REF]], // res = __kmpc_reduce(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>); // CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]] // CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]] // CHECK: [[BITCAST:%.+]] = bitcast [4 x i8*]* [[RED_LIST]] to i8* // CHECK: [[RES:%.+]] = call i32 @__kmpc_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], i32 2, i64 32, i8* [[BITCAST]], void (i8*, i8*)* [[REDUCTION_FUNC:@.+]], [8 x i32]* [[REDUCTION_LOCK]]) // switch(res) // CHECK: switch i32 [[RES]], label %[[RED_DONE:.+]] [ // CHECK: i32 1, label %[[CASE1:.+]] // CHECK: i32 2, label %[[CASE2:.+]] // CHECK: ] // case 1: // CHECK: [[CASE1]] // arr[:] += arr_reduction[:]; // CHECK: [[END:%.+]] = getelementptr i32, i32* [[LB1_0]], i64 [[ARR_SIZE]] // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[LB1_0]], [[END]] // CHECK: br i1 [[ISEMPTY]], // CHECK: phi i32* // CHECK: call void [[RED_COMB5:@.+]](i32* %{{.+}}, i32* 
%{{.+}}) // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]] // CHECK: br i1 [[DONE]], // arrs[:] = var.operator &(arrs_reduction[:]); // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[ARRS_LB:%.+]], i64 [[ARRS_SIZE]] // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[ARRS_LB]], [[END]] // CHECK: br i1 [[ISEMPTY]], // CHECK: phi [[S_FLOAT_TY]]* // CHECK: call void [[RED_COMB2]]( // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]] // CHECK: br i1 [[DONE]], // __kmpc_end_reduce(<loc>, <gtid>, &<lock>); // CHECK: call void @__kmpc_end_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]]) // break; // CHECK: br label %[[RED_DONE]] // case 2: // CHECK: [[CASE2]] // arr[:] += arr_reduction[:]; // CHECK: [[END:%.+]] = getelementptr i32, i32* [[LB1_0]], i64 [[ARR_SIZE]] // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[LB1_0]], [[END]] // CHECK: br i1 [[ISEMPTY]], // CHECK: phi i32* // CHECK: call void @__kmpc_critical( // CHECK: call void [[RED_COMB5]]( // CHECK: call void @__kmpc_end_critical( // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]] // CHECK: br i1 [[DONE]], // arrs[:] = var.operator &(arrs_reduction[:]); // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[ARRS_LB:%.+]], i64 [[ARRS_SIZE]] // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[ARRS_LB]], [[END]] // CHECK: br i1 [[ISEMPTY]], // CHECK: phi [[S_FLOAT_TY]]* // CHECK: call void @__kmpc_critical( // CHECK: call void [[RED_COMB2]]( // CHECK: call void @__kmpc_end_critical( // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]] // CHECK: br i1 [[DONE]], // break; // CHECK: br label %[[RED_DONE]] // CHECK: [[RED_DONE]] // Check destruction of private copy. 
// CHECK: [[END:%.+]] = getelementptr inbounds [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[ARRS_PRIV]], i64 [[ARRS_SIZE]] // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[ARRS_PRIV]], [[END]] // CHECK: br i1 [[ISEMPTY]], // CHECK: phi [[S_FLOAT_TY]]* // CHECK: call void @_ZN1SIfED1Ev([[S_FLOAT_TY]]* % // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[ARRS_PRIV]] // CHECK: br i1 [[DONE]], // CHECK: call void @llvm.stackrestore(i8* // CHECK: ret void // CHECK: define internal void [[RED_COMB5]](i32* noalias %0, i32* noalias %1) // CHECK: mul nsw i32 555, % // CHECK: define internal void [[RED_INIT5]](i32* noalias %0, i32* noalias %1) // CHECK: store i32 888, i32* % // void reduce_func(void *lhs[<n>], void *rhs[<n>]) { // *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]); // ... // *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1], // *(Type<n>-1*)rhs[<n>-1]); // } // CHECK: define internal void [[REDUCTION_FUNC]](i8* %0, i8* %1) // arr_rhs = (int*)rhs[0]; // CHECK: [[ARR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0 // CHECK: [[ARR_RHS_VOID:%.+]] = load i8*, i8** [[ARR_RHS_REF]], // CHECK: [[ARR_RHS:%.+]] = bitcast i8* [[ARR_RHS_VOID]] to i32* // arr_lhs = (int*)lhs[0]; // CHECK: [[ARR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0 // CHECK: [[ARR_LHS_VOID:%.+]] = load i8*, i8** [[ARR_LHS_REF]], // CHECK: [[ARR_LHS:%.+]] = bitcast i8* [[ARR_LHS_VOID]] to i32* // arr_size = (size_t)lhs[1]; // CHECK: [[ARR_SIZE_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1 // CHECK: [[ARR_SIZE_VOID:%.+]] = load i8*, i8** [[ARR_SIZE_REF]], // CHECK: [[ARR_SIZE:%.+]] = ptrtoint i8* [[ARR_SIZE_VOID]] to i64 // arrs_rhs = (S<float>*)rhs[2]; // CHECK: [[ARRS_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2 // CHECK: [[ARRS_RHS_VOID:%.+]] = load i8*, i8** 
[[ARRS_RHS_REF]], // CHECK: [[ARRS_RHS:%.+]] = bitcast i8* [[ARRS_RHS_VOID]] to [[S_FLOAT_TY]]* // arrs_lhs = (S<float>*)lhs[2]; // CHECK: [[ARRS_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2 // CHECK: [[ARRS_LHS_VOID:%.+]] = load i8*, i8** [[ARRS_LHS_REF]], // CHECK: [[ARRS_LHS:%.+]] = bitcast i8* [[ARRS_LHS_VOID]] to [[S_FLOAT_TY]]* // arrs_size = (size_t)lhs[3]; // CHECK: [[ARRS_SIZE_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 3 // CHECK: [[ARRS_SIZE_VOID:%.+]] = load i8*, i8** [[ARRS_SIZE_REF]], // CHECK: [[ARRS_SIZE:%.+]] = ptrtoint i8* [[ARRS_SIZE_VOID]] to i64 // arr_lhs[:] += arr_rhs[:]; // CHECK: [[END:%.+]] = getelementptr i32, i32* [[ARR_LHS]], i64 [[ARR_SIZE]] // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[ARR_LHS]], [[END]] // CHECK: br i1 [[ISEMPTY]], // CHECK: phi i32* // CHECK: call void [[RED_COMB5]]( // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]] // CHECK: br i1 [[DONE]], // arrs_lhs = arrs_lhs.operator &(arrs_rhs); // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[ARRS_LB:%.+]], i64 [[ARRS_SIZE]] // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[ARRS_LB]], [[END]] // CHECK: br i1 [[ISEMPTY]], // CHECK: phi [[S_FLOAT_TY]]* // CHECK: call void [[RED_COMB2]]( // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]] // CHECK: br i1 [[DONE]], // CHECK: ret void // CHECK: define internal void [[MAIN_MICROTASK2]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i64 %{{.+}}, i64 %{{.+}}, i32* {{.+}} %{{.+}}, [10 x [4 x [[S_FLOAT_TY]]]]* nonnull align 4 dereferenceable(480) %{{.+}}) // CHECK: [[ARRS_PRIV:%.+]] = alloca [10 x [4 x [[S_FLOAT_TY]]]], // Reduction list for runtime. 
// CHECK: [[RED_LIST:%.+]] = alloca [3 x i8*], // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]], // CHECK: [[ARR_SIZE:%.+]] = udiv exact i64 // CHECK: call i8* @llvm.stacksave() // CHECK: [[ARR_PRIV:%.+]] = alloca i32, i64 [[ARR_SIZE]], // Check initialization of private copy. // CHECK: [[END:%.+]] = getelementptr i32, i32* [[ARR_PRIV]], i64 [[ARR_SIZE]] // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[ARR_PRIV]], [[END]] // CHECK: br i1 [[ISEMPTY]], // CHECK: phi i32* // CHECK: call void [[RED_INIT5]]( // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]] // CHECK: br i1 [[DONE]], // Check initialization of private copy. // CHECK: [[BEGIN:%.+]] = getelementptr inbounds [10 x [4 x [[S_FLOAT_TY]]]], [10 x [4 x [[S_FLOAT_TY]]]]* [[ARRS_PRIV]], i32 0, i32 0, i32 0 // CHECK: [[END:%.+]] = getelementptr inbounds [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[BEGIN]], i64 40 // CHECK: br label %[[CTOR:[^,]+]] // CHECK: [[CTOR]]: // CHECK: [[CUR:%.+]] = phi [[S_FLOAT_TY]]* [ [[BEGIN]], %{{.+}} ], [ [[NEXT:%.+]], %[[CTOR]] ] // CHECK: call void @_ZN1SIfEC1Ev([[S_FLOAT_TY]]* [[CUR]]) // CHECK: [[NEXT:%.+]] = getelementptr inbounds [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[CUR]], i64 1 // CHECK: [[IS_DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* [[NEXT]], [[END]] // CHECK: br i1 [[IS_DONE]], label %[[DONE:[^,]+]], label %[[CTOR]] // CHECK: [[DONE]]: // CHECK: [[BEGIN:%.+]] = getelementptr inbounds [10 x [4 x [[S_FLOAT_TY]]]], [10 x [4 x [[S_FLOAT_TY]]]]* [[ARRS_PRIV]], i32 0, i32 0, i32 0 // CHECK: [[LHS_BEGIN:%.+]] = bitcast [10 x [4 x [[S_FLOAT_TY]]]]* %{{.+}} to [[S_FLOAT_TY]]* // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[BEGIN]], i64 40 // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[BEGIN]], [[END]] // CHECK: br i1 [[ISEMPTY]], // CHECK: phi [[S_FLOAT_TY]]* // CHECK: call void [[RED_INIT2]]( // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]] // CHECK: br i1 [[DONE]], // CHECK: [[LHS_BEGIN:%.+]] = bitcast [10 x [4 x 
[[S_FLOAT_TY]]]]* %{{.+}} to [[S_FLOAT_TY]]* // CHECK: [[ARRS_PRIV_BEGIN:%.+]] = bitcast [10 x [4 x [[S_FLOAT_TY]]]]* [[ARRS_PRIV]] to [[S_FLOAT_TY]]* // CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]] // CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]] // CHECK: call void @__kmpc_for_static_init_4( // Skip checks for internal operations. // CHECK: call void @__kmpc_for_static_fini( // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]}; // CHECK: [[ARR_PRIV_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST]], i64 0, i64 0 // CHECK: [[BITCAST:%.+]] = bitcast i32* [[ARR_PRIV]] to i8* // CHECK: store i8* [[BITCAST]], i8** [[ARR_PRIV_REF]], // CHECK: [[ARR_SIZE_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST]], i64 0, i64 1 // CHECK: [[BITCAST:%.+]] = inttoptr i64 [[ARR_SIZE]] to i8* // CHECK: store i8* [[BITCAST]], i8** [[ARR_SIZE_REF]], // CHECK: [[ARRS_PRIV_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST]], i64 0, i64 2 // CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[ARRS_PRIV_BEGIN]] to i8* // CHECK: store i8* [[BITCAST]], i8** [[ARRS_PRIV_REF]], // res = __kmpc_reduce(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>); // CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]] // CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]] // CHECK: [[BITCAST:%.+]] = bitcast [3 x i8*]* [[RED_LIST]] to i8* // CHECK: [[RES:%.+]] = call i32 @__kmpc_reduce(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], i32 2, i64 24, i8* [[BITCAST]], void (i8*, i8*)* [[REDUCTION_FUNC:@.+]], [8 x i32]* [[REDUCTION_LOCK]]) // switch(res) // CHECK: switch i32 [[RES]], label %[[RED_DONE:.+]] [ // CHECK: i32 1, label %[[CASE1:.+]] // CHECK: i32 2, label %[[CASE2:.+]] // CHECK: ] // case 1: // CHECK: [[CASE1]] // arr[:] += arr_reduction[:]; // CHECK: [[END:%.+]] = getelementptr i32, i32* [[LB1_0:%.+]], i64 [[ARR_SIZE]] // 
CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[LB1_0]], [[END]] // CHECK: br i1 [[ISEMPTY]], // CHECK: phi i32* // CHECK: call void [[RED_COMB5]]( // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]] // CHECK: br i1 [[DONE]], // arrs[:] = var.operator &(arrs_reduction[:]); // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[LHS_BEGIN]], i64 40 // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[LHS_BEGIN]], [[END]] // CHECK: br i1 [[ISEMPTY]], // CHECK: phi [[S_FLOAT_TY]]* // CHECK: call void [[RED_COMB2]]( // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]] // CHECK: br i1 [[DONE]], // __kmpc_end_reduce(<loc>, <gtid>, &<lock>); // CHECK: call void @__kmpc_end_reduce(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]]) // break; // CHECK: br label %[[RED_DONE]] // case 2: // CHECK: [[CASE2]] // arr[:] += arr_reduction[:]; // CHECK: [[END:%.+]] = getelementptr i32, i32* [[LB1_0]], i64 [[ARR_SIZE]] // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[LB1_0]], [[END]] // CHECK: br i1 [[ISEMPTY]], // CHECK: phi i32* // CHECK: call void @__kmpc_critical( // CHECK: call void [[RED_COMB5]]( // CHECK: call void @__kmpc_end_critical( // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]] // CHECK: br i1 [[DONE]], // arrs[:] = var.operator &(arrs_reduction[:]); // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[LHS_BEGIN]], i64 40 // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[LHS_BEGIN]], [[END]] // CHECK: br i1 [[ISEMPTY]], // CHECK: phi [[S_FLOAT_TY]]* // CHECK: call void @__kmpc_critical( // CHECK: call void [[RED_COMB2]]( // CHECK: call void @__kmpc_end_critical( // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]] // CHECK: br i1 [[DONE]], // break; // CHECK: br label %[[RED_DONE]] // CHECK: [[RED_DONE]] // Check destruction of private copy. 
// CHECK: [[BEGIN:%.+]] = getelementptr inbounds [10 x [4 x [[S_FLOAT_TY]]]], [10 x [4 x [[S_FLOAT_TY]]]]* [[ARRS_PRIV]], i32 0, i32 0, i32 0 // CHECK: [[END:%.+]] = getelementptr inbounds [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[BEGIN]], i64 40 // CHECK: br // CHECK: phi [[S_FLOAT_TY]]* // CHECK: call void @_ZN1SIfED1Ev([[S_FLOAT_TY]]* % // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[BEGIN]] // CHECK: br i1 [[DONE]], // CHECK: call void @llvm.stackrestore(i8* // CHECK: call void @__kmpc_barrier( // CHECK: ret void // void reduce_func(void *lhs[<n>], void *rhs[<n>]) { // *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]); // ... // *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1], // *(Type<n>-1*)rhs[<n>-1]); // } // CHECK: define internal void [[REDUCTION_FUNC]](i8* %0, i8* %1) // arr_rhs = (int*)rhs[0]; // CHECK: [[ARR_RHS_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0 // CHECK: [[ARR_RHS_VOID:%.+]] = load i8*, i8** [[ARR_RHS_REF]], // CHECK: [[ARR_RHS:%.+]] = bitcast i8* [[ARR_RHS_VOID]] to i32* // arr_lhs = (int*)lhs[0]; // CHECK: [[ARR_LHS_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0 // CHECK: [[ARR_LHS_VOID:%.+]] = load i8*, i8** [[ARR_LHS_REF]], // CHECK: [[ARR_LHS:%.+]] = bitcast i8* [[ARR_LHS_VOID]] to i32* // arr_size = (size_t)lhs[1]; // CHECK: [[ARR_SIZE_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1 // CHECK: [[ARR_SIZE_VOID:%.+]] = load i8*, i8** [[ARR_SIZE_REF]], // CHECK: [[ARR_SIZE:%.+]] = ptrtoint i8* [[ARR_SIZE_VOID]] to i64 // arrs_rhs = (S<float>*)rhs[2]; // CHECK: [[ARRS_RHS_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2 // CHECK: [[ARRS_RHS_VOID:%.+]] = load i8*, i8** [[ARRS_RHS_REF]], // CHECK: [[ARRS_RHS:%.+]] = bitcast i8* [[ARRS_RHS_VOID]] to [[S_FLOAT_TY]]* // arrs_lhs = (S<float>*)lhs[2]; // CHECK: 
[[ARRS_LHS_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2 // CHECK: [[ARRS_LHS_VOID:%.+]] = load i8*, i8** [[ARRS_LHS_REF]], // CHECK: [[ARRS_LHS:%.+]] = bitcast i8* [[ARRS_LHS_VOID]] to [[S_FLOAT_TY]]* // arr_lhs[:] += arr_rhs[:]; // CHECK: [[END:%.+]] = getelementptr i32, i32* [[ARR_LHS]], i64 [[ARR_SIZE]] // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[ARR_LHS]], [[END]] // CHECK: br i1 [[ISEMPTY]], // CHECK: phi i32* // CHECK: call void [[RED_COMB5]]( // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]] // CHECK: br i1 [[DONE]], // arrs_lhs = arrs_lhs.operator &(arrs_rhs); // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[ARRS_LB:%.+]], i64 40 // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[ARRS_LB]], [[END]] // CHECK: br i1 [[ISEMPTY]], // CHECK: phi [[S_FLOAT_TY]]* // CHECK: call void [[RED_COMB2]]( // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]] // CHECK: br i1 [[DONE]], // CHECK: ret void // CHECK: define internal void [[MAIN_MICROTASK3]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[S_FLOAT_TY]]*** nonnull align 8 dereferenceable(8) %{{.+}}) // CHECK: [[VAR2_ORIG_ADDR:%.+]] = alloca [[S_FLOAT_TY]]***, // Reduction list for runtime. 
// CHECK: [[RED_LIST:%.+]] = alloca [2 x i8*], // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]], // CHECK: [[VAR2_ORIG:%.+]] = load [[S_FLOAT_TY]]***, [[S_FLOAT_TY]]**** [[VAR2_ORIG_ADDR]], // CHECK: [[LAST:%.+]] = ptrtoint [[S_FLOAT_TY]]* %{{.+}} to i64 // CHECK: [[FIRST:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW:%.+]] to i64 // CHECK: [[BYTE_DIF:%.+]] = sub i64 [[LAST]], [[FIRST]] // CHECK: [[DIF:%.+]] = sdiv exact i64 [[BYTE_DIF]], ptrtoint ([[S_FLOAT_TY]]* getelementptr ([[S_FLOAT_TY]], [[S_FLOAT_TY]]* null, i32 1) to i64) // CHECK: [[SIZE:%.+]] = add nuw i64 [[DIF]], 1 // CHECK: call i8* @llvm.stacksave() // CHECK: [[VAR2_PRIV:%.+]] = alloca [[S_FLOAT_TY]], i64 [[SIZE]], // CHECK: [[LD:%.+]] = load [[S_FLOAT_TY]]**, [[S_FLOAT_TY]]*** [[VAR2_ORIG]], // CHECK: [[ORIG_START:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** [[LD]], // CHECK: [[START:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[ORIG_START]] to i64 // CHECK: [[LOW_BOUND:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW]] to i64 // CHECK: [[OFFSET_BYTES:%.+]] = sub i64 [[START]], [[LOW_BOUND]] // CHECK: [[OFFSET:%.+]] = sdiv exact i64 [[OFFSET_BYTES]], ptrtoint ([[S_FLOAT_TY]]* getelementptr ([[S_FLOAT_TY]], [[S_FLOAT_TY]]* null, i32 1) to i64) // CHECK: [[PSEUDO_VAR2_PRIV:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[VAR2_PRIV]], i64 [[OFFSET]] // CHECK: store [[S_FLOAT_TY]]** [[REF:.+]], [[S_FLOAT_TY]]*** % // CHECK: store [[S_FLOAT_TY]]* [[PSEUDO_VAR2_PRIV]], [[S_FLOAT_TY]]** [[REF]] // CHECK: ret void // CHECK: define internal void [[MAIN_MICROTASK4]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [5 x [[S_FLOAT_TY]]]* nonnull align 4 dereferenceable(60) %{{.+}}) // CHECK: [[VVAR2_ORIG_ADDR:%.+]] = alloca [5 x [[S_FLOAT_TY]]]*, // CHECK: [[VVAR2_PRIV:%.+]] = alloca [5 x [[S_FLOAT_TY]]], // Reduction list for runtime. 
// CHECK: [[RED_LIST:%.+]] = alloca [1 x i8*], // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]], // CHECK: [[VVAR2_ORIG:%.+]] = load [5 x [[S_FLOAT_TY]]]*, [5 x [[S_FLOAT_TY]]]** [[VVAR2_ORIG_ADDR]], // CHECK: [[LOW:%.+]] = getelementptr inbounds [5 x [[S_FLOAT_TY]]], [5 x [[S_FLOAT_TY]]]* [[VVAR2_ORIG]], i64 0, i64 0 // CHECK: [[ORIG_START:%.+]] = bitcast [5 x [[S_FLOAT_TY]]]* [[VVAR2_ORIG]] to [[S_FLOAT_TY]]* // CHECK: [[START:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[ORIG_START]] to i64 // CHECK: [[LOW_BOUND:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW]] to i64 // CHECK: [[OFFSET_BYTES:%.+]] = sub i64 [[START]], [[LOW_BOUND]] // CHECK: [[OFFSET:%.+]] = sdiv exact i64 [[OFFSET_BYTES]], ptrtoint ([[S_FLOAT_TY]]* getelementptr ([[S_FLOAT_TY]], [[S_FLOAT_TY]]* null, i32 1) to i64) // CHECK: [[VVAR2_PRIV_PTR:%.+]] = bitcast [5 x [[S_FLOAT_TY]]]* [[VVAR2_PRIV]] to [[S_FLOAT_TY]]* // CHECK: [[VVAR2_PRIV:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[VVAR2_PRIV_PTR]], i64 [[OFFSET]] // CHECK: ret void // CHECK: define internal void [[MAIN_MICROTASK5]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [4 x [[S_FLOAT_TY]]]* nonnull align 4 dereferenceable(48) %{{.+}}) // CHECK: [[VAR3_ORIG_ADDR:%.+]] = alloca [4 x [[S_FLOAT_TY]]]*, // CHECK: [[VAR3_PRIV:%.+]] = alloca [2 x [[S_FLOAT_TY]]], // Reduction list for runtime. 
// CHECK: [[RED_LIST:%.+]] = alloca [1 x i8*], // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]], // CHECK: [[VAR3_ORIG:%.+]] = load [4 x [[S_FLOAT_TY]]]*, [4 x [[S_FLOAT_TY]]]** [[VAR3_ORIG_ADDR]], // CHECK: store [4 x [[S_FLOAT_TY]]]* [[VAR3_ORIG]], [4 x [[S_FLOAT_TY]]]** [[VAR3_ORIG_ADDR:%.+]], // CHECK: [[VAR3_ORIG:%.+]] = load [4 x [[S_FLOAT_TY]]]*, [4 x [[S_FLOAT_TY]]]** [[VAR3_ORIG_ADDR]], // CHECK: store [4 x [[S_FLOAT_TY]]]* [[VAR3_ORIG]], [4 x [[S_FLOAT_TY]]]** [[VAR3_ORIG_ADDR:%.+]], // CHECK: [[VAR3_ORIG:%.+]] = load [4 x [[S_FLOAT_TY]]]*, [4 x [[S_FLOAT_TY]]]** [[VAR3_ORIG_ADDR]], // CHECK: [[LOW:%.+]] = getelementptr inbounds [4 x [[S_FLOAT_TY]]], [4 x [[S_FLOAT_TY]]]* [[VAR3_ORIG]], i64 0, i64 1 // CHECK: [[VAR3_ORIG:%.+]] = load [4 x [[S_FLOAT_TY]]]*, [4 x [[S_FLOAT_TY]]]** [[VAR3_ORIG_ADDR]], // CHECK: [[VAR3_ORIG:%.+]] = load [4 x [[S_FLOAT_TY]]]*, [4 x [[S_FLOAT_TY]]]** [[VAR3_ORIG_ADDR]], // CHECK: [[ORIG_START:%.+]] = bitcast [4 x [[S_FLOAT_TY]]]* [[VAR3_ORIG]] to [[S_FLOAT_TY]]* // CHECK: [[START:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[ORIG_START]] to i64 // CHECK: [[LOW_BOUND:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW]] to i64 // CHECK: [[OFFSET_BYTES:%.+]] = sub i64 [[START]], [[LOW_BOUND]] // CHECK: [[OFFSET:%.+]] = sdiv exact i64 [[OFFSET_BYTES]], ptrtoint ([[S_FLOAT_TY]]* getelementptr ([[S_FLOAT_TY]], [[S_FLOAT_TY]]* null, i32 1) to i64) // CHECK: [[VAR3_PRIV_PTR:%.+]] = bitcast [2 x [[S_FLOAT_TY]]]* [[VAR3_PRIV]] to [[S_FLOAT_TY]]* // CHECK: [[PSEUDO_VAR3_PRIV:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[VAR3_PRIV_PTR]], i64 [[OFFSET]] // CHECK: [[VAR3_PRIV:%.+]] = bitcast [[S_FLOAT_TY]]* [[PSEUDO_VAR3_PRIV]] to [4 x [[S_FLOAT_TY]]]* // CHECK: store [4 x [[S_FLOAT_TY]]]* [[VAR3_PRIV]], [4 x [[S_FLOAT_TY]]]** % // CHECK: ret void // CHECK: define internal void [[MAIN_MICROTASK6]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [4 x [[S_FLOAT_TY]]]* nonnull align 4 dereferenceable(48) 
%{{.+}}) // CHECK: [[VAR3_ORIG_ADDR:%.+]] = alloca [4 x [[S_FLOAT_TY]]]*, // Reduction list for runtime. // CHECK: [[RED_LIST:%.+]] = alloca [1 x i8*], // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]], // CHECK: [[VAR3_ORIG:%.+]] = load [4 x [[S_FLOAT_TY]]]*, [4 x [[S_FLOAT_TY]]]** [[VAR3_ORIG_ADDR]], // CHECK: store [4 x [[S_FLOAT_TY]]]* [[VAR3_ORIG]], [4 x [[S_FLOAT_TY]]]** [[VAR3_ORIG_ADDR:%.+]], // CHECK: [[VAR3_ORIG:%.+]] = load [4 x [[S_FLOAT_TY]]]*, [4 x [[S_FLOAT_TY]]]** [[VAR3_ORIG_ADDR]], // CHECK: store [4 x [[S_FLOAT_TY]]]* [[VAR3_ORIG]], [4 x [[S_FLOAT_TY]]]** [[VAR3_ORIG_ADDR:%.+]], // CHECK: [[VAR3_ORIG:%.+]] = load [4 x [[S_FLOAT_TY]]]*, [4 x [[S_FLOAT_TY]]]** [[VAR3_ORIG_ADDR]], // CHECK: [[VAR3_VOID_PTR:%.+]] = call i8* @__kmpc_alloc(i32 [[GTID:%.+]], i64 48, i8* inttoptr (i64 6 to i8*)) // CHECK: [[VAR3_PRIV:%.+]] = bitcast i8* [[VAR3_VOID_PTR]] to [4 x [[S_FLOAT_TY]]]* // CHECK: getelementptr inbounds [4 x [[S_FLOAT_TY]]], [4 x [[S_FLOAT_TY]]]* [[VAR3_PRIV]], i32 0, i32 0 // CHECK: bitcast [4 x [[S_FLOAT_TY]]]* [[VAR3_ORIG]] to [[S_FLOAT_TY]]* // CHECK: getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* %{{.+}}, i64 4 // CHECK: store [4 x [[S_FLOAT_TY]]]* [[VAR3_PRIV]], [4 x [[S_FLOAT_TY]]]** % // CHECK: [[VAR3_VOID_PTR:%.+]] = bitcast [4 x [[S_FLOAT_TY]]]* [[VAR3_PRIV]] to i8* // CHECK: call void @__kmpc_free(i32 [[GTID]], i8* [[VAR3_VOID_PTR]], i8* inttoptr (i64 6 to i8*)) // CHECK: ret void // CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT_42]]() // CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]], // CHECK: call {{.*}} [[S_INT_TY_CONSTR]]([[S_INT_TY]]* [[TEST]]) // CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) 
@__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 6, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [[S_INT_TY]]*, [[S_INT_TY]]*, i32*, [2 x i32]*, [2 x [[S_INT_TY]]]*)* [[TMAIN_MICROTASK:@.+]] to void // Not interested in this one: // CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 4, // CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 5, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [42 x [[S_INT_TY]]]*, [2 x i32]*, i32*, [2 x [[S_INT_TY]]]*, [[S_INT_TY]]*)* [[TMAIN_MICROTASK2:@.+]] to void // CHECK: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]* // CHECK: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]* // CHECK: ret // // CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i32* nonnull align 4 dereferenceable(4) %{{.+}}, [[S_INT_TY]]* nonnull align 4 dereferenceable(12) %{{.+}}, [[S_INT_TY]]* nonnull align 4 dereferenceable(12) %{{.+}}, i32* nonnull align 4 dereferenceable(4) %{{.+}}, [2 x i32]* nonnull align 4 dereferenceable(8) %{{.+}}, [2 x [[S_INT_TY]]]* nonnull align 4 dereferenceable(24) %{{.+}}) // CHECK: alloca i{{[0-9]+}}, // CHECK: alloca i{{[0-9]+}}, // CHECK: alloca i{{[0-9]+}}, // CHECK: alloca i{{[0-9]+}}, // CHECK: alloca i{{[0-9]+}}, // CHECK: alloca i{{[0-9]+}}, // CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}}, // CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]], // CHECK: [[VAR1_PRIV:%.+]] = alloca [[S_INT_TY]], // CHECK: [[T_VAR1_PRIV:%.+]] = alloca i{{[0-9]+}}, // Reduction list for runtime. 
// CHECK: [[RED_LIST:%.+]] = alloca [4 x i8*], // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]], // CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** % // CHECK: [[VAR1_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** % // CHECK: [[T_VAR1_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** % // For + reduction operation initial value of private variable is 0. // CHECK: call void [[RED_INIT6:@.+]]( // CHECK: call void @_ZN1SIiEC1Ev([[S_INT_TY]]* [[VAR_PRIV]] // For & reduction operation initial value of private variable is ones in all bits. // CHECK: call void [[RED_INIT2:@.+]]( // CHECK: call void @_ZN1SIiEC1Ev([[S_INT_TY]]* [[VAR1_PRIV]] // For && reduction operation initial value of private variable is 1.0. // CHECK: call void [[RED_INIT7:@.+]]( // For min reduction operation initial value of private variable is largest repesentable value. // CHECK: call void [[RED_INIT8:@.+]]( // CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]] // CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]] // CHECK: call void @__kmpc_for_static_init_4( // Skip checks for internal operations. 
// CHECK: call void @__kmpc_for_static_fini( // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]}; // CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 0 // CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR_PRIV]] to i8* // CHECK: store i8* [[BITCAST]], i8** [[T_VAR_PRIV_REF]], // CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 1 // CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR_PRIV]] to i8* // CHECK: store i8* [[BITCAST]], i8** [[VAR_PRIV_REF]], // CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 2 // CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_PRIV]] to i8* // CHECK: store i8* [[BITCAST]], i8** [[VAR1_PRIV_REF]], // CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 3 // CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR1_PRIV]] to i8* // CHECK: store i8* [[BITCAST]], i8** [[T_VAR1_PRIV_REF]], // res = __kmpc_reduce_nowait(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>); // CHECK: [[BITCAST:%.+]] = bitcast [4 x i8*]* [[RED_LIST]] to i8* // CHECK: [[RES:%.+]] = call i32 @__kmpc_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], i32 4, i64 32, i8* [[BITCAST]], void (i8*, i8*)* [[REDUCTION_FUNC:@.+]], [8 x i32]* [[REDUCTION_LOCK]]) // switch(res) // CHECK: switch i32 [[RES]], label %[[RED_DONE:.+]] [ // CHECK: i32 1, label %[[CASE1:.+]] // CHECK: i32 2, label %[[CASE2:.+]] // CHECK: ] // case 1: // t_var += t_var_reduction; // CHECK: call void [[RED_COMB6:@.+]]( // var = var.operator &(var_reduction); // CHECK: call void [[RED_COMB2]]( // var1 = var1.operator &&(var1_reduction); // CHECK: call void [[RED_COMB7:@.+]]( // t_var1 = min(t_var1, t_var1_reduction); // CHECK: call void [[RED_COMB8:@.+]]( // __kmpc_end_reduce_nowait(<loc>, <gtid>, &<lock>); // CHECK: call void 
@__kmpc_end_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]]) // break; // CHECK: br label %[[RED_DONE]] // case 2: // t_var += t_var_reduction; // CHECK: call void @__kmpc_critical( // CHECK: call void [[RED_COMB6]]( // CHECK: call void @__kmpc_end_critical( // var = var.operator &(var_reduction); // CHECK: call void @__kmpc_critical( // CHECK: call void [[RED_COMB2]]( // CHECK: call void @__kmpc_end_critical( // var1 = var1.operator &&(var1_reduction); // CHECK: call void @__kmpc_critical( // CHECK: call void [[RED_COMB7]]( // CHECK: call void @__kmpc_end_critical( // t_var1 = min(t_var1, t_var1_reduction); // CHECK: call void @__kmpc_critical( // CHECK: call void [[RED_COMB8]]( // CHECK: call void @__kmpc_end_critical( // break; // CHECK: br label %[[RED_DONE]] // CHECK: [[RED_DONE]] // CHECK-DAG: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]]) // CHECK-DAG: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]* // CHECK: ret void // CHECK: define internal void [[RED_COMB6]](i32* noalias %0, i32* noalias %1) // CHECK: add nsw i32 1513, % // CHECK: define internal void [[RED_INIT6]](i32* noalias %0, i32* noalias %1) // CHECK: store i32 321, i32* % // CHECK: define internal void [[RED_COMB7]]( // CHECK: mul nsw i32 17, % // CHECK: define internal void [[RED_INIT7]]( // CHECK: call void @_Z5init2R6BaseS1RKS_( // CHECK: define internal void [[RED_COMB8]](i32* noalias %0, i32* noalias %1) // CHECK: sub nsw i32 47, % // CHECK: define internal void [[RED_INIT8]](i32* noalias %0, i32* noalias %1) // CHECK: sdiv i32 432, % // void reduce_func(void *lhs[<n>], void *rhs[<n>]) { // *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]); // ... 
// *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1], // *(Type<n>-1*)rhs[<n>-1]); // } // CHECK: define internal void [[REDUCTION_FUNC]](i8* %0, i8* %1) // t_var_lhs = (i{{[0-9]+}}*)lhs[0]; // CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0 // CHECK: [[T_VAR_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR_RHS_REF]], // CHECK: [[T_VAR_RHS:%.+]] = bitcast i8* [[T_VAR_RHS_VOID]] to i{{[0-9]+}}* // t_var_rhs = (i{{[0-9]+}}*)rhs[0]; // CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0 // CHECK: [[T_VAR_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR_LHS_REF]], // CHECK: [[T_VAR_LHS:%.+]] = bitcast i8* [[T_VAR_LHS_VOID]] to i{{[0-9]+}}* // var_lhs = (S<i{{[0-9]+}}>*)lhs[1]; // CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 1 // CHECK: [[VAR_RHS_VOID:%.+]] = load i8*, i8** [[VAR_RHS_REF]], // CHECK: [[VAR_RHS:%.+]] = bitcast i8* [[VAR_RHS_VOID]] to [[S_INT_TY]]* // var_rhs = (S<i{{[0-9]+}}>*)rhs[1]; // CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1 // CHECK: [[VAR_LHS_VOID:%.+]] = load i8*, i8** [[VAR_LHS_REF]], // CHECK: [[VAR_LHS:%.+]] = bitcast i8* [[VAR_LHS_VOID]] to [[S_INT_TY]]* // var1_lhs = (S<i{{[0-9]+}}>*)lhs[2]; // CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2 // CHECK: [[VAR1_RHS_VOID:%.+]] = load i8*, i8** [[VAR1_RHS_REF]], // CHECK: [[VAR1_RHS:%.+]] = bitcast i8* [[VAR1_RHS_VOID]] to [[S_INT_TY]]* // var1_rhs = (S<i{{[0-9]+}}>*)rhs[2]; // CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2 // CHECK: [[VAR1_LHS_VOID:%.+]] = load i8*, i8** [[VAR1_LHS_REF]], // CHECK: [[VAR1_LHS:%.+]] = bitcast i8* [[VAR1_LHS_VOID]] to [[S_INT_TY]]* // t_var1_lhs = (i{{[0-9]+}}*)lhs[3]; // CHECK: [[T_VAR1_RHS_REF:%.+]] = 
getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 3 // CHECK: [[T_VAR1_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_RHS_REF]], // CHECK: [[T_VAR1_RHS:%.+]] = bitcast i8* [[T_VAR1_RHS_VOID]] to i{{[0-9]+}}* // t_var1_rhs = (i{{[0-9]+}}*)rhs[3]; // CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 3 // CHECK: [[T_VAR1_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_LHS_REF]], // CHECK: [[T_VAR1_LHS:%.+]] = bitcast i8* [[T_VAR1_LHS_VOID]] to i{{[0-9]+}}* // t_var_lhs += t_var_rhs; // CHECK: call void [[RED_COMB6]]( // var_lhs = var_lhs.operator &(var_rhs); // CHECK: call void [[RED_COMB2]]( // var1_lhs = var1_lhs.operator &&(var1_rhs); // CHECK: call void [[RED_COMB7]]( // t_var1_lhs = min(t_var1_lhs, t_var1_rhs); // CHECK: call void [[RED_COMB8]]( // CHECK: ret void // CHECK: define internal void [[TMAIN_MICROTASK2]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [42 x [[S_INT_TY]]]* nonnull align 4 dereferenceable(504) %{{.*}}, [2 x i32]* nonnull align 4 dereferenceable(8) %{{.*}}, i32* nonnull align 4 dereferenceable(4) %{{.*}}, [2 x [[S_INT_TY]]]* nonnull align 4 dereferenceable(24) %{{.*}}, [[S_INT_TY]]* nonnull align 4 dereferenceable(12) %{{.*}}) // CHECK: [[ARR_ORIG_ADDR:%.+]] = alloca [42 x [[S_INT_TY]]]*, // CHECK: [[ARR_PRIV:%.+]] = alloca [40 x [[S_INT_TY]]], // Reduction list for runtime. 
// CHECK: [[RED_LIST:%.+]] = alloca [1 x i8*], // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]], // CHECK: [[ARR_ORIG:%.+]] = load [42 x [[S_INT_TY]]]*, [42 x [[S_INT_TY]]]** [[ARR_ORIG_ADDR]], // CHECK: [[LOW:%.+]] = getelementptr inbounds [42 x [[S_INT_TY]]], [42 x [[S_INT_TY]]]* [[ARR_ORIG]], i64 0, i64 1 // CHECK: [[ORIG_START:%.+]] = bitcast [42 x [[S_INT_TY]]]* [[ARR_ORIG]] to [[S_INT_TY]]* // CHECK: [[START:%.+]] = ptrtoint [[S_INT_TY]]* [[ORIG_START]] to i64 // CHECK: [[LOW_BOUND:%.+]] = ptrtoint [[S_INT_TY]]* [[LOW]] to i64 // CHECK: [[OFFSET_BYTES:%.+]] = sub i64 [[START]], [[LOW_BOUND]] // CHECK: [[OFFSET:%.+]] = sdiv exact i64 [[OFFSET_BYTES]], ptrtoint ([[S_INT_TY]]* getelementptr ([[S_INT_TY]], [[S_INT_TY]]* null, i32 1) to i64) // CHECK: [[ARR_PRIV_PTR:%.+]] = bitcast [40 x [[S_INT_TY]]]* [[ARR_PRIV]] to [[S_INT_TY]]* // CHECK: [[PSEUDO_ARR_PRIV:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[ARR_PRIV_PTR]], i64 [[OFFSET]] // CHECK: [[ARR_PRIV:%.+]] = bitcast [[S_INT_TY]]* [[PSEUDO_ARR_PRIV]] to [42 x [[S_INT_TY]]]* // CHECK: ret void #endif
// Copyright Carl Philipp Reh 2009 - 2021. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #ifndef FCPPT_RECORD_ELEMENT_VECTOR_HPP_INCLUDED #define FCPPT_RECORD_ELEMENT_VECTOR_HPP_INCLUDED #include <fcppt/record/detail/element_vector.hpp> namespace fcppt::record { /** \brief A metafunction returning the elements of record as an MPL vector. \ingroup fcpptrecord \tparam Record Must be an #fcppt::record::object. */ template <typename Record> using element_vector = typename fcppt::record::detail::element_vector<Record>::type; } #endif
/* Written by Anshul Verma, 19/78065 */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <sys/types.h> #include <sys/stat.h> int main(int argc, char **argv) { if (argc < 2) { fprintf(stderr, "Correct Usage: ./main <filename>\n"); return -1; } struct stat dt; printf("\nFile Info\n"); printf("--------------------\n"); printf("Name: %s\n", argv[1]); printf("UserID: %d\n", dt.st_uid); printf("GroupID: %d\n", dt.st_gid); printf("File Type: %d\n", S_IFMT); printf("Directory: %s\n", S_IFDIR ? "Yes" : "No"); printf("Regular File: %s\n", S_IFREG ? "Yes" : "No"); printf("Last access time: %ld\n", dt.st_atime); printf("Last modified time: %ld\n", dt.st_mtime); printf("User Permissions:\n"); printf(" Read: %s\n", S_IRUSR ? "Yes" : "No"); printf(" Write: %s\n", S_IWUSR ? "Yes" : "No"); printf(" Execute: %s\n", S_IXUSR ? "Yes" : "No"); printf("Group Permissions:\n"); printf(" Read: %s\n", S_IRGRP ? "Yes" : "No"); printf(" Write: %s\n", S_IWGRP ? "Yes" : "No"); printf(" Execute: %s\n", S_IXGRP ? "Yes" : "No"); printf("Others Permissions:\n"); printf(" Read: %s\n", S_IROTH ? "Yes" : "No"); printf(" Write: %s\n", S_IWOTH ? "Yes" : "No"); printf(" Execute: %s\n\n", S_IXOTH ? "Yes" : "No"); }
/******************************************************************************* * thrill/core/hyperloglog.hpp * * Part of Project Thrill - http://project-thrill.org * * Copyright (C) 2017 Moritz Kiefer <moritz.kiefer@purelyfunctional.org> * Copyright (C) 2017 Tino Fuhrmann <tino-fuhrmann@web.de> * * All rights reserved. Published under the BSD-2 license in the LICENSE file. ******************************************************************************/ #pragma once #ifndef THRILL_CORE_HYPERLOGLOG_HEADER #define THRILL_CORE_HYPERLOGLOG_HEADER #include <thrill/data/serialization_fwd.hpp> #include <tlx/die.hpp> #include <tlx/math/clz.hpp> #include <tlx/siphash.hpp> #include <cmath> #include <vector> namespace thrill { namespace core { // The high 25 bit in this register are used for the index, the next 6 bits for // the value and the last bit is currently unused using HyperLogLogSparseRegister = uint32_t; enum class HyperLogLogRegisterFormat { SPARSE, DENSE }; template <size_t p> class HyperLogLogRegisters { public: HyperLogLogRegisters() : format_(HyperLogLogRegisterFormat::SPARSE) { } size_t size() const { return entries_.size(); } void toDense(); bool shouldConvertToDense(); bool shouldMerge(); template <typename ValueType> void insert(const ValueType& value) { // first p bits are the index insert_hash(tlx::siphash(value)); } void insert_hash(const uint64_t& hash_value); void mergeSparse(); void mergeDense(const HyperLogLogRegisters<p>& b); //! calculate count estimation result adjusted for bias double result(); //! combine two HyperloglogRegisters, switches between sparse/dense //! representations HyperLogLogRegisters operator + ( const HyperLogLogRegisters<p>& registers2) const; //! declare friendship with serializers template <typename Archive, typename T, typename Enable> friend struct data::Serialization; private: unsigned sparse_size_ = 0; HyperLogLogRegisterFormat format_; // Register values are always smaller than 64. 
We thus need log2(64) = 6 // bits to store them. In particular an uint8_t is sufficient std::vector<uint8_t> sparseListBuffer_; std::vector<HyperLogLogSparseRegister> deltaSet_; std::vector<uint8_t> entries_; }; /******************************************************************************/ // Additional Helpers, exposed mainly for testing namespace hyperloglog { template <size_t sparsePrecision, size_t densePrecision> uint32_t encodeHash(uint64_t hash); template <size_t sparsePrecision, size_t densePrecision> std::pair<size_t, uint8_t> decodeHash(HyperLogLogSparseRegister reg); //! Perform a varint and a difference encoding std::vector<uint8_t> encodeSparseList(const std::vector<uint32_t>& sparseList); std::vector<uint32_t> decodeSparseList(const std::vector<uint8_t>& sparseList); } // namespace hyperloglog } // namespace core namespace data { template <typename Archive, size_t p> struct Serialization<Archive, core::HyperLogLogRegisters<p> >{ static void Serialize(const core::HyperLogLogRegisters<p>& x, Archive& ar); static core::HyperLogLogRegisters<p> Deserialize(Archive& ar); static constexpr bool is_fixed_size = false; static constexpr size_t fixed_size = 0; }; } // namespace data } // namespace thrill #endif // !THRILL_CORE_HYPERLOGLOG_HEADER /******************************************************************************/
/*
 * Copyright (c) 2018 Uber Technologies, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "class.h"

#include "art_field-inl.h"
#include "art_method-inl.h"
#include "class_linker-inl.h"
#include "class_loader.h"
#include "class-inl.h"
#include "dex_cache.h"
#include "dex_file-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "handle_scope-inl.h"
#include "method.h"
#include "object_array-inl.h"
#include "object-inl.h"
#include "runtime.h"
#include "thread.h"
#include "throwable.h"
#include "utils.h"
#include "well_known_classes.h"

namespace art {
namespace mirror {

// GC root holding the singleton java.lang.Class class object.
GcRoot<Class> Class::java_lang_Class_;

// Install the java.lang.Class singleton. May only be called once
// (CHECKed): the root must still be null and the argument non-null.
void Class::SetClassClass(Class* java_lang_Class) {
  CHECK(java_lang_Class_.IsNull())
      << java_lang_Class_.Read()
      << " " << java_lang_Class;
  CHECK(java_lang_Class != nullptr);
  java_lang_Class->SetClassFlags(mirror::kClassFlagClass);
  java_lang_Class_ = GcRoot<Class>(java_lang_Class);
}

// Clear the java.lang.Class root; it must currently be set.
void Class::ResetClass() {
  CHECK(!java_lang_Class_.IsNull());
  java_lang_Class_ = GcRoot<Class>(nullptr);
}

void Class::VisitRoots(RootVisitor* visitor) {
  java_lang_Class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}

// Record the throwable that caused verification/linking to fail. The setter
// is templated on whether a transaction is active so the write is logged
// correctly for transactional rollback.
inline void Class::SetVerifyError(mirror::Object* error) {
  CHECK(error != nullptr) << PrettyClass(this);
  if (Runtime::Current()->IsActiveTransaction()) {
    SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_), error);
  } else {
    SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_), error);
  }
}

// Transition this class to new_status, enforcing the legal status ordering,
// capturing the pending exception on kStatusError, and notifying any threads
// waiting on the class object for resolution/initialization.
void Class::SetStatus(Handle<Class> h_this, Status new_status, Thread* self) {
  Status old_status = h_this->GetStatus();
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  bool class_linker_initialized = class_linker != nullptr && class_linker->IsInitialized();
  if (LIKELY(class_linker_initialized)) {
    // Status may only move forward, except for the error and retired states.
    if (UNLIKELY(new_status <= old_status && new_status != kStatusError &&
                 new_status != kStatusRetired)) {
      LOG(FATAL) << "Unexpected change back of class status for " << PrettyClass(h_this.Get())
                 << " " << old_status << " -> " << new_status;
    }
    if (new_status >= kStatusResolved || old_status >= kStatusResolved) {
      // When classes are being resolved the resolution code should hold the lock.
      CHECK_EQ(h_this->GetLockOwnerThreadId(), self->GetThreadId())
          << "Attempt to change status of class while not holding its lock: "
          << PrettyClass(h_this.Get()) << " " << old_status << " -> " << new_status;
    }
  }
  if (UNLIKELY(new_status == kStatusError)) {
    CHECK_NE(h_this->GetStatus(), kStatusError)
        << "Attempt to set as erroneous an already erroneous class "
        << PrettyClass(h_this.Get());
    if (VLOG_IS_ON(class_linker)) {
      LOG(ERROR) << "Setting " << PrettyDescriptor(h_this.Get()) << " to erroneous.";
      if (self->IsExceptionPending()) {
        LOG(ERROR) << "Exception: " << self->GetException()->Dump();
      }
    }
    // Remember the current exception.
    CHECK(self->GetException() != nullptr);
    h_this->SetVerifyError(self->GetException());
  }
  // status_ is stored via a 32-bit volatile field write, so Status must be
  // exactly 32 bits wide.
  static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32");
  if (Runtime::Current()->IsActiveTransaction()) {
    h_this->SetField32Volatile<true>(OFFSET_OF_OBJECT_MEMBER(Class, status_), new_status);
  } else {
    h_this->SetField32Volatile<false>(OFFSET_OF_OBJECT_MEMBER(Class, status_), new_status);
  }
  if (!class_linker_initialized) {
    // When the class linker is being initialized it is single-threaded and by
    // definition there can be no waiters. During initialization classes may
    // appear temporary but won't be retired as their size was statically
    // computed.
  } else {
    // Classes that are being resolved or initialized need to notify waiters
    // that the class status changed. See ClassLinker::EnsureResolved and
    // ClassLinker::WaitForInitializeClass.
    if (h_this->IsTemp()) {
      // Class is a temporary one, ensure that waiters for resolution get
      // notified of retirement so that they can grab the new version of the
      // class from the class linker's table.
      CHECK_LT(new_status, kStatusResolved) << PrettyDescriptor(h_this.Get());
      if (new_status == kStatusRetired || new_status == kStatusError) {
        h_this->NotifyAll(self);
      }
    } else {
      CHECK_NE(new_status, kStatusRetired);
      if (old_status >= kStatusResolved || new_status >= kStatusResolved) {
        h_this->NotifyAll(self);
      }
    }
  }
}

// Point this class at a new dex cache and mirror its string table.
void Class::SetDexCache(DexCache* new_dex_cache) {
  SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_), new_dex_cache);
  SetDexCacheStrings(new_dex_cache != nullptr ? new_dex_cache->GetStrings() : nullptr);
}

// Set the allocated size of instances; in debug builds, shrinking an already
// published class size is fatal.
void Class::SetClassSize(uint32_t new_class_size) {
  if (kIsDebugBuild && new_class_size < GetClassSize()) {
    DumpClass(LOG(INTERNAL_FATAL), kDumpClassFullDetail);
    LOG(INTERNAL_FATAL) << new_class_size << " vs " << GetClassSize();
    LOG(FATAL) << " class=" << PrettyTypeOf(this);
  }
  // Not called within a transaction.
  SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), new_class_size);
}

// Return the class' name. The exact format is bizarre, but it's the specified
// behavior for Class.getName: keywords for primitive types, regular "[I" form
// for primitive arrays (so "int" but "[I"), and arrays of reference types
// written between "L" and ";" but with dots rather than slashes (so
// "java.lang.String" but "[Ljava.lang.String;"). Madness.
// Compute (and cache via SetName) the java.lang.String name for the class,
// in the Class.getName format described in the comment above.
String* Class::ComputeName(Handle<Class> h_this) {
  String* name = h_this->GetName();
  if (name != nullptr) {
    return name;  // already computed and cached
  }
  std::string temp;
  const char* descriptor = h_this->GetDescriptor(&temp);
  Thread* self = Thread::Current();
  if ((descriptor[0] != 'L') && (descriptor[0] != '[')) {
    // The descriptor indicates that this is the class for
    // a primitive type; special-case the return value.
    const char* c_name = nullptr;
    switch (descriptor[0]) {
      case 'Z': c_name = "boolean"; break;
      case 'B': c_name = "byte"; break;
      case 'C': c_name = "char"; break;
      case 'S': c_name = "short"; break;
      case 'I': c_name = "int"; break;
      case 'J': c_name = "long"; break;
      case 'F': c_name = "float"; break;
      case 'D': c_name = "double"; break;
      case 'V': c_name = "void"; break;
      default:
        LOG(FATAL) << "Unknown primitive type: " << PrintableChar(descriptor[0]);
    }
    name = String::AllocFromModifiedUtf8(self, c_name);
  } else {
    // Convert the UTF-8 name to a java.lang.String. The name must use '.' to
    // separate package components.
    name = String::AllocFromModifiedUtf8(self, DescriptorToDot(descriptor).c_str());
  }
  h_this->SetName(name);
  return name;
}

// Dump a human-readable description of the class to os; with
// kDumpClassFullDetail also lists interfaces, vtable, methods and fields.
void Class::DumpClass(std::ostream& os, int flags) {
  if ((flags & kDumpClassFullDetail) == 0) {
    // Summary mode: one line with optional class loader / status suffixes.
    os << PrettyClass(this);
    if ((flags & kDumpClassClassLoader) != 0) {
      os << ' ' << GetClassLoader();
    }
    if ((flags & kDumpClassInitialized) != 0) {
      os << ' ' << GetStatus();
    }
    os << "\n";
    return;
  }
  Thread* const self = Thread::Current();
  StackHandleScope<2> hs(self);
  Handle<mirror::Class> h_this(hs.NewHandle(this));
  Handle<mirror::Class> h_super(hs.NewHandle(GetSuperClass()));
  auto image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
  std::string temp;
  // NOTE(review): the trailing commas below are the comma operator between
  // full stream expressions — behaviorally identical to ';'. Kept as-is.
  os << "----- " << (IsInterface() ? "interface" : "class") << " "
     << "'" << GetDescriptor(&temp) << "' cl=" << GetClassLoader() << " -----\n",
  os << "  objectSize=" << SizeOf() << " "
     << "(" << (h_super.Get() != nullptr ? h_super->SizeOf() : -1) << " from super)\n",
  os << StringPrintf("  access=0x%04x.%04x\n",
      GetAccessFlags() >> 16, GetAccessFlags() & kAccJavaFlagsMask);
  if (h_super.Get() != nullptr) {
    os << "  super='" << PrettyClass(h_super.Get()) << "' (cl=" << h_super->GetClassLoader()
       << ")\n";
  }
  if (IsArrayClass()) {
    os << "  componentType=" << PrettyClass(GetComponentType()) << "\n";
  }
  const size_t num_direct_interfaces = NumDirectInterfaces();
  if (num_direct_interfaces > 0) {
    os << "  interfaces (" << num_direct_interfaces << "):\n";
    for (size_t i = 0; i < num_direct_interfaces; ++i) {
      Class* interface = GetDirectInterface(self, h_this, i);
      if (interface == nullptr) {
        os << StringPrintf("    %2zd: nullptr!\n", i);
      } else {
        const ClassLoader* cl = interface->GetClassLoader();
        os << StringPrintf("    %2zd: %s (cl=%p)\n", i, PrettyClass(interface).c_str(), cl);
      }
    }
  }
  if (!IsLoaded()) {
    os << "  class not yet loaded";
  } else {
    // After this point, this may have moved due to GetDirectInterface.
    os << "  vtable (" << h_this->NumVirtualMethods() << " entries, "
       << (h_super.Get() != nullptr ? h_super->NumVirtualMethods() : 0) << " in super):\n";
    for (size_t i = 0; i < NumVirtualMethods(); ++i) {
      os << StringPrintf("    %2zd: %s\n", i, PrettyMethod(
          h_this->GetVirtualMethodDuringLinking(i, image_pointer_size)).c_str());
    }
    os << "  direct methods (" << h_this->NumDirectMethods() << " entries):\n";
    for (size_t i = 0; i < h_this->NumDirectMethods(); ++i) {
      os << StringPrintf("    %2zd: %s\n", i, PrettyMethod(
          h_this->GetDirectMethod(i, image_pointer_size)).c_str());
    }
    if (h_this->NumStaticFields() > 0) {
      os << "  static fields (" << h_this->NumStaticFields() << " entries):\n";
      if (h_this->IsResolved() || h_this->IsErroneous()) {
        for (size_t i = 0; i < h_this->NumStaticFields(); ++i) {
          os << StringPrintf("    %2zd: %s\n", i, PrettyField(h_this->GetStaticField(i)).c_str());
        }
      } else {
        os << "    <not yet available>";
      }
    }
    if (h_this->NumInstanceFields() > 0) {
      os << "  instance fields (" << h_this->NumInstanceFields() << " entries):\n";
      if (h_this->IsResolved() || h_this->IsErroneous()) {
        for (size_t i = 0; i < h_this->NumInstanceFields(); ++i) {
          os << StringPrintf("    %2zd: %s\n", i, PrettyField(h_this->GetInstanceField(i)).c_str());
        }
      } else {
        os << "    <not yet available>";
      }
    }
  }
}

// Store the bitmap of instance-field reference offsets. In debug builds
// (unless the kClassWalkSuper sentinel is used) verify that the popcount of
// the bitmap matches the reference-field count accumulated over the
// superclass chain.
void Class::SetReferenceInstanceOffsets(uint32_t new_reference_offsets) {
  if (kIsDebugBuild && new_reference_offsets != kClassWalkSuper) {
    // Sanity check that the number of bits set in the reference offset bitmap
    // agrees with the number of references
    uint32_t count = 0;
    for (Class* c = this; c != nullptr; c = c->GetSuperClass()) {
      count += c->NumReferenceInstanceFieldsDuringLinking();
    }
    // +1 for the Class in Object.
    CHECK_EQ(static_cast<uint32_t>(POPCOUNT(new_reference_offsets)) + 1, count);
  }
  // Not called within a transaction.
  SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, reference_instance_offsets_),
                    new_reference_offsets);
}

// Two descriptors denote classes in the same package iff neither contains a
// further '/' after their longest common prefix.
bool Class::IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2) {
  size_t i = 0;
  size_t min_length = std::min(descriptor1.size(), descriptor2.size());
  while (i < min_length && descriptor1[i] == descriptor2[i]) {
    ++i;
  }
  if (descriptor1.find('/', i) != StringPiece::npos ||
      descriptor2.find('/', i) != StringPiece::npos) {
    return false;
  } else {
    return true;
  }
}

// Same-package test between two classes: requires identical class loaders,
// strips array dimensions, then compares descriptor packages.
bool Class::IsInSamePackage(Class* that) {
  Class* klass1 = this;
  Class* klass2 = that;
  if (klass1 == klass2) {
    return true;
  }
  // Class loaders must match.
  if (klass1->GetClassLoader() != klass2->GetClassLoader()) {
    return false;
  }
  // Arrays are in the same package when their element classes are.
  while (klass1->IsArrayClass()) {
    klass1 = klass1->GetComponentType();
  }
  while (klass2->IsArrayClass()) {
    klass2 = klass2->GetComponentType();
  }
  // trivial check again for array types
  if (klass1 == klass2) {
    return true;
  }
  // Compare the package part of the descriptor string.
  std::string temp1, temp2;
  return IsInSamePackage(klass1->GetDescriptor(&temp1), klass2->GetDescriptor(&temp2));
}

// True iff this class is java.lang.Throwable or a subclass of it.
bool Class::IsThrowableClass() {
  return WellKnownClasses::ToClass(WellKnownClasses::java_lang_Throwable)->IsAssignableFrom(this);
}

// Transaction-aware setter for the defining class loader.
void Class::SetClassLoader(ClassLoader* new_class_loader) {
  if (Runtime::Current()->IsActiveTransaction()) {
    SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), new_class_loader);
  } else {
    SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), new_class_loader);
  }
}

// Look up a virtual method by name/signature, searching this class first and
// then each directly/indirectly implemented interface in iftable order.
ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const StringPiece& signature,
                                      size_t pointer_size) {
  // Check the current class before checking the interfaces.
  ArtMethod* method = FindDeclaredVirtualMethod(name, signature, pointer_size);
  if (method != nullptr) {
    return method;
  }
  // Not declared here: scan every interface recorded in the iftable.
  int32_t iftable_count = GetIfTableCount();
  IfTable* iftable = GetIfTable();
  for (int32_t i = 0; i < iftable_count; ++i) {
    method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(name, signature, pointer_size);
    if (method != nullptr) {
      return method;
    }
  }
  return nullptr;
}

// Overload keyed by a pre-parsed Signature instead of a raw StringPiece.
ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const Signature& signature,
                                      size_t pointer_size) {
  // Check the current class before checking the interfaces.
  ArtMethod* method = FindDeclaredVirtualMethod(name, signature, pointer_size);
  if (method != nullptr) {
    return method;
  }
  int32_t iftable_count = GetIfTableCount();
  IfTable* iftable = GetIfTable();
  for (int32_t i = 0; i < iftable_count; ++i) {
    method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(name, signature, pointer_size);
    if (method != nullptr) {
      return method;
    }
  }
  return nullptr;
}

// Overload keyed by (dex cache, method index), avoiding string comparisons.
ArtMethod* Class::FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
                                      size_t pointer_size) {
  // Check the current class before checking the interfaces.
  ArtMethod* method = FindDeclaredVirtualMethod(dex_cache, dex_method_idx, pointer_size);
  if (method != nullptr) {
    return method;
  }
  int32_t iftable_count = GetIfTableCount();
  IfTable* iftable = GetIfTable();
  for (int32_t i = 0; i < iftable_count; ++i) {
    method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(
        dex_cache, dex_method_idx, pointer_size);
    if (method != nullptr) {
      return method;
    }
  }
  return nullptr;
}

// Searches only this class's own direct methods (no superclass walk).
ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature,
                                           size_t pointer_size) {
  for (auto& method : GetDirectMethods(pointer_size)) {
    if (name == method.GetName() && method.GetSignature() == signature) {
      return &method;
    }
  }
  return nullptr;
}

ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature,
                                           size_t pointer_size) {
  for (auto& method : GetDirectMethods(pointer_size)) {
    if (name == method.GetName() && signature == method.GetSignature()) {
      return &method;
    }
  }
  return nullptr;
}

// Index-based lookup; only meaningful when this class was loaded from the
// same dex cache, otherwise the method index refers to a different dex file.
ArtMethod* Class::FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
                                           size_t pointer_size) {
  if (GetDexCache() == dex_cache) {
    for (auto& method : GetDirectMethods(pointer_size)) {
      if (method.GetDexMethodIndex() == dex_method_idx) {
        return &method;
      }
    }
  }
  return nullptr;
}

// The FindDirectMethod overloads walk this class and its superclasses.
ArtMethod* Class::FindDirectMethod(const StringPiece& name, const StringPiece& signature,
                                   size_t pointer_size) {
  for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
    ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature, pointer_size);
    if (method != nullptr) {
      return method;
    }
  }
  return nullptr;
}

ArtMethod* Class::FindDirectMethod(const StringPiece& name, const Signature& signature,
                                   size_t pointer_size) {
  for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
    ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature, pointer_size);
    if (method != nullptr) {
      return method;
    }
  }
  return nullptr;
}

ArtMethod* Class::FindDirectMethod(
    const
    DexCache* dex_cache, uint32_t dex_method_idx, size_t pointer_size) {
  for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
    ArtMethod* method = klass->FindDeclaredDirectMethod(dex_cache, dex_method_idx, pointer_size);
    if (method != nullptr) {
      return method;
    }
  }
  return nullptr;
}

// Name-only direct-method lookup; unwraps proxy methods before comparing.
ArtMethod* Class::FindDeclaredDirectMethodByName(const StringPiece& name, size_t pointer_size) {
  for (auto& method : GetDirectMethods(pointer_size)) {
    ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size);
    if (name == np_method->GetName()) {
      return &method;
    }
  }
  return nullptr;
}

// TODO These should maybe be changed to be named FindOwnedVirtualMethod or something similar
// because they do not only find 'declared' methods and will return copied methods. This behavior is
// desired and correct but the naming can lead to confusion because in the java language declared
// excludes interface methods which might be found by this.
ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature,
                                            size_t pointer_size) {
  for (auto& method : GetVirtualMethods(pointer_size)) {
    ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size);
    if (name == np_method->GetName() && np_method->GetSignature() == signature) {
      return &method;
    }
  }
  return nullptr;
}

ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature,
                                            size_t pointer_size) {
  for (auto& method : GetVirtualMethods(pointer_size)) {
    ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size);
    if (name == np_method->GetName() && signature == np_method->GetSignature()) {
      return &method;
    }
  }
  return nullptr;
}

// Index-based lookup over the declared (non-copied) virtual methods; only
// meaningful when the dex cache matches this class's own.
ArtMethod* Class::FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
                                            size_t pointer_size) {
  if (GetDexCache() == dex_cache) {
    for (auto& method : GetDeclaredVirtualMethods(pointer_size)) {
      if (method.GetDexMethodIndex() == dex_method_idx) {
        return &method;
      }
    }
  }
  return
      nullptr;
}

// Name-only virtual-method lookup; unwraps proxy methods before comparing.
ArtMethod* Class::FindDeclaredVirtualMethodByName(const StringPiece& name, size_t pointer_size) {
  for (auto& method : GetVirtualMethods(pointer_size)) {
    ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size);
    if (name == np_method->GetName()) {
      return &method;
    }
  }
  return nullptr;
}

// The FindVirtualMethod overloads walk this class and its superclasses.
ArtMethod* Class::FindVirtualMethod(
    const StringPiece& name, const StringPiece& signature, size_t pointer_size) {
  for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
    ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature, pointer_size);
    if (method != nullptr) {
      return method;
    }
  }
  return nullptr;
}

ArtMethod* Class::FindVirtualMethod(
    const StringPiece& name, const Signature& signature, size_t pointer_size) {
  for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
    ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature, pointer_size);
    if (method != nullptr) {
      return method;
    }
  }
  return nullptr;
}

ArtMethod* Class::FindVirtualMethod(
    const DexCache* dex_cache, uint32_t dex_method_idx, size_t pointer_size) {
  for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
    ArtMethod* method = klass->FindDeclaredVirtualMethod(dex_cache, dex_method_idx, pointer_size);
    if (method != nullptr) {
      return method;
    }
  }
  return nullptr;
}

// Resolves an invoke-super targeting an interface method: returns the
// implementation this interface would inherit for `method`.
ArtMethod* Class::FindVirtualMethodForInterfaceSuper(ArtMethod* method, size_t pointer_size) {
  DCHECK(method->GetDeclaringClass()->IsInterface());
  DCHECK(IsInterface()) << "Should only be called on a interface class";
  // Check if we have one defined on this interface first. This includes searching copied ones to
  // get any conflict methods. Conflict methods are copied into each subtype from the supertype. We
  // don't do any indirect method checks here.
  for (ArtMethod& iface_method : GetVirtualMethods(pointer_size)) {
    if (method->HasSameNameAndSignature(&iface_method)) {
      return &iface_method;
    }
  }
  // Abstract declarations seen so far; used both to veto default methods they
  // dominate and as the fallback result when no usable default is found.
  std::vector<ArtMethod*> abstract_methods;
  // Search through the IFTable for a working version. We don't need to check for conflicts
  // because if there was one it would appear in this classes virtual_methods_ above.
  Thread* self = Thread::Current();
  StackHandleScope<2> hs(self);
  MutableHandle<mirror::IfTable> iftable(hs.NewHandle(GetIfTable()));
  MutableHandle<mirror::Class> iface(hs.NewHandle<mirror::Class>(nullptr));
  size_t iftable_count = GetIfTableCount();
  // Find the method. We don't need to check for conflicts because they would have been in the
  // copied virtuals of this interface. Order matters, traverse in reverse topological order; most
  // subtypiest interfaces get visited first.
  for (size_t k = iftable_count; k != 0;) {
    k--;
    DCHECK_LT(k, iftable->Count());
    iface.Assign(iftable->GetInterface(k));
    // Iterate through every declared method on this interface. Each direct method's name/signature
    // is unique so the order of the inner loop doesn't matter.
    for (auto& method_iter : iface->GetDeclaredVirtualMethods(pointer_size)) {
      ArtMethod* current_method = &method_iter;
      if (current_method->HasSameNameAndSignature(method)) {
        if (current_method->IsDefault()) {
          // Handle JLS soft errors, a default method from another superinterface tree can
          // "override" an abstract method(s) from another superinterface tree(s). To do this,
          // ignore any [default] method which are dominated by the abstract methods we've seen so
          // far. Check if overridden by any in abstract_methods. We do not need to check for
          // default_conflicts because we would hit those before we get to this loop.
          bool overridden = false;
          for (ArtMethod* possible_override : abstract_methods) {
            DCHECK(possible_override->HasSameNameAndSignature(current_method));
            // An abstract declaration on a subtype of this interface dominates
            // (overrides) this default method.
            if (iface->IsAssignableFrom(possible_override->GetDeclaringClass())) {
              overridden = true;
              break;
            }
          }
          if (!overridden) {
            return current_method;
          }
        } else {
          // Is not default.
          // This might override another default method. Just stash it for now.
          abstract_methods.push_back(current_method);
        }
      }
    }
  }
  // If we reach here we either never found any declaration of the method (in which case
  // 'abstract_methods' is empty or we found no non-overriden default methods in which case
  // 'abstract_methods' contains a number of abstract implementations of the methods. We choose one
  // of these arbitrarily.
  return abstract_methods.empty() ? nullptr : abstract_methods[0];
}

// Returns this class's <clinit> (static initializer), or null if it has none.
ArtMethod* Class::FindClassInitializer(size_t pointer_size) {
  for (ArtMethod& method : GetDirectMethods(pointer_size)) {
    if (method.IsClassInitializer()) {
      DCHECK_STREQ(method.GetName(), "<clinit>");
      DCHECK_STREQ(method.GetSignature().ToString().c_str(), "()V");
      return &method;
    }
  }
  return nullptr;
}

// Custom binary search to avoid double comparisons from std::binary_search.
static ArtField* FindFieldByNameAndType(LengthPrefixedArray<ArtField>* fields,
                                        const StringPiece& name, const StringPiece& type)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  if (fields == nullptr) {
    return nullptr;
  }
  size_t low = 0;
  size_t high = fields->size();
  ArtField* ret = nullptr;
  while (low < high) {
    size_t mid = (low + high) / 2;
    ArtField& field = fields->At(mid);
    // Fields are sorted by class, then name, then type descriptor. This is verified in dex file
    // verifier. There can be multiple fields with the same in the same class name due to proguard.
    int result = StringPiece(field.GetName()).Compare(name);
    if (result == 0) {
      result = StringPiece(field.GetTypeDescriptor()).Compare(type);
    }
    if (result < 0) {
      low = mid + 1;
    } else if (result > 0) {
      high = mid;
    } else {
      ret = &field;
      break;
    }
  }
  if (kIsDebugBuild) {
    // Cross-check the binary search against a linear scan in debug builds.
    ArtField* found = nullptr;
    for (ArtField& field : MakeIterationRangeFromLengthPrefixedArray(fields)) {
      if (name == field.GetName() && type == field.GetTypeDescriptor()) {
        found = &field;
        break;
      }
    }
    CHECK_EQ(found, ret) << "Found " << PrettyField(found) << " vs " << PrettyField(ret);
  }
  return ret;
}

ArtField* Class::FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type) {
  // Binary search by name. Interfaces are not relevant because they can't contain instance fields.
  return FindFieldByNameAndType(GetIFieldsPtr(), name, type);
}

// Index-based lookup; only meaningful when the dex cache matches this class's.
ArtField* Class::FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) {
  if (GetDexCache() == dex_cache) {
    for (ArtField& field : GetIFields()) {
      if (field.GetDexFieldIndex() == dex_field_idx) {
        return &field;
      }
    }
  }
  return nullptr;
}

ArtField* Class::FindInstanceField(const StringPiece& name, const StringPiece& type) {
  // Is the field in this class, or any of its superclasses?
  // Interfaces are not relevant because they can't contain instance fields.
  for (Class* c = this; c != nullptr; c = c->GetSuperClass()) {
    ArtField* f = c->FindDeclaredInstanceField(name, type);
    if (f != nullptr) {
      return f;
    }
  }
  return nullptr;
}

ArtField* Class::FindInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) {
  // Is the field in this class, or any of its superclasses?
  // Interfaces are not relevant because they can't contain instance fields.
  for (Class* c = this; c != nullptr; c = c->GetSuperClass()) {
    ArtField* f = c->FindDeclaredInstanceField(dex_cache, dex_field_idx);
    if (f != nullptr) {
      return f;
    }
  }
  return nullptr;
}

ArtField* Class::FindDeclaredStaticField(const StringPiece& name, const StringPiece& type) {
  DCHECK(type != nullptr);
  // Static fields are sorted the same way as instance fields; reuse the
  // shared binary search.
  return FindFieldByNameAndType(GetSFieldsPtr(), name, type);
}

ArtField* Class::FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx) {
  if (dex_cache == GetDexCache()) {
    for (ArtField& field : GetSFields()) {
      if (field.GetDexFieldIndex() == dex_field_idx) {
        return &field;
      }
    }
  }
  return nullptr;
}

// Recursive static-field lookup: this class, then (recursively) each direct
// interface, then up the superclass chain. Takes `klass` as a Handle because
// interface resolution below may suspend and move objects.
ArtField* Class::FindStaticField(Thread* self, Handle<Class> klass, const StringPiece& name,
                                 const StringPiece& type) {
  // Is the field in this class (or its interfaces), or any of its
  // superclasses (or their interfaces)?
  for (Class* k = klass.Get(); k != nullptr; k = k->GetSuperClass()) {
    // Is the field in this class?
    ArtField* f = k->FindDeclaredStaticField(name, type);
    if (f != nullptr) {
      return f;
    }
    // Wrap k incase it moves during GetDirectInterface.
    StackHandleScope<1> hs(self);
    HandleWrapper<mirror::Class> h_k(hs.NewHandleWrapper(&k));
    // Is this field in any of this class' interfaces?
    for (uint32_t i = 0; i < h_k->NumDirectInterfaces(); ++i) {
      StackHandleScope<1> hs2(self);
      Handle<mirror::Class> interface(hs2.NewHandle(GetDirectInterface(self, h_k, i)));
      f = FindStaticField(self, interface, name, type);
      if (f != nullptr) {
        return f;
      }
    }
  }
  return nullptr;
}

ArtField* Class::FindStaticField(Thread* self, Handle<Class> klass, const DexCache* dex_cache,
                                 uint32_t dex_field_idx) {
  for (Class* k = klass.Get(); k != nullptr; k = k->GetSuperClass()) {
    // Is the field in this class?
    ArtField* f = k->FindDeclaredStaticField(dex_cache, dex_field_idx);
    if (f != nullptr) {
      return f;
    }
    // Wrap k incase it moves during GetDirectInterface.
    StackHandleScope<1> hs(self);
    HandleWrapper<mirror::Class> h_k(hs.NewHandleWrapper(&k));
    // Is this field in any of this class' interfaces?
    for (uint32_t i = 0; i < h_k->NumDirectInterfaces(); ++i) {
      StackHandleScope<1> hs2(self);
      Handle<mirror::Class> interface(hs2.NewHandle(GetDirectInterface(self, h_k, i)));
      f = FindStaticField(self, interface, dex_cache, dex_field_idx);
      if (f != nullptr) {
        return f;
      }
    }
  }
  return nullptr;
}

// Combined instance+static lookup following the JLS field resolution order:
// declared instance field, declared static field, then interfaces, walking up
// the superclass chain.
ArtField* Class::FindField(Thread* self, Handle<Class> klass, const StringPiece& name,
                           const StringPiece& type) {
  // Find a field using the JLS field resolution order
  for (Class* k = klass.Get(); k != nullptr; k = k->GetSuperClass()) {
    // Is the field in this class?
    ArtField* f = k->FindDeclaredInstanceField(name, type);
    if (f != nullptr) {
      return f;
    }
    f = k->FindDeclaredStaticField(name, type);
    if (f != nullptr) {
      return f;
    }
    // Is this field in any of this class' interfaces?
    StackHandleScope<1> hs(self);
    HandleWrapper<mirror::Class> h_k(hs.NewHandleWrapper(&k));
    for (uint32_t i = 0; i < h_k->NumDirectInterfaces(); ++i) {
      StackHandleScope<1> hs2(self);
      Handle<mirror::Class> interface(hs2.NewHandle(GetDirectInterface(self, h_k, i)));
      f = interface->FindStaticField(self, interface, name, type);
      if (f != nullptr) {
        return f;
      }
    }
  }
  return nullptr;
}

// After verification succeeds, marks every invokable non-native method so it
// skips runtime access checks.
void Class::SetSkipAccessChecksFlagOnAllMethods(size_t pointer_size) {
  DCHECK(IsVerified());
  for (auto& m : GetMethods(pointer_size)) {
    if (!m.IsNative() && m.IsInvokable()) {
      m.SetSkipAccessChecks();
    }
  }
}

// Returns this class's type descriptor. `storage` backs the returned pointer
// for array and proxy classes, whose descriptors are computed on demand.
const char* Class::GetDescriptor(std::string* storage) {
  if (IsPrimitive()) {
    return Primitive::Descriptor(GetPrimitiveType());
  } else if (IsArrayClass()) {
    return GetArrayDescriptor(storage);
  } else if (IsProxyClass()) {
    *storage = Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(this);
    return storage->c_str();
  } else {
    const DexFile& dex_file = GetDexFile();
    const DexFile::TypeId& type_id = dex_file.GetTypeId(GetClassDef()->class_idx_);
    return dex_file.GetTypeDescriptor(type_id);
  }
}
// Same logic as GetDescriptor() with the non-dex cases marked UNLIKELY,
// favoring the common dex-backed path.
const char* Class::GetDescriptorAssumingDex(std::string* storage) {
  if (UNLIKELY(IsPrimitive())) {
    return Primitive::Descriptor(GetPrimitiveType());
  } else if (UNLIKELY(IsArrayClass())) {
    return GetArrayDescriptor(storage);
  } else if (UNLIKELY(IsProxyClass())) {
    *storage = Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(this);
    return storage->c_str();
  } else {
    const DexFile& dex_file = GetDexFile();
    const DexFile::TypeId& type_id = dex_file.GetTypeId(GetClassDef()->class_idx_);
    return dex_file.GetTypeDescriptor(type_id);
  }
}

// Builds "[<component descriptor>" into *storage and returns it.
const char* Class::GetArrayDescriptor(std::string* storage) {
  std::string temp;
  const char* elem_desc = GetComponentType()->GetDescriptor(&temp);
  *storage = "[";
  *storage += elem_desc;
  return storage->c_str();
}

// Null for generated classes (arrays, proxies) that have no dex class def.
const DexFile::ClassDef* Class::GetClassDef() {
  uint16_t class_def_idx = GetDexClassDefIndex();
  if (class_def_idx == DexFile::kDexNoIndex16) {
    return nullptr;
  }
  return &GetDexFile().GetClassDef(class_def_idx);
}

uint16_t Class::GetDirectInterfaceTypeIdx(uint32_t idx) {
  DCHECK(!IsPrimitive());
  DCHECK(!IsArrayClass());
  return GetInterfaceTypeList()->GetTypeItem(idx).type_idx_;
}

// Returns klass's idx-th direct interface. Arrays implicitly implement
// Cloneable (idx 0) and Serializable (idx 1); proxies carry an explicit
// interface array; otherwise the interface is resolved from the dex file.
mirror::Class* Class::GetDirectInterface(Thread* self, Handle<mirror::Class> klass,
                                         uint32_t idx) {
  DCHECK(klass.Get() != nullptr);
  DCHECK(!klass->IsPrimitive());
  if (klass->IsArrayClass()) {
    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
    if (idx == 0) {
      return class_linker->FindSystemClass(self, "Ljava/lang/Cloneable;");
    } else {
      DCHECK_EQ(1U, idx);
      return class_linker->FindSystemClass(self, "Ljava/io/Serializable;");
    }
  } else if (klass->IsProxyClass()) {
    mirror::ObjectArray<mirror::Class>* interfaces = klass.Get()->GetInterfaces();
    DCHECK(interfaces != nullptr);
    return interfaces->Get(idx);
  } else {
    uint16_t type_idx = klass->GetDirectInterfaceTypeIdx(idx);
    mirror::Class* interface = klass->GetDexCache()->GetResolvedType(type_idx);
    if (interface == nullptr) {
      interface =
          Runtime::Current()->GetClassLinker()->ResolveType(klass->GetDexFile(), type_idx,
                                                            klass.Get());
      // Resolution either succeeds or leaves an exception pending.
      CHECK(interface != nullptr || self->IsExceptionPending());
    }
    return interface;
  }
}

// Walks up from `this` to the closest superclass assignable from klass; the
// DCHECK relies on java.lang.Object being assignable from everything.
mirror::Class* Class::GetCommonSuperClass(Handle<Class> klass) {
  DCHECK(klass.Get() != nullptr);
  DCHECK(!klass->IsInterface());
  DCHECK(!IsInterface());
  mirror::Class* common_super_class = this;
  while (!common_super_class->IsAssignableFrom(klass.Get())) {
    mirror::Class* old_common = common_super_class;
    common_super_class = old_common->GetSuperClass();
    DCHECK(common_super_class != nullptr) << PrettyClass(old_common);
  }
  return common_super_class;
}

const char* Class::GetSourceFile() {
  const DexFile& dex_file = GetDexFile();
  const DexFile::ClassDef* dex_class_def = GetClassDef();
  if (dex_class_def == nullptr) {
    // Generated classes have no class def.
    return nullptr;
  }
  return dex_file.GetSourceFile(*dex_class_def);
}

std::string Class::GetLocation() {
  mirror::DexCache* dex_cache = GetDexCache();
  if (dex_cache != nullptr && !IsProxyClass()) {
    return dex_cache->GetLocation()->ToModifiedUtf8();
  }
  // Arrays and proxies are generated and have no corresponding dex file location.
  return "generated class";
}

const DexFile::TypeList* Class::GetInterfaceTypeList() {
  const DexFile::ClassDef* class_def = GetClassDef();
  if (class_def == nullptr) {
    return nullptr;
  }
  return GetDexFile().GetInterfacesList(*class_def);
}

// Copies the linking-time vtable into the space embedded in this class object.
void Class::PopulateEmbeddedVTable(size_t pointer_size) {
  PointerArray* table = GetVTableDuringLinking();
  CHECK(table != nullptr) << PrettyClass(this);
  const size_t table_length = table->GetLength();
  SetEmbeddedVTableLength(table_length);
  for (size_t i = 0; i < table_length; i++) {
    SetEmbeddedVTableEntry(i, table->GetElementPtrSize<ArtMethod*>(i, pointer_size), pointer_size);
  }
  // Keep java.lang.Object class's vtable around for since it's easier
  // to be reused by array classes during their linking.
  if (!IsObjectClass()) {
    SetVTable(nullptr);
  }
}

// Visitor that fixes up the native (non-heap-field) roots of a freshly copied
// class object via the read barrier, so none point at from-space objects.
class ReadBarrierOnNativeRootsVisitor {
 public:
  // Instance-field callback: intentionally a no-op; only roots are fixed up.
  void operator()(mirror::Object* obj ATTRIBUTE_UNUSED, MemberOffset offset ATTRIBUTE_UNUSED,
                  bool is_static ATTRIBUTE_UNUSED) const {}

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    mirror::Object* old_ref = root->AsMirrorPtr();
    mirror::Object* new_ref = ReadBarrier::BarrierForRoot(root);
    if (old_ref != new_ref) {
      // Update the field atomically. This may fail if mutator updates before us, but it's ok.
      auto* atomic_root =
          reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
      atomic_root->CompareExchangeStrongSequentiallyConsistent(
          mirror::CompressedReference<mirror::Object>::FromMirrorPtr(old_ref),
          mirror::CompressedReference<mirror::Object>::FromMirrorPtr(new_ref));
    }
  }
};

// The pre-fence visitor for Class::CopyOf().
class CopyClassVisitor {
 public:
  CopyClassVisitor(Thread* self, Handle<mirror::Class>* orig, size_t new_length,
                   size_t copy_bytes, ImTable* imt, size_t pointer_size)
      : self_(self), orig_(orig), new_length_(new_length), copy_bytes_(copy_bytes),
        imt_(imt), pointer_size_(pointer_size) {
  }

  // Runs on the newly allocated object before it is published: copies the old
  // class data, then re-populates the embedded vtable/imt and class size.
  void operator()(mirror::Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self_);
    Handle<mirror::Class> h_new_class_obj(hs.NewHandle(obj->AsClass()));
    mirror::Object::CopyObject(self_, h_new_class_obj.Get(), orig_->Get(), copy_bytes_);
    mirror::Class::SetStatus(h_new_class_obj, Class::kStatusResolving, self_);
    h_new_class_obj->PopulateEmbeddedVTable(pointer_size_);
    h_new_class_obj->SetImt(imt_, pointer_size_);
    h_new_class_obj->SetClassSize(new_length_);
    // Visit all of the references to make sure there is no from space references in the native
    // roots.
static_cast<mirror::Object*>(h_new_class_obj.Get())->VisitReferences( ReadBarrierOnNativeRootsVisitor(), VoidFunctor()); } private: Thread* const self_; Handle<mirror::Class>* const orig_; const size_t new_length_; const size_t copy_bytes_; ImTable* imt_; const size_t pointer_size_; DISALLOW_COPY_AND_ASSIGN(CopyClassVisitor); }; Class* Class::CopyOf(Thread* self, int32_t new_length, ImTable* imt, size_t pointer_size) { DCHECK_GE(new_length, static_cast<int32_t>(sizeof(Class))); // We may get copied by a compacting GC. StackHandleScope<1> hs(self); Handle<mirror::Class> h_this(hs.NewHandle(this)); gc::Heap* heap = Runtime::Current()->GetHeap(); // The num_bytes (3rd param) is sizeof(Class) as opposed to SizeOf() // to skip copying the tail part that we will overwrite here. CopyClassVisitor visitor(self, &h_this, new_length, sizeof(Class), imt, pointer_size); mirror::Object* new_class = kMovingClasses ? heap->AllocObject<true>(self, java_lang_Class_.Read(), new_length, visitor) : heap->AllocNonMovableObject<true>(self, java_lang_Class_.Read(), new_length, visitor); if (UNLIKELY(new_class == nullptr)) { self->AssertPendingOOMException(); return nullptr; } return new_class->AsClass(); } bool Class::ProxyDescriptorEquals(const char* match) { DCHECK(IsProxyClass()); return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(this) == match; } // TODO: Move this to java_lang_Class.cc? ArtMethod* Class::GetDeclaredConstructor( Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args, size_t pointer_size) { for (auto& m : GetDirectMethods(pointer_size)) { // Skip <clinit> which is a static constructor, as well as non constructors. if (m.IsStatic() || !m.IsConstructor()) { continue; } // May cause thread suspension and exceptions. 
if (m.GetInterfaceMethodIfProxy(sizeof(void*))->EqualParameters(args)) { return &m; } if (UNLIKELY(self->IsExceptionPending())) { return nullptr; } } return nullptr; } uint32_t Class::Depth() { uint32_t depth = 0; for (Class* klass = this; klass->GetSuperClass() != nullptr; klass = klass->GetSuperClass()) { depth++; } return depth; } uint32_t Class::FindTypeIndexInOtherDexFile(const DexFile& dex_file) { std::string temp; const DexFile::TypeId* type_id = dex_file.FindTypeId(GetDescriptor(&temp)); return (type_id == nullptr) ? DexFile::kDexNoIndex : dex_file.GetIndexForTypeId(*type_id); } template <bool kTransactionActive> mirror::Method* Class::GetDeclaredMethodInternal(Thread* self, mirror::Class* klass, mirror::String* name, mirror::ObjectArray<mirror::Class>* args) { // Covariant return types permit the class to define multiple // methods with the same name and parameter types. Prefer to // return a non-synthetic method in such situations. We may // still return a synthetic method to handle situations like // escalated visibility. We never return miranda methods that // were synthesized by the runtime. constexpr uint32_t kSkipModifiers = kAccMiranda | kAccSynthetic; StackHandleScope<3> hs(self); auto h_method_name = hs.NewHandle(name); if (UNLIKELY(h_method_name.Get() == nullptr)) { ThrowNullPointerException("name == null"); return nullptr; } auto h_args = hs.NewHandle(args); Handle<mirror::Class> h_klass = hs.NewHandle(klass); ArtMethod* result = nullptr; const size_t pointer_size = kTransactionActive ? Runtime::Current()->GetClassLinker()->GetImagePointerSize() : sizeof(void*); for (auto& m : h_klass->GetDeclaredVirtualMethods(pointer_size)) { auto* np_method = m.GetInterfaceMethodIfProxy(pointer_size); // May cause thread suspension. 
mirror::String* np_name = np_method->GetNameAsString(self); if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) { if (UNLIKELY(self->IsExceptionPending())) { return nullptr; } continue; } auto modifiers = m.GetAccessFlags(); if ((modifiers & kSkipModifiers) == 0) { return mirror::Method::CreateFromArtMethod<kTransactionActive>(self, &m); } if ((modifiers & kAccMiranda) == 0) { result = &m; // Remember as potential result if it's not a miranda method. } } if (result == nullptr) { for (auto& m : h_klass->GetDirectMethods(pointer_size)) { auto modifiers = m.GetAccessFlags(); if ((modifiers & kAccConstructor) != 0) { continue; } auto* np_method = m.GetInterfaceMethodIfProxy(pointer_size); // May cause thread suspension. mirror::String* np_name = np_method->GetNameAsString(self); if (np_name == nullptr) { self->AssertPendingException(); return nullptr; } if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) { if (UNLIKELY(self->IsExceptionPending())) { return nullptr; } continue; } if ((modifiers & kSkipModifiers) == 0) { return mirror::Method::CreateFromArtMethod<kTransactionActive>(self, &m); } // Direct methods cannot be miranda methods, so this potential result must be synthetic. result = &m; } } return result != nullptr ? mirror::Method::CreateFromArtMethod<kTransactionActive>(self, result) : nullptr; } template mirror::Method* Class::GetDeclaredMethodInternal<false>(Thread* self, mirror::Class* klass, mirror::String* name, mirror::ObjectArray<mirror::Class>* args); template mirror::Method* Class::GetDeclaredMethodInternal<true>(Thread* self, mirror::Class* klass, mirror::String* name, mirror::ObjectArray<mirror::Class>* args); template <bool kTransactionActive> mirror::Constructor* Class::GetDeclaredConstructorInternal( Thread* self, mirror::Class* klass, mirror::ObjectArray<mirror::Class>* args) { StackHandleScope<1> hs(self); const size_t pointer_size = kTransactionActive ? 
      Runtime::Current()->GetClassLinker()->GetImagePointerSize() : sizeof(void*);
  ArtMethod* result = klass->GetDeclaredConstructor(self, hs.NewHandle(args), pointer_size);
  return result != nullptr
      ? mirror::Constructor::CreateFromArtMethod<kTransactionActive>(self, result)
      : nullptr;
}

// Explicit instantiations for the non-transactional and transactional cases.
template mirror::Constructor* Class::GetDeclaredConstructorInternal<false>(
    Thread* self, mirror::Class* klass, mirror::ObjectArray<mirror::Class>* args);
template mirror::Constructor* Class::GetDeclaredConstructorInternal<true>(
    Thread* self, mirror::Class* klass, mirror::ObjectArray<mirror::Class>* args);

// Reads the inner-class access flags from the dex file annotations, falling
// back to default_value for proxy/generated classes or when none are present.
int32_t Class::GetInnerClassFlags(Handle<Class> h_this, int32_t default_value) {
  if (h_this->IsProxyClass() || h_this->GetDexCache() == nullptr) {
    return default_value;
  }
  uint32_t flags;
  if (!h_this->GetDexFile().GetInnerClassFlags(h_this, &flags)) {
    return default_value;
  }
  return flags;
}

}  // namespace mirror
}  // namespace art
/********************************************************************** * Copyright (c) 2008-2013, Alliance for Sustainable Energy. * All rights reserved. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA **********************************************************************/ #ifndef MODEL_AVAILABILITYMANAGERSCHEDULED_IMPL_HPP #define MODEL_AVAILABILITYMANAGERSCHEDULED_IMPL_HPP #include <model/ModelAPI.hpp> #include <model/ModelObject_Impl.hpp> namespace openstudio { namespace model { // TODO: Check the following class names against object getters and setters. 
class Schedule;

namespace detail {

  /** AvailabilityManagerScheduled_Impl is a ModelObject_Impl that is the implementation class for AvailabilityManagerScheduled.*/
  class MODEL_API AvailabilityManagerScheduled_Impl : public ModelObject_Impl {
    Q_OBJECT;

    // Exposes the schedule as a generic ModelObject for Qt property access.
    Q_PROPERTY(boost::optional<openstudio::model::ModelObject> schedule READ scheduleAsModelObject WRITE setScheduleAsModelObject);

   public:
    // Construct from an IdfObject (e.g. when loading a file).
    AvailabilityManagerScheduled_Impl(const IdfObject& idfObject, Model_Impl* model, bool keepHandle);

    // Construct from another workspace object (e.g. when cloning across workspaces).
    AvailabilityManagerScheduled_Impl(const openstudio::detail::WorkspaceObject_Impl& other, Model_Impl* model, bool keepHandle);

    // Copy construction within a model.
    AvailabilityManagerScheduled_Impl(const AvailabilityManagerScheduled_Impl& other, Model_Impl* model, bool keepHandle);

    virtual ~AvailabilityManagerScheduled_Impl() {}

    virtual const std::vector<std::string>& outputVariableNames() const;

    virtual IddObjectType iddObjectType() const;

    virtual std::vector<ScheduleTypeKey> getScheduleTypeKeys(const Schedule& schedule) const;

    // Returns the availability schedule.
    Schedule schedule() const;

    // Sets the availability schedule; returns false on failure.
    bool setSchedule(Schedule& schedule);

    ModelObject clone(Model model) const;

   private:
    REGISTER_LOGGER("openstudio.model.AvailabilityManagerScheduled");

    // Returns the schedule if set; backs the non-optional schedule() getter.
    boost::optional<Schedule> optionalSchedule() const;

    // Qt property shims converting between Schedule and generic ModelObject.
    boost::optional<ModelObject> scheduleAsModelObject() const;
    bool setScheduleAsModelObject(const boost::optional<ModelObject>& modelObject);
  };

} // detail

} // model
} // openstudio

#endif // MODEL_AVAILABILITYMANAGERSCHEDULED_IMPL_HPP
// Copyright (c) 2010 Satoshi Nakamoto
// Copyright (c) 2009-2014 The Bitcoin Core developers
// Copyright (c) 2014-2021 The Dash Core developers
// Copyright (c) 2021 The Bytz Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <chainparams.h>
#include <consensus/merkle.h>

#include <tinyformat.h>
#include <util.h>
#include <utilstrencodings.h>

#include <arith_uint256.h>

#include <assert.h>

#include <chainparamsseeds.h>

/**
 * Build a genesis block from its raw ingredients: a single coinbase
 * transaction whose scriptSig embeds the timestamp message, wrapped in a
 * block header with the given time/nonce/bits/version.
 *
 * @param pszTimestamp        newspaper-headline style message embedded in the coinbase scriptSig
 * @param genesisOutputScript scriptPubKey paying out the genesis reward
 * @param genesisReward       value of the single coinbase output
 */
static CBlock CreateGenesisBlock(const char* pszTimestamp, const CScript& genesisOutputScript, uint32_t nTime, uint32_t nNonce, uint32_t nBits, int32_t nVersion, const CAmount& genesisReward)
{
    CMutableTransaction txNew;
    txNew.nVersion = 1;
    txNew.vin.resize(1);
    txNew.vout.resize(1);
    // 486604799 == 0x1d00ffff; the constant-plus-CScriptNum(4)-plus-message
    // layout matches the historic Bitcoin genesis coinbase scriptSig.
    txNew.vin[0].scriptSig = CScript() << 486604799 << CScriptNum(4) << std::vector<unsigned char>((const unsigned char*)pszTimestamp, (const unsigned char*)pszTimestamp + strlen(pszTimestamp));
    txNew.vout[0].nValue = genesisReward;
    txNew.vout[0].scriptPubKey = genesisOutputScript;

    CBlock genesis;
    genesis.nTime = nTime;
    genesis.nBits = nBits;
    genesis.nNonce = nNonce;
    genesis.nVersion = nVersion;
    genesis.vtx.push_back(MakeTransactionRef(std::move(txNew)));
    // Genesis has no predecessor; merkle root covers the one coinbase tx.
    genesis.hashPrevBlock.SetNull();
    genesis.hashMerkleRoot = BlockMerkleRoot(genesis);
    return genesis;
}

/**
 * Build a devnet "second genesis" block chained onto prevBlockHash.
 * The devnet name is committed into the coinbase scriptSig (together with
 * height 1 for BIP34), and the single output is an unspendable OP_RETURN.
 */
static CBlock CreateDevNetGenesisBlock(const uint256 &prevBlockHash, const std::string& devNetName, uint32_t nTime, uint32_t nNonce, uint32_t nBits, const CAmount& genesisReward)
{
    assert(!devNetName.empty());

    CMutableTransaction txNew;
    txNew.nVersion = 1;
    txNew.vin.resize(1);
    txNew.vout.resize(1);
    // put height (BIP34) and devnet name into coinbase
    txNew.vin[0].scriptSig = CScript() << 1 << std::vector<unsigned char>(devNetName.begin(), devNetName.end());
    txNew.vout[0].nValue = genesisReward;
    txNew.vout[0].scriptPubKey = CScript() << OP_RETURN;

    CBlock genesis;
    genesis.nTime = nTime;
    genesis.nBits = nBits;
    genesis.nNonce = nNonce;
    genesis.nVersion = 4;
    genesis.vtx.push_back(MakeTransactionRef(std::move(txNew)));
    genesis.hashPrevBlock = prevBlockHash;
    genesis.hashMerkleRoot = BlockMerkleRoot(genesis);
    return genesis;
}

/**
 * Build the genesis block. Note that the output of its generation
 * transaction cannot be spent since it did not originally exist in the
 * database.
 *
 * CBlock(hash=00000ffd590b14, ver=1, hashPrevBlock=00000000000000, hashMerkleRoot=e0028e, nTime=1390095618, nBits=1e0ffff0, nNonce=28917698, vtx=1)
 *   CTransaction(hash=e0028e, ver=1, vin.size=1, vout.size=1, nLockTime=0)
 *     CTxIn(COutPoint(000000, -1), coinbase 04ffff001d01044c5957697265642030392f4a616e2f3230313420546865204772616e64204578706572696d656e7420476f6573204c6976653a204f76657273746f636b2e636f6d204973204e6f7720416363657074696e6720426974636f696e73)
 *     CTxOut(nValue=50.00000000, scriptPubKey=0xA9037BAC7050C479B121CF)
 *   vMerkleTree: e0028e
 */
static CBlock CreateGenesisBlock(uint32_t nTime, uint32_t nNonce, uint32_t nBits, int32_t nVersion, const CAmount& genesisReward)
{
    // Bytz-specific timestamp message and the (Bitcoin-default) genesis output key.
    const char* pszTimestamp = "Investing.com 23/Apr/2018 Facebook Gets First Downgrade Since Data Scandal";
    const CScript genesisOutputScript = CScript() << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f") << OP_CHECKSIG;
    return CreateGenesisBlock(pszTimestamp, genesisOutputScript, nTime, nNonce, nBits, nVersion, genesisReward);
}

/**
 * Override BIP9-style deployment parameters for deployment `d`.
 * nStartTime/nTimeout are always applied; the remaining fields are only
 * applied when != -1, i.e. -1 means "keep the compiled-in default".
 */
void CChainParams::UpdateVersionBitsParameters(Consensus::DeploymentPos d, int64_t nStartTime, int64_t nTimeout, int64_t nWindowSize, int64_t nThresholdStart, int64_t nThresholdMin, int64_t nFalloffCoeff)
{
    consensus.vDeployments[d].nStartTime = nStartTime;
    consensus.vDeployments[d].nTimeout = nTimeout;
    if (nWindowSize != -1) {
        consensus.vDeployments[d].nWindowSize = nWindowSize;
    }
    if (nThresholdStart != -1) {
        consensus.vDeployments[d].nThresholdStart = nThresholdStart;
    }
    if (nThresholdMin != -1) {
consensus.vDeployments[d].nThresholdMin = nThresholdMin;
    }
    if (nFalloffCoeff != -1) {
        consensus.vDeployments[d].nFalloffCoeff = nFalloffCoeff;
    }
}

/** Override the DIP3 (deterministic masternodes) activation height.
 *  NOTE: the enforcement-height parameter is currently unused (assignment is commented out). */
void CChainParams::UpdateDIP3Parameters(int nActivationHeight, int nEnforcementHeight)
{
    consensus.DIP0003Height = nActivationHeight;
    // consensus.DIP0003EnforcementHeight = nEnforcementHeight;
}

/** Override the DIP8 (ChainLocks) activation height. */
void CChainParams::UpdateDIP8Parameters(int nActivationHeight)
{
    consensus.DIP0008Height = nActivationHeight;
}

/** Override masternode-payment / budget / superblock start heights (regtest-style knobs). */
void CChainParams::UpdateBudgetParameters(int nMasternodePaymentsStartBlock, int nBudgetPaymentsStartBlock, int nSuperblockStartBlock)
{
    consensus.nMasternodePaymentsStartBlock = nMasternodePaymentsStartBlock;
    consensus.nBudgetPaymentsStartBlock = nBudgetPaymentsStartBlock;
    consensus.nSuperblockStartBlock = nSuperblockStartBlock;
}

/** Override devnet subsidy/difficulty knobs. */
void CChainParams::UpdateSubsidyAndDiffParams(int nMinimumDifficultyBlocks, int nHighSubsidyBlocks, int nHighSubsidyFactor)
{
    consensus.nMinimumDifficultyBlocks = nMinimumDifficultyBlocks;
    consensus.nHighSubsidyBlocks = nHighSubsidyBlocks;
    consensus.nHighSubsidyFactor = nHighSubsidyFactor;
}

/** Select which LLMQ type signs ChainLocks. */
void CChainParams::UpdateLLMQChainLocks(Consensus::LLMQType llmqType) {
    consensus.llmqTypeChainLocks = llmqType;
}

/** Select which LLMQ type signs InstantSend locks. */
void CChainParams::UpdateLLMQInstantSend(Consensus::LLMQType llmqType) {
    consensus.llmqTypeInstantSend = llmqType;
}

/** Resize the LLMQ_TEST quorum; minSize/threshold/dkgBadVotesThreshold all follow `threshold`. */
void CChainParams::UpdateLLMQTestParams(int size, int threshold) {
    auto& params = consensus.llmqs.at(Consensus::LLMQ_TEST);
    params.size = size;
    params.minSize = threshold;
    params.threshold = threshold;
    params.dkgBadVotesThreshold = threshold;
}

/** Resize the LLMQ_DEVNET quorum; same convention as UpdateLLMQTestParams. */
void CChainParams::UpdateLLMQDevnetParams(int size, int threshold) {
    auto& params = consensus.llmqs.at(Consensus::LLMQ_DEVNET);
    params.size = size;
    params.minSize = threshold;
    params.threshold = threshold;
    params.dkgBadVotesThreshold = threshold;
}

/**
 * Mine (by brute-forcing the nonce) the devnet genesis block that chains
 * onto `prevBlock`. The devnet name comes from -devnet via gArgs.
 */
static CBlock FindDevNetGenesisBlock(const CBlock &prevBlock, const CAmount& reward)
{
    std::string devNetName = gArgs.GetDevNetName();
    assert(!devNetName.empty());

    CBlock block =
CreateDevNetGenesisBlock(prevBlock.GetHash(), devNetName.c_str(), prevBlock.nTime + 1, 0, prevBlock.nBits, reward);

    // Brute-force a nonce whose hash meets the (low-difficulty) devnet target.
    arith_uint256 bnTarget;
    bnTarget.SetCompact(block.nBits);

    for (uint32_t nNonce = 0; nNonce < UINT32_MAX; nNonce++) {
        block.nNonce = nNonce;

        uint256 hash = block.GetHash();
        if (UintToArith256(hash) <= bnTarget)
            return block;
    }

    // This is very unlikely to happen as we start the devnet with a very low difficulty. In many cases even the first
    // iteration of the above loop will give a result already
    error("FindDevNetGenesisBlock: could not find devnet genesis block for %s", devNetName);
    assert(false);
}

// this one is for testing only
static Consensus::LLMQParams llmq_test = {
        .type = Consensus::LLMQ_TEST,
        .name = "llmq_test",
        .size = 3,
        .minSize = 2,
        .threshold = 2,

        .dkgInterval = 30, // one DKG every 30 minutes
        .dkgPhaseBlocks = 3,
        .dkgMiningWindowStart = 15, // dkgPhaseBlocks * 5 = after finalization
        .dkgMiningWindowEnd = 27,
        .dkgBadVotesThreshold = 2,

        .signingActiveQuorumCount = 2, // just a few ones to allow easier testing

        .keepOldConnections = 3,
        .recoveryMembers = 3,
};

// this one is for testing only
static Consensus::LLMQParams llmq_test_v17 = {
        .type = Consensus::LLMQ_TEST_V17,
        .name = "llmq_test_v17",
        .size = 3,
        .minSize = 2,
        .threshold = 2,

        .dkgInterval = 30, // one DKG per 30 minutes
        .dkgPhaseBlocks = 3,
        .dkgMiningWindowStart = 15, // dkgPhaseBlocks * 5 = after finalization
        .dkgMiningWindowEnd = 27,
        .dkgBadVotesThreshold = 2,

        .signingActiveQuorumCount = 2, // just a few ones to allow easier testing

        .keepOldConnections = 3,
        .recoveryMembers = 3,
};

// this one is for devnets only
static Consensus::LLMQParams llmq_devnet = {
        .type = Consensus::LLMQ_DEVNET,
        .name = "llmq_devnet",
        .size = 10,
        .minSize = 7,
        .threshold = 6,

        .dkgInterval = 30, // one DKG per thirty minutes
        .dkgPhaseBlocks = 3,
        .dkgMiningWindowStart = 15, // dkgPhaseBlocks * 5 = after finalization
        .dkgMiningWindowEnd = 27,
        .dkgBadVotesThreshold = 7,

        .signingActiveQuorumCount = 3, // just a few ones to allow easier testing

        .keepOldConnections = 4,
        .recoveryMembers = 6,
};

// Standard quorum used for InstantSend signing on mainnet/testnet.
static Consensus::LLMQParams llmq20_60 = {
        .type = Consensus::LLMQ_20_60,
        .name = "llmq_20_60",
        .size = 20,
        .minSize = 16,
        .threshold = 12,

        .dkgInterval = 60, // one DKG per hour
        .dkgPhaseBlocks = 4,
        .dkgMiningWindowStart = 20, // dkgPhaseBlocks * 5 = after finalization
        .dkgMiningWindowEnd = 32,
        .dkgBadVotesThreshold = 14,

        .signingActiveQuorumCount = 24, // a full day worth of LLMQs

        .keepOldConnections = 25,
        .recoveryMembers = 12,
};

// Larger quorum used for ChainLocks on mainnet.
static Consensus::LLMQParams llmq40_60 = {
        .type = Consensus::LLMQ_40_60,
        .name = "llmq_40_60",
        .size = 40,
        .minSize = 30,
        .threshold = 24,

        .dkgInterval = 60 * 12, // one DKG every 12 hours
        .dkgPhaseBlocks = 6,
        .dkgMiningWindowStart = 30, // dkgPhaseBlocks * 5 = after finalization
        .dkgMiningWindowEnd = 42,
        .dkgBadVotesThreshold = 30,

        .signingActiveQuorumCount = 4, // two days worth of LLMQs

        .keepOldConnections = 5,
        .recoveryMembers = 20,
};

// Used for deployment and min-proto-version signalling, so it needs a higher threshold
static Consensus::LLMQParams llmq40_85 = {
        .type = Consensus::LLMQ_40_85,
        .name = "llmq_40_85",
        .size = 40,
        .minSize = 35,
        .threshold = 34,

        .dkgInterval = 60 * 24, // one DKG every 24 hours
        .dkgPhaseBlocks = 6,
        .dkgMiningWindowStart = 30, // dkgPhaseBlocks * 5 = after finalization
        .dkgMiningWindowEnd = 60, // give it a larger mining window to make sure it is mined
        .dkgBadVotesThreshold = 30,

        .signingActiveQuorumCount = 4, // four days worth of LLMQs

        .keepOldConnections = 5,
        .recoveryMembers = 20,
};

// Used for Platform
static Consensus::LLMQParams llmq20_70 = {
        .type = Consensus::LLMQ_20_70,
        .name = "llmq_20_70",
        .size = 20,
        .minSize = 16,
        .threshold = 14,

        .dkgInterval = 60, // one DKG per hour
        .dkgPhaseBlocks = 4,
        .dkgMiningWindowStart = 20, // dkgPhaseBlocks * 5 = after finalization
        .dkgMiningWindowEnd = 32,
        .dkgBadVotesThreshold = 14,

        .signingActiveQuorumCount = 24, // a full day worth of LLMQs

        .keepOldConnections = 25,
.recoveryMembers = 50,
};

/**
 * Lazily construct and cache the libzerocoin parameter sets derived from the
 * network's zerocoin modulus. The hex- and dec-parsed variants are kept as
 * function-local statics, so each is built at most once per process.
 *
 * @param useModulusV1 true selects the hex-parsed (V1) parameter set.
 * NOTE(review): `assert(this)` is always true in well-formed C++ (and UB to
 * rely on otherwise) — compilers may warn/optimize it away; consider removing.
 */
libzerocoin::ZerocoinParams* CChainParams::Zerocoin_Params(bool useModulusV1) const
{
    assert(this);
    static CBigNum bnHexModulus = 0;
    if (!bnHexModulus)
        bnHexModulus.SetHex(consensus.zerocoinModulus);
    static libzerocoin::ZerocoinParams ZCParamsHex = libzerocoin::ZerocoinParams(bnHexModulus);
    static CBigNum bnDecModulus = 0;
    if (!bnDecModulus)
        bnDecModulus.SetDec(consensus.zerocoinModulus);
    static libzerocoin::ZerocoinParams ZCParamsDec = libzerocoin::ZerocoinParams(bnDecModulus);

    if (useModulusV1)
        return &ZCParamsHex;

    return &ZCParamsDec;
}

/**
 * Main network
 */
/**
 * What makes a good checkpoint block?
 * + Is surrounded by blocks with reasonable timestamps
 *   (no blocks before with a timestamp after, none after with
 *    timestamp before)
 * + Contains no strange transactions
 */

class CMainParams : public CChainParams {
public:
    CMainParams() {
        strNetworkID = "main";
        consensus.nSubsidyHalvingInterval = 210240; // Note: actual number of blocks per calendar year with DGW v3 is ~200700 (for example 449750 - 249050)
        consensus.nMasternodePaymentsStartBlock = 100000; // not true, but it's ok as long as it's less then nMasternodePaymentsIncreaseBlock
        consensus.nMasternodePaymentsIncreaseBlock = 158000; // actual historical value
        consensus.nMasternodePaymentsIncreasePeriod = 576*30; // 17280 - actual historical value
        consensus.nInstantSendConfirmationsRequired = 6;
        consensus.nInstantSendKeepLock = 24;
        // Budget/superblock payments are effectively disabled on mainnet (start = INT_MAX).
        consensus.nBudgetPaymentsStartBlock = std::numeric_limits<int>::max();
        consensus.nBudgetPaymentsCycleBlocks = 43200; // (60*24*30)
        consensus.nBudgetPaymentsWindowBlocks = 2880;
        consensus.nSuperblockStartBlock = std::numeric_limits<int>::max();
        consensus.nSuperblockStartHash = uint256(); // do not check this
        consensus.nSuperblockCycle = 43200; // (60*24*30)
        consensus.nGovernanceMinQuorum = 10;
        consensus.nGovernanceFilterElements = 20000;
        consensus.nMasternodeMinimumConfirmations = 15;
        // Height at which the bundled v17 feature set (BIP65/CSV/BIP147/DIP1/DIP3/DIP8/ATP...) activates.
        consensus.V17DeploymentHeight = 1669300;
        consensus.BIP34Height = 1;
        consensus.BIP34Hash = uint256S("000002f68dbbf1fcfacb8f0b4e64083efdd2f07a906728ee068d573ffa5bcb4e");
        consensus.BIP65Height = consensus.V17DeploymentHeight;
        consensus.BIP66Height = 1; // 000002f68dbbf1fcfacb8f0b4e64083efdd2f07a906728ee068d573ffa5bcb4e
        consensus.CSVHeight = consensus.V17DeploymentHeight;
        consensus.BIP147Height = consensus.V17DeploymentHeight;
        consensus.DIP0001Height = consensus.V17DeploymentHeight;
        consensus.DIP0003Height = consensus.V17DeploymentHeight;
        // consensus.DIP0003EnforcementHeight = std::numeric_limits<int>::max();
        consensus.DIP0003EnforcementHash = uint256();
        consensus.DIP0008Height = consensus.V17DeploymentHeight;
        consensus.powLimit = uint256S("00000fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); // ~uint256(0) >> 20
        consensus.nPowTargetTimespan = 24 * 60 * 60; // Bytz: 1 day
        consensus.nPowTargetSpacing = 1 * 60; // Bytz: 1 minute
        consensus.fPowAllowMinDifficultyBlocks = false;
        consensus.fPowNoRetargeting = false;
        // Bytz specific parameters
        // Proof of Stake parameters
        consensus.nPosStartHeight = 201;
        consensus.nBlockTimeProtocolV2 = consensus.V17DeploymentHeight;
        consensus.posLimit = uint256S("000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); // ~uint256(0) >> 24
        consensus.posLimit_V2 = uint256S("00000fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); // ~uint256(0) >> 20
        consensus.nTimeSlotLength = 15;
        consensus.nPosTargetSpacing = 1 * 60; // 1 minute
        consensus.nPosTargetTimespan = 40 * 60; // 40 minutes
        consensus.nPosTargetTimespan_V2 = 2 * consensus.nTimeSlotLength * 60; // 30 minutes
        consensus.nStakeMinDepth = 600;
        consensus.nStakeMinAge = 60 * 60; // 1 hour
        consensus.nBlockStakeModifierV1A = 1000;
        consensus.nBlockStakeModifierV2 = consensus.V17DeploymentHeight;
        consensus.strCarbonOffsetAddress = "8GDeXyYNyc1o34v8BjtS3e1ZzvLDaqXNNK";
        // ATP parameters
        consensus.ATPStartHeight = consensus.V17DeploymentHeight;
        consensus.BytzAddrPrefix = "bytz";
        consensus.strTokenManagementKey = "sYCxBVHJx3A1tt7B1tFnaCJGnci3hvEf2c"; // 04d449cc1ac45d327c34d8b116797ad9ed287980a9199ea48dc4c8beab90ae2ded738e826ba0b27b5571d63884d985e2a50afbe8eef2925fc280af51a2a2d5e0e0
        consensus.nOpGroupNewRequiredConfirmations = 1;
        // Other
        consensus.nCoinbaseMaturity = 100;
        consensus.AccruedCarbonOffsetStartHeight = consensus.V17DeploymentHeight;
        consensus.AccruedCarbonOffsetWindow = 1000;
        // Zerocoin
        consensus.nZerocoinRequiredStakeDepth = 200;
        consensus.nZerocoinStartHeight = 25;
        consensus.nZerocoinStartTime = 1524496462;
        consensus.nBlockZerocoinV2 = 60;
        consensus.nPublicZCSpends = std::numeric_limits<int>::max();
        consensus.nFakeSerialBlockheightEnd = -1;
        consensus.nMintRequiredConfirmations = 20; //the maximum amount of confirmations until accumulated in 19
        consensus.nRequiredAccumulation = 1;
        // RSA-2048-style modulus used to derive the zerocoin accumulator parameters (see Zerocoin_Params above).
        consensus.zerocoinModulus = "25195908475657893494027183240048398571429282126204032027777137836043662020707595556264018525880784"
            "4069182906412495150821892985591491761845028084891200728449926873928072877767359714183472702618963750149718246911"
            "6507761337985909570009733045974880842840179742910064245869181719511874612151517265463228221686998754918242243363"
            "7259085141865462043576798423387184774447920739934236584823824281198163815010674810451660377306056201619676256133"
            "8441436038339044149526344321901146575444541784240209246165157233507787077498171257724679629263863563732899121548"
            "31438167899885040445364023527381951378636564391212010397122822120720357";
        consensus.nRuleChangeActivationThreshold = 1916; // 95% of 2016
        consensus.nMinerConfirmationWindow = 2016; // nPowTargetTimespan / nPowTargetSpacing
        consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].bit = 25;
        consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nStartTime = 1199145601; // January 1, 2008
        consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nTimeout = 1230767999; // December 31, 2008

        // The best chain should have at least this much work.
consensus.nMinimumChainWork = uint256S("0x000000000000000000000000000000000000000000000e9b67326dfc16f4713f"); // 1623262

        // By default assume that the signatures in ancestors of this block are valid.
        consensus.defaultAssumeValid = uint256S("0x0");

        /**
         * The message start string is designed to be unlikely to occur in normal data.
         * The characters are rarely used upper ASCII, not valid as UTF-8, and produce
         * a large 32-bit integer with any alignment.
         */
        pchMessageStart[0] = 0xa3;
        pchMessageStart[1] = 0xea;
        pchMessageStart[2] = 0xb5;
        pchMessageStart[3] = 0x81;
        nDefaultPort = 37415;
        nPruneAfterHeight = 100000;

        // Mainnet genesis: hard assertions guard against accidental parameter drift.
        genesis = CreateGenesisBlock(1524496461, 67657104, 0x1e0ffff0, 1, 0 * COIN);
        consensus.hashGenesisBlock = genesis.GetHash();
        assert(consensus.hashGenesisBlock == uint256S("0x00000feb03167c4a4fa9f2bafcaea0e9f7e5646330e13c69e7ffa2dce58ace44"));
        assert(genesis.hashMerkleRoot == uint256S("0x80290404060ff7ff5bc6a42f755d24f6087ba5685474a5c8ffafac65de8b2bbf"));

        // Note that of those which support the service bits prefix, most only support a subset of
        // possible options.
        // This is fine at runtime as we'll fall back to using them as a oneshot if they don't support the
        // service bits we want, but we should get them updated to support all service bits wanted by any
        // release ASAP to avoid it where possible.
        vSeeds.emplace_back("main.seeder1.bytz.gg"); // Bytz US1 DNS Seeder
        vSeeds.emplace_back("main.seeder2.bytz.gg"); // Bytz EU1 DNS Seeder

        // Bytz addresses start with 'X'
        base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1,125);
        // Bytz script addresses start with '7'
        base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1,18);
        // Bytz private keys start with '7' or 'X'
        base58Prefixes[SECRET_KEY] = std::vector<unsigned char>(1,140);
        // Bytz BIP32 pubkeys start with 'xpub' (Bitcoin defaults)
        base58Prefixes[EXT_PUBLIC_KEY] = {0x02, 0x2D, 0x25, 0x33};
        // Bytz BIP32 prvkeys start with 'xprv' (Bitcoin defaults)
        base58Prefixes[EXT_SECRET_KEY] = {0x02, 0x21, 0x31, 0x2B};

        // Bytz BIP44 coin type is '416'
        nExtCoinType = 416;

        vFixedSeeds = std::vector<SeedSpec6>(pnSeed6_main, pnSeed6_main + ARRAYLEN(pnSeed6_main));

        // long living quorum params
        consensus.llmqs[Consensus::LLMQ_20_60] = llmq20_60;
        consensus.llmqs[Consensus::LLMQ_40_60] = llmq40_60;
        consensus.llmqs[Consensus::LLMQ_40_85] = llmq40_85;
        consensus.llmqs[Consensus::LLMQ_20_70] = llmq20_70;
        consensus.llmqTypeChainLocks = Consensus::LLMQ_40_60;
        consensus.llmqTypeInstantSend = Consensus::LLMQ_20_60;
        consensus.llmqTypePlatform = Consensus::LLMQ_20_70;

        fDefaultConsistencyChecks = false;
        fRequireStandard = true;
        fRequireRoutableExternalIP = true;
        fMineBlocksOnDemand = false;
        fAllowMultipleAddressesFromGroup = false;
        fAllowMultiplePorts = false;
        nLLMQConnectionRetryTimeout = 60;

        nPoolMinParticipants = 3;
        nPoolMaxParticipants = 20;
        nFulfilledRequestExpireTime = 60*60; // fulfilled requests expire in 1 hour

        vSporkAddresses = {"sYJv3DxNMecQx7Z6FuQqqLGRBFjCVVpxmN"}; // 04a555983e950ddde95d4f236a1faa44eaf7399512342b40163afdc078dea8a941eb48834c61f22bd9632cbb2d7971bfdeddb6e476dc28726449eeb867affc5b26
        nMinSporkKeys = 1;
        fBIP9CheckMasternodesUpgraded = true;

        checkpointData = {
            {
                {0, uint256S("00000feb03167c4a4fa9f2bafcaea0e9f7e5646330e13c69e7ffa2dce58ace44")}, // Genesis block
                {1, uint256S("000002f68dbbf1fcfacb8f0b4e64083efdd2f07a906728ee068d573ffa5bcb4e")}, // First mined block
                {25, uint256S("0000016f6d9c834f269f07e624feb02ba725e3d954017549afde932c8f6d6dc7")}, // Zerocoin enabled
                {60, uint256S("00000039aca457e0dd2287e0fd636f1998e6b2774a64e8c18fa853776ec309c8")}, // Zerocoin V2 enabled
                {200, uint256S("000000078d815b257737d227d50e22f2486fd3ded21c5c0bca347a410c71bd26")}, // Switch PoW-PoS
                {201, uint256S("5542cf20a79e2658f45fc5385cc431035efae3980985254e01a08d930408bc52")}, // Switch PoW-PoS
                {202, uint256S("016023220b7e1578f923a126dddecbf345d8004734afb52636f60954ba116d21")}, // Switch PoW-PoS
                {300, uint256S("0cddd447eebbc7bd9f158bdc25eb1b290ab2b6f54ae77b07229c8da7b1999d99")}, //
                {700, uint256S("eac3327ace445de2f39a6209b3a778d370a7e6d676c254d82e3d1c8de272559b")}, // tx=1230 time=1526558980
                {67000, uint256S("727101d555687b91ed9740f3301048f3cfe5e5062babe491f2120ea7173b7234")}, // Add 3 premine blocks after this one
                {69713, uint256S("097a4a371b031eea8d26384a15e894dc60fcb7cd8304f62ab35c760317c36e28")}, // release v0.1.03
                {1623268, uint256S("1a0a8a556b6d95a44f7ba8c587879197051c1652e430ee3c3f57e5c173d80d38")}, // tx=3981853 time=1624868163
            }
        };

        chainTxData = ChainTxData{
            1624868163, // * UNIX timestamp of last known number of transactions (Block 1623268)
            3981853,    // * total number of transactions between genesis and that timestamp
                        //   (the tx=... number in the SetBestChain debug.log lines)
            0.045       // * estimated number of transactions per second after that timestamp
        };
    }
};

/**
 * Testnet (v3)
 */
class CTestNetParams : public CChainParams {
public:
    CTestNetParams() {
        strNetworkID = "test";
        consensus.nSubsidyHalvingInterval = 210240;
        consensus.nMasternodePaymentsStartBlock = 4010; // not true, but it's ok as long as it's less then nMasternodePaymentsIncreaseBlock
        consensus.nMasternodePaymentsIncreaseBlock = 4030;
        consensus.nMasternodePaymentsIncreasePeriod = 10;
        consensus.nInstantSendConfirmationsRequired = 2;
        consensus.nInstantSendKeepLock = 6;
        consensus.nBudgetPaymentsStartBlock = 4200;
        consensus.nBudgetPaymentsCycleBlocks = 144;
        consensus.nBudgetPaymentsWindowBlocks = 64;
        consensus.nSuperblockStartBlock = std::numeric_limits<int>::max(); // NOTE: Should satisfy nSuperblockStartBlock > nBudgetPaymentsStartBlock
        consensus.nSuperblockStartHash = uint256(); // do not check this on testnet
        consensus.nSuperblockCycle = 24; // Superblocks can be issued hourly on testnet
        consensus.nGovernanceMinQuorum = 1;
        consensus.nGovernanceFilterElements = 500;
        consensus.nMasternodeMinimumConfirmations = 1;
        // v17 feature-set activation height on testnet.
        consensus.V17DeploymentHeight = 826130;
        consensus.BIP34Height = 1;
        consensus.BIP34Hash = uint256S("0000065432f43b3efb23bd0f63fe33d00d02a5f36233fe1b982c08274d58ef12");
        consensus.BIP65Height = consensus.V17DeploymentHeight;
        consensus.BIP66Height = 1; // 0000065432f43b3efb23bd0f63fe33d00d02a5f36233fe1b982c08274d58ef12
        consensus.CSVHeight = consensus.V17DeploymentHeight;
        consensus.BIP147Height = consensus.V17DeploymentHeight;
        consensus.DIP0001Height = consensus.V17DeploymentHeight;
        consensus.DIP0003Height = consensus.V17DeploymentHeight;
        // consensus.DIP0003EnforcementHeight = std::numeric_limits<int>::max();
        consensus.DIP0003EnforcementHash = uint256();
        consensus.DIP0008Height = consensus.V17DeploymentHeight;
        consensus.powLimit = uint256S("00000fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); // ~uint256(0) >> 20
consensus.nPowTargetTimespan = 24 * 60 * 60; // Bytz: 1 day
        consensus.nPowTargetSpacing = 1 * 60; // Bytz: 1 minute
        consensus.fPowAllowMinDifficultyBlocks = false;
        consensus.fPowNoRetargeting = false;
        // Bytz specific parameters
        // Proof of Stake parameters
        consensus.nPosStartHeight = 201;
        consensus.nBlockTimeProtocolV2 = consensus.V17DeploymentHeight;
        consensus.posLimit = uint256S("000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); // ~uint256(0) >> 24
        consensus.posLimit_V2 = uint256S("00000fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); // ~uint256(0) >> 20
        consensus.nTimeSlotLength = 15;
        consensus.nPosTargetSpacing = 1 * 60; // 1 minute
        consensus.nPosTargetTimespan = 40 * 60; // 40 minutes
        consensus.nPosTargetTimespan_V2 = 2 * consensus.nTimeSlotLength * 60; // 30 minutes
        consensus.nStakeMinDepth = 100;
        consensus.nStakeMinAge = 60 * 60; // 1 hour
        consensus.nBlockStakeModifierV1A = 51197;
        consensus.nBlockStakeModifierV2 = 826130;
        consensus.strCarbonOffsetAddress = "TqqiV3twXTaD5pL4vrA3nZqT8d8BPbxM3e";
        // ATP parameters
        consensus.ATPStartHeight = consensus.V17DeploymentHeight;
        consensus.BytzAddrPrefix = "bytztest";
        consensus.strTokenManagementKey = "TsdKwqnDKEN3N38QG5hTQBNJe6y1mdECy8";
        consensus.nOpGroupNewRequiredConfirmations = 1;
        // Other
        consensus.nCoinbaseMaturity = 15;
        consensus.AccruedCarbonOffsetStartHeight = 831200;
        consensus.AccruedCarbonOffsetWindow = 100;
        // Zerocoin
        consensus.nZerocoinRequiredStakeDepth = 200;
        consensus.nZerocoinStartHeight = 25;
        consensus.nZerocoinStartTime = 1524496462;
        consensus.nBlockZerocoinV2 = 60;
        consensus.nPublicZCSpends = std::numeric_limits<int>::max();
        consensus.nFakeSerialBlockheightEnd = -1;
        consensus.nMintRequiredConfirmations = 20; //the maximum amount of confirmations until accumulated in 19
        consensus.nRequiredAccumulation = 1;
        // Same accumulator modulus as mainnet (see Zerocoin_Params).
        consensus.zerocoinModulus = "25195908475657893494027183240048398571429282126204032027777137836043662020707595556264018525880784"
            "4069182906412495150821892985591491761845028084891200728449926873928072877767359714183472702618963750149718246911"
            "6507761337985909570009733045974880842840179742910064245869181719511874612151517265463228221686998754918242243363"
            "7259085141865462043576798423387184774447920739934236584823824281198163815010674810451660377306056201619676256133"
            "8441436038339044149526344321901146575444541784240209246165157233507787077498171257724679629263863563732899121548"
            "31438167899885040445364023527381951378636564391212010397122822120720357";
        consensus.nRuleChangeActivationThreshold = 1512; // 75% for testchains
        consensus.nMinerConfirmationWindow = 2016; // nPowTargetTimespan / nPowTargetSpacing
        consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].bit = 25;
        consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nStartTime = 1199145601; // January 1, 2008
        consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nTimeout = 1230767999; // December 31, 2008

        // The best chain should have at least this much work.
        consensus.nMinimumChainWork = uint256S("0x0000000000000000000000000000000000000000000000000000000000000000"); // 0

        // By default assume that the signatures in ancestors of this block are valid.
        consensus.defaultAssumeValid = uint256S("0x0000009303aeadf8cf3812f5c869691dbd4cb118ad20e9bf553be434bafe6a52"); // 470000

        pchMessageStart[0] = 0x81;
        pchMessageStart[1] = 0xbb;
        pchMessageStart[2] = 0x9f;
        pchMessageStart[3] = 0x83;
        nDefaultPort = 47415;
        nPruneAfterHeight = 1000;

        // Testnet genesis (merkle-root assert intentionally disabled upstream).
        genesis = CreateGenesisBlock(1524496461, 846737, 0x1e0ffff0, 1, 0 * COIN);
        consensus.hashGenesisBlock = genesis.GetHash();
        assert(consensus.hashGenesisBlock == uint256S("0x0000065432f43b3efb23bd0f63fe33d00d02a5f36233fe1b982c08274d58ef12"));
        //assert(genesis.hashMerkleRoot == uint256S("0xe0028eb9648db56b1ac77cf090b99048a8007e2bb64b68f092c03c7f56a662c7"));

        vFixedSeeds.clear();
        vFixedSeeds = std::vector<SeedSpec6>(pnSeed6_test, pnSeed6_test + ARRAYLEN(pnSeed6_test));

        vSeeds.clear();
        // nodes with support for servicebits filtering should be at the top
        vSeeds.emplace_back("testnet.seeder1.bytz.gg"); // Bytz US1 DNS Seeder
        vSeeds.emplace_back("testnet.seeder2.bytz.gg"); // Bytz EU1 DNS Seeder
        vSeeds.emplace_back("testnet.seeder3.bytz.gg"); // Bytz ASIA1 (Singapore) DNS Seeder
        vSeeds.emplace_back("testnet.seeder4.bytz.gg"); // Bytz AUSTRALIA1 (Sydney) DNS Seeder

        // Testnet Bytz addresses start with 'T'
        base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1,66);
        // Testnet Bytz script addresses start with '4' or '5'
        base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1,9);
        // Testnet private keys start with 'z' or '2'
        base58Prefixes[SECRET_KEY] = std::vector<unsigned char>(1,144);
        // Testnet Bytz BIP32 pubkeys start with 'DRKV' (Bitcoin defaults)
        base58Prefixes[EXT_PUBLIC_KEY] = {0x3A, 0x80, 0x61, 0xA0};
        // Testnet Bytz BIP32 prvkeys start with 'DRKP' (Bitcoin defaults)
        base58Prefixes[EXT_SECRET_KEY] = {0x3A, 0x80, 0x58, 0x37};

        // Testnet Bytz BIP44 coin type is '1' (All coin's testnet default)
        nExtCoinType = 1;

        // long living quorum params
        consensus.llmqs[Consensus::LLMQ_TEST_V17] = llmq_test_v17;
        consensus.llmqs[Consensus::LLMQ_20_60] = llmq20_60;
consensus.llmqs[Consensus::LLMQ_40_60] = llmq40_60;
        consensus.llmqs[Consensus::LLMQ_40_85] = llmq40_85;
        consensus.llmqs[Consensus::LLMQ_20_70] = llmq20_70;
        consensus.llmqTypeChainLocks = Consensus::LLMQ_20_60;
        consensus.llmqTypeInstantSend = Consensus::LLMQ_20_60;
        consensus.llmqTypePlatform = Consensus::LLMQ_20_70;

        fDefaultConsistencyChecks = false;
        fRequireStandard = false;
        fRequireRoutableExternalIP = true;
        fMineBlocksOnDemand = false;
        fAllowMultipleAddressesFromGroup = false;
        fAllowMultiplePorts = true;
        nLLMQConnectionRetryTimeout = 60;

        nPoolMinParticipants = 2;
        nPoolMaxParticipants = 20;
        nFulfilledRequestExpireTime = 5*60; // fulfilled requests expire in 5 minutes

        vSporkAddresses = {"TozWRrxnKYpshJw5PhAaP7gHzTLDFhKCnr"};
        nMinSporkKeys = 1;
        fBIP9CheckMasternodesUpgraded = true;

        checkpointData = {
            {
                {0, uint256S("0000065432f43b3efb23bd0f63fe33d00d02a5f36233fe1b982c08274d58ef12")},
            }
        };

        chainTxData = ChainTxData{
            1530893198, // * UNIX timestamp of last known number of transactions (Block 387900)
            4404,       // * total number of transactions between genesis and that timestamp
                        //   (the tx=... number in the SetBestChain debug.log lines)
            0.01        // * estimated number of transactions per second after that timestamp
        };
    }
};

/**
 * Devnet
 */
class CDevNetParams : public CChainParams {
public:
    // fHelpOnly skips mining the devnet genesis block (see below), so that
    // help output can be produced without a -devnet name being set.
    CDevNetParams(bool fHelpOnly = false) {
        strNetworkID = "devnet";
        consensus.nSubsidyHalvingInterval = 210240;
        consensus.nMasternodePaymentsStartBlock = 4010; // not true, but it's ok as long as it's less then nMasternodePaymentsIncreaseBlock
        consensus.nMasternodePaymentsIncreaseBlock = 4030;
        consensus.nMasternodePaymentsIncreasePeriod = 10;
        consensus.nInstantSendConfirmationsRequired = 2;
        consensus.nInstantSendKeepLock = 6;
        consensus.nBudgetPaymentsStartBlock = 4100;
        consensus.nBudgetPaymentsCycleBlocks = 50;
        consensus.nBudgetPaymentsWindowBlocks = 10;
        consensus.nSuperblockStartBlock = 4200; // NOTE: Should satisfy nSuperblockStartBlock > nBudgetPaymentsStartBlock
        consensus.nSuperblockStartHash = uint256(); // do not check this on devnet
        consensus.nSuperblockCycle = 24; // Superblocks can be issued hourly on devnet
        consensus.nGovernanceMinQuorum = 1;
        consensus.nGovernanceFilterElements = 500;
        consensus.nMasternodeMinimumConfirmations = 1;
        consensus.V17DeploymentHeight = 1;
        consensus.BIP34Height = 1; // BIP34 activated immediately on devnet
        consensus.BIP65Height = 1; // BIP65 activated immediately on devnet
        consensus.BIP66Height = 1; // BIP66 activated immediately on devnet
        consensus.CSVHeight = consensus.V17DeploymentHeight;
        consensus.BIP147Height = consensus.V17DeploymentHeight;
        consensus.DIP0001Height = 2; // DIP0001 activated immediately on devnet
        consensus.DIP0003Height = 2; // DIP0003 activated immediately on devnet
        // consensus.DIP0003EnforcementHeight = 2; // DIP0003 activated immediately on devnet
        consensus.DIP0003EnforcementHash = uint256();
        consensus.DIP0008Height = 2; // DIP0008 activated immediately on devnet
        consensus.powLimit = uint256S("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); // ~uint256(0) >> 1
        consensus.nPowTargetTimespan = 24 *
60 * 60; // Bytz: 1 day consensus.nPowTargetSpacing = 2.5 * 60; // Bytz: 2.5 minutes consensus.fPowAllowMinDifficultyBlocks = true; consensus.fPowNoRetargeting = false; // Bytz specific parameters // Proof of Stake parameters consensus.nPosStartHeight = 201; consensus.nBlockTimeProtocolV2 = consensus.V17DeploymentHeight; consensus.posLimit = uint256S("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); // ~uint256(0) >> 1 consensus.posLimit_V2 = uint256S("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); // ~uint256(0) >> 1 consensus.nTimeSlotLength = 15; consensus.nPosTargetSpacing = 1 * 60; // 1 minute consensus.nPosTargetTimespan = 40 * 60; // 40 minutes consensus.nPosTargetTimespan_V2 = 2 * consensus.nTimeSlotLength * 60; // 30 minutes consensus.nStakeMinDepth = 100; consensus.nStakeMinAge = 60 * 60; // 1 hour consensus.nBlockStakeModifierV1A = 1000; consensus.nBlockStakeModifierV2 = consensus.V17DeploymentHeight; consensus.strCarbonOffsetAddress = "TkDutp66Ygp5PpPnrETvfyrtnxq5UevLpo"; // ATP parameters consensus.ATPStartHeight = consensus.V17DeploymentHeight; consensus.AccruedCarbonOffsetStartHeight = consensus.V17DeploymentHeight; consensus.AccruedCarbonOffsetWindow = 100; consensus.BytzAddrPrefix = "bytztest"; consensus.strTokenManagementKey = "TkDutp66Ygp5PpPnrETvfyrtnxq5UevLpo"; consensus.nOpGroupNewRequiredConfirmations = 1; // Other consensus.nCoinbaseMaturity = 15; // Zerocoin consensus.nZerocoinRequiredStakeDepth = 200; consensus.nZerocoinStartHeight = std::numeric_limits<int>::max(); consensus.nZerocoinStartTime = std::numeric_limits<int>::max(); consensus.nBlockZerocoinV2 = std::numeric_limits<int>::max(); consensus.nPublicZCSpends = std::numeric_limits<int>::max(); consensus.nFakeSerialBlockheightEnd = -1; consensus.nMintRequiredConfirmations = 20; //the maximum amount of confirmations until accumulated in 19 consensus.nRequiredAccumulation = 1; consensus.zerocoinModulus = 
"25195908475657893494027183240048398571429282126204032027777137836043662020707595556264018525880784" "4069182906412495150821892985591491761845028084891200728449926873928072877767359714183472702618963750149718246911" "6507761337985909570009733045974880842840179742910064245869181719511874612151517265463228221686998754918242243363" "7259085141865462043576798423387184774447920739934236584823824281198163815010674810451660377306056201619676256133" "8441436038339044149526344321901146575444541784240209246165157233507787077498171257724679629263863563732899121548" "31438167899885040445364023527381951378636564391212010397122822120720357"; consensus.nRuleChangeActivationThreshold = 1512; // 75% for testchains consensus.nMinerConfirmationWindow = 2016; // nPowTargetTimespan / nPowTargetSpacing consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].bit = 25; consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nStartTime = 1199145601; // January 1, 2008 consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nTimeout = 1230767999; // December 31, 2008 // The best chain should have at least this much work. consensus.nMinimumChainWork = uint256S("0x000000000000000000000000000000000000000000000000000000000000000"); // By default assume that the signatures in ancestors of this block are valid. 
consensus.defaultAssumeValid = uint256S("0x000000000000000000000000000000000000000000000000000000000000000"); pchMessageStart[0] = 0xb2; pchMessageStart[1] = 0x8f; pchMessageStart[2] = 0xa3; pchMessageStart[3] = 0xcc; nDefaultPort = 47626; nPruneAfterHeight = 1000; genesis = CreateGenesisBlock(1524496461, 12351, 0x207fffff, 1, 0 * COIN); consensus.hashGenesisBlock = genesis.GetHash(); assert(consensus.hashGenesisBlock == uint256S("0x618435c615f3d628acf97c19c4b3e6320555c62f515d4144425e4e8b7610fbab")); // assert(genesis.hashMerkleRoot == uint256S("0xe0028eb9648db56b1ac77cf090b99048a8007e2bb64b68f092c03c7f56a662c7")); if (!fHelpOnly) { devnetGenesis = FindDevNetGenesisBlock(genesis, 0 * COIN); consensus.hashDevnetGenesisBlock = devnetGenesis.GetHash(); } vFixedSeeds.clear(); vSeeds.clear(); //vSeeds.push_back(CDNSSeedData("bytzevo.org", "devnet-seed.bytzevo.org")); // Testnet Bytz addresses start with 'T' base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1,66); // Testnet Bytz script addresses start with '4' or '5' base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1,9); // Testnet private keys start with 'z' or '2' base58Prefixes[SECRET_KEY] = std::vector<unsigned char>(1,144); // Testnet Bytz BIP32 pubkeys start with 'DRKV' (Bitcoin defaults) base58Prefixes[EXT_PUBLIC_KEY] = {0x3A, 0x80, 0x61, 0xA0}; // Testnet Bytz BIP32 prvkeys start with 'DRKP' (Bitcoin defaults) base58Prefixes[EXT_SECRET_KEY] = {0x3A, 0x80, 0x58, 0x37}; // Testnet Bytz BIP44 coin type is '1' (All coin's testnet default) nExtCoinType = 1; // long living quorum params consensus.llmqs[Consensus::LLMQ_DEVNET] = llmq_devnet; consensus.llmqs[Consensus::LLMQ_20_60] = llmq20_60; consensus.llmqs[Consensus::LLMQ_40_60] = llmq40_60; consensus.llmqs[Consensus::LLMQ_40_85] = llmq40_85; consensus.llmqs[Consensus::LLMQ_20_70] = llmq20_70; consensus.llmqTypeChainLocks = Consensus::LLMQ_20_60; consensus.llmqTypeInstantSend = Consensus::LLMQ_20_60; consensus.llmqTypePlatform = 
Consensus::LLMQ_20_70; fDefaultConsistencyChecks = false; fRequireStandard = false; fRequireRoutableExternalIP = true; fMineBlocksOnDemand = false; fAllowMultipleAddressesFromGroup = true; fAllowMultiplePorts = true; nLLMQConnectionRetryTimeout = 60; nPoolMinParticipants = 2; nPoolMaxParticipants = 20; nFulfilledRequestExpireTime = 5*60; // fulfilled requests expire in 5 minutes vSporkAddresses = {"TkDutp66Ygp5PpPnrETvfyrtnxq5Z1ub79"}; // 04b33722601343992c8a651fafa0f424c6ac90f797d3f58d90eebf96e817e9d7ca76a40e3c53b3d47f6f6a60b0d36dbb94ee630a5ad622f08d92782999fe7b043a nMinSporkKeys = 1; // devnets are started with no blocks and no MN, so we can't check for upgraded MN (as there are none) fBIP9CheckMasternodesUpgraded = false; checkpointData = (CCheckpointData) { { { 0, uint256S("0x000008ca1832a4baf228eb1553c03d3a2c8e02399550dd6ea8d65cec3ef23d2e")}, { 1, devnetGenesis.GetHash() }, } }; chainTxData = ChainTxData{ devnetGenesis.GetBlockTime(), // * UNIX timestamp of devnet genesis block 2, // * we only have 2 coinbase transactions when a devnet is started up 0.01 // * estimated number of transactions per second }; } }; /** * Regression test */ class CRegTestParams : public CChainParams { public: CRegTestParams() { strNetworkID = "regtest"; consensus.nSubsidyHalvingInterval = 150; consensus.nMasternodePaymentsStartBlock = 240; consensus.nMasternodePaymentsIncreaseBlock = 350; consensus.nMasternodePaymentsIncreasePeriod = 10; consensus.nInstantSendConfirmationsRequired = 2; consensus.nInstantSendKeepLock = 6; consensus.nBudgetPaymentsStartBlock = 1000; consensus.nBudgetPaymentsCycleBlocks = 50; consensus.nBudgetPaymentsWindowBlocks = 10; consensus.nSuperblockStartBlock = 1500; consensus.nSuperblockStartHash = uint256(); // do not check this on regtest consensus.nSuperblockCycle = 10; consensus.nGovernanceMinQuorum = 1; consensus.nGovernanceFilterElements = 100; consensus.nMasternodeMinimumConfirmations = 1; consensus.V17DeploymentHeight = 300; consensus.BIP34Height = 
100000000; // BIP34 has not activated on regtest (far in the future so block v1 are not rejected in tests) consensus.BIP34Hash = uint256(); consensus.BIP65Height = consensus.V17DeploymentHeight; // BIP65 activated on regtest (Used in rpc activation tests) consensus.BIP66Height = 1251; // BIP66 activated on regtest (Used in rpc activation tests) consensus.CSVHeight = consensus.V17DeploymentHeight; consensus.BIP147Height = consensus.V17DeploymentHeight; consensus.DIP0001Height = 2000; consensus.DIP0003Height = 210; // consensus.DIP0003EnforcementHeight = 500; consensus.DIP0003EnforcementHash = uint256(); consensus.DIP0008Height = 432; consensus.powLimit = uint256S("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); // ~uint256(0) >> 1 consensus.nPowTargetTimespan = 24 * 60 * 60; // Bytz: 1 day consensus.nPowTargetSpacing = 2.5 * 60; // Bytz: 2.5 minutes consensus.fPowAllowMinDifficultyBlocks = true; consensus.fPowNoRetargeting = true; // Bytz specific parameters // Proof of Stake parameters consensus.nPosStartHeight = 201; consensus.nBlockTimeProtocolV2 = consensus.V17DeploymentHeight; consensus.posLimit = uint256S("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); // ~uint256(0) >> 1 consensus.posLimit_V2 = uint256S("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); // ~uint256(0) >> 1 consensus.nTimeSlotLength = 15; consensus.nPosTargetSpacing = 1 * 60; // 1 minute consensus.nPosTargetTimespan = 40 * 60; // 40 minutes consensus.nPosTargetTimespan_V2 = 2 * consensus.nTimeSlotLength * 60; // 30 minutes consensus.nStakeMinDepth = 1; consensus.nStakeMinAge = 0; consensus.nBlockStakeModifierV1A = consensus.nPosStartHeight; consensus.nBlockStakeModifierV2 = consensus.V17DeploymentHeight; consensus.strCarbonOffsetAddress = "TqMgq4qkw7bGxf6CDhtDfEqzEtWD5C7x8U"; // ATP parameters consensus.ATPStartHeight = consensus.V17DeploymentHeight; consensus.AccruedCarbonOffsetStartHeight = consensus.V17DeploymentHeight + 30; 
consensus.AccruedCarbonOffsetWindow = 10; consensus.BytzAddrPrefix = "bytzreg"; consensus.strTokenManagementKey = "TqMgq4qkw7bGxf6CDhtDfEqzEtWD5C7x8U"; consensus.nOpGroupNewRequiredConfirmations = 1; // Other consensus.nCoinbaseMaturity = 15; // Zerocoin consensus.nZerocoinRequiredStakeDepth = 200; consensus.nZerocoinStartHeight = std::numeric_limits<int>::max(); consensus.nZerocoinStartTime = std::numeric_limits<int>::max(); consensus.nBlockZerocoinV2 = std::numeric_limits<int>::max(); consensus.nPublicZCSpends = std::numeric_limits<int>::max(); consensus.nFakeSerialBlockheightEnd = -1; consensus.nMintRequiredConfirmations = 20; //the maximum amount of confirmations until accumulated in 19 consensus.nRequiredAccumulation = 1; consensus.zerocoinModulus = "25195908475657893494027183240048398571429282126204032027777137836043662020707595556264018525880784" "4069182906412495150821892985591491761845028084891200728449926873928072877767359714183472702618963750149718246911" "6507761337985909570009733045974880842840179742910064245869181719511874612151517265463228221686998754918242243363" "7259085141865462043576798423387184774447920739934236584823824281198163815010674810451660377306056201619676256133" "8441436038339044149526344321901146575444541784240209246165157233507787077498171257724679629263863563732899121548" "31438167899885040445364023527381951378636564391212010397122822120720357"; consensus.nRuleChangeActivationThreshold = 108; // 75% for testchains consensus.nMinerConfirmationWindow = 144; // Faster than normal for regtest (144 instead of 2016) consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].bit = 25; consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nStartTime = 0; consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nTimeout = 999999999999ULL; // The best chain should have at least this much work. consensus.nMinimumChainWork = uint256S("0x00"); // By default assume that the signatures in ancestors of this block are valid. 
consensus.defaultAssumeValid = uint256S("0x00"); pchMessageStart[0] = 0xb2; pchMessageStart[1] = 0x8f; pchMessageStart[2] = 0xa3; pchMessageStart[3] = 0xcc; nDefaultPort = 47526; nPruneAfterHeight = 1000; genesis = CreateGenesisBlock(1524496461, 12351, 0x207fffff, 1, 0 * COIN); consensus.hashGenesisBlock = genesis.GetHash(); assert(consensus.hashGenesisBlock == uint256S("0x618435c615f3d628acf97c19c4b3e6320555c62f515d4144425e4e8b7610fbab")); // assert(genesis.hashMerkleRoot == uint256S("0xe0028eb9648db56b1ac77cf090b99048a8007e2bb64b68f092c03c7f56a662c7")); vFixedSeeds.clear(); //!< Regtest mode doesn't have any fixed seeds. vSeeds.clear(); //!< Regtest mode doesn't have any DNS seeds. fDefaultConsistencyChecks = true; fRequireStandard = false; fRequireRoutableExternalIP = false; fMineBlocksOnDemand = true; fAllowMultipleAddressesFromGroup = true; fAllowMultiplePorts = true; nLLMQConnectionRetryTimeout = 1; // must be lower then the LLMQ signing session timeout so that tests have control over failing behavior nFulfilledRequestExpireTime = 5*60; // fulfilled requests expire in 5 minutes nPoolMinParticipants = 2; nPoolMaxParticipants = 20; // privKey: 5rE5LTDq3tRhaPW3RT1De35MocGc9wD8foaBGioxSXJsn45XaFG vSporkAddresses = {"TqMgq4qkw7bGxf6CDhtDfEqzEtWD5C7x8U"}; // 048b664010f7851071787d58c276c05701b7109fa29f2360a3e72b3bdfa32b49cf20a23fd34bcc49fc564fdbdccc54dd0dc9183a7bdf05d580d118fcdcd4abfb3f nMinSporkKeys = 1; // regtest usually has no masternodes in most tests, so don't check for upgraged MNs fBIP9CheckMasternodesUpgraded = false; checkpointData = { { {0, uint256S("0x000008ca1832a4baf228eb1553c03d3a2c8e02399550dd6ea8d65cec3ef23d2e")}, } }; chainTxData = ChainTxData{ 0, 0, 0 }; // Testnet Bytz addresses start with 'T' base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1,66); // Testnet Bytz script addresses start with '4' or '5' base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1,9); // Testnet private keys start with 'z' or '2' 
base58Prefixes[SECRET_KEY] = std::vector<unsigned char>(1,144); // Testnet Bytz BIP32 pubkeys start with 'DRKV' (Bitcoin defaults) base58Prefixes[EXT_PUBLIC_KEY] = {0x3A, 0x80, 0x61, 0xA0}; // Testnet Bytz BIP32 prvkeys start with 'DRKP' (Bitcoin defaults) base58Prefixes[EXT_SECRET_KEY] = {0x3A, 0x80, 0x58, 0x37}; // Regtest Bytz BIP44 coin type is '1' (All coin's testnet default) nExtCoinType = 1; // long living quorum params consensus.llmqs[Consensus::LLMQ_TEST] = llmq_test; consensus.llmqs[Consensus::LLMQ_TEST_V17] = llmq_test_v17; consensus.llmqTypeChainLocks = Consensus::LLMQ_TEST; consensus.llmqTypeInstantSend = Consensus::LLMQ_TEST; consensus.llmqTypePlatform = Consensus::LLMQ_TEST; } }; static std::unique_ptr<CChainParams> globalChainParams; const CChainParams &Params() { assert(globalChainParams); return *globalChainParams; } std::unique_ptr<CChainParams> CreateChainParams(const std::string& chain, bool fHelpOnly) { if (chain == CBaseChainParams::MAIN) return std::unique_ptr<CChainParams>(new CMainParams()); else if (chain == CBaseChainParams::TESTNET) return std::unique_ptr<CChainParams>(new CTestNetParams()); else if (chain == CBaseChainParams::DEVNET) { return std::unique_ptr<CChainParams>(new CDevNetParams(fHelpOnly)); } else if (chain == CBaseChainParams::REGTEST) return std::unique_ptr<CChainParams>(new CRegTestParams()); throw std::runtime_error(strprintf("%s: Unknown chain %s.", __func__, chain)); } void SelectParams(const std::string& network) { SelectBaseParams(network); globalChainParams = CreateChainParams(network); } void UpdateVersionBitsParameters(Consensus::DeploymentPos d, int64_t nStartTime, int64_t nTimeout, int64_t nWindowSize, int64_t nThresholdStart, int64_t nThresholdMin, int64_t nFalloffCoeff) { globalChainParams->UpdateVersionBitsParameters(d, nStartTime, nTimeout, nWindowSize, nThresholdStart, nThresholdMin, nFalloffCoeff); } void UpdateDIP3Parameters(int nActivationHeight, int nEnforcementHeight) { 
globalChainParams->UpdateDIP3Parameters(nActivationHeight, nEnforcementHeight); } void UpdateDIP8Parameters(int nActivationHeight) { globalChainParams->UpdateDIP8Parameters(nActivationHeight); } void UpdateBudgetParameters(int nMasternodePaymentsStartBlock, int nBudgetPaymentsStartBlock, int nSuperblockStartBlock) { globalChainParams->UpdateBudgetParameters(nMasternodePaymentsStartBlock, nBudgetPaymentsStartBlock, nSuperblockStartBlock); } void UpdateDevnetSubsidyAndDiffParams(int nMinimumDifficultyBlocks, int nHighSubsidyBlocks, int nHighSubsidyFactor) { globalChainParams->UpdateSubsidyAndDiffParams(nMinimumDifficultyBlocks, nHighSubsidyBlocks, nHighSubsidyFactor); } void UpdateDevnetLLMQChainLocks(Consensus::LLMQType llmqType) { globalChainParams->UpdateLLMQChainLocks(llmqType); } void UpdateDevnetLLMQInstantSend(Consensus::LLMQType llmqType) { globalChainParams->UpdateLLMQInstantSend(llmqType); } void UpdateLLMQTestParams(int size, int threshold) { globalChainParams->UpdateLLMQTestParams(size, threshold); } void UpdateLLMQDevnetParams(int size, int threshold) { globalChainParams->UpdateLLMQDevnetParams(size, threshold); }