source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
test9.c | /* NOTE(review): looks like a compiler/analysis test fixture -- the bare
 * integer expression statements (0; 2; 3; ...) are ordered position markers.
 * The property of interest is where "#pragma omp barrier" sits relative to
 * branches and the recursive call; do not restructure. */
void foo (int a) {
0;
if (1) {
2;
#pragma omp barrier
3;
} else {
4;
foo(3);
5;
}
}
int main() {
#pragma omp parallel
{
6;
if (7) {
8;
foo(9);
10;
} else {
11;
#pragma omp barrier
12;
#pragma omp barrier
13;
}
14;
#pragma omp barrier
15;
}
}
|
kernel.cl.openmp.h | #include <brisbane/brisbane_openmp.h>
/* OpenMP lowering of a SAXPY kernel: Z[_id] = A * X[_id].
 * BRISBANE_OPENMP_KERNEL_BEGIN/END presumably expand to the loop header and
 * footer that iterate _id over the launch range carried by
 * BRISBANE_OPENMP_KERNEL_ARGS -- confirm against brisbane_openmp.h. */
static void saxpy0(float* Z, float A, float* X, BRISBANE_OPENMP_KERNEL_ARGS) {
size_t _id;
#pragma omp parallel for shared(Z, A, X) private(_id)
BRISBANE_OPENMP_KERNEL_BEGIN
Z[_id] = A * X[_id];
BRISBANE_OPENMP_KERNEL_END
}
/* Second kernel stage: elementwise accumulate, Z[_id] += Y[_id]. */
static void saxpy1(float* Z, float* Y, BRISBANE_OPENMP_KERNEL_ARGS) {
size_t _id;
#pragma omp parallel for shared(Z, Y) private(_id)
BRISBANE_OPENMP_KERNEL_BEGIN
Z[_id] += Y[_id];
BRISBANE_OPENMP_KERNEL_END
}
|
relu_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: bhu@openailab.com
*/
#include <math.h>
#include "sys_port.h"
#include "module.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include "relu_param.h"
#include "compiler_fp16.h"
static int ref_relu_fp32(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, float negative_slope,
                         int num_thread)
{
    /* ReLU (negative_slope == 0) or leaky ReLU over an fp32 tensor.
     * NOTE(review): assumes a 4D NCHW layout -- the per-channel plane size
     * is dims[2] * dims[3]; confirm for other ranks. */
    float* src = input_tensor->data;
    float* dst = output_tensor->data;
    int batch = input_tensor->dims[0];
    int channel = input_tensor->dims[1];
    int cstep = input_tensor->dims[2] * input_tensor->dims[3];

    if (negative_slope == 0)
    {
        /* Fast path: clamp negatives to zero, channel planes in parallel. */
        for (int n = 0; n < batch; n++)
        {
#pragma omp parallel for num_threads(num_thread)
            for (int c = 0; c < channel; c++)
            {
                int plane = n * channel * cstep + c * cstep;
                const float* in = src + plane;
                float* out = dst + plane;
                for (int i = 0; i < cstep; i++)
                    out[i] = (in[i] < 0.f) ? 0.f : in[i];
            }
        }
    }
    else
    {
        /* Leaky path: scale negative values by negative_slope. */
        for (int n = 0; n < batch; n++)
        {
#pragma omp parallel for num_threads(num_thread)
            for (int c = 0; c < channel; c++)
            {
                int plane = n * channel * cstep + c * cstep;
                const float* in = src + plane;
                float* out = dst + plane;
                for (int i = 0; i < cstep; i++)
                    out[i] = (in[i] < 0) ? in[i] * negative_slope : in[i];
            }
        }
    }
    return 0;
}
#if MACOS
#else
static int ref_relu_fp16(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, float negative_slope,
int num_thread)
{
int total_size = input_tensor->elem_num;
float* input_data = input_tensor->data;
float* output_data = output_tensor->data;
/* cost fp16 to fp32 */
_fp16* input_fp16 = input_tensor->data;
_fp16* output_fp16 = output_tensor->data;
float* input_fp32 = (float*)sys_malloc(total_size * sizeof(float));
for(int i=0; i< total_size; i++)
{
input_fp32[i] = fp16_to_fp32(input_fp16[i]);
}
/* process */
if (negative_slope == 0)
{
for (int i = 0; i < total_size; i++)
{
if (input_fp32[i] < 0)
input_fp32[i] = 0;
else
input_fp32[i] = input_fp32[i];
}
}
else
{
for (int i = 0; i < total_size; i++)
{
if (input_fp32[i] < 0)
input_fp32[i] = input_fp32[i] * negative_slope;
else
input_fp32[i] = input_fp32[i];
}
}
/* cost fp32 to fp16 */
for(int i=0; i<total_size; i++)
{
output_fp16[i] = fp32_to_fp16(input_fp32[i]);
}
sys_free(input_fp32);
return 0;
}
#endif
static int ref_relu_uint8(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, float negative_slope,
int num_thread)
{
int total_size = input_tensor->elem_num;
/* dequant */
uint8_t* input_uint8 = input_tensor->data;
uint8_t* output_uint8 = output_tensor->data;
float input_scale = input_tensor->scale;
float output_scale = output_tensor->scale;
int32_t input_zero = input_tensor->zero_point;
int32_t output_zero = output_tensor->zero_point;
float* data_fp32 = (float*)sys_malloc(total_size * sizeof(float));
for(int i=0; i<total_size; i++)
{
data_fp32[i] = ((float )input_uint8[i] - (float )input_zero) * input_scale;
}
/* process */
if (negative_slope == 0)
{
for (int i = 0; i < total_size; i++)
{
if (data_fp32[i] < 0)
data_fp32[i] = 0;
else
data_fp32[i] = data_fp32[i];
}
}
else
{
for (int i = 0; i < total_size; i++)
{
if (data_fp32[i] < 0)
data_fp32[i] = data_fp32[i] * negative_slope;
else
data_fp32[i] = data_fp32[i];
}
}
/* quant */
for(int i=0; i<total_size; i++)
{
int udata = round(data_fp32[i] / output_scale + output_zero);
if (udata > 255)
udata = 255;
else if (udata < 0)
udata = 0;
output_uint8[i] = udata;
}
sys_free(data_fp32);
return 0;
}
static int ref_relu_int8(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, float negative_slope)
{
int total_size = input_tensor->elem_num;
/* dequant */
int8_t* input_int8 = input_tensor->data;
int8_t* output_int8 = output_tensor->data;
float input_scale = input_tensor->scale;
float output_scale = output_tensor->scale;
float* data_fp32 = (float*)sys_malloc(total_size * sizeof(float));
for(int i=0; i<total_size; i++)
{
data_fp32[i] = (float )input_int8[i] * input_scale;
}
/* process */
if (negative_slope == 0)
{
for (int i = 0; i < total_size; i++)
{
if (data_fp32[i] < 0)
data_fp32[i] = 0;
else
data_fp32[i] = data_fp32[i];
}
}
else
{
for (int i = 0; i < total_size; i++)
{
if (data_fp32[i] < 0)
data_fp32[i] = data_fp32[i] * negative_slope;
else
data_fp32[i] = data_fp32[i];
}
}
/* quant */
for(int i=0; i<total_size; i++)
{
int data_i32 = round(data_fp32[i] / output_scale);
if (data_i32 > 127)
data_i32 = 127;
else if (data_i32 < -127)
data_i32 = -127;
output_int8[i] = (int8_t)data_i32;
}
sys_free(data_fp32);
return 0;
}
/* No per-node state to set up for the reference ReLU implementation. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/* Nothing allocated in init_node, so nothing to release. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/* Dispatch the reference ReLU kernel by the input tensor's data type.
 * Returns the kernel's status, or -1 for unsupported types (after logging). */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct ir_tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    struct relu_param* relu_param = (struct relu_param*)ir_node->op.param_mem;

    int ret = -1;
    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_relu_fp32(input_tensor, output_tensor, relu_param->negative_slope, exec_graph->num_thread);
    else if (input_tensor->data_type == TENGINE_DT_FP16)
    {
        /* Braces keep the if/else chain well-formed no matter which
         * preprocessor branch is compiled in. */
#if MACOS
        printf("FP16 not support mac os\n");
#else
        ret = ref_relu_fp16(input_tensor, output_tensor, relu_param->negative_slope, exec_graph->num_thread);
#endif
    }
    else if (input_tensor->data_type == TENGINE_DT_UINT8)
        ret = ref_relu_uint8(input_tensor, output_tensor, relu_param->negative_slope, exec_graph->num_thread);
    else if (input_tensor->data_type == TENGINE_DT_INT8)
        ret = ref_relu_int8(input_tensor, output_tensor, relu_param->negative_slope);
    else
        printf("Input data type %d not to be supported.\n", input_tensor->data_type);
    return ret;
}
/* ReLU is elementwise, so the output shape simply mirrors the input. */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* graph = ir_node->graph;
    struct ir_tensor* in = get_ir_graph_tensor(graph, ir_node->input_tensors[0]);
    struct ir_tensor* out = get_ir_graph_tensor(graph, ir_node->output_tensors[0]);
    return set_ir_tensor_shape(out, in->dims, in->dim_num);
}
/* Capability score for backend selection: OPS_SCORE_CANDO marks this as a
 * working (reference) implementation -- see cpu_node_ops.h for the scale. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
return OPS_SCORE_CANDO;
}
/* Operator vtable for the reference ReLU: only run/reshape plus the
 * init/release stubs are provided; prerun/postrun are not needed. */
static struct node_ops hcl_node_ops = {.prerun = NULL,
.run = run,
.reshape = reshape,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
/* Register / unregister this implementation for OP_RELU; hooked in at
 * load time by the AUTO_(UN)REGISTER_OPS macros below. */
static int reg_relu_hcl_ops(void* arg)
{
return register_builtin_node_ops(OP_RELU, &hcl_node_ops);
}
static int unreg_relu_hcl_ops(void* arg)
{
return unregister_builtin_node_ops(OP_RELU, &hcl_node_ops);
}
AUTO_REGISTER_OPS(reg_relu_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_relu_hcl_ops);
|
elementwise_add_arm_func.h | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef ELEMENTWISEADD_OP
#pragma once
#include "operators/math/elementwise_op_function.h"
#include "operators/op_param.h"
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#include <arm_neon.h>
#endif
namespace paddle_mobile {
namespace operators {
// Broadcast add: Out = X + Y, where Y's dims align with a contiguous run of
// X's dims starting at `axis` (axis == -1 means "align at the tail").
// Fix: the parameter declarator had been mangled to "&para;m" (an HTML
// entity for "&param"), which does not compile; restored to `&param`.
// Note the float data path below does not use the template parameter T.
template <typename T>
inline void ElementwiseAddCompute(const ElementwiseAddParam<CPU> &param) {
  const framework::Tensor *input_x = param.InputX();
  const framework::Tensor *input_y = param.InputY();
  framework::Tensor *Out = param.Out();
  int axis = param.Axis();
  const auto &x_dims = input_x->dims();
  const auto &y_dims = input_y->dims();
  /// axis = -1 represent the last dimensions.
  axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis);
  // Collapse X into (batch, channels, elementwise_num): dims before `axis`,
  // the dims covered by Y, and the trailing dims, respectively.
  size_t batch = 1;
  size_t channels = 1;
  size_t elementwise_num = 1;
  for (int i = 0; i < axis; ++i) {
    batch *= x_dims[i];
  }
  for (int i = 0; i < y_dims.size(); ++i) {
    channels *= y_dims[i];
  }
  for (int i = y_dims.size() + axis; i < x_dims.size(); ++i) {
    elementwise_num *= x_dims[i];
  }
  // Y is read as a flat per-channel vector of length `channels`.
  const float *bias_data = input_y->data<float>();
  const float *input_data = input_x->data<float>();
  float *output_data = Out->mutable_data<float>();
#pragma omp parallel for collapse(2)
  for (int i = 0; i < batch; ++i) {
    for (int j = 0; j < channels; ++j) {
      size_t offset = (i * channels + j) * elementwise_num;
      const float *input = input_data + offset;
      const float bias = bias_data[j];
      float *output = output_data + offset;
      int remain = elementwise_num;
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
      // Vectorized body: 16 floats per iteration (4x float32x4_t).
      int loop = elementwise_num >> 0x4;
      remain = elementwise_num & 0xF;
      for (int k = 0; k < loop; ++k) {
        float32x4_t rb = vdupq_n_f32(bias);
        float32x4_t r0 = vld1q_f32(input);
        float32x4_t r1 = vld1q_f32(input + 4);
        float32x4_t r2 = vld1q_f32(input + 8);
        float32x4_t r3 = vld1q_f32(input + 12);
        r0 = vaddq_f32(r0, rb);
        r1 = vaddq_f32(r1, rb);
        r2 = vaddq_f32(r2, rb);
        r3 = vaddq_f32(r3, rb);
        vst1q_f32(output, r0);
        vst1q_f32(output + 4, r1);
        vst1q_f32(output + 8, r2);
        vst1q_f32(output + 12, r3);
        input += 16;
        output += 16;
      }
#endif
      // Scalar tail (or the whole range when NEON is unavailable).
      for (int k = 0; k < remain; ++k) {
        output[k] = input[k] + bias;
      }
    }
  }
}
template class ElementwiseAddKernel<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
#endif
|
VolumetricAveragePooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/VolumetricAveragePooling.c"
#else
/* Validate kernel/stride arguments and input rank for volumetric (3D)
 * average pooling, and -- when gradOutput is non-NULL -- check that its
 * shape matches the expected pooled output shape.
 * Output extents use floor division: o = (i - k) / d + 1 (no padding). */
static inline void THNN_(VolumetricAveragePooling_shapeCheck)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH) {
long nslices;
long itime;
long iheight;
long iwidth;
long otime;
long oheight;
long owidth;
int ndim = input->nDimension;
/* 4D layout: C x T x H x W; 5D adds a leading batch dim, shifting all. */
int dimN = 0;
int dimt = 1;
int dimh = 2;
int dimw = 3;
if (input->nDimension == 5)
{
dimN++;
dimt++;
dimh++;
dimw++;
}
THArgCheck(kT > 0 && kW > 0 && kH > 0, 5,
"kernel size should be greater than zero, but got kT: %d kH: %d kW: %d",
kT, kH, kW);
THArgCheck(dT > 0 && dW > 0 && dH > 0, 8,
"stride should be greater than zero, but got dT: %d dH: %d dW: %d",
dT, dH, dW);
THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input,
"4D or 5D (batch mode) tensor expected for input, but got: %s");
/* each spatial extent must fit at least one kernel window */
THArgCheck(input->size[dimw] >= kW && input->size[dimh] >= kH
&& input->size[dimt] >= kT, 2,
"input image (T: %d H: %d W: %d) smaller than "
"kernel size (kT: %d kH: %d kW: %d)",
input->size[dimt], input->size[dimh], input->size[dimw],
kT, kH, kW);
/* sizes */
nslices = input->size[dimN];
itime = input->size[dimt];
iheight = input->size[dimh];
iwidth = input->size[dimw];
otime = (itime - kT) / dT + 1;
oheight = (iheight - kH) / dH + 1;
owidth = (iwidth - kW) / dW + 1;
if (gradOutput != NULL) {
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimN, nslices);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimt, otime);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, oheight);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, owidth);
}
}
/* Average-pool one frame (all slices of a single sample): every output cell
 * is the mean of its kT x kH x kW input window.  Slices are independent and
 * processed in parallel. */
static void THNN_(VolumetricAveragePooling_updateOutput_frame)(
          real *input_p,
          real *output_p,
          long nslices,
          long itime,
          long iwidth,
          long iheight,
          long otime,
          long owidth,
          long oheight,
          int kT,
          int kW,
          int kH,
          int dT,
          int dW,
          int dH)
{
  long slice;
#pragma omp parallel for private(slice)
  for (slice = 0; slice < nslices; slice++)
  {
    long ot, oh, ow;
    for (ot = 0; ot < otime; ot++)
    {
      for (oh = 0; oh < oheight; oh++)
      {
        for (ow = 0; ow < owidth; ow++)
        {
          /* top-left-front corner of the input window for this output cell */
          real *src = input_p + slice * itime * iwidth * iheight
            + ot * iwidth * iheight * dT + oh * iwidth * dH + ow * dW;
          real *dst = output_p + slice * otime * owidth * oheight
            + ot * owidth * oheight + oh * owidth + ow;

          /* sum the window, then divide by the kernel volume */
          real acc = 0.0;
          int x, y, z;
          for (z = 0; z < kT; z++)
            for (y = 0; y < kH; y++)
              for (x = 0; x < kW; x++)
                acc += src[z * iwidth * iheight + y * iwidth + x];
          *dst = acc / (kT * kW * kH);
        }
      }
    }
  }
}
/* Forward pass of 3D average pooling: validate shapes, resize output to
 * (N x) C x otime x oheight x owidth, then average each kT x kH x kW window
 * with strides dT/dH/dW (no padding, floor output sizes). */
void THNN_(VolumetricAveragePooling_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH)
{
long nslices;
long itime;
long iheight;
long iwidth;
long otime;
long oheight;
long owidth;
real *input_data;
real *output_data;
THNN_(VolumetricAveragePooling_shapeCheck)(
state, input, NULL, kT, kW, kH,
dT, dW, dH);
/* 4D layout: C x T x H x W; 5D adds a leading batch dim. */
int dimN = 0;
int dimt = 1;
int dimh = 2;
int dimw = 3;
if (input->nDimension == 5)
{
dimN++;
dimt++;
dimh++;
dimw++;
}
/* sizes */
nslices = input->size[dimN];
itime = input->size[dimt];
iheight = input->size[dimh];
iwidth = input->size[dimw];
otime = (itime - kT) / dT + 1;
oheight = (iheight - kH) / dH + 1;
owidth = (iwidth - kW) / dW + 1;
/* get contiguous input */
input = THTensor_(newContiguous)(input);
if (input->nDimension == 4) /* non-batch mode */
{
/* resize output */
THTensor_(resize4d)(output, nslices, otime, oheight, owidth);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
THNN_(VolumetricAveragePooling_updateOutput_frame)(
input_data, output_data, nslices,
itime, iwidth, iheight,
otime, owidth, oheight,
kT, kW, kH,
dT, dW, dH
);
}
else /* batch mode */
{
long p;
long nBatch = input->size[0];
/* per-sample strides into the flat data buffers */
long istride = nslices * itime * iwidth * iheight;
long ostride = nslices * otime * owidth * oheight;
/* resize output */
THTensor_(resize5d)(output, nBatch, nslices, otime, oheight, owidth);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
/* samples are independent: pool each in parallel */
#pragma omp parallel for private(p)
for (p=0; p < nBatch; p++)
{
THNN_(VolumetricAveragePooling_updateOutput_frame)(
input_data + p * istride, output_data + p * ostride, nslices,
itime, iwidth, iheight,
otime, owidth, oheight,
kT, kW, kH,
dT, dW, dH
);
}
}
/* cleanup: drop the contiguous copy (or extra reference) */
THTensor_(free)(input);
}
/* Backward pass for one frame: scatter each output gradient, divided by the
 * kernel volume, back over its kT x kH x kW input footprint.  Slices are
 * independent; gradInput_p must be pre-zeroed since values accumulate
 * (windows may overlap when stride < kernel). */
static void THNN_(VolumetricAveragePooling_updateGradInput_frame)(
real *gradInput_p,
real *gradOutput_p,
long nslices,
long itime,
long iwidth,
long iheight,
long otime,
long owidth,
long oheight,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH)
{
long k;
#pragma omp parallel for private(k)
for (k = 0; k < nslices; k++)
{
/* loop over output */
long i, j, ti;
for (ti = 0; ti < otime; ti++)
{
for (i = 0; i < oheight; i++)
{
for (j = 0; j < owidth; j++)
{
/* local pointers */
real *ip = gradInput_p + k * itime * iwidth * iheight
+ ti * iwidth * iheight * dT + i * iwidth * dH + j * dW;
real *op = gradOutput_p + k * otime * owidth * oheight
+ ti * owidth * oheight + i * owidth + j;
/* scatter gradients out to footprint: */
real val = *op / (kT * kW * kH);
int x,y,z;
for (z=0; z < kT; z++)
{
for (y = 0; y < kH; y++)
{
for (x = 0; x < kW; x++)
{
*(ip + z * iwidth * iheight + y * iwidth + x) += val;
}
}
}
}
}
}
}
}
/* Backward pass of 3D average pooling: validate shapes, zero gradInput, and
 * scatter gradOutput back over the input windows (per-frame helper above). */
void THNN_(VolumetricAveragePooling_updateGradInput)(
  THNNState *state,
  THTensor *input,
  THTensor *gradOutput,
  THTensor *gradInput,
  int kT,
  int kW,
  int kH,
  int dT,
  int dW,
  int dH)
{
  /* Use long for sizes, matching updateOutput above: the previous int
   * locals could overflow in the stride products for large tensors. */
  long nslices;
  long itime;
  long iheight;
  long iwidth;
  long otime;
  long oheight;
  long owidth;
  real *gradInput_data;
  real *gradOutput_data;

  int dimN = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;

  THNN_(VolumetricAveragePooling_shapeCheck)(
        state, input, gradOutput, kT, kW, kH,
        dT, dW, dH);

  /* get contiguous gradOutput */
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* resize and zero before accumulating the scattered gradients */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  if (input->nDimension == 5)
  {
    /* batch mode: dim 0 is the batch, shift the rest */
    dimN++;
    dimt++;
    dimh++;
    dimw++;
  }

  /* sizes */
  nslices = input->size[dimN];
  itime = input->size[dimt];
  iheight = input->size[dimh];
  iwidth = input->size[dimw];
  otime = gradOutput->size[dimt];
  oheight = gradOutput->size[dimh];
  owidth = gradOutput->size[dimw];

  /* get raw pointers */
  gradInput_data = THTensor_(data)(gradInput);
  gradOutput_data = THTensor_(data)(gradOutput);

  /* backprop */
  if (input->nDimension == 4) /* non-batch mode*/
  {
    THNN_(VolumetricAveragePooling_updateGradInput_frame)(
      gradInput_data, gradOutput_data, nslices,
      itime, iwidth, iheight,
      otime, owidth, oheight,
      kT, kW, kH,
      dT, dW, dH
    );
  }
  else /* batch mode */
  {
    long p;
    long nBatch = input->size[0];

    /* per-sample strides into the flat data buffers */
    long istride = nslices * itime * iwidth * iheight;
    long ostride = nslices * otime * owidth * oheight;

#pragma omp parallel for private(p)
    for (p = 0; p < nBatch; p++)
    {
      THNN_(VolumetricAveragePooling_updateGradInput_frame)(
        gradInput_data + p * istride, gradOutput_data + p * ostride, nslices,
        itime, iwidth, iheight,
        otime, owidth, oheight,
        kT, kW, kH,
        dT, dW, dH
      );
    }
  }

  /* cleanup: drop the contiguous copy (or extra reference) */
  THTensor_(free)(gradOutput);
}
#endif
|
sparselu.c | /*
* This file belongs to the Galois project, a C++ library for exploiting parallelism.
* The code is being released under the terms of the 3-Clause BSD License (a
* copy is located in LICENSE.txt at the top-level directory).
*
* Copyright (C) 2018, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*/
/**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de
* Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
* USA */
/**********************************************************************************************/
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <libgen.h>
#ifdef __linux__
#include <linux/mman.h>
#endif
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include "bots.h"
#include "sparselu.h"
extern char bots_arg_file[256];
/***********************************************************************
* checkmat:
**********************************************************************/
/***********************************************************************
 * checkmat: entrywise comparison of two dense blocks.  Returns FALSE on
 * the first entry where M is zero but N differs, or where the relative
 * error exceeds EPSILON; TRUE otherwise.
 **********************************************************************/
int checkmat(float* M, float* N) {
  int row, col;
  for (row = 0; row < bots_arg_size_1; row++) {
    for (col = 0; col < bots_arg_size_1; col++) {
      float ref = M[row * bots_arg_size_1 + col];
      float val = N[row * bots_arg_size_1 + col];
      float diff = ref - val;
      if (diff == 0.0)
        continue;
      if (diff < 0.0)
        diff = -diff;
      if (ref == 0) {
        /* cannot form a relative error against a zero reference entry */
        bots_message("Checking failure: A[%d][%d]=%f B[%d][%d]=%f; \n", row,
                     col, ref, row, col, val);
        return FALSE;
      }
      diff = diff / ref;
      if (diff > EPSILON) {
        bots_message(
            "Checking failure: A[%d][%d]=%f B[%d][%d]=%f; Relative Error=%f\n",
            row, col, ref, row, col, val, diff);
        return FALSE;
      }
    }
  }
  return TRUE;
}
/***********************************************************************
* genmat:
**********************************************************************/
/* Build a deterministic block-sparse matrix: a fixed modular pattern decides
 * which bots_arg_size x bots_arg_size block positions are non-null (the
 * diagonal and its immediate neighbors are always kept), and each allocated
 * block is filled by a multiplicative LCG (mod 65536) scaled to roughly
 * [-2, 2). */
static void synthetic_genmat(float* M[]) {
int null_entry, init_val, i, j, ii, jj;
float* p;
int a = 0, b = 0; /* a: allocated blocks, b: null blocks (for stats only) */
init_val = 1325; /* LCG seed */
/* generating the structure */
for (ii = 0; ii < bots_arg_size; ii++) {
for (jj = 0; jj < bots_arg_size; jj++) {
/* computing null entries */
null_entry = FALSE;
if ((ii < jj) && (ii % 3 != 0))
null_entry = TRUE;
if ((ii > jj) && (jj % 3 != 0))
null_entry = TRUE;
if (ii % 2 == 1)
null_entry = TRUE;
if (jj % 2 == 1)
null_entry = TRUE;
/* the tridiagonal band is always present */
if (ii == jj)
null_entry = FALSE;
if (ii == jj - 1)
null_entry = FALSE;
if (ii - 1 == jj)
null_entry = FALSE;
/* allocating matrix */
if (null_entry == FALSE) {
a++;
M[ii * bots_arg_size + jj] =
(float*)malloc(bots_arg_size_1 * bots_arg_size_1 * sizeof(float));
if ((M[ii * bots_arg_size + jj] == NULL)) {
bots_message("Error: Out of memory\n");
exit(101);
}
/* initializing matrix */
p = M[ii * bots_arg_size + jj];
for (i = 0; i < bots_arg_size_1; i++) {
for (j = 0; j < bots_arg_size_1; j++) {
/* LCG step: values in (-2, 2) */
init_val = (3125 * init_val) % 65536;
(*p) = (float)((init_val - 32768.0) / 16384.0);
p++;
}
}
} else {
b++;
M[ii * bots_arg_size + jj] = NULL;
}
}
}
bots_debug("allo = %d, no = %d, total = %d, factor = %f\n", a, b, a + b,
(float)((float)a / (float)(a + b)));
}
/* Build the block-sparse matrix from a Galois .gr graph file (version 1,
 * 4-byte edge data): each edge (ii, jj) stores its weight symmetrically into
 * the containing bots_arg_size_1 x bots_arg_size_1 blocks, and any missing
 * diagonal entry is set to 1.0 so the LU factorization is well-defined.
 * Fixes vs. the original: `a` was read uninitialized, the file descriptor
 * was leaked, the mapping was never unmapped, mmap/malloc failures were not
 * (or too late) checked. */
static void structure_from_file_genmat(float* M[]) {
  int a = 0, b, jj; /* a counts allocated blocks */
  int num_blocks, max_id;

  int fd = open(bots_arg_file, O_RDONLY);
  if (fd == -1)
    abort();
  struct stat buf;
  if (fstat(fd, &buf) == -1)
    abort();
  void* base = mmap(NULL, buf.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
  if (base == MAP_FAILED)
    abort();
  close(fd); /* the mapping stays valid after close */

  /* header: version, sizeof(edge data), node count, edge count */
  uint64_t* fptr = (uint64_t*)base;
  uint64_t version = *fptr++;
  if (version != 1)
    abort();
  uint64_t sizeof_edge = *fptr++;
  if (sizeof_edge != 4)
    abort();
  uint64_t num_nodes = *fptr++;
  uint64_t num_edges = *fptr++;
  uint64_t* out_idx = fptr;
  fptr += num_nodes;
  uint32_t* fptr32 = (uint32_t*)fptr;
  uint32_t* outs = fptr32;
  fptr32 += num_edges;
  if (num_edges % 2) /* pad so edge data stays 8-byte aligned */
    fptr32 += 1;
  float* edge_data = (float*)fptr32;

  memset(M, 0, bots_arg_size * bots_arg_size * sizeof(*M));
  num_blocks = (num_nodes + bots_arg_size_1 - 1) / bots_arg_size_1;
  max_id = bots_arg_size_1 * bots_arg_size;
  printf("full size: %d\n", num_blocks);

  /* generating the structure */
  uint32_t ii;
  for (ii = 0; ii < num_nodes; ++ii) {
    if (ii >= max_id)
      break;
    int bii = ii / bots_arg_size_1;
    /* NOTE(review): for ii == 0 this reads out_idx[0] rather than 0 as the
     * CSR begin index -- kept as-is; confirm against the .gr format. */
    uint64_t begin = (ii == 0) ? out_idx[0] : out_idx[ii - 1];
    uint64_t end = out_idx[ii];
    uint64_t edge;
    for (edge = begin; edge < end; ++edge) {
      int jj = outs[edge];
      if (jj >= max_id)
        continue;
      int bjj = jj / bots_arg_size_1;
      /* allocate the (bii, bjj) block and its symmetric partner on demand */
      if (M[bii * bots_arg_size + bjj] == NULL) {
        a++;
        M[bii * bots_arg_size + bjj] =
            (float*)malloc(bots_arg_size_1 * bots_arg_size_1 * sizeof(float));
        if (M[bii * bots_arg_size + bjj] == NULL) {
          bots_message("Error: Out of memory\n");
          exit(101);
        }
        memset(M[bii * bots_arg_size + bjj], 0,
               bots_arg_size_1 * bots_arg_size_1 * sizeof(float));
      }
      if (M[bjj * bots_arg_size + bii] == NULL) {
        a++;
        M[bjj * bots_arg_size + bii] =
            (float*)malloc(bots_arg_size_1 * bots_arg_size_1 * sizeof(float));
        if (M[bjj * bots_arg_size + bii] == NULL) {
          bots_message("Error: Out of memory\n");
          exit(101);
        }
        memset(M[bjj * bots_arg_size + bii], 0,
               bots_arg_size_1 * bots_arg_size_1 * sizeof(float));
      }
      /* store the edge weight symmetrically */
      M[bii * bots_arg_size + bjj][(ii % bots_arg_size_1) * bots_arg_size_1 +
                                   (jj % bots_arg_size_1)] = edge_data[edge];
      M[bjj * bots_arg_size + bii][(jj % bots_arg_size_1) * bots_arg_size_1 +
                                   (ii % bots_arg_size_1)] = edge_data[edge];
    }
  }

  /* Add identity diagonal as necessary */
  for (ii = 0; ii < bots_arg_size; ++ii) {
    if (M[ii * bots_arg_size + ii] == NULL) {
      a++;
      M[ii * bots_arg_size + ii] =
          (float*)malloc(bots_arg_size_1 * bots_arg_size_1 * sizeof(float));
      if (M[ii * bots_arg_size + ii] == NULL) {
        bots_message("Error: Out of memory\n");
        exit(101);
      }
      memset(M[ii * bots_arg_size + ii], 0,
             bots_arg_size_1 * bots_arg_size_1 * sizeof(float));
    }
    for (jj = 0; jj < bots_arg_size_1; ++jj) {
      if (M[ii * bots_arg_size + ii][jj * bots_arg_size_1 + jj] == 0.0)
        M[ii * bots_arg_size + ii][jj * bots_arg_size_1 + jj] = 1.0;
    }
  }

  munmap(base, buf.st_size); /* previously never unmapped */
  b = num_blocks * num_blocks - a;
  bots_debug("allo = %d, no = %d, total = %d, factor = %f\n", a, b, a + b,
             (float)((float)a / (float)(a + b)));
}
void genmat(float* M[]) {
if (strlen(bots_arg_file) == 0)
synthetic_genmat(M);
else
structure_from_file_genmat(M);
}
/***********************************************************************
* print_structure:
**********************************************************************/
/* Print the block sparsity pattern of M: 'x' for an allocated block,
 * space for a null one. */
void print_structure(char* name, float* M[]) {
int ii, jj;
bots_message("Structure for matrix %s @ 0x%p\n", name, M);
for (ii = 0; ii < bots_arg_size; ii++) {
for (jj = 0; jj < bots_arg_size; jj++) {
if (M[ii * bots_arg_size + jj] != NULL) {
bots_message("x");
} else
bots_message(" ");
}
bots_message("\n");
}
bots_message("\n");
}
/***********************************************************************
* allocate_clean_block:
**********************************************************************/
/***********************************************************************
 * allocate_clean_block: allocate one zero-initialized
 * bots_arg_size_1 x bots_arg_size_1 block, or exit(101) on failure.
 * calloc both zeroes the memory (all-bits-zero is 0.0f on IEEE-754
 * platforms) and checks the count*size product for overflow.
 **********************************************************************/
float* allocate_clean_block() {
  float* p =
      (float*)calloc((size_t)bots_arg_size_1 * bots_arg_size_1, sizeof(float));
  if (p == NULL) {
    bots_message("Error: Out of memory\n");
    exit(101);
  }
  return p;
}
/***********************************************************************
* lu0:
**********************************************************************/
/***********************************************************************
 * lu0: in-place LU factorization (no pivoting) of a dense diagonal block.
 **********************************************************************/
void lu0(float* diag) {
  int row, col, piv;
  for (piv = 0; piv < bots_arg_size_1; piv++)
    for (row = piv + 1; row < bots_arg_size_1; row++) {
      /* multiplier for this row, stored in the strict lower triangle */
      diag[row * bots_arg_size_1 + piv] /= diag[piv * bots_arg_size_1 + piv];
      for (col = piv + 1; col < bots_arg_size_1; col++)
        diag[row * bots_arg_size_1 + col] -=
            diag[row * bots_arg_size_1 + piv] * diag[piv * bots_arg_size_1 + col];
    }
}
/***********************************************************************
* bdiv:
**********************************************************************/
/***********************************************************************
 * bdiv: update an off-diagonal row block against the upper-triangular
 * factor held in diag (row <- row * U^{-1}).
 **********************************************************************/
void bdiv(float* diag, float* row) {
  int r, c, k;
  for (r = 0; r < bots_arg_size_1; r++)
    for (k = 0; k < bots_arg_size_1; k++) {
      row[r * bots_arg_size_1 + k] /= diag[k * bots_arg_size_1 + k];
      for (c = k + 1; c < bots_arg_size_1; c++)
        row[r * bots_arg_size_1 + c] -=
            row[r * bots_arg_size_1 + k] * diag[k * bots_arg_size_1 + c];
    }
}
/***********************************************************************
* bmod:
**********************************************************************/
/***********************************************************************
 * bmod: dense block multiply-subtract, inner -= row * col.
 **********************************************************************/
void bmod(float* row, float* col, float* inner) {
  int i, j, k;
  for (i = 0; i < bots_arg_size_1; i++)
    for (j = 0; j < bots_arg_size_1; j++) {
      /* Accumulate in a local: same float operation order as updating the
       * memory cell each step, but avoids re-reading inner[i][j] in the
       * innermost loop. */
      float acc = inner[i * bots_arg_size_1 + j];
      for (k = 0; k < bots_arg_size_1; k++)
        acc -= row[i * bots_arg_size_1 + k] * col[k * bots_arg_size_1 + j];
      inner[i * bots_arg_size_1 + j] = acc;
    }
}
/***********************************************************************
* fwd:
**********************************************************************/
/***********************************************************************
 * fwd: forward-solve a column block against the unit lower-triangular
 * factor held in diag (col <- L^{-1} * col).
 **********************************************************************/
void fwd(float* diag, float* col) {
  int r, c, k;
  for (c = 0; c < bots_arg_size_1; c++)
    for (k = 0; k < bots_arg_size_1; k++)
      for (r = k + 1; r < bots_arg_size_1; r++)
        col[r * bots_arg_size_1 + c] -=
            diag[r * bots_arg_size_1 + k] * col[k * bots_arg_size_1 + c];
}
void sparselu_init(float*** pBENCH, char* pass) {
*pBENCH = (float**)malloc(bots_arg_size * bots_arg_size * sizeof(float*));
genmat(*pBENCH);
print_structure(pass, *pBENCH);
}
/* Task-parallel blocked sparse LU: one thread (single nowait) drives the
 * outer kk loop inside an untied task; fwd/bdiv updates for step kk run as
 * child tasks, a taskwait synchronizes, then the trailing-submatrix bmod
 * updates run as tasks and a second taskwait closes the step. */
void sparselu_par_call(float** BENCH) {
int ii, jj, kk;
bots_message(
"Computing SparseLU Factorization (%dx%d matrix with %dx%d blocks) ",
bots_arg_size, bots_arg_size, bots_arg_size_1, bots_arg_size_1);
#pragma omp parallel
#pragma omp single nowait
#pragma omp task untied
for (kk = 0; kk < bots_arg_size; kk++) {
/* factor the diagonal block for this step */
lu0(BENCH[kk * bots_arg_size + kk]);
/* row-panel updates (independent tasks) */
for (jj = kk + 1; jj < bots_arg_size; jj++)
if (BENCH[kk * bots_arg_size + jj] != NULL)
#pragma omp task untied firstprivate(kk, jj) shared(BENCH)
{
fwd(BENCH[kk * bots_arg_size + kk], BENCH[kk * bots_arg_size + jj]);
}
/* column-panel updates (independent tasks) */
for (ii = kk + 1; ii < bots_arg_size; ii++)
if (BENCH[ii * bots_arg_size + kk] != NULL)
#pragma omp task untied firstprivate(kk, ii) shared(BENCH)
{
bdiv(BENCH[kk * bots_arg_size + kk], BENCH[ii * bots_arg_size + kk]);
}
/* panels must be done before the trailing updates read them */
#pragma omp taskwait
for (ii = kk + 1; ii < bots_arg_size; ii++)
if (BENCH[ii * bots_arg_size + kk] != NULL)
for (jj = kk + 1; jj < bots_arg_size; jj++)
if (BENCH[kk * bots_arg_size + jj] != NULL)
#pragma omp task untied firstprivate(kk, jj, ii) shared(BENCH)
{
/* fill-in: allocate the target block on first touch */
if (BENCH[ii * bots_arg_size + jj] == NULL)
BENCH[ii * bots_arg_size + jj] = allocate_clean_block();
bmod(BENCH[ii * bots_arg_size + kk], BENCH[kk * bots_arg_size + jj],
BENCH[ii * bots_arg_size + jj]);
}
#pragma omp taskwait
}
bots_message(" completed!\n");
}
/* Sequential blocked sparse LU (reference for sparselu_par_call): for each
 * diagonal block, factor it, update its row panel (fwd) and column panel
 * (bdiv), then apply the trailing-submatrix updates (bmod), allocating
 * fill-in blocks on demand. */
void sparselu_seq_call(float** BENCH) {
  int row, col, diag;
  for (diag = 0; diag < bots_arg_size; diag++) {
    lu0(BENCH[diag * bots_arg_size + diag]);
    for (col = diag + 1; col < bots_arg_size; col++)
      if (BENCH[diag * bots_arg_size + col] != NULL)
        fwd(BENCH[diag * bots_arg_size + diag],
            BENCH[diag * bots_arg_size + col]);
    for (row = diag + 1; row < bots_arg_size; row++)
      if (BENCH[row * bots_arg_size + diag] != NULL)
        bdiv(BENCH[diag * bots_arg_size + diag],
             BENCH[row * bots_arg_size + diag]);
    for (row = diag + 1; row < bots_arg_size; row++)
      if (BENCH[row * bots_arg_size + diag] != NULL)
        for (col = diag + 1; col < bots_arg_size; col++)
          if (BENCH[diag * bots_arg_size + col] != NULL) {
            if (BENCH[row * bots_arg_size + col] == NULL)
              BENCH[row * bots_arg_size + col] = allocate_clean_block();
            bmod(BENCH[row * bots_arg_size + diag],
                 BENCH[diag * bots_arg_size + col],
                 BENCH[row * bots_arg_size + col]);
          }
  }
}
/* Dump the final block sparsity structure of BENCH. */
void sparselu_fini(float** BENCH, char* pass) { print_structure(pass, BENCH); }
/* Compare the sequential and parallel factorizations block by block: the
 * sparsity patterns must agree, and allocated blocks must match entrywise
 * (via checkmat).  Stops at the first mismatch. */
int sparselu_check(float** SEQ, float** BENCH) {
  int bi, bj;
  int ok = 1;
  for (bi = 0; (bi < bots_arg_size) && ok; bi++) {
    for (bj = 0; (bj < bots_arg_size) && ok; bj++) {
      float* s = SEQ[bi * bots_arg_size + bj];
      float* p = BENCH[bi * bots_arg_size + bj];
      if ((s == NULL) != (p == NULL))
        ok = FALSE; /* pattern mismatch */
      else if (s != NULL)
        ok = checkmat(s, p);
    }
  }
  return ok ? BOTS_RESULT_SUCCESSFUL : BOTS_RESULT_UNSUCCESSFUL;
}
|
GB_unop__asin_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__asin_fc64_fc64
// op(A') function: GB_unop_tran__asin_fc64_fc64
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = casin (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = casin (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = casin (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ASIN || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply z = casin(x) elementwise: Cx [p] = casin (Ax [p]) for p in [0, anz).
// Aliasing Cx == Ax is safe because each entry is read before it is written.
GrB_Info GB_unop_apply__asin_fc64_fc64
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
int64_t anz, // number of entries to process
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// operator compiled out via GxB_NO_ASIN / GxB_NO_FC64; caller uses generic case
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// static schedule: the anz entries are split evenly across the threads
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ; // the cast "GxB_FC64_t cij = aij" is the identity here
Cx [p] = casin (z) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast, and apply casin to each entry.
// The actual transpose loop lives in the shared template GB_unop_transpose.c,
// which is textually included below and instantiated through the GB_* macros
// defined earlier in this file.
GrB_Info GB_unop_tran__asin_fc64_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
build.c | #ifndef BUILD_C
#define BUILD_C
#include <string.h> /* memcpy() */
#include <stdio.h> /* Printing to stdout/stderr */
#include <stdlib.h> /* malloc()/free() */
#include <complex.h> /* Complex numbers */
#include <errno.h> /* Errors from parsing user input */
#include "build.h"
#include "utils.c"
/* Encode a symbol: return the alphabet waveform for the integer `value`.
 * The caller does not own the returned pointer; it aliases syms[value]. */
double* encode(int value, double **syms)
{
    double *symbol = syms[value];
    return symbol;
}
/* Maximum-correlation detector: return the index of the alphabet symbol whose
 * dot product with the received waveform `recv` is largest.
 * N is the alphabet size, M the number of samples per symbol. */
int decode(double *recv, double **syms, int N, int M)
{
    int best = 0;
    double best_prod = 0.0;
    for (int i = 0; i < N; i++) {
        /* Correlate candidate symbol i with the received samples. */
        double dot = 0.0;
        for (int j = 0; j < M; j++)
            dot += syms[i][j] * recv[j];
        /* The first candidate always seeds the maximum; later candidates
         * must strictly exceed it to win. */
        if (i == 0 || dot > best_prod) {
            best_prod = dot;
            best = i;
        }
    }
    return best;
}
/* Generate a random initial state: K active complex Fourier coefficients for
 * each of the N symbols, with real and imaginary parts uniform in [-1, 1)
 * (assuming drandom() returns values in [0, 1) -- defined in utils.c).
 * Returns a heap array of N*K complex numbers owned by the caller, or NULL
 * on allocation failure. */
complex double* initial_state(int N, int M, int K)
{
    complex double *state; /* The generated initial state */
    int i;                 /* Loop counter */

    state = malloc((size_t)N * K * sizeof(complex double));
    /* BUG FIX: the original wrote through the result without checking it. */
    if (state == NULL)
        return NULL;
    /* Generate K active fourier bins for each of the N symbols */
    for (i = 0; i < N * K; i++)
        state[i] = (2 * drandom() - 1) + (2 * drandom() - 1) * I;
    return state;
}
/* Build the pattern-search direction set: for an n = 2*N*K dimensional real
 * search space this returns P = 2n unit directions -- one +1 / +i and one
 * -1 / -i step per real/imaginary component.  Caller owns the 2*(N*2*K)
 * returned vectors, each of N*K complex entries. */
complex double** get_D(int N, int K)
{
int i, j; /* Loop counters */
complex double **output; /* The directional vectors */
/* Initialize the output with 2 * P vectors */
output = (complex double**) malloc(sizeof(complex double*) * 2 * (N * 2 * K));
for (i = 0; i < 2 * (N * 2 * K); i++) {
/* Allocate each vector as N*K dimensions */
output[i] = (complex double*) malloc(N * K * sizeof(complex double));
/* Set everything to 0 */
for (j = 0; j < N * K; j++)
output[i][j] = 0.0;
/* Set the first half to positive, the second is negative */
if (i < N * 2 * K) {
/* Even positions are real steps, odd are imaginary steps */
if (i % 2 == 0)
output[i][i / 2] = 1.0;
else
output[i][i / 2] = 1.0 * I;
} else {
/* Even positions are real steps, odd are imaginary steps */
if (i % 2 == 0)
output[i][(i % (N * 2 * K)) / 2] = -1.0;
else
output[i][(i % (N * 2 * K)) / 2] = -1.0 * I;
}
}
return output;
}
/* Cost of a candidate state: its simulated bit error rate, estimated by E()
 * over a fixed number of random symbol transmissions. */
double score(complex double *state, int N, int M, int K)
{
    const int trials = 1000; /* symbols simulated per evaluation */
    return E(state, N, M, K, trials);
}
/* Check whether stepping `state` by `delta` along direction `d` keeps every
 * component of the candidate inside the unit box [-1,1] x [-1,1]i.
 * Returns 1 when the move is feasible, 0 otherwise. */
int is_feasible(complex double *state, int size, complex double *d, double delta)
{
    for (int i = 0; i < size; i++) {
        /* Candidate value of component i after the step. */
        complex double c = state[i] + d[i] * delta;
        double re = creal(c);
        double im = cimag(c);
        /* One component escaping the box makes the whole move infeasible. */
        if (re < -1.0 || re > 1.0 || im < -1.0 || im > 1.0)
            return 0;
    }
    return 1;
}
/* Write the candidate state `state + delta * d` into `dest`, component by
 * component.  All three vectors have `size` complex entries. */
void set_neighbor(complex double *dest, complex double *state, int size, complex double *d, double delta)
{
    for (int i = 0; i < size; i++)
        dest[i] = state[i] + d[i] * delta;
}
/* Optimize the starting state via Pattern Search.
 *
 * state : the N*K-component complex starting point; updated in place with the
 *         best state found (caller keeps ownership)
 * args  : search parameters (direction set D, initial delta, tolerance, ...)
 * ber   : out-parameter receiving the final cost (bit error rate)
 *
 * Returns the real-valued time-domain alphabet of the optimized state.
 */
double** optimize(complex double *state, struct PSArgs args, double *ber)
{
    int n,                 /* dimensionality of the (real) state vector */
        P,                 /* number of directional vectors */
        k, i;              /* loop counters */
    double delta,          /* current step size */
           cost;           /* cost score of the current state */
    complex double **next; /* one candidate state per direction */

    n = args.N * args.K * 2;
    P = 2 * n;
    delta = args.delta_0;
    cost = score(state, args.N, args.M, args.K);

    /* Allocate P candidate vectors of N*K complex components each. */
    next = (complex double**) malloc(sizeof(complex double*) * P);
    for (i = 0; i < P; i++) {
        next[i] = (complex double*) malloc(sizeof(complex double) * args.N
                * args.K);
    }

    /* Loop until either the max iterations or target cost is reached. */
    for (k = 0; k < args.max_iter && cost >= args.tolerance; k++) {
        int min_index = -1;    /* best candidate this iteration (-1 = none) */
        double min_cost = 0.0; /* its cost; only meaningful if min_index >= 0 */

        /* Evaluate every candidate direction in parallel. */
        #pragma omp parallel for num_threads(THREADS)
        for (i = 0; i < P; i++) {
            /* Skip directions whose step leaves the feasible region. */
            if (is_feasible(state, args.N * args.K, args.D[i], delta)) {
                set_neighbor(next[i], state, args.N * args.K, args.D[i], delta);
                double tmp = score(next[i], args.N, args.M, args.K);
                /* BUG FIX: min_index/min_cost are shared between threads; the
                 * original updated them with no synchronization (a data race)
                 * and seeded the minimum with "i == 0", which is wrong when
                 * direction 0 is infeasible or when another thread runs first.
                 * Reduce under a critical section keyed on min_index < 0. */
                #pragma omp critical
                {
                    if (min_index < 0 || tmp < min_cost) {
                        min_index = i;
                        min_cost = tmp;
                    }
                }
            }
        }

        if (min_index < 0) {
            /* No feasible candidate at all: halve the search distance. */
            printf("%06d, %.3e: No suitable candidate state vectors\n", k,
                    delta);
            delta /= 2.0;
        } else if (min_cost < cost) {
            /* Found an improvement: adopt it and double the search distance. */
            printf("%06d, %.3e: Changing from %.2e to %.2e\n", k, delta, cost,
                    min_cost);
            cost = min_cost;
            for (i = 0; i < args.N * args.K; i++) {
                state[i] = next[min_index][i];
            }
            delta *= 2.0;
        } else {
            /* Best candidate is no better: halve the search distance. */
            printf("%06d, %.3e: Keeping %.2e instead of %.2e\n", k, delta, cost,
                    min_cost);
            delta /= 2.0;
        }
    }

    /* Store the BER for the caller. */
    *ber = cost;

    /* Clean up the candidate buffers. */
    for (i = 0; i < P; i++)
        free(next[i]);
    free(next);

    return state_to_time(state, args.N, args.M, args.K);
}
/* Estimate the bit error rate of the alphabet encoded by `state`.
 *
 * Encodes `iter` random symbols, runs the concatenated signal through the
 * simulated GSM codec (simulate_gsm, defined in utils.c), decodes each
 * symbol, and returns the fraction of symbol bits that came back wrong
 * (0.0 .. 1.0).
 */
double E(complex double *state, int N, int M, int K, int iter)
{
    double **td,   /* time-domain representation of the alphabet */
           output, /* the resulting error rate */
           *tmp;   /* concatenated transmitted signal */
    int *values,   /* the symbol values that were encoded */
        wrong,     /* number of bits decoded incorrectly */
        total,     /* total number of bits checked */
        bits,      /* number of bits carried by one symbol */
        i, j;      /* loop counters */

    /* Get the time domain representation of the state vector. */
    td = state_to_time(state, N, M, K);
    /* Allocate space for the values and their encodings. */
    tmp = malloc(iter * M * sizeof(double));
    values = malloc(iter * sizeof(int));

    /* Encode iter random symbols back to back. */
    for (i = 0; i < iter; i++) {
        values[i] = lrandom() % N;
        memcpy(&tmp[i * M], td[values[i]], M * sizeof(double));
    }

    /* Compress and decompress the signal to simulate a GSM network. */
    simulate_gsm(tmp, iter * M);

    wrong = total = 0;
    /* Bits per symbol: ceil(log2 N).  Hoisted out of the loop. */
    bits = (int) ceil(log(N) / log(2));
    for (i = 0; i < iter; i++) {
        /* Decode the i-th received symbol as an integer. */
        int dec = decode(&tmp[i * M], td, N, M);
        /* Compare every bit of the decoded value against the original.
         * BUG FIX: the original shifted by the symbol index i instead of the
         * bit index j on both sides, so it compared the wrong bits. */
        for (j = 0; j < bits; j++) {
            if (((dec >> j) & 1) != ((values[i] >> j) & 1))
                wrong++;
            total++;
        }
    }

    /* Fraction of bits that were decoded incorrectly. */
    output = (double) wrong / total;

    /* Deallocate the no-longer-needed memory. */
    for (i = 0; i < N; i++)
        free(td[i]);
    free(td);
    free(tmp);
    free(values); /* BUG FIX: `values` was leaked in the original */
    return output;
}
/* Convert a state vector to a time-domain representation of the alphabet.
 * Builds one complex spectrum per symbol from its K subcarrier coefficients,
 * converts them to real waveforms, and returns the N x M alphabet (owned by
 * the caller). */
double** state_to_time(complex double *state, int N, int M, int K)
{
    complex double **fd; /* The frequency domain of the state */
    double **td;         /* The resulting time-domain alphabet */
    int i;               /* Loop counter */

    /* BUG FIX: allocate N pointers, not N complex values -- the original used
     * sizeof(complex double) for an array of complex double pointers. */
    fd = (complex double**) malloc(N * sizeof(complex double*));
    /* Convert each symbol's K subcarriers into a full spectrum. */
    for (i = 0; i < N; i++)
        fd[i] = get_frequency_domain(&state[i * K], N, M, K);

    /* Convert the frequency domains to time domains. */
    td = get_time_domain(fd, N, M, K);

    /* BUG FIX: the frequency-domain scratch arrays were leaked; to_time_domain
     * only reads them, so they can be released here. */
    for (i = 0; i < N; i++)
        free(fd[i]);
    free(fd);

    return td;
}
/* Convert an alphabet from frequency domain to time domain: map each of the
 * N complex spectra in `phi` to a real M-sample waveform.  The returned
 * array and its rows are owned by the caller. */
double** get_time_domain(complex double **phi, int N, int M, int K)
{
    double **signals = (double**) malloc(N * sizeof(double*));
    for (int i = 0; i < N; i++)
        signals[i] = to_time_domain(phi[i], N, M, K);
    return signals;
}
/* Convert one complex spectrum (M+1 bins) to a real-valued time domain signal
 * of M samples.  inverse_dft() and norm() come from utils.c; this assumes
 * inverse_dft returns a heap array of M+1 complex samples and norm() returns
 * the magnitude of its argument -- TODO confirm against utils.c. */
double* to_time_domain(complex double *phi, int N, int M, int K)
{
complex double *g; /* The time domain */
double *output; /* The real-valued time domain */
int k; /* Loop counters */
/* Allocate memory for the output of M samples */
output = (double*) malloc(M * sizeof(double));
/* Get the time domain via the inverse DFT */
g = inverse_dft(phi, M + 1);
/* Normalize the symbol power: scale each non-zero sample to unit magnitude */
for (k = 0; k < M + 1; k++) {
/* If one component is non-zero, divide by norm() */
if (creal(g[k]) != 0 || cimag(g[k]) != 0)
g[k] = g[k] / norm(g[k]);
/* Save the real part; only the first M samples form the output signal */
if (k < M)
output[k] = creal(g[k]);
}
/* De-allocate the complex time domain */
free(g);
return output;
}
/* Build the length-(M+1) complex spectrum for one symbol from its K random
 * subcarrier coefficients z, conjugate-symmetric so the time signal is real:
 *
 *   phi_0   = 0                                  (DC)
 *   phi_k   = z_{k-1}        for k = 1 .. K      (active bins)
 *   phi_k   = 0              for k = K+1 .. k_N  (k_N = M/2, Nyquist)
 *   phi_k   = conj(phi_{M-k}) for k = k_N+1 .. M-1  (mirror half)
 *   phi_M   = 0
 *
 * The returned array of M+1 entries is owned by the caller. */
complex double* get_frequency_domain(complex double *z, int N, int M, int K)
{
    complex double *phi; /* The frequency domain */
    int k_N,             /* Index of the Nyquist frequency (M / 2) */
        k;               /* Loop counter */

    k_N = M / 2;
    phi = (complex double*) malloc((M + 1) * sizeof(complex double));

    /* Rule 1: DC bin is zero. */
    phi[0] = 0;
    /* Rule 2: the first K bins carry the subcarrier coefficients. */
    for (k = 1; k <= K; k++)
        phi[k] = z[k - 1];
    /* Rule 3: remaining bins up to Nyquist are zero. */
    for (k = K + 1; k <= k_N; k++)
        phi[k] = 0;
    /* Rule 4: the upper half mirrors the lower half (conjugate symmetry).
     * BUG FIX: the original computed conj(z[M - k - 1]) unconditionally,
     * indexing past the K coefficients z actually holds whenever the mirrored
     * bin was one of the zero bins; those bins must themselves be zero. */
    for (k = k_N + 1; k < M; k++) {
        int m = M - k; /* mirrored lower-half bin */
        phi[k] = (m <= K) ? conj(z[m - 1]) : 0;
    }
    phi[M] = 0;
    return phi;
}
/* Entry point: parse N (alphabet size), M (samples per symbol) and K
 * (subcarriers) from argv, run the pattern-search optimizer, and print the
 * resulting alphabet as a C header on stdout. */
int main(int argc, char **argv)
{
    int N,             /* The number of symbols in the alphabet */
        M,             /* The number of samples per symbol */
        K;             /* The number of subcarriers */
    char *end;         /* End pointer for strtol() error detection */
    double **alphabet; /* The speech-like symbols */
    double ber;        /* The BER for this alphabet */

    /* Verify the number of arguments is correct. */
    if (argc < 4) {
        fprintf(stderr,
                "Usage: %s <N value ([1,inf])> <M value ([1,inf])> <K value ([1,inf])>\n",
                argv[0]);
        return ERROR_USAGE;
    }

    /* Parse the N-value.  BUG FIX: the original passed NULL as strtol's end
     * pointer and then tested a variable strtol never wrote, so malformed
     * input was not detected; errno must also be cleared before each call. */
    errno = 0;
    end = NULL;
    N = strtol(argv[1], &end, 10);
    if (end == argv[1] || errno == ERANGE || errno == EINVAL) {
        perror("strtol");
        return ERROR_USAGE;
    } else if (N < MIN_N || N > MAX_N) {
        fprintf(stderr, "Error: N value (%d) is out of bounds [%d,%d]\n", N,
                MIN_N, MAX_N);
        return ERROR_USAGE;
    }

    /* Parse the M-value, handling errors the same way. */
    errno = 0;
    end = NULL;
    M = strtol(argv[2], &end, 10);
    if (end == argv[2] || errno == ERANGE || errno == EINVAL) {
        perror("strtol");
        return ERROR_USAGE;
    } else if (M < MIN_M || M > MAX_M) {
        fprintf(stderr, "Error: M value (%d) is out of bounds [%d,%d]\n", M,
                MIN_M, MAX_M);
        return ERROR_USAGE;
    }

    /* Parse the K-value, handling errors the same way. */
    errno = 0;
    end = NULL;
    K = strtol(argv[3], &end, 10);
    if (end == argv[3] || errno == ERANGE || errno == EINVAL) {
        perror("strtol");
        return ERROR_USAGE;
    } else if (K < MIN_K || K > MAX_K) {
        fprintf(stderr, "Error: K value (%d) is out of bounds [%d,%d]\n", K,
                MIN_K, MAX_K);
        return ERROR_USAGE;
    } else if (K >= M / 2) {
        /* K active bins must fit below the Nyquist frequency. */
        fprintf(stderr, "Error: K value must be less than M/2\n");
        return ERROR_USAGE;
    }

    /* Build the pattern-search arguments. */
    struct PSArgs args;
    args.D = get_D(N, K);    /* Get the standard directional vectors */
    args.delta_0 = 0.5;      /* Set the initial delta */
    args.tolerance = 0.0001; /* Set the tolerance */
    args.max_iter = 10000;   /* Set the maximum number of iterations */
    args.N = N;              /* Set the given N, M, & K */
    args.M = M;
    args.K = K;

    /* Generate an initial state. */
    complex double *start = initial_state(N, M, K);
    /* Optimize the state and return a real-valued time-domain alphabet. */
    alphabet = optimize(start, args, &ber);

    /* Emit the alphabet as a generated C header. */
    printf("-----BEGIN-HEADER-----\n");
    printf("#ifndef SYMBOLS_H\n");
    printf("#define SYMBOLS_H\n");
    printf("\n\n/*\n");
    printf(" These symbols have a BER of %e%%\n", ber * 100.0);
    printf("\n*/\n\n");
    printf("#define N %d\n", N);
    /* BUG FIX: the original printed "#define N" twice; the second macro is M. */
    printf("#define M %d\n\n", M);
    printf("double SYMBOLS[N][M] = {\n");
    for (int i = 0; i < N; i++) {
        printf(" {%lf", alphabet[i][0]);
        for (int j = 1; j < M; j++) {
            printf(", %lf", alphabet[i][j]);
        }
        printf("}");
        if (i < N - 1)
            printf(",\n");
    }
    printf("};\n\n");
    printf("#endif\n");
    printf("-----END-HEADER-----");
    return SUCCESS;
}
#endif
|
test.c | #include <stdio.h>
#include "../utilities/check.h"
#define N 100
// Check the `aligned` clause on `distribute simd`: offload through an aligned
// pointer alias of a[] and verify every element was written with its index.
// Returns the number of mismatches found (0 on success).
int test_aligned(){
int a[N], aa[N];
int i, error = 0;
// initialize device-written and reference arrays with a sentinel
for(i=0; i<N; i++)
aa[i] = a[i] = -1;
// alias a[] through a pointer so the aligned() clause can name it
int *b = a;
// offload: each SIMD lane writes its own index
#pragma omp target map(tofrom: b[0:100])
{
#pragma omp teams
#pragma omp distribute simd aligned(b: 8*sizeof(int))
for(int k=0; k<N; k++)
b[k] = k;
}
// host: compute the expected result
for(i=0; i<N; i++)
aa[i] = i;
// check, giving up after more than 10 mismatches
for(i=0; i<N; i++) {
if (a[i] != aa[i])
printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
if (error > 10) {
printf("abort\n");
return error;
}
}
return error;
}
// Check `distribute simd collapse(2)`: a collapsed 25x4 loop nest on the
// device must write every a[i] with its own index.  Returns the number of
// mismatches found (0 on success).
int test_collapsed(){
  int a[N], aa[N];
  int error = 0;
  // seed both arrays with a sentinel value
  for (int i = 0; i < N; i++)
    aa[i] = a[i] = -1;
  // offload: the collapsed two-level loop covers indices 0..N-1
  #pragma omp target map(tofrom: a[0:100])
  {
    #pragma omp teams
    #pragma omp distribute simd collapse(2)
    for (int k = 0; k < N/4; k++)
      for (int l = 0; l < 4; l++)
        a[k*4+l] = k*4+l;
  }
  // host reference result
  for (int i = 0; i < N; i++)
    aa[i] = i;
  // compare, giving up after more than 10 mismatches
  for (int i = 0; i < N; i++) {
    if (a[i] != aa[i])
      printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
    if (error > 10) {
      printf("abort\n");
      return error;
    }
  }
  return error;
}
// Check the `lastprivate(n)` clause on `distribute simd`: after the loop, n
// must hold the value from the sequentially-last iteration (N-1), which the
// test stores into a[0].  Returns the number of mismatches (0 on success).
int test_lastprivate(){
int a[N], aa[N];
int i, error = 0;
// initialize device-written and reference arrays with a sentinel
for(i=0; i<N; i++)
aa[i] = a[i] = -1;
// offload
#pragma omp target map(tofrom: a[0:100])
{
#pragma omp teams num_teams(1)
{
int n;
#pragma omp distribute simd lastprivate(n)
for(int k=0; k<N; k++) {
a[k] = k;
n = k;
}
// n must now carry the value from iteration k == N-1
a[0] = n;
}
}
// host: expected result (a[0] overwritten with the last index)
for(i=0; i<N; i++)
aa[i] = i;
aa[0] = N-1;
// check, giving up after more than 10 mismatches
for(i=0; i<N; i++) {
if (a[i] != aa[i])
printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
if (error > 10) {
printf("abort\n");
return error;
}
}
return error;
}
// Check the `linear(l: 2)` clause on `distribute simd`: l advances by 2 per
// iteration, so a[k] must equal 2*k afterwards.  Returns the number of
// mismatches found (0 on success).
int test_linear(){
  int a[N], aa[N];
  int i, error = 0;
  // initialize device-written and reference arrays with a sentinel
  for(i=0; i<N; i++)
    aa[i] = a[i] = -1;
  int l = 0;
  // offload
  #pragma omp target map(tofrom: a[0:100])
  {
    #pragma omp teams num_teams(1)
    #pragma omp distribute simd linear(l: 2)
    for(int k=0; k<N; k++) {
      l = 2*k;
      a[k] = l;
    }
  }
  // host reference result
  for(i=0; i<N; i++)
    aa[i] = 2*i;
  // check, giving up after more than 10 mismatches
  for(i=0; i<N; i++) {
    if (a[i] != aa[i])
      printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
    if (error > 10) {
      printf("abort\n");
      return error; // BUG FIX: stray ";;" (empty statement) removed
    }
  }
  return error;
}
// Check the `private(n)` clause on `distribute simd`: every lane gets its own
// copy of n, so a[k] must still equal k for every k.  Returns the number of
// mismatches found (0 on success).
int test_private(){
  int a[N], aa[N];
  int error = 0;
  // seed both arrays with a sentinel value
  for (int i = 0; i < N; i++)
    aa[i] = a[i] = -1;
  int n;
  // offload: write each index through the privatized temporary n
  #pragma omp target map(tofrom: a[0:100])
  {
    #pragma omp teams
    #pragma omp distribute simd private(n)
    for (int k = 0; k < N; k++) {
      n = k;
      a[k] = n;
    }
  }
  // host reference result
  for (int i = 0; i < N; i++)
    aa[i] = i;
  // compare, giving up after more than 10 mismatches
  for (int i = 0; i < N; i++) {
    if (a[i] != aa[i])
      printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
    if (error > 10) {
      printf("abort\n");
      return error;
    }
  }
  return error;
}
// Check the `safelen(2)` clause on `distribute simd`: the loop carries a
// dependence of distance 2 (a[k] reads a[k-2]), which safelen(2) permits.
// The result must be a[k] == k.  Returns the number of mismatches.
int test_safelen(){
int a[N], aa[N];
int i, error = 0;
// initialize device-written and reference arrays with a sentinel
for(i=0; i<N; i++)
aa[i] = a[i] = -1;
// offload
#pragma omp target map(tofrom: a[0:100])
{
#pragma omp teams num_teams(1)
#pragma omp distribute simd safelen(2)
for(int k=0; k<100; k++) {
// loop-carried dependence of distance 2: legal under safelen(2)
if (k > 1){
a[k] = a[k-2] + 2;
}
else{
a[k] = k;
}
}
}
// host reference result
for(i=0; i<N; i++)
aa[i] = i;
// check, giving up after more than 10 mismatches
for(i=0; i<N; i++) {
if (a[i] != aa[i])
printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
if (error > 10) {
printf("abort\n");
return error;
}
}
return error;
}
int main()
{
int error = 0;
check_offloading();
// Clauses
error += test_aligned();
error += test_collapsed();
error += test_lastprivate();
error += test_linear();
error += test_private();
error += test_safelen();
// report
printf("done with %d errors\n", error);
return error;
}
|
sageInterface.h | #ifndef ROSE_SAGE_INTERFACE
#define ROSE_SAGE_INTERFACE
#include "sage3basic.hhh"
#include <stdint.h>
#include <utility>
#include "rosePublicConfig.h" // for ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
#include "OmpAttribute.h"
#if 0 // FMZ(07/07/2010): the argument "nextErrorCode" should be call-by-reference
SgFile* determineFileType ( std::vector<std::string> argv, int nextErrorCode, SgProject* project );
#else
SgFile* determineFileType ( std::vector<std::string> argv, int& nextErrorCode, SgProject* project );
#endif
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
#include "rewrite.h"
#endif
// DQ (7/20/2008): Added support for unparsing abitrary strings in the unparser.
#include "astUnparseAttribute.h"
#include <set>
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
#include "LivenessAnalysis.h"
#include "abstract_handle.h"
#include "ClassHierarchyGraph.h"
#endif
// DQ (8/19/2004): Moved from ROSE/src/midend/astRewriteMechanism/rewrite.h
//! A global function for getting the string associated with an enum (which is defined in global scope)
ROSE_DLL_API std::string getVariantName (VariantT v);
// DQ (12/9/2004): Qing, Rich and Dan have decided to start this namespace within ROSE
// This namespace is specific to interface functions that operate on the Sage III AST.
// The name was chosen so as not to conflict with other classes within ROSE.
// This will become the future home of many interface functions which operate on
// the AST and which are generally useful to users. As a namespace multiple files can be used
// to represent the compete interface and different developers may contribute interface
// functions easily.
// Constructor handling: (We have sageBuilder.h now for this purpose, Liao 2/1/2008)
// We could add simpler layers of support for construction of IR nodes by
// hiding many details in "makeSg***()" functions. Such functions would
// return pointers to the associated Sg*** objects and would be able to hide
// many IR specific details, including:
// memory handling
// optional parameter settings not often required
// use of Sg_File_Info objects (and setting them as transformations)
//
// namespace AST_Interface (this name is taken already by some of Qing's work :-)
//! An alias for Sg_File_Info::generateDefaultFileInfoForTransformationNode()
#define TRANS_FILE Sg_File_Info::generateDefaultFileInfoForTransformationNode()
/** Functions that are useful when operating on the AST.
*
* The Sage III IR design attempts to be minimalist. Thus additional functionality is intended to be presented using separate
* higher level interfaces which work with the IR. This namespace collects functions that operate on the IR and support
* numerous types of operations that are common to general analysis and transformation of the AST. */
namespace SageInterface
{
// Liao 6/22/2016: keep records of loop init-stmt normalization, later help undo it to support autoPar.
//! Bookkeeping for for-loop init-statement normalization so it can later be
//! undone (used by autoPar, per the note above the declaration).
struct Transformation_Record
{
// a lookup table to check if a for loop has been normalized for its c99-style init-stmt
std::map <SgForStatement* , bool > forLoopInitNormalizationTable;
// Detailed record about the original declaration (1st in the pair) and the normalization generated new declaration (2nd in the pair)
std::map <SgForStatement* , std::pair<SgVariableDeclaration*, SgVariableDeclaration*> > forLoopInitNormalizationRecord;
} ;
ROSE_DLL_API extern Transformation_Record trans_records;
// DQ (4/3/2014): Added general AST support separate from the AST.
// Container and API for analysis information that is outside of the AST and as a result
// prevents frequent modification of the IR.
//! Analysis-side container grouping all declarations associated with the same
//! entity, keyed by the first nondefining declaration (kept outside the AST so
//! the IR is not modified by analyses).
class DeclarationSets
{
// DQ (4/3/2014): This stores all associated declarations as a map of sets.
// the key to the map is the first nondefining declaration and the elements of the set are
// all of the associated declarations (including the defining declaration).
private:
//! Map of first-nondefining declaration to all other associated declarations.
std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > declarationMap;
public:
//! Record `decl` in the set keyed by its first nondefining declaration.
void addDeclaration(SgDeclarationStatement* decl);
//! All declarations associated with `decl` (including the defining one).
const std::set<SgDeclarationStatement*>* getDeclarations(SgDeclarationStatement* decl);
//! Direct access to the underlying map.
std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > & getDeclarationMap();
//! Whether `decl` appears in a defining scope -- see implementation for the precise rule.
bool isLocatedInDefiningScope(SgDeclarationStatement* decl);
};
// DQ (4/3/2014): This constructs a data structure that holds analysis information about
// the AST that is separate from the AST. This is intended to be a general mechanism
// to support analysis information without constantly modifying the IR.
DeclarationSets* buildDeclarationSets(SgNode*);
//! An internal counter for generating unique SgName
ROSE_DLL_API extern int gensym_counter;
// tps : 28 Oct 2008 - support for finding the main interpretation
SgAsmInterpretation* getMainInterpretation(SgAsmGenericFile* file);
//! Get the unsigned value of a disassembled constant.
uint64_t getAsmConstant(SgAsmValueExpression* e);
//! Get the signed value of a disassembled constant.
int64_t getAsmSignedConstant(SgAsmValueExpression *e);
//! Function to add "C" style comment to statement.
void addMessageStatement( SgStatement* stmt, std::string message );
//! A persistent AST attribute carrying a unique name for an expression.
class UniqueNameAttribute : public AstAttribute
{
  private:
    std::string name; // the stored unique name
  public:
    //! Construct with an optional initial name (empty by default).
    UniqueNameAttribute(std::string n="") : name(n) {}
    //! Replace the stored name.
    void set_name (std::string n) { name = n; }
    //! Retrieve the stored name.
    std::string get_name () { return name; }
};
// DQ (3/2/2009): Added support for collectiong an merging the referenced symbols in the outlined
// function into the list used to edit the outlined code subtree to fixup references (from symbols
// in the original file to the symbols in the newer separate file).
// typedef rose_hash::unordered_map<SgNode*, SgNode*, hash_nodeptr> ReplacementMapType;
// void supplementReplacementSymbolMap ( const ReplacementMapTraversal::ReplacementMapType & inputReplacementMap );
// CH (4/9/2010): Use boost::hash instead
//#ifdef _MSC_VER
#if 0
inline size_t hash_value(SgNode* t) {return (size_t)t;}
#endif
#if 0
// DQ (8/3/2015): We expect that this is not used and is generating a warnings so we
// can best fix it by removing it.
struct hash_nodeptr
{
// CH (4/9/2010): Use boost::hash instead
//#ifndef _MSC_VER
#if 0
//rose_hash::hash<char*> hasher;
#endif
public:
size_t operator()(SgNode* node) const
{
// CH (4/9/2010): Use boost::hash instead
//#ifdef _MSC_VER
#if 0
return (size_t) hash_value(node);
#else
return (size_t) node;
#endif
}
};
#ifndef SWIG
// DQ (3/10/2013): This appears to be a problem for the SWIG interface (undefined reference at link-time).
void supplementReplacementSymbolMap ( rose_hash::unordered_map<SgNode*, SgNode*, hash_nodeptr> & inputReplacementMap );
#endif
#endif
//------------------------------------------------------------------------
//@{
/*! @name Symbol tables
\brief utility functions for symbol tables
*/
// Liao 1/22/2008, used for get symbols for generating variable reference nodes
// ! Find a variable symbol in current and ancestor scopes for a given name
ROSE_DLL_API SgVariableSymbol *lookupVariableSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL);
// DQ (8/21/2013): Modified to make newest function parameters be default arguments.
// DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments.
//! Find a symbol in current and ancestor scopes for a given variable name, starting from top of ScopeStack if currentscope is not given or NULL.
// SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL);
// SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList);
ROSE_DLL_API SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
// DQ (11/24/2007): Functions moved from the Fortran support so that they could be called from within astPostProcessing.
//!look up the first matched function symbol in parent scopes given only a function name, starting from top of ScopeStack if currentscope is not given or NULL
ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName, SgScopeStatement *currentScope=NULL);
// Liao, 1/24/2008, find exact match for a function
//!look up function symbol in parent scopes given both name and function type, starting from top of ScopeStack if currentscope is not given or NULL
ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName,
const SgType* t,
SgScopeStatement *currentScope=NULL);
ROSE_DLL_API SgFunctionSymbol *lookupTemplateFunctionSymbolInParentScopes (const SgName & functionName, SgFunctionType * ftype, SgTemplateParameterPtrList * tplparams, SgScopeStatement *currentScope=NULL);
ROSE_DLL_API SgFunctionSymbol *lookupTemplateMemberFunctionSymbolInParentScopes (const SgName & functionName, SgFunctionType * ftype, SgTemplateParameterPtrList * tplparams, SgScopeStatement *currentScope=NULL);
ROSE_DLL_API SgTemplateVariableSymbol * lookupTemplateVariableSymbolInParentScopes (const SgName & name, SgTemplateParameterPtrList * tplparams, SgTemplateArgumentPtrList* tplargs, SgScopeStatement *currentScope=NULL);
// DQ (8/21/2013): Modified to make newest function parameters be default arguments.
// DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments.
// DQ (5/7/2011): Added support for SgClassSymbol (used in name qualification support).
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
ROSE_DLL_API SgTypedefSymbol* lookupTypedefSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgNonrealSymbol* lookupNonrealSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
#if 0
// DQ (8/13/2013): This function does not make since any more, now that we have made the symbol
// table handling more precise and we have to provide template parameters for any template lookup.
// We also have to know if we want to lookup template classes, template functions, or template
// member functions (since each have specific requirements).
SgTemplateSymbol* lookupTemplateSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
#endif
#if 0
// DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes.
// Where these are called we might not know enough information about the template parameters or function
// types, for example.
SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
SgTemplateFunctionSymbol* lookupTemplateFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL);
SgTemplateMemberFunctionSymbol* lookupTemplateMemberFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL);
#endif
// DQ (8/21/2013): Modified to make some of the newest function parameters be default arguments.
// DQ (8/13/2013): I am not sure if we want these functions in place of lookupTemplateSymbolInParentScopes.
ROSE_DLL_API SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList, SgScopeStatement *cscope = NULL);
ROSE_DLL_API SgEnumSymbol* lookupEnumSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgNamespaceSymbol* lookupNamespaceSymbolInParentScopes(const SgName & name, SgScopeStatement *currentScope = NULL);
// DQ (7/17/2011): Added function from cxx branch that I need here for the Java support.
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *cscope);
/*! \brief set_name of symbol in symbol table.
This function extracts the symbol from the relevant symbol table,
changes the name (at the declaration) and reinserts it into the
symbol table.
\internal I think this is what this function does, I need to double check.
*/
// DQ (12/9/2004): Moved this function (by Alin Jula) from being a member of SgInitializedName
// to this location where it can be a part of the interface for the Sage III AST.
ROSE_DLL_API int set_name (SgInitializedName * initializedNameNode, SgName new_name);
/*! \brief Output function type symbols in global function type symbol table.
*/
void outputGlobalFunctionTypeSymbolTable ();
// DQ (6/27/2005):
/*! \brief Output the local symbol tables.
\implementation Each symbol table is output with the file info where it is located in the source code.
*/
ROSE_DLL_API void outputLocalSymbolTables (SgNode * node);
//! AST traversal class backing outputLocalSymbolTables(): walks the subtree rooted
//! at a node and processes each visited node (presumably printing the local symbol
//! table of scope statements — visit() is defined elsewhere; confirm in the .C file).
class OutputLocalSymbolTables:public AstSimpleProcessing
{
public:
// Callback invoked once per AST node by AstSimpleProcessing's traversal engine.
void visit (SgNode * node);
};
/*! \brief Regenerate the symbol table.
\implementation current symbol table must be NULL pointer before calling this
function (for safety, but is this a good idea?)
*/
// DQ (9/28/2005):
void rebuildSymbolTable (SgScopeStatement * scope);
/*! \brief Clear those variable symbols with unknown type (together with initialized names) which are also not referenced by any variable references or declarations under root. If root is NULL, all symbols with unknown type will be deleted.
*/
void clearUnusedVariableSymbols (SgNode* root = NULL);
// DQ (3/1/2009):
//! All the symbol table references in the copied AST need to be reset after rebuilding the copied scope's symbol table.
void fixupReferencesToSymbols( const SgScopeStatement* this_scope, SgScopeStatement* copy_scope, SgCopyHelp & help );
//@}
//------------------------------------------------------------------------
//@{
/*! @name Stringify
\brief Generate a useful string (name) to describe a SgNode
*/
/*! \brief Generate a useful name to describe the SgNode
\internal default names are used for SgNode objects that can not be associated with a name.
*/
// DQ (9/21/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgNode * node);
/*! \brief Generate a useful name to describe the declaration
\internal default names are used for declarations that can not be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgStatement * stmt);
/*! \brief Generate a useful name to describe the expression
\internal default names are used for expressions that can not be associated with a name.
*/
std::string get_name (const SgExpression * expr);
/*! \brief Generate a useful name to describe the declaration
\internal default names are used for declarations that can not be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgDeclarationStatement * declaration);
/*! \brief Generate a useful name to describe the scope
\internal default names are used for scope that cannot be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgScopeStatement * scope);
/*! \brief Generate a useful name to describe the SgSymbol
\internal default names are used for SgSymbol objects that cannot be associated with a name.
*/
// DQ (2/11/2007): Added this function to make debugging support more complete (useful for symbol table debugging support).
std::string get_name (const SgSymbol * symbol);
/*! \brief Generate a useful name to describe the SgType
\internal default names are used for SgType objects that cannot be associated with a name.
*/
std::string get_name (const SgType * type);
/*! \brief Generate a useful name to describe the SgSupport IR node
*/
std::string get_name (const SgSupport * node);
/*! \brief Generate a useful name to describe the SgLocatedNodeSupport IR node
*/
std::string get_name (const SgLocatedNodeSupport * node);
/*! \brief Generate a useful name to describe the SgC_PreprocessorDirectiveStatement IR node
*/
std::string get_name ( const SgC_PreprocessorDirectiveStatement* directive );
/*! \brief Generate a useful name to describe the SgToken IR node
*/
std::string get_name ( const SgToken* token );
// DQ (3/20/2016): Added to refactor some of the DSL infrastructure support.
/*! \brief Generate a useful name to support construction of identifiers from declarations.
This function permits names to be generated that will be unique across translation units
(a specific requirement different from the context of the get_name() functions above).
\internal This supports only a restricted set of declarations presently.
*/
std::string generateUniqueNameForUseAsIdentifier ( SgDeclarationStatement* declaration );
std::string generateUniqueNameForUseAsIdentifier_support ( SgDeclarationStatement* declaration );
/*! \brief Global map of name collisions to support generateUniqueNameForUseAsIdentifier() function.
*/
extern std::map<std::string,int> local_name_collision_map;
extern std::map<std::string,SgNode*> local_name_to_node_map;
extern std::map<SgNode*,std::string> local_node_to_name_map;
/*! \brief Traversal to set the global map of names to node and node to names.collisions to support generateUniqueNameForUseAsIdentifier() function.
*/
void computeUniqueNameForUseAsIdentifier( SgNode* astNode );
/*! \brief Reset map variables used to support generateUniqueNameForUseAsIdentifier() function.
*/
void reset_name_collision_map();
//@}
//------------------------------------------------------------------------
//@{
/*! @name Class utilities
\brief
*/
/*! \brief Get the default destructor from the class declaration
*/
// DQ (6/21/2005): Get the default destructor from the class declaration
SgMemberFunctionDeclaration *getDefaultDestructor (SgClassDeclaration *
classDeclaration);
/*! \brief Get the default constructor from the class declaration
*/
// DQ (6/22/2005): Get the default constructor from the class declaration
ROSE_DLL_API SgMemberFunctionDeclaration *getDefaultConstructor (SgClassDeclaration *
classDeclaration);
/*! \brief Return true if template definition is in the class, false if outside of class.
*/
// DQ (8/27/2005):
bool templateDefinitionIsInClass (SgTemplateInstantiationMemberFunctionDecl
* memberFunctionDeclaration);
/*! \brief Generate a non-defining (forward) declaration from a defining function declaration.
\internal should put into sageBuilder ?
*/
// DQ (9/17/2005):
SgTemplateInstantiationMemberFunctionDecl*
buildForwardFunctionDeclaration
(SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation);
//! Check if a SgNode is a declaration for a structure
bool isStructDeclaration(SgNode * node);
//! Check if a SgNode is a declaration for a union
bool isUnionDeclaration(SgNode * node);
#if 0
// DQ (8/28/2005): This is already a member function of the SgFunctionDeclaration
// (so that it can handle template functions and member functions)
/*! \brief Return true if member function of a template member function,
of false if a non-template member function in a templated class.
*/
// DQ (8/27/2005):
bool isTemplateMemberFunction (SgTemplateInstantiationMemberFunctionDecl *
memberFunctionDeclaration);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name Misc.
\brief Not sure the classifications right now
*/
//! Save AST into a pdf file. Start from a node to find its enclosing file node. The entire file's AST will be saved into a pdf.
void saveToPDF(SgNode* node, std::string filename);
void saveToPDF(SgNode* node); // enable calling from gdb
// DQ (2/12/2012): Added some diagnostic support.
//! Diagnostic function for tracing back through the parent list to understand at runtime where in the AST a failure happened.
void whereAmI(SgNode* node);
//! Extract a SgPragmaDeclaration's leading keyword . For example "#pragma omp parallel" has a keyword of "omp".
std::string extractPragmaKeyword(const SgPragmaDeclaration *);
//! Check if a node is SgOmp*Statement
ROSE_DLL_API bool isOmpStatement(SgNode* );
/*! \brief Return true if function is overloaded.
*/
// DQ (8/27/2005):
bool isOverloaded (SgFunctionDeclaration * functionDeclaration);
// DQ (2/14/2012): Added support function used for variable declarations in conditionals.
//! Support function used for variable declarations in conditionals
void initializeIfStmt(SgIfStmt *ifstmt, SgStatement* conditional, SgStatement * true_body, SgStatement * false_body);
//! Support function used for variable declarations in conditionals
void initializeSwitchStatement(SgSwitchStatement* switchStatement,SgStatement *item_selector,SgStatement *body);
//! Support function used for variable declarations in conditionals
void initializeWhileStatement(SgWhileStmt* whileStatement, SgStatement * condition, SgStatement *body, SgStatement *else_body);
//! Generate unique names for expressions and attach the names as persistent attributes ("UniqueNameAttribute")
void annotateExpressionsWithUniqueNames (SgProject* project);
//! Check if a SgNode is a main() function declaration
ROSE_DLL_API bool isMain (const SgNode* node);
// DQ (6/22/2005):
/*! \brief Generate unique name from C and C++ constructs. The name may contain space.
This is support for the AST merge, but is generally useful as a more general mechanism than
name mangling which is more closely tied to the generation of names to support link-time function name
resolution. This is more general than common name mangling in that it resolves more relevant differences
between C and C++ declarations. (e.g. the type within the declaration: "struct { int:8; } foo;").
\implementation current work does not support expressions.
*/
std::string generateUniqueName ( const SgNode * node, bool ignoreDifferenceBetweenDefiningAndNondefiningDeclarations);
/** Generate a name like __temp#__ that is unique in the current scope and any parent and children scopes. # is a unique integer counter.
* @param baseName the word to be included in the variable names. */
std::string generateUniqueVariableName(SgScopeStatement* scope, std::string baseName = "temp");
// DQ (8/10/2010): Added const to first parameter.
// DQ (3/10/2007):
//! Generate a unique string from the source file position information
std::string declarationPositionString (const SgDeclarationStatement * declaration);
// DQ (1/20/2007):
//! Added mechanism to generate project name from list of file names
ROSE_DLL_API std::string generateProjectName (const SgProject * project, bool supressSuffix = false );
//! Given a SgExpression that represents a named function (or bound member
//! function), return the mentioned function
SgFunctionDeclaration* getDeclarationOfNamedFunction(SgExpression* func);
//! Get the mask expression from the header of a SgForAllStatement
SgExpression* forallMaskExpression(SgForAllStatement* stmt);
//! Find all SgPntrArrRefExp under astNode, then add SgVarRefExp (if any) of SgPntrArrRefExp's dim_info into NodeList_t
void addVarRefExpFromArrayDimInfo(SgNode * astNode, Rose_STL_Container<SgNode *>& NodeList_t);
// DQ (10/6/2006): Added support for faster mangled name generation (caching avoids recomputation).
/*! \brief Support for faster mangled name generation (caching avoids recomputation).
*/
#ifndef SWIG
// DQ (3/10/2013): This appears to be a problem for the SWIG interface (undefined reference at link-time).
void clearMangledNameCache (SgGlobal * globalScope);
void resetMangledNameCache (SgGlobal * globalScope);
#endif
std::string getMangledNameFromCache (SgNode * astNode);
std::string addMangledNameToCache (SgNode * astNode, const std::string & mangledName);
SgDeclarationStatement * getNonInstantiatonDeclarationForClass (SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation);
//! a better version for SgVariableDeclaration::set_baseTypeDefininingDeclaration(), handling all side effects automatically
//! Used to have a struct declaration embedded into a variable declaration
void setBaseTypeDefiningDeclaration(SgVariableDeclaration* var_decl, SgDeclarationStatement *base_decl);
// DQ (10/14/2006): This function tests the AST to see if for a non-defining declaration, the
// bool declarationPreceedsDefinition ( SgClassDeclaration* classNonDefiningDeclaration, SgClassDeclaration* classDefiningDeclaration );
//! Check if a defining declaration comes before or after the non-defining declaration.
bool declarationPreceedsDefinition (SgDeclarationStatement *nonDefiningDeclaration, SgDeclarationStatement *definingDeclaration);
// DQ (10/19/2006): Function calls have interesting context dependent rules to determine if
// they are output with a global qualifier or not. Where this is true we have to avoid global
// qualifiers, since the function's scope has not been defined. This is an example of where
// qualification of function names in function calls are context dependent; an interesting
// example of where the C++ language is not friendly to source-to-source processing :-).
bool functionCallExpressionPreceedsDeclarationWhichAssociatesScope (SgFunctionCallExp * functionCall);
/*! \brief Compute the intersection set for two ASTs.
This is part of a test done by the copy function to compute those IR nodes in the copy that still reference the original AST.
*/
ROSE_DLL_API std::vector < SgNode * >astIntersection (SgNode * original, SgNode * copy, SgCopyHelp * help = NULL);
//! Deep copy an arbitrary subtree
ROSE_DLL_API SgNode* deepCopyNode (const SgNode* subtree);
//! A template function for deep copying a subtree. It is also used to create deepcopy functions with specialized parameter and return types. e.g SgExpression* copyExpression(SgExpression* e);
template <typename NodeType>
NodeType* deepCopy (const NodeType* subtree) {
return dynamic_cast<NodeType*>(deepCopyNode(subtree));
}
//! Deep copy an expression
ROSE_DLL_API SgExpression* copyExpression(SgExpression* e);
//!Deep copy a statement
ROSE_DLL_API SgStatement* copyStatement(SgStatement* s);
// from VarSym.cc in src/midend/astOutlining/src/ASTtools
//! Get the variable symbol for the first initialized name of a declaration stmt.
ROSE_DLL_API SgVariableSymbol* getFirstVarSym (SgVariableDeclaration* decl);
//! Get the first initialized name of a declaration statement
ROSE_DLL_API SgInitializedName* getFirstInitializedName (SgVariableDeclaration* decl);
//! A special purpose statement removal function, originally from inlinerSupport.h, Need Jeremiah's attention to refine it. Please don't use it for now.
ROSE_DLL_API void myRemoveStatement(SgStatement* stmt);
ROSE_DLL_API bool isConstantTrue(SgExpression* e);
ROSE_DLL_API bool isConstantFalse(SgExpression* e);
ROSE_DLL_API bool isCallToParticularFunction(SgFunctionDeclaration* decl, SgExpression* e);
ROSE_DLL_API bool isCallToParticularFunction(const std::string& qualifiedName, size_t arity, SgExpression* e);
//! Check if a declaration has a "static' modifier
bool ROSE_DLL_API isStatic(SgDeclarationStatement* stmt);
//! Set a declaration as static
ROSE_DLL_API void setStatic(SgDeclarationStatement* stmt);
//! Check if a declaration has an "extern" modifier
ROSE_DLL_API bool isExtern(SgDeclarationStatement* stmt);
//! Set a declaration as extern
ROSE_DLL_API void setExtern(SgDeclarationStatement* stmt);
//! Interface for creating a statement whose computation writes its answer into
//! a given variable.
class StatementGenerator {
public:
// Virtual destructor so concrete generators can be destroyed through a base pointer.
virtual ~StatementGenerator() {};
// Build and return a statement whose computation stores its result into the
// location denoted by where_to_write_answer. Implemented by subclasses.
virtual SgStatement* generate(SgExpression* where_to_write_answer) = 0;
};
//! Check if a SgNode _s is an assignment statement (any of =,+=,-=,&=,/=, ^=, etc)
//!
//! Return the left hand, right hand expressions and if the left hand variable is also being read
bool isAssignmentStatement(SgNode* _s, SgExpression** lhs=NULL, SgExpression** rhs=NULL, bool* readlhs=NULL);
//! Variable references can be introduced by SgVarRef, SgPntrArrRefExp, SgInitializedName, SgMemberFunctionRef etc. For Dot and Arrow Expressions, their lhs is used to obtain SgInitializedName (coarse grain) by default. Otherwise, fine-grain rhs is used.
ROSE_DLL_API SgInitializedName* convertRefToInitializedName(SgNode* current, bool coarseGrain=true);
//! Build an abstract handle from an AST node, reuse previously built handle when possible
ROSE_DLL_API AbstractHandle::abstract_handle* buildAbstractHandle(SgNode*);
//! Obtain a matching SgNode from an abstract handle string
ROSE_DLL_API SgNode* getSgNodeFromAbstractHandleString(const std::string& input_string);
//! Dump information about a SgNode for debugging
ROSE_DLL_API void dumpInfo(SgNode* node, std::string desc="");
//! Reorder a list of declaration statements based on their appearance order in source files
ROSE_DLL_API std::vector<SgDeclarationStatement*>
sortSgNodeListBasedOnAppearanceOrderInSource(const std::vector<SgDeclarationStatement*>& nodevec);
// DQ (4/13/2013): We need these to support the unparing of operators defined by operator syntax or member function names.
//! Is an overloaded operator a prefix operator (e.g. address operator X * operator&(), dereference operator X & operator*(), unary plus operator X & operator+(), etc.
// bool isPrefixOperator( const SgMemberFunctionRefExp* memberFunctionRefExp );
bool isPrefixOperator( SgExpression* exp );
//! Check for proper names of possible prefix operators (used in isPrefixOperator()).
bool isPrefixOperatorName( const SgName & functionName );
//! Is an overloaded operator a postfix operator. (e.g. ).
bool isPostfixOperator( SgExpression* exp );
//! Is an overloaded operator an index operator (also referred to as call or subscript operators). (e.g. X & operator()() or X & operator[]()).
bool isIndexOperator( SgExpression* exp );
// DQ (1/10/2014): Adding more general support for token based unparsing.
//! Used to support token unparsing (when the output the trailing token sequence).
SgStatement* lastStatementOfScopeWithTokenInfo (SgScopeStatement* scope, std::map<SgNode*,TokenStreamSequenceToNodeMapping*> & tokenStreamSequenceMap);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST properties
\brief version, language properties of current AST.
*/
// std::string version(); // utility_functions.h, version number
/*! \brief These traverse the memory pool of SgFile IR nodes and determine what languages are in use!
*/
ROSE_DLL_API bool is_Ada_language ();
ROSE_DLL_API bool is_C_language ();
ROSE_DLL_API bool is_Cobol_language ();
ROSE_DLL_API bool is_OpenMP_language ();
ROSE_DLL_API bool is_UPC_language ();
//! Check if dynamic threads compilation is used for UPC programs
ROSE_DLL_API bool is_UPC_dynamic_threads();
ROSE_DLL_API bool is_C99_language ();
ROSE_DLL_API bool is_Cxx_language ();
ROSE_DLL_API bool is_Java_language ();
ROSE_DLL_API bool is_Jovial_language ();
ROSE_DLL_API bool is_Fortran_language ();
ROSE_DLL_API bool is_CAF_language ();
ROSE_DLL_API bool is_PHP_language();
ROSE_DLL_API bool is_Python_language();
ROSE_DLL_API bool is_Cuda_language();
ROSE_DLL_API bool is_OpenCL_language();
ROSE_DLL_API bool is_X10_language();
ROSE_DLL_API bool is_binary_executable();
ROSE_DLL_API bool is_mixed_C_and_Cxx_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_C_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_Cxx_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_C_and_Cxx_language ();
//@}
//------------------------------------------------------------------------
//@{
/*! @name Scope
\brief
*/
// DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique
// labels for scopes in a function (as required for name mangling).
/*! \brief Assigns unique numbers to each SgScopeStatement of a function.
This is used to provide unique names for variables and types defined in
different nested scopes of a function (used in mangled name generation).
*/
void resetScopeNumbers (SgFunctionDefinition * functionDeclaration);
// DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique
// labels for scopes in a function (as required for name mangling).
/*! \brief Clears the cache of scope,integer pairs for the input function.
This is used to clear the cache of computed unique labels for scopes in a function.
This function should be called after any transformation on a function that might affect
the allocation of scopes and cause the existing unique numbers to be incorrect.
This is part of support to provide unique names for variables and types defined in
different nested scopes of a function (used in mangled name generation).
*/
void clearScopeNumbers (SgFunctionDefinition * functionDefinition);
//!Find the enclosing namespace of a declaration
SgNamespaceDefinitionStatement * enclosingNamespaceScope (SgDeclarationStatement * declaration);
// SgNamespaceDefinitionStatement * getEnclosingNamespaceScope (SgNode * node);
bool isPrototypeInScope (SgScopeStatement * scope,
SgFunctionDeclaration * functionDeclaration,
SgDeclarationStatement * startingAtDeclaration);
//!check if node1 is a strict ancestor of node 2. (a node is not considered its own ancestor)
bool ROSE_DLL_API isAncestor(SgNode* node1, SgNode* node2);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Preprocessing Information
\brief #if-#else-#end, comments, #include, etc
*/
//! Dumps a located node's preprocessing information.
void dumpPreprocInfo (SgLocatedNode* locatedNode);
//! Insert #include "filename" or #include <filename> (system header) onto the global scope of a source file, add to be the last #include .. by default among existing headers, Or as the first header. Recommended for use.
PreprocessingInfo * insertHeader(SgSourceFile * source_file, const std::string & header_file_name, bool isSystemHeader, bool asLastHeader);
//! Insert a new header right before stmt, if there are existing headers attached to stmt, insert it as the last or first header as specified by asLastHeader
void insertHeader (SgStatement* stmt, PreprocessingInfo* newheader, bool asLastHeader);
//! Insert #include "filename" or #include <filename> (system header) onto the global scope of a source file
PreprocessingInfo * insertHeader(SgSourceFile * source_file, const std::string & header_file_name, bool isSystemHeader = false, PreprocessingInfo::RelativePositionType position = PreprocessingInfo::before);
//! Insert #include "filename" or #include <filename> (system header) into the global scope containing the current scope, right after other #include XXX.
ROSE_DLL_API PreprocessingInfo* insertHeader(const std::string& filename, PreprocessingInfo::RelativePositionType position=PreprocessingInfo::after, bool isSystemHeader=false, SgScopeStatement* scope=NULL);
//! Identical to movePreprocessingInfo(), except for the stale name and confusing order of parameters. It will be deprecated soon.
ROSE_DLL_API void moveUpPreprocessingInfo (SgStatement* stmt_dst, SgStatement* stmt_src, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef, PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false);
//! Move preprocessing information of stmt_src to stmt_dst, Only move preprocessing information from the specified source-relative position to a specified target position, otherwise move all preprocessing information with position information intact. The preprocessing information is appended to the existing preprocessing information list of the target node by default. Prepending is used if usePreprend is set to true. Optionally, the relative position can be adjust after the moving using dst_position.
ROSE_DLL_API void movePreprocessingInfo (SgStatement* stmt_src, SgStatement* stmt_dst, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef,
PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false);
//!Cut preprocessing information from a source node and save it into a buffer. Used in combination of pastePreprocessingInfo(). The cut-paste operation is similar to moveUpPreprocessingInfo() but it is more flexible in that the destination node can be unknown during the cut operation.
ROSE_DLL_API void cutPreprocessingInfo (SgLocatedNode* src_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& save_buf);
//!Paste preprocessing information from a buffer to a destination node. Used in combination of cutPreprocessingInfo()
ROSE_DLL_API void pastePreprocessingInfo (SgLocatedNode* dst_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& saved_buf);
//! Attach an arbitrary string to a located node. A workaround to insert irregular statements or vendor-specific attributes.
ROSE_DLL_API PreprocessingInfo* attachArbitraryText(SgLocatedNode* target,
const std::string & text,
PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before);
//!Check if a pragma declaration node has macro calls attached, if yes, replace macro calls within the pragma string with expanded strings. This only works if -rose:wave is turned on.
ROSE_DLL_API void replaceMacroCallsWithExpandedStrings(SgPragmaDeclaration* target);
//@}
//! Build and attach comment onto the global scope of a source file
PreprocessingInfo* attachComment(
SgSourceFile * source_file,
const std::string & content,
PreprocessingInfo::DirectiveType directive_type = PreprocessingInfo::C_StyleComment,
PreprocessingInfo::RelativePositionType position = PreprocessingInfo::before
);
//! Build and attach comment, comment style is inferred from the language type of the target node if not provided
ROSE_DLL_API PreprocessingInfo* attachComment(SgLocatedNode* target, const std::string & content,
PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before,
PreprocessingInfo::DirectiveType dtype= PreprocessingInfo::CpreprocessorUnknownDeclaration);
// DQ (11/25/2009): Added matching support for adding comments to SgAsm nodes.
// Build and attach comment
// void attachComment(SgAsmStatement* target, const std::string & content );
// DQ (7/20/2008): I am not clear were I should put this function, candidates include: SgLocatedNode or SgInterface
//! Add a string to be unparsed to support code generation for back-end specific tools or compilers.
ROSE_DLL_API void addTextForUnparser ( SgNode* astNode, std::string s, AstUnparseAttribute::RelativePositionType inputlocation );
/**
* Add preproccessor guard around a given node.
* It surrounds the node with "#if guard" and "#endif"
*/
void guardNode(SgLocatedNode * target, std::string guard);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Source File Position
\brief set Sg_File_Info for a SgNode
*/
// ************************************************************************
// Newer versions of now deprecated functions
// ************************************************************************
// DQ (5/1/2012): This function queries the SageBuilder::SourcePositionClassification mode (stored in the SageBuilder
// interface) and used the specified mode to initialize the source position data (Sg_File_Info objects). This
// function is the only function that should be called directly (though in a namespace we can't define permissions).
//! Set the source code position for the current (input) node.
ROSE_DLL_API void setSourcePosition(SgNode* node);
// A better name might be "setSourcePositionForSubTree"
//! Set the source code position for the subtree (including the root).
ROSE_DLL_API void setSourcePositionAtRootAndAllChildren(SgNode *root);
//! DQ (5/1/2012): New function with improved name.
void setSourcePositionAsTransformation(SgNode *node);
// DQ (5/1/2012): Newly renamed function (previous name preserved for backward compatibility).
void setSourcePositionPointersToNull(SgNode *node);
// ************************************************************************
// ************************************************************************
// Older deprecated functions
// ************************************************************************
// Liao, 1/8/2007, set file info. for a whole subtree as transformation generated
//! Set current node's source position as transformation generated
ROSE_DLL_API void setOneSourcePositionForTransformation(SgNode *node);
//! Set current node's source position as NULL
ROSE_DLL_API void setOneSourcePositionNull(SgNode *node);
//! Recursively set source position info(Sg_File_Info) as transformation generated
ROSE_DLL_API void setSourcePositionForTransformation (SgNode * root);
//! Set source position info(Sg_File_Info) as transformation generated for all SgNodes in memory pool
// ROSE_DLL_API void setSourcePositionForTransformation_memoryPool();
//! Check if a node is from a system header file
ROSE_DLL_API bool insideSystemHeader (SgLocatedNode* node);
//! Set the source position of SgLocatedNode to Sg_File_Info::generateDefaultFileInfo(). These nodes WILL be unparsed. Not for transformation usage.
// ROSE_DLL_API void setSourcePosition (SgLocatedNode * locatedNode);
// ************************************************************************
//@}
//------------------------------------------------------------------------
//@{
/*! @name Data types
\brief
*/
// from src/midend/astInlining/typeTraits.h
// src/midend/astUtil/astInterface/AstInterface.h
//! Get the right bool type according to C or C++ language input
SgType* getBoolType(SgNode* n);
//! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long.
//!
//! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool to be treated as integer types
ROSE_DLL_API bool isStrictIntegerType(SgType* t);
//!Get the data type of the first initialized name of a declaration statement
ROSE_DLL_API SgType* getFirstVarType(SgVariableDeclaration* decl);
//! Is a type default constructible? This may not quite work properly.
ROSE_DLL_API bool isDefaultConstructible(SgType* type);
//! Is a type copy constructible? This may not quite work properly.
ROSE_DLL_API bool isCopyConstructible(SgType* type);
//! Is a type assignable? This may not quite work properly.
ROSE_DLL_API bool isAssignable(SgType* type);
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
//! Check if a class type is a pure virtual class. True means that there is at least
//! one pure virtual function that has not been overridden.
//! In the case of an incomplete class type (forward declaration), this function returns false.
ROSE_DLL_API bool isPureVirtualClass(SgType* type, const ClassHierarchyWrapper& classHierarchy);
#endif
//! Does a type have a trivial (built-in) destructor?
ROSE_DLL_API bool hasTrivialDestructor(SgType* t);
//! Is this type a non-constant reference type? (Handles typedefs correctly)
ROSE_DLL_API bool isNonconstReference(SgType* t);
//! Is this type a const or non-const reference type? (Handles typedefs correctly)
ROSE_DLL_API bool isReferenceType(SgType* t);
//! Is this type a pointer type? (Handles typedefs correctly)
ROSE_DLL_API bool isPointerType(SgType* t);
//! Is this a pointer to a non-const type? Note that this function will return true for const pointers pointing to
//! non-const types. For example, (int* const y) points to a modifiable int, so this function returns true. Meanwhile,
//! it returns false for (int const * x) and (int const * const x) because these types point to a const int.
//! Also, only the outer layer of nested pointers is unwrapped. So the function returns true for (const int ** y), but returns
//! false for const (int * const * x)
ROSE_DLL_API bool isPointerToNonConstType(SgType* type);
//! Is this a const type?
/* const char* p = "aa"; is not treated as having a const type. It is a pointer to const char.
* Similarly, neither for const int b[10]; or const int & c =10;
* The standard says, "A compound type is not cv-qualified by the cv-qualifiers (if any) of
the types from which it is compounded. Any cv-qualifiers applied to an array type affect the array element type, not the array type".
*/
ROSE_DLL_API bool isConstType(SgType* t);
//! Remove const (if present) from a type. stripType() cannot do this because it removes all modifiers.
SgType* removeConst(SgType* t);
//! Is this a volatile type?
ROSE_DLL_API bool isVolatileType(SgType* t);
//! Is this a restrict type?
ROSE_DLL_API bool isRestrictType(SgType* t);
//! Is this a scalar type?
/*! We define the following SgType as scalar types: char, short, int, long , void, Wchar, Float, double, long long, string, bool, complex, imaginary
*/
ROSE_DLL_API bool isScalarType(SgType* t);
//! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long.
//!
//! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool.
ROSE_DLL_API bool isStrictIntegerType(SgType* t);
//! Check if a type is a struct type (a special SgClassType in ROSE)
ROSE_DLL_API bool isStructType(SgType* t);
//! Generate a mangled string for a given type based on Itanium C++ ABI
ROSE_DLL_API std::string mangleType(SgType* type);
//! Generate mangled scalar type names according to Itanium C++ ABI, the input type should pass isScalarType() in ROSE
ROSE_DLL_API std::string mangleScalarType(SgType* type);
//! Generate mangled modifier type names, including const and volatile, according to the Itanium C++ ABI, with an extension to handle UPC shared types.
ROSE_DLL_API std::string mangleModifierType(SgModifierType* type);
//! Calculate the number of elements of an array type: dim1* dim2*... , assume element count is 1 for int a[]; Strip off THREADS if it is a UPC array.
ROSE_DLL_API size_t getArrayElementCount(SgArrayType* t);
//! Get the number of dimensions of an array type
ROSE_DLL_API int getDimensionCount(SgType* t);
//! Get the element type of an array. It recursively find the base type for multi-dimension array types
ROSE_DLL_API SgType* getArrayElementType(SgType* t);
//! Get the element type of an array, pointer or string, or NULL if not applicable. This function only check one level base type. No recursion.
ROSE_DLL_API SgType* getElementType(SgType* t);
/// \brief returns the array dimensions in an array as defined for arrtype
/// \param arrtype the type of a C/C++ array
/// \return an array that contains an expression indicating each dimension's size.
/// OWNERSHIP of the expressions is TRANSFERRED TO the CALLER (which
/// becomes responsible for freeing the expressions).
/// Note, the first entry of the array is a SgNullExpression, iff the
/// first array dimension was not specified.
/// \code
/// int x[] = { 1, 2, 3 };
/// \endcode
/// note, the expression does not have to be a constant
/// \code
/// int x[i*5];
/// \endcode
/// \post return-value.empty() == false
/// \post return-value[*] != NULL (no nullptr in the returned vector)
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype);
/// \brief returns the array dimensions in an array as defined for arrtype
/// \param arrtype the type of a C/C++ array
/// \param varref a reference to an array variable (the variable of type arrtype)
/// \return an array that contains an expression indicating each dimension's size.
/// OWNERSHIP of the expressions is TRANSFERRED TO the CALLER (which
/// becomes responsible for freeing the expressions).
/// If the first array dimension was not specified an expression
/// that indicates that size is generated.
/// \code
/// int x[][3] = { 1, 2, 3, 4, 5, 6 };
/// \endcode
/// the entry for the first dimension will be:
/// \code
/// // 3 ... size of 2nd dimension
/// sizeof(x) / (sizeof(int) * 3)
/// \endcode
/// \pre arrtype is the array-type of varref
/// \post return-value.empty() == false
/// \post return-value[*] != NULL (no nullptr in the returned vector)
/// \post !isSgNullExpression(return-value[*])
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype, const SgVarRefExp& varref);
/// \overload
/// \note see get_C_array_dimensions for SgVarRefExp for details.
/// \todo make initname const
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype, SgInitializedName& initname);
//! Check if an expression is an array access (SgPntrArrRefExp). If so, return its name expression and subscripts if requested. Users can use convertRefToInitializedName() to get the possible name. It does not check if the expression is a top level SgPntrArrRefExp.
ROSE_DLL_API bool isArrayReference(SgExpression* ref, SgExpression** arrayNameExp=NULL, std::vector<SgExpression*>** subscripts=NULL);
//! Collect variable references in array types. The default NodeQuery::querySubTree() will miss variables referenced in array type's index list. e.g. double *buffer = new double[numItems] ;
ROSE_DLL_API int collectVariableReferencesInArrayTypes (SgLocatedNode* root, Rose_STL_Container<SgNode*> & currentVarRefList);
//! Has a UPC shared type of any kinds (shared-to-shared, private-to-shared, shared-to-private, shared scalar/array)? An optional parameter, mod_type_out, stores the first SgModifierType with UPC access information.
/*!
* Note: we classify private-to-shared as 'has shared' type for convenience here. It is indeed a private type in strict sense.
AST graph for some examples:
- shared scalar: SgModifierType -->base type
- shared array: SgArrayType --> SgModiferType --> base type
- shared to shared: SgModifierType --> SgPointerType --> SgModifierType ->SgTypeInt
- shared to private: SgModifierType --> SgPointerType --> base type
- private to shared: SgPointerType --> SgModifierType --> base type
*/
ROSE_DLL_API bool hasUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL );
//! Check if a type is a UPC shared type, including shared array, shared pointers etc. Exclude private pointers to shared types. Optionally return the modifier type with the UPC shared property.
/*!
* ROSE uses SgArrayType of SgModifierType to represent shared arrays, not SgModifierType points to SgArrayType. Also typedef may cause a chain of nodes before reach the actual SgModifierType with UPC shared property.
*/
ROSE_DLL_API bool isUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL);
//! Check if a modifier type is a UPC shared type.
ROSE_DLL_API bool isUpcSharedModifierType (SgModifierType* mod_type);
//! Check if an array type is a UPC shared type. ROSE AST represents a UPC shared array as regular array of elements of UPC shared Modifier Type. Not directly a UPC shared Modifier Type of an array.
ROSE_DLL_API bool isUpcSharedArrayType (SgArrayType* array_type);
//! Check if a shared UPC type is strict memory consistency or not. Return false if it is relaxed. (So isUpcRelaxedSharedModifierType() is not necessary.)
ROSE_DLL_API bool isUpcStrictSharedModifierType(SgModifierType* mode_type);
//! Get the block size of a UPC shared modifier type
ROSE_DLL_API size_t getUpcSharedBlockSize(SgModifierType* mod_type);
//! Get the block size of a UPC shared type, including Modifier types and array of modifier types (shared arrays)
ROSE_DLL_API size_t getUpcSharedBlockSize(SgType* t);
//! Is UPC phase-less shared type? Phase-less means block size of the first SgModifierType with UPC information is 1 or 0/unspecified. Also return false if the type is not a UPC shared type.
ROSE_DLL_API bool isUpcPhaseLessSharedType (SgType* t);
//! Is a UPC private-to-shared pointer? SgPointerType comes first compared to SgModifierType with UPC information. Input type must be any of UPC shared types first.
ROSE_DLL_API bool isUpcPrivateToSharedType(SgType* t);
//! Is a UPC array with dimension of X*THREADS
ROSE_DLL_API bool isUpcArrayWithThreads(SgArrayType* t);
//! Lookup a named type based on its name, bottom-up searching from a specified scope. Note name collision might be allowed for C (not C++) between typedef and enum/struct. Only the first matched named type will be returned in this case. typedef is returned as it is, not the base type it actually refers to.
ROSE_DLL_API SgType* lookupNamedTypeInParentScopes(const std::string& type_name, SgScopeStatement* scope=NULL);
// DQ (7/22/2014): Added support for comparing expression types in actual arguments with those expected from the formal function parameter types.
//! Get the type of the associated argument expression from the function type.
ROSE_DLL_API SgType* getAssociatedTypeFromFunctionTypeList(SgExpression* actual_argument_expression);
//! Verify that 2 SgTemplateArgument are equivalent (same type, same expression, or same template declaration)
ROSE_DLL_API bool templateArgumentEquivalence(SgTemplateArgument * arg1, SgTemplateArgument * arg2);
//! Verify that 2 SgTemplateArgumentPtrList are equivalent.
ROSE_DLL_API bool templateArgumentListEquivalence(const SgTemplateArgumentPtrList & list1, const SgTemplateArgumentPtrList & list2);
//! Test for equivalence of types independent of access permissions (private or protected modes for members of classes).
ROSE_DLL_API bool isEquivalentType (const SgType* lhs, const SgType* rhs);
//! Test if two types are equivalent SgFunctionType nodes. This is necessary for template function types
//! They may differ in one SgTemplateType pointer but identical otherwise.
ROSE_DLL_API bool isEquivalentFunctionType (const SgFunctionType* lhs, const SgFunctionType* rhs);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Loop handling
\brief
*/
// by Jeremiah
//! Add a step statement to the end of a loop body
//! Add a new label to the end of the loop, with the step statement after
//! it; then change all continue statements in the old loop body into
//! jumps to the label
//!
//! For example:
//! while (a < 5) {if (a < -3) continue;} (adding "a++" to end) becomes
//! while (a < 5) {if (a < -3) goto label; label: a++;}
ROSE_DLL_API void addStepToLoopBody(SgScopeStatement* loopStmt, SgStatement* step);
ROSE_DLL_API void moveForStatementIncrementIntoBody(SgForStatement* f);
ROSE_DLL_API void convertForToWhile(SgForStatement* f);
ROSE_DLL_API void convertAllForsToWhiles(SgNode* top);
//! Change continue statements in a given block of code to gotos to a label
ROSE_DLL_API void changeContinuesToGotos(SgStatement* stmt, SgLabelStatement* label);
//!Return the loop index variable for a for loop
ROSE_DLL_API SgInitializedName* getLoopIndexVariable(SgNode* loop);
//!Check if a SgInitializedName is used as a loop index within a AST subtree
//! This function will use a bottom-up traverse starting from the subtree_root to find all enclosing loops and check if ivar is used as an index for either of them.
ROSE_DLL_API bool isLoopIndexVariable(SgInitializedName* ivar, SgNode* subtree_root);
//! Check if a for loop uses C99 style initialization statement with multiple expressions like for (int i=0, j=0; ..) or for (i=0,j=0;...)
/*!
for (int i=0, j=0; ..) is stored as two variable declarations under SgForInitStatement's init_stmt member
for (i=0,j=0;...) is stored as a single expression statement, with comma expression (i=0,j=0).
*/
ROSE_DLL_API bool hasMultipleInitStatmentsOrExpressions (SgForStatement* for_loop);
//! Routines to get and set the body of a loop
ROSE_DLL_API SgStatement* getLoopBody(SgScopeStatement* loop);
ROSE_DLL_API void setLoopBody(SgScopeStatement* loop, SgStatement* body);
//! Routines to get the condition of a loop. It recognize While-loop, For-loop, and Do-While-loop
ROSE_DLL_API SgStatement* getLoopCondition(SgScopeStatement* loop);
//! Set the condition statement of a loop, including While-loop, For-loop, and Do-While-loop.
ROSE_DLL_API void setLoopCondition(SgScopeStatement* loop, SgStatement* cond);
//! Check if a for-loop has a canonical form, return loop index, bounds, step, and body if requested
//!
//! A canonical form is defined as : one initialization statement, a test expression, and an increment expression , loop index variable should be of an integer type. IsInclusiveUpperBound is true when <= or >= is used for loop condition
ROSE_DLL_API bool isCanonicalForLoop(SgNode* loop, SgInitializedName** ivar=NULL, SgExpression** lb=NULL, SgExpression** ub=NULL, SgExpression** step=NULL, SgStatement** body=NULL, bool *hasIncrementalIterationSpace = NULL, bool* isInclusiveUpperBound = NULL);
//! Check if a Fortran Do loop has a complete canonical form: Do I=1, 10, 1
ROSE_DLL_API bool isCanonicalDoLoop(SgFortranDo* loop,SgInitializedName** ivar/*=NULL*/, SgExpression** lb/*=NULL*/, SgExpression** ub/*=NULL*/, SgExpression** step/*=NULL*/, SgStatement** body/*=NULL*/, bool *hasIncrementalIterationSpace/*= NULL*/, bool* isInclusiveUpperBound/*=NULL*/);
//! Set the lower bound of a loop header for (i=lb; ...)
ROSE_DLL_API void setLoopLowerBound(SgNode* loop, SgExpression* lb);
//! Set the upper bound of a loop header,regardless the condition expression type. for (i=lb; i op up, ...)
ROSE_DLL_API void setLoopUpperBound(SgNode* loop, SgExpression* ub);
//! Set the stride(step) of a loop 's incremental expression, regardless the expression types (i+=s; i= i+s, etc)
ROSE_DLL_API void setLoopStride(SgNode* loop, SgExpression* stride);
//! Normalize loop init stmt by promoting the single variable declaration statement outside of the for loop header's init statement, e.g. for (int i=0;) becomes int i_x; for (i_x=0;..) and rewrite the loop with the new index variable, if necessary
ROSE_DLL_API bool normalizeForLoopInitDeclaration(SgForStatement* loop);
//! Undo the normalization of for loop's C99 init declaration. Previous record of normalization is used to ease the reverse transformation.
ROSE_DLL_API bool unnormalizeForLoopInitDeclaration(SgForStatement* loop);
//! Normalize a for loop, return true if successful. Generated constants will be fold by default.
//!
//! Translations are :
//! For the init statement: for (int i=0;... ) becomes int i; for (i=0;..)
//! For test expression:
//! i<x is normalized to i<= (x-1) and
//! i>x is normalized to i>= (x+1)
//! For increment expression:
//! i++ is normalized to i+=1 and
//! i-- is normalized to i+=-1
//! i-=s is normalized to i+= -s
ROSE_DLL_API bool forLoopNormalization(SgForStatement* loop, bool foldConstant = true);
//! Normalize a for loop's test expression
//! i<x is normalized to i<= (x-1) and
//! i>x is normalized to i>= (x+1)
ROSE_DLL_API bool normalizeForLoopTest(SgForStatement* loop);
ROSE_DLL_API bool normalizeForLoopIncrement(SgForStatement* loop);
//!Normalize a Fortran Do loop. Make the default increment expression (1) explicit
ROSE_DLL_API bool doLoopNormalization(SgFortranDo* loop);
//! Unroll a target loop with a specified unrolling factor. It handles steps larger than 1 and adds a fringe loop if the iteration count is not evenly divisible by the unrolling factor.
ROSE_DLL_API bool loopUnrolling(SgForStatement* loop, size_t unrolling_factor);
//! Interchange/permutate a n-level perfectly-nested loop rooted at 'loop' using a lexicographical order number within (0,depth!).
ROSE_DLL_API bool loopInterchange(SgForStatement* loop, size_t depth, size_t lexicoOrder);
//! Tile the n-level (starting from 1) loop of a perfectly nested loop nest using tiling size s
ROSE_DLL_API bool loopTiling(SgForStatement* loopNest, size_t targetLevel, size_t tileSize);
//Winnie Loop Collapsing
SgExprListExp * loopCollapsing(SgForStatement* target_loop, size_t collapsing_factor);
bool getForLoopInformations(
SgForStatement * for_loop,
SgVariableSymbol * & iterator,
SgExpression * & lower_bound,
SgExpression * & upper_bound,
SgExpression * & stride
);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Topdown search
\brief Top-down traversal from current node to find a node of a specified type
*/
//! Query a subtree to get all nodes of a given type, with an appropriate downcast.
//! Query a subtree to get all nodes of a given type, with an appropriate downcast.
//!
//! Runs the generic NodeQuery over the subtree rooted at 'top', then downcasts
//! every hit to NodeType*. The 'variant' defaults to NodeType's own variant, so
//! each hit is expected to be of the requested type; a failed downcast trips the
//! assertion.
template <typename NodeType>
std::vector<NodeType*> querySubTree(SgNode* top, VariantT variant = (VariantT)NodeType::static_variant)
{
  Rose_STL_Container<SgNode*> rawNodes = NodeQuery::querySubTree(top,variant);

  std::vector<NodeType*> typedNodes;
  typedNodes.reserve(rawNodes.size());

  for (Rose_STL_Container<SgNode*>::const_iterator it = rawNodes.begin(); it != rawNodes.end(); ++it)
     {
    // Every node returned for this variant should downcast cleanly.
       NodeType* typedNode = dynamic_cast<NodeType*>(*it);
       ROSE_ASSERT (typedNode);
       typedNodes.push_back(typedNode);
     }

  return typedNodes;
}
/*! \brief Returns STL vector of SgFile IR node pointers.
Demonstrates use of restricted traversal over just SgFile IR nodes.
*/
std::vector < SgFile * >generateFileList ();
/** Get the current SgProject IR Node.
*
* The library should never have more than one project and it asserts such. If no project has been created yet then this
* function returns the null pointer. */
ROSE_DLL_API SgProject * getProject();
//! \return the project associated with a node
SgProject * getProject(const SgNode * node);
//! Query memory pools to grab SgNode of a specified type
//! Query memory pools to grab SgNode of a specified type.
//!
//! Walks NodeType's memory pool with a local visitor and returns every node it
//! contains, downcast to NodeType*.
template <typename NodeType>
static std::vector<NodeType*> getSgNodeListFromMemoryPool()
{
// Local visitor: records each pool node after downcasting it to NodeType.
  class PoolCollector : public ROSE_VisitTraversal
  {
    public:
      std::vector<NodeType*> collected;

      virtual ~PoolCollector() {}

      void visit ( SgNode* node)
      {
     // Nodes in NodeType's pool are expected to be of that type; the guard is
     // kept so release builds (where the assert compiles out) skip a bad cast.
        NodeType* typedNode = dynamic_cast<NodeType* > (node);
        ROSE_ASSERT(typedNode != NULL);
        if (typedNode != NULL)
        {
          collected.push_back(typedNode);
        }
      }
  };

  PoolCollector collector;
  NodeType::traverseMemoryPoolNodes(collector);
  return collector.collected;
}
/*! \brief top-down traversal from current node to find the main() function declaration
*/
ROSE_DLL_API SgFunctionDeclaration* findMain(SgNode* currentNode);
//! Find the last declaration statement within a scope (if any). This is often useful to decide where to insert another variable declaration statement. Pragma declarations are not treated as a declaration by default in this context.
SgStatement* findLastDeclarationStatement(SgScopeStatement * scope, bool includePragma = false);
//midend/programTransformation/partialRedundancyElimination/pre.h
//! Find referenced symbols within an expression
std::vector<SgVariableSymbol*> getSymbolsUsedInExpression(SgExpression* expr);
//! Find break statements inside a particular statement, stopping at nested loops or switches
/*! loops or switch statements defines their own contexts for break
statements. The function will stop immediately if run on a loop or switch
statement. If fortranLabel is non-empty, breaks (EXITs) to that label within
nested loops are included in the returned list.
*/
std::vector<SgBreakStmt*> findBreakStmts(SgStatement* code, const std::string& fortranLabel = "");
//! Find all continue statements inside a particular statement, stopping at nested loops
/*! Nested loops define their own contexts for continue statements. The
function will stop immediately if run on a loop
statement. If fortranLabel is non-empty, continues (CYCLEs) to that label
within nested loops are included in the returned list.
*/
std::vector<SgContinueStmt*> findContinueStmts(SgStatement* code, const std::string& fortranLabel = "");
std::vector<SgGotoStatement*> findGotoStmts(SgStatement* scope, SgLabelStatement* l);
std::vector<SgStatement*> getSwitchCases(SgSwitchStatement* sw);
//! Collect all variable references in a subtree
void collectVarRefs(SgLocatedNode* root, std::vector<SgVarRefExp* >& result);
//! Topdown traverse a subtree from root to find the first declaration given its name, scope (optional, can be NULL), and defining or nondefining flag.
// Topdown recursive search: tests whether 'root' itself is the declaration of
// type T matching 'name' (and 'scope', when non-NULL), then recurses into its
// traversal successors. When 'isDefining' is true the defining declaration of
// the match is returned instead of the match itself.
template <typename T>
T* findDeclarationStatement(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining)
{
bool found = false;
#if 0
printf ("In findDeclarationStatement(): root = %p \n",root);
printf ("In findDeclarationStatement(): name = %s \n",name.c_str());
printf ("In findDeclarationStatement(): scope = %p \n",scope);
printf ("In findDeclarationStatement(): isDefining = %s \n",isDefining ? "true" : "false");
#endif
// Do we really want a NULL pointer to be acceptable input to this function?
// Maybe we should have an assertion that it is non-null?
if (!root) return NULL;
// Does the current node itself qualify?
T* decl = dynamic_cast<T*>(root);
#if 0
printf ("In findDeclarationStatement(): decl = %p \n",decl);
#endif
if (decl != NULL)
{
if (scope)
{
// NOTE(review): search_for_symbol_from_symbol_table() is dereferenced without a
// NULL check here and below — presumably a declaration of type T always has a
// symbol; confirm before relying on this with partially-built ASTs.
if ((decl->get_scope() == scope) && (decl->search_for_symbol_from_symbol_table()->get_name() == name))
{
found = true;
}
}
else // Liao 2/9/2010. We should allow NULL scope
{
#if 0
// DQ (12/6/2016): Include this into the debugging code to avoid compiler warning about unused variable.
SgSymbol* symbol = decl->search_for_symbol_from_symbol_table();
printf ("In findDeclarationStatement(): decl->search_for_symbol_from_symbol_table() = %p \n",symbol);
printf ("In findDeclarationStatement(): decl->search_for_symbol_from_symbol_table()->get_name() = %s \n",symbol->get_name().str());
#endif
// No scope given: match on name alone.
if (decl->search_for_symbol_from_symbol_table()->get_name() == name)
{
found = true;
}
}
}
if (found)
{
if (isDefining)
{
#if 0
printf ("In findDeclarationStatement(): decl->get_firstNondefiningDeclaration() = %p \n",decl->get_firstNondefiningDeclaration());
printf ("In findDeclarationStatement(): decl->get_definingDeclaration() = %p \n",decl->get_definingDeclaration());
#endif
// Caller asked for the defining declaration; it must exist for this match.
ROSE_ASSERT (decl->get_definingDeclaration() != NULL);
#if 0
printf ("In findDeclarationStatement(): returning decl->get_definingDeclaration() = %p \n",decl->get_definingDeclaration());
#endif
return dynamic_cast<T*> (decl->get_definingDeclaration());
}
else
{
#if 0
printf ("In findDeclarationStatement(): returning decl = %p \n",decl);
#endif
return decl;
}
}
// No match at this node: recurse into the AST traversal successors,
// returning the first match found in preorder.
std::vector<SgNode*> children = root->get_traversalSuccessorContainer();
#if 0
printf ("In findDeclarationStatement(): children.size() = %zu \n",children.size());
#endif
// DQ (4/10/2016): Note that if we are searching for a function member that has it's defining
// declaration defined outside of the class then it will not be found in the child list.
for (std::vector<SgNode*>::const_iterator i = children.begin(); i != children.end(); ++i)
{
T* target = findDeclarationStatement<T> (*i,name,scope,isDefining);
if (target)
{
return target;
}
}
return NULL;
}
//! Topdown traverse a subtree from root to find the first function declaration matching the given name, scope (optional, can be NULL), and defining or nondefining flag. This is an instantiation of findDeclarationStatement<T>.
SgFunctionDeclaration* findFunctionDeclaration(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining);
#if 0 //TODO
// 1. preorder traversal from current SgNode till find next SgNode of type V_SgXXX
// until reach the end node
SgNode* getNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);
// 2. return all nodes of type VariantT following the source node
std::vector<SgNode*> getAllNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name Bottom up search
\brief Backwards traverse through the AST to find a node, findEnclosingXXX()
*/
// remember to put const to all arguments.
/** Find a node by type using upward traversal.
*
* Traverse backward through a specified node's ancestors, starting with the node's parent and progressing to more distant
* ancestors, to find the first node matching the specified or derived type. If @p includingSelf is true then the
* starting node, @p astNode, is returned if its type matches, otherwise the search starts at the parent of @p astNode.
*
* For the purposes of this function, the parent (P) of an SgDeclarationStatement node (N) is considered to be the first
* non-defining declaration of N if N has both a defining declaration and a first non-defining declaration and the defining
* declaration is different than the first non-defining declaration.
*
* If no ancestor of the requisite type of subtypes is found then this function returns a null pointer.
*
* If @p astNode is the null pointer, then the return value is a null pointer. That is, if there is no node, then there cannot
* be an enclosing node of the specified type. */
// Upward search for the nearest enclosing node of NodeType (see the Doxygen
// comment above). Two implementations are kept behind #if 1: the active (older)
// one does a first pass purely to detect ancestry cycles, then restarts from
// the first parent for the real walk; the inactive (newer) one uses a 'seen'
// set instead.
template <typename NodeType>
NodeType* getEnclosingNode(const SgNode* astNode, const bool includingSelf = false)
{
#if 1
// DQ (10/20/2012): This is the older version of this implementation. Until I am sure that
// the newer version (below) is what we want to use I will resolve this conflict by keeping
// the previous version in place.
if (NULL == astNode)
{
return NULL;
}
if ( (includingSelf ) && (dynamic_cast<const NodeType*>(astNode)) )
{
return const_cast<NodeType*>(dynamic_cast<const NodeType*> (astNode));
}
// DQ (3/5/2012): Check for reference to self...
ROSE_ASSERT(astNode->get_parent() != astNode);
SgNode* parent = astNode->get_parent();
// DQ (3/5/2012): Check for loops that will cause infinite loops.
SgNode* previouslySeenParent = parent;
bool foundCycle = false;
// First pass: walk upward only to decide whether the parent chain loops back
// to the starting parent; the result of this walk is otherwise discarded.
while ( (foundCycle == false) && (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
{
ROSE_ASSERT(parent->get_parent() != parent);
#if 0
printf ("In getEnclosingNode(): parent = %p = %s \n",parent,parent->class_name().c_str());
#endif
parent = parent->get_parent();
// DQ (3/5/2012): Check for loops that will cause infinite loops.
// ROSE_ASSERT(parent != previouslySeenParent);
if (parent == previouslySeenParent)
{
foundCycle = true;
}
}
#if 0
printf ("previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif
// Restart from the first parent: the loop above only detected cycles; the
// second loop below performs the actual search.
parent = previouslySeenParent;
SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
if (declarationStatement != NULL)
{
#if 0
printf ("Found a SgDeclarationStatement \n");
#endif
SgDeclarationStatement* definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
printf (" --- declarationStatement = %p \n",declarationStatement);
printf (" --- definingDeclaration = %p \n",definingDeclaration);
if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
{
#if 0
printf ("Found a nondefining declaration so use the non-defining declaration instead \n");
#endif
// DQ (10/19/2012): Use the defining declaration instead.
// parent = firstNondefiningDeclaration;
parent = definingDeclaration;
}
}
#if 0
printf ("reset: previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif
// DQ (10/19/2012): This branch is just to document the cycle that was previously detected, it is for
// debugging only. Thus it only makes sense for it to be executed when "(foundCycle == true)". However,
// this will have to be revisited later since it appears clear that it is a problem for the binary analysis
// work when it is visited for this case. Since the cycle is detected, but there is no assertion on the
// cycle, we don't exit when a cycle is identified (which is the point of the code below).
// Note also that I have fixed the code (above and below) to only chase pointers through defining
// declarations (where they exist), this is important since non-defining declarations can be almost
// anywhere (and thus chasing them can make it appear that there are cycles where there are none
// (I think); test2012_234.C demonstrates an example of this.
// DQ (10/9/2012): Robb has suggested this change to fix the binary analysis work.
// if (foundCycle == true)
if (foundCycle == false)
{
// Second pass (only when the ancestry is cycle-free): the real upward search,
// redirecting through defining declarations as it goes.
while ( (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
{
ROSE_ASSERT(parent->get_parent() != parent);
#if 0
printf ("In getEnclosingNode() (2nd try): parent = %p = %s \n",parent,parent->class_name().c_str());
if (parent->get_file_info() != NULL)
parent->get_file_info()->display("In getEnclosingNode() (2nd try): debug");
#endif
SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
if (declarationStatement != NULL)
{
#if 0
printf ("Found a SgDeclarationStatement \n");
#endif
SgDeclarationStatement* definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
printf (" --- declarationStatement = %p = %s \n",declarationStatement,(declarationStatement != NULL) ? declarationStatement->class_name().c_str() : "null");
printf (" --- definingDeclaration = %p \n",definingDeclaration);
if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
{
#if 0
printf ("Found a nondefining declaration so use the firstNondefining declaration instead \n");
#endif
// DQ (10/19/2012): Use the defining declaration instead.
// parent = firstNondefiningDeclaration;
parent = definingDeclaration;
}
}
parent = parent->get_parent();
#if 1
// DQ (3/5/2012): Check for loops that will cause infinite loops.
ROSE_ASSERT(parent != previouslySeenParent);
#else
printf ("WARNING::WARNING::WARNING commented out assertion for parent != previouslySeenParent \n");
if (parent == previouslySeenParent)
break;
#endif
}
}
// Either a NodeType ancestor (non-NULL cast) or NULL when none was found.
return const_cast<NodeType*>(dynamic_cast<const NodeType*> (parent));
#else
// DQ (10/20/2012): Using Robb's newer version with my modification to use the definingDeclaration rather than firstNondefiningDeclaration (below).
// Find the parent of specified type, but watch out for cycles in the ancestry (which would cause an infinite loop).
// Cast away const because isSg* functions aren't defined for const node pointers; and our return is not const.
SgNode *node = const_cast<SgNode*>(!astNode || includingSelf ? astNode : astNode->get_parent());
std::set<const SgNode*> seen; // nodes we've seen, in order to detect cycles
while (node) {
if (NodeType *found = dynamic_cast<NodeType*>(node))
return found;
// FIXME: Cycle detection could be moved elsewhere so we don't need to do it on every call. [RPM 2012-10-09]
ROSE_ASSERT(seen.insert(node).second);
// Traverse to parent (declaration statements are a special case)
if (SgDeclarationStatement *declarationStatement = isSgDeclarationStatement(node)) {
SgDeclarationStatement *definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement *firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
if (definingDeclaration && firstNondefiningDeclaration && declarationStatement != firstNondefiningDeclaration) {
// DQ (10/19/2012): Use the defining declaration instead.
// node = firstNondefiningDeclaration;
node = definingDeclaration;
}
} else {
node = node->get_parent();
}
}
return NULL;
#endif
}
//! Find enclosing source file node
ROSE_DLL_API SgSourceFile* getEnclosingSourceFile(SgNode* n, const bool includingSelf=false);
//! Get the closest scope from astNode. Return astNode if it is already a scope.
ROSE_DLL_API SgScopeStatement* getScope(const SgNode* astNode);
//! Get the enclosing scope from a node n
ROSE_DLL_API SgScopeStatement* getEnclosingScope(SgNode* n, const bool includingSelf=false);
//! Traverse back through a node's parents to find the enclosing global scope
ROSE_DLL_API SgGlobal* getGlobalScope( const SgNode* astNode);
//! Find the function definition
ROSE_DLL_API SgFunctionDefinition* getEnclosingProcedure(SgNode* n, const bool includingSelf=false);
ROSE_DLL_API SgFunctionDefinition* getEnclosingFunctionDefinition(SgNode* astNode, const bool includingSelf=false);
//! Find the closest enclosing statement, including the given node
ROSE_DLL_API SgStatement* getEnclosingStatement(SgNode* n);
//! Find the closest switch outside a given statement (normally used for case and default statements)
ROSE_DLL_API SgSwitchStatement* findEnclosingSwitch(SgStatement* s);
//! Find enclosing OpenMP clause body statement from s. If s is already one, return it directly.
ROSE_DLL_API SgOmpClauseBodyStatement* findEnclosingOmpClauseBodyStatement(SgStatement* s);
//! Find the closest loop outside the given statement; if fortranLabel is not empty, the Fortran label of the loop must be equal to it
ROSE_DLL_API SgScopeStatement* findEnclosingLoop(SgStatement* s, const std::string& fortranLabel = "", bool stopOnSwitches = false);
//! Find the enclosing function declaration, including its derived instances like isSgProcedureHeaderStatement, isSgProgramHeaderStatement, and isSgMemberFunctionDeclaration.
ROSE_DLL_API SgFunctionDeclaration * getEnclosingFunctionDeclaration (SgNode * astNode, const bool includingSelf=false);
//roseSupport/utility_functions.h
//! get the SgFile node from current node
ROSE_DLL_API SgFile* getEnclosingFileNode (SgNode* astNode );
//! Get the initializer containing an expression if it is within an initializer.
ROSE_DLL_API SgInitializer* getInitializerOfExpression(SgExpression* n);
//! Get the closest class definition enclosing the specified AST node,
ROSE_DLL_API SgClassDefinition* getEnclosingClassDefinition(SgNode* astnode, const bool includingSelf=false);
//! Get the closest class declaration enclosing the specified AST node,
ROSE_DLL_API SgClassDeclaration* getEnclosingClassDeclaration( SgNode* astNode );
// DQ (2/7/2019): Adding support for name qualification of variable references associated with SgPointerMemberType function parameters.
//! Get the enclosing SgExprListExp (used as part of function argument index evaluation in subexpressions).
ROSE_DLL_API SgExprListExp* getEnclosingExprListExp(SgNode* astNode, const bool includingSelf = false);
// DQ (2/7/2019): Need a function to return when an expression is in an expression subtree.
// This is part of index evaluation for expressions in function argument lists, but likely useful elsewhere as well.
ROSE_DLL_API bool isInSubTree(SgExpression* subtree, SgExpression* exp);
// DQ (2/7/2019): Need a function to return the SgFunctionDeclaration from a SgFunctionCallExp.
ROSE_DLL_API SgFunctionDeclaration* getFunctionDeclaration ( SgFunctionCallExp* functionCallExp );
// DQ (2/17/2019): Generalizing this support for SgVarRefExp and SgMemberFunctionRefExp nodes.
// DQ (2/8/2019): Adding support for detecting when to use added name qualification for pointer-to-member expressions.
ROSE_DLL_API bool isDataMemberReference(SgVarRefExp* varRefExp);
// ROSE_DLL_API bool isAddressTaken(SgVarRefExp* varRefExp);
ROSE_DLL_API bool isAddressTaken(SgExpression* refExp);
// DQ (2/17/2019): Adding support for detecting when to use added name qualification for member function references.
ROSE_DLL_API bool isMemberFunctionMemberReference(SgMemberFunctionRefExp* memberFunctionRefExp);
// DQ (2/15/2019): Adding support for detecting which class a member reference is being made from.
// ROSE_DLL_API SgClassType* getClassTypeForDataMemberReference(SgVarRefExp* varRefExp);
// ROSE_DLL_API std::list<SgClassType*> getClassTypeChainForDataMemberReference(SgVarRefExp* varRefExp);
ROSE_DLL_API std::list<SgClassType*> getClassTypeChainForMemberReference(SgExpression* refExp);
// DQ (2/17/2019): Display the shared nodes in the AST for debugging.
ROSE_DLL_API void outputSharedNodes( SgNode* node );
// TODO
#if 0
SgNode * getEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL);
std::vector<SgNode *> getAllEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL);
SgVariableDeclaration* findVariableDeclaratin( const string& varname)
SgClassDeclaration* getEnclosingClassDeclaration( const SgNode* astNode);
// e.g. for some expression, find its parent statement
SgStatement* getEnclosingStatement(const SgNode* astNode);
SgSwitchStatement* getEnclosingSwitch(SgStatement* s);
SgModuleStatement* getEnclosingModuleStatement( const SgNode* astNode);
// used to build a variable reference for compiler generated code in current scope
SgSymbol * findReachingDefinition (SgScopeStatement* startScope, SgName &name);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST Walk and Traversal
\brief
*/
// Liao, 1/9/2008
/*!
\brief return the first global scope under current project
*/
ROSE_DLL_API SgGlobal * getFirstGlobalScope(SgProject *project);
/*!
\brief get the last statement within a scope, return NULL if it does not exist
*/
ROSE_DLL_API SgStatement* getLastStatement(SgScopeStatement *scope);
//! Get the first statement within a scope, return NULL if it does not exist. Skip compiler-generated statement by default. Count transformation-generated ones, but excluding those which are not to be outputted in unparsers.
ROSE_DLL_API SgStatement* getFirstStatement(SgScopeStatement *scope,bool includingCompilerGenerated=false);
//!Find the first defining function declaration statement in a scope
ROSE_DLL_API SgFunctionDeclaration* findFirstDefiningFunctionDecl(SgScopeStatement* scope);
//! Get next statement within the same scope of current statement
ROSE_DLL_API SgStatement* getNextStatement(SgStatement * currentStmt);
//! Get previous statement of the current statement. It may return a previous statement of a parent scope by default (climbOutScope is true), otherwise only a previous statement of the same scope is returned.
ROSE_DLL_API SgStatement* getPreviousStatement(SgStatement * currentStmt, bool climbOutScope = true);
#if 0 //TODO
// preorder traversal from current SgNode till find next SgNode of type V_SgXXX
SgNode* getNextSgNode( const SgNode* currentNode, VariantT=V_SgNode);
#endif
// DQ (11/15/2018): Adding support for traversals over the include file tree.
//! return path prefix for subtree of include files.
void listHeaderFiles ( SgIncludeFile* includeFile );
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST Comparison
\brief Compare AST nodes, subtree, etc
*/
//! Check if a SgIntVal node has a given value
ROSE_DLL_API bool isEqualToIntConst(SgExpression* e, int value);
//! Check if two function declarations refer to the same one. Two function declarations are the same when they are a) identical, b) same name in C c) same qualified named and mangled name in C++. A nondefining (prototype) declaration and a defining declaration of a same function are treated as the same.
/*!
* There is a similar function bool compareFunctionDeclarations(SgFunctionDeclaration *f1, SgFunctionDeclaration *f2) from Classhierarchy.C
*/
ROSE_DLL_API bool isSameFunction(SgFunctionDeclaration* func1, SgFunctionDeclaration* func2);
//! Check if a statement is the last statement within its closed scope
ROSE_DLL_API bool isLastStatement(SgStatement* stmt);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST insert, removal, and replacement
\brief Add, remove, and replace AST
scope->append_statement(), exprListExp->append_expression() etc. are not enough to handle side effect of parent pointers, symbol tables, preprocessing info, defining/nondefining pointers etc.
*/
// DQ (2/24/2009): Simple function to delete an AST subtree (used in outlining).
//! Function to delete AST subtree's nodes only, users must take care of any dangling pointers, symbols or types that result.
ROSE_DLL_API void deleteAST(SgNode* node);
//! Special purpose function for deleting AST expression tress containing valid original expression trees in constant folded expressions (for internal use only).
ROSE_DLL_API void deleteExpressionTreeWithOriginalExpressionSubtrees(SgNode* root);
// DQ (2/25/2009): Added new function to support outliner.
//! Move statements in first block to the second block (preserves order and rebuilds the symbol table).
ROSE_DLL_API void moveStatementsBetweenBlocks ( SgBasicBlock* sourceBlock, SgBasicBlock* targetBlock );
//! Move a variable declaration to a new scope, handle symbol, special scopes like For loop, etc.
ROSE_DLL_API void moveVariableDeclaration(SgVariableDeclaration* decl, SgScopeStatement* target_scope);
//! Append a statement to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc.
ROSE_DLL_API void appendStatement(SgStatement *stmt, SgScopeStatement* scope=NULL);
//! Append a statement to the end of SgForInitStatement
ROSE_DLL_API void appendStatement(SgStatement *stmt, SgForInitStatement* for_init_stmt);
//! Append a list of statements to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc.
ROSE_DLL_API void appendStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL);
// DQ (2/6/2009): Added function to support outlining into separate file.
//! Append a copy ('decl') of a function ('original_statement') into a 'scope', include any referenced declarations required if the scope is within a compiler generated file. All referenced declarations, including those from headers, are inserted if excludeHeaderFiles is set to true (the new file will not have any headers).
ROSE_DLL_API void appendStatementWithDependentDeclaration( SgDeclarationStatement* decl, SgGlobal* scope, SgStatement* original_statement, bool excludeHeaderFiles );
//! Prepend a statement to the beginning of the current scope, handling side
//! effects as appropriate
ROSE_DLL_API void prependStatement(SgStatement *stmt, SgScopeStatement* scope=NULL);
//! Prepend a statement to the beginning of SgForInitStatement
ROSE_DLL_API void prependStatement(SgStatement *stmt, SgForInitStatement* for_init_stmt);
//! prepend a list of statements to the beginning of the current scope,
//! handling side effects as appropriate
ROSE_DLL_API void prependStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL);
//! Check if a scope statement has a simple children statement list
//! so inserting additional statements under the scope is straightforward and unambiguous.
//! for example, SgBasicBlock has a simple statement list while IfStmt does not.
ROSE_DLL_API bool hasSimpleChildrenList (SgScopeStatement* scope);
//! Insert a statement before or after the target statement within the target's scope. Move around preprocessing info automatically
ROSE_DLL_API void insertStatement(SgStatement *targetStmt, SgStatement* newStmt, bool insertBefore= true, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements before or after the target statement within the
//target's scope
ROSE_DLL_API void insertStatementList(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts, bool insertBefore= true);
//! Insert a statement before a target statement
ROSE_DLL_API void insertStatementBefore(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements before a target statement
ROSE_DLL_API void insertStatementListBefore(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts);
//! Insert a statement after a target statement, Move around preprocessing info automatically by default
ROSE_DLL_API void insertStatementAfter(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements after a target statement
ROSE_DLL_API void insertStatementListAfter(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmt);
//! Insert a statement after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found
ROSE_DLL_API void insertStatementAfterLastDeclaration(SgStatement* stmt, SgScopeStatement* scope);
//! Insert a list of statements after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found
ROSE_DLL_API void insertStatementAfterLastDeclaration(std::vector<SgStatement*> stmt_list, SgScopeStatement* scope);
//! Insert a statement before the first non-declaration statement in a scope. If the scope has no non-declaration statements
// then the statement is inserted at the end of the scope.
ROSE_DLL_API void insertStatementBeforeFirstNonDeclaration(SgStatement *newStmt, SgScopeStatement *scope,
bool movePreprocessingInfo=true);
//! Insert statements before the first non-declaration statement in a scope. If the scope has no non-declaration statements
//then the new statements are inserted at the end of the scope.
ROSE_DLL_API void insertStatementListBeforeFirstNonDeclaration(const std::vector<SgStatement*> &newStmts, SgScopeStatement *scope);
// DQ (11/21/2018): We need to sometimes insert something after the last statement of the collection from rose_edg_required_macros_and_functions.h.
ROSE_DLL_API SgStatement* lastFrontEndSpecificStatement( SgGlobal* globalScope );
//! Remove a statement from its attach point of the AST. Automatically keep its associated preprocessing information at the original place after the removal. The statement is still in memory and it is up to the users to decide if the removed one will be inserted somewhere else or released from memory (deleteAST()).
ROSE_DLL_API void removeStatement(SgStatement* stmt, bool autoRelocatePreprocessingInfo = true);
//! Deep delete a sub AST tree. It uses postorder traversal to delete each child node. Users must take care of any dangling pointers, symbols or types that result. This is identical to deleteAST()
ROSE_DLL_API void deepDelete(SgNode* root);
//! Replace a statement with another. Move preprocessing information from oldStmt to newStmt if requested.
// NOTE(review): parameter name corrected from "movePreprocessinInfo"; C++ allows the
// declaration's parameter name to differ from the definition's, so this is interface-safe.
ROSE_DLL_API void replaceStatement(SgStatement* oldStmt, SgStatement* newStmt, bool movePreprocessingInfo = false);
//! Replace an anchor node with a specified pattern subtree with optional SgVariantExpression. All SgVariantExpression in the pattern will be replaced with copies of the anchor node.
ROSE_DLL_API SgNode* replaceWithPattern (SgNode * anchor, SgNode* new_pattern);
//! Replace all variable references to an old symbol in a scope to being references to a new symbol.
// Essentially replace variable a with b.
ROSE_DLL_API void replaceVariableReferences(SgVariableSymbol* old_sym, SgVariableSymbol* new_sym, SgScopeStatement * scope );
// DQ (11/12/2018): Adding test to avoid issues that we can't test for in the unparsing of header files using the token based unparsing.
//! If header file unparsing and token-based unparsing are used, then some statements in header files
//! used with the same name and different include syntax can't be transformed. This is currently because
//! there is no way to generally test the resulting transformed code generated by ROSE.
ROSE_DLL_API bool statementCanBeTransformed(SgStatement* stmt);
/** Given an expression, generates a temporary variable whose initializer optionally evaluates
* that expression. Then, the var reference expression returned can be used instead of the original
* expression. The temporary variable created can be reassigned to the expression by the returned SgAssignOp;
* this can be used when the expression the variable represents needs to be evaluated. NOTE: This handles
* reference types correctly by using pointer types for the temporary.
* @param expression Expression which will be replaced by a variable
* @param scope scope in which the temporary variable will be generated
* @param reEvaluate an assignment op to reevaluate the expression. Leave NULL if not needed
* @return declaration of the temporary variable, and a a variable reference expression to use instead of
* the original expression. */
std::pair<SgVariableDeclaration*, SgExpression* > createTempVariableForExpression(SgExpression* expression,
SgScopeStatement* scope, bool initializeInDeclaration, SgAssignOp** reEvaluate = NULL);
/* This function creates a temporary variable for a given expression in the given scope
This is different from SageInterface::createTempVariableForExpression in that it does not
try to be smart to create pointers to reference types and so on. The temp is initialized to the expression.
The caller is responsible for setting the parent of SgVariableDeclaration since buildVariableDeclaration
may not set_parent() when the scope stack is empty. See programTransformation/extractFunctionArgumentsNormalization/ExtractFunctionArguments.C for sample usage.
@param expression Expression which will be replaced by a variable
@param scope scope in which the temporary variable will be generated
*/
std::pair<SgVariableDeclaration*, SgExpression*> createTempVariableAndReferenceForExpression
(SgExpression* expression, SgScopeStatement* scope);
//! Append an argument to SgFunctionParameterList, transparently set parent,scope, and symbols for arguments when possible
/*! We recommend to build SgFunctionParameterList before building a function declaration
However, it is still allowed to append new arguments for existing function declarations.
\todo function type , function symbol also need attention.
*/
ROSE_DLL_API SgVariableSymbol* appendArg(SgFunctionParameterList *, SgInitializedName*);
//!Prepend an argument to SgFunctionParameterList
ROSE_DLL_API SgVariableSymbol* prependArg(SgFunctionParameterList *, SgInitializedName*);
//! Append an expression to a SgExprListExp, set the parent pointer also
ROSE_DLL_API void appendExpression(SgExprListExp *, SgExpression*);
//! Append an expression list to a SgExprListExp, set the parent pointers also
ROSE_DLL_API void appendExpressionList(SgExprListExp *, const std::vector<SgExpression*>&);
//! Set parameter list for a function declaration, considering existing parameter list etc.
template <class actualFunction>
void setParameterList(actualFunction *func,SgFunctionParameterList *paralist) {
// Attach 'paralist' as the parameter list of 'func': release any previously owned
// list, install the new one, set its parent, and point each parameter's declptr
// back at 'func'. Both arguments must be non-NULL.
// TODO consider the difference between C++ and Fortran
// fixup the scope of arguments,no symbols for nondefining function declaration's arguments
// DQ (11/25/2011): templated function so that we can handle both
// SgFunctionDeclaration and SgTemplateFunctionDeclaration (and their associated member
// function derived classes).
ROSE_ASSERT(func != NULL);
ROSE_ASSERT(paralist != NULL);
#if 0
// At this point we don't have cerr and endl defined, so comment this code out.
// Warn to users if a paralist is being shared
if (paralist->get_parent() !=NULL)
{
cerr << "Waring! Setting a used SgFunctionParameterList to function: "
<< (func->get_name()).getString()<<endl
<< " Sharing parameter lists can corrupt symbol tables!"<<endl
<< " Please use deepCopy() to get an exclusive parameter list for each function declaration!"<<endl;
// ROSE_ASSERT(false);
}
#endif
// Liao,2/5/2008 constructor of SgFunctionDeclaration will automatically generate SgFunctionParameterList, so be cautious when set new paralist!!
// Delete the pre-existing (possibly auto-generated) list first, but only when it is
// a different object than 'paralist' — deleting the incoming list would be a use-after-free.
if (func->get_parameterList() != NULL)
{
if (func->get_parameterList() != paralist)
{
delete func->get_parameterList();
}
}
func->set_parameterList(paralist);
paralist->set_parent(func);
// DQ (5/15/2012): Need to set the declptr in each SgInitializedName IR node.
// This is needed to support the AST Copy mechanism (at least). The files: test2005_150.C,
// test2012_81.C and testcode2012_82.C demonstrate this problem.
SgInitializedNamePtrList & args = paralist->get_args();
for (SgInitializedNamePtrList::iterator i = args.begin(); i != args.end(); i++)
{
(*i)->set_declptr(func);
}
}
//! Set a pragma of a pragma declaration. handle memory release for preexisting pragma, and set parent pointer.
ROSE_DLL_API void setPragma(SgPragmaDeclaration* decl, SgPragma *pragma);
//! Replace an expression with another, used for variable reference substitution and others. the old expression can be deleted (default case) or kept.
ROSE_DLL_API void replaceExpression(SgExpression* oldExp, SgExpression* newExp, bool keepOldExp=false);
//! Replace a given expression with a list of statements produced by a generator
ROSE_DLL_API void replaceExpressionWithStatement(SgExpression* from,
SageInterface::StatementGenerator* to);
//! Similar to replaceExpressionWithStatement, but with more restrictions.
//! Assumptions: from is not within the test of a loop or ifStmt, not currently traversing from or the statement it is in
ROSE_DLL_API void replaceSubexpressionWithStatement(SgExpression* from,
SageInterface::StatementGenerator* to);
//! Set operands for expressions with single operand, such as unary expressions. handle file info, lvalue, pointer downcasting, parent pointer etc.
ROSE_DLL_API void setOperand(SgExpression* target, SgExpression* operand);
//!set left hand operand for binary expressions, transparently downcasting target expressions when necessary
ROSE_DLL_API void setLhsOperand(SgExpression* target, SgExpression* lhs);
//!set left hand operand for binary expression
ROSE_DLL_API void setRhsOperand(SgExpression* target, SgExpression* rhs);
//! Set original expression trees to NULL for SgValueExp or SgCastExp expressions, so you can change the value and have it unparsed correctly.
ROSE_DLL_API void removeAllOriginalExpressionTrees(SgNode* top);
// DQ (1/25/2010): Added support for directories
//! Move file to be generated in a subdirectory (will be generated by the unparser).
ROSE_DLL_API void moveToSubdirectory ( std::string directoryName, SgFile* file );
//! Supporting function to comment relocation in insertStatement() and removeStatement().
ROSE_DLL_API SgStatement* findSurroundingStatementFromSameFile(SgStatement* targetStmt, bool & surroundingStatementPreceedsTargetStatement);
//! Relocate comments and CPP directives from one statement to another.
ROSE_DLL_API void moveCommentsToNewStatement(SgStatement* sourceStatement, const std::vector<int> & indexList, SgStatement* targetStatement, bool surroundingStatementPreceedsTargetStatement);
// DQ (7/19/2015): This is required to support general unparsing of template instantations for the GNU g++
// compiler which does not permit name qualification to be used to support the expression of the namespace
// where a template instantiation would be placed. Such name qualification would also sometimes require
// global qualification which is also not allowed by the GNU g++ compiler. These issues appear to be
// specific to the GNU compiler versions, at least versions 4.4 through 4.8.
//! Relocate the declaration to be explicitly represented in its associated namespace (required for some backend compilers to process template instantiations).
ROSE_DLL_API void moveDeclarationToAssociatedNamespace ( SgDeclarationStatement* declarationStatement );
ROSE_DLL_API bool isTemplateInstantiationNode(SgNode* node);
ROSE_DLL_API void wrapAllTemplateInstantiationsInAssociatedNamespaces(SgProject* root);
// DQ (12/1/2015): Adding support for fixup internal data structures that have references to statements (e.g. macro expansions).
ROSE_DLL_API void resetInternalMapsForTargetStatement(SgStatement* sourceStatement);
// DQ (6/7/2019): Add support for transforming function definitions to function prototypes in a subtree.
// We might have to make this specific to a file (only traversing the functions in that file).
ROSE_DLL_API void convertFunctionDefinitionsToFunctionPrototypes(SgNode* node);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST repair, fix, and postprocessing.
\brief Mostly used internally when some AST pieces are built without knowing their target
scope/parent, especially during bottom-up construction of AST. The associated symbols,
parent and scope pointers cannot be set on construction then.
A set of utility functions are provided to
patch up scope, parent, symbol for them when the target scope/parent become know.
*/
//! Connect variable reference to the right variable symbols when feasible, return the number of references being fixed.
/*! In AST translation, it is possible to build a variable reference before the variable
is being declared. buildVarRefExp() will use fake initialized name and symbol as placeholders
to get the work done. Users should call fixVariableReference() when AST is complete and all
variable declarations are in place.
*/
ROSE_DLL_API int fixVariableReferences(SgNode* root, bool cleanUnusedSymbol=true);
//!Patch up symbol, scope, and parent information when a SgVariableDeclaration's scope is known.
/*!
It is possible to build a variable declaration without knowing its scope information during bottom-up construction of AST, though top-down construction is recommended in general.
In this case, we have to patch up symbol table, scope and parent information when the scope is known. This function is usually used internally within appendStatement(), insertStatement().
*/
ROSE_DLL_API void fixVariableDeclaration(SgVariableDeclaration* varDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement() etc when a struct declaration was built without knowing its target scope.
ROSE_DLL_API void fixStructDeclaration(SgClassDeclaration* structDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement() etc when a class declaration was built without knowing its target scope.
ROSE_DLL_API void fixClassDeclaration(SgClassDeclaration* classDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement() etc when a namespace declaration was built without knowing its target scope.
ROSE_DLL_API void fixNamespaceDeclaration(SgNamespaceDeclarationStatement* structDecl, SgScopeStatement* scope);
//! Fix symbol table for SgLabelStatement. Used Internally when the label is built without knowing its target scope. Both parameters cannot be NULL.
ROSE_DLL_API void fixLabelStatement(SgLabelStatement* label_stmt, SgScopeStatement* scope);
//! Set a numerical label for a Fortran statement. The statement should have a enclosing function definition already. SgLabelSymbol and SgLabelRefExp are created transparently as needed.
ROSE_DLL_API void setFortranNumericLabel(SgStatement* stmt, int label_value);
//! Suggest next usable (non-conflicting) numeric label value for a Fortran function definition scope
ROSE_DLL_API int suggestNextNumericLabel(SgFunctionDefinition* func_def);
//! Fix the symbol table and set scope (only if scope in declaration is not already set).
ROSE_DLL_API void fixFunctionDeclaration(SgFunctionDeclaration* stmt, SgScopeStatement* scope);
//! Fix the symbol table and set scope (only if scope in declaration is not already set).
ROSE_DLL_API void fixTemplateDeclaration(SgTemplateDeclaration* stmt, SgScopeStatement* scope);
//! A wrapper containing fixes (fixVariableDeclaration(), fixStructDeclaration(), fixLabelStatement(), etc) for all kinds of statements. Should be used before attaching the statement into AST.
ROSE_DLL_API void fixStatement(SgStatement* stmt, SgScopeStatement* scope);
// DQ (6/11/2015): This reports the statements that are marked as transformed (used to debug the token-based unparsing).
//! This collects the statements that are marked as transformed (useful in debugging).
ROSE_DLL_API std::set<SgStatement*> collectTransformedStatements( SgNode* node );
//! This collects the statements that are marked as modified (a flag automatically set by all set_* generated functions) (useful in debugging).
ROSE_DLL_API std::set<SgStatement*> collectModifiedStatements( SgNode* node );
//! This collects the SgLocatedNodes that are marked as modified (a flag automatically set by all set_* generated functions) (useful in debugging).
ROSE_DLL_API std::set<SgLocatedNode*> collectModifiedLocatedNodes( SgNode* node );
// DQ (6/5/2019): Use the previously constructed set (above) to reset the IR nodes to be marked as isModified.
//! Use the set of IR nodes and set the isModified flag in each IR node to true.
ROSE_DLL_API void resetModifiedLocatedNodes(const std::set<SgLocatedNode*> & modifiedNodeSet);
// DQ (10/23/2018): Report nodes that are marked as modified.
ROSE_DLL_API void reportModifiedStatements(const std::string & label, SgNode* node);
// DQ (3/22/2019): Translate CPP directives from attached preprocessor information to CPP Directive Declaration IR nodes.
ROSE_DLL_API void translateToUseCppDeclarations( SgNode* n );
ROSE_DLL_API void translateScopeToUseCppDeclarations( SgScopeStatement* scope );
ROSE_DLL_API std::vector<SgC_PreprocessorDirectiveStatement*> translateStatementToUseCppDeclarations( SgStatement* statement, SgScopeStatement* scope);
ROSE_DLL_API void printOutComments ( SgLocatedNode* locatedNode );
ROSE_DLL_API bool skipTranslateToUseCppDeclaration( PreprocessingInfo* currentPreprocessingInfo );
//@}
//! Update defining and nondefining links due to a newly introduced function declaration. Should be used after inserting the function into a scope.
/*! This function not only set the defining and nondefining links of the newly introduced
* function declaration inside a scope, but also update other same function declarations' links
* accordingly if there are any.
* Assumption: The function has already inserted/appended/prepended into the scope before calling this function.
*/
ROSE_DLL_API void updateDefiningNondefiningLinks(SgFunctionDeclaration* func, SgScopeStatement* scope);
//------------------------------------------------------------------------
//@{
/*! @name Advanced AST transformations, analyses, and optimizations
\brief Some complex but commonly used AST transformations.
*/
//! Collect all read and write references within stmt, which can be a function, a scope statement, or a single statement. Note that a reference can be both read and written, like i++
ROSE_DLL_API bool
collectReadWriteRefs(SgStatement* stmt, std::vector<SgNode*>& readRefs, std::vector<SgNode*>& writeRefs, bool useCachedDefUse=false);
//!Collect unique variables which are read or written within a statement. Note that a variable can be both read and written. The statement can be either of a function, a scope, or a single line statement. For accesses to members of aggregate data, we return the coarse grain aggregate mem obj by default.
ROSE_DLL_API bool collectReadWriteVariables(SgStatement* stmt, std::set<SgInitializedName*>& readVars, std::set<SgInitializedName*>& writeVars, bool coarseGrain=true);
//!Collect read only variables within a statement. The statement can be either of a function, a scope, or a single line statement. For accesses to members of aggregate data, we return the coarse grain aggregate mem obj by default.
ROSE_DLL_API void collectReadOnlyVariables(SgStatement* stmt, std::set<SgInitializedName*>& readOnlyVars, bool coarseGrain=true);
//!Collect read only variable symbols within a statement. The statement can be either of a function, a scope, or a single line statement. For accesses to members of aggregate data, we return the coarse grain aggregate mem obj by default.
ROSE_DLL_API void collectReadOnlySymbols(SgStatement* stmt, std::set<SgVariableSymbol*>& readOnlySymbols, bool coarseGrain=true);
//! Check if a variable reference is used by its address: including &a expression and foo(a) when type2 foo(Type& parameter) in C++
ROSE_DLL_API bool isUseByAddressVariableRef(SgVarRefExp* ref);
//! Collect variable references involving use by address: including &a expression and foo(a) when type2 foo(Type& parameter) in C++
ROSE_DLL_API void collectUseByAddressVariableRefs (const SgStatement* s, std::set<SgVarRefExp* >& varSetB);
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
//!Call liveness analysis on an entire project
ROSE_DLL_API LivenessAnalysis * call_liveness_analysis(SgProject* project, bool debug=false);
//!get liveIn and liveOut variables for a for loop from liveness analysis result liv.
ROSE_DLL_API void getLiveVariables(LivenessAnalysis * liv, SgForStatement* loop, std::set<SgInitializedName*>& liveIns, std::set<SgInitializedName*> & liveOuts);
#endif
//!Recognize and collect reduction variables and operations within a C/C++ loop, following OpenMP 3.0 specification for allowed reduction variable types and operation types.
ROSE_DLL_API void ReductionRecognition(SgForStatement* loop, std::set< std::pair <SgInitializedName*, OmpSupport::omp_construct_enum> > & results);
//! Constant folding an AST subtree rooted at 'r' (replacing its children with their constant values, if applicable). Please be advised that constant folding on floating point computation may decrease the accuracy of floating point computations!
/*! It is a wrapper function for ConstantFolding::constantFoldingOptimization(). Note that only r's children are replaced with their corresponding constant values, not the input SgNode r itself. You have to call this upon an expression's parent node if you want to fold the expression. */
ROSE_DLL_API void constantFolding(SgNode* r);
//!Instrument(Add a statement, often a function call) into a function right before the return points, handle multiple return statements (with duplicated statement s) and return expressions with side effects. Return the number of statements inserted.
/*! Useful when adding a runtime library call to terminate the runtime system right before the end of a program, especially for OpenMP and UPC runtime systems. Return with complex expressions with side effects are rewritten using an additional assignment statement.
*/
ROSE_DLL_API int instrumentEndOfFunction(SgFunctionDeclaration * func, SgStatement* s);
//! Remove jumps whose label is immediately after the jump. Used to clean up inlined code fragments.
ROSE_DLL_API void removeJumpsToNextStatement(SgNode*);
//! Remove labels which are not targets of any goto statements
ROSE_DLL_API void removeUnusedLabels(SgNode* top);
//! Remove consecutive labels
ROSE_DLL_API void removeConsecutiveLabels(SgNode* top);
//! Merge a variable assignment statement into a matching variable declaration statement. Callers should make sure the merge is semantically correct (by not introducing compilation errors). This function simply does the merge transformation, without eligibility check.
/*!
* e.g. int i; i=10; becomes int i=10; the original i=10 will be deleted after the merge
* if success, return true, otherwise return false (e.g. variable declaration does not match or already has an initializer)
* The original assignment stmt will be removed by default
* This function is a bit ambiguous about the merge direction, to be phased out.
*/
ROSE_DLL_API bool mergeDeclarationAndAssignment (SgVariableDeclaration* decl, SgExprStatement* assign_stmt, bool removeAssignStmt = true);
//! Merge an assignment into its upstream declaration statement. Callers should make sure the merge is semantically correct.
ROSE_DLL_API bool mergeAssignmentWithDeclaration (SgExprStatement* assign_stmt, SgVariableDeclaration* decl, bool removeAssignStmt = true);
//! Merge a declaration statement into a matching followed variable assignment. Callers should make sure the merge is semantically correct (by not introducing compilation errors). This function simply does the merge transformation, without eligibility check.
/*!
* e.g. int i; i=10; becomes int i=10; the original int i; will be deleted after the merge
*/
ROSE_DLL_API bool mergeDeclarationWithAssignment (SgVariableDeclaration* decl, SgExprStatement* assign_stmt);
//! Split a variable declaration with an rhs assignment into two statements: a declaration and an assignment.
/*! Return the generated assignment statement, if any
* e.g. int i =10; becomes int i; i=10;
* This can be seen as a normalization of declarations
*/
ROSE_DLL_API SgExprStatement* splitVariableDeclaration (SgVariableDeclaration* decl);
//! Split declarations within a scope into declarations and assignment statements, by default only top level declarations are considered. Return the number of declarations split.
ROSE_DLL_API int splitVariableDeclaration (SgScopeStatement* scope, bool topLevelOnly = true);
//! Replace an expression with a temporary variable and an assignment statement
/*!
Add a new temporary variable to contain the value of 'from'
Change reference to 'from' to use this new variable
Assumptions: 'from' is not within the test of a loop or 'if'
not currently traversing 'from' or the statement it is in
*/
ROSE_DLL_API SgAssignInitializer* splitExpression(SgExpression* from, std::string newName = "");
//! Split long expressions into blocks of statements
ROSE_DLL_API void splitExpressionIntoBasicBlock(SgExpression* expr);
//! Remove labeled goto statements
ROSE_DLL_API void removeLabeledGotos(SgNode* top);
//! If the given statement contains any break statements in its body, add a new label below the statement and change the breaks into gotos to that new label.
ROSE_DLL_API void changeBreakStatementsToGotos(SgStatement* loopOrSwitch);
//! Check if the body of a 'for' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfFor(SgForStatement* fs);
//! Check if the body of a 'upc_forall' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfUpcForAll(SgUpcForAllStatement* fs);
//! Check if the body of a 'while' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfWhile(SgWhileStmt* ws);
//! Check if the body of a 'do .. while' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfDoWhile(SgDoWhileStmt* ws);
//! Check if the body of a 'switch' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfSwitch(SgSwitchStatement* ws);
//! Check if the body of a 'case option' statement is a SgBasicBlock, create one if not.
SgBasicBlock* ensureBasicBlockAsBodyOfCaseOption(SgCaseOptionStmt* cs);
//! Check if the body of a 'default option' statement is a SgBasicBlock, create one if not.
SgBasicBlock* ensureBasicBlockAsBodyOfDefaultOption(SgDefaultOptionStmt * cs);
//! Check if the true body of a 'if' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsTrueBodyOfIf(SgIfStmt* ifs);
//! Check if the false body of a 'if' statement is a SgBasicBlock, create one if not when the flag is true.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsFalseBodyOfIf(SgIfStmt* ifs, bool createEmptyBody = true);
//! Check if the body of a 'catch' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfCatch(SgCatchOptionStmt* cos);
//! Check if the body of a SgOmpBodyStatement is a SgBasicBlock, create one if not
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfOmpBodyStmt(SgOmpBodyStatement* ompbodyStmt);
// DQ (1/18/2015): This is added to support better quality token-based unparsing.
//! Remove unused basic block IR nodes added as part of normalization.
ROSE_DLL_API void cleanupNontransformedBasicBlockNode();
// DQ (1/18/2015): This is added to support better quality token-based unparsing.
//! Record where normalization have been done so that we can preform denormalizations as required for the token-based unparsing to generate minimal diffs.
ROSE_DLL_API void recordNormalizations(SgStatement* s);
//! Check if a statement is a (true or false) body of a container-like parent, such as For, Upc_forall, Do-while,
//! switch, If, Catch, OmpBodyStmt, etc
bool isBodyStatement (SgStatement* s);
//! Fix up ifs, loops, while, switch, Catch, OmpBodyStatement, etc. to have blocks as body components. It also adds an empty else body to if statements that don't have them.
void changeAllBodiesToBlocks(SgNode* top, bool createEmptyBody = true);
// The same as changeAllBodiesToBlocks(SgNode* top). Phased out.
//void changeAllLoopBodiesToBlocks(SgNode* top);
//! Make a single statement body to be a basic block. Its parent is if, while, catch, or upc_forall etc.
SgBasicBlock * makeSingleStatementBodyToBlock(SgStatement* singleStmt);
#if 0
/** If s is the body of a loop, catch, or if statement and is already a basic block,
* s is returned unmodified. Otherwise generate a SgBasicBlock between s and its parent
* (a loop, catch, or if statement, etc). */
SgLocatedNode* ensureBasicBlockAsParent(SgStatement* s);
#endif
//! Get the constant value from a constant integer expression; abort on
//! everything else. Note that signed long longs are converted to unsigned.
unsigned long long getIntegerConstantValue(SgValueExp* expr);
//! Get a statement's dependent declarations which declares the types used in the statement. The returned vector of declaration statements are sorted according to their appearance order in the original AST. Any reference to a class or template class from a namespace will treated as a reference to the enclosing namespace.
std::vector<SgDeclarationStatement*> getDependentDeclarations (SgStatement* stmt );
//! Insert an expression (new_exp )before another expression (anchor_exp) has possible side effects, without changing the original semantics. This is achieved by using a comma operator: (new_exp, anchor_exp). The comma operator is returned.
SgCommaOpExp *insertBeforeUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp);
//! Insert an expression (new_exp ) after another expression (anchor_exp) has possible side effects, without changing the original semantics. This is done by using two comma operators: type T1; ... ((T1 = anchor_exp, new_exp),T1) )... , where T1 is a temp variable saving the possible side effect of anchor_exp. The top level comma op exp is returned. The reference to T1 in T1 = anchor_exp is saved in temp_ref.
SgCommaOpExp *insertAfterUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp, SgStatement** temp_decl = NULL, SgVarRefExp** temp_ref = NULL);
/// \brief moves the body of a function f to a new function f`;
/// f's body is replaced with code that forwards the call to f`.
/// \return a pair indicating the statement containing the call of f`
/// and an initialized name refering to the temporary variable
/// holding the result of f`. In case f returns void
/// the initialized name is NULL.
/// \param definingDeclaration the defining function declaration of f
/// \param newName the name of function f`
/// \details f's new body becomes { f`(...); } and { int res = f`(...); return res; }
/// for functions returning void and a value, respectively.
/// two function declarations are inserted in f's enclosing scope
/// \code
/// result_type f`(...); <--- (1)
/// result_type f (...) { forward call to f` }
/// result_type f`(...) { original code } <--- (2)
/// \endcode
/// Calls to f are not updated, thus in the transformed code all
/// calls will continue calling f (this is also true for
/// recursive function calls from within the body of f`).
/// After the function has created the wrapper,
/// definingDeclaration becomes the wrapper function
/// The definition of f` is the next entry in the
/// statement list; the forward declaration of f` is the previous
/// entry in the statement list.
/// \pre definingDeclaration must be a defining declaration of a
/// free standing function.
/// typeid(SgFunctionDeclaration) == typeid(definingDeclaration)
/// i.e., this function is NOT implemented for class member functions,
/// template functions, procedures, etc.
std::pair<SgStatement*, SgInitializedName*>
wrapFunction(SgFunctionDeclaration& definingDeclaration, SgName newName);
/// \overload
/// \tparam NameGen functor that generates a new name based on the old name.
/// interface: SgName nameGen(const SgName&)
/// \param nameGen name generator
/// \brief see wrapFunction for details
template <class NameGen>
std::pair<SgStatement*, SgInitializedName*>
wrapFunction(SgFunctionDeclaration& definingDeclaration, NameGen nameGen)
{
  // Derive the wrapped function's new name from its current name, then
  // delegate to the non-template overload that performs the actual rewrite.
  const SgName currentName = definingDeclaration.get_name();
  const SgName wrappedName = nameGen(currentName);
  return wrapFunction(definingDeclaration, wrappedName);
}
/// \brief convenience function that returns the first initialized name in a
/// list of variable declarations.
SgInitializedName& getFirstVariable(SgVariableDeclaration& vardecl);
//@}
// DQ (6/7/2012): Unclear where this function should go...
bool hasTemplateSyntax( const SgName & name );
#if 0
//------------------------AST dump, stringify-----------------------------
//------------------------------------------------------------------------
std::string buildOperatorString ( SgNode* astNode ); //transformationSupport.h
// do we need these?
std::string dump_node(const SgNode* astNode);
std::string dump_tree(const SgNode* astNode);
// or a friendly version of unparseToString(), as a member function
std::string SgNode::toString(bool asSubTree=true); // dump node or subtree
//----------------------------AST comparison------------------------------
//------------------------------------------------------------------------
// How to get generic functions for comparison?
bool isNodeEqual(SgNode* node1, SgNode* node2); //?
bool isTreeEqual(SgNode* tree1, SgNode* tree2);
//! Are two expressions equal (using a deep comparison)?
bool expressionTreeEqual(SgExpression*, SgExpression*);
//! Are corresponding expressions in two lists equal (using a deep comparison)?
bool expressionTreeEqualStar(const SgExpressionPtrList&,
const SgExpressionPtrList&);
//----------------------AST verification/repair----------------------------
//------------------------------------------------------------------------
// sanity check of AST subtree, any suggestions?
// TODO
verifySgNode(SgNode* node, bool subTree=true);
//src/midend/astDiagnostics/AstConsistencyTests.h
// AstTests::runAllTests(SgProject * )
//src/midend/astUtil/astInterface/AstInterface.h.C
//FixSgProject(SgProject &project)
//FixSgTree(SgNode* r)
//src/frontend/SageIII/astPostProcessing
//AstPostProcessing(SgNode * node)
//--------------------------AST modification------------------------------
//------------------------------------------------------------------------
// any operations changing AST tree, including
// insert, copy, delete(remove), replace
// insert before or after some point, argument list is consistent with LowLevelRewrite
void insertAst(SgNode* targetPosition, SgNode* newNode, bool insertBefore=true);
// previous examples
//void myStatementInsert(SgStatement* target,...)
// void AstInterfaceBase::InsertStmt(AstNodePtr const & orig, AstNodePtr const &n, bool insertbefore, bool extractfromBasicBlock)
// copy
// copy children of one basic block to another basic block
//void appendStatementCopy (const SgBasicBlock* a, SgBasicBlock* b);
void copyStatements (const SgBasicBlock* src, SgBasicBlock* dst);
// delete (remove) a node or a whole subtree
void removeSgNode(SgNode* targetNode); // need this?
void removeSgNodeTree(SgNode* subtree); // need this?
void removeStatement( SgStatement* targetStmt);
//Move = delete + insert
void moveAst (SgNode* src, SgNode* target); // need this?
// similar to
void moveStatements (SgBasicBlock* src, SgBasicBlock* target);
// replace= delete old + insert new (via building or copying)
// DQ (1/25/2010): This does not appear to exist as a definition anywhere in ROSE.
// void replaceAst(SgNode* oldNode, SgNode* newNode);
//void replaceChild(SgNode* parent, SgNode* from, SgNode* to);
//bool AstInterface::ReplaceAst( const AstNodePtr& orig, const AstNodePtr& n)
//--------------------------AST transformations---------------------------
//------------------------------------------------------------------------
// Advanced AST modifications through basic AST modifications
// Might not be included in AST utility list, but listed here for the record.
// extract statements/content from a scope
void flattenBlocks(SgNode* n);
//src/midend/astInlining/inlinerSupport.h
void renameVariables(SgNode* n);
void renameLabels(SgNode* n, SgFunctionDefinition* enclosingFunctionDefinition);
void simpleCopyAndConstantPropagation(SgNode* top);
void changeAllMembersToPublic(SgNode* n);
void removeVariableDeclaration(SgInitializedName* initname);
//! Convert something like "int a = foo();" into "int a; a = foo();"
SgAssignOp* convertInitializerIntoAssignment(SgAssignInitializer* init);
//! Rewrites a while or for loop so that the official test is changed to
//! "true" and what had previously been the test is now an if-break
//! combination (with an inverted condition) at the beginning of the loop
//! body
void pushTestIntoBody(LoopStatement* loopStmt);
//programTransformation/finiteDifferencing/finiteDifferencing.h
//! Move variables declared in a for statement to just outside that statement.
void moveForDeclaredVariables(SgNode* root);
//------------------------ Is/Has functions ------------------------------
//------------------------------------------------------------------------
// misc. boolean functions
// some of them could moved to SgXXX class as a member function
bool isOverloaded (SgFunctionDeclaration * functionDeclaration);
bool isSwitchCond (const SgStatement* s);
bool isIfCond (const SgStatement* s);
bool isWhileCond (const SgStatement* s);
bool isStdNamespace (const SgScopeStatement* scope);
bool isTemplateInst (const SgDeclarationStatement* decl);
bool isCtor (const SgFunctionDeclaration* func);
bool isDtor (const SgFunctionDeclaration* func);
// src/midend/astInlining/typeTraits.h
bool hasTrivialDestructor(SgType* t);
ROSE_DLL_API bool isNonconstReference(SgType* t);
ROSE_DLL_API bool isReferenceType(SgType* t);
// generic ones, or move to the SgXXX class as a member function
bool isConst(SgNode* node); // const type, variable, function, etc.
// .... and more
bool isConstType (const SgType* type);
bool isConstFunction (const SgFunctionDeclaration* decl);
bool isMemberVariable(const SgInitializedName & var);
//bool isMemberVariable(const SgNode& in);
bool isPrototypeInScope (SgScopeStatement * scope,
SgFunctionDeclaration * functionDeclaration,
SgDeclarationStatement * startingAtDeclaration);
bool MayRedefined(SgExpression* expr, SgNode* root);
// bool isPotentiallyModified(SgExpression* expr, SgNode* root); // inlinderSupport.h
bool hasAddressTaken(SgExpression* expr, SgNode* root);
//src/midend/astInlining/inlinerSupport.C
// can also classified as topdown search
bool containsVariableReference(SgNode* root, SgInitializedName* var);
bool isDeclarationOf(SgVariableDeclaration* decl, SgInitializedName* var);
bool isPotentiallyModifiedDuringLifeOf(SgBasicBlock* sc,
SgInitializedName* toCheck,
SgInitializedName* lifetime)
//src/midend/programTransformation/partialRedundancyElimination/pre.h
bool anyOfListPotentiallyModifiedIn(const std::vector<SgVariableSymbol*>& syms, SgNode* n);
//------------------------ loop handling ---------------------------------
//------------------------------------------------------------------------
//get and set loop control expressions
// 0: init expr, 1: condition expr, 2: stride expr
SgExpression* getForLoopTripleValues(int valuetype,SgForStatement* forstmt );
int setForLoopTripleValues(int valuetype,SgForStatement* forstmt, SgExpression* exp);
bool isLoopIndexVarRef(SgForStatement* forstmt, SgVarRefExp *varref);
SgInitializedName * getLoopIndexVar(SgForStatement* forstmt);
//------------------------expressions-------------------------------------
//------------------------------------------------------------------------
//src/midend/programTransformation/partialRedundancyElimination/pre.h
int countComputationsOfExpressionIn(SgExpression* expr, SgNode* root);
//src/midend/astInlining/replaceExpressionWithStatement.h
void replaceAssignmentStmtWithStatement(SgExprStatement* from, StatementGenerator* to);
void replaceSubexpressionWithStatement(SgExpression* from,
StatementGenerator* to);
SgExpression* getRootOfExpression(SgExpression* n);
//--------------------------preprocessing info. -------------------------
//------------------------------------------------------------------------
//! Removes all preprocessing information at a given position.
void cutPreprocInfo (SgBasicBlock* b,
PreprocessingInfo::RelativePositionType pos,
AttachedPreprocessingInfoType& save_buf);
//! Pastes preprocessing information at the front of a statement.
void pastePreprocInfoFront (AttachedPreprocessingInfoType& save_buf,
SgStatement* s);
//! Pastes preprocessing information at the back of a statement.
void pastePreprocInfoBack (AttachedPreprocessingInfoType& save_buf,
SgStatement* s);
/*!
* \brief Moves 'before' preprocessing information.
* Moves all preprocessing information attached 'before' the source
* statement to the front of the destination statement.
*/
// a generic one for all
/// void movePreprocessingInfo(src, dest, RelativePositionType);
void moveBeforePreprocInfo (SgStatement* src, SgStatement* dest);
void moveInsidePreprocInfo (SgBasicBlock* src, SgBasicBlock* dest);
void moveAfterPreprocInfo (SgStatement* src, SgStatement* dest);
//--------------------------------operator--------------------------------
//------------------------------------------------------------------------
from transformationSupport.h, not sure if they should be included here
/* return enum code for SAGE operators */
operatorCodeType classifyOverloadedOperator(); // transformationSupport.h
/*! \brief generates a source code string from operator name.
This function returns a string representing the elementwise operator (for primative types)
that would be match that associated with the overloaded operator for a user-defined
abstractions (e.g. identifyOperator("operator+()") returns "+").
*/
std::string stringifyOperator (std::string name);
//--------------------------------macro ----------------------------------
//------------------------------------------------------------------------
std::string buildMacro ( std::string s ); //transformationSupport.h
//--------------------------------access functions---------------------------
//----------------------------------get/set sth.-----------------------------
// several categories:
* get/set a direct child/grandchild node or fields
* get/set a property flag value
* get a descendent child node using preorder searching
* get an ancestor node using bottomup/reverse searching
// SgName or string?
std::string getFunctionName (SgFunctionCallExp* functionCallExp);
std::string getFunctionTypeName ( SgFunctionCallExp* functionCallExpression );
// do we need them anymore? or existing member functions are enought?
// a generic one:
std::string get_name (const SgNode* node);
std::string get_name (const SgDeclarationStatement * declaration);
// get/set some property: should be moved to SgXXX as an inherent member function?
// access modifier
void setExtern (SgFunctionDeclartion*)
void clearExtern()
// similarly for other declarations and other properties
void setExtern (SgVariableDeclaration*)
void setPublic()
void setPrivate()
#endif
// DQ (1/23/2013): Added support for generated a set of source sequence entries.
std::set<unsigned int> collectSourceSequenceNumbers( SgNode* astNode );
//--------------------------------Type Traits (C++)---------------------------
bool HasNoThrowAssign(const SgType * const inputType);
bool HasNoThrowCopy(const SgType * const inputType);
bool HasNoThrowConstructor(const SgType * const inputType);
bool HasTrivialAssign(const SgType * const inputType);
bool HasTrivialCopy(const SgType * const inputType);
bool HasTrivialConstructor(const SgType * const inputType);
bool HasTrivialDestructor(const SgType * const inputType);
bool HasVirtualDestructor(const SgType * const inputType);
bool IsBaseOf(const SgType * const inputBaseType, const SgType * const inputDerivedType);
bool IsAbstract(const SgType * const inputType);
bool IsClass(const SgType * const inputType);
bool IsEmpty(const SgType * const inputType);
bool IsEnum(const SgType * const inputType);
bool IsPod(const SgType * const inputType);
bool IsPolymorphic(const SgType * const inputType);
bool IsStandardLayout(const SgType * const inputType);
bool IsLiteralType(const SgType * const inputType);
bool IsTrivial(const SgType * const inputType);
bool IsUnion(const SgType * const inputType);
SgType * UnderlyingType(SgType *type);
// DQ (3/2/2014): Added a new interface function (used in the snippet insertion support).
// void supportForInitializedNameLists ( SgScopeStatement* scope, SgInitializedNamePtrList & variableList );
// DQ (3/4/2014): Added support for testing two trees for equivalents using the AST iterators.
bool isStructurallyEquivalentAST( SgNode* tree1, SgNode* tree2 );
// JP (10/14/24): Moved code to evaluate a const integer expression (like in array size definitions) to SageInterface
/*! The datastructure is used as the return type for SageInterface::evaluateConstIntegerExpression(). One needs to always check whether hasValue_ is true before accessing value_ */
struct const_int_expr_t {
size_t value_; // Evaluated constant value; only meaningful when hasValue_ is true.
bool hasValue_; // True iff the expression could be evaluated to a constant.
};
/*! \brief The function tries to evaluate const integer expressions (such as are used in array dimension sizes). It follows variable symbols, and requires constness. */
struct const_int_expr_t evaluateConstIntegerExpression(SgExpression *expr);
// JP (9/17/14): Added function to test whether two SgType* are equivalent or not
bool checkTypesAreEqual(SgType *typeA, SgType *typeB);
//--------------------------------Java interface functions ---------------------
#ifdef ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
ROSE_DLL_API std::string getTempDirectory(SgProject *project);
ROSE_DLL_API void destroyTempDirectory(std::string);
ROSE_DLL_API SgFile *processFile(SgProject *, std::string, bool unparse = false);
ROSE_DLL_API std::string preprocessPackage(SgProject *, std::string);
ROSE_DLL_API std::string preprocessImport(SgProject *, std::string);
ROSE_DLL_API SgFile* preprocessCompilationUnit(SgProject *, std::string, std::string, bool unparse = true);
ROSE_DLL_API SgClassDefinition *findJavaPackage(SgScopeStatement *, std::string);
ROSE_DLL_API SgClassDefinition *findOrInsertJavaPackage(SgProject *, std::string, bool create_directory = false);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassDefinition *package_definition, std::string);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, std::string, std::string);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassType *);
ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassDefinition *);
ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassType *);
#endif // ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
// DQ (8/31/2016): Making this a template function so that we can have it work with user defined filters.
//! This function detects template instantiations that are relevant when filters are used.
/*!
EDG normalizes some in-class template functions and member functions to be redefined outside of a class. this causes the associated template instantiations
to be declared outside of the class, and to be marked as compiler generated (since the compiler generated form outside of the class declaration).
ROSE captures the function definitions, but in the new location (defined outside of the class declaration). This can confuse some simple tests
for template instantiations that are a part of definitions in a file, thus we have this function to detect this specific normalization.
*/
template < class T >
bool isTemplateInstantiationFromTemplateDeclarationSatisfyingFilter (SgFunctionDeclaration* function, T* filter )
{
  // DQ (9/1/2016): This function is called in the Call graph generation to avoid filtering out EDG normalized
  // function template instantiations (which come from normalized template functions and member functions).
  // Note that because of the EDG normalization the member function is moved outside of the class, and
  // thus marked as compiler generated. However the template instantiations are always marked as compiler
  // generated (if not specializations) and so we want to include a template instantiation that is marked
  // as compiler generated, but is from a template declaration that satisfied a specific user defined filter.
  // The complexity of this detection is isolated here, but knowing that it must be called is more complex.
  // This function is called in the CG.C file of tests/nonsmoke/functional/roseTests/programAnalysisTests/testCallGraphAnalysis.
  bool retval = false;

#define DEBUG_TEMPLATE_NORMALIZATION_DETECTION 0

#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
  printf ("In isNormalizedTemplateInstantiation(): function = %p = %s = %s \n",function,function->class_name().c_str(),function->get_name().str());
#endif

  // Test for this to be a template instantiation (in which case it was marked as
  // compiler generated but we may want to allow it to be used in the call graph,
  // if its template was defined in the current directory).
  SgTemplateInstantiationFunctionDecl* templateInstantiationFunction = isSgTemplateInstantiationFunctionDecl(function);
  SgTemplateInstantiationMemberFunctionDecl* templateInstantiationMemberFunction = isSgTemplateInstantiationMemberFunctionDecl(function);

  if (templateInstantiationFunction != NULL)
  {
    // When the defining function has been normalized by EDG, only the non-defining declaration will have a source position.
    templateInstantiationFunction = isSgTemplateInstantiationFunctionDecl(templateInstantiationFunction->get_firstNondefiningDeclaration());

    // The re-cast above can yield NULL (no first nondefining declaration, or one of a
    // different dynamic type); guard before dereferencing to avoid a NULL dereference.
    if (templateInstantiationFunction != NULL)
    {
      SgTemplateFunctionDeclaration* templateFunctionDeclaration = templateInstantiationFunction->get_templateDeclaration();
      if (templateFunctionDeclaration != NULL)
      {
        retval = filter->operator()(templateFunctionDeclaration);
      }
      // else: no associated template declaration — retval stays false.
    }

#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
    printf (" --- case of templateInstantiationFunction: retval = %s \n",retval ? "true" : "false");
#endif
  }
  else
  {
    if (templateInstantiationMemberFunction != NULL)
    {
      // When the defining function has been normalized by EDG, only the non-defining declaration will have a source position.
      templateInstantiationMemberFunction = isSgTemplateInstantiationMemberFunctionDecl(templateInstantiationMemberFunction->get_firstNondefiningDeclaration());

      // Same NULL guard as the non-member branch above.
      if (templateInstantiationMemberFunction != NULL)
      {
        SgTemplateMemberFunctionDeclaration* templateMemberFunctionDeclaration = templateInstantiationMemberFunction->get_templateDeclaration();
        if (templateMemberFunctionDeclaration != NULL)
        {
          retval = filter->operator()(templateMemberFunctionDeclaration);
        }
        // else: no associated template declaration — retval stays false.
      }

#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
      printf (" --- case of templateInstantiationMemberFunction: retval = %s \n",retval ? "true" : "false");
#endif
    }
  }

  return retval;
}
void detectCycleInType(SgType * type, const std::string & from);
}// end of namespace
#endif
|
test_mkl.h | #include <iostream>
#include <stdint.h>
#include <mkl_vsl.h>
// Fill ran[0..n) with uniform doubles in [0, 1) from a single MKL VSL stream.
//
//   ran    - output buffer, must hold at least n doubles
//   n      - number of random values to generate
//   method - VSL uniform-generation method (e.g. VSL_RNG_METHOD_UNIFORM_STD)
//   brng   - basic generator index (e.g. VSL_BRNG_MT19937)
//
// Exits the process on any VSL error. The seed is fixed so runs are
// reproducible. (Fix: cout/endl are now std::-qualified; this header has no
// using-directive, so the unqualified names failed standalone compilation.)
void mkl_rng(double *ran, const int n, const int method, const int brng)
{
    const double a = 0.0, b = 1.0;   // half-open output range [a, b)
    const uint32_t iseed = 13579;    // fixed seed: deterministic output
    VSLStreamStatePtr stream;
    int errcode = vslNewStream(&stream, brng, iseed);
    if (errcode != VSL_ERROR_OK) {
        std::cout << "Error Creating NewStream" << std::endl;
        exit(1);
    }
    errcode = vdRngUniform(method, stream, n, ran, a, b);
    if (errcode != VSL_ERROR_OK) {
        std::cout << "Error RNG Uniform" << std::endl;
        exit(1);
    }
    vslDeleteStream(&stream);
}
// Thread-parallel variant of mkl_rng(): each OpenMP thread creates its own
// stream with the same fixed seed, then skips ahead to its slice's offset so
// the combined output matches the serial generator's sequence.
//
//   ran    - output buffer, must hold at least n doubles
//   n      - total number of random values (split roughly evenly per thread)
//   method - VSL uniform-generation method
//   brng   - basic generator index (must support vslSkipAheadStream)
//
// Exits the process on any VSL error. (Fix: std::-qualified cout/endl; the
// header has no using-directive.)
void mkl_rng_tp(double *ran, const int n, const int method, const int brng)
{
    const double a = 0.0, b = 1.0;
    const uint32_t iseed = 13579;
    const int tnum = omp_get_max_threads();
#pragma omp parallel
    {
        int each_n = n / tnum;
        const int tid = omp_get_thread_num();
        const int start = each_n * tid;  // this thread's offset into ran and the stream
        if (tid == tnum - 1) {
            each_n = n - each_n * tid;   // last thread absorbs the remainder
        }
        VSLStreamStatePtr stream;
        int errcode = vslNewStream(&stream, brng, iseed);
        if (errcode != VSL_ERROR_OK) {
            std::cout << "Error Creating NewStream" << std::endl;
            exit(1);
        }
        // Position this thread's stream at its slice of the logical sequence.
        vslSkipAheadStream(stream, start);
        errcode = vdRngUniform(method, stream, each_n, ran + start, a, b);
        if (errcode != VSL_ERROR_OK) {
            std::cout << "Error RNG Uniform" << std::endl;
            exit(1);
        }
        vslDeleteStream(&stream);
    }
}
// Chunked thread-parallel RNG: each thread generates `it` chunks of `each_n`
// values. NOTE(review): every iteration writes to the same ran+offset slot,
// overwriting the previous chunk — presumably intentional benchmark behavior;
// preserved as-is.
//
//   ran    - output buffer (each thread touches only its each_n-sized slot)
//   each_n - chunk size per call
//   it     - number of chunks per thread
//   method - VSL uniform-generation method
//   brng   - basic generator index
//
// Fixes: the per-chunk error code was only inspected after the loop, so
// failures in all but the last chunk were silently ignored — it is now checked
// every iteration; removed the unused local `tnum`; std::-qualified cout/endl.
void mkl_rng_tp_small(double *ran, const int each_n, int it, const int method, const int brng)
{
    const double a = 0.0, b = 1.0;
    const uint32_t iseed = 13579;
#pragma omp parallel
    {
        const int tid = omp_get_thread_num();
        const int offset = each_n * tid;  // this thread's output slot
        const int start = offset * it;    // stream offset: `it` chunks per preceding thread
        VSLStreamStatePtr stream;
        int errcode = vslNewStream(&stream, brng, iseed);
        if (errcode != VSL_ERROR_OK) {
            std::cout << "Error Creating NewStream" << std::endl;
            exit(1);
        }
        vslSkipAheadStream(stream, start);
        for (int i = 0; i < it; ++i) {
            errcode = vdRngUniform(method, stream, each_n, ran + offset, a, b);
            if (errcode != VSL_ERROR_OK) {
                std::cout << "Error RNG Uniform" << std::endl;
                exit(1);
            }
        }
        vslDeleteStream(&stream);
    }
}
// Thread-parallel RNG generating each thread's slice in sub_n-sized chunks.
// NOTE(review): every chunk is written to the same ran+start location,
// overwriting the previous one — presumably intentional benchmark behavior;
// preserved as-is.
//
//   ran    - output buffer
//   n      - total value count, split n/tnum per thread
//   sub_n  - chunk size per vdRngUniform call
//   method - VSL uniform-generation method
//   brng   - basic generator index
//
// BUG FIX: the skip-ahead previously used `each_n * tid` AFTER each_n had
// been enlarged for the last thread, mispositioning that thread's stream
// whenever n % tnum != 0. It now skips by the pre-adjustment offset `start`,
// consistent with mkl_rng_tp(). Also: per-chunk error checking (previously
// only the last chunk's status was examined) and std::-qualified cout/endl.
void mkl_rng_tp_sub(double *ran, const int n, const int sub_n, const int method, const int brng)
{
    const double a = 0.0, b = 1.0;
    const uint32_t iseed = 13579;
    const int tnum = omp_get_max_threads();
#pragma omp parallel
    {
        int each_n = n / tnum;
        const int tid = omp_get_thread_num();
        const int start = each_n * tid;  // slice offset, from the unadjusted each_n
        const int itr = each_n / sub_n;  // chunk count (remainder values not generated)
        VSLStreamStatePtr stream;
        if (tid == tnum - 1) {
            each_n = n - each_n * tid;   // last thread absorbs the remainder
        }
        int errcode = vslNewStream(&stream, brng, iseed);
        if (errcode != VSL_ERROR_OK) {
            std::cout << "Error Creating NewStream" << std::endl;
            exit(1);
        }
        vslSkipAheadStream(stream, start);
        for (int i = 0; i < itr; ++i) {
            errcode = vdRngUniform(method, stream, sub_n, ran + start, a, b);
            if (errcode != VSL_ERROR_OK) {
                std::cout << "Error RNG Uniform" << std::endl;
                exit(1);
            }
        }
        vslDeleteStream(&stream);
    }
}
|
deconvolution_pack4to1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Deconvolution (transposed convolution) for MIPS MSA: input elements are
// packed 4 channels wide (pack4), output is unpacked scalar floats (pack1).
// Expects top_blob to be pre-allocated (its w/h/c are read, not computed).
//
//   bottom_blob           - input feature map, pack4 layout
//   weight_data_pack4to1  - weights reordered as [outch][channels][maxk][4]
//   bias_data             - optional per-output-channel bias (empty -> no bias)
//   kernel_/dilation_/stride_* - usual convolution geometry
//   activation_type/params - forwarded to activation_ss() per output value
//   opt                   - thread count for the OpenMP loop
static void deconvolution_pack4to1_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack4to1, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    // Effective (dilated) kernel footprint.
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
    const int maxk = kernel_w * kernel_h;
    // Mat converts to a raw pointer; NULL when bias_data is empty.
    const float* bias_data_ptr = bias_data;
    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                float sum = 0.f;
                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }
                // 4-lane accumulator for the packed input channels.
                v4f32 _sum = (v4f32)__msa_fill_w(0);
                // Weight base for output channel p: maxk taps x channels, 4 lanes each.
                const float* kptr = (const float*)weight_data_pack4to1 + maxk * channels * p * 4;
                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    for (int y = 0; y < kernel_h; y++)
                    {
                        // Map output row i back to the input row that contributes
                        // via kernel tap y; skip taps that fall off-grid or
                        // between strides.
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;
                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;
                        for (int x = 0; x < kernel_w; x++)
                        {
                            // Same mapping for columns.
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;
                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;
                            // pack4: each input pixel is 4 consecutive floats.
                            const float* sptr = m.row(sy) + sx * 4;
                            int k = y * kernel_w + x;
                            v4f32 _val = (v4f32)__msa_ld_w(sptr, 0);
                            v4f32 _w = (v4f32)__msa_ld_w(kptr + k * 4, 0);
                            _sum = __msa_fmadd_w(_sum, _val, _w);
                        }
                    }
                    kptr += maxk * 4;
                }
                // Fold the 4 packed lanes into the scalar accumulator.
                sum += __msa_fhadd_w(_sum);
                sum = activation_ss(sum, activation_type, activation_params);
                outptr[j] = sum;
            }
            outptr += outw;
        }
    }
}
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distribute-cache-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
typedef struct _MagickModulo
{
ssize_t
quotient,
remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const Quantum
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static const void
*GetVirtualMetacontentFromCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *,
ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,Quantum *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
OpenPixelCacheOnDisk(CacheInfo *,const MapMode),
ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
ReadPixelCacheMetacontent(CacheInfo *magick_restrict,
NexusInfo *magick_restrict,ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *);
static Quantum
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *,const MapMode,
const RectangleInfo *,NexusInfo *,ExceptionInfo *) magick_hot_spot;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static void
CopyOpenCLBuffer(CacheInfo *magick_restrict);
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
static volatile MagickBooleanType
instantiate_cache = MagickFalse;
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
static time_t
cache_epoch = 0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *magick_restrict info;

  char
    *synchronize;

  size_t
    threads;

  /*
    Allocate one zero-initialized CacheInfo; unset fields read as 0/NULL.
  */
  info=(CacheInfo *) AcquireQuantumMemory(1,sizeof(*info));
  if (info == (CacheInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(info,0,sizeof(*info));
  info->type=UndefinedCache;
  info->mode=IOMode;
  info->colorspace=sRGBColorspace;
  info->file=(-1);
  info->id=GetMagickThreadId();
  /*
    Thread count is the largest of: the caller's request, the OpenMP maximum,
    and the thread resource limit -- but never zero.
  */
  threads=number_threads;
  if (GetOpenMPMaximumThreads() > threads)
    threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > threads)
    threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (threads == 0)
    threads=1;
  info->number_threads=threads;
  info->nexus_info=AcquirePixelCacheNexus(threads);
  if (info->nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    MAGICK_SYNCHRONIZE in the environment enables synchronous disk writes.
  */
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  info->semaphore=AcquireSemaphoreInfo();
  info->reference_count=1;
  info->file_semaphore=AcquireSemaphoreInfo();
  info->debug=IsEventLogging();
  info->signature=MagickCoreSignature;
  return((Cache ) info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **magick_restrict nexus_info;

  register ssize_t
    n;

  /*
    One pointer per thread, all pointing into a single contiguous slab of
    NexusInfo structures allocated behind nexus_info[0].
  */
  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  nexus_info[0]=(NexusInfo *) AcquireQuantumMemory(number_threads,
    sizeof(**nexus_info));
  if (nexus_info[0] == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(nexus_info[0],0,number_threads*sizeof(**nexus_info));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    nexus_info[n]=nexus_info[0]+n;
    nexus_info[n]->signature=MagickCoreSignature;
  }
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% const void *AcquirePixelCachePixels(const Image *image,
% MagickSizeType *length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate const void *AcquirePixelCachePixels(const Image *image,
  MagickSizeType *length,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  *length=0;
  /*
    Pixels are directly addressable only for heap- or mmap-backed caches.
  */
  if ((info->type == MemoryCache) || (info->type == MapCache))
    {
      *length=info->length;
      return((const void *) info->pixels);
    }
  return((const void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickPrivate MagickBooleanType CacheComponentGenesis(void)
{
  /*
    Create the module-wide cache semaphore on first use; idempotent.
  */
  if (cache_semaphore != (SemaphoreInfo *) NULL)
    return(MagickTrue);
  cache_semaphore=AcquireSemaphoreInfo();
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% CacheComponentTerminus(void)
%
*/
MagickPrivate void CacheComponentTerminus(void)
{
  /* The semaphore may not exist yet (genesis never ran); activate it so the
     lock below is safe. */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&cache_semaphore);
  /* Clear the instantiation flag under the lock, then destroy the semaphore
     itself -- this ordering must be preserved. */
  LockSemaphoreInfo(cache_semaphore);
  instantiate_cache=MagickFalse;
  UnlockSemaphoreInfo(cache_semaphore);
  RelinquishSemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
  const CacheInfo
    *magick_restrict source;

  CacheInfo
    *magick_restrict clone;

  assert(cache != NULL);
  source=(const CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  if (source->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",source->filename);
  /*
    The clone starts life as a fresh cache; only the virtual pixel method is
    carried over from the source.
  */
  clone=(CacheInfo *) AcquirePixelCache(source->number_threads);
  if (clone == (Cache) NULL)
    return((Cache) NULL);
  clone->virtual_pixel_method=source->virtual_pixel_method;
  return((Cache ) clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict destination,
    *magick_restrict source;

  /*
    Copy the method table of `cache` into `clone`.
  */
  assert(clone != (Cache) NULL);
  destination=(CacheInfo *) clone;
  assert(destination->signature == MagickCoreSignature);
  if (destination->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      destination->filename);
  assert(cache != (Cache) NULL);
  source=(CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  destination->methods=source->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
% CacheInfo *source_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o source_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  MagickSizeType
    copied;

  size_t
    chunk;

  ssize_t
    bytes_read;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Copy the disk-backed source cache byte-for-byte into the clone's file;
    both caches have identical morphology.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  /*
    Size the copy buffer from the source file when its length is known.
  */
  chunk=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    chunk=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
  buffer=(unsigned char *) AcquireQuantumMemory(chunk,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  copied=0;
  while ((bytes_read=read(cache_info->file,buffer,chunk)) > 0)
  {
    ssize_t
      bytes_written;

    bytes_written=write(clone_info->file,buffer,(size_t) bytes_read);
    if (bytes_written != bytes_read)
      break;
    copied+=bytes_written;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  /*
    Succeed only when the entire cache made it across.
  */
  return(copied == cache_info->length ? MagickTrue : MagickFalse);
}
/*
  Clone the pixels (and metacontent) of cache_info into clone_info.  Fast
  paths handle identical morphology (straight memcpy, or disk-to-disk copy);
  otherwise pixels are transferred row by row, remapping channels when the
  channel maps differ.

  FIX: four occurrences of the HTML-entity mojibake `®ion` are restored to
  `&region,` (the address of the local RectangleInfo), matching the
  SetPixelCacheNexusPixels() prototype; without this the file does not
  compile.  All other logic is unchanged.
*/
static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
  ExceptionInfo *exception)
{
#define MaxCacheThreads 2
#define cache_threads(source,destination) \
  num_threads(((source)->type == DiskCache) || \
    ((destination)->type == DiskCache) || (((source)->rows) < \
    (16*GetMagickResourceLimit(ThreadResource))) ? 1 : \
    GetMagickResourceLimit(ThreadResource) < MaxCacheThreads ? \
    GetMagickResourceLimit(ThreadResource) : MaxCacheThreads)

  MagickBooleanType
    optimize,
    status;

  NexusInfo
    **magick_restrict cache_nexus,
    **magick_restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  /* A ping cache carries no pixels; nothing to clone. */
  if (cache_info->type == PingCache)
    return(MagickTrue);
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  if ((cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->number_channels == clone_info->number_channels) &&
      (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
      (cache_info->metacontent_extent == clone_info->metacontent_extent))
    {
      /*
        Identical pixel cache morphology.
      */
      if (((cache_info->type == MemoryCache) ||
           (cache_info->type == MapCache)) &&
          ((clone_info->type == MemoryCache) ||
           (clone_info->type == MapCache)))
        {
          (void) memcpy(clone_info->pixels,cache_info->pixels,
            cache_info->columns*cache_info->number_channels*cache_info->rows*
            sizeof(*cache_info->pixels));
          if ((cache_info->metacontent_extent != 0) &&
              (clone_info->metacontent_extent != 0))
            (void) memcpy(clone_info->metacontent,cache_info->metacontent,
              cache_info->columns*cache_info->rows*
              clone_info->metacontent_extent*sizeof(unsigned char));
          return(MagickTrue);
        }
      if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
        return(ClonePixelCacheOnDisk(cache_info,clone_info));
    }
  /*
    Mismatched pixel cache morphology: copy row by row through nexus buffers.
  */
  cache_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
  clone_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
  if ((cache_nexus == (NexusInfo **) NULL) ||
      (clone_nexus == (NexusInfo **) NULL))
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  /* Matching channel maps allow a straight per-row memcpy. */
  optimize=(cache_info->number_channels == clone_info->number_channels) &&
    (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
    MagickTrue : MagickFalse;
  length=(size_t) MagickMin(cache_info->columns*cache_info->number_channels,
    clone_info->columns*clone_info->number_channels);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    cache_threads(cache_info,clone_info)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *pixels;

    RectangleInfo
      region;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    if (y >= (ssize_t) clone_info->rows)
      continue;
    /* Stage one source row ... */
    region.width=cache_info->columns;
    region.height=1;
    region.x=0;
    region.y=y;
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,
      cache_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    /* ... then the matching destination row. */
    region.width=clone_info->columns;
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,
      clone_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    (void) ResetMagickMemory(clone_nexus[id]->pixels,0,(size_t)
      clone_nexus[id]->length);
    if (optimize != MagickFalse)
      (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
        sizeof(Quantum));
    else
      {
        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        /*
          Mismatched pixel channel map: remap channel by channel.
        */
        p=cache_nexus[id]->pixels;
        q=clone_nexus[id]->pixels;
        for (x=0; x < (ssize_t) cache_info->columns; x++)
        {
          register ssize_t
            i;

          if (x == (ssize_t) clone_info->columns)
            break;
          for (i=0; i < (ssize_t) clone_info->number_channels; i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=clone_info->channel_map[i].channel;
            traits=cache_info->channel_map[channel].traits;
            if (traits != UndefinedPixelTrait)
              *q=*(p+cache_info->channel_map[channel].offset);
            q++;
          }
          p+=cache_info->number_channels;
        }
      }
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->metacontent_extent != 0) &&
      (clone_info->metacontent_extent != 0))
    {
      /*
        Clone metacontent.
      */
      length=(size_t) MagickMin(cache_info->metacontent_extent,
        clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    cache_threads(cache_info,clone_info)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        Quantum
          *pixels;

        RectangleInfo
          region;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        region.width=cache_info->columns;
        region.height=1;
        region.x=0;
        region.y=y;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,
          cache_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        region.width=clone_info->columns;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,
          clone_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        if ((clone_nexus[id]->metacontent != (void *) NULL) &&
            (cache_nexus[id]->metacontent != (void *) NULL))
          (void) memcpy(clone_nexus[id]->metacontent,
            cache_nexus[id]->metacontent,length*sizeof(unsigned char));
        status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
      }
    }
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,MaxCacheThreads);
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,MaxCacheThreads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Release the image's cache reference, if it has one.
  */
  if (image->cache != (void *) NULL)
    image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  /*
    Delegate to an installed destroy handler when present; otherwise release
    the cache reference directly.
  */
  if (info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL)
    info->methods.destroy_pixel_handler(image);
  else
    image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    result;

  /*
    Nothing open: report failure, matching the original's untouched -1 status.
  */
  if (cache_info->file == -1)
    return(MagickFalse);
  result=close(cache_info->file);
  cache_info->file=(-1);
  RelinquishMagickResource(FileResource,1);
  return(result == -1 ? MagickFalse : MagickTrue);
}
/* Release the pixel storage backing cache_info, per backing type, and reset
   the cache to an undefined state.  The matching resource accounting entry
   (memory, map, disk) is relinquished alongside the storage. */
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      /* OpenCL-backed memory is owned by the CL cache info; let it free. */
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      /* "Memory" caches may still be anonymous-mapped; free accordingly. */
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=RelinquishAlignedMemory(cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      if (cache_info->mode != ReadMode)
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /* fall through: a memory-mapped cache is also backed by a disk file that
       must be closed and removed (intentional -- do not add a break). */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if (cache_info->mode != ReadMode)
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  /* Leave the cache in a clean, reusable "no storage" state. */
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}
/* Drop one reference to the cache; when the last reference is released, free
   the pixel storage, all nested resources, and the CacheInfo itself.  Always
   returns NULL so callers can write `cache=DestroyPixelCache(cache);`. */
MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /* Decrement under the lock; only the thread that drops the count to zero
     proceeds to tear down (other holders returned early above). */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Order matters: release pixel storage first, then dependent structures,
     then the semaphores, and the CacheInfo itself last. */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  /* Invert the signature so any later use of the freed struct is caught by
     the signature asserts. */
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishMagickMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
% NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  void
    *staging;

  /*
    Release the nexus staging area: unmap it when it was memory-mapped,
    otherwise free the aligned heap allocation, then reset bookkeeping.
  */
  staging=(void *) nexus_info->cache;
  if (nexus_info->mapped != MagickFalse)
    (void) UnmapBlob(staging,(size_t) nexus_info->length);
  else
    (void) RelinquishAlignedMemory(staging);
  nexus_info->mapped=MagickFalse;
  nexus_info->length=0;
  nexus_info->metacontent=(void *) NULL;
  nexus_info->pixels=(Quantum *) NULL;
  nexus_info->cache=(Quantum *) NULL;
}
MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  register ssize_t
    i;

  assert(nexus_info != (NexusInfo **) NULL);
  /*
    Release each thread's staged pixels and poison the signature so any
    later use of a stale nexus pointer trips the signature asserts.
  */
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    if (nexus_info[i]->cache != (Quantum *) NULL)
      RelinquishCacheNexusPixels(nexus_info[i]);
    nexus_info[i]->signature=(~MagickCoreSignature);
  }
  /*
    Only element 0 is freed: the NexusInfo structs appear to be carved from
    one contiguous allocation anchored at nexus_info[0] — NOTE(review):
    confirm against AcquirePixelCacheNexus before changing this.
  */
  nexus_info[0]=(NexusInfo *) RelinquishMagickMemory(nexus_info[0]);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontent() returns the authentic metacontent corresponding
% with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the associated pixels are not available.
%
% The format of the GetAuthenticMetacontent() method is:
%
% void *GetAuthenticMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void *GetAuthenticMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the metacontent staged by the most recent authentic pixel
    request, delegating to a registered cache-method handler when one is
    installed.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_metacontent_from_handler ==
      (GetAuthenticMetacontentFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->metacontent);
    }
  return(cache_info->methods.get_authentic_metacontent_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontentFromCache() returns the meta-content corresponding
% with the last call to QueueAuthenticPixelsCache() or
% GetAuthenticPixelsCache().
%
% The format of the GetAuthenticMetacontentFromCache() method is:
%
% void *GetAuthenticMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void *GetAuthenticMetacontentFromCache(const Image *image)
{
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict cache_info;

  /*
    Default cache method: hand back the metacontent staged in the calling
    thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[thread_id]->metacontent);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL
% operations.
%
% The format of the GetAuthenticOpenCLBuffer() method is:
%
% cl_mem GetAuthenticOpenCLBuffer(const Image *image,
% MagickCLDevice device,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o device: the device to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  MagickCLDevice device,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return an OpenCL buffer backed by the image's pixel cache, acquiring
    one on demand.  NULL is returned when the cache cannot be expressed as
    an OpenCL buffer.

    (Removed unused local `cl_int status` — it was declared but never read
    or written anywhere in this function.)
  */
  assert(image != (const Image *) NULL);
  assert(device != (const MagickCLDevice) NULL);
  cache_info=(CacheInfo *) image->cache;
  /* Force the cache open if it has never been initialized. */
  if (cache_info->type == UndefinedCache)
    SyncImagePixelCache((Image *) image,exception);
  /* Only an unmapped in-memory cache can back an OpenCL buffer. */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  /* A buffer bound to a different device context cannot be reused as-is. */
  if ((cache_info->opencl != (MagickCLCacheInfo) NULL) &&
      (cache_info->opencl->device->context != device->context))
    cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    {
      assert(cache_info->pixels != (Quantum *) NULL);
      cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels,
        cache_info->length);
      if (cache_info->opencl == (MagickCLCacheInfo) NULL)
        return((cl_mem) NULL);
    }
  assert(cache_info->opencl->pixels == cache_info->pixels);
  return(cache_info->opencl->buffer);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict pixels;

  /*
    Transfer pixels from the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /* Map the requested region into the nexus first; on failure there is
     nothing to read. */
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((Quantum *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* When the nexus aliases the authentic cache directly, no copy is
     needed — return the in-place pixels. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(pixels);
  /* Otherwise stage the pixels, then the metacontent (if the cache has
     any), into the nexus; either read failing aborts the request. */
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((Quantum *) NULL);
  if (cache_info->metacontent_extent != 0)
    if (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse)
      return((Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
%   Quantum *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict cache_info;

  /*
    Default cache method: return the pixels staged in the calling thread's
    cache nexus by the last queue/get request.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[thread_id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated
% corresponding with the last call to QueueAuthenticPixels() or
% GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
%   Quantum *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  GetAuthenticPixelsFromHandler
    handler;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the pixels staged by the most recent queue/get of authentic
    pixels, delegating to an installed cache-method handler when present.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  handler=cache_info->methods.get_authentic_pixels_from_handler;
  if (handler != (GetAuthenticPixelsFromHandler) NULL)
    return(handler(image));
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a Quantum array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image has corresponding metacontent,call
% GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
% meta-content corresponding to the region. Once the Quantum array has
% been updated, the changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  GetAuthenticPixelsHandler
    handler;

  const int
    id = GetOpenMPThreadId();

  /*
    Obtain a read/write pixel region: delegate to an installed handler when
    one is registered, otherwise service the request from the calling
    thread's cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  handler=cache_info->methods.get_authentic_pixels_handler;
  if (handler != (GetAuthenticPixelsHandler) NULL)
    return(handler(image,x,y,columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict cache_info;

  /*
    Default cache method: fetch the requested region through the calling
    thread's cache nexus.  The explicit NULL check is defensive for
    release builds where the asserts compile away.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (CacheInfo *) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated corresponding
% with the last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Report the extent of the pixels associated with the last
    QueueAuthenticPixels()/GetAuthenticPixels() call on this thread.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(GetPixelCacheNexusExtent(cache_info,
    cache_info->nexus_info[thread_id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Confirm the image still agrees with its pixel cache: any mismatch in
    storage class, colorspace, masks, geometry, or channel layout — or a
    missing nexus array — means the cache must be reopened.  Checks are
    performed in the same order as the original short-circuit chain.
  */
  cache_info=(CacheInfo *) image->cache;
  if (image->storage_class != cache_info->storage_class)
    return(MagickFalse);
  if (image->colorspace != cache_info->colorspace)
    return(MagickFalse);
  if (image->alpha_trait != cache_info->alpha_trait)
    return(MagickFalse);
  if (image->read_mask != cache_info->read_mask)
    return(MagickFalse);
  if (image->write_mask != cache_info->write_mask)
    return(MagickFalse);
  if ((image->columns != cache_info->columns) ||
      (image->rows != cache_info->rows))
    return(MagickFalse);
  if (image->number_channels != cache_info->number_channels)
    return(MagickFalse);
  if (memcmp(image->channel_map,cache_info->channel_map,
      image->number_channels*sizeof(*image->channel_map)) != 0)
    return(MagickFalse);
  if (image->metacontent_extent != cache_info->metacontent_extent)
    return(MagickFalse);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  /*
    Ensure this image holds the only writable reference to its pixel
    cache, cloning the cache first when it is shared or read-only.
  */
  status=MagickTrue;
  /* Optional CPU throttle: delay on every 32nd call when the
     ThrottleResource limit is configured. */
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_timelimit=GetMagickResourceLimit(TimeResource);
      cache_epoch=time((time_t *) NULL);
    }
  /* Abort with a fatal exception once the TimeResource budget is spent. */
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (time((time_t *) NULL)-cache_epoch) >= cache_timelimit))
    {
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /* Double-checked under the cache semaphore: clone when the cache is
     shared (reference_count > 1) or was opened read-only. */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.
          */
          clone_image=(*image);
          clone_image.semaphore=AcquireSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status != MagickFalse)
            {
              /* Copy the pixel data itself only when the caller asked. */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status != MagickFalse)
                {
                  /* NOTE(review): nexus_info is detached before the old
                     cache is destroyed, presumably so teardown does not
                     free nexuses still in use — confirm against
                     DestroyPixelCache before altering. */
                  if (cache_info->reference_count == 1)
                    cache_info->nexus_info=(NexusInfo **) NULL;
                  destroy=MagickTrue;
                  image->cache=clone_image.cache;
                }
            }
          RelinquishSemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  /* Drop our reference to the superseded cache outside its semaphore. */
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->type == DiskCache)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MemoryCache, MapCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  /*
    Report the backing-store type recorded in the image's pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  return(info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
%   MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
%     const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType CopyPixel(const Image *image,
  const Quantum *source,Quantum *destination)
{
  register ssize_t
    n;

  /*
    Scatter the source channels into the destination pixel.  A NULL source
    fills the destination with the image background color and reports
    failure so callers can distinguish the two cases.
  */
  if (source != (const Quantum *) NULL)
    {
      for (n=0; n < (ssize_t) GetPixelChannels(image); n++)
        destination[GetPixelChannelChannel(image,n)]=source[n];
      return(MagickTrue);
    }
  destination[RedPixelChannel]=ClampToQuantum(image->background_color.red);
  destination[GreenPixelChannel]=ClampToQuantum(image->background_color.green);
  destination[BluePixelChannel]=ClampToQuantum(image->background_color.blue);
  destination[BlackPixelChannel]=ClampToQuantum(image->background_color.black);
  destination[AlphaPixelChannel]=ClampToQuantum(image->background_color.alpha);
  return(MagickFalse);
}
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    handler;

  register Quantum
    *magick_restrict q;

  /*
    Fetch one authentic pixel at (x,y).  The result is zeroed up front so
    callers always receive defined channel values, then the request is
    dispatched to an installed handler or the default cache path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  handler=cache_info->methods.get_one_authentic_pixel_from_handler;
  if (handler != (GetOneAuthenticPixelFromHandler) NULL)
    return(handler(image,x,y,pixel,exception));
  q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
%   MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
%     const ssize_t x,const ssize_t y,Quantum *pixel,
%     ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict cache_info;

  register Quantum
    *magick_restrict authentic_pixels;

  /*
    Default cache method: read one authentic pixel through the calling
    thread's cache nexus, zero-filling the result first.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  authentic_pixels=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,
    cache_info->nexus_info[thread_id],exception);
  return(CopyPixel(image,authentic_pixels,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
%   MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
%     const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneVirtualPixelFromHandler
    handler;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *virtual_pixels;

  /*
    Fetch one virtual pixel at (x,y) using the image's current virtual
    pixel method.  The result is zeroed up front, then the request is
    dispatched to an installed handler or the per-thread nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  handler=cache_info->methods.get_one_virtual_pixel_from_handler;
  if (handler != (GetOneVirtualPixelFromHandler) NULL)
    return(handler(image,GetPixelCacheVirtualMethod(image),x,y,pixel,
      exception));
  assert(id < (int) cache_info->number_threads);
  virtual_pixels=GetVirtualPixelsFromNexus(image,
    GetPixelCacheVirtualMethod(image),x,y,1UL,1UL,cache_info->nexus_info[id],
    exception);
  return(CopyPixel(image,virtual_pixels,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
%   MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
%     const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
%     Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict cache_info;

  const Quantum
    *virtual_pixels;

  /*
    Default cache method: resolve one virtual pixel through the calling
    thread's cache nexus, zero-filling the result first.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  virtual_pixels=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,
    1UL,cache_info->nexus_info[thread_id],exception);
  return(CopyPixel(image,virtual_pixels,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixelInfo() method is:
%
%   MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
%     const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%     const ssize_t y,PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict cache_info;

  register const Quantum
    *magick_restrict virtual_pixels;

  /*
    Fetch one virtual pixel at (x,y) and convert it to a PixelInfo.  The
    result is first initialized from the image (GetPixelInfo), so the
    caller still receives defined values when the nexus read fails.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  GetPixelInfo(image,pixel);
  virtual_pixels=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,
    1UL,cache_info->nexus_info[thread_id],exception);
  if (virtual_pixels == (const Quantum *) NULL)
    return(MagickFalse);
  GetPixelInfoPixel(image,virtual_pixels,pixel);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
% Colorspace GetPixelCacheColorspace(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  CacheInfo
    *magick_restrict info;
  /*
    Report the colorspace recorded in the pixel cache.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Populate the cache-methods vtable with the default pixel-cache handlers.
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) ResetMagickMemory(cache_methods,0,sizeof(*cache_methods));
  /* Virtual (read-only) pixel access. */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  /* Authentic (writable) pixel access. */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  /* Queueing, synchronization, and teardown. */
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels associated
% corresponding with the last call to SetPixelCacheNexusPixels() or
% GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickSizeType
    extent;
  /*
    Extent, in pixels, of the region currently selected in the nexus; when no
    region has been selected yet (width*height == 0), fall back to the full
    image extent.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent != 0)
    return(extent);
  return((MagickSizeType) cache_info->columns*cache_info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  /*
    Expose the raw pixel staging area of an in-core cache.  *length receives
    the cache length in bytes; it is 0 (and NULL is returned) when the pixels
    are not directly addressable (e.g. a disk cache).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=0;
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    {
      *length=cache_info->length;
      return((void *) cache_info->pixels);
    }
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache)
{
  CacheInfo
    *magick_restrict info;
  /*
    Report the storage class (DirectClass or PseudoClass) of the pixel cache.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimize cache tile width in pixels.
%
% o height: the optimize cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  CacheInfo
    *magick_restrict cache_info;
  size_t
    extent;
  /*
    Suggest a square tile size for cache traversal.  Disk-backed caches get a
    larger byte budget per tile row to amortize I/O.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=(GetImagePixelCacheType(image) == DiskCache) ? 8192UL : 2048UL;
  *width=extent/(cache_info->number_channels*sizeof(Quantum));
  *height=(*width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  CacheInfo
    *magick_restrict info;
  /*
    Report the virtual-pixel method currently configured for the image's
    pixel cache (how out-of-bounds accesses are resolved).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  return(info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromCache() returns the meta-content corresponding with
% the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualMetacontentFromCache() method is:
%
% void *GetVirtualMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const void *GetVirtualMetacontentFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    id = GetOpenMPThreadId();
  /*
    Meta-content associated with this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromNexus() returns the meta-content for the specified
% cache nexus.
%
% The format of the GetVirtualMetacontentFromNexus() method is:
%
% const void *GetVirtualMetacontentFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;
  /*
    Meta-content of the given cache nexus; NULL when the cache has not been
    allocated yet (UndefinedClass).
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class != UndefinedClass)
    return(nexus_info->metacontent);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontent() returns the virtual metacontent corresponding with
% the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the meta-content are not available.
%
% The format of the GetVirtualMetacontent() method is:
%
% const void *GetVirtualMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const void
*magick_restrict metacontent;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image);
if (metacontent != (void *) NULL)
return(metacontent);
assert(id < (int) cache_info->number_threads);
metacontent=GetVirtualMetacontentFromNexus(cache_info,
cache_info->nexus_info[id]);
return(metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsFromNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelsFromNexus() method is:
%
% Quantum *GetVirtualPixelsFromNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  8x8 ordered-dither (Bayer-style) offset table used to jitter virtual pixel
  coordinates for DitherVirtualPixelMethod.  Values span 0..63; the lookups
  below subtract 32 to center the jitter around zero.
*/
static ssize_t
  DitherMatrix[64] =
  {
     0, 48, 12, 60,  3, 51, 15, 63,
    32, 16, 44, 28, 35, 19, 47, 31,
     8, 56,  4, 52, 11, 59,  7, 55,
    40, 24, 36, 20, 43, 27, 39, 23,
     2, 50, 14, 62,  1, 49, 13, 61,
    34, 18, 46, 30, 33, 17, 45, 29,
    10, 58,  6, 54,  9, 57,  5, 53,
    42, 26, 38, 22, 41, 25, 37, 21
  };
static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    offset;
  /* Jitter the column by the dither table (bias -32), then clamp. */
  offset=x+DitherMatrix[x & 0x07]-32L;
  if (offset < 0L)
    return(0L);
  if (offset >= (ssize_t) columns)
    return((ssize_t) columns-1L);
  return(offset);
}
static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    offset;
  /* Jitter the row by the dither table (bias -32), then clamp. */
  offset=y+DitherMatrix[y & 0x07]-32L;
  if (offset < 0L)
    return(0L);
  if (offset >= (ssize_t) rows)
    return((ssize_t) rows-1L);
  return(offset);
}
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  /* Replicate-edge addressing: clamp a column offset into [0, columns-1]. */
  return(x < 0L ? 0L : x >= (ssize_t) columns ? (ssize_t) (columns-1) : x);
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  /* Replicate-edge addressing: clamp a row offset into [0, rows-1]. */
  return(y < 0L ? 0L : y >= (ssize_t) rows ? (ssize_t) (rows-1) : y);
}
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  double
    value;
  /* Map a pseudo-random value (presumably in [0,1) -- see RandomInfo) onto
     the column range. */
  value=GetPseudoRandomValue(random_info);
  return((ssize_t) (columns*value));
}
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  double
    value;
  /* Map a pseudo-random value (presumably in [0,1) -- see RandomInfo) onto
     the row range. */
  value=GetPseudoRandomValue(random_info);
  return((ssize_t) (rows*value));
}
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    modulo;
  /*
    Compute the remainder of dividing offset by extent. It returns not only
    the quotient (tile the offset falls in) but also the positive remainder
    within that tile such that 0 <= remainder < extent. This method is
    essentially a ldiv() using a floored modulo division rather than the
    normal default truncated modulo division.
    NOTE(review): when offset is negative and an exact multiple of extent,
    the unconditional quotient-- below yields remainder == extent (not 0);
    confirm the tiling callers expect this boundary behavior.
  */
  modulo.quotient=offset/(ssize_t) extent;
  if (offset < 0L)
    modulo.quotient--;
  modulo.remainder=offset-modulo.quotient*(ssize_t) extent;
  return(modulo);
}
MagickPrivate const Quantum *GetVirtualPixelsFromNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickOffsetType
    offset;
  MagickSizeType
    length,
    number_pixels;
  NexusInfo
    **magick_restrict virtual_nexus;
  Quantum
    *magick_restrict pixels,
    virtual_pixel[MaxPixelChannels];
  RectangleInfo
    region;
  register const Quantum
    *magick_restrict p;
  register const void
    *magick_restrict r;
  register Quantum
    *magick_restrict q;
  register ssize_t
    i,
    u;
  register unsigned char
    *magick_restrict s;
  ssize_t
    v;
  void
    *magick_restrict virtual_metacontent;
  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const Quantum *) NULL);
  region.x=x;
  region.y=y;
  region.width=columns;
  region.height=rows;
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,nexus_info,
    exception);
  if (pixels == (Quantum *) NULL)
    return((const Quantum *) NULL);
  q=pixels;
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;
        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(q);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const Quantum *) NULL);
        if (cache_info->metacontent_extent != 0)
          {
            status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const Quantum *) NULL);
          }
        return(q);
      }
  /*
    Pixel request is outside cache extents.
  */
  s=(unsigned char *) nexus_info->metacontent;
  virtual_nexus=AcquirePixelCacheNexus(1);
  if (virtual_nexus == (NexusInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "UnableToGetCacheNexus","`%s'",image->filename);
      return((const Quantum *) NULL);
    }
  (void) ResetMagickMemory(virtual_pixel,0,cache_info->number_channels*
    sizeof(*virtual_pixel));
  virtual_metacontent=(void *) NULL;
  /*
    For constant-fill virtual-pixel policies, precompute the fill pixel (and
    a zeroed metacontent buffer) once, outside the transfer loop.
  */
  switch (virtual_pixel_method)
  {
    case BackgroundVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
    case HorizontalTileVirtualPixelMethod:
    case VerticalTileVirtualPixelMethod:
    {
      if (cache_info->metacontent_extent != 0)
        {
          /*
            Acquire a metacontent buffer.
          */
          virtual_metacontent=(void *) AcquireQuantumMemory(1,
            cache_info->metacontent_extent);
          if (virtual_metacontent == (void *) NULL)
            {
              virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1);
              (void) ThrowMagickException(exception,GetMagickModule(),
                CacheError,"UnableToGetCacheNexus","`%s'",image->filename);
              return((const Quantum *) NULL);
            }
          (void) ResetMagickMemory(virtual_metacontent,0,
            cache_info->metacontent_extent);
        }
      switch (virtual_pixel_method)
      {
        case BlackVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,0,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case GrayVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange/2,
              virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case TransparentVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,0,virtual_pixel);
          SetPixelAlpha(image,TransparentAlpha,virtual_pixel);
          break;
        }
        case MaskVirtualPixelMethod:
        case WhiteVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        default:
        {
          /* BackgroundVirtualPixelMethod and the tile methods fill with the
             image background color. */
          SetPixelRed(image,ClampToQuantum(image->background_color.red),
            virtual_pixel);
          SetPixelGreen(image,ClampToQuantum(image->background_color.green),
            virtual_pixel);
          SetPixelBlue(image,ClampToQuantum(image->background_color.blue),
            virtual_pixel);
          SetPixelBlack(image,ClampToQuantum(image->background_color.black),
            virtual_pixel);
          SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha),
            virtual_pixel);
          break;
        }
      }
      break;
    }
    default:
      break;
  }
  /*
    Transfer the region row by row.  In-bounds spans are copied as runs; each
    out-of-bounds coordinate is resolved one pixel at a time per the
    virtual-pixel policy.
  */
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;
    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;
      x_offset=x+u;
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;
          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case EdgeVirtualPixelMethod:
            default:
            {
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              /* Lazily create the random source on first use. */
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                *virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /* Odd tiles are reflected about the image edge. */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                *virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                *virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case BackgroundVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              p=virtual_pixel;
              r=virtual_metacontent;
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
          }
          if (p == (const Quantum *) NULL)
            break;
          (void) memcpy(q,p,(size_t) length*cache_info->number_channels*
            sizeof(*p));
          q+=cache_info->number_channels;
          if ((s != (void *) NULL) && (r != (const void *) NULL))
            {
              (void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
              s+=cache_info->metacontent_extent;
            }
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,*virtual_nexus,exception);
      if (p == (const Quantum *) NULL)
        break;
      r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
      (void) memcpy(q,p,(size_t) length*cache_info->number_channels*sizeof(*p));
      q+=length*cache_info->number_channels;
      if ((r != (void *) NULL) && (s != (const void *) NULL))
        {
          /*
            Copy the full metacontent for the run: length pixels, each
            metacontent_extent bytes wide (previously only length bytes were
            copied while s advanced by length*metacontent_extent, leaving the
            tail of the destination stale).
          */
          (void) memcpy(s,r,(size_t) length*cache_info->metacontent_extent);
          s+=length*cache_info->metacontent_extent;
        }
    }
    if (u < (ssize_t) columns)
      break;
  }
  /*
    Free resources.
  */
  if (virtual_metacontent != (void *) NULL)
    virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
  virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1);
  /* An early break above means some pixel could not be resolved. */
  if (v < (ssize_t) rows)
    return((const Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const Quantum *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    id = GetOpenMPThreadId();
  /*
    Default handler: resolve the virtual-pixel request through this thread's
    cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,columns,
    rows,cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelQueue() returns the virtual pixels associated corresponding
% with the last call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
%      const Quantum *GetVirtualPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    id = GetOpenMPThreadId();
  /*
    Pixels staged by the most recent queue/get call.  Delegate to the
    installed handler when one is registered; otherwise read this thread's
    cache nexus directly.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_virtual_pixels_handler ==
      (GetVirtualPixelsHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
    }
  return(cache_info->methods.get_virtual_pixels_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
%  access the meta-content (of type void) corresponding to the
%  region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    id = GetOpenMPThreadId();
  /*
    Return an immutable pixel region, preferring the installed virtual-pixel
    handler and falling back to this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_virtual_pixel_handler !=
      (GetVirtualPixelHandler) NULL)
    return(cache_info->methods.get_virtual_pixel_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,
    y,columns,rows,cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+   G e t V i r t u a l P i x e l s C a c h e                                 %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsCache() returns the pixels associated corresponding with the
% last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
% Quantum *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const Quantum *GetVirtualPixelsCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Return the pixels from the most recent request made on this thread's
    cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(image->cache,
    cache_info->nexus_info[thread_id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
% const Quantum *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return the pixel buffer bound to the given cache nexus; NULL when the
    pixel cache has not been allocated (undefined storage class).
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->storage_class == UndefinedClass ?
    (const Quantum *) NULL : (const Quantum *) nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% metacontent, and memory mapping the cache if it is disk based. The cache
% nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#if defined(SIGBUS)
/*
  SIGBUS handler installed by SetPixelCacheExtent(): a bus error on a
  memory-mapped pixel cache (e.g. the backing file could not actually be
  extended) is reported as a fatal cache error.  The signal number
  (`status') is unused.
*/
static void CacheSignalHandler(int status)
{
  ThrowFatalException(CacheFatalError,"UnableToExtendPixelCache");
}
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  Open (or reuse) the disk-backed pixel cache file in the requested mode.
  If the file is already open in the same mode this is a no-op.  A fresh
  unique filename is acquired when none has been assigned yet; otherwise
  the existing cache file is opened, creating it first where the mode
  requires write access.  Returns MagickTrue on success.
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.
  */
  if ((cache_info->file != -1) && (cache_info->mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /* Try exclusive create first; fall back to opening the existing
           file when it is already there. */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        /* Read/write: same create-then-open fallback as WriteMode. */
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  /* Account for the new descriptor, then release any previously open one. */
  (void) AcquireMagickResource(FileResource,1);
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=file;
  cache_info->mode=mode;
  return(MagickTrue);
}
/*
  Write `length' bytes from `buffer' to the disk cache file starting at
  byte `offset'.  Short writes and EINTR interruptions are retried; any
  other error stops the loop.  Returns the number of bytes actually
  written (equal to `length' on success), or -1 when the initial seek
  fails.  NOTE(review): without pwrite() this uses lseek()+write() on the
  shared descriptor — presumably callers serialize access (e.g. via
  file_semaphore); confirm against call sites.
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    /* Clamp each chunk to SSIZE_MAX so write()'s return value is valid. */
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* hard failure: return bytes written so far */
      }
  }
  return(i);
}
/*
  Extend the disk cache file to at least `length' bytes.  The file is
  grown sparsely by writing a single byte at offset length-1; when the
  cache is synchronized, posix_fallocate() is used (where available) to
  reserve the blocks for real.  A SIGBUS handler is installed so a failed
  extension of a memory-mapped cache is reported rather than crashing.
  Returns MagickTrue on success.
*/
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        message[MagickPathExtent];

      (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
      (void) FormatLocaleString(message,MagickPathExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Reject lengths that cannot be represented as a signed offset. */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;  /* file already large enough */
  else
    {
      /* Writing one byte at length-1 extends the file (sparsely). */
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      if (cache_info->synchronize != MagickFalse)
        (void) posix_fallocate(cache_info->file,offset+1,extent-offset);
#endif
#if defined(SIGBUS)
      (void) signal(SIGBUS,CacheSignalHandler);
#endif
    }
  /* Rewind so subsequent sequential I/O starts at the beginning. */
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
}
/*
  Allocate the pixel cache for `image', preferring in turn: heap memory,
  a distributed cache server, a memory-mapped disk file, and finally plain
  disk I/O — each tier gated by the corresponding resource limit.  Any
  pre-existing cache content (snapshotted in source_info) is cloned into
  the new cache unless opening read-only.  Returns MagickTrue on success.
*/
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;

  char
    format[MagickPathExtent],
    message[MagickPathExtent];

  const char
    *type;

  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  size_t
    columns,
    packet_size;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if ((AcquireMagickResource(WidthResource,image->columns) == MagickFalse) ||
      (AcquireMagickResource(HeightResource,image->rows) == MagickFalse))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  /*
    Snapshot the current cache so its pixels can be cloned into the new
    cache; disown the file descriptor so the snapshot never closes it.
  */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]",
    image->filename,(double) GetImageIndexInList(image));
  /* Mirror image geometry/attributes into the cache descriptor. */
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->alpha_trait=image->alpha_trait;
  cache_info->read_mask=image->read_mask;
  cache_info->write_mask=image->write_mask;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  InitializePixelChannelMap(image);
  cache_info->number_channels=GetPixelChannels(image);
  (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels*
    sizeof(*image->channel_map));
  cache_info->metacontent_extent=image->metacontent_extent;
  cache_info->mode=mode;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=cache_info->number_channels*sizeof(Quantum);
  if (image->metacontent_extent != 0)
    packet_size+=cache_info->metacontent_extent;
  length=number_pixels*packet_size;
  /*
    Round-trip the column count through the total length to detect
    arithmetic overflow in the size computation.
  */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /* Ping mode: record geometry only, allocate no pixels. */
      cache_info->storage_class=image->storage_class;
      cache_info->colorspace=image->colorspace;
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  status=AcquireMagickResource(AreaResource,cache_info->length);
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if ((status != MagickFalse) && (length == (MagickSizeType) ((size_t) length)))
    {
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (((cache_info->type == UndefinedCache) && (status != MagickFalse)) ||
          (cache_info->type == MemoryCache))
        {
          status=MagickTrue;
          cache_info->mapped=MagickFalse;
          cache_info->pixels=(Quantum *) MagickAssumeAligned(
            AcquireAlignedMemory(1,(size_t) cache_info->length));
          if (cache_info->pixels == (Quantum *) NULL)
            cache_info->pixels=source_info.pixels;
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->type=MemoryCache;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  number_pixels*cache_info->number_channels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  /* Carry the old pixels over, then free the snapshot. */
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->mapped != MagickFalse ?
                    "Anonymous" : "Heap",type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              return(status == 0 ? MagickFalse : MagickTrue);
            }
        }
      RelinquishMagickResource(MemoryResource,cache_info->length);
    }
  /*
    Create pixel cache on disk.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if ((status == MagickFalse) || (cache_info->type == DistributedCache))
    {
      DistributeCacheInfo
        *server_info;

      /* Disk quota exhausted (or already distributed): try a remote
         distributed pixel cache before giving up. */
      if (cache_info->type == DistributedCache)
        RelinquishMagickResource(DiskResource,cache_info->length);
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MagickPathExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    GetDistributeCacheFile((DistributeCacheInfo *)
                    cache_info->server_info),type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              return(status == 0 ? MagickFalse : MagickTrue);
            }
        }
      RelinquishMagickResource(DiskResource,cache_info->length);
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      /* Close the previous disk cache so a fresh file is created below. */
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      RelinquishMagickResource(DiskResource,cache_info->length);
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if (length != (MagickSizeType) ((size_t) length))
    cache_info->type=DiskCache;  /* too large to map into the address space */
  else
    {
      status=AcquireMagickResource(MapResource,cache_info->length);
      if ((status == MagickFalse) && (cache_info->type != MapCache) &&
          (cache_info->type != MemoryCache))
        {
          status=MagickTrue;
          cache_info->type=DiskCache;
        }
      else
        {
          status=MagickTrue;
          cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode,
            cache_info->offset,(size_t) cache_info->length);
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /* mmap failed: fall back to plain disk I/O. */
              cache_info->type=DiskCache;
              cache_info->pixels=source_info.pixels;
            }
          else
            {
              /*
                Create file-backed memory-mapped pixel cache.
              */
              (void) ClosePixelCacheOnDisk(cache_info);
              cache_info->type=MapCache;
              cache_info->mapped=MagickTrue;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  number_pixels*cache_info->number_channels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    cache_info->file,type,(double) cache_info->columns,(double)
                    cache_info->rows,(double) cache_info->number_channels,
                    format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              return(status == 0 ? MagickFalse : MagickTrue);
            }
        }
      RelinquishMagickResource(MapResource,cache_info->length);
    }
  /* Plain disk cache: clone any prior pixels through file I/O. */
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
        MagickPathExtent,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MagickPathExtent,
        "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,(double)
        cache_info->number_channels,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
%  o attach: A value other than zero attaches to an existing persistent
%    pixel cache.
%
% o initialize: A value other than zero initializes the persistent pixel
% cache.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  Image
    clone_image;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  /*
    Attach to, usurp, or clone a persistent (disk-resident) pixel cache.
    `*offset' is advanced past this image's cache, rounded up to the next
    page boundary, so multiple images can share one persistent file.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=DiskCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /* Fast path: if we hold the only reference to a writable disk/map cache,
     try to rename its backing file into place (double-checked under the
     cache semaphore). */
  if ((cache_info->mode != ReadMode) &&
      ((cache_info->type == DiskCache) || (cache_info->type == MapCache)) &&
      (cache_info->reference_count == 1))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->mode != ReadMode) &&
          ((cache_info->type == DiskCache) || (cache_info->type == MapCache)) &&
          (cache_info->reference_count == 1))
        {
          /*
            Usurp existing persistent pixel cache.
          */
          if (rename_utf8(cache_info->cache_filename, filename) == 0)
            {
              (void) CopyMagickString(cache_info->cache_filename,filename,
                MagickPathExtent);
              *offset+=cache_info->length+page_size-(cache_info->length %
                page_size);
              UnlockSemaphoreInfo(cache_info->semaphore);
              cache_info=(CacheInfo *) ReferencePixelCache(cache_info);
              if (image->debug != MagickFalse)
                (void) LogMagickEvent(CacheEvent,GetMagickModule(),
                  "Usurp resident persistent cache");
              return(MagickTrue);
            }
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  /*
    Clone persistent pixel cache.
  */
  clone_image=(*image);
  clone_info=(CacheInfo *) clone_image.cache;
  image->cache=ClonePixelCache(cache_info);
  cache_info=(CacheInfo *) ReferencePixelCache(image->cache);
  (void) CopyMagickString(cache_info->cache_filename,filename,MagickPathExtent);
  cache_info->type=DiskCache;
  cache_info->offset=(*offset);
  cache_info=(CacheInfo *) image->cache;
  status=OpenPixelCache(image,IOMode,exception);
  if (status != MagickFalse)
    status=ClonePixelCacheRepository(cache_info,clone_info,exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  Quantum
    *magick_restrict pixels;

  RectangleInfo
    region;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  /* The region's upper-left corner must lie inside the image. */
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  /* Reject regions whose offset arithmetic overflows (offset < 0) or whose
     lower-right corner falls outside the pixel cache. */
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((Quantum *) NULL);
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Return pixel cache.
  */
  region.x=x;
  region.y=y;
  region.width=columns;
  region.height=rows;
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,&region,nexus_info,
    exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelsCache() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Queue a write-only pixel region on this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a Quantum array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% obtain the meta-content (of type void) corresponding to the region.
%  Once the Quantum (and/or metacontent) array has been updated, the
% changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Queue a mutable pixel region.  A registered queue-authentic-pixels
    handler, when present, takes precedence; otherwise the region is queued
    on this thread's cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    return(cache_info->methods.queue_authentic_pixels_handler(image,x,y,
      columns,rows,exception));
  assert(thread_id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheMetacontent() reads metacontent from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheMetacontent() method is:
%
% MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the metacontent.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Read `length' bytes from the disk cache file at byte `offset' into
  `buffer'.  Short reads and EINTR interruptions are retried; any other
  error (or end-of-file) stops the loop.  Returns the number of bytes
  actually read (equal to `length' on success), or -1 when the initial
  seek fails.  NOTE(review): without pread() this uses lseek()+read() on
  the shared descriptor — presumably callers serialize access (e.g. via
  file_semaphore); confirm against call sites.
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    /* Clamp each chunk to SSIZE_MAX so read()'s return value is valid. */
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* EOF or hard failure: return bytes read so far */
      }
  }
  return(i);
}
/*
  Copy the metacontent for the nexus region out of the pixel cache into
  nexus_info->metacontent, row by row, from whichever backing store the
  cache uses (memory/map, disk, or distributed server).  Returns
  MagickTrue on success; MagickFalse when the cache has no metacontent or
  a row transfer fails.  A full-width region is collapsed into a single
  contiguous transfer when the total extent permits.
*/
static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register ssize_t
    y;

  register unsigned char
    *magick_restrict q;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /* Nexus already points directly at the authentic cache: nothing to copy. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;        /* bytes per region row */
  extent=length*nexus_info->region.height; /* bytes for whole region */
  rows=nexus_info->region.height;
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict p;

      /*
        Read meta-content from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width region: copy it as one contiguous run. */
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /* On disk the metacontent follows all pixel data; `extent' is
         repurposed here as the total pixel count used to skip past it. */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read metacontent from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* request one row per round trip */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* Loop exited early: a row transfer failed. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register Quantum
    *magick_restrict q;

  register ssize_t
    y;

  size_t
    number_channels,
    rows;

  /*
    Read the pixels of the nexus region from the backing store (memory, disk,
    or distributed cache) into the nexus pixel buffer.  Returns MagickTrue on
    success, MagickFalse on overflow or I/O failure.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus already points directly at cache pixels */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);  /* row-offset multiplication overflowed */
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);  /* per-row byte length overflowed */
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);  /* total byte extent overflowed */
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict p;

      /*
        Read pixels from memory.  When the region spans full cache rows the
        copy collapses to a single memcpy of the whole extent.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+offset*cache_info->number_channels;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk; file access is serialized by file_semaphore.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;  /* short read; reported below via y < rows */
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* fetch one row per request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fix: "&region" had been corrupted to an HTML-entity mojibake */
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
% Cache ReferencePixelCache(Cache cache_info)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Increment the pixel cache reference count under the cache semaphore and
    return the cache.  The null check is spelled "(Cache) NULL" for
    consistency with the rest of this file (the former "(Cache *) NULL"
    compared against a pointer-to-Cache type).
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e E p o c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheEpoch() resets the pixel cache epoch.
%
% The format of the ResetPixelCacheEpoch method is:
%
% void ResetPixelCacheEpoch(void)
%
*/
/* Reset the file-scope cache epoch counter to zero.  NOTE(review): the
   consumers of cache_epoch are outside this chunk — confirm reset semantics
   against them. */
MagickPrivate void ResetPixelCacheEpoch(void)
{
  cache_epoch=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
% SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Set cache pixel methods.  Only handlers that the caller actually supplies
    (non-NULL members of cache_methods) replace the existing ones; a NULL
    member leaves the current handler untouched.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    Fix: test the caller-supplied handler, as every other handler above does.
    The previous code tested the *current* handler, so a non-NULL existing
    handler could be clobbered by a NULL one, and a NULL existing handler
    could never be set.
  */
  if (cache_methods->get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  if (cache_methods->get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
% Quantum SetPixelCacheNexusPixels(const CacheInfo *cache_info,
% const MapMode mode,const RectangleInfo *region,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o region: A pointer to the RectangleInfo structure that defines the
% region of this particular cache nexus.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  size_t
    staging_bytes;

  /*
    Allocate the nexus staging buffer: prefer aligned heap memory, fall back
    to an anonymous memory map, and raise a resource-limit exception when
    both fail.
  */
  if (nexus_info->length != (MagickSizeType) ((size_t) nexus_info->length))
    return(MagickFalse);  /* requested length does not fit in size_t */
  staging_bytes=(size_t) nexus_info->length;
  nexus_info->mapped=MagickFalse;
  nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
    staging_bytes));
  if (nexus_info->cache != (Quantum *) NULL)
    return(MagickTrue);
  nexus_info->mapped=MagickTrue;
  nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,staging_bytes);
  if (nexus_info->cache != (Quantum *) NULL)
    return(MagickTrue);
  (void) ThrowMagickException(exception,GetMagickModule(),ResourceLimitError,
    "MemoryAllocationFailed","`%s'",cache_info->filename);
  return(MagickFalse);
}
static inline MagickBooleanType IsPixelCacheAuthentic(
  const CacheInfo *magick_restrict cache_info,
  const NexusInfo *magick_restrict nexus_info)
{
  MagickOffsetType
    pixel_offset;

  /*
    Does nexus pixels point directly to in-core cache pixels or is it buffered?
  */
  if (cache_info->type == PingCache)
    return(MagickTrue);
  pixel_offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  if (nexus_info->pixels == (cache_info->pixels+pixel_offset*
      cache_info->number_channels))
    return(MagickTrue);
  return(MagickFalse);
}
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  /*
    Hint the CPU to prefetch the nexus pixels: for-write unless the caller is
    only going to read them.  The rw/locality arguments stay literal constants
    (prefetch intrinsics typically require compile-time constants).
  */
  if (mode != ReadMode)
    {
      MagickCachePrefetch((unsigned char *) nexus_info->pixels,1,1);
      return;
    }
  MagickCachePrefetch((unsigned char *) nexus_info->pixels,0,1);
}
/*
  Define the region of the cache for the specified cache nexus and return a
  pointer to its pixels.  When the region lies fully inside an in-core cache
  and covers whole rows (or a single row), the nexus aliases the cache pixels
  directly; otherwise pixels are staged in a private buffer until synced.
*/
static Quantum *SetPixelCacheNexusPixels(const CacheInfo *cache_info,
  const MapMode mode,const RectangleInfo *region,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  MagickSizeType
    length,
    number_pixels;
  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  nexus_info->region=(*region);
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    {
      ssize_t
        x,
        y;
      /* x,y: inclusive bottom-right corner of the requested region */
      x=nexus_info->region.x+(ssize_t) nexus_info->region.width-1;
      y=nexus_info->region.y+(ssize_t) nexus_info->region.height-1;
      /* direct access requires the region to be in bounds AND contiguous in
         cache memory: a single row, or full-width rows starting at x == 0 */
      if (((nexus_info->region.x >= 0) && (x < (ssize_t) cache_info->columns) &&
          (nexus_info->region.y >= 0) && (y < (ssize_t) cache_info->rows)) &&
          ((nexus_info->region.height == 1UL) || ((nexus_info->region.x == 0) &&
          ((nexus_info->region.width == cache_info->columns) ||
          ((nexus_info->region.width % cache_info->columns) == 0)))))
        {
          MagickOffsetType
            offset;
          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
            nexus_info->region.x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          nexus_info->authentic_pixel_cache=IsPixelCacheAuthentic(cache_info,
            nexus_info);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  /* staging buffer holds the pixels followed by optional metacontent */
  length=number_pixels*cache_info->number_channels*sizeof(Quantum);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  if (nexus_info->cache == (Quantum *) NULL)
    {
      nexus_info->length=length;
      status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
      if (status == MagickFalse)
        {
          nexus_info->length=0;
          return((Quantum *) NULL);
        }
    }
  else
    /* existing staging buffer is reused unless it is too small */
    if (nexus_info->length < length)
      {
        RelinquishCacheNexusPixels(nexus_info);
        nexus_info->length=length;
        status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          {
            nexus_info->length=0;
            return((Quantum *) NULL);
          }
      }
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  if (cache_info->metacontent_extent != 0)
    nexus_info->metacontent=(void *) (nexus_info->pixels+number_pixels*
      cache_info->number_channels);
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  nexus_info->authentic_pixel_cache=IsPixelCacheAuthentic(cache_info,
    nexus_info);
  return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Set the alpha channel of every pixel in the image to the given value and
  mark the image as blended.  Rows are processed in parallel when OpenMP is
  available; a failure in any row clears status and short-circuits the rest.
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  CacheView
    *magick_restrict image_view;
  MagickBooleanType
    status;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception); /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    /* once any row fails, remaining iterations become no-ops */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    previous_method;

  /*
    Install the new virtual pixel method and return the previous setting.
    Background and transparent methods may require enabling the alpha channel
    or switching a gray image to sRGB first.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  previous_method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    {
      if (virtual_pixel_method == BackgroundVirtualPixelMethod)
        {
          if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
              (image->alpha_trait == UndefinedPixelTrait))
            (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
          if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
              (IsGrayColorspace(image->colorspace) != MagickFalse))
            (void) SetImageColorspace(image,sRGBColorspace,exception);
        }
      else
        if (virtual_pixel_method == TransparentVirtualPixelMethod)
          {
            if (image->alpha_trait == UndefinedPixelTrait)
              (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
          }
    }
  return(previous_method);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
% been completed and updates the host memory.
%
% The format of the SyncAuthenticOpenCLBuffer() method is:
%
% void SyncAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  /* only in-core caches with an attached OpenCL buffer need copying */
  if (cache_info->type != MemoryCache)
    return;
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->opencl=(MagickCLCacheInfo) CopyMagickCLCacheInfo(
    cache_info->opencl);
  UnlockSemaphoreInfo(cache_info->semaphore);
}
MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
cache_info=(CacheInfo *) image->cache;
CopyOpenCLBuffer(cache_info);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Save the authentic image pixels held by the nexus to the in-memory or disk
  cache.  Returns MagickTrue when the region is synced; on success the image
  is marked tainted.
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickBooleanType
    status;
  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  /* nexus aliasing the cache directly: nothing to copy, just mark tainted */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  /* a metacontent write failure overrides the pixel-write status */
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if (status != MagickFalse)
    image->taint=MagickTrue;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Sync the calling thread's cache nexus back to the pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Flush authentic pixels to the cache, delegating to an installed
    sync handler when one is registered.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    return(cache_info->methods.sync_authentic_pixels_handler(image,
      exception));
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  /*
    Re-acquire the image pixel cache; a non-NULL cache means it is in sync.
  */
  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (GetImagePixelCache(image,MagickTrue,exception) == (Cache) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheMetacontent() writes the meta-content to the specified region
% of the pixel cache.
%
% The format of the WritePixelCacheMetacontent() method is:
%
% MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the meta-content.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const unsigned char
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  /*
    Write the nexus region's meta-content to the backing store (memory, disk,
    or distributed cache).  Returns MagickTrue on success.
  */
  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases cache storage directly */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory; full-width regions collapse to a
        single memcpy of the whole extent.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk; metacontent is stored after all the
        pixel data, hence the extent term in the file offset below.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;  /* short write; reported below via y < rows */
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* send one row per request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fix: "&region" had been corrupted to an HTML-entity mojibake */
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const Quantum
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  /*
    Write the nexus region's pixels to the backing store (memory, disk, or
    distributed cache).  Returns MagickTrue on success.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases cache storage directly */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict q;

      /*
        Write pixels to memory; full-width regions collapse to a single
        memcpy of the whole extent.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+offset*cache_info->number_channels;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->columns*cache_info->number_channels;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk; file access is serialized by file_semaphore.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;  /* short write; reported below via y < rows */
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* send one row per request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fix: "&region" had been corrupted to an HTML-entity mojibake */
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
|
layer_example_f32.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#if defined(USE_BLAS) || defined(USE_IM2COL)
#include <mkl.h>
#endif
/* Feature maps are processed in blocks of this many channels; nIfm/nOfm must be multiples. */
#define CHANNEL_BLOCKING 64
/* function-pointer to LIBXSMM kernel */
/* fwd_brgemmz is dispatched with beta=0.0 (overwrites C), fwd_brgemma with the
   default beta (accumulates into C); see the dispatch calls in main(). */
libxsmm_smmfunction_reducebatch_offs fwd_brgemmz;
libxsmm_smmfunction_reducebatch_offs fwd_brgemma;
/* Geometry of the naive (reference) capsule convolution. */
typedef struct {
int nImg; /* mini-batch size, "N" */
int nIfm; /* number of input feature maps, "C" */
int nOfm; /* number of output feature maps, "K" */
int ifhp; /* input height/width including physical padding */
int ifwp;
int ifh; /* input height/width without padding */
int ifw;
int ofhp; /* output height/width including physical padding */
int ofwp;
int ofh; /* output height/width without padding */
int ofw;
int pad_h; /* logical padding */
int pad_w;
int pad_h_in; /* physical padding applied to the input buffer */
int pad_w_in;
int pad_h_out; /* physical padding applied to the output buffer */
int pad_w_out;
int kh; /* filter height, "R" */
int kw; /* filter width, "S" */
int stride_h;
int stride_w;
int RK; /* contraction dim of the pose/filter matrices (poses are Mh x RK, filters RK x Mw) */
int Mh; /* pose-matrix rows */
int Mw; /* pose-matrix columns */
} naive_conv_t;
/* Same geometry for the blocked/GEMM-based convolution; feature maps are
   split into nB* blocks of nb* channels (nb* == CHANNEL_BLOCKING). */
typedef struct {
int nImg;
int nBIfm; /* number of input-channel blocks, nIfm/CHANNEL_BLOCKING */
int nbIfm; /* input-channel block size */
int nBOfm; /* number of output-channel blocks, nOfm/CHANNEL_BLOCKING */
int nbOfm; /* output-channel block size */
int ifhp;
int ifwp;
int ifh;
int ifw;
int ofhp;
int ofwp;
int ofh;
int ofw;
int pad_h;
int pad_w;
int pad_h_in;
int pad_w_in;
int pad_h_out;
int pad_w_out;
int kh;
int kw;
int stride_h;
int stride_w;
int RK;
int Mh;
int Mw;
unsigned long long brcount; /* batch-reduce count for the BRGEMM kernels, kh*kw */
} gemm_conv_t;
/* Error metrics filled by compare_buf(). */
typedef struct {
double max_rel_err;
double max_abs_err;
double l2_rel_err;
double one_norm_ref;
double one_norm_test;
} correctness_t;
/* Sets all SIZE elements of BUF to 0.0f (parallelized under OpenMP).
 * Fix: the loop index is "long" to match the "long size" parameter; the
 * previous "int" index silently truncated and never terminated correctly
 * for buffers with more than INT_MAX elements. */
LIBXSMM_INLINE void zero_buf(float* buf, long size) {
  long i;
#if defined(_OPENMP)
# pragma omp parallel for private(i)
#endif
  for (i = 0; i < size; ++i) {
    buf[i] = 0.0f;
  }
}
/* Copies SIZE float elements from SRC to DST (buffers must not overlap;
 * parallelized under OpenMP).
 * Fixes: SRC is const-qualified since it is only read, and the loop index
 * is "long" to match the "long size" parameter (an "int" index overflows
 * for buffers with more than INT_MAX elements). */
LIBXSMM_INLINE void copy_buf(const float* src, float* dst, long size) {
  long i;
#if defined(_OPENMP)
# pragma omp parallel for private(i)
#endif
  for (i = 0; i < size; ++i) {
    dst[i] = src[i];
  }
}
/* Fills BUF with SIZE values:
 *  - initOne != 0 : every element is 1.0f;
 *  - initPos != 0 : pseudo-random, presumably in [0,1) (libxsmm_rng_f64 —
 *    TODO confirm against the libxsmm API docs);
 *  - otherwise    : pseudo-random around zero, 0.05 - r/10.
 * Fix: the loop index is "long" to match the "long size" parameter
 * (an "int" index overflows for very large buffers). */
LIBXSMM_INLINE void init_buf(float* buf, long size, int initPos, int initOne)
{
  long i;
  /* redundant (every element is overwritten below) but harmless */
  zero_buf(buf, size);
  for (i = 0; i < size; ++i) {
    buf[i] = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? libxsmm_rng_f64() : (0.05 - libxsmm_rng_f64()/10.0)));
  }
}
/* Zeroes a border of width pad_w / height pad_h in the H and W dimensions of
 * a 6-dim tensor laid out as [N][C][H][W][Mh][RK] (all Mh*RK pose entries of
 * a padded pixel are cleared). Note that H and W here are the padded extents,
 * so elements with h in [pad_h, H-pad_h) and w in [pad_w, W-pad_w) are the
 * payload and remain untouched. */
LIBXSMM_INLINE void set_zeropad_nchw(float* nchw, int N, int C, int H, int W, int Mh, int RK, int pad_h, int pad_w)
{
LIBXSMM_VLA_DECL(6, float, input, nchw, C, H, W, Mh, RK);
int n, h, w, c, m, rk;
for ( n = 0; n < N; n++ ) {
for ( c = 0; c < C; c++ ) {
for ( h = 0; h < H; h++ ) {
for ( w = 0; w < W; w++ ) {
for ( m = 0; m < Mh; m++ ) {
for ( rk = 0; rk < RK; rk++ ) {
/* a pixel is padding if it lies inside the border in either dimension */
if(h < pad_h || h >= H-pad_h || w < pad_w || w >= W-pad_w)
LIBXSMM_VLA_ACCESS(6, input, n, c, h, w, m, rk, C, H, W, Mh, RK) = 0.0;
}
}
}
}
}
}
}
/* Compares TEST against REF elementwise and fills NORMS with:
 *  - max_rel_err / max_abs_err : worst-case relative/absolute difference,
 *  - l2_rel_err  : sqrt of the sum of squared differences
 *                  (NOTE(review): despite the name this is an absolute L2
 *                  error, it is never divided by the reference norm),
 *  - one_norm_*  : signed sums of the elements
 *                  (NOTE(review): not a true 1-norm, no fabs — kept as-is
 *                  since the benchmark output format depends on it).
 * Fix: the loop index is "long" to match the "long size" parameter; the
 * disabled debug printfs use %ld accordingly. */
LIBXSMM_INLINE void compare_buf(float* ref, float* test, long size, correctness_t* norms)
{
  long i;
  double diff, rel_err;
  norms->max_rel_err = 0.;
  norms->max_abs_err = 0.;
  norms->l2_rel_err = 0.;
  norms->one_norm_ref = 0.;
  norms->one_norm_test = 0.;
  for (i = 0; i < size; ++i) {
    norms->one_norm_ref += (double)ref[i];
    norms->one_norm_test += (double)test[i];
    diff = fabs((double)ref[i] - (double)test[i]);
    norms->l2_rel_err += (diff*diff);
    rel_err = 0.0;
    if (diff > 0.0 ) {
      rel_err = diff/fabs((double)ref[i]);
    }
    if (rel_err > norms->max_rel_err) {
      norms->max_rel_err = rel_err;
#if 0
      printf("MISMATCH@ %3ld: A=%12.8g B=%12.8g (E:%12.4e) (R:%12.4e)\n", i, ref[i], test[i], diff, rel_err);
#endif
    }
    if (diff > norms->max_abs_err) {
      norms->max_abs_err = diff;
    }
#if 0
    if (diff > 1.0) {
      printf("MISMATCH@ %3ld: A=%12.8g B=%12.8g (E:%12.4e)\n", i, ref[i], test[i], diff);
    }
#endif
  }
  norms->l2_rel_err = sqrt(norms->l2_rel_err);
}
/* Repacks the naive pose tensor [N][H][W][C][Mh][RK] into the blocked GEMM
 * layout [N][C/CB][Mh][RK][H][W][cb] with CB = CHANNEL_BLOCKING, i.e. the
 * channel dimension is split into blocks and the fastest-varying index
 * becomes the intra-block channel. */
LIBXSMM_INLINE void copy_naiveP_to_GEMM(const float* nchw, float* gemm, int N, int H, int W, int C, int Mh, int RK)
{
LIBXSMM_VLA_DECL(7, float, output, gemm, C/CHANNEL_BLOCKING, Mh, RK, H, W, CHANNEL_BLOCKING);
LIBXSMM_VLA_DECL(6, const float, input, nchw, H, W, C, Mh, RK);
int n, h, w, c1, c2, m, rk;
for ( n = 0; n < N; n++ ) {
for ( c1 = 0; c1 < C/CHANNEL_BLOCKING; c1++ ) {
for ( m = 0; m < Mh; m++ ) {
for ( rk = 0; rk < RK; rk++ ) {
for ( h = 0; h < H; h++ ) {
for ( w = 0; w < W; w++ ) {
for ( c2 = 0; c2 < CHANNEL_BLOCKING; c2++ ) {
LIBXSMM_VLA_ACCESS(7, output, n, c1, m, rk, h, w, c2, C/CHANNEL_BLOCKING, Mh, RK, H, W, CHANNEL_BLOCKING) =
LIBXSMM_VLA_ACCESS(6, input, n, h, w, (c1*CHANNEL_BLOCKING)+c2, m, rk, H, W, C, Mh, RK);
}
}
}
}
}
}
}
}
/* Inverse repacking for the votes tensor: converts the blocked GEMM layout
 * [N][C/CB][Mh][Mw][H][W][cb] (CB = CHANNEL_BLOCKING) back into the naive
 * layout [N][H][W][C][Mh][Mw] so the result can be compared against the
 * reference implementation. */
LIBXSMM_INLINE void copy_GEMM_to_naiveV(const float* gemm, float* nchw, int N, int H, int W, int C, int Mh, int Mw)
{
LIBXSMM_VLA_DECL(7, const float, input, gemm, C/CHANNEL_BLOCKING, Mh, Mw, H, W, CHANNEL_BLOCKING);
LIBXSMM_VLA_DECL(6, float, output, nchw, H, W, C, Mh, Mw);
int n, h, w, c1, c2, mi, mj;
for ( n = 0; n < N; n++ ) {
for ( c1 = 0; c1 < C/CHANNEL_BLOCKING; c1++ ) {
for ( mj = 0; mj < Mh; mj++) {
for ( mi = 0; mi < Mw; mi++) {
for ( h = 0; h < H; h++ ) {
for ( w = 0; w < W; w++ ) {
for ( c2 = 0; c2 < CHANNEL_BLOCKING; c2++ ) {
LIBXSMM_VLA_ACCESS(6, output, n, h, w, (c1*CHANNEL_BLOCKING)+c2, mj, mi, H, W, C, Mh, Mw) =
LIBXSMM_VLA_ACCESS(7, input, n, c1, mj, mi, h, w, c2, C/CHANNEL_BLOCKING, Mh, Mw, H, W, CHANNEL_BLOCKING);
}
}
}
}
}
}
}
}
/* Repacks the naive filter tensor [C][K][R][S][RK][Mw] (the layout used by
 * naive_convcaps_fp) into the blocked GEMM layout
 * [K/CB][C/CB][Mw][RK][R][S][cb][kb] with CB = CHANNEL_BLOCKING.
 * Fix: the second runtime bound handed to LIBXSMM_VLA_ACCESS for "input"
 * was "C", but the declared layout has "K" as its second dimension; in the
 * non-VLA fallback of LIBXSMM_VLA_ACCESS (where the bound arguments are
 * actually used for the offset computation) that produced wrong offsets
 * whenever C != K, e.g. with the defaults C=128, K=256. */
LIBXSMM_INLINE void copy_naiveF_to_GEMM(const float* kcrs, float* gemm, int R, int S, int C, int K, int RK, int Mw)
{
  LIBXSMM_VLA_DECL(8, float, output, gemm, C/CHANNEL_BLOCKING, Mw, RK, R, S, CHANNEL_BLOCKING, CHANNEL_BLOCKING);
  LIBXSMM_VLA_DECL(6, const float, input, kcrs, K, R, S, RK, Mw);
  int r, s, c1, c2, k1, k2, rk, m;
  for ( k1 = 0; k1 < K/CHANNEL_BLOCKING; k1++ ) {
    for ( c1 = 0; c1 < C/CHANNEL_BLOCKING; c1++ ) {
      for ( m = 0; m < Mw; m++ ) {
        for ( rk = 0; rk < RK; rk++ ) {
          for ( r = 0; r < R; r++ ) {
            for ( s = 0; s < S; s++ ) {
              for ( c2 = 0; c2 < CHANNEL_BLOCKING; c2++ ) {
                for ( k2 = 0; k2 < CHANNEL_BLOCKING; k2++ ) {
                  LIBXSMM_VLA_ACCESS(8, output, k1, c1, m, rk, r, s, c2, k2, C/CHANNEL_BLOCKING, Mw, RK, R, S, CHANNEL_BLOCKING, CHANNEL_BLOCKING) =
                    LIBXSMM_VLA_ACCESS(6, input, (c1*CHANNEL_BLOCKING)+c2, (k1*CHANNEL_BLOCKING)+k2, r, s, rk, m, K, R, S, RK, Mw);
                }
              }
            }
          }
        }
      }
    }
  }
}
/* Returns nonzero iff 0 <= a < b, folded into a single unsigned comparison:
 * a negative "a" wraps to a value near UINT_MAX and thus fails "< ub" for
 * every non-negative "b". */
LIBXSMM_INLINE int is_a_ge_zero_and_a_lt_b(int a, int b) {
  const unsigned int ua = (unsigned int)a;
  const unsigned int ub = (unsigned int)b;
  return ua < ub;
}
/* Reference forward pass of the capsule convolution: for every output pixel
 * and output feature map, the vote matrix (Mh x Mw) is the sum over input
 * feature maps and the kh x kw window of pose (Mh x RK) times filter
 * (RK x Mw) matrix products.
 * Layouts: poses  [N][ifhp][ifwp][nIfm][Mh][RK],
 *          filter [nIfm][nOfm][kh][kw][RK][Mw],
 *          votes  [N][ofhp][ofwp][nOfm][Mh][Mw]. */
LIBXSMM_INLINE void naive_convcaps_fp(naive_conv_t* param, const float* input, float* output, const float* filter)
{
int nImg = param->nImg;
int nIfm = param->nIfm;
int nOfm = param->nOfm;
int ifhp = param->ifhp;
int ifwp = param->ifwp;
int ofhp = param->ofhp;
int ofwp = param->ofwp;
int ofh = param->ofh;
int ofw = param->ofw;
int pad_h = param->pad_h;
int pad_w = param->pad_w;
int pad_h_in = param->pad_h_in;
int pad_w_in = param->pad_w_in;
int pad_h_out = param->pad_h_out;
int pad_w_out = param->pad_w_out;
int kh = param->kh;
int kw = param->kw;
int stride_h = param->stride_h;
int stride_w = param->stride_w;
int RK = param->RK;
int Mh = param->Mh;
int Mw = param->Mw;
/* loop counters */
int img, ofm, ifm, oj, oi, ij, ii, kj, ki, rk, mj, mi;
/* NOTE(review): the base offsets mix pad_w with the row stride (pad_w_out*ofwp
   + pad_h_out) instead of pad_h*stride + pad_w; main() rejects any nonzero
   padding, so this is currently dead — confirm before enabling padding. */
LIBXSMM_VLA_DECL(6, float, votes_t, output + (pad_w_out * ofwp + pad_h_out), ofhp, ofwp, nOfm, Mh, Mw);
LIBXSMM_VLA_DECL(6, const float, poses_t, input + (pad_w_in * ifwp + pad_h_in), ifhp, ifwp, nIfm, Mh, RK);
LIBXSMM_VLA_DECL(6, const float, filter_t, filter, nOfm, kh, kw, RK, Mw);
#if defined(_OPENMP)
# pragma omp parallel for LIBXSMM_OPENMP_COLLAPSE(2) private(img, ofm, ifm, oj, oi, ij, ii, kj, ki, rk, mj, mi)
#endif
for (img = 0; img < nImg; ++img) {
for (ofm = 0; ofm < nOfm; ++ofm) {
for (oj = 0; oj < ofh; ++oj) {
ij = oj * stride_h - pad_h;
for (oi = 0; oi < ofw; ++oi) {
ii = oi * stride_w - pad_w;
for (mj = 0; mj < Mh; ++mj ) {
for (mi = 0; mi < Mw; ++mi ) {
/* zero the accumulator, then contract over ifm, the filter window and RK */
LIBXSMM_VLA_ACCESS( 6, votes_t, img, oj, oi, ofm, mj, mi, ofhp, ofwp, nOfm, Mh, Mw) = 0.0f;
for (ifm = 0; ifm < nIfm; ++ifm) {
for (kj = 0; kj < kh; ++kj) {
/*if(ij+kj < 0 || ij+kj >= ifh) continue;*/
for (ki = 0; ki < kw; ++ki) {
/*if(ii+ki < 0 || ii+ki >= ifw) continue;*/
for (rk = 0; rk < RK; ++rk ) {
LIBXSMM_VLA_ACCESS( 6, votes_t, img, oj, oi, ofm, mj, mi, ofhp, ofwp, nOfm, Mh, Mw) +=
LIBXSMM_VLA_ACCESS( 6, poses_t, img, ij+kj, ii+ki, ifm, mj, rk, ifhp, ifwp, nIfm, Mh, RK) *
LIBXSMM_VLA_ACCESS( 6, filter_t, ifm, ofm, kj, ki, rk, mi, nOfm, kh, kw, RK, Mw);
}
}
}
}
}
}
}
}
}
}
}
/* Blocked forward pass using JITed batch-reduce GEMM kernels: each call
 * multiplies the kh*kw (brcount) filter/input pairs selected by the byte
 * offsets in aoff/boff (see compute_broff) for one output row.
 * The very first contraction step (ifm1 == 0 && rk == 0) uses fwd_brgemmz
 * (beta=0, overwrites the output row); all later steps use fwd_brgemma
 * (accumulating), so no separate zeroing of the output is needed. */
LIBXSMM_INLINE void gemm_convcaps_fp(gemm_conv_t* param, const float* input, float* output, const float* filter, unsigned long long* aoff, unsigned long long* boff)
{
int nImg = param->nImg;
int nBIfm = param->nBIfm;
int nbIfm = param->nbIfm;
int nBOfm = param->nBOfm;
int nbOfm = param->nbOfm;
int ifhp = param->ifhp;
int ifwp = param->ifwp;
int ofhp = param->ofhp;
int ofwp = param->ofwp;
int ofh = param->ofh;
int pad_h = param->pad_h;
int pad_h_in = param->pad_h_in;
int pad_w_in = param->pad_w_in;
int pad_h_out = param->pad_h_out;
int pad_w_out = param->pad_w_out;
int kh = param->kh;
int kw = param->kw;
int stride_h = param->stride_h;
int RK = param->RK;
int Mh = param->Mh;
int Mw = param->Mw;
unsigned long long brcount = param->brcount;
/* loop counters */
int img, ofm1, ifm1, oj, ij, rk, mj, mi;
/* NOTE(review): base offsets mirror naive_convcaps_fp and are only exercised
   with all pads forced to zero by main(). */
LIBXSMM_VLA_DECL(7, float, votes_t, output + (pad_w_out * ofwp + pad_h_out), nBOfm, Mh, Mw, ofhp, ofwp, nbOfm);
LIBXSMM_VLA_DECL(7, const float, poses_t, input + (pad_w_in * ifwp + pad_h_in), nBIfm, Mh, RK, ifhp, ifwp, nbIfm);
LIBXSMM_VLA_DECL(8, const float, filter_t, filter, nBIfm, Mw, RK, kh, kw, nbIfm, nbOfm);
#if defined(_OPENMP)
# pragma omp parallel for LIBXSMM_OPENMP_COLLAPSE(2) private(img, ofm1, ifm1, oj, ij, mj, mi, rk)
#endif
for (img = 0; img < nImg; ++img) {
for (ofm1 = 0; ofm1 < nBOfm; ++ofm1) {
for (mj = 0; mj < Mh; ++mj ) {
for (mi = 0; mi < Mw; ++mi ) {
for (ifm1 = 0; ifm1 < nBIfm; ++ifm1) {
for (rk = 0; rk < RK; ++rk ) {
for (oj = 0; oj < ofh; ++oj) {
ij = oj * stride_h - pad_h;
if ( rk == 0 && ifm1 == 0 ) {
fwd_brgemmz( &LIBXSMM_VLA_ACCESS(8, filter_t, ofm1, ifm1, mi, rk, 0, 0, 0, 0, nBIfm, Mw, RK, kh, kw, nbIfm, nbOfm) /* A */,
&LIBXSMM_VLA_ACCESS(7, poses_t, img, ifm1, mj, rk, ij, 0, 0, nBIfm, Mh, RK, ifhp, ifwp, nbIfm) /* B */,
&LIBXSMM_VLA_ACCESS(7, votes_t, img, ofm1, mj, mi, oj, 0, 0, nBOfm, Mh, Mw, ofhp, ofwp, nbOfm) /* C */,
&brcount, aoff, boff );
} else {
fwd_brgemma( &LIBXSMM_VLA_ACCESS(8, filter_t, ofm1, ifm1, mi, rk, 0, 0, 0, 0, nBIfm, Mw, RK, kh, kw, nbIfm, nbOfm) /* A */,
&LIBXSMM_VLA_ACCESS(7, poses_t, img, ifm1, mj, rk, ij, 0, 0, nBIfm, Mh, RK, ifhp, ifwp, nbIfm) /* B */,
&LIBXSMM_VLA_ACCESS(7, votes_t, img, ofm1, mj, mi, oj, 0, 0, nBOfm, Mh, Mw, ofhp, ofwp, nbOfm) /* C */,
&brcount, aoff, boff );
}
}
}
}
}
}
}
}
}
/* Fills the BRGEMM offset arrays for the kh*kw batch-reduce steps:
 * aoff[i] is the byte offset of the (kj,ki) sub-filter inside a blocked
 * filter tile, boff[i] the byte offset of the matching (kj,ki)-shifted
 * input position; i enumerates the filter window row-major. */
LIBXSMM_INLINE void compute_broff(gemm_conv_t* param, unsigned long long* aoff, unsigned long long* boff) {
  const int nbIfm = param->nbIfm;
  const int nbOfm = param->nbOfm;
  const int ifwp = param->ifwp;
  const int kh = param->kh;
  const int kw = param->kw;
  int idx;
  /* single flat walk over the kh x kw window */
  for (idx = 0; idx < kh * kw; ++idx) {
    const int kj = idx / kw;
    const int ki = idx % kw;
    aoff[idx] = (kj*(kw*nbIfm*nbOfm) + ki*(nbIfm*nbOfm))*sizeof(float);
    boff[idx] = (kj*(ifwp*nbIfm) + ki*(nbIfm))*sizeof(float);
  }
}
/* Driver: parses the layer geometry from the command line, builds the naive
 * and blocked parameter structs, JITs the two BRGEMM kernels, runs the
 * reference and the blocked forward pass, compares them, and benchmarks the
 * blocked path. Returns 0 on success, -1 on usage/validation errors. */
int main(int argc, char* argv[])
{
float *naive_input, *naive_output, *naive_filter;
float *gemm_input, *gemm_output, *gemm_filter;
float *check_output;
unsigned long long *aoff, *boff;
int ifhp, ifwp, ofhp, ofwp, ofh, ofw;
int stride_h, stride_w, pad_h_in, pad_w_in, pad_h_out, pad_w_out;
int ldx;
int brcount;
naive_conv_t naive_param;
gemm_conv_t gemm_param;
correctness_t norms_fwd;
/* some parameters we can overwrite via cli,
default is some inner layer of overfeat */
int iters = 100; /* repetitions of benchmark */
int ifw = 16; /* input width, "W" */
int ifh = 16; /* input height, "H" */
int nImg = 128; /* mini-batch size, "N" */
int nIfm = 128; /* number of input feature maps, "C" */
int nOfm = 256; /* number of output feature maps, "K" */
int kh = 3; /* filter height, "R" */
int kw = 3; /* filter width, "S" */
int pad_h = 0; /* padding in output */
int pad_w = 0; /* padding in output */
int stride = 2; /* stride when accessing inputs */
int Mh = 4;
int Mw = 4;
int RK = 4;
char type = 'F'; /* 'A': ALL, 'F': FP, 'B': BP, 'U', WU */
#if defined(_OPENMP)
int nThreads = omp_get_max_threads(); /* number of threads */
#else
int nThreads = 1; /* number of threads */
#endif
unsigned long long l_start, l_end;
double l_total = 0.0;
double flops = 0.0;
int i;
float beta=0.0f; /* beta for the zeroing kernel (fwd_brgemmz) */
memset(&norms_fwd, 0, sizeof(norms_fwd));
if (argc > 1 && !strncmp(argv[1], "-h", 3)) {
printf("\n\n\nUsage: %s iters H W N C K R S pad stride type(F,B,U,A)\n\n\n", argv[0]);
return -1;
}
libxsmm_rng_set_seed(1);
/* reading new values from cli */
i = 1;
if (argc > i) iters = atoi(argv[i++]);
if (argc > i) ifw = atoi(argv[i++]);
if (argc > i) ifh = atoi(argv[i++]);
if (argc > i) nImg = atoi(argv[i++]);
if (argc > i) nIfm = atoi(argv[i++]);
if (argc > i) nOfm = atoi(argv[i++]);
if (argc > i) kw = atoi(argv[i++]);
if (argc > i) kh = atoi(argv[i++]);
if (argc > i) pad_w = atoi(argv[i++]);
if (argc > i) pad_h = atoi(argv[i++]);
if (argc > i) stride = atoi(argv[i++]);
if (argc > i) RK = atoi(argv[i++]);
if (argc > i) Mw = atoi(argv[i++]);
if (argc > i) Mh = atoi(argv[i++]);
if (argc > i) type = *(argv[i++]);
/* apply stride in both dimensions */
stride_w = stride;
stride_h = stride;
/* handle physical padding */
#ifdef USE_PHYSICAL_PADDING
#error "physical padding is not supported right now!"
pad_h_in = pad_h;
pad_w_in = pad_w;
pad_h_out = 0;
pad_w_out = 0;
#else
pad_h_in = 0;
pad_w_in = 0;
pad_h_out = 0;
pad_w_out = 0;
#endif
/* deriving some values image size */
ofh = (ifh + 2 * pad_h - kh) / stride_h + 1;
ofw = (ifw + 2 * pad_w - kw) / stride_w + 1;
ifhp = ifh + 2 * pad_h_in;
ifwp = ifw + 2 * pad_w_in;
ofhp = ofh + 2 * pad_h_out;
ofwp = ofw + 2 * pad_w_out;
/* set struct for naive convolution */
naive_param.nImg = nImg;
naive_param.nIfm = nIfm;
naive_param.nOfm = nOfm;
naive_param.ifhp = ifhp;
naive_param.ifwp = ifwp;
naive_param.ofhp = ofhp;
naive_param.ofwp = ofwp;
naive_param.ifh = ifh;
naive_param.ifw = ifw;
naive_param.ofh = ofh;
naive_param.ofw = ofw;
naive_param.pad_h = pad_h;
naive_param.pad_w = pad_w;
naive_param.pad_h_in = pad_h_in;
naive_param.pad_w_in = pad_w_in;
naive_param.pad_h_out = pad_h_out;
naive_param.pad_w_out = pad_w_out;
naive_param.kh = kh;
naive_param.kw = kw;
naive_param.stride_h = stride_h;
naive_param.stride_w = stride_w;
naive_param.RK = RK;
naive_param.Mh = Mh;
naive_param.Mw = Mw;
/* set struct for naive convolution */
gemm_param.nImg = nImg;
gemm_param.nBIfm = nIfm/CHANNEL_BLOCKING;
gemm_param.nbIfm = CHANNEL_BLOCKING;
gemm_param.nBOfm = nOfm/CHANNEL_BLOCKING;
gemm_param.nbOfm = CHANNEL_BLOCKING;
gemm_param.ifhp = ifhp;
gemm_param.ifwp = ifwp;
gemm_param.ofhp = ofhp;
gemm_param.ofwp = ofwp;
gemm_param.ifh = ifh;
gemm_param.ifw = ifw;
gemm_param.ofh = ofh;
gemm_param.ofw = ofw;
gemm_param.pad_h = pad_h;
gemm_param.pad_w = pad_w;
gemm_param.pad_h_in = pad_h_in;
gemm_param.pad_w_in = pad_w_in;
gemm_param.pad_h_out = pad_h_out;
gemm_param.pad_w_out = pad_w_out;
gemm_param.kh = kh;
gemm_param.kw = kw;
gemm_param.stride_h = stride_h;
gemm_param.stride_w = stride_w;
gemm_param.RK = RK;
gemm_param.Mh = Mh;
gemm_param.Mw = Mw;
/* compute brcount */
brcount = kh*kw;
gemm_param.brcount = brcount;
/* some empty lines at the beginning */
printf("\n\n\n");
/* print some summary */
printf("##########################################\n");
printf("#          Setting Up                    #\n");
printf("##########################################\n");
printf("PARAMS: W:%d  H:%d  N:%d  C:%d  K:%d  R:%d  S:%d  P:%d  Q:%d  STRIDE: %d RK: %d Mh: %d Mw: %d\n", ifw, ifh, nImg, nIfm, nOfm, kw, kh, ofh, ofw, stride, RK, Mh, Mw);
printf("PARAMS: ITERS:%d  Threads:%d\n", iters, nThreads);
printf(" InImg %dx%d Padded (%dx%d)\n", ifh, ifw, ifhp, ifwp);
printf("OutImg %dx%d Padded (%dx%d)\n", ofh, ofw, ofhp, ofwp);
printf("SIZE Poses  (MB): %10.2f MiB\n", (double)(nImg*nIfm*ifhp*ifwp*Mh*RK*sizeof(float))/(1024.0*1024.0) );
printf("SIZE Votes (MB): %10.2f MiB\n", (double)(nImg*nOfm*ofhp*ofwp*Mh*Mw*sizeof(float))/(1024.0*1024.0) );
printf("SIZE Poses   (1): %10.2f MiB\n", (double)(1*nIfm*ifhp*ifwp*Mh*RK*   sizeof(float))/(1024.0*1024.0) );
printf("SIZE Votes  (1): %10.2f MiB\n", (double)(1*nOfm*ofhp*ofwp*Mh*Mw*   sizeof(float))/(1024.0*1024.0) );
printf("SIZE Weight     : %10.2f MiB\n", (double)(nIfm*nOfm*kw*kh*Mw*RK*    sizeof(float))/(1024.0*1024.0) );
/* check for pass to run */
/* NOTE(review): 'B' and 'U' are accepted here but only the forward pass
   ('A'/'F') is implemented below. */
if (type != 'A' && type != 'F' && type != 'B' && type != 'U') {
printf("\ntype needs to be 'A' (All), 'F' (FP only), 'B' (BP only), 'U' (WU only)\n\n\n");
return -1;
}
if ((nIfm % CHANNEL_BLOCKING != 0) || (nOfm % CHANNEL_BLOCKING != 0) ) {
printf("\nThis code only works for ofm/ifm mod %i = 0!\n\n\n", CHANNEL_BLOCKING);
return -1;
}
if (pad_w !=0 || pad_h !=0 || pad_h_in != 0 || pad_w_in != 0 || pad_h_out !=0 || pad_w_out != 0) {
printf("\nThis code doesn't support padding right now\n!");
return -1;
}
/* apply stride in both dimensions */
/* JIT GEMM kernel */
/* ldb accounts for the strided access to input rows */
ldx = stride_w*CHANNEL_BLOCKING;
fwd_brgemmz = libxsmm_smmdispatch_reducebatch_offs_unroll(CHANNEL_BLOCKING, ofwp, CHANNEL_BLOCKING, brcount, NULL, &ldx, NULL, NULL, &beta, NULL, NULL);
fwd_brgemma = libxsmm_smmdispatch_reducebatch_offs_unroll(CHANNEL_BLOCKING, ofwp, CHANNEL_BLOCKING, brcount, NULL, &ldx, NULL, NULL, NULL, NULL, NULL);
printf("BRGEMM FWD col-major: m=%d, n=%d, k=%d, lda=%d, ldb=%d, ldc=%d, transa='n', transb='n', alpha=1.0, beta=1.0, brcount=%d\n", CHANNEL_BLOCKING, ofwp, CHANNEL_BLOCKING, CHANNEL_BLOCKING, stride_w*CHANNEL_BLOCKING, CHANNEL_BLOCKING, brcount);
/* allocate data */
/* NOTE(review): the size products are evaluated in "int" before widening;
   very large problem sizes could overflow here — TODO confirm acceptable. */
naive_input = (float*)libxsmm_aligned_malloc( nImg*nIfm*ifhp*ifwp*Mh*RK*sizeof(float), 2097152);
naive_output = (float*)libxsmm_aligned_malloc( nImg*nOfm*ofhp*ofwp*Mh*Mw*sizeof(float), 2097152);
naive_filter = (float*)libxsmm_aligned_malloc( nOfm*nIfm*kh*kw*Mw*RK*    sizeof(float), 2097152);
gemm_input = (float*)libxsmm_aligned_malloc( nImg*nIfm*ifhp*ifwp*Mh*RK*sizeof(float), 2097152);
gemm_output = (float*)libxsmm_aligned_malloc( nImg*nOfm*ofhp*ofwp*Mh*Mw*sizeof(float), 2097152);
gemm_filter = (float*)libxsmm_aligned_malloc( nOfm*nIfm*kh*kw*Mw*RK*    sizeof(float), 2097152);
check_output = (float*)libxsmm_aligned_malloc( nImg*nOfm*ofhp*ofwp*Mh*Mw*sizeof(float), 2097152);
aoff = (unsigned long long*)libxsmm_aligned_malloc( brcount*sizeof(unsigned long long), 2097152);
boff = (unsigned long long*)libxsmm_aligned_malloc( brcount*sizeof(unsigned long long), 2097152);
/* initialize data */
init_buf(naive_input, nImg*nIfm*ifhp*ifwp*Mh*RK, 0, 0);
set_zeropad_nchw(naive_input, nImg, nIfm, ifhp, ifwp, Mh, RK, pad_h_in, pad_w_in);
init_buf(naive_filter, nOfm*nIfm*kh*kw*Mw*RK, 0, 0);
zero_buf(naive_output, nImg*nOfm*ofhp*ofwp*Mw*Mh);
/* copy data into GEMM optimized format */
copy_naiveP_to_GEMM(naive_input, gemm_input, nImg, ifhp, ifwp, nIfm, Mh, RK);
copy_naiveF_to_GEMM(naive_filter, gemm_filter, kh, kw, nIfm, nOfm, RK, Mw);
zero_buf(gemm_output, nImg*nOfm*ofhp*ofwp*Mw*Mh);
/* compute BRGEMM offsets */
compute_broff( &gemm_param, aoff, boff );
/* check correctness forward */
if (type == 'A' || type == 'F') {
printf("##########################################\n");
printf("#  Correctness - FWD (custom-Storage)    #\n");
printf("##########################################\n");
/* run naive convolution */
naive_convcaps_fp(&naive_param, naive_input, naive_output, naive_filter);
gemm_convcaps_fp(&gemm_param, gemm_input, gemm_output, gemm_filter, aoff, boff);
copy_GEMM_to_naiveV(gemm_output, check_output, nImg, ofhp, ofwp, nOfm, Mh, Mw);
/* compare */
compare_buf(naive_output, check_output, nImg*nOfm*ofhp*ofwp*Mh*Mw, &norms_fwd);
printf("             1-norm of reference: %f\n", norms_fwd.one_norm_ref);
printf("             1-norm of GEMM-code: %f\n", norms_fwd.one_norm_test);
printf("      L2-error-norm of GEMM-code: %f\n", norms_fwd.l2_rel_err);
printf("    inf-norm of comp. rel. error: %f\n", norms_fwd.max_rel_err);
printf("    inf-norm of comp. abs. error: %f\n", norms_fwd.max_abs_err);
}
/* benchmark forward */
if (type == 'A' || type == 'F') {
printf("##########################################\n");
printf("#   Performance - FWD (custom-Storage)   #\n");
printf("##########################################\n");
/* run LIBXSMM convolution for performance */
l_start = libxsmm_timer_tick();
for (i = 0; i < iters; ++i) {
gemm_convcaps_fp(&gemm_param, gemm_input, gemm_output, gemm_filter, aoff, boff);
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
flops = (double)nImg * (double)nIfm * (double)nOfm * (double)ofh * (double)ofw * (double)(2 * kh * kw) * (double)RK * (double)Mh * (double)Mw * (double)iters;
printf("GFLOP  = %.5g\n", flops*1e-9/(double)iters);
printf("fp time = %.5g\n", ((double)(l_total/iters)));
printf("GFLOPS  = %.5g\n", (flops*1e-9)/l_total);
printf("PERFDUMP,FP,%s,%i,%i,%i,%i,%i,%i,%i,%i,%i,%i,%i,%i,%i,%i,%.5g,%.5g,%f,%f,%f,%f,%f\n", LIBXSMM_VERSION, nThreads, nImg, nIfm, nOfm,
ifw, ifh, kw, kh, stride, pad_h, pad_w, RK, Mh, Mw, ((double)(l_total/iters)), (flops*1e-9)/l_total,
norms_fwd.max_rel_err, norms_fwd.max_abs_err, norms_fwd.l2_rel_err, norms_fwd.one_norm_ref, norms_fwd.one_norm_test );
}
/* deallocate data */
libxsmm_free(naive_input);
libxsmm_free(naive_output);
libxsmm_free(naive_filter);
libxsmm_free(gemm_input);
libxsmm_free(gemm_output);
libxsmm_free(gemm_filter);
libxsmm_free(check_output);
libxsmm_free(aoff);
libxsmm_free(boff);
/* some empty lines at the end */
printf("\n\n\n");
return 0;
}
|
target_teams_distribute_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute'}}
#pragma omp target teams distribute
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute'}}
#pragma omp target teams distribute foo
// Checks that the bare directive parses and that its associated statement
// must be a canonical for loop (the diagnostics are encoded in the
// check-comments below; their @+N offsets are relative, do not insert lines
// between a check-comment and its target).
void test_no_clause() {
int i;
#pragma omp target teams distribute
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp target teams distribute' must be a for loop}}
#pragma omp target teams distribute
++i;
}
// Verifies the branch-protected scope of the region: goto/return that cross
// the region boundary are diagnosed, while a jump that stays inside the
// region (to L2) is allowed.
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp target teams distribute
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
// Unknown tokens after the directive name produce an "extra tokens" warning.
void test_invalid_clause() {
int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
#pragma omp target teams distribute foo bar
for (i = 0; i < 16; ++i)
;
}
// Non-identifier tokens (';' and ',') after the directive or a clause are
// warned about and ignored.
void test_non_identifiers() {
int i, x;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
#pragma omp target teams distribute;
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
#pragma omp target teams distribute private(x);
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
#pragma omp target teams distribute, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
// Exercises the 'collapse' clause: malformed parenthesization, non-constant /
// non-positive / non-integer arguments, and the requirement that the directive
// be followed by as many perfectly nested loops as the clause specifies.
void test_collapse() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp target teams distribute collapse
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute collapse(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute collapse()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute collapse(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute collapse(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target teams distribute collapse 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
#pragma omp target teams distribute collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target teams distribute collapse(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target teams distribute collapse(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute collapse(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute collapse(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{loop iteration variable in the associated loop of 'omp target teams distribute' directive may not be firstprivate, predetermined as private}}
// expected-note@+1 {{defined as firstprivate}}
#pragma omp target teams distribute collapse(2) firstprivate(i)
for (i = 0; i < 16; ++i)
for (int j = 0; j < 16; ++j)
#pragma omp parallel for reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
// Exercises the 'private' clause: malformed argument lists, non-variable
// arguments, and valid single/multiple variable lists.
void test_private() {
int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute private(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute private(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute private(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute private()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute private(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target teams distribute private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
// Exercises the 'lastprivate' clause: same malformed-argument matrix as
// 'private', plus valid variable lists.
void test_lastprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute lastprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute lastprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute lastprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute lastprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute lastprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target teams distribute lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// Exercises the 'firstprivate' clause: malformed argument lists and the
// rule that a variable may not be both lastprivate and firstprivate on
// this directive.
void test_firstprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute firstprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute firstprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute firstprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute firstprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute firstprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
// expected-error@+1 {{lastprivate variable cannot be firstprivate}} expected-note@+1 {{defined as lastprivate}}
#pragma omp target teams distribute lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{lastprivate variable cannot be firstprivate}} expected-note@+1 2 {{defined as lastprivate}}
#pragma omp target teams distribute lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 3 {{lastprivate variable cannot be firstprivate}} expected-note@+1 3 {{defined as lastprivate}}
#pragma omp target teams distribute lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// The loop iteration variable of the associated loop must have integer or
// pointer type; float and double induction variables are diagnosed.
void test_loop_messages() {
float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
|
hermv_c_bsr_u_hi.c | #include<string.h>
#ifdef _OPENMP
#include<omp.h>
#endif
#include"alphasparse/opt.h"
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
// Sparse matrix-vector product for a square BSR matrix, using only the
// stored upper-triangular blocks (file name suggests: hermitian, unit
// diagonal, upper storage — TODO confirm against the dispatch layer).
// Computes y = beta*y + alpha*x + alpha*(U + mirror(U))*x, where the
// mirror contribution is accumulated via alpha_madde_2c (presumably the
// conjugated transpose update — verify against alpha_madde_2c's definition).
// Each thread accumulates into its own tmp[] vector; a second parallel
// pass reduces the per-thread vectors into y.
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
const ALPHA_SPMAT_BSR *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
const ALPHA_INT thread_num = alpha_get_thread_num();
// m and n are only used by the commented-out assert below.
const ALPHA_INT m = A->rows * A->block_size;
const ALPHA_INT n = A->cols * A->block_size;
const ALPHA_INT bs = A->block_size;
const ALPHA_INT bs2 = bs * bs;
// assert(m==n);
ALPHA_INT b_rows = A->rows;
ALPHA_INT b_cols = A->cols;
if (b_rows != b_cols)
return ALPHA_SPARSE_STATUS_INVALID_VALUE;
// VLA: partition[t]..partition[t+1] is the block-row range of thread t,
// balanced by nonzero count rather than by row count.
ALPHA_INT partition[thread_num + 1];
balanced_partition_row_by_nnz(A->rows_end, b_rows, thread_num, partition);
// NOTE(review): malloc results here and inside the parallel region are
// not checked for NULL.
ALPHA_Number **tmp = (ALPHA_Number **)malloc(sizeof(ALPHA_Number *) * thread_num);
#ifdef _OPENMP
#pragma omp parallel num_threads(thread_num)
#endif
{
const ALPHA_INT tid = alpha_get_thread_id();
const ALPHA_INT local_m_s = partition[tid];
const ALPHA_INT local_m_e = partition[tid + 1];
// Per-thread accumulator over the full result vector (avoids races,
// since the mirror update writes outside the thread's own row range).
tmp[tid] = (ALPHA_Number *)malloc(sizeof(ALPHA_Number) * b_rows * bs);
memset(tmp[tid], 0, sizeof(ALPHA_Number) * b_rows * bs);
if (A->block_layout == ALPHA_SPARSE_LAYOUT_ROW_MAJOR)
{
for (ALPHA_INT br = local_m_s; br < local_m_e; br++)
{
ALPHA_INT row = br * bs;
ALPHA_INT block_start = A->rows_start[br], block_end = A->rows_end[br];
// Skip to the first block at or beyond the diagonal (upper part only).
ALPHA_INT upper_start = alpha_lower_bound(&A->col_indx[block_start], &A->col_indx[block_end], br) - A->col_indx;
for (ALPHA_INT ai = upper_start; ai < block_end; ai++)
{
ALPHA_INT bc = A->col_indx[ai];
ALPHA_INT col = bc * bs;
ALPHA_INT a0_idx = ai * bs2;
// diagonal block containing diagonal entry
if (bc == br)
{
// Strictly-upper entries of the diagonal block; the diagonal
// itself is skipped (its contribution is the alpha*x term below).
for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
{
for (ALPHA_INT b_col = b_row + 1; b_col < bs; b_col++)
{
alpha_madde(tmp[tid][b_row + row], A->values[a0_idx + b_row * bs + b_col], x[col + b_col]);
alpha_madde_2c(tmp[tid][b_col + col], A->values[a0_idx + b_row * bs + b_col], x[row + b_row]);
}
}
}
else
{
// Off-diagonal block: full block plus its mirrored contribution.
for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
{
for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
{
alpha_madde(tmp[tid][b_row + row], A->values[a0_idx + b_row * bs + b_col], x[col + b_col]);
alpha_madde_2c(tmp[tid][b_col + col], A->values[a0_idx + b_row * bs + b_col], x[row + b_row]);
}
}
}
}
}
}
else if (A->block_layout == ALPHA_SPARSE_LAYOUT_COLUMN_MAJOR)
{
// Same traversal as above with column-major intra-block indexing
// (values[a0_idx + b_col * bs + b_row]).
for (ALPHA_INT br = local_m_s; br < local_m_e; br++)
{
ALPHA_INT row = br * bs;
ALPHA_INT block_start = A->rows_start[br], block_end = A->rows_end[br];
ALPHA_INT upper_start = alpha_lower_bound(&A->col_indx[block_start], &A->col_indx[block_end], br) - A->col_indx;
for (ALPHA_INT ai = upper_start; ai < block_end; ai++)
{
ALPHA_INT bc = A->col_indx[ai];
ALPHA_INT col = bc * bs;
ALPHA_INT a0_idx = ai * bs2;
// diagonal block containing diagonal entry
if (bc == br)
{
for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
{
for (ALPHA_INT b_row = 0; b_row < b_col; b_row++)
{
alpha_madde(tmp[tid][b_row + row], A->values[a0_idx + b_col * bs + b_row], x[col + b_col]);
alpha_madde_2c(tmp[tid][b_col + col], A->values[a0_idx + b_col * bs + b_row], x[row + b_row]);
}
}
}
else
{
for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
{
for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
{
alpha_madde(tmp[tid][b_row + row], A->values[a0_idx + b_col * bs + b_row], x[col + b_col]);
alpha_madde_2c(tmp[tid][b_col + col], A->values[a0_idx + b_col * bs + b_row], x[row + b_row]);
}
}
}
}
}
}
}
// Reduction: y[i] = beta*y[i] + alpha*x[i] (implicit unit diagonal)
// + alpha * sum over threads of tmp[j][i].
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < b_cols * bs; ++i)
{
ALPHA_Number tmp_y;
alpha_setzero(tmp_y);
for (ALPHA_INT j = 0; j < thread_num; ++j)
{
alpha_add(tmp_y, tmp_y, tmp[j][i]);
}
alpha_mul(y[i], y[i], beta);
alpha_madde(y[i], x[i], alpha);
alpha_madde(y[i], tmp_y, alpha);
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < thread_num; ++i)
{
free(tmp[i]);
}
free(tmp);
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
GB_binop__isne_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__isne_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__isne_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_int8)
// A*D function (colscale): GB (_AxD__isne_int8)
// D*A function (rowscale): GB (_DxB__isne_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_int8)
// C=scalar+B GB (_bind1st__isne_int8)
// C=scalar+B' GB (_bind1st_tran__isne_int8)
// C=A+scalar GB (_bind2nd__isne_int8)
// C=A'+scalar GB (_bind2nd_tran__isne_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_INT8 || GxB_NO_ISNE_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__isne_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// C = A+B with all three matrices dense; the loop body is supplied by the
// included template, specialized through the GB_* macros defined above.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isne_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
// C += B where C is dense; B's entries are pre-sliced into B_ntasks tasks
// for B_nthreads threads (slicing computed by the caller).
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isne_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
// C += b: accumulate a scalar (passed type-erased via p_bwork) into the
// dense matrix C; the loop body comes from the included template.
// NOTE(review): this file is auto-generated; the unreachable duplicate
// "return (GrB_SUCCESS) ;" that followed the inner block has been removed
// here — the same fix belongs in the Generator/ template.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isne_int8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
// C = A*D: scale the columns of A by the diagonal matrix D (colscale).
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isne_int8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
// C = D*B: scale the rows of B by the diagonal matrix D (rowscale).
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isne_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
// eWiseAdd C=A+B (optionally masked); per-matrix slicing workspace is
// declared here, filled inside the template, and released by GB_FREE_WORK.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isne_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
// eWiseMult C=A.*B with C sparse/hyper; work is pre-split into TaskList
// by the caller and executed by the included meta-template.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isne_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
// eWiseMult where A is sparse/hyper and B is bitmap/full. GB_BINOP_FLIP is
// 0 for ISNE (x != y is commutative), so only the non-flipped branch below
// is compiled for this operator.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isne_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
// eWiseMult C<M> = A.*B with M sparse/hyper and A, B bitmap/full
// (see banner above); the loop body comes from the included template.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isne_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
// eWiseMult producing a bitmap C; the included template handles the
// masked / complemented-mask variants.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isne_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
// Cx [p] = (x != Bx [p]) with the scalar x bound to the first operand;
// entries absent from the bitmap Bb are skipped.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isne_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
// Cx [p] = (Ax [p] != y) with the scalar y bound to the second operand;
// entries absent from the bitmap Ab are skipped.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__isne_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = op (x, A'): transpose A while applying cij = (x != aij) through the
// GB_CAST_OP macro defined just above this function.
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for code generated after this function.
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__isne_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = op (A', y): transpose A while applying cij = (aij != y) through the
// GB_CAST_OP macro defined just above this function.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
SparseOperations_impl.h | // Copyright (c) 2004-2022 Tomáš Oberhuber et al.
//
// This file is part of TNL - Template Numerical Library (https://tnl-project.org/)
//
// SPDX-License-Identifier: MIT
// Implemented by: Jakub Klinkovský
#pragma once
#include <type_traits>
#include <stdexcept>
#include <algorithm>
#include <memory> // std::unique_ptr
#include <TNL/Pointers/DevicePointer.h>
#include <TNL/Algorithms/ParallelFor.h>
namespace TNL {
namespace Matrices {
#ifdef HAVE_CUDA
// CUDA kernel: for each row, count the leading entries whose column index is
// in range (counting stops at the first out-of-range/padding column index)
// and store the count in rowLengths. Uses a grid-stride loop over rows.
template< typename Vector, typename Matrix >
__global__
void
SparseMatrixSetRowLengthsVectorKernel( Vector* rowLengths,
const Matrix* matrix,
typename Matrix::IndexType rows,
typename Matrix::IndexType cols )
{
using IndexType = typename Matrix::IndexType;
IndexType rowIdx = blockIdx.x * blockDim.x + threadIdx.x;
const IndexType gridSize = blockDim.x * gridDim.x;
while( rowIdx < rows ) {
const auto row = matrix->getRow( rowIdx );
IndexType length = 0;
for( IndexType c_j = 0; c_j < row.getSize(); c_j++ )
if( row.getColumnIndex( c_j ) < cols )
length++;
else
break;
rowLengths[ rowIdx ] = length;
rowIdx += gridSize;
}
}
// CUDA kernel: copy the first rowLengths[rowIdx] entries of each row of B
// into the corresponding row of A (A's capacities must already match).
// Uses a grid-stride loop over rows.
template< typename Matrix1, typename Matrix2 >
__global__
void
SparseMatrixCopyKernel( Matrix1* A,
const Matrix2* B,
const typename Matrix2::IndexType* rowLengths,
typename Matrix2::IndexType rows )
{
using IndexType = typename Matrix2::IndexType;
IndexType rowIdx = blockIdx.x * blockDim.x + threadIdx.x;
const IndexType gridSize = blockDim.x * gridDim.x;
while( rowIdx < rows ) {
const auto length = rowLengths[ rowIdx ];
const auto rowB = B->getRow( rowIdx );
auto rowA = A->getRow( rowIdx );
for( IndexType c = 0; c < length; c++ )
rowA.setElement( c, rowB.getColumnIndex( c ), rowB.getValue( c ) );
rowIdx += gridSize;
}
}
#endif
// copy on the same device
// Same-device sparse matrix copy: A = B. Two phases — measure row lengths
// (stopping at the first padding/out-of-range column index), allocate A's
// row capacities, then copy the entries row by row. Host uses OpenMP loops;
// CUDA uses the two kernels defined above.
template< typename Matrix1, typename Matrix2 >
typename std::enable_if< std::is_same< typename Matrix1::DeviceType, typename Matrix2::DeviceType >::value >::type
copySparseMatrix_impl( Matrix1& A, const Matrix2& B )
{
static_assert( std::is_same< typename Matrix1::RealType, typename Matrix2::RealType >::value,
"The matrices must have the same RealType." );
static_assert( std::is_same< typename Matrix1::DeviceType, typename Matrix2::DeviceType >::value,
"The matrices must be allocated on the same device." );
static_assert( std::is_same< typename Matrix1::IndexType, typename Matrix2::IndexType >::value,
"The matrices must have the same IndexType." );
// NOTE(review): the RealType alias below appears unused in this function.
using RealType = typename Matrix1::RealType;
using DeviceType = typename Matrix1::DeviceType;
using IndexType = typename Matrix1::IndexType;
const IndexType rows = B.getRows();
const IndexType cols = B.getColumns();
A.setDimensions( rows, cols );
if( std::is_same< DeviceType, Devices::Host >::value ) {
// set row lengths
typename Matrix1::RowsCapacitiesType rowLengths;
rowLengths.setSize( rows );
#ifdef HAVE_OPENMP
#pragma omp parallel for if( Devices::Host::isOMPEnabled() )
#endif
for( IndexType i = 0; i < rows; i++ ) {
const auto row = B.getRow( i );
IndexType length = 0;
for( IndexType c_j = 0; c_j < row.getSize(); c_j++ )
if( row.getColumnIndex( c_j ) < cols )
length++;
else
break;
rowLengths[ i ] = length;
}
A.setRowCapacities( rowLengths );
// copy the entries; rows are independent, so this parallelizes cleanly
#ifdef HAVE_OPENMP
#pragma omp parallel for if( Devices::Host::isOMPEnabled() )
#endif
for( IndexType i = 0; i < rows; i++ ) {
const auto length = rowLengths[ i ];
const auto rowB = B.getRow( i );
auto rowA = A.getRow( i );
for( IndexType c = 0; c < length; c++ )
rowA.setElement( c, rowB.getColumnIndex( c ), rowB.getValue( c ) );
}
}
if( std::is_same< DeviceType, Devices::Cuda >::value ) {
#ifdef HAVE_CUDA
dim3 blockSize( 256 );
dim3 gridSize;
const IndexType desGridSize = 32 * Cuda::DeviceInfo::getCudaMultiprocessors( Cuda::DeviceInfo::getActiveDevice() );
gridSize.x = min( desGridSize, Cuda::getNumberOfBlocks( rows, blockSize.x ) );
typename Matrix1::RowsCapacitiesType rowLengths;
rowLengths.setSize( rows );
Pointers::DevicePointer< Matrix1 > Apointer( A );
const Pointers::DevicePointer< const Matrix2 > Bpointer( B );
// set row lengths
Pointers::synchronizeSmartPointersOnDevice< Devices::Cuda >();
SparseMatrixSetRowLengthsVectorKernel<<< gridSize,
blockSize >>>( rowLengths.getData(), &Bpointer.template getData< TNL::Devices::Cuda >(), rows, cols );
TNL_CHECK_CUDA_DEVICE;
Apointer->setRowCapacities( rowLengths );
// copy rows
Pointers::synchronizeSmartPointersOnDevice< Devices::Cuda >();
SparseMatrixCopyKernel<<< gridSize,
blockSize >>>( &Apointer.template modifyData< TNL::Devices::Cuda >(),
&Bpointer.template getData< TNL::Devices::Cuda >(),
rowLengths.getData(),
rows );
TNL_CHECK_CUDA_DEVICE;
#else
throw Exceptions::CudaSupportMissing();
#endif
}
}
// cross-device copy (host -> gpu)
// Cross-device copy, host source: stage B on the GPU, then do a same-device
// copy into A.
template< typename Matrix1, typename Matrix2 >
typename std::enable_if< ! std::is_same< typename Matrix1::DeviceType, typename Matrix2::DeviceType >::value
&& std::is_same< typename Matrix2::DeviceType, Devices::Host >::value >::type
copySparseMatrix_impl( Matrix1& A, const Matrix2& B )
{
using CudaMatrix2 = typename Matrix2::template Self< typename Matrix2::RealType, Devices::Cuda >;
CudaMatrix2 B_tmp;
B_tmp = B;
copySparseMatrix_impl( A, B_tmp );
}
// cross-device copy (gpu -> host)
// Cross-device copy, CUDA source: build the result on the GPU in A's format,
// then transfer it to the host matrix A.
template< typename Matrix1, typename Matrix2 >
typename std::enable_if< ! std::is_same< typename Matrix1::DeviceType, typename Matrix2::DeviceType >::value
&& std::is_same< typename Matrix2::DeviceType, Devices::Cuda >::value >::type
copySparseMatrix_impl( Matrix1& A, const Matrix2& B )
{
using CudaMatrix1 = typename Matrix1::template Self< typename Matrix1::RealType, Devices::Cuda >;
CudaMatrix1 A_tmp;
copySparseMatrix_impl( A_tmp, B );
A = A_tmp;
}
// Public entry point: dispatches to the proper copySparseMatrix_impl
// overload via SFINAE on the matrices' DeviceType.
template< typename Matrix1, typename Matrix2 >
void
copySparseMatrix( Matrix1& A, const Matrix2& B )
{
copySparseMatrix_impl( A, B );
}
// Build the boolean adjacency pattern B of a square matrix A. When the
// caller does not guarantee a symmetric pattern, the pattern is symmetrized
// (B(j,i) is set whenever A(i,j) != 0). Diagonal entries are optionally
// excluded. Host-only (see static_asserts).
template< typename Matrix, typename AdjacencyMatrix >
void
copyAdjacencyStructure( const Matrix& A, AdjacencyMatrix& B, bool has_symmetric_pattern, bool ignore_diagonal )
{
   static_assert( std::is_same< typename Matrix::DeviceType, Devices::Host >::value,
                  "The function is not implemented for CUDA matrices - it would require atomic insertions "
                  "of elements into the sparse format." );
   static_assert( std::is_same< typename Matrix::DeviceType, typename AdjacencyMatrix::DeviceType >::value,
                  "The matrices must be allocated on the same device." );
   static_assert( std::is_same< typename Matrix::IndexType, typename AdjacencyMatrix::IndexType >::value,
                  "The matrices must have the same IndexType." );
   // static_assert( std::is_same< typename AdjacencyMatrix::RealType, bool >::value,
   //                "The RealType of the adjacency matrix must be bool." );
   using IndexType = typename Matrix::IndexType;
   if( A.getRows() != A.getColumns() ) {
      throw std::logic_error( "The matrix is not square: " + std::to_string( A.getRows() ) + " rows, "
                              + std::to_string( A.getColumns() ) + " columns." );
   }
   const IndexType N = A.getRows();
   B.setDimensions( N, N );
   // Pass 1: row capacities. A row's capacity is its own stored-entry count
   // plus, when symmetrization is needed, one slot in row j for every stored
   // (i,j) whose mirror (j,i) is not stored.
   typename AdjacencyMatrix::RowsCapacitiesType rowLengths;
   rowLengths.setSize( N );
   rowLengths.setValue( 0 );
   for( IndexType i = 0; i < A.getRows(); i++ ) {
      const auto row = A.getRow( i );
      IndexType length = 0;
      // FIX(review): use IndexType (not int) for the in-row index to avoid
      // narrowing, and subtract the diagonal slot only when a diagonal entry
      // is actually stored — the original decremented unconditionally, which
      // undercounted the capacity of rows without an explicit diagonal entry.
      bool diagonal_stored = false;
      for( IndexType c_j = 0; c_j < row.getSize(); c_j++ ) {
         const IndexType j = row.getColumnIndex( c_j );
         if( j >= A.getColumns() )
            break;
         length++;
         if( i == j )
            diagonal_stored = true;
         if( ! has_symmetric_pattern && i != j )
            if( A.getElement( j, i ) == 0 )
               rowLengths[ j ]++;
      }
      if( ignore_diagonal && diagonal_stored )
         length--;
      rowLengths[ i ] += length;
   }
   B.setRowCapacities( rowLengths );
   // Pass 2: insert the non-zero pattern, mirroring each entry unless the
   // pattern is already known to be symmetric.
   for( IndexType i = 0; i < A.getRows(); i++ ) {
      const auto row = A.getRow( i );
      for( IndexType c_j = 0; c_j < row.getSize(); c_j++ ) {
         const IndexType j = row.getColumnIndex( c_j );
         if( j >= A.getColumns() )
            break;
         if( ! ignore_diagonal || i != j )
            if( A.getElement( i, j ) != 0 ) {
               B.setElement( i, j, true );
               if( ! has_symmetric_pattern )
                  B.setElement( j, i, true );
            }
      }
   }
}
// Symmetric reordering: row i of matrix2 is row perm[i] of matrix1, with
// every column index mapped through iperm and the row re-sorted by the new
// column indices. Host-only.
template< typename Matrix1, typename Matrix2, typename PermutationArray >
void
reorderSparseMatrix( const Matrix1& matrix1, Matrix2& matrix2, const PermutationArray& perm, const PermutationArray& iperm )
{
// TODO: implement on GPU
static_assert( std::is_same< typename Matrix1::DeviceType, Devices::Host >::value,
"matrix reordering is implemented only for host" );
static_assert( std::is_same< typename Matrix2::DeviceType, Devices::Host >::value,
"matrix reordering is implemented only for host" );
static_assert( std::is_same< typename PermutationArray::DeviceType, Devices::Host >::value,
"matrix reordering is implemented only for host" );
using IndexType = typename Matrix1::IndexType;
matrix2.setDimensions( matrix1.getRows(), matrix1.getColumns() );
// set row lengths
typename Matrix2::RowsCapacitiesType rowLengths;
rowLengths.setSize( matrix1.getRows() );
for( IndexType i = 0; i < matrix1.getRows(); i++ ) {
const auto row = matrix1.getRow( perm[ i ] );
IndexType length = 0;
for( IndexType j = 0; j < row.getSize(); j++ )
if( row.getColumnIndex( j ) < matrix1.getColumns() )
length++;
rowLengths[ i ] = length;
}
matrix2.setRowCapacities( rowLengths );
// set row elements
for( IndexType i = 0; i < matrix2.getRows(); i++ ) {
const IndexType rowLength = rowLengths[ i ];
// extract sparse row
const auto row1 = matrix1.getRow( perm[ i ] );
// permute
std::unique_ptr< typename Matrix2::IndexType[] > columns{ new typename Matrix2::IndexType[ rowLength ] };
std::unique_ptr< typename Matrix2::RealType[] > values{ new typename Matrix2::RealType[ rowLength ] };
for( IndexType j = 0; j < rowLength; j++ ) {
columns[ j ] = iperm[ row1.getColumnIndex( j ) ];
values[ j ] = row1.getValue( j );
}
// sort: build an index permutation ordered by the new column indices,
// then write columns and values through it so the pair stays aligned
std::unique_ptr< IndexType[] > indices{ new IndexType[ rowLength ] };
for( IndexType j = 0; j < rowLength; j++ )
indices[ j ] = j;
auto comparator = [ &columns ]( IndexType a, IndexType b )
{
return columns[ a ] < columns[ b ];
};
std::sort( indices.get(), indices.get() + rowLength, comparator );
// set the row
auto row2 = matrix2.getRow( i );
for( IndexType j = 0; j < rowLength; j++ )
row2.setElement( j, columns[ indices[ j ] ], values[ indices[ j ] ] );
}
}
// Gather: dest[i] = src[perm[i]] for every i, executed on the arrays'
// device via ParallelFor (the lambda is __cuda_callable__ so the same code
// runs on host and GPU).
template< typename Array1, typename Array2, typename PermutationArray >
void
reorderArray( const Array1& src, Array2& dest, const PermutationArray& perm )
{
static_assert( std::is_same< typename Array1::DeviceType, typename Array2::DeviceType >::value,
"Arrays must reside on the same device." );
static_assert( std::is_same< typename Array1::DeviceType, typename PermutationArray::DeviceType >::value,
"Arrays must reside on the same device." );
TNL_ASSERT_EQ( src.getSize(), perm.getSize(), "Source array and permutation must have the same size." );
TNL_ASSERT_EQ( dest.getSize(), perm.getSize(), "Destination array and permutation must have the same size." );
using DeviceType = typename Array1::DeviceType;
using IndexType = typename Array1::IndexType;
auto kernel = [] __cuda_callable__( IndexType i,
const typename Array1::ValueType* src,
typename Array2::ValueType* dest,
const typename PermutationArray::ValueType* perm )
{
dest[ i ] = src[ perm[ i ] ];
};
Algorithms::ParallelFor< DeviceType >::exec(
(IndexType) 0, src.getSize(), kernel, src.getData(), dest.getData(), perm.getData() );
}
} // namespace Matrices
} // namespace TNL
|
composite_loop.c | /* SIMD loops without and with the simd schedule modifier
The simd schedule modifier in func_2() guarantees that a
preferred implementation-defined vector length is respected
when distributing the loop.
*/
void func_1(float *a, float *b, int n)
{
/* Worksharing SIMD loop with a plain static schedule: chunks of exactly
   5 iterations are handed out, with no regard for the vector length. */
#pragma omp for simd schedule(static, 5)
for (int idx = 0; idx < n; idx++)
{
    /* do some work on a and b */
}
}
void func_2(float *a, float *b, int n)
{
/* Same loop, but the simd: schedule modifier lets the implementation
   adjust the chunk size to a multiple of its preferred vector length. */
#pragma omp for simd schedule(simd:static, 5)
for (int idx = 0; idx < n; idx++)
{
    /* do some work on a and b */
}
}
|
hello.c | #include <stdio.h>
/* Print a greeting from the initial thread, then "X" once per thread in
   the OpenMP parallel region (just once when compiled without OpenMP). */
int main(int argc, char *argv[])
{
printf("Hello world!\n");
#pragma omp parallel
{
printf("X\n");
}
return 0;
}
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declaractions.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
Enumerated declaractions.
*/
/*
  Compression methods used for channel image data in a PSD file.
*/
typedef enum
{
Raw = 0,
RLE = 1,
ZipWithoutPrediction = 2,
ZipWithPrediction = 3
} PSDCompressionType;
/*
  PSD document color modes; numeric values follow the Photoshop file
  format specification referenced in the file header.
*/
typedef enum
{
BitmapMode = 0,
GrayscaleMode = 1,
IndexedMode = 2,
RGBMode = 3,
CMYKMode = 4,
MultichannelMode = 7,
DuotoneMode = 8,
LabMode = 9
} PSDImageType;
/*
Typedef declaractions.
*/
/*
  Per-channel record: channel type id and the byte size of its data.
*/
typedef struct _ChannelInfo
{
short
type;
size_t
size;
} ChannelInfo;
/*
  Layer-mask state: the mask image, its placement, background value, and
  flag bits.
*/
typedef struct _MaskInfo
{
Image
*image;
RectangleInfo
page;
unsigned char
background,
flags;
} MaskInfo;
/*
  Per-layer state parsed from the PSD layer records: channels, blend key,
  decoded image, mask, opacity, placement, flags, and name (name[257]
  presumably holds a length-prefixed Pascal string — confirm in the reader).
*/
typedef struct _LayerInfo
{
ChannelInfo
channel_info[MaxPSDChannels];
char
blendkey[4];
Image
*image;
MaskInfo
mask;
Quantum
opacity;
RectangleInfo
page;
size_t
offset_x,
offset_y;
unsigned char
clipping,
flags,
name[257],
visible;
unsigned short
channels;
StringInfo
*info;
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD()() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  /*
    A PSD file begins with the 4-byte signature "8BPS".
  */
  if (length >= 4)
    {
      if (LocaleNCompare((const char *) magick,"8BPS",4) == 0)
        return(MagickTrue);
    }
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Map a MagickCore composite operator onto the 4-byte PSD blend-mode key
  stored in a layer record.  On little-endian images the key is returned
  byte-reversed so it comes out correctly after the endian swap applied
  when writing.
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
  switch (image->compose)
  {
    case ColorBurnCompositeOp:
      return(image->endian == LSBEndian ? "vidi" : "idiv");
    case ColorDodgeCompositeOp:
      return(image->endian == LSBEndian ? " vid" : "div ");
    case ColorizeCompositeOp:
      return(image->endian == LSBEndian ? "rloc" : "colr");
    case DarkenCompositeOp:
      return(image->endian == LSBEndian ? "krad" : "dark");
    case DifferenceCompositeOp:
      return(image->endian == LSBEndian ? "ffid" : "diff");
    case DissolveCompositeOp:
      return(image->endian == LSBEndian ? "ssid" : "diss");
    case ExclusionCompositeOp:
      return(image->endian == LSBEndian ? "dums" : "smud");
    case HardLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLh" : "hLit");
    case HardMixCompositeOp:
      return(image->endian == LSBEndian ? "xiMh" : "hMix");
    case HueCompositeOp:
      return(image->endian == LSBEndian ? " euh" : "hue ");
    case LightenCompositeOp:
      return(image->endian == LSBEndian ? "etil" : "lite");
    case LinearBurnCompositeOp:
      return(image->endian == LSBEndian ? "nrbl" : "lbrn");
    case LinearDodgeCompositeOp:
      return(image->endian == LSBEndian ? "gddl" : "lddg");
    case LinearLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLl" : "lLit");
    case LuminizeCompositeOp:
      return(image->endian == LSBEndian ? " mul" : "lum ");
    case MultiplyCompositeOp:
      return(image->endian == LSBEndian ? " lum" : "mul ");
    case OverlayCompositeOp:
      return(image->endian == LSBEndian ? "revo" : "over");
    case PinLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLp" : "pLit");
    case SaturateCompositeOp:
      return(image->endian == LSBEndian ? " tas" : "sat ");
    case ScreenCompositeOp:
      return(image->endian == LSBEndian ? "nrcs" : "scrn");
    case SoftLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLs" : "sLit");
    case VividLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLv" : "vLit");
    case OverCompositeOp:
    default:
      /* any operator without a PSD equivalent falls back to normal */
      return(image->endian == LSBEndian ? "mron" : "norm");
  }
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
  Image *image,ExceptionInfo* exception)
{
  const char
    *option;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Revert Photoshop's blending of semi-transparent pixels with white (see
    the comment above).  Only applies to sRGB images with blended alpha;
    can be disabled with -define psd:alpha-unblend=off.
  */
  if ((image->alpha_trait != BlendPixelTrait) ||
      (image->colorspace != sRGBColorspace))
    return(MagickTrue);
  option=GetImageOption(image_info,"psd:alpha-unblend");
  if (IsStringFalse(option) != MagickFalse)
    return(MagickTrue);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* once any row fails, remaining iterations do no further work */
    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      register ssize_t
        i;

      gamma=QuantumScale*GetPixelAlpha(image, q);
      /* fully transparent and fully opaque pixels need no correction */
      if (gamma != 0.0 && gamma != 1.0)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            if (channel != AlphaPixelChannel)
              /* invert "blend with white": c' = (c - (1-a)*white) / a */
              q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  /*
    Map a PSD channel-compression scheme onto the closest MagickCore
    CompressionType.
  */
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
  MagickBooleanType revert,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Fold the layer opacity into every pixel's alpha (revert == MagickFalse)
    or divide it back out again (revert != MagickFalse).
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying layer opacity %.20g", (double) opacity);
  /* a fully opaque layer leaves alpha untouched */
  if (opacity == OpaqueAlpha)
    return(MagickTrue);
  if (image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* once any row fails, remaining iterations do no further work */
    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (revert == MagickFalse)
        SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))*
          opacity),q);
      else if (opacity > 0)
        SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/
          (MagickRealType) opacity)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
  Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  Image
    *complete_mask;

  MagickBooleanType
    status;

  PixelInfo
    color;

  ssize_t
    y;

  /*
    Multiply the image's alpha by the mask intensity, or divide it back out
    when revert is set.  The mask may cover only part of the layer, so it
    is first composited onto a full-size canvas filled with the mask
    background color.
  */
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying opacity mask");
  complete_mask=CloneImage(image,0,0,MagickTrue,exception);
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->alpha_trait=BlendPixelTrait;
  GetPixelInfo(complete_mask,&color);
  color.red=(MagickRealType) background;
  (void) SetImageColor(complete_mask,&color,exception);
  /* place the (possibly smaller) mask at its page offset on the canvas */
  status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
    mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
  if (status == MagickFalse)
    {
      complete_mask=DestroyImage(complete_mask);
      return(status);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register Quantum
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
    if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        alpha,
        intensity;

      alpha=(MagickRealType) GetPixelAlpha(image,q);
      intensity=GetPixelIntensity(complete_mask,p);
      if (revert == MagickFalse)
        SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
      else if (intensity > 0)
        SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
      q+=GetPixelChannels(image);
      p+=GetPixelChannels(complete_mask);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  complete_mask=DestroyImage(complete_mask);
  return(status);
}
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  /*
    Register the layer's opacity-mask image under a short random key and
    record that key in the "psd:opacity-mask" artifact so the writer can
    retrieve the mask later.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  key_info=GetRandomKey(random_info,2+1);
  key=(char *) GetStringInfoDatum(key_info);
  /*
    GetRandomKey(random_info,2+1) allocates exactly 3 bytes: key[0] is the
    random byte, key[1] carries the mask background and key[2] terminates
    the string.  The previous code wrote key[8]/key[9], a heap buffer
    overflow well past the allocation.
  */
  key[1]=(char) layer_info->mask.background;
  key[2]='\0';
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
#define CheckNumberCompactPixels \
  if (remaining == 0) \
    return(n); \
  remaining--

#define CheckNumberPixels(count) \
  if (((ssize_t) n + count) > (ssize_t) number_pixels) \
    return(n); \
  n+=count

  int
    value;

  size_t
    run;

  ssize_t
    k,
    n,
    remaining;

  /*
    PackBits decoder: expand the RLE stream in compact_pixels into at most
    number_pixels output samples.  Depths below 8 unpack each byte into
    several one-byte samples (depth 1 inverts: a set bit becomes 0, a clear
    bit 255).  Returns the number of samples produced.
  */
  n=0;
  remaining=(ssize_t) number_compact_pixels;
  while ((remaining > 1) && (n < (ssize_t) number_pixels))
  {
    remaining--;
    run=(size_t) (*compact_pixels++);
    if (run == 128)
      continue;  /* 128 is a no-op filler byte */
    if (run > 128)
      {
        /*
          Replicate run: the next byte repeats 257-run times.
        */
        run=256-run+1;
        CheckNumberCompactPixels;
        value=(*compact_pixels++);
        for (k=0; k < (ssize_t) run; k++)
        {
          switch (depth)
          {
            case 1:
            {
              CheckNumberPixels(8);
              *pixels++=(value >> 7) & 0x01 ? 0U : 255U;
              *pixels++=(value >> 6) & 0x01 ? 0U : 255U;
              *pixels++=(value >> 5) & 0x01 ? 0U : 255U;
              *pixels++=(value >> 4) & 0x01 ? 0U : 255U;
              *pixels++=(value >> 3) & 0x01 ? 0U : 255U;
              *pixels++=(value >> 2) & 0x01 ? 0U : 255U;
              *pixels++=(value >> 1) & 0x01 ? 0U : 255U;
              *pixels++=(value >> 0) & 0x01 ? 0U : 255U;
              break;
            }
            case 2:
            {
              CheckNumberPixels(4);
              *pixels++=(unsigned char) ((value >> 6) & 0x03);
              *pixels++=(unsigned char) ((value >> 4) & 0x03);
              *pixels++=(unsigned char) ((value >> 2) & 0x03);
              *pixels++=(unsigned char) ((value & 0x03) & 0x03);
              break;
            }
            case 4:
            {
              CheckNumberPixels(2);
              *pixels++=(unsigned char) ((value >> 4) & 0xff);
              *pixels++=(unsigned char) ((value & 0x0f) & 0xff);
              break;
            }
            default:
            {
              CheckNumberPixels(1);
              *pixels++=(unsigned char) value;
              break;
            }
          }
        }
        continue;
      }
    /*
      Literal run: copy the next run+1 stream bytes verbatim.
    */
    run++;
    for (k=0; k < (ssize_t) run; k++)
    {
      CheckNumberCompactPixels;
      switch (depth)
      {
        case 1:
        {
          CheckNumberPixels(8);
          *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
          break;
        }
        case 2:
        {
          CheckNumberPixels(4);
          *pixels++=(*compact_pixels >> 6) & 0x03;
          *pixels++=(*compact_pixels >> 4) & 0x03;
          *pixels++=(*compact_pixels >> 2) & 0x03;
          *pixels++=(*compact_pixels & 0x03) & 0x03;
          break;
        }
        case 4:
        {
          CheckNumberPixels(2);
          *pixels++=(*compact_pixels >> 4) & 0xff;
          *pixels++=(*compact_pixels & 0x0f) & 0xff;
          break;
        }
        default:
        {
          CheckNumberPixels(1);
          *pixels++=(*compact_pixels);
          break;
        }
      }
      compact_pixels++;
    }
  }
  return(n);
}
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    j;

  /*
    Release every per-layer image, mask image and info blob, then free the
    layer array itself; always returns NULL for pointer-reset idiom.
  */
  for (j=0; j < number_layers; j++)
  {
    if (layer_info[j].image != (Image *) NULL)
      layer_info[j].image=DestroyImage(layer_info[j].image);
    if (layer_info[j].mask.image != (Image *) NULL)
      layer_info[j].mask.image=DestroyImage(layer_info[j].mask.image);
    if (layer_info[j].info != (StringInfo *) NULL)
      layer_info[j].info=DestroyStringInfo(layer_info[j].info);
  }
  return((LayerInfo *) RelinquishMagickMemory(layer_info));
}
static inline size_t GetPSDPacketSize(const Image *image)
{
  /*
    Bytes per channel sample: palette images with more than 256 entries
    need two-byte indexes; otherwise the size follows the image depth.
  */
  if ((image->storage_class == PseudoClass) && (image->colors > 256))
    return(2);
  if (image->depth > 16)
    return(4);
  if (image->depth > 8)
    return(2);
  return(1);
}
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  /* PSD (version 1) stores 32-bit lengths; PSB (version 2) stores 64-bit. */
  return(psd_info->version == 1 ? (MagickSizeType) ReadBlobLong(image) :
    (MagickSizeType) ReadBlobLongLong(image));
}
static inline size_t GetPSDRowSize(Image *image)
{
  size_t
    columns;

  /* Monochrome rows are bit-packed: eight pixels per byte, rounded up. */
  columns=image->columns;
  if (image->depth == 1)
    columns=(columns+7)/8;
  return(columns*GetPSDPacketSize(image));
}
static const char *ModeToString(PSDImageType type)
{
  /*
    Human-readable name of a PSD color mode, for logging.
  */
  switch (type)
  {
    case BitmapMode:
      return "Bitmap";
    case GrayscaleMode:
      return "Grayscale";
    case IndexedMode:
      return "Indexed";
    case RGBMode:
      return "RGB";
    case CMYKMode:
      return "CMYK";
    case MultichannelMode:
      return "Multichannel";
    case DuotoneMode:
      return "Duotone";
    case LabMode:
      return "L*A*B";
    default:
      return "unknown";
  }
}
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
  ChannelType
    saved_mask;

  MagickBooleanType
    status;

  /*
    Negate every channel except alpha; PSD stores CMYK samples inverted
    relative to MagickCore's representation.
  */
  saved_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
    AlphaChannel));
  status=NegateImage(image,MagickFalse,exception);
  (void) SetImageChannelMask(image,saved_mask);
  return(status);
}
static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image,
  const unsigned char *blocks,size_t length,ExceptionInfo *exception)
{
  const unsigned char
    *p;

  ssize_t
    offset;

  StringInfo
    *profile;

  unsigned char
    name_length;

  unsigned int
    count;

  unsigned short
    id,
    short_sans;

  /*
    Walk the "8BIM" image-resource blocks: capture the raw bytes as an
    "8bim" profile and pick out the resources this coder understands
    (resolution info, merged-image flag).  Returns NULL when the section
    is too small to contain a block.
  */
  if (length < 16)
    return((StringInfo *) NULL);
  profile=BlobToStringInfo((const unsigned char *) NULL,length);
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushCharPixel(p,&name_length);
    /* the Pascal name string is padded to an even byte count */
    if ((name_length % 2) == 0)
      name_length++;
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    offset=(ssize_t) count;
    /* stop on a block whose declared size runs outside the section */
    if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        char
          value[MagickPathExtent];

        unsigned short
          resolution;

        /*
          Resolution info.
        */
        if (offset < 16)
          break;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.x=(double) resolution;
        (void) FormatLocaleString(value,MagickPathExtent,"%g",
          image->resolution.x);
        (void) SetImageProperty(image,"tiff:XResolution",value,exception);
        /* skip the fixed-point fraction and unit fields we do not use */
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.y=(double) resolution;
        (void) FormatLocaleString(value,MagickPathExtent,"%g",
          image->resolution.y);
        (void) SetImageProperty(image,"tiff:YResolution",value,exception);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /* version-info resource: a zero at byte 4 means no merged image */
        if ((offset > 4) && (*(p+4) == 0))
          psd_info->has_merged_image=MagickFalse;
        p+=offset;
        break;
      }
      default:
      {
        p+=offset;
        break;
      }
    }
    /* resource blocks are aligned to even offsets */
    if ((offset & 0x01) != 0)
      p++;
  }
  return(profile);
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
  /*
    Translate a 4-byte PSD blend-mode key into the corresponding MagickCore
    composite operator; unknown or missing keys fall back to normal (over)
    blending.
  */
  typedef struct
  {
    const char
      *key;

    CompositeOperator
      compose;
  } PSDBlendMap;

  static const PSDBlendMap
    blend_map[] =
    {
      { "norm", OverCompositeOp },
      { "mul ", MultiplyCompositeOp },
      { "diss", DissolveCompositeOp },
      { "diff", DifferenceCompositeOp },
      { "dark", DarkenCompositeOp },
      { "lite", LightenCompositeOp },
      { "hue ", HueCompositeOp },
      { "sat ", SaturateCompositeOp },
      { "colr", ColorizeCompositeOp },
      { "lum ", LuminizeCompositeOp },
      { "scrn", ScreenCompositeOp },
      { "over", OverlayCompositeOp },
      { "hLit", HardLightCompositeOp },
      { "sLit", SoftLightCompositeOp },
      { "smud", ExclusionCompositeOp },
      { "div ", ColorDodgeCompositeOp },
      { "idiv", ColorBurnCompositeOp },
      { "lbrn", LinearBurnCompositeOp },
      { "lddg", LinearDodgeCompositeOp },
      { "lLit", LinearLightCompositeOp },
      { "vLit", VividLightCompositeOp },
      { "pLit", PinLightCompositeOp },
      { "hMix", HardMixCompositeOp }
    };

  register ssize_t
    i;

  if (mode == (const char *) NULL)
    return(OverCompositeOp);
  for (i=0; i < (ssize_t) (sizeof(blend_map)/sizeof(blend_map[0])); i++)
    if (LocaleNCompare(mode,blend_map[i].key,4) == 0)
      return(blend_map[i].compose);
  return(OverCompositeOp);
}
static inline void ReversePSDString(Image *image,char *p,size_t length)
{
  char
    *head,
    *tail;

  /*
    Byte-reverse an in-memory PSD key on little-endian images; the keys are
    stored big-endian in the file.  No-op for MSB-endian images.
  */
  if (image->endian == MSBEndian)
    return;
  head=p;
  tail=p+length-1;
  while (head < tail)
  {
    char
      swap;

    swap=*head;
    *head++=*tail;
    *tail--=swap;
  }
}
static inline void SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
  ExceptionInfo *exception)
{
  /*
    Store one decoded channel sample into the pixel at q.  'type' is the
    PSD channel id: 0..4 select the color channels, -1 is the alpha
    channel, and ids below -1 belong to layer masks (mapped with the color
    cases because masks are decoded into a separate grayscale image).
  */
  if (image->storage_class == PseudoClass)
    {
      PixelInfo
        *color;

      if (type == 0)
        {
          /* channel 0 of an indexed image carries the palette index */
          if (packet_size == 1)
            SetPixelIndex(image,ScaleQuantumToChar(pixel),q);
          else
            SetPixelIndex(image,ScaleQuantumToShort(pixel),q);
        }
      color=image->colormap+(ssize_t) ConstrainColormapIndex(image,
        (ssize_t) GetPixelIndex(image,q),exception);
      /* with more than one channel, the non-index channel sets alpha */
      if ((type == 0) && (channels > 1))
        return;
      else
        color->alpha=(MagickRealType) pixel;
      SetPixelViaPixelInfo(image,color,q);
      return;
    }
  switch (type)
  {
    case -1:
    {
      SetPixelAlpha(image,pixel,q);
      break;
    }
    case -2:
    case 0:
    {
      SetPixelRed(image,pixel,q);
      break;
    }
    case -3:
    case 1:
    {
      SetPixelGreen(image,pixel,q);
      break;
    }
    case -4:
    case 2:
    {
      SetPixelBlue(image,pixel,q);
      break;
    }
    case 3:
    {
      /* fourth channel: black for CMYK, otherwise alpha when present */
      if (image->colorspace == CMYKColorspace)
        SetPixelBlack(image,pixel,q);
      else
        if (image->alpha_trait != UndefinedPixelTrait)
          SetPixelAlpha(image,pixel,q);
      break;
    }
    case 4:
    {
      /* fifth channel is only meaningful as alpha for CMYK data */
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->alpha_trait != UndefinedPixelTrait)
        SetPixelAlpha(image,pixel,q);
      break;
    }
  }
}
static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const ssize_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  Quantum
    pixel;

  register const unsigned char
    *p;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    packet_size;

  /*
    Copy one decoded scanline into row 'row' of the pixel cache, scaling
    each 1-, 2- or 4-byte sample to the quantum depth.  'type' selects the
    destination channel (see SetPSDPixel).
  */
  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (Quantum *) NULL)
    return MagickFalse;
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      if (packet_size == 2)
        {
          unsigned short
            nibble;

          p=PushShortPixel(MSBEndian,p,&nibble);
          pixel=ScaleShortToQuantum(nibble);
        }
      else
        {
          MagickFloatType
            nibble;

          /* 4-byte samples are big-endian floats, scaled by QuantumRange */
          p=PushFloatPixel(MSBEndian,p,&nibble);
          pixel=ClampToQuantum((MagickRealType) (QuantumRange*nibble));
        }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
        q+=GetPixelChannels(image);
      }
    else
      {
        ssize_t
          bit,
          number_bits;

        /*
          Monochrome data is bit-packed: expand up to eight pixels from
          this byte (a set bit maps to 0, a clear bit to QuantumRange).
        */
        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit = 0; bit < (ssize_t) number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
          q+=GetPixelChannels(image);
          x++;
        }
        /* the loop above advanced x one too far unless the row ended */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    y;

  unsigned char
    *row;

  /*
    Read an uncompressed channel one scanline at a time and push it into
    the image pixel cache.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  row=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*row));
  if (row == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(row,0,row_size*sizeof(*row));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    if (ReadBlob(image,row_size,row) != (ssize_t) row_size)
      {
        /* short read: truncated or corrupt file */
        status=MagickFalse;
        break;
      }
    status=ReadPSDChannelPixels(image,channels,y,type,row,exception);
    if (status == MagickFalse)
      break;
  }
  row=(unsigned char *) RelinquishMagickMemory(row);
  return(status);
}
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    i;

  /*
    Read the per-scanline byte counts that precede RLE channel data; PSD
    (version 1) stores 16-bit counts, PSB (version 2) 32-bit.  Returns
    NULL on allocation failure.
  */
  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return(sizes);
  for (i=0; i < (ssize_t) size; i++)
    if (psd_info->version == 1)
      sizes[i]=(MagickOffsetType) ReadBlobShort(image);
    else
      sizes[i]=(MagickOffsetType) ReadBlobLong(image);
  return(sizes);
}
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  /*
    Decode a PackBits (RLE) compressed channel; sizes[] holds the
    compressed byte count of each scanline, read beforehand by
    ReadPSDRLESizes().
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* size the scratch buffer to the largest compressed scanline */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > (row_size+2048)) /* arbitrary number */
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /*
      For depth 1 the rows stay bit-packed; the sentinel depth makes
      DecodePSDPixels copy bytes verbatim instead of expanding bits.
    */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register unsigned char
    *p;

  size_t
    count,
    length,
    packet_size,
    row_size;

  ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  /*
    Inflate a ZIP-compressed channel (with or without delta prediction)
    and feed the decompressed scanlines into the pixel cache.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  /* a compressed payload larger than the whole file is corrupt */
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  /* inflate the whole channel into 'pixels' in one pass */
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      /*
        Undo the horizontal delta prediction: each sample is stored as a
        difference from its left neighbor (16-bit samples are treated as
        big-endian byte pairs with carry propagation).
      */
      p=pixels;
      while (count > 0)
      {
        length=image->columns;
        while (--length)
        {
          if (packet_size == 2)
            {
              p[2]+=p[0]+((p[1]+p[3]) >> 8);
              p[3]+=p[1];
            }
          /*
          else if (packet_size == 4)
             {
                TODO: Figure out what to do there.
             }
          */
          else
            *(p+1)+=*p;
          p+=packet_size;
        }
        p+=packet_size;
        count-=row_size;
      }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    offset;

  MagickBooleanType
    status;

  /*
    Decode one channel of a layer.  Channels with type < -1 carry layer
    mask data and are decoded into a separate grayscale image stored in
    layer_info->mask.image instead of into the layer itself.
  */
  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
      (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
           (IsStringTrue(option) == MagickFalse)))
        {
          /* skip the payload; the 2-byte compression marker was already
             consumed by the caller */
          (void) SeekBlob(image,(MagickOffsetType)
            layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          (void) ResetImagePixels(mask,exception);
          (void) SetImageType(mask,GrayscaleType,exception);
          channel_image=mask;
        }
    }
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        (ssize_t) layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
      break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* always reposition to the end of the channel, even after a failure */
  (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
    SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MagickPathExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  /*
    Decode every channel of a single layer, then apply the layer opacity
    and, when present, its opacity mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image,exception);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    layer_info->image->compose=NoCompositeOp;
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
    exception);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    /* each channel is preceded by its own 2-byte compression marker */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    /* TODO: Remove this when we figure out how to support this */
    if ((compression == ZipWithPrediction) && (image->depth == 32))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          TypeError,"CompressionNotSupported","ZipWithPrediction(32 bit)");
        return(MagickFalse);
      }
    layer_info->image->compression=ConvertPSDCompression(compression);
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->alpha_trait=BlendPixelTrait;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateCMYK(layer_info->image,exception);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    expected;

  register ssize_t
    n;

  /*
    Verify the layer supplies every channel the color mode requires: build
    the required-channel mask, clear each bit as the matching channel is
    found, and accept only when nothing (or only alpha) remains.
  */
  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  expected=RedChannel;
  if (psd_info->min_channels >= 3)
    expected|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    expected|=BlackChannel;
  for (n=0; n < (ssize_t) layer_info->channels; n++)
  {
    short
      type;

    type=layer_info->channel_info[n].type;
    if (type == -1)
      {
        expected|=AlphaChannel;
        continue;
      }
    if (type < -1)
      continue;
    switch (type)
    {
      case 0: expected&=~RedChannel; break;
      case 1: expected&=~GreenChannel; break;
      case 2: expected&=~BlueChannel; break;
      case 3: expected&=~BlackChannel; break;
      default: break;
    }
  }
  if (expected == 0)
    return(MagickTrue);
  if ((expected == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
  ssize_t number_layers)
{
  register ssize_t
    i;

  ssize_t
    j;

  /*
    Drop entries whose image could not be decoded (compacting the array in
    place), then splice the surviving layers into the image list and release
    the layer bookkeeping.
  */
  i=0;
  while (i < number_layers)
  {
    if (layer_info[i].image != (Image *) NULL)
      {
        i++;
        continue;
      }
    for (j=i; j < (number_layers-1); j++)
      layer_info[j]=layer_info[j+1];
    number_layers--;
  }
  if (number_layers == 0)
    {
      layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
      return;
    }
  for (i=0; i < number_layers; i++)
  {
    if (i != 0)
      layer_info[i].image->previous=layer_info[i-1].image;
    if (i != (number_layers-1))
      layer_info[i].image->next=layer_info[i+1].image;
    layer_info[i].image->page=layer_info[i].page;
  }
  image->next=layer_info[0].image;
  layer_info[0].image->previous=image;
  layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info,
  const ImageInfo *image_info,const size_t index)
{
  /*
    Decide whether scene `index` should be skipped: only when a merged image
    exists and the user asked for a scene range that excludes this index.
  */
  if (psd_info->has_merged_image == MagickFalse)
    return(MagickFalse);
  if (image_info->number_scenes == 0)
    return(MagickFalse);
  if ((index >= image_info->scene) &&
      (index <= image_info->scene+image_info->number_scenes-1))
    return(MagickFalse);
  return(MagickTrue);
}
/*
  ReadPSDLayersInternal() parses the layer & mask information section of a
  PSD/PSB stream.  It reads the per-layer records (bounding box, channels,
  blend key, mask, blending ranges, name, additional info), then decodes the
  channel data of each layer into a new image unless skip_layers is set.
  Returns MagickTrue on success or when there is nothing to read; on failure
  all partially read layers are destroyed and an exception is raised.
*/
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    count,
    index,
    j,
    number_layers;

  size=GetPSDSize(psd_info,image);
  if (size == 0)
    {
      /*
        Skip layers & masks.  A zero size may still be followed by an
        "8BIM" + "Lr16"/"Lr32" block that carries the real layer length.
      */
      (void) ReadBlobLong(image);
      count=ReadBlob(image,4,(unsigned char *) type);
      if (count == 4)
        ReversePSDString(image,type,(size_t) count);
      if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(MagickTrue);
      else
        {
          count=ReadBlob(image,4,(unsigned char *) type);
          if (count == 4)
            ReversePSDString(image,type,4);
          if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
              (LocaleNCompare(type,"Lr32",4) == 0)))
            size=GetPSDSize(psd_info,image);
          else
            return(MagickTrue);
        }
    }
  if (size == 0)
    return(MagickTrue);
  layer_info=(LayerInfo *) NULL;
  number_layers=(ssize_t) ReadBlobSignedShort(image);
  if (number_layers < 0)
    {
      /*
        The first alpha channel in the merged result contains the
        transparency data for the merged result.
      */
      number_layers=MagickAbsoluteValue(number_layers);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " negative layer count corrected for");
      image->alpha_trait=BlendPixelTrait;
    }
  /*
    We only need to know if the image has an alpha channel
  */
  if (skip_layers != MagickFalse)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " image contains %.20g layers",(double) number_layers);
  if (number_layers == 0)
    ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
      image->filename);
  layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
    sizeof(*layer_info));
  if (layer_info == (LayerInfo *) NULL)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " allocation of LayerInfo failed");
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
  /*
    Pass 1: read every per-layer record (no pixel data yet).
  */
  for (i=0; i < number_layers; i++)
  {
    ssize_t
      top,
      left,
      bottom,
      right;

    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading layer #%.20g",(double) i+1);
    /* Layer bounding box is stored as top, left, bottom, right. */
    top=(ssize_t) ReadBlobSignedLong(image);
    left=(ssize_t) ReadBlobSignedLong(image);
    bottom=(ssize_t) ReadBlobSignedLong(image);
    right=(ssize_t) ReadBlobSignedLong(image);
    if ((right < left) || (bottom < top))
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].page.y=top;
    layer_info[i].page.x=left;
    layer_info[i].page.width=(size_t) (right-left);
    layer_info[i].page.height=(size_t) (bottom-top);
    layer_info[i].channels=ReadBlobShort(image);
    if (layer_info[i].channels > MaxPSDChannels)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
          image->filename);
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
        (double) layer_info[i].page.x,(double) layer_info[i].page.y,
        (double) layer_info[i].page.height,(double)
        layer_info[i].page.width,(double) layer_info[i].channels);
    /* Per-channel type and byte length; type -1 marks the alpha channel. */
    for (j=0; j < (ssize_t) layer_info[i].channels; j++)
    {
      layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
      if ((layer_info[i].channel_info[j].type < -4) ||
          (layer_info[i].channel_info[j].type > 4))
        {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
            image->filename);
        }
      layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
        image);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
          (double) layer_info[i].channel_info[j].type,
          (double) layer_info[i].channel_info[j].size);
    }
    if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadBlob(image,4,(unsigned char *) type);
    if (count == 4)
      ReversePSDString(image,type,4);
    if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer type was %.4s instead of 8BIM", type);
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
    if (count != 4)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    ReversePSDString(image,layer_info[i].blendkey,4);
    layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
      ReadBlobByte(image));
    layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
    layer_info[i].flags=(unsigned char) ReadBlobByte(image);
    /* Flag bit 0x02 clear => layer is visible. */
    layer_info[i].visible=!(layer_info[i].flags & 0x02);
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
        layer_info[i].blendkey,(double) layer_info[i].opacity,
        layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
        layer_info[i].visible ? "true" : "false");
    (void) ReadBlobByte(image); /* filler */
    size=ReadBlobLong(image);
    if (size != 0)
      {
        MagickSizeType
          combined_length,
          length;

        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer contains additional info");
        length=ReadBlobLong(image);
        combined_length=length+4;
        if (length != 0)
          {
            /*
              Layer mask info.
            */
            layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.height=(size_t)
              (ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
            layer_info[i].mask.page.width=(size_t) (
              ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
            layer_info[i].mask.background=(unsigned char) ReadBlobByte(
              image);
            layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
            /* NOTE(review): bit 0x01 appears to mean "mask position is
               relative to the layer"; when clear the offsets are rebased
               onto the layer page -- confirm against the PSD spec. */
            if (!(layer_info[i].mask.flags & 0x01))
              {
                layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                  layer_info[i].page.y;
                layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                  layer_info[i].page.x;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                (double) layer_info[i].mask.page.x,(double)
                layer_info[i].mask.page.y,(double)
                layer_info[i].mask.page.width,(double)
                layer_info[i].mask.page.height,(double) ((MagickOffsetType)
                length)-18);
            /*
              Skip over the rest of the layer mask information.
            */
            /* NOTE(review): if length < 18 the unsigned subtraction below
               wraps to a huge value and DiscardBlobBytes will fail or eat
               the rest of the blob -- confirm a minimum-length check is
               enforced for this field. */
            if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        length=ReadBlobLong(image);
        combined_length+=length+4;
        if (length != 0)
          {
            /*
              Layer blending ranges info.
            */
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer blending ranges: length=%.20g",(double)
                ((MagickOffsetType) length));
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /*
          Layer name.  Pascal string: one length byte (0-255) then the
          characters; `length++` accounts for the terminating NUL slot.
        */
        length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
        combined_length+=length+1;
        if (length > 0)
          (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
        layer_info[i].name[length]='\0';
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer name: %s",layer_info[i].name);
        /* The on-disk name is padded to a multiple of 4 bytes. */
        if ((length % 4) != 0)
          {
            length=4-(length % 4);
            combined_length+=length;
            /* Skip over the padding of the layer name */
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /*
          Whatever remains of the extra-data block is preserved verbatim as
          the "psd:additional-info" payload.
        */
        length=(MagickSizeType) size-combined_length;
        if (length > 0)
          {
            unsigned char
              *info;

            if (length > GetBlobSize(image))
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "InsufficientImageDataInFile",image->filename);
              }
            layer_info[i].info=AcquireStringInfo((const size_t) length);
            info=GetStringInfoDatum(layer_info[i].info);
            (void) ReadBlob(image,(const size_t) length,info);
          }
      }
  }
  /*
    Pass 2: allocate an image per non-empty layer and attach any
    additional-info profile.
  */
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer data is empty");
        if (layer_info[i].info != (StringInfo *) NULL)
          layer_info[i].info=DestroyStringInfo(layer_info[i].info);
        continue;
      }
    /*
      Allocate layered image.
    */
    layer_info[i].image=CloneImage(image,layer_info[i].page.width,
      layer_info[i].page.height,MagickFalse,exception);
    if (layer_info[i].image == (Image *) NULL)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " allocation of image for layer %.20g failed",(double) i);
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      }
    if (layer_info[i].info != (StringInfo *) NULL)
      {
        (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
          layer_info[i].info,exception);
        layer_info[i].info=DestroyStringInfo(layer_info[i].info);
      }
  }
  if (image_info->ping != MagickFalse)
    {
      AttachPSDLayers(image,layer_info,number_layers);
      return(MagickTrue);
    }
  /*
    Pass 3: decode the channel data of each kept layer; skipped layers have
    their channel bytes discarded to keep the stream position in sync.
  */
  status=MagickTrue;
  index=0;
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].image == (Image *) NULL) ||
        (PSDSkipImage(psd_info, image_info,++index) != MagickFalse))
      {
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          if (DiscardBlobBytes(image,(MagickSizeType)
              layer_info[i].channel_info[j].size) == MagickFalse)
            {
              layer_info=DestroyLayerInfo(layer_info,number_layers);
              ThrowBinaryException(CorruptImageError,
                "UnexpectedEndOfFile",image->filename);
            }
        }
        continue;
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for layer %.20g",(double) i);
    status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
      exception);
    if (status == MagickFalse)
      break;
    status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
      (MagickSizeType) number_layers);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    AttachPSDLayers(image,layer_info,number_layers);
  else
    layer_info=DestroyLayerInfo(layer_info,number_layers);
  return(status);
}
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /*
    Public entry point: honor the coder security policy, then delegate to
    the internal parser without skipping layers.
  */
  if (IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD") == MagickFalse)
    return(MagickTrue);
  return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
    exception));
}
/*
  ReadPSDMergedImage() reads the flattened composite image data that follows
  the layer section.  Only Raw and RLE compression are handled here; other
  schemes raise a warning and return MagickFalse so the caller can fall back
  to the layer data.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  register ssize_t
    i;

  /* Nothing to do when the caller requested only non-composite scenes. */
  if ((image_info->number_scenes != 0) && (image_info->scene != 0))
    return(MagickTrue);
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* RLE data is preceded by one scanline byte count per row per channel. */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    ssize_t
      type;

    type=i;
    /* Two-channel images are gray+alpha: map the second channel to alpha
       (type -1 is the alpha marker used throughout this coder). */
    if ((type == 1) && (psd_info->channels == 2))
      type=-1;
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}
/*
  ReadPSDImage() reads an Adobe Photoshop (PSD/PSB) stream: file header,
  colormap, image resource blocks, layer & mask section, and finally the
  precombined (merged) image kept for pre-version-4 compatibility.  Returns
  the first image of the decoded list, or NULL with an exception set.
*/
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    skip_layers;

  MagickOffsetType
    offset;

  MagickSizeType
    length;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    imageListLength;

  ssize_t
    count;

  StringInfo
    *profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read image header.  Signature must be "8BPS"; version 1 is PSD,
    version 2 is the large-document PSB variant.
  */
  image->endian=MSBEndian;
  count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
  psd_info.version=ReadBlobMSBShort(image);
  if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
      ((psd_info.version != 1) && (psd_info.version != 2)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  (void) ReadBlob(image,6,psd_info.reserved);
  psd_info.channels=ReadBlobMSBShort(image);
  if (psd_info.channels < 1)
    ThrowReaderException(CorruptImageError,"MissingImageChannel");
  if (psd_info.channels > MaxPSDChannels)
    ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
  psd_info.rows=ReadBlobMSBLong(image);
  psd_info.columns=ReadBlobMSBLong(image);
  /* Version-1 (PSD) files are limited to 30000x30000 pixels. */
  if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
      (psd_info.columns > 30000)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.depth=ReadBlobMSBShort(image);
  if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
      (psd_info.depth != 16) && (psd_info.depth != 32))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.mode=ReadBlobMSBShort(image);
  if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
      (double) psd_info.columns,(double) psd_info.rows,(double)
      psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
      psd_info.mode));
  if (EOFBlob(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Initialize image.
  */
  image->depth=psd_info.depth;
  image->columns=psd_info.columns;
  image->rows=psd_info.rows;
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  /*
    Derive the colorspace and the minimum channel count from the PSD mode.
  */
  psd_info.min_channels=3;
  if (psd_info.mode == LabMode)
    (void) SetImageColorspace(image,LabColorspace,exception);
  if (psd_info.mode == CMYKMode)
    {
      psd_info.min_channels=4;
      (void) SetImageColorspace(image,CMYKColorspace,exception);
    }
  else
    if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
        (psd_info.mode == DuotoneMode))
      {
        if (psd_info.depth != 32)
          {
            status=AcquireImageColormap(image,(size_t) (psd_info.depth < 16 ?
              256 : 65536),exception);
            if (status == MagickFalse)
              ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " Image colormap allocated");
          }
        psd_info.min_channels=1;
        (void) SetImageColorspace(image,GRAYColorspace,exception);
      }
    else
      if (psd_info.mode == IndexedMode)
        psd_info.min_channels=1;
  if (psd_info.channels < psd_info.min_channels)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Read PSD raster colormap only present for indexed and duotone images.
  */
  length=ReadBlobMSBLong(image);
  if ((psd_info.mode == IndexedMode) && (length < 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (length != 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading colormap");
      if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
        {
          /*
            Duotone image data; the format of this data is undocumented.
            32 bits per pixel; the colormap is ignored.
          */
          (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
        }
      else
        {
          size_t
            number_colors;

          /*
            Read PSD raster colormap.  Stored planar: all reds, then all
            greens, then all blues.
          */
          number_colors=(size_t) length/3;
          if (number_colors > 65536)
            ThrowReaderException(CorruptImageError,"ImproperImageHeader");
          if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          image->alpha_trait=UndefinedPixelTrait;
        }
    }
  if ((image->depth == 1) && (image->storage_class != PseudoClass))
    ThrowReaderException(CorruptImageError, "ImproperImageHeader");
  psd_info.has_merged_image=MagickTrue;
  profile=(StringInfo *) NULL;
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      unsigned char
        *blocks;

      /*
        Image resources block.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading image resource blocks - %.20g bytes",(double)
          ((MagickOffsetType) length));
      if (length > GetBlobSize(image))
        ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
      blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
        sizeof(*blocks));
      if (blocks == (unsigned char *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      count=ReadBlob(image,(size_t) length,blocks);
      if ((count != (ssize_t) length) || (length < 4) ||
          (LocaleNCompare((char *) blocks,"8BIM",4) != 0))
        {
          blocks=(unsigned char *) RelinquishMagickMemory(blocks);
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        }
      profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length,
        exception);
      blocks=(unsigned char *) RelinquishMagickMemory(blocks);
    }
  /*
    Layer and mask block.
  */
  length=GetPSDSize(&psd_info,image);
  /* NOTE(review): a reported length of exactly 8 is treated as a marker
     and the real length is taken from the following two 32-bit words
     (the second read wins) -- confirm against the PSD spec. */
  if (length == 8)
    {
      length=ReadBlobMSBLong(image);
      length=ReadBlobMSBLong(image);
    }
  offset=TellBlob(image);
  skip_layers=MagickFalse;
  if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
      (psd_info.has_merged_image != MagickFalse))
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " read composite only");
      skip_layers=MagickTrue;
    }
  if (length == 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image has no layers");
    }
  else
    {
      if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
          exception) != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      /*
        Skip the rest of the layer and mask information.
      */
      (void) SeekBlob(image,offset+length,SEEK_SET);
    }
  /*
    If we are only "pinging" the image, then we're done - so return.
  */
  if (EOFBlob(image) != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
    }
  if (image_info->ping != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  /*
    Read the precombined layer, present for PSD < 4 compatibility.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " reading the precombined layer");
  imageListLength=GetImageListLength(image);
  if ((psd_info.has_merged_image != MagickFalse) || (imageListLength == 1))
    psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage(
      image_info,image,&psd_info,exception);
  /*
    If the merged image failed to decode and we have no layers yet, retry
    the layer section from its saved offset.
  */
  if ((psd_info.has_merged_image == MagickFalse) && (imageListLength == 1) &&
      (length != 0))
    {
      (void) SeekBlob(image,offset,SEEK_SET);
      status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
        exception);
      if (status != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
    }
  if (psd_info.has_merged_image == MagickFalse)
    {
      Image
        *merged;

      if (imageListLength == 1)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
        }
      /*
        No usable composite: synthesize one by flattening the layers over a
        transparent background.
      */
      image->background_color.alpha=(MagickRealType) TransparentAlpha;
      image->background_color.alpha_trait=BlendPixelTrait;
      (void) SetImageBackgroundColor(image,exception);
      merged=MergeImageLayers(image,FlattenLayer,exception);
      ReplaceImageInList(&image,merged);
    }
  if (profile != (StringInfo *) NULL)
    {
      Image
        *next;

      /* Attach the resource-block profile to every scene that is kept. */
      i=0;
      next=image;
      while (next != (Image *) NULL)
      {
        if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse)
          (void) SetImageProfile(next,GetStringInfoName(profile),profile,
            exception);
        next=next->next;
      }
      profile=DestroyStringInfo(profile);
    }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  MagickInfo
    *entry;

  ssize_t
    n;

  static const char
    *names[2] = { "PSB", "PSD" },
    *descriptions[2] =
    {
      "Adobe Large Document Format",
      "Adobe Photoshop bitmap"
    };

  /*
    Register both the large-document (PSB) and classic (PSD) variants; each
    shares the same decoder, encoder, and magick detector.
  */
  for (n=0; n < 2; n++)
  {
    entry=AcquireMagickInfo("PSD",names[n],descriptions[n]);
    entry->decoder=(DecodeImageHandler *) ReadPSDImage;
    entry->encoder=(EncodeImageHandler *) WritePSDImage;
    entry->magick=(IsImageFormatHandler *) IsPSD;
    entry->flags|=CoderDecoderSeekableStreamFlag;
    entry->flags|=CoderEncoderSeekableStreamFlag;
    (void) RegisterMagickInfo(entry);
  }
  return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
(void) UnregisterMagickInfo("PSB");
(void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  /*
    Write an offset field: 16-bit big-endian for version-1 (PSD) files,
    32-bit big-endian for version-2 (PSB) files.
  */
  if (psd_info->version != 1)
    return(WriteBlobMSBLong(image,(unsigned int) offset));
  return(WriteBlobMSBShort(image,(unsigned short) offset));
}
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  /*
    Seek back to a previously reserved offset field, patch it with `size`
    (16-bit for PSD, 32-bit for PSB), then restore the write position.
  */
  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBShort(image,(unsigned short) size) :
    WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  /*
    Write a length field: 32-bit for version-1 (PSD) files, 64-bit for
    version-2 (PSB) files.
  */
  if (psd_info->version != 1)
    return(WriteBlobLongLong(image,size));
  return(WriteBlobLong(image,(unsigned int) size));
}
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  /*
    Seek back to a previously reserved length field, patch it via
    SetPSDSize(), then restore the current write position.
  */
  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=SetPSDSize(psd_info,image,size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
/*
  PSDPackbitsEncodeImage() compresses `length` bytes of `pixels` with the
  PackBits run-length scheme into `compact_pixels` and returns the number of
  compressed bytes written (including the trailing EOD marker), or 0 when
  the 128-byte scratch buffer cannot be allocated.  The caller must supply a
  `compact_pixels` buffer large enough for the worst-case expansion.
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  register ssize_t
    i,
    j;

  register unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  /* i counts the bytes remaining; special-case the final 1-3 bytes. */
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* One byte left: emit as a literal run of length 1. */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* Two bytes left: literal run of length 2. */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        /* Three bytes left: packed run if all equal, else literal run. */
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            /* Run header: two's-complement count, i.e. 257-count. */
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        /* NOTE(review): the loop condition reads *(pixels+count+2) before
           the (count >= i-3) bound check below, so it may look ahead up to
           two bytes past the `length` input bytes -- confirm the caller's
           buffer provides that slack. */
        count=0;
        while ((*(pixels+count) != *(pixels+count+1)) ||
            (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128; /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const CompressionType compression,
  const ssize_t channels)
{
  size_t
    count;

  ssize_t
    channel,
    row;

  /*
    Emit the two-byte compression marker.  For RLE additionally reserve one
    per-row byte-count slot per channel (zeros here, patched later via
    WritePSDOffset).  Zip is only available when zlib support is compiled
    in; anything else is written as Raw.
  */
  switch (compression)
  {
    case RLECompression:
    {
      count=(size_t) WriteBlobShort(image,RLE);
      for (channel=0; channel < channels; channel++)
        for (row=0; row < (ssize_t) next_image->rows; row++)
          count+=SetPSDOffset(psd_info,image,0);
      break;
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    case ZipCompression:
    {
      count=(size_t) WriteBlobShort(image,ZipWithoutPrediction);
      break;
    }
#endif
    default:
    {
      count=(size_t) WriteBlobShort(image,Raw);
      break;
    }
  }
  return(count);
}
/*
  Write the pixel data of one channel (selected by quantum_type) of
  next_image to the PSD blob with the requested compression (raw, RLE
  packbits, or zlib when built with MAGICKCORE_ZLIB_DELEGATE).  Returns the
  number of bytes written, or 0 on allocation/initialization failure.

  NOTE(review): for RLE the caller must supply a compact_pixels scratch
  buffer (see AcquireCompactPixels) and a valid size_offset — confirm
  against callers.
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  const CompressionType compression,ExceptionInfo *exception)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  register const Quantum
    *p;

  register ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE

  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* A separately written channel carries its own compression marker;
         size_offset points just past the 2-byte marker. */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
    }
  if (next_image->depth > 8)
    next_image->depth=16;
  /* 1-bit output only when the image is genuinely monochrome. */
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(
        MagickMinBufferExtent,sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* Quality 1-9 maps directly onto the zlib compression level. */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          compressed_pixels=(unsigned char *) RelinquishMagickMemory(
            compressed_pixels);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    if (monochrome != MagickFalse)
      /* PSD bitmap mode stores ink-on as 0, so invert the exported bits. */
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
          exception);
        count+=WriteBlob(image,length,compact_pixels);
        /* Patch this row's encoded byte count into the offset table. */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        /* Last row flushes the deflate stream. */
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;
        do {
          stream.avail_out=(uInt) MagickMinBufferExtent;
          stream.next_out=(Bytef *) compressed_pixels;
          if (deflate(&stream,flush) == Z_STREAM_ERROR)
            break;
          length=(size_t) MagickMinBufferExtent-stream.avail_out;
          if (length > 0)
            count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}
/*
  Allocate a scratch buffer large enough to hold one packbits-encoded row
  of image.  On allocation failure a ResourceLimitError is raised on
  exception and NULL is returned.
*/
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    bytes_per_sample;

  unsigned char
    *buffer;

  /* 16-bit samples occupy two bytes each; 8-bit samples one. */
  bytes_per_sample=(image->depth > 8UL) ? 2UL : 1UL;
  buffer=(unsigned char *) AcquireQuantumMemory((9*image->columns)+1,
    bytes_per_sample*sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  return(buffer);
}
/*
  Write all channels of next_image (index, gray, or R/G/B[/K][/A]) by
  delegating each to WritePSDChannel, plus an optional opacity-mask channel
  when writing a separate layer.  Returns total bytes written, or 0 on
  allocation failure.
*/
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  CompressionType
    compression;

  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  /* image_info->compression overrides the per-image setting. */
  compression=next_image->compression;
  if (image_info->compression != UndefinedCompression)
    compression=image_info->compression;
  if (compression == RLECompression)
    {
      /* Scratch space for the worst-case packbits expansion of one row. */
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /* Composite data: count the channels and write one shared
         compression marker (plus the RLE offset table) up front. */
      if ((next_image->storage_class != PseudoClass) ||
          (IsImageGray(next_image) != MagickFalse))
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
              3);
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,
        (ssize_t) channels);
      /* Stride between consecutive channels' slots in the offset table. */
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  size_offset+=2;
  if ((next_image->storage_class == PseudoClass) &&
      (IsImageGray(next_image) == MagickFalse))
    {
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsImageGray(next_image) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* PSD stores CMYK inverted; NegateCMYK is undone further down. */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateCMYK(next_image,exception);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate,compression,
                exception);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->alpha_trait != UndefinedPixelTrait)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  /* Undo the inversion applied before writing the CMYK channels. */
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* Layers may carry an opacity mask registered under this artifact. */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            exception);
          if (mask != (Image *) NULL)
            {
              if (compression == RLECompression)
                {
                  /* compact_pixels is NULL here, so returning 0 on
                     failure does not leak. */
                  compact_pixels=AcquireCompactPixels(mask,exception);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
                exception);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}
/*
  Write `value' as a Pascal string (one-byte length prefix, at most 255
  characters) and pad the prefix+text to a multiple of `padding' bytes.
  Returns the number of bytes written.
*/
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  register ssize_t
    i;

  size_t
    count,
    length;

  /* The one-byte prefix caps the representable length at 255. */
  length=strlen(value);
  if (length > 255UL)
    length=255UL;
  count=WriteBlobByte(image,(unsigned char) length);
  if (length != 0)
    count+=WriteBlob(image,length,(const unsigned char *) value);
  /* Pad (prefix included) up to the next multiple of `padding'. */
  length++;
  if ((length % padding) != 0)
    for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
      count+=WriteBlobByte(image,0);
  return(count);
}
/*
  Write the 8BIM 0x03ED (ResolutionInfo) image-resource block: horizontal
  and vertical resolution as 16.16 fixed point plus their display units
  (1 = pixels/inch, 2 = pixels/cm).
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units == PixelsPerCentimeterResolution)
    {
      /* Convert pixels/cm to the pixels/inch that PSD stores. */
      x_resolution=2.54*65536.0*image->resolution.x+0.5;
      y_resolution=2.54*65536.0*image->resolution.y+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->resolution.x+0.5;
      y_resolution=65536.0*image->resolution.y+0.5;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  /* NOTE(review): 0.5 was already added above, so rounding is applied
     twice here — presumably benign, but confirm before changing. */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}
/*
  Write a layer channel record: the channel id followed by a zero
  placeholder for its data length (patched later via WritePSDSize).
  Returns the number of bytes written.
*/
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    total;

  total=(size_t) WriteBlobShort(image,(const unsigned short) channel);
  total+=SetPSDSize(psd_info,image,0);
  return(total);
}
/*
  Remove the embedded ICC profile resource (id 0x040F) from an 8BIM
  resource block so it is not written twice (the ICC profile is emitted as
  its own resource).  Walks the 8BIM records in place and compacts the
  buffer over the removed record.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  /* A record header needs at least 12 bytes; 16 gives a safe margin. */
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    /* Header: 4-byte signature (checked above), 2-byte id, 2-byte pad,
       4-byte data length. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* Record size padded to even length, plus the 12-byte header. */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    /* Record data is padded to an even byte count. */
    if ((count & 0x01) != 0)
      p++;
  }
}
/*
  Remove the ResolutionInfo resource (id 0x03ED) from an 8BIM resource
  block; WriteResolutionResourceBlock emits a fresh one, so a stale copy
  must not survive.  Compacts the buffer in place over the removed record.
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    /* Header: signature, 2-byte id, 2-byte pad, 4-byte data length. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    /* Even-padded data size; negative indicates a corrupt record. */
    cnt=PSDQuantum(count);
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}
/*
  Filter the "psd:additional-info" profile according to the
  "psd:additional-info" image option:
    "all"       - keep the profile as-is;
    "selective" - keep only records whose 4-char key is on the whitelist,
                  compacting the buffer in place;
    otherwise   - drop the profile entirely.
  Returns the (possibly truncated) profile, or NULL when nothing is kept.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  register size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;
  /* Each record: 4-byte signature, 4-byte key, 4-byte size, data. */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    /* Big-endian 32-bit data size. */
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    /* A size past the end of the buffer means corrupt data: bail out. */
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* Not whitelisted: shift the remainder over this record. */
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  /* Truncate to the bytes actually kept after compaction. */
  SetStringInfoLength(profile,(const size_t) length);
  (void) SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}
/*
  Write the PSD layer-info section: a layer count, one layer record per
  image in the list (bounds, channel table, blend mode, opacity, flags,
  optional mask record, name, additional info), followed by each layer's
  channel data.  The section's total size is patched in at size_offset.
  On success *layers_size (when non-NULL) receives the unrounded section
  size.  Returns MagickTrue on success.
*/
static MagickBooleanType WritePSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
  ExceptionInfo *exception)
{
  char
    layer_name[MagickPathExtent];

  const char
    *property;

  const StringInfo
    *info;

  Image
    *base_image,
    *next_image;

  MagickBooleanType
    status;

  MagickOffsetType
    *layer_size_offsets,
    size_offset;

  register ssize_t
    i;

  size_t
    layer_count,
    layer_index,
    length,
    name_length,
    rounded_size,
    size;

  status=MagickTrue;
  /* Layers start at the second image; a single image is its own layer. */
  base_image=GetNextImageInList(image);
  if (base_image == (Image *) NULL)
    base_image=image;
  size=0;
  /* Reserve the section-size field; patched at the end. */
  size_offset=TellBlob(image);
  (void) SetPSDSize(psd_info,image,0);
  layer_count=0;
  for (next_image=base_image; next_image != NULL; )
  {
    layer_count++;
    next_image=GetNextImageInList(next_image);
  }
  /* A negative count signals that the first alpha channel holds the
     merged-composite transparency. */
  if (image->alpha_trait != UndefinedPixelTrait)
    size+=WriteBlobShort(image,-(unsigned short) layer_count);
  else
    size+=WriteBlobShort(image,(unsigned short) layer_count);
  layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
    (size_t) layer_count,sizeof(MagickOffsetType));
  if (layer_size_offsets == (MagickOffsetType *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  layer_index=0;
  for (next_image=base_image; next_image != NULL; )
  {
    Image
      *mask;

    unsigned char
      default_color;

    unsigned short
      channels,
      total_channels;

    mask=(Image *) NULL;
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    default_color=0;
    if (property != (const char *) NULL)
      {
        mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
        default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
      }
    /* Layer bounds: top, left, bottom, right. */
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
      next_image->rows));
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
      next_image->columns));
    channels=1;
    if ((next_image->storage_class != PseudoClass) &&
        (IsImageGray(next_image) == MagickFalse))
      channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 :
        3);
    total_channels=channels;
    if (next_image->alpha_trait != UndefinedPixelTrait)
      total_channels++;
    if (mask != (Image *) NULL)
      total_channels++;
    size+=WriteBlobShort(image,total_channels);
    /* Remember where this layer's channel-size records start so the real
       lengths can be patched in after the channel data is written. */
    layer_size_offsets[layer_index++]=TellBlob(image);
    for (i=0; i < (ssize_t) channels; i++)
      size+=WriteChannelSize(psd_info,image,(signed short) i);
    /* Channel id -1 is transparency, -2 the user-supplied layer mask. */
    if (next_image->alpha_trait != UndefinedPixelTrait)
      size+=WriteChannelSize(psd_info,image,-1);
    if (mask != (Image *) NULL)
      size+=WriteChannelSize(psd_info,image,-2);
    size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM");
    size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image));
    property=GetImageArtifact(next_image,"psd:layer.opacity");
    if (property != (const char *) NULL)
      {
        Quantum
          opacity;

        opacity=(Quantum) StringToInteger(property);
        size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
        (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
      }
    else
      size+=WriteBlobByte(image,255);
    size+=WriteBlobByte(image,0);
    size+=WriteBlobByte(image,(const unsigned char)
      (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
    size+=WriteBlobByte(image,0);
    info=GetAdditionalInformation(image_info,next_image,exception);
    property=(const char *) GetImageProperty(next_image,"label",exception);
    if (property == (const char *) NULL)
      {
        /* Unlabeled layers are named L1, L2, ... */
        (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
          (double) layer_index);
        property=layer_name;
      }
    /* Extra-data length: padded Pascal name + mask record + additional
       info + the two fixed 4-byte fields. */
    name_length=strlen(property)+1;
    if ((name_length % 4) != 0)
      name_length+=(4-(name_length % 4));
    if (info != (const StringInfo *) NULL)
      name_length+=GetStringInfoLength(info);
    name_length+=8;
    if (mask != (Image *) NULL)
      name_length+=20;
    size+=WriteBlobLong(image,(unsigned int) name_length);
    if (mask == (Image *) NULL)
      size+=WriteBlobLong(image,0);
    else
      {
        if (mask->compose != NoCompositeOp)
          (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
            default_color),MagickTrue,exception);
        /* Mask coordinates are absolute within the canvas. */
        mask->page.y+=image->page.y;
        mask->page.x+=image->page.x;
        size+=WriteBlobLong(image,20);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.y);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.x);
        size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+
          mask->page.y));
        size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+
          mask->page.x));
        size+=WriteBlobByte(image,default_color);
        size+=WriteBlobByte(image,(const unsigned char)
          (mask->compose == NoCompositeOp ? 2 : 0));
        size+=WriteBlobMSBShort(image,0);
      }
    size+=WriteBlobLong(image,0);
    size+=WritePascalString(image,property,4);
    if (info != (const StringInfo *) NULL)
      size+=WriteBlob(image,GetStringInfoLength(info),
        GetStringInfoDatum(info));
    next_image=GetNextImageInList(next_image);
  }
  /*
    Now the image data!
  */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    length=WritePSDChannels(psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue,exception);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write the total size
  */
  if (layers_size != (size_t*) NULL)
    *layers_size=size;
  /* The section size stored in the file must be even. */
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Remove the opacity mask from the registry
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      (void) DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  return(status);
}
/*
  Public entry point for writing the PSD layer-info section.  Honors the
  security policy: when write rights for the "PSD" coder are not
  authorized, the section is silently skipped and MagickTrue is returned.
*/
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,WritePolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(WritePSDLayersInternal(image,image_info,psd_info,(size_t *) NULL,
    exception));
}
/*
  Write `image' (and its layer list) as a PSD/PSB file: file header,
  optional colormap, image resources (resolution, 8BIM, ICC), the layer
  section, and finally the merged composite channel data.  Returns
  MagickTrue on success.
*/
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const StringInfo
    *icc_profile;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    length,
    num_channels,
    packet_size;

  StringInfo
    *bim_profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  packet_size=(size_t) (image->depth > 8 ? 6 : 3);
  if (image->alpha_trait != UndefinedPixelTrait)
    packet_size+=image->depth > 8 ? 2 : 1;
  /* Version 2 (PSB) is required for dimensions beyond 30000 pixels. */
  psd_info.version=1;
  if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
      (image->columns > 30000) || (image->rows > 30000))
    psd_info.version=2;
  (void) WriteBlob(image,4,(const unsigned char *) "8BPS");
  (void) WriteBlobMSBShort(image,psd_info.version); /* version */
  for (i=1; i <= 6; i++)
    (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
  /* When the image has a color profile it won't be converted to gray scale */
  if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
      (SetImageGray(image,exception) != MagickFalse))
    num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
  else
    if ((image_info->type != TrueColorType) && (image_info->type !=
         TrueColorAlphaType) && (image->storage_class == PseudoClass))
      num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
    else
      {
        if (image->storage_class == PseudoClass)
          (void) SetImageStorageClass(image,DirectClass,exception);
        if (image->colorspace != CMYKColorspace)
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
        else
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
      }
  (void) WriteBlobMSBShort(image,(unsigned short) num_channels);
  (void) WriteBlobMSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobMSBLong(image,(unsigned int) image->columns);
  if (IsImageGray(image) != MagickFalse)
    {
      MagickBooleanType
        monochrome;

      /*
        Write depth & mode.
      */
      monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
        MagickTrue : MagickFalse;
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
    }
  else
    {
      (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
        PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
      /* Anything not explicitly requested as CMYK is written as sRGB. */
      if (((image_info->colorspace != UndefinedColorspace) ||
           (image->colorspace != CMYKColorspace)) &&
          (image_info->colorspace != CMYKColorspace))
        {
          (void) TransformImageColorspace(image,sRGBColorspace,exception);
          (void) WriteBlobMSBShort(image,(unsigned short)
            (image->storage_class == PseudoClass ? IndexedMode : RGBMode));
        }
      else
        {
          if (image->colorspace != CMYKColorspace)
            (void) TransformImageColorspace(image,CMYKColorspace,exception);
          (void) WriteBlobMSBShort(image,CMYKMode);
        }
    }
  if ((IsImageGray(image) != MagickFalse) ||
      (image->storage_class == DirectClass) || (image->colors > 256))
    (void) WriteBlobMSBLong(image,0);
  else
    {
      /*
        Write PSD raster colormap.
      */
      /* Fixed 768-byte palette: 256 reds, 256 greens, 256 blues. */
      (void) WriteBlobMSBLong(image,768);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].red)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].green)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].blue)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
    }
  /*
    Image resource block.
  */
  length=28; /* 0x03EB */
  bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
  icc_profile=GetImageProfile(image,"icc");
  if (bim_profile != (StringInfo *) NULL)
    {
      /* Work on a clone; ICC and resolution records are re-emitted
         separately, so stale copies must be stripped first. */
      bim_profile=CloneStringInfo(bim_profile);
      if (icc_profile != (StringInfo *) NULL)
        RemoveICCProfileFromResourceBlock(bim_profile);
      RemoveResolutionFromResourceBlock(bim_profile);
      length+=PSDQuantum(GetStringInfoLength(bim_profile));
    }
  if (icc_profile != (const StringInfo *) NULL)
    length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
  (void) WriteBlobMSBLong(image,(unsigned int) length);
  WriteResolutionResourceBlock(image);
  if (bim_profile != (StringInfo *) NULL)
    {
      (void) WriteBlob(image,GetStringInfoLength(bim_profile),
        GetStringInfoDatum(bim_profile));
      bim_profile=DestroyStringInfo(bim_profile);
    }
  if (icc_profile != (StringInfo *) NULL)
    {
      /* ICC profile as its own 8BIM resource (id 0x040F), even-padded. */
      (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
      (void) WriteBlobMSBShort(image,0x0000040F);
      (void) WriteBlobMSBShort(image,0);
      (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
        icc_profile));
      (void) WriteBlob(image,GetStringInfoLength(icc_profile),
        GetStringInfoDatum(icc_profile));
      if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile)))
        (void) WriteBlobByte(image,0);
    }
  if (status != MagickFalse)
    {
      MagickOffsetType
        size_offset;

      size_t
        size;

      /* Layer and mask section; its size field is patched afterwards. */
      size_offset=TellBlob(image);
      (void) SetPSDSize(&psd_info,image,0);
      status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
        exception);
      size_offset+=WritePSDSize(&psd_info,image,size+
        (psd_info.version == 1 ? 8 : 12),size_offset);
    }
  (void) WriteBlobMSBLong(image,0); /* user mask data */
  /*
    Write composite image.
  */
  if (status != MagickFalse)
    {
      CompressionType
        compression;

      compression=image->compression;
      if (image_info->compression != UndefinedCompression)
        image->compression=image_info->compression;
      /* Zip is not supported for the merged composite; fall back to RLE. */
      if (image->compression == ZipCompression)
        image->compression=RLECompression;
      if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
          exception) == 0)
        status=MagickFalse;
      image->compression=compression;
    }
  (void) CloseBlob(image);
  return(status);
}
|
omp_init_lock.c | // RUN: %libomp-compile-and-run
#include "omp_testsuite.h"
#include <stdio.h>
// This should be slightly less than KMP_I_LOCK_CHUNK, which is 1024
#define LOCKS_PER_ITER 1000
#define ITERATIONS (REPETITIONS + 1)
// This tests concurrently using locks on one thread while initializing new
// ones on another thread. This exercises the global lock pool.
// Each parallel iteration initializes its own batch of LOCKS_PER_ITER locks
// and then hammers them with set/unset, so new locks are created on some
// threads while others are actively using theirs.  This concurrently grows
// and exercises the runtime's global lock pool.
// Returns 0 so main()'s exit status reports success to the test harness.
int test_omp_init_lock() {
  int i;
  // One contiguous slab; iteration i owns lcks[i*LOCKS_PER_ITER ...].
  omp_lock_t lcks[ITERATIONS * LOCKS_PER_ITER];
#pragma omp parallel for schedule(static) num_threads(NUM_TASKS)
  for (i = 0; i < ITERATIONS; i++) {
    int j;
    omp_lock_t *my_lcks = &lcks[i * LOCKS_PER_ITER];
    for (j = 0; j < LOCKS_PER_ITER; j++) {
      omp_init_lock(&my_lcks[j]);
    }
    // Cycle through this iteration's locks many times to keep them busy.
    for (j = 0; j < LOCKS_PER_ITER * 100; j++) {
      omp_set_lock(&my_lcks[j % LOCKS_PER_ITER]);
      omp_unset_lock(&my_lcks[j % LOCKS_PER_ITER]);
    }
  }
  // Wait until all repetitions are done. The test is exercising growth of
  // the global lock pool, which does not shrink when no locks are allocated.
  {
    int j;
    for (j = 0; j < ITERATIONS * LOCKS_PER_ITER; j++) {
      omp_destroy_lock(&lcks[j]);
    }
  }
  return 0;
}
int main() {
  // No use repeating this test, since it's exercising a private global lock
  // pool which is not reset between test iterations.
  return test_omp_init_lock();
}
|
ast-dump-openmp-declare-variant-extensions-messages.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify %s
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify %s -x c++
int dummy() { const int r = 1; return r; } /* variant target for the declare-variant pragmas below */
#pragma omp declare variant(dummy) match(implementation={extension(match_any,match_all)}, device={kind(cpu, gpu)}) // expected-error {{only a single match extension allowed per OpenMP context selector}} expected-note {{the previous context property 'match_any' used here}} // expected-note {{the ignored property spans until here}}
int base1() { const int r = 2; return r; }
#pragma omp declare variant(dummy) match(implementation={extension(match_none,match_none)}, device={kind(gpu, fpga)}) // expected-warning {{the context property 'match_none' was used already in the same 'omp declare variant' directive; property ignored}} expected-note {{the previous context property 'match_none' used here}} expected-note {{the ignored property spans until here}}
int base2() { const int r = 3; return r; }
#pragma omp declare variant(dummy) match(implementation={vendor(pgi), extension(match_none,match_any)}, device={kind(cpu, gpu)}) // expected-error {{only a single match extension allowed per OpenMP context selector}} expected-note {{the previous context property 'match_none' used here}} // expected-note {{the ignored property spans until here}}
int base3() { const int r = 4; return r; }
/* Sum of the three base functions' return values (2 + 3 + 4 = 9). */
int test() {
  int total = base1();
  total += base2();
  total += base3();
  return total;
}
|
convolution_3x3_pack8to1_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Transform 3x3 convolution kernels into the 8x8 Winograd F(6x6,3x3) domain
// (G * k * G^T per input/output channel pair), then repack the transformed
// weights into fp16 with 8-element input-channel interleave: blocks of 8
// output channels first, then any remaining single output channels.
static void conv3x3s1_winograd64_transform_kernel_pack8to1_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt)
{
    // winograd63 transform kernel
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // G matrix of F(6x6,3x3): maps a 3-tap kernel row to 8 transform taps.
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : apply G to each of the three kernel rows -> tmp = G * k^T
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v : apply G on the other axis -> kernel_tm0 = (G * k * G^T), 8x8
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = 8a-inch/8a-64-outch;
    kernel_tm_pack8to1.create(8 * inch / 8, 64, outch / 8 + outch % 8, (size_t)2u * 8, 8);

    // Full blocks of 8 output channels: store 8 fp16 weights (one per
    // output channel) contiguously for each interleaved input channel.
    int p = 0;
    for (; p + 7 < outch; p += 8)
    {
        const Mat k0 = kernel_tm.channel(p);
        const Mat k1 = kernel_tm.channel(p + 1);
        const Mat k2 = kernel_tm.channel(p + 2);
        const Mat k3 = kernel_tm.channel(p + 3);
        const Mat k4 = kernel_tm.channel(p + 4);
        const Mat k5 = kernel_tm.channel(p + 5);
        const Mat k6 = kernel_tm.channel(p + 6);
        const Mat k7 = kernel_tm.channel(p + 7);

        Mat g0 = kernel_tm_pack8to1.channel(p / 8);

        for (int k = 0; k < 64; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);

            for (int q = 0; q + 7 < inch; q += 8)
            {
                for (int i = 0; i < 8; i++)
                {
                    g00[0] = (__fp16)k0.row(q + i)[k];
                    g00[1] = (__fp16)k1.row(q + i)[k];
                    g00[2] = (__fp16)k2.row(q + i)[k];
                    g00[3] = (__fp16)k3.row(q + i)[k];
                    g00[4] = (__fp16)k4.row(q + i)[k];
                    g00[5] = (__fp16)k5.row(q + i)[k];
                    g00[6] = (__fp16)k6.row(q + i)[k];
                    g00[7] = (__fp16)k7.row(q + i)[k];

                    g00 += 8;
                }
            }
        }
    }

    // Leftover output channels (outch % 8), one dst channel each.
    for (; p < outch; p++)
    {
        const Mat k0 = kernel_tm.channel(p);

        Mat g0 = kernel_tm_pack8to1.channel(p / 8 + p % 8);

        for (int k = 0; k < 64; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);

            for (int q = 0; q + 7 < inch; q += 8)
            {
                for (int i = 0; i < 8; i++)
                {
                    g00[0] = (__fp16)k0.row(q + i)[k];

                    g00 += 1;
                }
            }
        }
    }
}
// Winograd F(6x6, 3x3) stride-1 convolution, fp16 storage/arithmetic (AArch64 fp16 NEON).
// Input is packed 8 channels per element (pack8); output is written unpacked (pack1).
// Pipeline: pad input to a 6n+2 grid -> 8x8-tile input transform -> per-transform-position
// GEMM over 64 positions (hand-written asm inner kernels) -> 6x6 output transform with
// bias -> crop the padded result back to the requested output size.
static void conv3x3s1_winograd64_pack8to1_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    //size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    // pad to 6n+2
    // Each 8x8 input tile produces a 6x6 output tile; tiles overlap by 2 pixels,
    // so the padded input is (6n+2) x (6m+2).
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 5) / 6 * 6;
    outh = (outh + 5) / 6 * 6;
    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
    const __fp16* bias = _bias;
    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;
        const int tiles = w_tm / 8 * h_tm / 8;
        // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        bottom_blob_tm.create(tiles, 64, inch, 2u * elempack, elempack, opt.workspace_allocator);
        // const float itm[8][8] = {
        //     {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
        //
        //     {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
        //     {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
        //
        //     {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
        //     {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
        //
        //     {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
        //     {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
        //
        //     {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
        // };
        // 0 = r00 - r06 + (r04 - r02) * 5.25
        // 7 = r07 - r01 + (r03 - r05) * 5.25
        // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
        // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
        // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
        // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
        // reuse r04 * 1.25
        // reuse r03 * 2.5
        // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
        // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const Mat img0 = bottom_blob_bordered.channel(q);
            Mat img0_tm = bottom_blob_tm.channel(q);
            // Scratch for the two-pass (rows then columns) separable transform of one 8x8 tile.
            __fp16 tmp[8][8][8];
            // tile
            for (int i = 0; i < h_tm / 8; i++)
            {
                for (int j = 0; j < w_tm / 8; j++)
                {
                    const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * 8;
                    // First pass: transform each of the 8 rows (8 packed channels per lane).
                    for (int m = 0; m < 8; m++)
                    {
                        float16x8_t _r00 = vld1q_f16(r0);
                        float16x8_t _r01 = vld1q_f16(r0 + 8);
                        float16x8_t _r02 = vld1q_f16(r0 + 16);
                        float16x8_t _r03 = vld1q_f16(r0 + 24);
                        float16x8_t _r04 = vld1q_f16(r0 + 32);
                        float16x8_t _r05 = vld1q_f16(r0 + 40);
                        float16x8_t _r06 = vld1q_f16(r0 + 48);
                        float16x8_t _r07 = vld1q_f16(r0 + 56);
                        float16x8_t _tmp0m = vfmaq_n_f16(vsubq_f16(_r00, _r06), vsubq_f16(_r04, _r02), 5.25f);
                        float16x8_t _tmp7m = vfmaq_n_f16(vsubq_f16(_r07, _r01), vsubq_f16(_r03, _r05), 5.25f);
                        vst1q_f16(tmp[0][m], _tmp0m);
                        vst1q_f16(tmp[7][m], _tmp7m);
                        // tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25;
                        // tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25;
                        float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_r02, _r06), _r04, 4.25f);
                        float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_r01, _r05), _r03, 4.25f);
                        // float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25);
                        // float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25);
                        float16x8_t _tmp1m = vaddq_f16(_tmp12a, _tmp12b);
                        float16x8_t _tmp2m = vsubq_f16(_tmp12a, _tmp12b);
                        vst1q_f16(tmp[1][m], _tmp1m);
                        vst1q_f16(tmp[2][m], _tmp2m);
                        // tmp[1][m] = tmp12a + tmp12b;
                        // tmp[2][m] = tmp12a - tmp12b;
                        float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_r06, _r02, 0.25f), _r04, 1.25f);
                        float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);
                        // float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25);
                        // float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2);
                        float16x8_t _tmp3m = vaddq_f16(_tmp34a, _tmp34b);
                        float16x8_t _tmp4m = vsubq_f16(_tmp34a, _tmp34b);
                        vst1q_f16(tmp[3][m], _tmp3m);
                        vst1q_f16(tmp[4][m], _tmp4m);
                        // tmp[3][m] = tmp34a + tmp34b;
                        // tmp[4][m] = tmp34a - tmp34b;
                        float16x8_t _tmp56a = vfmaq_n_f16(_r06, vfmsq_n_f16(_r02, _r04, 1.25f), 4.f);
                        float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);
                        // float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4);
                        // float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5);
                        float16x8_t _tmp5m = vaddq_f16(_tmp56a, _tmp56b);
                        float16x8_t _tmp6m = vsubq_f16(_tmp56a, _tmp56b);
                        vst1q_f16(tmp[5][m], _tmp5m);
                        vst1q_f16(tmp[6][m], _tmp6m);
                        // tmp[5][m] = tmp56a + tmp56b;
                        // tmp[6][m] = tmp56a - tmp56b;
                        r0 += w * 8;
                    }
                    // Output pointers: one per transform row, each separated by tiles*8 fp16 values.
                    __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 8 + j) * 8;
                    __fp16* r0_tm_1 = r0_tm_0 + tiles * 8;
                    __fp16* r0_tm_2 = r0_tm_0 + tiles * 16;
                    __fp16* r0_tm_3 = r0_tm_0 + tiles * 24;
                    __fp16* r0_tm_4 = r0_tm_0 + tiles * 32;
                    __fp16* r0_tm_5 = r0_tm_0 + tiles * 40;
                    __fp16* r0_tm_6 = r0_tm_0 + tiles * 48;
                    __fp16* r0_tm_7 = r0_tm_0 + tiles * 56;
                    // Second pass: transform the columns of the row-transformed tile.
                    for (int m = 0; m < 8; m++)
                    {
                        float16x8_t _tmp00 = vld1q_f16(tmp[m][0]);
                        float16x8_t _tmp01 = vld1q_f16(tmp[m][1]);
                        float16x8_t _tmp02 = vld1q_f16(tmp[m][2]);
                        float16x8_t _tmp03 = vld1q_f16(tmp[m][3]);
                        float16x8_t _tmp04 = vld1q_f16(tmp[m][4]);
                        float16x8_t _tmp05 = vld1q_f16(tmp[m][5]);
                        float16x8_t _tmp06 = vld1q_f16(tmp[m][6]);
                        float16x8_t _tmp07 = vld1q_f16(tmp[m][7]);
                        float16x8_t _r0tm0 = vfmaq_n_f16(vsubq_f16(_tmp00, _tmp06), vsubq_f16(_tmp04, _tmp02), 5.25f);
                        float16x8_t _r0tm7 = vfmaq_n_f16(vsubq_f16(_tmp07, _tmp01), vsubq_f16(_tmp03, _tmp05), 5.25f);
                        // r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
                        // r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;
                        float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_tmp02, _tmp06), _tmp04, 4.25f);
                        float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_tmp01, _tmp05), _tmp03, 4.25f);
                        // float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
                        // float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25);
                        float16x8_t _r0tm1 = vaddq_f16(_tmp12a, _tmp12b);
                        float16x8_t _r0tm2 = vsubq_f16(_tmp12a, _tmp12b);
                        // r0_tm[1] = tmp12a + tmp12b;
                        // r0_tm[2] = tmp12a - tmp12b;
                        float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
                        float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);
                        // float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
                        // float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);
                        float16x8_t _r0tm3 = vaddq_f16(_tmp34a, _tmp34b);
                        float16x8_t _r0tm4 = vsubq_f16(_tmp34a, _tmp34b);
                        // r0_tm[3] = tmp34a + tmp34b;
                        // r0_tm[4] = tmp34a - tmp34b;
                        float16x8_t _tmp56a = vfmaq_n_f16(_tmp06, vfmsq_n_f16(_tmp02, _tmp04, 1.25f), 4.f);
                        float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);
                        // float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
                        // float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);
                        float16x8_t _r0tm5 = vaddq_f16(_tmp56a, _tmp56b);
                        float16x8_t _r0tm6 = vsubq_f16(_tmp56a, _tmp56b);
                        // r0_tm[5] = tmp56a + tmp56b;
                        // r0_tm[6] = tmp56a - tmp56b;
                        vst1q_f16(r0_tm_0, _r0tm0);
                        vst1q_f16(r0_tm_1, _r0tm1);
                        vst1q_f16(r0_tm_2, _r0tm2);
                        vst1q_f16(r0_tm_3, _r0tm3);
                        vst1q_f16(r0_tm_4, _r0tm4);
                        vst1q_f16(r0_tm_5, _r0tm5);
                        vst1q_f16(r0_tm_6, _r0tm6);
                        vst1q_f16(r0_tm_7, _r0tm7);
                        r0_tm_0 += tiles * 64;
                        r0_tm_1 += tiles * 64;
                        r0_tm_2 += tiles * 64;
                        r0_tm_3 += tiles * 64;
                        r0_tm_4 += tiles * 64;
                        r0_tm_5 += tiles * 64;
                        r0_tm_6 += tiles * 64;
                        r0_tm_7 += tiles * 64;
                    }
                }
            }
        }
    }
    bottom_blob_bordered = Mat();
    // END transform input
    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;
        const int tiles = h_tm / 8 * w_tm / 8;
        // permute
        // Re-lay out the transformed input so that tiles are grouped in runs of 8/4/1
        // for the GEMM inner kernels below.
        // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        Mat bottom_blob_tm2;
        if (tiles >= 8)
            bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 4)
            bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator);
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 64; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);
            // tile
            int i = 0;
            for (; i + 7 < tiles; i += 8)
            {
                __fp16* tm2p = tm2.row<__fp16>(i / 8);
                const __fp16* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * 8;
                for (int q = 0; q < inch; q++)
                {
                    // transpose 8x8
                    asm volatile(
                        "prfm pldl1keep, [%0, #512] \n"
                        "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
                        "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n"
                        "sub %0, %0, #64 \n"
                        "uzp1 v16.8h, v0.8h, v4.8h \n"
                        "uzp2 v20.8h, v0.8h, v4.8h \n"
                        "uzp1 v17.8h, v1.8h, v5.8h \n"
                        "uzp2 v21.8h, v1.8h, v5.8h \n"
                        "uzp1 v18.8h, v2.8h, v6.8h \n"
                        "uzp2 v22.8h, v2.8h, v6.8h \n"
                        "uzp1 v19.8h, v3.8h, v7.8h \n"
                        "uzp2 v23.8h, v3.8h, v7.8h \n"
                        "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
                        "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
                        : "=r"(r0),  // %0
                        "=r"(tm2p) // %1
                        : "0"(r0),
                        "1"(tm2p)
                        : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
                    r0 += bottom_blob_tm.cstep * 8;
                }
            }
            for (; i + 3 < tiles; i += 4)
            {
                __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4);
                const __fp16* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * 8;
                for (int q = 0; q < inch; q++)
                {
                    // transpose 8x4
                    asm volatile(
                        "prfm pldl1keep, [%0, #256] \n"
                        "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
                        "st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
                        : "=r"(r0),  // %0
                        "=r"(tm2p) // %1
                        : "0"(r0),
                        "1"(tm2p)
                        : "memory", "v0", "v1", "v2", "v3");
                    r0 += bottom_blob_tm.cstep * 8;
                }
            }
            for (; i < tiles; i++)
            {
                __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + i % 4);
                const __fp16* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * 8;
                for (int q = 0; q < inch; q++)
                {
                    asm volatile(
                        "prfm pldl1keep, [%0, #128] \n"
                        "ld1 {v0.8h}, [%0] \n"
                        "st1 {v0.8h}, [%1], #16 \n"
                        : "=r"(r0),  // %0
                        "=r"(tm2p) // %1
                        : "0"(r0),
                        "1"(tm2p)
                        : "memory", "v0");
                    r0 += bottom_blob_tm.cstep * 8;
                }
            }
        }
        bottom_blob_tm = Mat();
        // permute end
        top_blob_tm.create(tiles, 64, outch, 2u, 1, opt.workspace_allocator);
        int nn_outch = 0;
        int remain_outch_start = 0;
        // Main GEMM: 8 output channels at a time, tiles in runs of 8/4/1.
        nn_outch = outch >> 3;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int p = pp * 8;
            __fp16* output0_tm = top_blob_tm.channel(p);
            __fp16* output1_tm = top_blob_tm.channel(p + 1);
            __fp16* output2_tm = top_blob_tm.channel(p + 2);
            __fp16* output3_tm = top_blob_tm.channel(p + 3);
            __fp16* output4_tm = top_blob_tm.channel(p + 4);
            __fp16* output5_tm = top_blob_tm.channel(p + 5);
            __fp16* output6_tm = top_blob_tm.channel(p + 6);
            __fp16* output7_tm = top_blob_tm.channel(p + 7);
            const Mat kernel01_tm = kernel_tm.channel(p / 8);
            for (int r = 0; r < 64; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);
                int i = 0;
                for (; i + 7 < tiles; i += 8)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8);
                    const __fp16* kptr = kernel01_tm.row<const __fp16>(r);
                    int nn = inch; // inch always > 0
                    // 8 tiles x 8 outch: accumulate in v24-v31, one 8-lane vector per outch.
                    asm volatile(
                        "eor v24.16b, v24.16b, v24.16b \n"
                        "eor v25.16b, v25.16b, v25.16b \n"
                        "eor v26.16b, v26.16b, v26.16b \n"
                        "eor v27.16b, v27.16b, v27.16b \n"
                        "eor v28.16b, v28.16b, v28.16b \n"
                        "eor v29.16b, v29.16b, v29.16b \n"
                        "eor v30.16b, v30.16b, v30.16b \n"
                        "eor v31.16b, v31.16b, v31.16b \n"
                        "0: \n"
                        "prfm pldl1keep, [%9, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%9], #64 \n"
                        "prfm pldl1keep, [%10, #512] \n"
                        "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%10], #64 \n"
                        "fmla v24.8h, v16.8h, v0.h[0] \n"
                        "fmla v25.8h, v16.8h, v0.h[1] \n"
                        "fmla v26.8h, v16.8h, v0.h[2] \n"
                        "fmla v27.8h, v16.8h, v0.h[3] \n"
                        "fmla v28.8h, v16.8h, v0.h[4] \n"
                        "fmla v29.8h, v16.8h, v0.h[5] \n"
                        "fmla v30.8h, v16.8h, v0.h[6] \n"
                        "fmla v31.8h, v16.8h, v0.h[7] \n"
                        "fmla v24.8h, v17.8h, v1.h[0] \n"
                        "fmla v25.8h, v17.8h, v1.h[1] \n"
                        "fmla v26.8h, v17.8h, v1.h[2] \n"
                        "fmla v27.8h, v17.8h, v1.h[3] \n"
                        "fmla v28.8h, v17.8h, v1.h[4] \n"
                        "fmla v29.8h, v17.8h, v1.h[5] \n"
                        "fmla v30.8h, v17.8h, v1.h[6] \n"
                        "fmla v31.8h, v17.8h, v1.h[7] \n"
                        "prfm pldl1keep, [%9, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%9], #64 \n"
                        "fmla v24.8h, v18.8h, v2.h[0] \n"
                        "fmla v25.8h, v18.8h, v2.h[1] \n"
                        "fmla v26.8h, v18.8h, v2.h[2] \n"
                        "fmla v27.8h, v18.8h, v2.h[3] \n"
                        "fmla v28.8h, v18.8h, v2.h[4] \n"
                        "fmla v29.8h, v18.8h, v2.h[5] \n"
                        "fmla v30.8h, v18.8h, v2.h[6] \n"
                        "fmla v31.8h, v18.8h, v2.h[7] \n"
                        "prfm pldl1keep, [%10, #512] \n"
                        "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%10], #64 \n"
                        "fmla v24.8h, v19.8h, v3.h[0] \n"
                        "fmla v25.8h, v19.8h, v3.h[1] \n"
                        "fmla v26.8h, v19.8h, v3.h[2] \n"
                        "fmla v27.8h, v19.8h, v3.h[3] \n"
                        "fmla v28.8h, v19.8h, v3.h[4] \n"
                        "fmla v29.8h, v19.8h, v3.h[5] \n"
                        "fmla v30.8h, v19.8h, v3.h[6] \n"
                        "fmla v31.8h, v19.8h, v3.h[7] \n"
                        "fmla v24.8h, v20.8h, v4.h[0] \n"
                        "fmla v25.8h, v20.8h, v4.h[1] \n"
                        "fmla v26.8h, v20.8h, v4.h[2] \n"
                        "fmla v27.8h, v20.8h, v4.h[3] \n"
                        "fmla v28.8h, v20.8h, v4.h[4] \n"
                        "fmla v29.8h, v20.8h, v4.h[5] \n"
                        "fmla v30.8h, v20.8h, v4.h[6] \n"
                        "fmla v31.8h, v20.8h, v4.h[7] \n"
                        "fmla v24.8h, v21.8h, v5.h[0] \n"
                        "fmla v25.8h, v21.8h, v5.h[1] \n"
                        "fmla v26.8h, v21.8h, v5.h[2] \n"
                        "fmla v27.8h, v21.8h, v5.h[3] \n"
                        "fmla v28.8h, v21.8h, v5.h[4] \n"
                        "fmla v29.8h, v21.8h, v5.h[5] \n"
                        "fmla v30.8h, v21.8h, v5.h[6] \n"
                        "fmla v31.8h, v21.8h, v5.h[7] \n"
                        "fmla v24.8h, v22.8h, v6.h[0] \n"
                        "fmla v25.8h, v22.8h, v6.h[1] \n"
                        "fmla v26.8h, v22.8h, v6.h[2] \n"
                        "fmla v27.8h, v22.8h, v6.h[3] \n"
                        "fmla v28.8h, v22.8h, v6.h[4] \n"
                        "fmla v29.8h, v22.8h, v6.h[5] \n"
                        "fmla v30.8h, v22.8h, v6.h[6] \n"
                        "fmla v31.8h, v22.8h, v6.h[7] \n"
                        "subs %w0, %w0, #1 \n"
                        "fmla v24.8h, v23.8h, v7.h[0] \n"
                        "fmla v25.8h, v23.8h, v7.h[1] \n"
                        "fmla v26.8h, v23.8h, v7.h[2] \n"
                        "fmla v27.8h, v23.8h, v7.h[3] \n"
                        "fmla v28.8h, v23.8h, v7.h[4] \n"
                        "fmla v29.8h, v23.8h, v7.h[5] \n"
                        "fmla v30.8h, v23.8h, v7.h[6] \n"
                        "fmla v31.8h, v23.8h, v7.h[7] \n"
                        "bne 0b \n"
                        "st1 {v24.8h}, [%1], #16 \n"
                        "st1 {v25.8h}, [%2], #16 \n"
                        "st1 {v26.8h}, [%3], #16 \n"
                        "st1 {v27.8h}, [%4], #16 \n"
                        "st1 {v28.8h}, [%5], #16 \n"
                        "st1 {v29.8h}, [%6], #16 \n"
                        "st1 {v30.8h}, [%7], #16 \n"
                        "st1 {v31.8h}, [%8], #16 \n"
                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(output1_tm), // %2
                        "=r"(output2_tm), // %3
                        "=r"(output3_tm), // %4
                        "=r"(output4_tm), // %5
                        "=r"(output5_tm), // %6
                        "=r"(output6_tm), // %7
                        "=r"(output7_tm), // %8
                        "=r"(r0),         // %9
                        "=r"(kptr)        // %10
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(output1_tm),
                        "3"(output2_tm),
                        "4"(output3_tm),
                        "5"(output4_tm),
                        "6"(output5_tm),
                        "7"(output6_tm),
                        "8"(output7_tm),
                        "9"(r0),
                        "10"(kptr)
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
                }
                for (; i + 3 < tiles; i += 4)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4);
                    const __fp16* kptr = kernel01_tm.row<const __fp16>(r);
                    int nn = inch; // inch always > 0
                    // 4 tiles x 8 outch: same scheme with 4-lane accumulators.
                    asm volatile(
                        "eor v24.16b, v24.16b, v24.16b \n"
                        "eor v25.16b, v25.16b, v25.16b \n"
                        "eor v26.16b, v26.16b, v26.16b \n"
                        "eor v27.16b, v27.16b, v27.16b \n"
                        "eor v28.16b, v28.16b, v28.16b \n"
                        "eor v29.16b, v29.16b, v29.16b \n"
                        "eor v30.16b, v30.16b, v30.16b \n"
                        "eor v31.16b, v31.16b, v31.16b \n"
                        "0: \n"
                        "prfm pldl1keep, [%9, #256] \n"
                        "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%9], #32 \n"
                        "prfm pldl1keep, [%10, #512] \n"
                        "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%10], #64 \n"
                        "fmla v24.4h, v16.4h, v0.h[0] \n"
                        "fmla v25.4h, v16.4h, v0.h[1] \n"
                        "fmla v26.4h, v16.4h, v0.h[2] \n"
                        "fmla v27.4h, v16.4h, v0.h[3] \n"
                        "fmla v28.4h, v16.4h, v0.h[4] \n"
                        "fmla v29.4h, v16.4h, v0.h[5] \n"
                        "fmla v30.4h, v16.4h, v0.h[6] \n"
                        "fmla v31.4h, v16.4h, v0.h[7] \n"
                        "fmla v24.4h, v17.4h, v1.h[0] \n"
                        "fmla v25.4h, v17.4h, v1.h[1] \n"
                        "fmla v26.4h, v17.4h, v1.h[2] \n"
                        "fmla v27.4h, v17.4h, v1.h[3] \n"
                        "fmla v28.4h, v17.4h, v1.h[4] \n"
                        "fmla v29.4h, v17.4h, v1.h[5] \n"
                        "fmla v30.4h, v17.4h, v1.h[6] \n"
                        "fmla v31.4h, v17.4h, v1.h[7] \n"
                        "prfm pldl1keep, [%9, #256] \n"
                        "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%9], #32 \n"
                        "fmla v24.4h, v18.4h, v2.h[0] \n"
                        "fmla v25.4h, v18.4h, v2.h[1] \n"
                        "fmla v26.4h, v18.4h, v2.h[2] \n"
                        "fmla v27.4h, v18.4h, v2.h[3] \n"
                        "fmla v28.4h, v18.4h, v2.h[4] \n"
                        "fmla v29.4h, v18.4h, v2.h[5] \n"
                        "fmla v30.4h, v18.4h, v2.h[6] \n"
                        "fmla v31.4h, v18.4h, v2.h[7] \n"
                        "prfm pldl1keep, [%10, #512] \n"
                        "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%10], #64 \n"
                        "fmla v24.4h, v19.4h, v3.h[0] \n"
                        "fmla v25.4h, v19.4h, v3.h[1] \n"
                        "fmla v26.4h, v19.4h, v3.h[2] \n"
                        "fmla v27.4h, v19.4h, v3.h[3] \n"
                        "fmla v28.4h, v19.4h, v3.h[4] \n"
                        "fmla v29.4h, v19.4h, v3.h[5] \n"
                        "fmla v30.4h, v19.4h, v3.h[6] \n"
                        "fmla v31.4h, v19.4h, v3.h[7] \n"
                        "fmla v24.4h, v20.4h, v4.h[0] \n"
                        "fmla v25.4h, v20.4h, v4.h[1] \n"
                        "fmla v26.4h, v20.4h, v4.h[2] \n"
                        "fmla v27.4h, v20.4h, v4.h[3] \n"
                        "fmla v28.4h, v20.4h, v4.h[4] \n"
                        "fmla v29.4h, v20.4h, v4.h[5] \n"
                        "fmla v30.4h, v20.4h, v4.h[6] \n"
                        "fmla v31.4h, v20.4h, v4.h[7] \n"
                        "fmla v24.4h, v21.4h, v5.h[0] \n"
                        "fmla v25.4h, v21.4h, v5.h[1] \n"
                        "fmla v26.4h, v21.4h, v5.h[2] \n"
                        "fmla v27.4h, v21.4h, v5.h[3] \n"
                        "fmla v28.4h, v21.4h, v5.h[4] \n"
                        "fmla v29.4h, v21.4h, v5.h[5] \n"
                        "fmla v30.4h, v21.4h, v5.h[6] \n"
                        "fmla v31.4h, v21.4h, v5.h[7] \n"
                        "fmla v24.4h, v22.4h, v6.h[0] \n"
                        "fmla v25.4h, v22.4h, v6.h[1] \n"
                        "fmla v26.4h, v22.4h, v6.h[2] \n"
                        "fmla v27.4h, v22.4h, v6.h[3] \n"
                        "fmla v28.4h, v22.4h, v6.h[4] \n"
                        "fmla v29.4h, v22.4h, v6.h[5] \n"
                        "fmla v30.4h, v22.4h, v6.h[6] \n"
                        "fmla v31.4h, v22.4h, v6.h[7] \n"
                        "subs %w0, %w0, #1 \n"
                        "fmla v24.4h, v23.4h, v7.h[0] \n"
                        "fmla v25.4h, v23.4h, v7.h[1] \n"
                        "fmla v26.4h, v23.4h, v7.h[2] \n"
                        "fmla v27.4h, v23.4h, v7.h[3] \n"
                        "fmla v28.4h, v23.4h, v7.h[4] \n"
                        "fmla v29.4h, v23.4h, v7.h[5] \n"
                        "fmla v30.4h, v23.4h, v7.h[6] \n"
                        "fmla v31.4h, v23.4h, v7.h[7] \n"
                        "bne 0b \n"
                        "st1 {v24.4h}, [%1], #8 \n"
                        "st1 {v25.4h}, [%2], #8 \n"
                        "st1 {v26.4h}, [%3], #8 \n"
                        "st1 {v27.4h}, [%4], #8 \n"
                        "st1 {v28.4h}, [%5], #8 \n"
                        "st1 {v29.4h}, [%6], #8 \n"
                        "st1 {v30.4h}, [%7], #8 \n"
                        "st1 {v31.4h}, [%8], #8 \n"
                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(output1_tm), // %2
                        "=r"(output2_tm), // %3
                        "=r"(output3_tm), // %4
                        "=r"(output4_tm), // %5
                        "=r"(output5_tm), // %6
                        "=r"(output6_tm), // %7
                        "=r"(output7_tm), // %8
                        "=r"(r0),         // %9
                        "=r"(kptr)        // %10
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(output1_tm),
                        "3"(output2_tm),
                        "4"(output3_tm),
                        "5"(output4_tm),
                        "6"(output5_tm),
                        "7"(output6_tm),
                        "8"(output7_tm),
                        "9"(r0),
                        "10"(kptr)
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
                }
                for (; i < tiles; i++)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4);
                    const __fp16* kptr = kernel01_tm.row<const __fp16>(r);
                    int nn = inch; // inch always > 0
                    // 1 tile x 8 outch: single accumulator v30, one lane per outch.
                    asm volatile(
                        "eor v30.16b, v30.16b, v30.16b \n"
                        "0: \n"
                        "prfm pldl1keep, [%9, #128] \n"
                        "ld1 {v0.8h}, [%9], #16 \n"
                        "prfm pldl1keep, [%10, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%10], #64 \n"
                        "fmla v30.8h, v16.8h, v0.h[0] \n"
                        "fmla v30.8h, v17.8h, v0.h[1] \n"
                        "prfm pldl1keep, [%10, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%10], #64 \n"
                        "fmla v30.8h, v18.8h, v0.h[2] \n"
                        "fmla v30.8h, v19.8h, v0.h[3] \n"
                        "subs %w0, %w0, #1 \n"
                        "fmla v30.8h, v20.8h, v0.h[4] \n"
                        "fmla v30.8h, v21.8h, v0.h[5] \n"
                        "fmla v30.8h, v22.8h, v0.h[6] \n"
                        "fmla v30.8h, v23.8h, v0.h[7] \n"
                        "bne 0b \n"
                        "st1 {v30.h}[0], [%1], #2 \n"
                        "st1 {v30.h}[1], [%2], #2 \n"
                        "st1 {v30.h}[2], [%3], #2 \n"
                        "st1 {v30.h}[3], [%4], #2 \n"
                        "st1 {v30.h}[4], [%5], #2 \n"
                        "st1 {v30.h}[5], [%6], #2 \n"
                        "st1 {v30.h}[6], [%7], #2 \n"
                        "st1 {v30.h}[7], [%8], #2 \n"
                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(output1_tm), // %2
                        "=r"(output2_tm), // %3
                        "=r"(output3_tm), // %4
                        "=r"(output4_tm), // %5
                        "=r"(output5_tm), // %6
                        "=r"(output6_tm), // %7
                        "=r"(output7_tm), // %8
                        "=r"(r0),         // %9
                        "=r"(kptr)        // %10
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(output1_tm),
                        "3"(output2_tm),
                        "4"(output3_tm),
                        "5"(output4_tm),
                        "6"(output5_tm),
                        "7"(output6_tm),
                        "8"(output7_tm),
                        "9"(r0),
                        "10"(kptr)
                        : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30");
                }
            }
        }
        // Remaining output channels (outch % 8), one at a time.
        remain_outch_start += nn_outch << 3;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = remain_outch_start; p < outch; p++)
        {
            __fp16* output0_tm = top_blob_tm.channel(p);
            const Mat kernel0_tm = kernel_tm.channel(p / 8 + p % 8);
            for (int r = 0; r < 64; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);
                int i = 0;
                for (; i + 7 < tiles; i += 8)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8);
                    const __fp16* kptr = kernel0_tm.row<const __fp16>(r);
                    int nn = inch; // inch always > 0
                    asm volatile(
                        "eor v30.16b, v30.16b, v30.16b \n"
                        "0: \n"
                        "prfm pldl1keep, [%2, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%2], #64 \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v0.8h}, [%3], #16 \n"
                        "fmla v30.8h, v16.8h, v0.h[0] \n"
                        "fmla v30.8h, v17.8h, v0.h[1] \n"
                        "prfm pldl1keep, [%2, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%2], #64 \n"
                        "fmla v30.8h, v18.8h, v0.h[2] \n"
                        "fmla v30.8h, v19.8h, v0.h[3] \n"
                        "subs %w0, %w0, #1 \n"
                        "fmla v30.8h, v20.8h, v0.h[4] \n"
                        "fmla v30.8h, v21.8h, v0.h[5] \n"
                        "fmla v30.8h, v22.8h, v0.h[6] \n"
                        "fmla v30.8h, v23.8h, v0.h[7] \n"
                        "bne 0b \n"
                        "st1 {v30.8h}, [%1], #16 \n"
                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(r0),         // %2
                        "=r"(kptr)        // %3
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(r0),
                        "3"(kptr)
                        : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30");
                }
                for (; i + 3 < tiles; i += 4)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4);
                    const __fp16* kptr = kernel0_tm.row<const __fp16>(r);
                    int nn = inch; // inch always > 0
                    asm volatile(
                        "eor v30.16b, v30.16b, v30.16b \n"
                        "0: \n"
                        "prfm pldl1keep, [%2, #256] \n"
                        "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v0.8h}, [%3], #16 \n"
                        "fmla v30.4h, v16.4h, v0.h[0] \n"
                        "fmla v30.4h, v17.4h, v0.h[1] \n"
                        "prfm pldl1keep, [%2, #256] \n"
                        "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n"
                        "fmla v30.4h, v18.4h, v0.h[2] \n"
                        "fmla v30.4h, v19.4h, v0.h[3] \n"
                        "subs %w0, %w0, #1 \n"
                        "fmla v30.4h, v20.4h, v0.h[4] \n"
                        "fmla v30.4h, v21.4h, v0.h[5] \n"
                        "fmla v30.4h, v22.4h, v0.h[6] \n"
                        "fmla v30.4h, v23.4h, v0.h[7] \n"
                        "bne 0b \n"
                        "st1 {v30.4h}, [%1], #8 \n"
                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(r0),         // %2
                        "=r"(kptr)        // %3
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(r0),
                        "3"(kptr)
                        : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30");
                }
                for (; i < tiles; i++)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4);
                    const __fp16* kptr = kernel0_tm.row<const __fp16>(r);
                    // Scalar tail uses intrinsics: 8-lane fp16 FMA, then a horizontal
                    // add via fp32 (widen low+high halves before the lane reduction).
                    float16x8_t _sum0 = vdupq_n_f16((__fp16)0.f);
                    for (int q = 0; q < inch; q++)
                    {
                        float16x8_t _r0 = vld1q_f16(r0);
                        float16x8_t _k0 = vld1q_f16(kptr);
                        _sum0 = vfmaq_f16(_sum0, _r0, _k0);
                        kptr += 8;
                        r0 += 8;
                    }
                    __fp16 sum0 = vaddvq_f32(vcvt_f32_f16(vadd_f16(vget_low_f16(_sum0), vget_high_f16(_sum0))));
                    output0_tm[0] = sum0;
                    output0_tm++;
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot
    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, 2u, 1, opt.workspace_allocator);
    }
    {
        // const float otm[6][8] = {
        //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
        //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
        //     {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
        // };
        // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
        // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
        // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
        // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
        // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
        // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;
        const int tiles = w_tm / 8 * h_tm / 8;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            const Mat out0_tm = top_blob_tm.channel(p);
            Mat out0 = top_blob_bordered.channel(p);
            const __fp16 bias0 = bias ? bias[p] : 0.f;
            // float32x2_t _bias0 = vdup_n_f32(bias0);
            // Scratch for the two-pass (rows then columns) 8x8 -> 6x6 output transform.
            __fp16 tmp[6][8];
            // tile
            for (int i = 0; i < outh / 6; i++)
            {
                for (int j = 0; j < outw / 6; j++)
                {
                    // top_blob_tm.create(tiles, 64, outch, 4u, 1, opt.workspace_allocator);
                    const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 8 + j) * 1;
                    const __fp16* output0_tm_1 = output0_tm_0 + tiles * 1;
                    const __fp16* output0_tm_2 = output0_tm_0 + tiles * 2;
                    const __fp16* output0_tm_3 = output0_tm_0 + tiles * 3;
                    const __fp16* output0_tm_4 = output0_tm_0 + tiles * 4;
                    const __fp16* output0_tm_5 = output0_tm_0 + tiles * 5;
                    const __fp16* output0_tm_6 = output0_tm_0 + tiles * 6;
                    const __fp16* output0_tm_7 = output0_tm_0 + tiles * 7;
                    // TODO neon optimize
                    for (int m = 0; m < 8; m++)
                    {
                        __fp16 tmp024a = output0_tm_1[0] + output0_tm_2[0];
                        __fp16 tmp135a = output0_tm_1[0] - output0_tm_2[0];
                        __fp16 tmp024b = output0_tm_3[0] + output0_tm_4[0];
                        __fp16 tmp135b = output0_tm_3[0] - output0_tm_4[0];
                        __fp16 tmp024c = output0_tm_5[0] + output0_tm_6[0];
                        __fp16 tmp135c = output0_tm_5[0] - output0_tm_6[0];
                        tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32;
                        tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
                        // "x + x" below is the *2 scaling from the otm matrix written as an add.
                        tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
                        tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
                        tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
                        tmp[5][m] = output0_tm_7[0] + tmp135a + tmp135b * 32 + tmp135c;
                        output0_tm_0 += tiles * 8;
                        output0_tm_1 += tiles * 8;
                        output0_tm_2 += tiles * 8;
                        output0_tm_3 += tiles * 8;
                        output0_tm_4 += tiles * 8;
                        output0_tm_5 += tiles * 8;
                        output0_tm_6 += tiles * 8;
                        output0_tm_7 += tiles * 8;
                    }
                    __fp16* output0 = out0.row<__fp16>(i * 6) + j * 6;
                    for (int m = 0; m < 6; m++)
                    {
                        const __fp16* tmp0 = tmp[m];
                        __fp16 tmp024a = tmp0[1] + tmp0[2];
                        __fp16 tmp135a = tmp0[1] - tmp0[2];
                        __fp16 tmp024b = tmp0[3] + tmp0[4];
                        __fp16 tmp135b = tmp0[3] - tmp0[4];
                        __fp16 tmp024c = tmp0[5] + tmp0[6];
                        __fp16 tmp135c = tmp0[5] - tmp0[6];
                        output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
                        output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
                        output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
                        output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
                        output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
                        output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
                        output0 += outw;
                    }
                }
            }
        }
    }
    // END transform output
    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
mxnet_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file mxnet_op.h
* \brief
* \author Junyuan Xie
*/
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_
#include <dmlc/omp.h>
#include <mxnet/base.h>
#include <mxnet/engine.h>
#include <mxnet/op_attr_types.h>
#include <algorithm>
#include "./operator_tune.h"
#include "../engine/openmp.h"
#ifdef __CUDACC__
#include "../common/cuda_utils.h"
#endif // __CUDACC__
namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;
// PI lives in constant memory when compiled for device code; plain const otherwise.
#ifdef __CUDA_ARCH__
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
using std::isnan;
#endif
// Recommended launch width for a problem of size N; specialized per device below.
template<typename xpu>
int get_num_threads(const int N);
#ifdef __CUDACC__
// Grid-stride loop over [0, n) for CUDA kernels.
#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
       i < (n); \
       i += blockDim.x * gridDim.x)
// Query the properties of the currently selected CUDA device.
inline cudaDeviceProp cuda_get_device_prop() {
  int device;
  CUDA_CALL(cudaGetDevice(&device));
  cudaDeviceProp deviceProp;
  CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device));
  return deviceProp;
}
/*!
 * \brief Get the number of blocks for cuda kernel given N
 */
inline int cuda_get_num_blocks(const int N) {
  using namespace mshadow::cuda;
  return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
}
// GPU: total thread count = blocks * threads-per-block for a problem of size N.
template<>
inline int get_num_threads<gpu>(const int N) {
  using namespace mshadow::cuda;
  return kBaseThreadNum * cuda_get_num_blocks(N);
}
#endif  // __CUDACC__
// CPU: delegates to the engine's OpenMP recommendation; note N is ignored here.
template<>
inline int get_num_threads<cpu>(const int N) {
  return engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
}
/*! \brief operator request type switch */
// Binds a compile-time constant OpReqType named ReqType for the code in __VA_ARGS__.
// kWriteInplace is folded into kWriteTo; kNullOp and unknown values run nothing.
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...)  \
  switch (req) {                                    \
  case kNullOp:                                     \
    break;                                          \
  case kWriteInplace:                               \
  case kWriteTo:                                    \
    {                                               \
      const OpReqType ReqType = kWriteTo;           \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  case kAddTo:                                      \
    {                                               \
      const OpReqType ReqType = kAddTo;             \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  default:                                          \
    break;                                          \
  }
// Binds a compile-time constant int `ndim` (1..5) matching the runtime NDim for
// the code in __VA_ARGS__; NDim == 0 runs nothing, NDim > 5 is a fatal error.
#define MXNET_NDIM_SWITCH(NDim, ndim, ...)          \
  if (NDim == 0) {                                  \
  } else if (NDim == 1) {                           \
    const int ndim = 1;                             \
    {__VA_ARGS__}                                   \
  } else if (NDim == 2) {                           \
    const int ndim = 2;                             \
    {__VA_ARGS__}                                   \
  } else if (NDim == 3) {                           \
    const int ndim = 3;                             \
    {__VA_ARGS__}                                   \
  } else if (NDim == 4) {                           \
    const int ndim = 4;                             \
    {__VA_ARGS__}                                   \
  } else if (NDim == 5) {                           \
    const int ndim = 5;                             \
    {__VA_ARGS__}                                   \
  } else {                                          \
    LOG(FATAL) << "ndim=" << NDim << "too large ";  \
  }
// Type switch that instantiates __VA_ARGS__ with DType bound to the runtime
// mshadow type enum, rejecting the 8-bit integer types with a fatal error.
#define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \
  switch (type) {                                   \
  case mshadow::kFloat32:                           \
    {                                               \
      typedef float DType;                          \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  case mshadow::kFloat64:                           \
    {                                               \
      typedef double DType;                         \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  case mshadow::kFloat16:                           \
    {                                               \
      typedef mshadow::half::half_t DType;          \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  case mshadow::kUint8:                             \
    LOG(FATAL) << "This operation does not "        \
                  "support int8 or uint8";          \
    break;                                          \
  case mshadow::kInt8:                              \
    LOG(FATAL) << "This operation does not "        \
                  "support int8 or uint8";          \
    break;                                          \
  case mshadow::kInt32:                             \
    {                                               \
      typedef int32_t DType;                        \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  case mshadow::kInt64:                             \
    {                                               \
      typedef int64_t DType;                        \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  default:                                          \
    LOG(FATAL) << "Unknown type enum " << type;     \
  }
/*!
 * \brief assign the val to out according
 * to request in Kernel::Launch
 * \param out the data to be assigned
 * \param req the assignment request
 * \param val the value to be assigned to out
 * \tparam OType output type
 * \tparam VType value type
 */
#define KERNEL_ASSIGN(out, req, val)  \
  {                                   \
    switch (req) {                    \
    case kNullOp:                     \
      break;                          \
    case kWriteTo:                    \
    case kWriteInplace:               \
      (out) = (val);                  \
      break;                          \
    case kAddTo:                      \
      (out) += (val);                 \
      break;                          \
    default:                          \
      break;                          \
    }                                 \
  }
/* \brief Flatten an ndim coordinate into a row-major linear index.
 * A coordinate component equal to or beyond its extent contributes 0
 * (broadcasting-friendly clamp). */
template<int ndim>
MSHADOW_XINLINE int ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  int idx = 0;
  #pragma unroll
  for (int d = 0; d < ndim; ++d) {
    idx *= shape[d];
    if (shape[d] > coord[d]) {
      idx += coord[d];
    }
  }
  return idx;
}
/* Invert ravel: recover the ndim coordinate of a row-major linear index. */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const int idx, const Shape<ndim>& shape) {
  Shape<ndim> coord;
  int rem = idx;
  #pragma unroll
  for (int d = ndim - 1; d >= 0; --d) {
    const int next = rem / shape[d];
    coord[d] = rem - next * shape[d];  // remainder without a second division
    rem = next;
  }
  return coord;
}
/* Inner product of a coordinate with a stride vector -> linear offset. */
template<int ndim>
MSHADOW_XINLINE int dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  int acc = 0;
  #pragma unroll
  for (int d = 0; d < ndim; ++d) {
    acc += coord[d] * stride[d];
  }
  return acc;
}
/* Fused unravel(idx, shape) followed by dot(., stride), without
 * materializing the intermediate coordinate. */
template<int ndim>
MSHADOW_XINLINE int unravel_dot(const int idx, const Shape<ndim>& shape,
  const Shape<ndim>& stride) {
  int acc = 0;
  int rem = idx;
  #pragma unroll
  for (int d = ndim - 1; d >= 0; --d) {
    const int next = rem / shape[d];
    acc += (rem - next * shape[d]) * stride[d];
    rem = next;
  }
  return acc;
}
/* Row-major strides for a shape; size-1 dims get stride 0 so they broadcast. */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t running = 1;
  #pragma unroll
  for (int d = ndim - 1; d >= 0; --d) {
    stride[d] = (shape[d] > 1) ? running : 0;
    running *= shape[d];
  }
  return stride;
}
/* Increment coordinates by one (row-major order) and update the strided
 * linear index `idx` to match.  Carries propagate from the innermost
 * dimension outward; each carry subtracts the full extent of the wrapped
 * dimension from idx and adds one step of the next-outer stride. */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
  index_t* idx, const Shape<ndim>& stride) {
  ++(*coord)[ndim-1];
  *idx += stride[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx = *idx + stride[i-1] - shape[i] * stride[i];
  }
}
/* Increment coordinates by one and keep TWO strided indices in sync — the
 * same carry logic as the single-index overload, applied to (idx1, stride1)
 * and (idx2, stride2) simultaneously (e.g. one index per operand). */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
  index_t* idx1, const Shape<ndim>& stride1,
  index_t* idx2, const Shape<ndim>& stride2) {
  ++(*coord)[ndim-1];
  *idx1 += stride1[ndim-1];
  *idx2 += stride2[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
    *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
  }
}
/*!
 * \brief Simple copy of all data from one blob to another.  Sizes and device
 * masks must match.  When the dtypes differ, the source is cast elementwise
 * to the destination type via mshadow::expr::tcast.
 * \param s stream to run the copy on
 * \param to Destination blob
 * \param from Source blob
 */
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
  CHECK_EQ(from.Size(), to.Size());
  CHECK_EQ(from.dev_mask(), to.dev_mask());
  MSHADOW_TYPE_SWITCH(to.type_flag_, DType, {
    if (to.type_flag_ == from.type_flag_) {
      // same dtype: raw flat copy
      mshadow::Copy(to.FlatTo1D<xpu, DType>(), from.FlatTo1D<xpu, DType>(), s);
    } else {
      // dtype mismatch: nested type switch to cast source elements
      MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, {
        to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
      })
    }
  })
}
/*! \brief Binary op backward gradient OP wrapper: chain rule, multiplying
 * the incoming output gradient by GRAD_OP's local gradient. */
template<typename GRAD_OP>
struct backward_grad {
  /* \brief Backward calc with grad
   * \param a - output grad
   * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
   * \return input grad (a * GRAD_OP::Map(args...))
   */
  template<typename DType, typename ...Args>
  MSHADOW_XINLINE static DType Map(DType a, Args... args) {
    return DType(a * GRAD_OP::Map(args...));
  }
};
/*! \brief Binary op backward gradient OP wrapper (tuned): same Map as
 * backward_grad, tagged with `tunable` so Kernel<>::Launch selects the
 * OMP-tuned launch path. */
template<typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
  using backward_grad<GRAD_OP>::Map;
};
/*! \brief Select assignment operation based upon the req value.
 * Wraps OP::Map so that each element is written through KERNEL_ASSIGN,
 * honoring the compile-time write request `req` (write/add/null).
 * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch.
 */
template<typename OP, int req>
struct op_with_req {
  typedef OP Operation;
  /*! \brief input is one tensor */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }
  /*! \brief inputs are two tensors */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }
  /*! \brief input is tensor and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }
  /*! \brief input is tensor and two scalar values */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in,
                                  const DType value_1, const DType value_2) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
  }
  /*! \brief No inputs (ie fill to constant value) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out) {
    KERNEL_ASSIGN(out[i], req, OP::Map());
  }
  /*! \brief input is single scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(value));
  }
  /*! \brief inputs are two tensors and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out,
                                  const DType *input_1, const DType *input_2, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
  }
  /*! \brief inputs are three tensors (ie backward grad with binary grad function) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out,
                                  const DType *input_1,
                                  const DType *input_2,
                                  const DType *input_3) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
  }
};
template<typename OP, typename xpu>
struct Kernel;
/*!
 * \brief CPU Kernel launcher: maps OP::Map over [0, N), parallelized with
 * OpenMP when available and when the recommended thread count warrants it.
 * \tparam OP Operator to launch
 */
template<typename OP>
struct Kernel<OP, cpu> {
  /*!
   * \brief Launch a generic CPU kernel.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static bool Launch(mshadow::Stream<cpu> *, const int N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      // serial path: OMP overhead not worthwhile
      for (int i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }
  /*!
   * \brief Launch CPU kernel which has OMP tuning data available.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam PRIMITIVE_OP The primitive operation to use for tuning
   * \tparam DType Data type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param dest Destination pointer (used to infer DType)
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename PRIMITIVE_OP, typename DType, typename ...Args>
  static void LaunchTuned(mshadow::Stream<cpu> *, const int N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    // tuned_op decides from (N, threads) whether OMP pays for itself for
    // PRIMITIVE_OP at this DType
    if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
      static_cast<size_t>(N), static_cast<size_t>(omp_threads))) {
      for (int i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
  }
  /*!
   * \brief Launch custom-tuned kernel where each thread is set to
   * operate on a contiguous partition.  OP::Map here takes (start, count)
   * rather than a single index.
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
   */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<cpu> *s, const int N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      OP::Map(0, N, args...);
    } else {
      // ceil(N / omp_threads): one contiguous chunk per thread; the last
      // chunk is clipped to N
      const int length = (N + omp_threads - 1) / omp_threads;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i += length) {
        OP::Map(i, i + length > N ? N - i : length, args...);
      }
    }
#else
    OP::Map(0, N, args...);
#endif
  }
  /*!
   * \brief Launch a tunable OP with implicitly-supplied data type
   * \tparam DType Data type
   * \tparam T OP type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const int N, DType *dest, Args... args) {
    LaunchTuned<T, DType>(s, N, dest, args...);
    return true;
  }
  /*!
   * \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
   * \tparam DType Data type
   * \tparam T Wrapper type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const int N, DType *dest, Args... args) {
    LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
    return true;
  }
};
#ifdef __CUDACC__
/*! \brief Generic GPU kernel: each thread walks indices with a grid-wide
 * stride (blockDim.x * gridDim.x), calling OP::Map per index. */
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, args...);
  }
}
/*! \brief Generic GPU kernel for (start, count)-style OPs: same grid-wide
 * stride loop, but each call processes a partition of length 1. */
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, 1, args...);
  }
}
/*! \brief GPU Kernel launcher: wraps the generic CUDA kernels above,
 * choosing a grid size capped at kMaxGridNum. */
template<typename OP>
struct Kernel<OP, gpu> {
  /*! \brief Launch GPU kernel (per-index OP::Map) */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
  }
  /*! \brief Launch GPU kernel for (start, count)-style OPs */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel_ex<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
  }
};
#endif // __CUDACC__
/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Scalar immediate (compile-time constant)
 */
template<int val>
struct set_to_int : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch()): writes
  // val into out[i]
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out) {
    out[i] = DType(val);
  }
  // mshadow_op version (when used with op_with_req<>): just yields val
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};
/*!
* \brief Special-case kernel shortcut for setting to zero and one
*/
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
} // namespace mxnet_op
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MXNET_OP_H_
|
GB_binop__rdiv_fc32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__rdiv_fc32
// A.*B function (eWiseMult): GB_AemultB__rdiv_fc32
// A*D function (colscale): GB_AxD__rdiv_fc32
// D*A function (rowscale): GB_DxB__rdiv_fc32
// C+=B function (dense accum): GB_Cdense_accumB__rdiv_fc32
// C+=b function (dense accum): GB_Cdense_accumb__rdiv_fc32
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rdiv_fc32
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rdiv_fc32
// C=scalar+B GB_bind1st__rdiv_fc32
// C=scalar+B' GB_bind1st_tran__rdiv_fc32
// C=A+scalar GB_bind2nd__rdiv_fc32
// C=A'+scalar GB_bind2nd_tran__rdiv_fc32
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// B,b type: GxB_FC32_t
// BinaryOp: cij = GB_FC32_div (bij, aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_BTYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
GxB_FC32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_FC32_div (y, x) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_FC32 || GxB_NO_RDIV_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; the elementwise loop lives in
// the included template and uses the GB_BINOP macro defined above.
void GB_Cdense_ewise3_accum__rdiv_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulation).  Returns
// GrB_NO_VALUE when this operator/type kernel is compiled out (GB_DISABLE).
GrB_Info GB_Cdense_ewise3_noaccum__rdiv_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// precomputed task slices (kfirst/klast/pstart) for parallelism.
GrB_Info GB_Cdense_accumB__rdiv_fc32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
GrB_Info GB_Cdense_accumb__rdiv_fc32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns.  Harmless
    // artifact of the code generator; left as-is (auto-generated file).
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column-scale A by the diagonal matrix D.  Cx aliases C->x as the
// numeric output array for the included template.
GrB_Info GB_AxD__rdiv_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row-scale B by the diagonal matrix D.
GrB_Info GB_DxB__rdiv_fc32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, where "+" is this file's operator
// (GB_FC32_div with reversed operands, i.e. rdiv).
GrB_Info GB_AaddB__rdiv_fc32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, applying the operator only on the
// intersection of the patterns of A and B.
GrB_Info GB_AemultB__rdiv_fc32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): bind the scalar x as the first operand.  For rdiv,
// z = f(x,y) = y/x, hence Cx[p] = GB_FC32_div (Bx[p], x).
GrB_Info GB_bind1st__rdiv_fc32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC32_t bij = Bx [p] ;
        Cx [p] = GB_FC32_div (bij, x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): bind the scalar y as the second operand.  For rdiv,
// z = f(x,y) = y/x, hence Cx[p] = GB_FC32_div (y, Ax[p]).
GrB_Info GB_bind2nd__rdiv_fc32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC32_t aij = Ax [p] ;
        Cx [p] = GB_FC32_div (y, aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = Ax [pA] ; \
Cx [pC] = GB_FC32_div (aij, x) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar x bound
// as the first operand; the element-wise work is done by GB_CAST_OP inside
// the included transpose template.
GrB_Info GB_bind1st_tran__rdiv_fc32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        GxB_FC32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the rest of the translation unit
    #undef  GB_ATYPE
    #define GB_ATYPE \
        GxB_FC32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = Ax [pA] ; \
Cx [pC] = GB_FC32_div (y, aij) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar y bound
// as the second operand (see the GB_CAST_OP definition above).
GrB_Info GB_bind2nd_tran__rdiv_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
quantize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods takes a 24 bit image and reduces
% the number of colors so it can be displayed on raster device with less
% bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes a standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinate of two opposite vertices (vertex
% nearest the origin in RGB space and the vertex farthest from the origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image.  Reduction collapses the tree until the number of
% colors it represents is, at most, the number of colors desired in the
% output image.  Assignment defines the output image's color map and sets
% each pixel's color by reclassification in the reduced tree.  Our goal is
% to minimize the numerical discrepancies between the original colors and
% quantized colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the classification phase for realistic values of Cmax.  If
% colors components in the input image are quantized to k-bit precision,
% so that Cmax= 2k-1, the tree would need k levels below the root node to
% allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is 1 +
% sum(i=1, k, 8k).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of pixels
% represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except a the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define ErrorRelativeWeight PerceptibleReciprocal(16)
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
  Typedef declarations.
*/
/*
  A pixel in double precision, used to accumulate per-node color sums and
  means without quantization loss.
*/
typedef struct _DoublePixelPacket
{
  double
    red,
    green,
    blue,
    alpha;
} DoublePixelPacket;
/*
  One node of the color-description octree; each node covers a cube of
  RGB space at depth `level`.
*/
typedef struct _NodeInfo
{
  struct _NodeInfo
    *parent,
    *child[16];  /* 16 rather than 8 children — presumably to index a 4th
                    (alpha) axis when alpha is associated; TODO confirm */

  MagickSizeType
    number_unique;  /* pixels classified exactly at this node (n2 above) */

  DoublePixelPacket
    total_color;    /* component sums for pixels classified here */

  double
    quantize_error; /* accumulated squared distance to the node center */

  size_t
    color_number,   /* colormap index assigned during the Assignment phase */
    id,             /* child slot of this node within its parent */
    level;          /* depth in the tree (root = 0) */
} NodeInfo;
/*
  A singly-linked list of node arenas; nodes are allocated in batches
  (NodesInAList) and the arenas are chained for bulk destruction.
*/
typedef struct _Nodes
{
  NodeInfo
    *nodes;

  struct _Nodes
    *next;
} Nodes;
/*
  Working state for color quantization: the octree, pruning thresholds,
  the node arenas, and the error-diffusion state used while dithering.
*/
typedef struct _CubeInfo
{
  NodeInfo
    *root;             /* root of the color-description tree */

  size_t
    colors,            /* colors currently represented (nodes with n2 > 0) */
    maximum_colors;    /* target colormap size */

  ssize_t
    transparent_index; /* colormap slot reserved for transparency, if any */

  MagickSizeType
    transparent_pixels;

  DoublePixelPacket
    target;            /* color currently being matched (ClosestColor) */

  double
    distance,          /* best distance found so far for `target` */
    pruning_threshold, /* Ep: prune nodes with quantize_error <= Ep */
    next_threshold;    /* minimum error among surviving nodes */

  size_t
    nodes,             /* total nodes allocated */
    free_nodes,        /* unused slots remaining in the current arena */
    color_number;

  NodeInfo
    *next_node;

  Nodes
    *node_queue;       /* chain of node arenas (see Nodes) */

  MemoryInfo
    *memory_info;

  ssize_t
    *cache;            /* color cache indexed by quantized RGB key */

  DoublePixelPacket
    error[ErrorQueueLength];   /* recent quantization errors for dithering */

  double
    diffusion,
    weights[ErrorQueueLength]; /* error-diffusion weights */

  QuantizeInfo
    *quantize_info;

  MagickBooleanType
    associate_alpha;   /* quantize in 4-D (RGBA) instead of 3-D (RGB) */

  ssize_t
    x,
    y;                 /* current pixel position while dithering */

  size_t
    depth;             /* maximum tree depth (<= MaxTreeDepth) */

  MagickOffsetType
    offset;

  MagickSizeType
    span;              /* total pixels, for progress reporting */
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *,ExceptionInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *,ExceptionInfo *),
SetGrayscaleImage(Image *,ExceptionInfo *),
SetImageColormap(Image *,CubeInfo *,ExceptionInfo *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DefineImageColormap(Image *,CubeInfo *,NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  const char
    *option;

  QuantizeInfo
    *quantize_info;

  /*
    Allocate a QuantizeInfo structure and initialize it with defaults;
    inherit dither and verbosity settings from the image info when supplied.
  */
  quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info));
  GetQuantizeInfo(quantize_info);
  if (image_info == (ImageInfo *) NULL)
    return(quantize_info);
  quantize_info->dither_method=image_info->dither == MagickFalse ?
    NoDitherMethod : RiemersmaDitherMethod;
  option=GetImageOption(image_info,"dither");
  if (option != (const char *) NULL)
    quantize_info->dither_method=(DitherMethod) ParseCommandOption(
      MagickDitherOptions,MagickFalse,option);
  quantize_info->measure_error=image_info->verbose;
  return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static inline void AssociateAlphaPixel(const Image *image,
  const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel)
{
  double
    scale;

  /*
    Load a packed pixel into double precision.  When the cube associates
    alpha and the pixel is not fully opaque, the color channels are
    premultiplied by the normalized alpha value; otherwise the scale factor
    of 1.0 leaves them untouched.
  */
  scale=1.0;
  if ((cube_info->associate_alpha != MagickFalse) &&
      (GetPixelAlpha(image,pixel) != OpaqueAlpha))
    scale=(double) (QuantumScale*GetPixelAlpha(image,pixel));
  alpha_pixel->red=scale*(double) GetPixelRed(image,pixel);
  alpha_pixel->green=scale*(double) GetPixelGreen(image,pixel);
  alpha_pixel->blue=scale*(double) GetPixelBlue(image,pixel);
  alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
}
static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info,
  const PixelInfo *pixel,DoublePixelPacket *alpha_pixel)
{
  double
    scale;

  /*
    PixelInfo variant of AssociateAlphaPixel(): copy the channels to double
    precision, premultiplying the color channels by the normalized alpha when
    the cube associates alpha and the pixel is not fully opaque.
  */
  scale=1.0;
  if ((cube_info->associate_alpha != MagickFalse) &&
      (pixel->alpha != OpaqueAlpha))
    scale=(double) (QuantumScale*pixel->alpha);
  alpha_pixel->red=scale*(double) pixel->red;
  alpha_pixel->green=scale*(double) pixel->green;
  alpha_pixel->blue=scale*(double) pixel->blue;
  alpha_pixel->alpha=(double) pixel->alpha;
}
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const DoublePixelPacket *pixel,size_t index)
{
  size_t
    id;

  /*
    Pack bit `index' of each 8-bit channel into a child-slot index:
    red -> bit 0, green -> bit 1, blue -> bit 2, and, when alpha is
    associated, alpha -> bit 3.
  */
  id=(size_t) ((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01);
  id|=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) &
    0x01) << 1);
  id|=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) &
    0x01) << 2);
  if (cube_info->associate_alpha != MagickFalse)
    id|=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) &
      0x01) << 3);
  return(id);
}
/*
  AssignImageColors() builds the output colormap from the pruned tree and
  reassigns every pixel to its closest colormap entry, either by error
  diffusion (DitherImage) or by a parallel per-row tree search.  Returns
  MagickFalse when the colormap cannot be set.
*/
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define AssignImageTag "Assign/Image"

  ColorspaceType
    colorspace;

  ssize_t
    y;

  /*
    Allocate image colormap.
  */
  colorspace=image->colorspace;  /* remember so it can be restored below */
  if (cube_info->quantize_info->colorspace != UndefinedColorspace)
    (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace,
      exception);
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  if (SetImageColormap(image,cube_info,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Create a reduced color image.
  */
  if (cube_info->quantize_info->dither_method != NoDitherMethod)
    (void) DitherImage(image,cube_info,exception);
  else
    {
      CacheView
        *image_view;

      MagickBooleanType
        status;

      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CubeInfo
          cube;  /* per-thread copy: ClosestColor() mutates search state */

        Quantum
          *magick_restrict q;

        ssize_t
          count,
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        cube=(*cube_info);
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          DoublePixelPacket
            pixel;

          const NodeInfo
            *node_info;

          ssize_t
            i;

          size_t
            id,
            index;

          /*
            Identify the deepest node containing the pixel's color.
          */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
          {
            PixelInfo
              packet;

            /* run-length batch identical neighboring pixels */
            GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
            if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
              break;
          }
          AssociateAlphaPixel(image,&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);  /* larger than any possible distance */
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(image,(Quantum) index,q);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRed(image,ClampToQuantum(
                  image->colormap[index].red),q);
                SetPixelGreen(image,ClampToQuantum(
                  image->colormap[index].green),q);
                SetPixelBlue(image,ClampToQuantum(
                  image->colormap[index].blue),q);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelAlpha(image,ClampToQuantum(
                    image->colormap[index].alpha),q);
              }
            q+=GetPixelChannels(image);
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image,exception);
  if ((cube_info->quantize_info->number_colors == 2) &&
      (IsGrayColorspace(cube_info->quantize_info->colorspace)))
    {
      double
        intensity;

      /*
        Monochrome image: force the two colormap entries to pure black and
        white, ordered by their luma.
      */
      intensity=GetPixelInfoLuma(image->colormap+0) < QuantumRange/2.0 ? 0.0 :
        QuantumRange;
      if (image->colors > 1)
        {
          intensity=0.0;
          if (GetPixelInfoLuma(image->colormap+0) >
              GetPixelInfoLuma(image->colormap+1))
            intensity=(double) QuantumRange;
        }
      image->colormap[0].red=intensity;
      image->colormap[0].green=intensity;
      image->colormap[0].blue=intensity;
      if (image->colors > 1)
        {
          image->colormap[1].red=(double) QuantumRange-intensity;
          image->colormap[1].green=(double) QuantumRange-intensity;
          image->colormap[1].blue=(double) QuantumRange-intensity;
        }
    }
  (void) SyncImage(image,exception);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (IssRGBCompatibleColorspace(colorspace) == MagickFalse))
    (void) TransformImageColorspace(image,colorspace,exception);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the storage_class phase for realistic values of
% Cmax. If colors components in the input image are quantized to k-bit
% precision, so that Cmax= 2k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1, k, 8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, storage_class scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each such
% node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  /*
    Enable alpha association when the image carries an alpha trait, except
    for two-color grayscale quantization where alpha is ignored.
  */
  cube_info->associate_alpha=image->alpha_trait != UndefinedPixelTrait ?
    MagickTrue : MagickFalse;
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    cube_info->associate_alpha=MagickFalse;
}
/*
  ClassifyImageColors() descends the color tree for every image pixel,
  creating nodes on demand and accumulating per-node color sums and
  quantization error.  The first loop classifies at full MaxTreeDepth until
  the color budget is exceeded; the tree is then pruned and the second loop
  continues at the reduced cube_info->depth.  Returns MagickFalse when
  classification was aborted (pixel fetch failure or progress cancellation).
*/
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"

  CacheView
    *image_view;

  double
    bisect;

  DoublePixelPacket
    error,
    mid,
    midpoint,
    pixel;

  MagickBooleanType
    proceed;

  NodeInfo
    *node_info;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
  */
  SetAssociatedAlpha(image,cube_info);
  if (cube_info->quantize_info->colorspace != image->colorspace)
    {
      if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
          (cube_info->quantize_info->colorspace != CMYKColorspace))
        (void) TransformImageColorspace((Image *) image,
          cube_info->quantize_info->colorspace,exception);
      else
        if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
          (void) TransformImageColorspace((Image *) image,sRGBColorspace,
            exception);
    }
  midpoint.red=(double) QuantumRange/2.0;
  midpoint.green=(double) QuantumRange/2.0;
  midpoint.blue=(double) QuantumRange/2.0;
  midpoint.alpha=(double) QuantumRange/2.0;
  error.alpha=0.0;  /* alpha error stays 0 unless alpha is associated */
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        /* run-length batch identical neighboring pixels */
        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        double
          distance;

        /* track the center of the shrinking subcube as we descend */
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != 0)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    if (cube_info->colors > cube_info->maximum_colors)
      {
        /* over budget: prune and fall through to the reduced-depth loop */
        PruneToCubeDepth(cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Classify the remaining rows at the (possibly pruned) cube_info->depth.
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","%s",
                  image->filename);
                continue;
              }
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != 0)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (cube_info->quantize_info->colorspace != image->colorspace)
    if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
        (cube_info->quantize_info->colorspace != CMYKColorspace))
      (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if image info is NULL a new one.
%
% o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  /*
    Allocate a fresh QuantizeInfo with defaults; copy the caller's settings
    over those defaults when a source structure was supplied.
  */
  clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetQuantizeInfo(clone_info);
  if (quantize_info != (QuantizeInfo *) NULL)
    {
      clone_info->number_colors=quantize_info->number_colors;
      clone_info->tree_depth=quantize_info->tree_depth;
      clone_info->dither_method=quantize_info->dither_method;
      clone_info->colorspace=quantize_info->colorspace;
      clone_info->measure_error=quantize_info->measure_error;
    }
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
/*
  ClosestColor() recursively scans node_info's subtree for the colormap entry
  nearest cube_info->target; the best squared distance and colormap index are
  left in cube_info->distance and cube_info->color_number.
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      double
        alpha,
        beta,
        distance,
        pixel;

      DoublePixelPacket
        *magick_restrict q;

      PixelInfo
        *magick_restrict p;

      /*
        Determine if this color is "closest".
      */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      alpha=1.0;
      beta=1.0;
      if (cube_info->associate_alpha != MagickFalse)
        {
          /* weight each color by its normalized alpha so comparisons use
             premultiplied values */
          alpha=(MagickRealType) (QuantumScale*p->alpha);
          beta=(MagickRealType) (QuantumScale*q->alpha);
        }
      /*
        Accumulate squared distance channel by channel, bailing out as soon
        as the running total exceeds the best distance found so far.
      */
      pixel=alpha*p->red-beta*q->red;
      distance=pixel*pixel;
      if (distance <= cube_info->distance)
        {
          pixel=alpha*p->green-beta*q->green;
          distance+=pixel*pixel;
          if (distance <= cube_info->distance)
            {
              pixel=alpha*p->blue-beta*q->blue;
              distance+=pixel*pixel;
              if (distance <= cube_info->distance)
                {
                  if (cube_info->associate_alpha != MagickFalse)
                    {
                      pixel=p->alpha-q->alpha;
                      distance+=pixel*pixel;
                    }
                  if (distance <= cube_info->distance)
                    {
                      cube_info->distance=distance;
                      cube_info->color_number=node_info->color_number;
                    }
                }
            }
        }
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CompressImageColormap() re-quantizes a palette image to its current number
  of colors, which removes duplicate and unused colormap entries.  Returns
  MagickFalse when the image is not a palette image.
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
  ExceptionInfo *exception)
{
  QuantizeInfo
    quantize_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsPaletteImage(image) == MagickFalse)
    return(MagickFalse);  /* nothing to compress: not a colormapped image */
  GetQuantizeInfo(&quantize_info);
  quantize_info.number_colors=image->colors;
  quantize_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&quantize_info,image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry. A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero.
%
% The format of the DefineImageColormap method is:
%
% void DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
/*
  DefineImageColormap() walks the tree depth-first and appends a colormap
  entry -- the mean color of the node's classified pixels -- for every node
  with a nonzero unique-pixel count, recording each node's colormap index.
*/
static void DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      double
        alpha;

      PixelInfo
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube.
      */
      q=image->colormap+image->colors;
      alpha=(double) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);  /* 1/number_unique, zero-safe */
      if (cube_info->associate_alpha == MagickFalse)
        {
          q->red=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.red);
          q->green=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.green);
          q->blue=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.blue);
          q->alpha=(double) OpaqueAlpha;
        }
      else
        {
          double
            opacity;

          opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
          q->alpha=(double) ClampToQuantum(opacity);
          if (q->alpha == OpaqueAlpha)
            {
              q->red=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.blue);
            }
          else
            {
              double
                gamma;

              /*
                Color sums were alpha-premultiplied; divide by the mean alpha
                to recover the straight color.
              */
              gamma=(double) (QuantumScale*q->alpha);
              gamma=PerceptibleReciprocal(gamma);
              q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.blue);
              /*
                Remember the most-populous non-opaque entry as the image's
                transparent index.
              */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with an image.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  Nodes
    *next,
    *node_queue;

  /*
    Release color cube tree storage: walk the chain of node batches freeing
    each batch's node array and its link, then release the pixel cache, the
    quantize info, and finally the cube structure itself.
  */
  node_queue=cube_info->node_queue;
  do
  {
    next=node_queue->next;
    node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(node_queue->nodes);
    node_queue=(Nodes *) RelinquishMagickMemory(node_queue);
    node_queue=next;
  } while (node_queue != (Nodes *) NULL);
  cube_info->node_queue=(Nodes *) NULL;
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Invalidate the signature before releasing the structure so a stale
    pointer trips the assertion if the structure is destroyed twice.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  quantize_info->signature=(~MagickCoreSignature);
  return((QuantizeInfo *) RelinquishMagickMemory(quantize_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
% the corresponding color reduced algorithm to neighboring pixels using
% serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
% MagickTrue if the image is dithered otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
  ssize_t
    i,
    number_threads;

  /*
    Release the per-thread error-diffusion buffers allocated by
    AcquirePixelThreadSet(), then the pointer array itself.  Always returns
    NULL so callers can write `pixels=DestroyPixelThreadSet(pixels);'.
  */
  assert(pixels != (DoublePixelPacket **) NULL);
  /*
    Hoist the loop-invariant resource-limit query out of the loop condition;
    the original re-queried GetMagickResourceLimit() on every iteration, and
    a mid-loop limit change could even mismatch the allocation count.
  */
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (pixels[i] != (DoublePixelPacket *) NULL)
      pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
  pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
  DoublePixelPacket
    **pixels;

  size_t
    i,
    number_threads;

  /*
    Allocate one scratch buffer per worker thread; each buffer holds two rows
    (current and previous) of `count' error-diffusion pixels.  Returns NULL
    on allocation failure, releasing any partially built set.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (DoublePixelPacket **) NULL)
    return((DoublePixelPacket **) NULL);
  /* zero the pointer array so a partial failure frees only what exists */
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < number_threads; i++)
  {
    pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2*
      sizeof(**pixels));
    if (pixels[i] == (DoublePixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}
/*
  CacheOffset() maps a pixel to its slot in the cube_info->cache lookup table
  by packing the top (8-CacheShift) bits of each 8-bit channel into disjoint
  bit fields of the offset.
*/
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    offset;

  offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  if (cube_info->associate_alpha != MagickFalse)
    /* alpha contributes a fourth bit field only when associated */
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha)));
  return(offset);
}
/*
  FloydSteinbergDither() quantizes the image while distributing each
  pixel's quantization error to its unprocessed neighbors with the
  classic 7/16, 5/16, 3/16, 1/16 Floyd-Steinberg weights, scanning rows
  in alternating (serpentine) direction.  The loop is sequential -- error
  diffusion depends on the previously processed pixel -- so id is the
  single (main) thread's OpenMP id.
  Returns MagickFalse if the pixel cache could not be read or synced.
*/
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  DoublePixelPacket
    **pixels;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Distribute quantization error using Floyd-Steinberg.
  */
  pixels=AcquirePixelThreadSet(image->columns);
  if (pixels == (DoublePixelPacket **) NULL)
    return(MagickFalse);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    CubeInfo
      cube;

    DoublePixelPacket
      *current,
      *previous;

    Quantum
      *magick_restrict q;

    size_t
      index;

    ssize_t
      x,
      v;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    cube=(*cube_info);
    /*
      Serpentine scan: the two row buffers alternate roles each scanline,
      and v flips the direction of error propagation on odd rows.
    */
    current=pixels[id]+(y & 0x01)*image->columns;
    previous=pixels[id]+((y+1) & 0x01)*image->columns;
    v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        color,
        pixel;

      ssize_t
        i;

      ssize_t
        u;

      u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel);
      /* Gather diffused error from the west (7/16) neighbor... */
      if (x > 0)
        {
          pixel.red+=7.0*cube_info->diffusion*current[u-v].red/16;
          pixel.green+=7.0*cube_info->diffusion*current[u-v].green/16;
          pixel.blue+=7.0*cube_info->diffusion*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=7.0*cube_info->diffusion*current[u-v].alpha/16;
        }
      /* ...and the north-east (1/16), north (5/16), north-west (3/16). */
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              pixel.red+=cube_info->diffusion*previous[u+v].red/16;
              pixel.green+=cube_info->diffusion*previous[u+v].green/16;
              pixel.blue+=cube_info->diffusion*previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=cube_info->diffusion*previous[u+v].alpha/16;
            }
          pixel.red+=5.0*cube_info->diffusion*previous[u].red/16;
          pixel.green+=5.0*cube_info->diffusion*previous[u].green/16;
          pixel.blue+=5.0*cube_info->diffusion*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=5.0*cube_info->diffusion*previous[u].alpha/16;
          if (x > 0)
            {
              pixel.red+=3.0*cube_info->diffusion*previous[u-v].red/16;
              pixel.green+=3.0*cube_info->diffusion*previous[u-v].green/16;
              pixel.blue+=3.0*cube_info->diffusion*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=3.0*cube_info->diffusion*previous[u-v].alpha/16;
            }
        }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      /* Consult the color cache; on a miss, search the color tree. */
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)
        {
          NodeInfo
            *node_info;

          size_t
            node_id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            node_id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[node_id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[node_id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image));
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),
            q+u*GetPixelChannels(image));
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),
            q+u*GetPixelChannels(image));
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),
            q+u*GetPixelChannels(image));
          if (cube.associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),
              q+u*GetPixelChannels(image));
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      /*
        Store the error.
      */
      AssociateAlphaPixelInfo(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].alpha=pixel.alpha-color.alpha;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
            image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelThreadSet(pixels);
  /*
    Fix: propagate the tracked status instead of unconditionally returning
    MagickTrue, so cache read/sync failures are reported to the caller.
  */
  return(status);
}
/*
  RiemersmaDither() processes a single step of the Hilbert-curve walk:
  if the cube's current (x,y) position lies inside the image, the pixel
  there is corrected by the weighted error queue, assigned its closest
  colormap entry, and its own quantization error is pushed onto the
  queue; the position is then advanced one pixel in `direction`.
  Returns MagickFalse on pixel-cache failure or a canceled progress
  monitor.
*/
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CubeInfo
    *p;

  DoublePixelPacket
    color,
    pixel;

  MagickBooleanType
    proceed;

  size_t
    index;

  p=cube_info;
  /* The Hilbert curve covers a power-of-two square that may extend past
     the image; steps outside the image bounds only move the position. */
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      Quantum
        *magick_restrict q;

      ssize_t
        i;

      /*
        Distribute error.
      */
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      AssociateAlphaPixel(image,cube_info,q,&pixel);
      /* Add the exponentially weighted errors of the last
         ErrorQueueLength visited pixels. */
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
          p->error[i].red;
        pixel.green+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
          p->error[i].green;
        pixel.blue+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
          p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.alpha+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
            p->error[i].alpha;
      }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      /* Consult the color cache; on a miss, search the color tree. */
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          NodeInfo
            *node_info;

          size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          p->distance=(double) (4.0*(QuantumRange+1.0)*((double)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) p->cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q);
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q);
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q);
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue.
      */
      (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  /* Advance the walk one pixel in the requested compass direction. */
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}
/*
  Riemersma() recursively emits the sequence of moves of a Hilbert curve
  over a 2^level x 2^level square, calling RiemersmaDither() once per
  move.  `direction` selects the orientation of the current sub-curve;
  at level 1 the base U-shape is emitted, otherwise four rotated
  sub-curves are stitched together with three connecting moves.  The
  exact gravity ordering below encodes the curve geometry -- do not
  reorder.
*/
static MagickBooleanType Riemersma(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const size_t level,const unsigned int direction,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=MagickTrue;
  if (level == 1)
    switch (direction)
    {
      case WestGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        break;
      }
      case EastGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        break;
      }
      case NorthGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        break;
      }
      case SouthGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        break;
      }
      default:
        break;
    }
  else
    switch (direction)
    {
      case WestGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
            exception);
        break;
      }
      case EastGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
            exception);
        break;
      }
      case NorthGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
            exception);
        break;
      }
      case SouthGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
            exception);
        break;
      }
      default:
        break;
    }
  return(status);
}
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *artifact;

  MagickBooleanType
    status;

  size_t
    depth,
    max_extent;

  /*
    Dispatch to the requested dithering algorithm, honoring the
    "dither:diffusion-amount" artifact if set.
  */
  artifact=GetImageArtifact(image,"dither:diffusion-amount");
  if (artifact != (const char *) NULL)
    cube_info->diffusion=StringToDoubleInterval(artifact,1.0);
  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info,exception));
  /*
    Distribute quantization error along a Hilbert curve.  The curve spans
    the smallest power-of-two square enclosing the image; depth is
    ceil(log2(max dimension)).
  */
  (void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  max_extent=MagickMax(image->columns,image->rows);
  depth=(size_t) log2((double) max_extent);
  if (((size_t) 1UL << depth) < max_extent)
    depth++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  status=MagickTrue;
  if (depth > 0)
    status=Riemersma(image,image_view,cube_info,depth,NorthGravity,exception);
  if (status != MagickFalse)
    status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initializes the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
% CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose an optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a few number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
  const size_t depth,const size_t maximum_colors)
{
  CubeInfo
    *cube_info;

  double
    weight;

  size_t
    length;

  ssize_t
    i;

  /*
    Initialize tree to describe color cube_info.
  */
  cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
  if (cube_info == (CubeInfo *) NULL)
    return((CubeInfo *) NULL);
  (void) memset(cube_info,0,sizeof(*cube_info));
  /* Clamp the requested tree depth into [2, MaxTreeDepth]. */
  cube_info->depth=depth;
  if (cube_info->depth > MaxTreeDepth)
    cube_info->depth=MaxTreeDepth;
  if (cube_info->depth < 2)
    cube_info->depth=2;
  cube_info->maximum_colors=maximum_colors;
  /*
    Initialize root node.
  */
  cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
  if (cube_info->root == (NodeInfo *) NULL)
    return((CubeInfo *) NULL);  /* NOTE(review): cube_info leaks here -- confirm policy */
  /* Root is its own parent so ClosestColor() can walk upward safely. */
  cube_info->root->parent=cube_info->root;
  cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
  if (cube_info->quantize_info->dither_method == NoDitherMethod)
    return(cube_info);
  /*
    Initialize dither resources.
  */
  /* One cache slot per quantized RGBA tuple: 2^(4*(8-CacheShift)). */
  length=(size_t) (1UL << (4*(8-CacheShift)));
  cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
  if (cube_info->memory_info == (MemoryInfo *) NULL)
    return((CubeInfo *) NULL);  /* NOTE(review): cube_info leaks here too -- confirm */
  cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
  /*
    Initialize color cache.
  */
  /* memset with -1 fills every byte with 0xFF, leaving each ssize_t slot
     equal to -1 ("empty"), as tested by the cache[i] < 0 checks. */
  (void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
  /*
    Distribute weights along a curve of exponential decay.
  */
  weight=1.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[i]=PerceptibleReciprocal(weight);
    weight*=exp(log(1.0/ErrorRelativeWeight)/(ErrorQueueLength-1.0));
  }
  cube_info->diffusion=1.0;
  return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level in the storage_class the node resides.
%
*/
/*
  GetNodeInfo() pops the next node from the cube's node pool, growing the
  pool by a fresh queue of NodesInAList nodes when it is exhausted, and
  returns the zeroed node wired to `parent` with the given id and level.
  Returns NULL on memory-allocation failure.
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        {
          /*
            Fix: release the queue header -- it is not yet linked into
            cube_info->node_queue and previously leaked on this path.
          */
          nodes=(Nodes *) RelinquishMagickMemory(nodes);
          return((NodeInfo *) NULL);
        }
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) memset(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
% o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    index,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
  (void) memset(&image->error,0,sizeof(image->error));
  /* Quantization error is only defined against a colormap; a DirectClass
     image has none, so its error stays zeroed. */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  /* Three channel samples (R, G, B) are accumulated per pixel. */
  area=3.0*image->columns*image->rows;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=(ssize_t) GetPixelIndex(image,p);
      /* Weight each side of the comparison by its own opacity. */
      if (image->alpha_trait != UndefinedPixelTrait)
        {
          alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
          beta=(double) (QuantumScale*image->colormap[index].alpha);
        }
      distance=fabs((double) (alpha*GetPixelRed(image,p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /* Publish the accumulated statistics on the image. */
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
/*
  GetQuantizeInfo() initializes quantize_info to its documented defaults:
  256 colors, Riemersma dithering, undefined colorspace, and no error
  measurement.
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Fix: validate the pointer before doing any other work, consistent with
    the assert-then-log convention used elsewhere in this file.
  */
  assert(quantize_info != (QuantizeInfo *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) memset(quantize_info,0,sizeof(*quantize_info));
  quantize_info->number_colors=256;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% K m e a n s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% KmeansImage() applies k-means color reduction to an image. This is a
% colorspace clustering or segmentation technique.
%
% The format of the KmeansImage method is:
%
% MagickBooleanType KmeansImage(Image *image,const size_t number_colors,
% const size_t max_iterations,const double tolerance,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_colors: number of colors to use as seeds.
%
% o max_iterations: maximum number of iterations while converging.
%
% o tolerance: the maximum tolerance.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Per-thread, per-cluster accumulator for one k-means iteration (see
  KmeansImage(): channel sums are QuantumScale-normalized).
*/
typedef struct _KmeansInfo
{
  double
    red,         /* sum of normalized red values of member pixels */
    green,       /* sum of normalized green values */
    blue,        /* sum of normalized blue values */
    alpha,       /* sum of normalized alpha values */
    black,       /* sum of normalized black (CMYK) values */
    count,       /* number of pixels assigned to this cluster */
    distortion;  /* sum of members' squared distance to the centroid */
} KmeansInfo;
/*
  DestroyKmeansThreadSet() releases each per-thread accumulator array and
  then the pointer table itself; always returns NULL for pointer hygiene.
*/
static KmeansInfo **DestroyKmeansThreadSet(KmeansInfo **kmeans_info)
{
  size_t
    number_threads;

  ssize_t
    i;

  assert(kmeans_info != (KmeansInfo **) NULL);
  /*
    Hoist the loop-invariant resource-limit query out of the loop
    (AcquireKmeansThreadSet already queries it once).
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < (ssize_t) number_threads; i++)
    if (kmeans_info[i] != (KmeansInfo *) NULL)
      kmeans_info[i]=(KmeansInfo *) RelinquishMagickMemory(kmeans_info[i]);
  kmeans_info=(KmeansInfo **) RelinquishMagickMemory(kmeans_info);
  return(kmeans_info);
}
static KmeansInfo **AcquireKmeansThreadSet(const size_t number_colors)
{
KmeansInfo
**kmeans_info;
ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
kmeans_info=(KmeansInfo **) AcquireQuantumMemory(number_threads,
sizeof(*kmeans_info));
if (kmeans_info == (KmeansInfo **) NULL)
return((KmeansInfo **) NULL);
(void) memset(kmeans_info,0,number_threads*sizeof(*kmeans_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
kmeans_info[i]=(KmeansInfo *) AcquireQuantumMemory(number_colors,
sizeof(**kmeans_info));
if (kmeans_info[i] == (KmeansInfo *) NULL)
return(DestroyKmeansThreadSet(kmeans_info));
}
return(kmeans_info);
}
/*
  KmeansMetric() returns the squared color distance between image pixel p
  and cluster centroid q.  Alpha (and black, for CMYK) contribute
  directly; the color channels are down-weighted by gamma, the product of
  the two normalized opacities, so transparent pixels influence color
  distance less.
*/
static inline double KmeansMetric(const Image *magick_restrict image,
  const Quantum *magick_restrict p,const PixelInfo *magick_restrict q)
{
  double
    gamma,
    metric,
    pixel;

  gamma=1.0;
  metric=0.0;
  if ((image->alpha_trait != UndefinedPixelTrait) ||
      (q->alpha_trait != UndefinedPixelTrait))
    {
      /* A side without an alpha trait is treated as fully opaque. */
      pixel=GetPixelAlpha(image,p)-(q->alpha_trait != UndefinedPixelTrait ?
        q->alpha : OpaqueAlpha);
      metric+=pixel*pixel;
      if (image->alpha_trait != UndefinedPixelTrait)
        gamma*=QuantumScale*GetPixelAlpha(image,p);
      if (q->alpha_trait != UndefinedPixelTrait)
        gamma*=QuantumScale*q->alpha;
    }
  if (image->colorspace == CMYKColorspace)
    {
      pixel=QuantumScale*(GetPixelBlack(image,p)-q->black);
      metric+=gamma*pixel*pixel;
      gamma*=QuantumScale*(QuantumRange-GetPixelBlack(image,p));
      gamma*=QuantumScale*(QuantumRange-q->black);
    }
  /* NOTE(review): triples the alpha/black terms relative to each color
     channel -- presumably intentional weighting; confirm upstream. */
  metric*=3.0;
  pixel=QuantumScale*(GetPixelRed(image,p)-q->red);
  if (IsHueCompatibleColorspace(image->colorspace) != MagickFalse)
    {
      /* Hue channel is circular; this appears to fold differences larger
         than half a turn -- TODO(review): confirm the wrap arithmetic. */
      if (fabs((double) pixel) > 0.5)
        pixel-=0.5;
      pixel*=2.0;
    }
  metric+=gamma*pixel*pixel;
  pixel=QuantumScale*(GetPixelGreen(image,p)-q->green);
  metric+=gamma*pixel*pixel;
  pixel=QuantumScale*(GetPixelBlue(image,p)-q->blue);
  metric+=gamma*pixel*pixel;
  return(metric);
}
MagickExport MagickBooleanType KmeansImage(Image *image,
  const size_t number_colors,const size_t max_iterations,const double tolerance,
  ExceptionInfo *exception)
{
#define KmeansImageTag "Kmeans/Image"
#define RandomColorComponent(info) (QuantumRange*GetPseudoRandomValue(info))

  CacheView
    *image_view;

  const char
    *colors;

  double
    previous_tolerance;

  KmeansInfo
    **kmeans_pixels;

  MagickBooleanType
    verbose,
    status;

  ssize_t
    n;

  size_t
    number_threads;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  colors=GetImageArtifact(image,"kmeans:seed-colors");
  if (colors == (const char *) NULL)
    {
      CubeInfo
        *cube_info;

      QuantizeInfo
        *quantize_info;

      size_t
        depth;

      /*
        Seed clusters from color quantization.
      */
      quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
      quantize_info->colorspace=image->colorspace;
      quantize_info->number_colors=number_colors;
      quantize_info->dither_method=NoDitherMethod;
      /* depth ~ ceil(log4(number_colors)): tree depth for the seeding
         quantizer. */
      n=number_colors;
      for (depth=1; n != 0; depth++)
        n>>=2;
      cube_info=GetCubeInfo(quantize_info,depth,number_colors);
      if (cube_info == (CubeInfo *) NULL)
        {
          quantize_info=DestroyQuantizeInfo(quantize_info);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      status=ClassifyImageColors(cube_info,image,exception);
      if (status != MagickFalse)
        {
          if (cube_info->colors > cube_info->maximum_colors)
            ReduceImageColors(image,cube_info);
          status=SetImageColormap(image,cube_info,exception);
        }
      DestroyCubeInfo(cube_info);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      if (status == MagickFalse)
        return(status);
    }
  else
    {
      char
        color[MagickPathExtent];

      const char
        *p;

      /*
        Seed clusters from color list (e.g. red;green;blue).
      */
      status=AcquireImageColormap(image,number_colors,exception);
      if (status == MagickFalse)
        return(status);
      for (n=0, p=colors; n < (ssize_t) image->colors; n++)
      {
        const char
          *q;

        /* Each seed color is delimited by ';' or the end of the string. */
        for (q=p; *q != '\0'; q++)
          if (*q == ';')
            break;
        (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1,
          MagickPathExtent));
        (void) QueryColorCompliance(color,AllCompliance,image->colormap+n,
          exception);
        if (*q == '\0')
          {
            n++;
            break;
          }
        p=q+1;
      }
      /* Fewer seed colors than clusters: fill the rest randomly. */
      if (n < (ssize_t) image->colors)
        {
          RandomInfo
            *random_info;

          /*
            Seed clusters from random values.
          */
          random_info=AcquireRandomInfo();
          for ( ; n < (ssize_t) image->colors; n++)
          {
            (void) QueryColorCompliance("#000",AllCompliance,image->colormap+n,
              exception);
            image->colormap[n].red=RandomColorComponent(random_info);
            image->colormap[n].green=RandomColorComponent(random_info);
            image->colormap[n].blue=RandomColorComponent(random_info);
            if (image->alpha_trait != UndefinedPixelTrait)
              image->colormap[n].alpha=RandomColorComponent(random_info);
            if (image->colorspace == CMYKColorspace)
              image->colormap[n].black=RandomColorComponent(random_info);
          }
          random_info=DestroyRandomInfo(random_info);
        }
    }
  /*
    Iterative refinement.
  */
  kmeans_pixels=AcquireKmeansThreadSet(number_colors);
  if (kmeans_pixels == (KmeansInfo **) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  previous_tolerance=0.0;
  verbose=IsStringTrue(GetImageArtifact(image,"debug"));
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  image_view=AcquireAuthenticCacheView(image,exception);
  /*
    Lloyd iterations: assign each pixel to the nearest centroid, then
    recompute centroids, until the distortion change drops below
    tolerance or max_iterations is reached.
  */
  for (n=0; n < (ssize_t) max_iterations; n++)
  {
    double
      distortion;

    ssize_t
      j,
      y;

    /* Reset all per-thread accumulators for this iteration. */
    for (j=0; j < (ssize_t) number_threads; j++)
      (void) memset(kmeans_pixels[j],0,image->colors*sizeof(*kmeans_pixels[j]));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      Quantum
        *magick_restrict q;

      ssize_t
        x;

      if (status == MagickFalse)
        continue;
      q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        double
          min_distance;

        ssize_t
          i,
          k;

        /*
          Assign each pixel whose mean has the least squared color distance.
        */
        k=0;
        min_distance=KmeansMetric(image,q,image->colormap+0);
        for (i=1; i < (ssize_t) image->colors; i++)
        {
          double
            distance;

          /* Early exit: a (near) exact match cannot be beaten. */
          if (min_distance <= MagickEpsilon)
            break;
          distance=KmeansMetric(image,q,image->colormap+i);
          if (distance < min_distance)
            {
              min_distance=distance;
              k=i;
            }
        }
        /* Accumulate this pixel into its cluster's per-thread sums. */
        kmeans_pixels[id][k].red+=QuantumScale*GetPixelRed(image,q);
        kmeans_pixels[id][k].green+=QuantumScale*GetPixelGreen(image,q);
        kmeans_pixels[id][k].blue+=QuantumScale*GetPixelBlue(image,q);
        if (image->alpha_trait != UndefinedPixelTrait)
          kmeans_pixels[id][k].alpha+=QuantumScale*GetPixelAlpha(image,q);
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[id][k].black+=QuantumScale*GetPixelBlack(image,q);
        kmeans_pixels[id][k].count++;
        kmeans_pixels[id][k].distortion+=min_distance;
        SetPixelIndex(image,(Quantum) k,q);
        q+=GetPixelChannels(image);
      }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
    }
    if (status == MagickFalse)
      break;
    /*
      Reduce sums to [0] entry.
    */
    for (j=1; j < (ssize_t) number_threads; j++)
    {
      ssize_t
        k;

      for (k=0; k < (ssize_t) image->colors; k++)
      {
        kmeans_pixels[0][k].red+=kmeans_pixels[j][k].red;
        kmeans_pixels[0][k].green+=kmeans_pixels[j][k].green;
        kmeans_pixels[0][k].blue+=kmeans_pixels[j][k].blue;
        if (image->alpha_trait != UndefinedPixelTrait)
          kmeans_pixels[0][k].alpha+=kmeans_pixels[j][k].alpha;
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[0][k].black+=kmeans_pixels[j][k].black;
        kmeans_pixels[0][k].count+=kmeans_pixels[j][k].count;
        kmeans_pixels[0][k].distortion+=kmeans_pixels[j][k].distortion;
      }
    }
    /*
      Calculate the new means (centroids) of the pixels in the new clusters.
    */
    distortion=0.0;
    for (j=0; j < (ssize_t) image->colors; j++)
    {
      double
        gamma;

      /* PerceptibleReciprocal guards empty clusters (count == 0). */
      gamma=PerceptibleReciprocal((double) kmeans_pixels[0][j].count);
      image->colormap[j].red=gamma*QuantumRange*kmeans_pixels[0][j].red;
      image->colormap[j].green=gamma*QuantumRange*kmeans_pixels[0][j].green;
      image->colormap[j].blue=gamma*QuantumRange*kmeans_pixels[0][j].blue;
      if (image->alpha_trait != UndefinedPixelTrait)
        image->colormap[j].alpha=gamma*QuantumRange*kmeans_pixels[0][j].alpha;
      if (image->colorspace == CMYKColorspace)
        image->colormap[j].black=gamma*QuantumRange*kmeans_pixels[0][j].black;
      distortion+=kmeans_pixels[0][j].distortion;
    }
    if (verbose != MagickFalse)
      (void) FormatLocaleFile(stderr,"distortion[%.20g]: %*g %*g\n",(double) n,
        GetMagickPrecision(),distortion,GetMagickPrecision(),
        fabs(distortion-previous_tolerance));
    /* Converged: the distortion change is within tolerance. */
    if (fabs(distortion-previous_tolerance) <= tolerance)
      break;
    previous_tolerance=distortion;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,KmeansImageTag,(MagickOffsetType) n,
          max_iterations);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  kmeans_pixels=DestroyKmeansThreadSet(kmeans_pixels);
  if (image->progress_monitor != (MagickProgressMonitor) NULL)
    (void) SetImageProgress(image,KmeansImageTag,(MagickOffsetType)
      max_iterations-1,max_iterations);
  if (status == MagickFalse)
    return(status);
  /* Sync pixel values with the updated colormap indexes. */
  return(SyncImage(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const DitherMethod dither_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither_method: choose from UndefinedDitherMethod, NoDitherMethod,
% RiemersmaDitherMethod, FloydSteinbergDitherMethod.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const DitherMethod dither_method,ExceptionInfo *exception)
{
#define PosterizeImageTag "Posterize/Image"
/* Snap one channel value onto `levels' evenly spaced steps; MagickMax()
   guards the divisor against zero when levels <= 1. */
#define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \
  MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    For colormapped (PseudoClass) images, posterize each colormap entry
    first so the palette itself reflects the reduced levels.
  */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(progress,status) \
      magick_number_threads(image,image,image->colors,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double)
          PosterizePixel(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double)
          PosterizePixel(image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double)
          PosterizePixel(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double)
          PosterizePixel(image->colormap[i].alpha);
    }
  /*
    Posterize image: process pixel rows in parallel, touching only the
    channels flagged for update.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Black requires CMYK colorspace; alpha requires an enabled alpha
         trait -- otherwise those channels are left untouched. */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is shared across the parallel rows, hence the atomic. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Finish with a full quantization to at most levels^3 colors, honoring
    the requested dither method.
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither_method=dither_method;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image,exception);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
% The format of the PruneChild method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    children;

  ssize_t
    index;

  /*
    Depth-first: prune every populated subtree before this node.
  */
  children=(cube_info->associate_alpha == MagickFalse) ? 8UL : 16UL;
  for (index=0; index < (ssize_t) children; index++)
    if (node_info->child[index] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[index]);
  if (cube_info->nodes <= cube_info->maximum_colors)
    return;
  /*
    Fold this node's statistics into its parent and detach it from the
    tree; the pruned node's color contribution is retained for averaging.
  */
  {
    NodeInfo
      *parent = node_info->parent;

    parent->number_unique+=node_info->number_unique;
    parent->total_color.red+=node_info->total_color.red;
    parent->total_color.green+=node_info->total_color.green;
    parent->total_color.blue+=node_info->total_color.blue;
    parent->total_color.alpha+=node_info->total_color.alpha;
    parent->child[node_info->id]=(NodeInfo *) NULL;
    cube_info->nodes--;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    children;

  ssize_t
    index;

  /*
    Recurse into every populated child slot first.
  */
  children=(cube_info->associate_alpha == MagickFalse) ? 8UL : 16UL;
  for (index=0; index < (ssize_t) children; index++)
  {
    if (node_info->child[index] == (NodeInfo *) NULL)
      continue;
    PruneLevel(cube_info,node_info->child[index]);
  }
  /*
    Nodes sitting exactly at the cube depth form the bottom level; merge
    their statistics into their parents.
  */
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    children;

  ssize_t
    index;

  /*
    Recurse into every populated child slot first.
  */
  children=(cube_info->associate_alpha == MagickFalse) ? 8UL : 16UL;
  for (index=0; index < (ssize_t) children; index++)
  {
    if (node_info->child[index] == (NodeInfo *) NULL)
      continue;
    PruneToCubeDepth(cube_info,node_info->child[index]);
  }
  /*
    Any node deeper than the cube's configured depth is merged upward.
  */
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  ImageType
    type;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Clamp the requested color count: 0 means "use the maximum", and the
    ceiling is MaxColormapSize.
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  /*
    Grayscale images take a fast path through SetGrayscaleImage() rather
    than the full color cube.
  */
  type=IdentifyImageGray(image,exception);
  if (IsGrayImageType(type) != MagickFalse)
    (void) SetGrayscaleImage(image,exception);
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      /* A shallower tree when dithering; deeper precision isn't needed. */
      if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
        depth--;
      /* Alpha doubles the branching factor (16 children); trim depth. */
      if ((image->alpha_trait != UndefinedPixelTrait) && (depth > 5))
        depth--;
      if (IsGrayImageType(type) != MagickFalse)
        depth=MaxTreeDepth;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image, then assign each pixel
        its closest surviving color.
      */
      if (cube_info->colors > cube_info->maximum_colors)
        ReduceImageColors(image,cube_info);
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    proceed,
    status;

  MagickProgressMonitor
    progress_monitor;

  size_t
    depth,
    maximum_colors,
    number_images;

  ssize_t
    i;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
      */
      status=QuantizeImage(quantize_info,images,exception);
      return(status);
    }
  status=MagickFalse;
  /*
    Clamp the requested color count: 0 means "use the maximum", and the
    ceiling is MaxColormapSize.
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if (quantize_info->dither_method != NoDitherMethod)
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  /*
    Pass 1: accumulate the colors of every image in the sequence into one
    shared color cube.  Each image's progress monitor is muted during
    classification and restored afterwards so only the sequence-level
    progress is reported.
  */
  number_images=GetImageListLength(images);
  image=images;
  for (i=0; image != (Image *) NULL; i++)
  {
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
      image->client_data);
    status=ClassifyImageColors(cube_info,image,exception);
    if (status == MagickFalse)
      break;
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Pass 2: reduce the number of colors in the shared cube, then
        assign the resulting global colormap to every image.
      */
      ReduceImageColors(images,cube_info);
      image=images;
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeErrorFlatten() traverses the color cube and flattens the quantization
% error into a sorted 1D array. This accelerates the color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% double *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is current pointer.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
const NodeInfo *node_info,const ssize_t offset,double *quantize_error)
{
size_t
n,
number_children;
ssize_t
i;
if (offset >= (ssize_t) cube_info->nodes)
return(0);
quantize_error[offset]=node_info->quantize_error;
n=1;
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children ; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n,
quantize_error);
return(n);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    children;

  ssize_t
    index;

  /*
    Visit all offspring before deciding the fate of this node.
  */
  children=(cube_info->associate_alpha == MagickFalse) ? 8UL : 16UL;
  for (index=0; index < (ssize_t) children; index++)
    if (node_info->child[index] != (NodeInfo *) NULL)
      Reduce(cube_info,node_info->child[index]);
  if (node_info->quantize_error > cube_info->pruning_threshold)
    {
      /*
        Node survives this pass: count it if it holds unique colors, and
        track the smallest surviving error as the next pruning threshold.
      */
      if (node_info->number_unique > 0)
        cube_info->colors++;
      if (node_info->quantize_error < cube_info->next_threshold)
        cube_info->next_threshold=node_info->quantize_error;
      return;
    }
  /*
    Error at or below the threshold: merge this node into its parent.
  */
  PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except a the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static int QuantizeErrorCompare(const void *error_p,const void *error_q)
{
  /*
    qsort() comparator ordering quantization errors ascending.  Values
    within MagickEpsilon of one another compare equal.  The near-equality
    test must run *before* the ordering test: the previous ordering
    returned 1 for p slightly greater than q but 0 for the mirrored call,
    yielding an inconsistent comparator (compare(p,q) != -compare(q,p)),
    which qsort() does not permit.
  */
  const double
    *p,
    *q;

  p=(const double *) error_p;
  q=(const double *) error_q;
  if (fabs(*q-*p) <= MagickEpsilon)
    return(0);
  return(*p > *q ? 1 : -1);
}
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag "Reduce/Image"

  MagickBooleanType
    proceed;

  MagickOffsetType
    offset;

  size_t
    span;

  cube_info->next_threshold=0.0;
  if (cube_info->colors > cube_info->maximum_colors)
    {
      double
        *quantize_error;

      /*
        Enable rapid reduction of the number of unique colors.
      */
      quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes,
        sizeof(*quantize_error));
      if (quantize_error != (double *) NULL)
        {
          /* Flatten every node's quantization error into a sorted array
             and jump-start the pruning threshold so that only roughly
             110% of the target color count survives the first pass.
             Allocation failure is benign: reduction then simply starts
             from threshold 0.0. */
          (void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
            quantize_error);
          qsort(quantize_error,cube_info->nodes,sizeof(double),
            QuantizeErrorCompare);
          if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
            cube_info->next_threshold=quantize_error[cube_info->nodes-110*
              (cube_info->maximum_colors+1)/100];
          quantize_error=(double *) RelinquishMagickMemory(quantize_error);
        }
    }
  /*
    Repeatedly prune nodes whose error is at or below the threshold,
    raising the threshold to the smallest surviving error on each pass,
    until the color count fits within maximum_colors.
  */
  for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
  {
    cube_info->pruning_threshold=cube_info->next_threshold;
    /* Sentinel above any node's error; Reduce() lowers it as it walks. */
    cube_info->next_threshold=cube_info->root->quantize_error-1;
    cube_info->colors=0;
    Reduce(cube_info,cube_info->root);
    offset=(MagickOffsetType) span-cube_info->colors;
    proceed=SetImageProgress(image,ReduceImageTag,offset,span-
      cube_info->maximum_colors+1);
    if (proceed == MagickFalse)
      break;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest of the colors
% from the reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube;

  MagickBooleanType
    okay;

  /*
    Validate arguments.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build a color cube describing the reference image's colors.
  */
  cube=GetCubeInfo(quantize_info,MaxTreeDepth,quantize_info->number_colors);
  if (cube == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  okay=ClassifyImageColors(cube,remap_image,exception);
  if (okay != MagickFalse)
    {
      /*
        Assign each pixel the closest of the reference image's colors.
      */
      cube->quantize_info->number_colors=cube->colors;
      okay=AssignImageColors(image,cube,exception);
    }
  DestroyCubeInfo(cube);
  return(okay);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImages method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube;

  Image
    *next;

  MagickBooleanType
    okay;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  next=images;
  if (remap_image == (Image *) NULL)
    {
      /*
        No reference image: derive a global colormap for the sequence.
      */
      okay=QuantizeImages(quantize_info,images,exception);
      return(okay);
    }
  /*
    Build a color cube from the reference image's colors.
  */
  cube=GetCubeInfo(quantize_info,MaxTreeDepth,quantize_info->number_colors);
  if (cube == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      next->filename);
  okay=ClassifyImageColors(cube,remap_image,exception);
  if (okay != MagickFalse)
    {
      /*
        Remap every image in the sequence to the reference colors.
      */
      cube->quantize_info->number_colors=cube->colors;
      for (next=images; next != (Image *) NULL; next=GetNextImageInList(next))
      {
        okay=AssignImageColors(next,cube,exception);
        if (okay == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube);
  return(okay);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
  /*
    qsort() comparator: order PixelInfo entries by ascending intensity.
    The double difference is clamped into the int range before the final
    truncation to int.
  */
  double
    delta;

  PixelInfo
    *color_a,
    *color_b;

  color_a=(PixelInfo *) x;
  color_b=(PixelInfo *) y;
  delta=GetPixelInfoIntensity((const Image *) NULL,color_a)-
    GetPixelInfoIntensity((const Image *) NULL,color_b);
  if (delta < (double) INT_MIN)
    delta=(double) INT_MIN;
  if (delta > (double) INT_MAX)
    delta=(double) INT_MAX;
  return((int) delta);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static MagickBooleanType SetGrayscaleImage(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  PixelInfo
    *colormap;

  size_t
    extent;

  ssize_t
    *colormap_index,
    i,
    j,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->type != GrayscaleType)
    (void) TransformImageColorspace(image,GRAYColorspace,exception);
  /*
    colormap_index maps a scaled intensity to a colormap slot; size it to
    cover both the intensity range and the current colormap.
  */
  extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
  colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
    sizeof(*colormap_index));
  if (colormap_index == (ssize_t *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  if (image->storage_class != PseudoClass)
    {
      /*
        DirectClass image: build a colormap on the fly, one entry per
        distinct intensity (-1 marks an intensity not yet seen).
      */
      (void) memset(colormap_index,(-1),extent*sizeof(*colormap_index));
      if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse)
        {
          colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      image->colors=0;
      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        Quantum
          *magick_restrict q;

        ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          size_t
            intensity;

          intensity=ScaleQuantumToMap(GetPixelRed(image,q));
          if (colormap_index[intensity] < 0)
            {
              /*
                Re-test inside the critical section: another thread may
                have appended this intensity's entry in the meantime.
              */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
              if (colormap_index[intensity] < 0)
                {
                  colormap_index[intensity]=(ssize_t) image->colors;
                  image->colormap[image->colors].red=(double)
                    GetPixelRed(image,q);
                  image->colormap[image->colors].green=(double)
                    GetPixelGreen(image,q);
                  image->colormap[image->colors].blue=(double)
                    GetPixelBlue(image,q);
                  image->colors++;
                }
            }
          SetPixelIndex(image,(Quantum) colormap_index[intensity],q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
    }
  /*
    Sort the colormap by intensity.  The alpha field temporarily stores
    each entry's original index so pixel indexes can be remapped after
    the sort.
  */
  (void) memset(colormap_index,0,extent*sizeof(*colormap_index));
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].alpha=(double) i;
  qsort((void *) image->colormap,image->colors,sizeof(PixelInfo),
    IntensityCompare);
  colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap));
  if (colormap == (PixelInfo *) NULL)
    {
      colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Compact adjacent equivalent entries, recording old-index -> new-index
    in colormap_index (keyed by the original index saved in alpha).
  */
  j=0;
  colormap[j]=image->colormap[0];
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse)
      {
        j++;
        colormap[j]=image->colormap[i];
      }
    colormap_index[(ssize_t) image->colormap[i].alpha]=j;
  }
  image->colors=(size_t) (j+1);
  image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  image->colormap=colormap;
  /*
    Remap each pixel's index through the compacted colormap.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap(
        GetPixelIndex(image,q))],q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
  image->type=GrayscaleType;
  if (SetImageMonochrome(image,exception) != MagickFalse)
    image->type=BilevelType;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColormap() traverses the color cube tree and sets the colormap of
% the image. A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero.
%
% The format of the SetImageColormap method is:
%
% MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
% ExceptionInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  size_t
    count;

  /*
    Allocate a colormap large enough for every color the cube may emit,
    then walk the tree to populate it.
  */
  count=MagickMax(cube_info->maximum_colors,cube_info->colors);
  if (AcquireImageColormap(image,count,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  image->colors=0;
  DefineImageColormap(image,cube_info,cube_info->root);
  if (image->colors == count)
    return(MagickTrue);
  /*
    Trim the colormap down to the number of entries actually defined.
  */
  image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
    image->colors+1,sizeof(*image->colormap));
  if (image->colormap == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  return(MagickTrue);
}
|
Main.c | #include "XSbench_header.h"
#include "marker_stub.h"
#ifdef MPI
#include<mpi.h>
#endif
/*
 * XSBench driver: builds the per-nuclide energy grids, the unionized energy
 * grid, and the material composition data, then runs the parallel
 * macroscopic cross-section lookup loop and reports timing (and, under
 * VERIFICATION, a reproducible hash of all lookup results).
 *
 * Fix: `nprocs` was declared uninitialized and is passed to print_inputs()
 * and print_results() even when MPI is not compiled in, which reads an
 * indeterminate value (undefined behavior). It now defaults to 1 and is
 * overwritten by MPI_Comm_size() in MPI builds.
 */
int main( int argc, char* argv[] )
{
	// =====================================================================
	// Initialization & Command Line Read-In
	// =====================================================================
	int version = 13;
	int mype = 0;                        /* MPI rank; 0 in non-MPI builds */
	int max_procs = omp_get_num_procs();
	int i, thread, mat;
	unsigned long seed;
	double omp_start, omp_end, p_energy;
	unsigned long long vhash = 0;        /* verification hash accumulator */
	int nprocs = 1;                      /* BUG FIX: was uninitialized in non-MPI builds */
	#ifdef MPI
	MPI_Status stat;
	MPI_Init(&argc, &argv);
	MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
	MPI_Comm_rank(MPI_COMM_WORLD, &mype);
	#endif
	// rand() is only used in the serial initialization stages.
	// A custom RNG is used in parallel portions.
	#ifdef VERIFICATION
	srand(26);   /* fixed seed so the verification hash is reproducible */
	#else
	srand(time(NULL));
	#endif
	// Process CLI Fields -- store in "Inputs" structure
	Inputs in = read_CLI( argc, argv );
	// Set number of OpenMP Threads
	omp_set_num_threads(in.nthreads);
	// Print-out of Input Summary
	if( mype == 0 ) {
		MARKER_INIT;
		print_inputs( in, nprocs, version );
	}
	// =====================================================================
	// Prepare Nuclide Energy Grids, Unionized Energy Grid, & Material Data
	// =====================================================================
	// Allocate & fill energy grids
	#ifndef BINARY_READ
	if( mype == 0) printf("Generating Nuclide Energy Grids...\n");
	#endif
	NuclideGridPoint ** nuclide_grids = gpmatrix(in.n_isotopes,in.n_gridpoints);
	#ifdef VERIFICATION
	generate_grids_v( nuclide_grids, in.n_isotopes, in.n_gridpoints );
	#else
	generate_grids( nuclide_grids, in.n_isotopes, in.n_gridpoints );
	#endif
	// Sort grids by energy
	#ifndef BINARY_READ
	if( mype == 0) printf("Sorting Nuclide Energy Grids...\n");
	sort_nuclide_grids( nuclide_grids, in.n_isotopes, in.n_gridpoints );
	#endif
	// Prepare Unionized Energy Grid Framework
	#ifndef BINARY_READ
	GridPoint * energy_grid = generate_energy_grid( in.n_isotopes,
	                                                in.n_gridpoints, nuclide_grids );
	#else
	GridPoint * energy_grid = (GridPoint *)malloc( in.n_isotopes *
	                           in.n_gridpoints * sizeof( GridPoint ) );
	int * index_data = (int *) malloc( in.n_isotopes * in.n_gridpoints
	                   * in.n_isotopes * sizeof(int));
	for( i = 0; i < in.n_isotopes*in.n_gridpoints; i++ )
		energy_grid[i].xs_ptrs = &index_data[i*in.n_isotopes];
	#endif
	// Double Indexing. Filling in energy_grid with pointers to the
	// nuclide_energy_grids.
	#ifndef BINARY_READ
	set_grid_ptrs( energy_grid, nuclide_grids, in.n_isotopes, in.n_gridpoints );
	#endif
	#ifdef BINARY_READ
	if( mype == 0 ) printf("Reading data from \"XS_data.dat\" file...\n");
	binary_read(in.n_isotopes, in.n_gridpoints, nuclide_grids, energy_grid);
	#endif
	// Get material data
	if( mype == 0 )
		printf("Loading Mats...\n");
	int *num_nucs = load_num_nucs(in.n_isotopes);
	int **mats = load_mats(num_nucs, in.n_isotopes);
	#ifdef VERIFICATION
	double **concs = load_concs_v(num_nucs);
	#else
	double **concs = load_concs(num_nucs);
	#endif
	#ifdef BINARY_DUMP
	if( mype == 0 ) printf("Dumping data to binary file...\n");
	binary_dump(in.n_isotopes, in.n_gridpoints, nuclide_grids, energy_grid);
	if( mype == 0 ) printf("Binary file \"XS_data.dat\" written! Exiting...\n");
	return 0;
	#endif
	// =====================================================================
	// Cross Section (XS) Parallel Lookup Simulation Begins
	// =====================================================================
	// Outer benchmark loop can loop through all possible # of threads
	#ifdef BENCHMARK
	for( int bench_n = 1; bench_n <=omp_get_num_procs(); bench_n++ )
	{
		in.nthreads = bench_n;
		omp_set_num_threads(in.nthreads);
	#endif
	if( mype == 0 )
	{
		printf("\n");
		border_print();
		center_print("SIMULATION", 79);
		border_print();
	}
	omp_start = omp_get_wtime();
	//initialize papi with one thread (master) here
	#ifdef PAPI
	if ( PAPI_library_init(PAPI_VER_CURRENT) != PAPI_VER_CURRENT){
		fprintf(stderr, "PAPI library init error!\n");
		exit(1);
	}
	#endif
	MARKER_START(mype);
	#if defined(MPI) && defined(GEM5_MARKERS)
	MPI_Barrier(MPI_COMM_WORLD);
	#endif
	// OpenMP compiler directives - declaring variables as shared or private
	#pragma omp parallel default(none) \
	private(i, thread, p_energy, mat, seed) \
	shared( max_procs, in, energy_grid, nuclide_grids, \
	        mats, concs, num_nucs, mype, vhash)
	{
		// Initialize parallel PAPI counters
		#ifdef PAPI
		int eventset = PAPI_NULL;
		int num_papi_events;
		#pragma omp critical
		{
			counter_init(&eventset, &num_papi_events);
		}
		#endif
		double macro_xs_vector[5];
		// Initialize RNG seeds for threads
		thread = omp_get_thread_num();
		seed   = (thread+1)*19+17;
		// XS Lookup Loop
		#pragma omp for schedule(dynamic)
		for( i = 0; i < in.lookups; i++ )
		{
			// Status text
			if( INFO && mype == 0 && thread == 0 && i % 1000 == 0 )
				printf("\rCalculating XS's... (%.0lf%% completed)",
						(i / ( (double)in.lookups / (double) in.nthreads ))
						/ (double) in.nthreads * 100.0);
			// Randomly pick an energy and material for the particle
			#ifdef VERIFICATION
			#pragma omp critical
			{
				p_energy = rn_v();
				mat = pick_mat(&seed);
			}
			#else
			p_energy = rn(&seed);
			mat = pick_mat(&seed);
			#endif
			// debugging
			//printf("E = %lf mat = %d\n", p_energy, mat);
			// This returns the macro_xs_vector, but we're not going
			// to do anything with it in this program, so return value
			// is written over.
			calculate_macro_xs( p_energy, mat, in.n_isotopes,
			                    in.n_gridpoints, num_nucs, concs,
			                    energy_grid, nuclide_grids, mats,
			                    macro_xs_vector );
			// Verification hash calculation
			// This method provides a consistent hash accross
			// architectures and compilers.
			#ifdef VERIFICATION
			char line[256];
			sprintf(line, "%.5lf %d %.5lf %.5lf %.5lf %.5lf %.5lf",
			       p_energy, mat,
				   macro_xs_vector[0],
				   macro_xs_vector[1],
				   macro_xs_vector[2],
				   macro_xs_vector[3],
				   macro_xs_vector[4]);
			unsigned long long vhash_local = hash(line, 10000);
			#pragma omp atomic
			vhash += vhash_local;
			#endif
		}
		// Prints out thread local PAPI counters
		#ifdef PAPI
		if( mype == 0 && thread == 0 )
		{
			printf("\n");
			border_print();
			center_print("PAPI COUNTER RESULTS", 79);
			border_print();
			printf("Count          \tSmybol      \tDescription\n");
		}
		{
		#pragma omp barrier
		}
		counter_stop(&eventset, num_papi_events);
		#endif
	}
	#ifndef PAPI
	if( mype == 0)
	{
		printf("\n" );
		printf("Simulation complete.\n" );
		MARKER_STOP(mype);
	}
	#endif
	omp_end = omp_get_wtime();
	// Print / Save Results and Exit
	print_results( in, mype, omp_end-omp_start, nprocs, vhash );
	#ifdef BENCHMARK
	}
	#endif
	#ifdef MPI
	MPI_Finalize();
	#endif
	return 0;
}
|
sapH_fmt_plug.c | /*
* this is a SAP-H plugin for john the ripper.
* Copyright (c) 2014 JimF, and it is hereby released
* to the general public under the following terms: Redistribution and use in
* source and binary forms, with or without modification, are permitted.
*
* The internals of this algorithm were found on the hashcat forum, and
* implemented here, whether, it is right or wrong. A link to that post is:
* http://hashcat.net/forum/thread-3804.html
* There are some things which are unclear, BUT which have been coded as listed
* within that post. Things such as the signatures themselves are somewhat
* unclear, and do not follow patterns well. The sha1 signature is lower case
* and does not contain the 1. The other signatures are upper case. This code
* was implemented in the exact manner as described on the forum, and will be
* used as such, until we find out that it is right or wrong (i.e. we get sample
* hashs from a REAL system in the other formats). If things are not correct,
* getting this format corrected will be trivial.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sapH;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sapH);
#else
#include <string.h>
#include <ctype.h>
#include "arch.h"
/* for now, undef this until I get OMP working, then start on SIMD */
//#undef _OPENMP
//#undef SIMD_COEF_32
//#undef SIMD_PARA_SHA1
//#undef SIMD_COEF_32
//#undef SIMD_PARA_SHA256
//#undef SIMD_COEF_64
//#undef SIMD_PARA_SHA512
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "base64_convert.h"
#include "sha.h"
#include "sha2.h"
#include "johnswap.h"
#if defined(_OPENMP)
#include <omp.h>
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 8
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#endif
/*
* Assumption is made that SIMD_COEF_32*SIMD_PARA_SHA1 is >= than
* SHA256_COEF*PARA and SHA512_COEF*PARA, and that these other 2
* will evenly divide the SIMD_COEF_32*SHA1_SSRE_PARA value.
* Works with current code. BUT if SIMD_PARA_SHA1 was 3 and
* SIMD_PARA_SHA256 was 2, then we would have problems.
*/
#ifdef SIMD_COEF_32
#define NBKEYS1 (SIMD_COEF_32 * SIMD_PARA_SHA1)
#else
#define NBKEYS1 1
#endif
#ifdef SIMD_COEF_32
#define NBKEYS256 (SIMD_COEF_32 * SIMD_PARA_SHA256)
#else
#define NBKEYS256 1
#endif
#ifdef SIMD_COEF_64
#define NBKEYS512 (SIMD_COEF_64 * SIMD_PARA_SHA512)
#else
#define NBKEYS512 1
#endif
// the least common multiple of the NBKEYS* above
#define NBKEYS (SIMD_COEF_32*SIMD_PARA_SHA1*SIMD_PARA_SHA256*SIMD_PARA_SHA512)
#include "simd-intrinsics.h"
#define FORMAT_LABEL "saph"
#define FORMAT_NAME "SAP CODVN H (PWDSALTEDHASH)"
#define FORMAT_TAG "{x-issha, "
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define FORMAT_TAG256 "{x-isSHA256, "
#define FORMAT_TAG256_LEN (sizeof(FORMAT_TAG256)-1)
#define FORMAT_TAG384 "{x-isSHA384, "
#define FORMAT_TAG384_LEN (sizeof(FORMAT_TAG384)-1)
#define FORMAT_TAG512 "{x-isSHA512, "
#define FORMAT_TAG512_LEN (sizeof(FORMAT_TAG512)-1)
#define ALGORITHM_NAME "SHA-1/SHA-2 " SHA1_ALGORITHM_NAME
#include "memdbg.h"
#define BENCHMARK_COMMENT " (SHA1x1024)"
#define BENCHMARK_LENGTH 0
#define SALT_LENGTH 16 /* the max used sized salt */
#define CIPHERTEXT_LENGTH 132 /* max salt+sha512 + 2^32 iterations */
#define BINARY_SIZE 16 /* we cut off all hashes down to 16 bytes */
#define MAX_BINARY_SIZE 64 /* sha512 is 64 byte */
#define SHA1_BINARY_SIZE 20
#define SHA256_BINARY_SIZE 32
#define SHA384_BINARY_SIZE 48
#define SHA512_BINARY_SIZE 64
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct sapH_salt)
#define SALT_ALIGN 4
/* NOTE, format is slow enough that endianity conversion is pointless. Just use flat buffers. */
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#define PLAINTEXT_LENGTH 23 /* Real world max. is 40 */
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define PLAINTEXT_LENGTH 40
#endif
static struct fmt_tests tests[] = {
/* first 2 hashes are 'default' 1024 iteration with 12 bytes salt so */
/* timings reflect that, and benchmark comment set to (sha1, 1024) */
{"{x-issha, 1024}hmiyJ2a/Z+HRpjQ37Osz+rYax9UxMjM0NTY3ODkwYWI=","OpenWall"},
{"{x-issha, 1024}fRLe9EvN/Le81BDEDZR5SEC0O6BhYmNkZWZnaHVrYWw=","JohnTheRipper"},
{"{x-issha, 1024}L1PHSP1vOwdYh0ASjswI69fQQQhzQXFlWmxnaFA5","booboo"},
{"{x-issha, 1024}dCjaHQ47/WeSwsoSYDR/8puLby5T","booboo"}, /* 1 byte salt */
{"{x-issha, 1024}+q+WSxWXJt7SjV5VJEymEKPUbn1FQWM=","HYulafeE!3"},
{"{x-issha, 6666}7qNFlIR+ZQUpe2DtSBvpvzU5VlBzcG1DVGxvOEFQODI=","dif_iterations"},
{"{x-isSHA256, 3000}UqMnsr5BYN+uornWC7yhGa/Wj0u5tshX19mDUQSlgih6OTFoZjRpMQ==","booboo"},
{"{x-isSHA256, 3000}ydi0JlyU6lX5305Qk/Q3uLBbIFjWuTyGo3tPBZDcGFd6NkFvV1gza3RkNg==","GottaGoWhereNeeded"},
{"{x-isSHA384, 5000}3O/F4YGKNmIYHDu7ZQ7Q+ioCOQi4HRY4yrggKptAU9DtmHigCuGqBiAPVbKbEAfGTzh4YlZLWUM=","booboo"},
{"{x-isSHA384, 5000}XSLo2AKIvACwqW/X416UeVbHOXmio4u27Z7cgXS2rxND+zTpN+x3JNfQcEQX2PT0Z3FPdEY2dHM=","yiPP3rs"},
{"{x-isSHA512, 7500}ctlX6qYsWspafEzwoej6nFp7zRQQjr8y22vE+xeveIX2gUndAw9N2Gep5azNUwuxOe2o7tusF800OfB9tg4taWI4Tg==","booboo"},
{"{x-isSHA512, 7500}Qyrh2JXgGkvIfKYOJRdWFut5/pVnXI/vZvqJ7N+Tz9M1zUTXGWCZSom4az4AhqOuAahBwuhcKqMq/pYPW4h3cThvT2JaWVBw","hapy1CCe!"},
{"{x-isSHA512, 18009}C2+Sij3JyXPPDuQgsF6Zot7XnjRFX86X67tWJpUzXNnFw2dKcGPH6HDEzVJ8HN8+cJe4vZaOYTlmdz09gI7YEwECAwQFBgcICQoLDA0ODwA=","maxlen"},
{NULL}
};
static char (*saved_plain)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_key)[BINARY_SIZE/sizeof(uint32_t)];
static struct sapH_salt {
int slen; /* actual length of salt ( 1 to 16 bytes) */
int type; /* 1, 256, 384 or 512 for sha1, sha256, sha384 or sha512 */
unsigned iter; /* from 1 to 2^32 rounds */
unsigned char s[SALT_LENGTH];
} *sapH_cur_salt;
/*
 * One-time format setup: scale the keys-per-crypt window for OpenMP and
 * allocate the zeroed candidate/result buffers used by crypt_all().
 */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	int threads = omp_get_max_threads();
	/* min scales by the raw thread count; max additionally by OMP_SCALE */
	self->params.min_keys_per_crypt *= threads;
	threads *= OMP_SCALE;
	self->params.max_keys_per_crypt *= threads;
#endif
	saved_plain = mem_calloc(self->params.max_keys_per_crypt,
	                         sizeof(*saved_plain));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_key));
}
/*
 * Tear-down hook: release the buffers allocated in init(), in reverse
 * order of allocation. MEM_FREE is a project macro — presumably it also
 * NULLs the pointer; confirm in misc.h.
 */
static void done(void)
{
	MEM_FREE(crypt_key);
	MEM_FREE(saved_plain);
}
/*
 * Validate one candidate hash line. Accepts the four "{x-issha...}" tag
 * variants, checks the iteration count is a decimal number, verifies the
 * payload is well-formed MIME base64, and checks that the decoded blob is
 * hash + 1..SALT_LENGTH bytes of salt. Returns 1 if valid, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *cp = ciphertext;
	char *keeptr;
	int len, hash_len=0;
	char tmp[MAX_BINARY_SIZE+SALT_LENGTH];
	/* first check for 'simple' signatures before allocation other stuff. */
	if (!strncmp(cp, FORMAT_TAG, FORMAT_TAG_LEN))
		hash_len = SHA1_BINARY_SIZE;
	else if (!strncmp(cp, FORMAT_TAG256, FORMAT_TAG256_LEN))
		hash_len = SHA256_BINARY_SIZE;
	else if (!strncmp(cp, FORMAT_TAG384, FORMAT_TAG384_LEN))
		hash_len = SHA384_BINARY_SIZE;
	else if (!strncmp(cp, FORMAT_TAG512, FORMAT_TAG512_LEN))
		hash_len = SHA512_BINARY_SIZE;
	else
		return 0;
	/* work on a private copy, since strtokm() modifies the string */
	keeptr = strdup(cp);
	cp = keeptr;
	while (*cp++ != ' ') ; /* skip the "{x-issha?, " */
	if ((cp = strtokm(cp, "}")) == NULL)
		goto err;
	if (!isdecu(cp))       /* iteration count must be decimal unsigned */
		goto err;
	// we want the entire rest of the line here, to mime compare.
	if ((cp = strtokm(NULL, "")) == NULL)
		goto err;
	if (strlen(cp) != base64_valid_length(cp, e_b64_mime, flg_Base64_MIME_TRAIL_EQ|flg_Base64_MIME_TRAIL_EQ_CNT, 0))
		goto err;
	len = base64_convert(cp, e_b64_mime, strlen(cp), tmp, e_b64_raw,
	                     sizeof(tmp), flg_Base64_MIME_TRAIL_EQ, 0);
	/* whatever follows the raw digest is the salt; must be 1..16 bytes */
	len -= hash_len;
	if (len < 1 || len > SALT_LENGTH)
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/* Install the salt produced by get_salt() as the current salt for crypt_all(). */
static void set_salt(void *salt)
{
	sapH_cur_salt = (struct sapH_salt*)salt;
}
/*
 * Store a candidate password in slot `index`.
 * Fix: the original unbounded strcpy() could overflow the fixed
 * (PLAINTEXT_LENGTH+1)-byte saved_plain entry if a longer key ever
 * arrived; snprintf bounds the copy and always NUL-terminates. For
 * in-contract keys (<= PLAINTEXT_LENGTH) behavior is identical.
 */
static void set_key(char *key, int index)
{
	snprintf(saved_plain[index], sizeof(saved_plain[index]), "%s", key);
}
/* Return the stored plaintext candidate for slot `index` (no copy made). */
static char *get_key(int index)
{
	return (char*)saved_plain[index];
}
/*
 * Fast reject across the whole batch: report whether any computed hash in
 * [0, count) matches the first 32 bits of the candidate binary.
 */
static int cmp_all(void *binary, int count) {
	uint32_t needle = *(uint32_t*)binary;
	int slot = 0;
	while (slot < count) {
		if (*(uint32_t*)crypt_key[slot] == needle)
			return 1;
		++slot;
	}
	return 0;
}
/*
 * Nothing further to check here: only the first BINARY_SIZE (16) bytes of
 * each digest are kept, and cmp_one() already compares all of them.
 */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Full BINARY_SIZE comparison of one computed hash against the candidate. */
static int cmp_one(void * binary, int index)
{
	return memcmp(binary, crypt_key[index], BINARY_SIZE) == 0;
}
/*
 * SHA-1 worker for crypt_all(): for each key computes
 *   d0 = SHA1(password . salt); d(i) = SHA1(password . d(i-1)), i < iter
 * and stores the first BINARY_SIZE bytes of the final digest in crypt_key.
 * The OpenMP loop parallelizes over batches of NBKEYS1 keys.
 */
static void crypt_all_1(int count) {
	int idx=0;
#if defined(_OPENMP)
#pragma omp parallel for default(none) private(idx) shared(count, sapH_cur_salt, saved_plain, crypt_key)
#endif
	for (idx = 0; idx < count; idx += NBKEYS1)
	{
		SHA_CTX ctx;
		uint32_t i;
#if !defined (SIMD_COEF_32)
		/* scalar path: tmp = password . running-digest, rehashed in place */
		uint32_t len = strlen(saved_plain[idx]);
		unsigned char tmp[PLAINTEXT_LENGTH+SHA1_BINARY_SIZE], *cp=&tmp[len];
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, saved_plain[idx], len);
		SHA1_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
		strcpy((char*)tmp, saved_plain[idx]);
		len += SHA1_BINARY_SIZE;
		SHA1_Final(cp, &ctx);
		for (i = 1; i < sapH_cur_salt->iter; ++i) {
			SHA1_Init(&ctx);
			SHA1_Update(&ctx, tmp, len);
			SHA1_Final(cp, &ctx);
		}
		memcpy(crypt_key[idx], cp, BINARY_SIZE);
#else
		/* SIMD path: each key occupies one 64-byte flat SHA-1 block
		   (password . digest . 0x80 pad . length field) */
		unsigned char _IBuf[64*NBKEYS1+MEM_ALIGN_SIMD], *keys, tmpBuf[20], _OBuf[20*NBKEYS1+MEM_ALIGN_SIMD], *crypt;
		uint32_t j, *crypt32, offs[NBKEYS1], len;
		keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_SIMD);
		crypt = (unsigned char*)mem_align(_OBuf, MEM_ALIGN_SIMD);
		crypt32 = (uint32_t*)crypt;
		memset(keys, 0, 64*NBKEYS1);
		for (i = 0; i < NBKEYS1; ++i) {
			/* first iteration is done in scalar code to seed the block */
			len = strlen(saved_plain[idx+i]);
			SHA1_Init(&ctx);
			SHA1_Update(&ctx, saved_plain[idx+i], len);
			SHA1_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
			SHA1_Final(tmpBuf, &ctx);
			memcpy(&keys[i<<6], saved_plain[idx+i], len);
			memcpy(&keys[(i<<6)+len], tmpBuf, 20);
			keys[(i<<6)+len+20] = 0x80;
			offs[i] = len;  /* where the digest part starts in each block */
			len += 20;
			/* length-in-bits field placed where SIMDSHA1body's flat-input
			   layout expects it — do not change */
			keys[(i<<6)+60] = (len<<3)&0xff;
			keys[(i<<6)+61] = (len>>5);
		}
		for (i = 1; i < sapH_cur_salt->iter; ++i) {
			uint32_t k;
			SIMDSHA1body(keys, crypt32, NULL, SSEi_FLAT_IN);
			/* fold each lane's digest (byte-swapped) back into its block */
			for (k = 0; k < NBKEYS1; ++k) {
				uint32_t *pcrypt = &crypt32[ ((k/SIMD_COEF_32)*(SIMD_COEF_32*5)) + (k&(SIMD_COEF_32-1))];
				uint32_t *Icp32 = (uint32_t *)(&keys[(k<<6)+offs[k]]);
				for (j = 0; j < 5; ++j) {
					Icp32[j] = JOHNSWAP(*pcrypt);
					pcrypt += SIMD_COEF_32;
				}
			}
		}
		// now marshal into crypt_out;
		for (i = 0; i < NBKEYS1; ++i) {
			uint32_t *Optr32 = (uint32_t*)(crypt_key[idx+i]);
			uint32_t *Iptr32 = &crypt32[ ((i/SIMD_COEF_32)*(SIMD_COEF_32*5)) + (i&(SIMD_COEF_32-1))];
			// we only want 16 bytes, not 20
			for (j = 0; j < 4; ++j) {
				Optr32[j] = JOHNSWAP(*Iptr32);
				Iptr32 += SIMD_COEF_32;
			}
		}
#endif
	}
}
/*
 * SHA-256 worker for crypt_all(): same iterated scheme as crypt_all_1
 * (d0 = H(password.salt); d(i) = H(password.d(i-1))) with 32-byte digests;
 * only the first BINARY_SIZE bytes of the final digest are kept.
 */
static void crypt_all_256(int count) {
	int idx;
#if defined(_OPENMP)
#pragma omp parallel for default(none) private(idx) shared(count, sapH_cur_salt, saved_plain, crypt_key)
#endif
	for (idx = 0; idx < count; idx += NBKEYS256) {
		SHA256_CTX ctx;
		uint32_t i;
#if !defined (SIMD_COEF_32)
		/* scalar path: tmp = password . running-digest, rehashed in place */
		uint32_t len = strlen(saved_plain[idx]);
		unsigned char tmp[PLAINTEXT_LENGTH+SHA256_BINARY_SIZE], *cp=&tmp[len];
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, saved_plain[idx], len);
		SHA256_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
		strcpy((char*)tmp, saved_plain[idx]);
		len += SHA256_BINARY_SIZE;
		SHA256_Final(cp, &ctx);
		for (i = 1; i < sapH_cur_salt->iter; ++i) {
			SHA256_Init(&ctx);
			SHA256_Update(&ctx, tmp, len);
			SHA256_Final(cp, &ctx);
		}
		memcpy(crypt_key[idx], cp, BINARY_SIZE);
#else
		/* SIMD path: one 64-byte flat SHA-256 block per key */
		unsigned char _IBuf[64*NBKEYS256+MEM_ALIGN_SIMD], *keys, tmpBuf[32], _OBuf[32*NBKEYS256+MEM_ALIGN_SIMD], *crypt;
		uint32_t j, *crypt32, offs[NBKEYS256], len;
		keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_SIMD);
		crypt = (unsigned char*)mem_align(_OBuf, MEM_ALIGN_SIMD);
		crypt32 = (uint32_t*)crypt;
		memset(keys, 0, 64*NBKEYS256);
		for (i = 0; i < NBKEYS256; ++i) {
			/* first iteration in scalar code seeds each flat block */
			len = strlen(saved_plain[idx+i]);
			SHA256_Init(&ctx);
			SHA256_Update(&ctx, saved_plain[idx+i], len);
			SHA256_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
			SHA256_Final(tmpBuf, &ctx);
			memcpy(&keys[i<<6], saved_plain[idx+i], len);
			memcpy(&keys[(i<<6)+len], tmpBuf, 32);
			keys[(i<<6)+len+32] = 0x80;
			offs[i] = len;
			len += 32;
			/* length field placement matches SIMDSHA256body's flat layout */
			keys[(i<<6)+60] = (len<<3)&0xff;
			keys[(i<<6)+61] = (len>>5);
		}
		for (i = 1; i < sapH_cur_salt->iter; ++i) {
			uint32_t k;
			SIMDSHA256body(keys, crypt32, NULL, SSEi_FLAT_IN);
			/* fold each lane's 8-word digest back into its block */
			for (k = 0; k < NBKEYS256; ++k) {
				uint32_t *pcrypt = &crypt32[ ((k/SIMD_COEF_32)*(SIMD_COEF_32*8)) + (k&(SIMD_COEF_32-1))];
				uint32_t *Icp32 = (uint32_t *)(&keys[(k<<6)+offs[k]]);
				for (j = 0; j < 8; ++j) {
					Icp32[j] = JOHNSWAP(*pcrypt);
					pcrypt += SIMD_COEF_32;
				}
			}
		}
		// now marshal into crypt_out;
		for (i = 0; i < NBKEYS256; ++i) {
			uint32_t *Optr32 = (uint32_t*)(crypt_key[idx+i]);
			uint32_t *Iptr32 = &crypt32[ ((i/SIMD_COEF_32)*(SIMD_COEF_32*8)) + (i&(SIMD_COEF_32-1))];
			// we only want 16 bytes, not 32
			for (j = 0; j < 4; ++j) {
				Optr32[j] = JOHNSWAP(*Iptr32);
				Iptr32 += SIMD_COEF_32;
			}
		}
#endif
	}
}
/*
 * SHA-384 worker for crypt_all(): iterated H(password . previous-digest)
 * scheme with 48-byte digests. SHA-384 shares SHA-512's context/block
 * size, hence SHA512_CTX and 128-byte flat blocks in the SIMD path.
 */
static void crypt_all_384(int count) {
	int idx;
#if defined(_OPENMP)
#pragma omp parallel for default(none) private(idx) shared(count, sapH_cur_salt, saved_plain, crypt_key)
#endif
	for (idx = 0; idx < count; idx+=NBKEYS512) {
		SHA512_CTX ctx;
		uint32_t i;
#if !defined SIMD_COEF_64
		/* scalar path: tmp = password . running-digest, rehashed in place */
		uint32_t len = strlen(saved_plain[idx]);
		unsigned char tmp[PLAINTEXT_LENGTH+SHA384_BINARY_SIZE], *cp=&tmp[len];
		SHA384_Init(&ctx);
		SHA384_Update(&ctx, saved_plain[idx], len);
		SHA384_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
		strcpy((char*)tmp, saved_plain[idx]);
		len += SHA384_BINARY_SIZE;
		SHA384_Final(cp, &ctx);
		for (i = 1; i < sapH_cur_salt->iter; ++i) {
			SHA384_Init(&ctx);
			SHA384_Update(&ctx, tmp, len);
			SHA384_Final(cp, &ctx);
		}
		memcpy(crypt_key[idx], cp, BINARY_SIZE);
#else
		/* SIMD path: one 128-byte flat SHA-512 block per key */
		unsigned char _IBuf[128*NBKEYS512+MEM_ALIGN_SIMD], *keys, tmpBuf[64], _OBuf[64*NBKEYS512+MEM_ALIGN_SIMD], *crypt;
		uint64_t j, *crypt64, offs[NBKEYS512];
		uint32_t len;
		keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_SIMD);
		crypt = (unsigned char*)mem_align(_OBuf, MEM_ALIGN_SIMD);
		crypt64 = (uint64_t*)crypt;
		memset(keys, 0, 128*NBKEYS512);
		for (i = 0; i < NBKEYS512; ++i) {
			/* first iteration in scalar code seeds each flat block */
			len = strlen(saved_plain[idx+i]);
			SHA384_Init(&ctx);
			SHA384_Update(&ctx, saved_plain[idx+i], len);
			SHA384_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
			SHA384_Final(tmpBuf, &ctx);
			memcpy(&keys[i<<7], saved_plain[idx+i], len);
			memcpy(&keys[(i<<7)+len], tmpBuf, 48);
			keys[(i<<7)+len+48] = 0x80;
			offs[i] = len;
			len += 48;
			/* length field placement matches SIMDSHA512body's flat layout */
			keys[(i<<7)+120] = (len<<3)&0xff;
			keys[(i<<7)+121] = (len>>5);
		}
		for (i = 1; i < sapH_cur_salt->iter; ++i) {
			uint32_t k;
			SIMDSHA512body(keys, crypt64, NULL, SSEi_FLAT_IN|SSEi_CRYPT_SHA384);
			/* fold each lane's 6-word (48-byte) digest back into its block */
			for (k = 0; k < NBKEYS512; ++k) {
				uint64_t *pcrypt = &crypt64[ ((k/SIMD_COEF_64)*(SIMD_COEF_64*8)) + (k&(SIMD_COEF_64-1))];
				uint64_t *Icp64 = (uint64_t *)(&keys[(k<<7)+offs[k]]);
				for (j = 0; j < 6; ++j) {
					Icp64[j] = JOHNSWAP64(*pcrypt);
					pcrypt += SIMD_COEF_64;
				}
			}
		}
		// now marshal into crypt_out;
		for (i = 0; i < NBKEYS512; ++i) {
			uint64_t *Optr64 = (uint64_t*)(crypt_key[idx+i]);
			uint64_t *Iptr64 = &crypt64[ ((i/SIMD_COEF_64)*(SIMD_COEF_64*8)) + (i&(SIMD_COEF_64-1))];
			// we only want 16 bytes, not 48
			for (j = 0; j < 2; ++j) {
				Optr64[j] = JOHNSWAP64(*Iptr64);
				Iptr64 += SIMD_COEF_64;
			}
		}
#endif
	}
}
/*
 * SHA-512 worker for crypt_all(): iterated H(password . previous-digest)
 * scheme with 64-byte digests; first BINARY_SIZE bytes of the final
 * digest are stored in crypt_key.
 */
static void crypt_all_512(int count) {
	int idx;
#if defined(_OPENMP)
#pragma omp parallel for default(none) private(idx) shared(count, sapH_cur_salt, saved_plain, crypt_key)
#endif
	for (idx = 0; idx < count; idx+=NBKEYS512) {
		SHA512_CTX ctx;
		uint32_t i;
#if !defined SIMD_COEF_64
		/* scalar path: tmp = password . running-digest, rehashed in place */
		uint32_t len = strlen(saved_plain[idx]);
		unsigned char tmp[PLAINTEXT_LENGTH+SHA512_BINARY_SIZE], *cp=&tmp[len];
		SHA512_Init(&ctx);
		SHA512_Update(&ctx, saved_plain[idx], len);
		SHA512_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
		strcpy((char*)tmp, saved_plain[idx]);
		len += SHA512_BINARY_SIZE;
		SHA512_Final(cp, &ctx);
		for (i = 1; i < sapH_cur_salt->iter; ++i) {
			SHA512_Init(&ctx);
			SHA512_Update(&ctx, tmp, len);
			SHA512_Final(cp, &ctx);
		}
		memcpy(crypt_key[idx], cp, BINARY_SIZE);
#else
		/* SIMD path: one 128-byte flat SHA-512 block per key */
		unsigned char _IBuf[128*NBKEYS512+MEM_ALIGN_SIMD], *keys, tmpBuf[64], _OBuf[64*NBKEYS512+MEM_ALIGN_SIMD], *crypt;
		uint64_t j, *crypt64, offs[NBKEYS512];
		uint32_t len;
		keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_SIMD);
		crypt = (unsigned char*)mem_align(_OBuf, MEM_ALIGN_SIMD);
		crypt64 = (uint64_t*)crypt;
		memset(keys, 0, 128*NBKEYS512);
		for (i = 0; i < NBKEYS512; ++i) {
			/* first iteration in scalar code seeds each flat block */
			len = strlen(saved_plain[idx+i]);
			SHA512_Init(&ctx);
			SHA512_Update(&ctx, saved_plain[idx+i], len);
			SHA512_Update(&ctx, sapH_cur_salt->s, sapH_cur_salt->slen);
			SHA512_Final(tmpBuf, &ctx);
			memcpy(&keys[i<<7], saved_plain[idx+i], len);
			memcpy(&keys[(i<<7)+len], tmpBuf, 64);
			keys[(i<<7)+len+64] = 0x80;
			offs[i] = len;
			len += 64;
			/* length field placement matches SIMDSHA512body's flat layout */
			keys[(i<<7)+120] = (len<<3)&0xff;
			keys[(i<<7)+121] = (len>>5);
		}
		for (i = 1; i < sapH_cur_salt->iter; ++i) {
			uint32_t k;
			SIMDSHA512body(keys, crypt64, NULL, SSEi_FLAT_IN);
			/* fold each lane's 8-word digest back into its block */
			for (k = 0; k < NBKEYS512; ++k) {
				uint64_t *pcrypt = &crypt64[ ((k/SIMD_COEF_64)*(SIMD_COEF_64*8)) + (k&(SIMD_COEF_64-1))];
				uint64_t *Icp64 = (uint64_t *)(&keys[(k<<7)+offs[k]]);
				for (j = 0; j < 8; ++j) {
					Icp64[j] = JOHNSWAP64(*pcrypt);
					pcrypt += SIMD_COEF_64;
				}
			}
		}
		// now marshal into crypt_out;
		for (i = 0; i < NBKEYS512; ++i) {
			uint64_t *Optr64 = (uint64_t*)(crypt_key[idx+i]);
			uint64_t *Iptr64 = &crypt64[((i/SIMD_COEF_64)*(SIMD_COEF_64*8)) + (i&(SIMD_COEF_64-1))];
			// we only want 16 bytes, not 64
			for (j = 0; j < 2; ++j) {
				Optr64[j] = JOHNSWAP64(*Iptr64);
				Iptr64 += SIMD_COEF_64;
			}
		}
#endif
	}
}
/*
 * Dispatch the batch to the worker matching the current salt's hash type
 * (1:SHA1, 2:SHA256, 3:SHA384, 4:SHA512). Kept as four separate workers
 * to keep the OMP + SIMD logic in each one simple.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	switch (sapH_cur_salt->type) {
	case 1:
		crypt_all_1(count);
		break;
	case 2:
		crypt_all_256(count);
		break;
	case 3:
		crypt_all_384(count);
		break;
	case 4:
		crypt_all_512(count);
		break;
	}
	return count;
}
/*
 * Decode the base64 payload of a (pre-validated) ciphertext and return the
 * first BINARY_SIZE bytes of the digest in a static buffer. The buffer is
 * overwritten on every call — callers must copy if they need to keep it.
 */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char cp[BINARY_SIZE]; /* only stores part the size of each hash */
		uint32_t jnk[BINARY_SIZE/4];   /* forces 4-byte alignment of cp */
	} b;
	char *cp = ciphertext;
	memset(b.cp, 0, sizeof(b.cp));
	/* skip the signature tag; error() aborts, but valid() should have
	   rejected any unknown tag before we get here */
	if (!strncasecmp(cp, FORMAT_TAG, FORMAT_TAG_LEN)) { cp += FORMAT_TAG_LEN; }
	else if (!strncasecmp(cp, FORMAT_TAG256, FORMAT_TAG256_LEN)) { cp += FORMAT_TAG256_LEN; }
	else if (!strncasecmp(cp, FORMAT_TAG384, FORMAT_TAG384_LEN)) { cp += FORMAT_TAG384_LEN; }
	else if (!strncasecmp(cp, FORMAT_TAG512, FORMAT_TAG512_LEN)) { cp += FORMAT_TAG512_LEN; }
	else { fprintf(stderr, "error, bad signature in sap-H format!\n"); error(); }
	/* skip past the "iterations}" part to reach the base64 data */
	while (*cp != '}') ++cp;
	++cp;
	base64_convert(cp, e_b64_mime, strlen(cp), b.cp, e_b64_raw,
	               sizeof(b.cp), flg_Base64_MIME_TRAIL_EQ, 0);
	return b.cp;
}
/*
 * Parse a (pre-validated) ciphertext into the static sapH_salt record:
 * hash type, iteration count, and the raw salt bytes that follow the
 * digest in the decoded base64 blob. Returns a pointer to static storage,
 * overwritten on every call.
 */
static void *get_salt(char *ciphertext)
{
	static struct sapH_salt s;
	char *cp = ciphertext;
	unsigned char tmp[MAX_BINARY_SIZE+SALT_LENGTH];
	int total_len, hash_len = 0;
	memset(&s, 0, sizeof(s));
	/* tag determines hash type (1..4) and how many digest bytes precede the salt */
	if (!strncasecmp(cp, FORMAT_TAG, FORMAT_TAG_LEN)) { s.type = 1; cp += FORMAT_TAG_LEN; hash_len = SHA1_BINARY_SIZE; }
	else if (!strncasecmp(cp, FORMAT_TAG256, FORMAT_TAG256_LEN)) { s.type = 2; cp += FORMAT_TAG256_LEN; hash_len = SHA256_BINARY_SIZE; }
	else if (!strncasecmp(cp, FORMAT_TAG384, FORMAT_TAG384_LEN)) { s.type = 3; cp += FORMAT_TAG384_LEN; hash_len = SHA384_BINARY_SIZE; }
	else if (!strncasecmp(cp, FORMAT_TAG512, FORMAT_TAG512_LEN)) { s.type = 4; cp += FORMAT_TAG512_LEN; hash_len = SHA512_BINARY_SIZE; }
	else { fprintf(stderr, "error, bad signature in sap-H format!\n"); error(); }
	sscanf(cp, "%u", &s.iter);   /* iteration count; valid() checked isdecu() */
	while (*cp != '}') ++cp;
	++cp;
	total_len = base64_convert(cp, e_b64_mime, strlen(cp), tmp, e_b64_raw,
	                           sizeof(tmp), flg_Base64_MIME_TRAIL_EQ, 0);
	/* salt is whatever follows the digest in the decoded blob */
	s.slen = total_len-hash_len;
	memcpy(s.s, &tmp[hash_len], s.slen);
	return &s;
}
/*
 * Canonicalize a ciphertext for pot-file storage. We could case-switch the
 * "SHA"/"sha" in the tags to unify case; if real-world hashes turn out to
 * vary, we will have to. For now the input is returned unchanged.
 */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	return ciphertext;
}
/* Cracker hash-table lookups: mask off increasing widths of the first
   32 bits of the computed hash for progressively larger tables. */
static int get_hash_0(int index) { return *(uint32_t*)crypt_key[index] & PH_MASK_0; }
static int get_hash_1(int index) { return *(uint32_t*)crypt_key[index] & PH_MASK_1; }
static int get_hash_2(int index) { return *(uint32_t*)crypt_key[index] & PH_MASK_2; }
static int get_hash_3(int index) { return *(uint32_t*)crypt_key[index] & PH_MASK_3; }
static int get_hash_4(int index) { return *(uint32_t*)crypt_key[index] & PH_MASK_4; }
static int get_hash_5(int index) { return *(uint32_t*)crypt_key[index] & PH_MASK_5; }
static int get_hash_6(int index) { return *(uint32_t*)crypt_key[index] & PH_MASK_6; }
/*
 * DJB2-style xor hash over the raw bytes of the salt record, folded into
 * the salt hash-table size. (hash*33 is identical to (hash<<5)+hash under
 * unsigned wraparound.)
 */
static int salt_hash(void *salt)
{
	const unsigned char *bytes = (const unsigned char *)salt;
	unsigned int h = 5381;
	unsigned int pos;
	for (pos = 0; pos < sizeof(struct sapH_salt); pos++)
		h = (h * 33) ^ bytes[pos];
	return h & (SALT_HASH_SIZE - 1);
}
/* Tunable-cost reporter: hash type (1:SHA1 2:SHA256 3:SHA384 4:SHA512). */
static unsigned int sapH_type(void *salt)
{
	return ((struct sapH_salt *)salt)->type;
}
/* Tunable-cost reporter: the salt's iteration count. */
static unsigned int iteration_count(void *salt)
{
	return ((struct sapH_salt *)salt)->iter;
}
/* Format descriptor registered with JtR core: static parameters first,
 * then the method table wiring up the functions defined above. */
struct fmt_main fmt_sapH = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_OMP | FMT_CASE | FMT_8_BIT | FMT_UTF8,
		{
			/* tunable cost names reported by --list=format-details */
			"hash type [1:SHA1 2:SHA256 3:SHA384 4:SHA512]",
			"iteration count",
		},
		{ FORMAT_TAG, FORMAT_TAG256, FORMAT_TAG384, FORMAT_TAG512 },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{
			/* tunable cost accessors, parallel to the names above */
			sapH_type,
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
par_rap.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "_hypre_utilities.h"
/*--------------------------------------------------------------------------
* hypre_BoomerAMGBuildCoarseOperator
*--------------------------------------------------------------------------*/
/*
 * Build the Galerkin coarse-grid operator RAP from restriction-transpose
 * RT, fine operator A, and interpolation P. Thin wrapper that delegates to
 * the KT variant with keepTranspose = 0.
 */
HYPRE_Int
hypre_BoomerAMGBuildCoarseOperator( hypre_ParCSRMatrix  *RT,
                                    hypre_ParCSRMatrix  *A,
                                    hypre_ParCSRMatrix  *P,
                                    hypre_ParCSRMatrix **RAP_ptr )
{
   hypre_BoomerAMGBuildCoarseOperatorKT( RT, A, P, 0, RAP_ptr);
   return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGBuildCoarseOperatorKT( hypre_ParCSRMatrix *RT,
hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *P,
HYPRE_Int keepTranspose,
hypre_ParCSRMatrix **RAP_ptr )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RAP] -= hypre_MPI_Wtime();
#endif
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *RT_diag = hypre_ParCSRMatrixDiag(RT);
hypre_CSRMatrix *RT_offd = hypre_ParCSRMatrixOffd(RT);
HYPRE_Int num_cols_diag_RT = hypre_CSRMatrixNumCols(RT_diag);
HYPRE_Int num_cols_offd_RT = hypre_CSRMatrixNumCols(RT_offd);
HYPRE_Int num_rows_offd_RT = hypre_CSRMatrixNumRows(RT_offd);
hypre_ParCSRCommPkg *comm_pkg_RT = hypre_ParCSRMatrixCommPkg(RT);
HYPRE_Int num_recvs_RT = 0;
HYPRE_Int num_sends_RT = 0;
HYPRE_Int *send_map_starts_RT;
HYPRE_Int *send_map_elmts_RT;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag);
HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
hypre_CSRMatrix *P_diag = hypre_ParCSRMatrixDiag(P);
HYPRE_Real *P_diag_data = hypre_CSRMatrixData(P_diag);
HYPRE_Int *P_diag_i = hypre_CSRMatrixI(P_diag);
HYPRE_Int *P_diag_j = hypre_CSRMatrixJ(P_diag);
hypre_CSRMatrix *P_offd = hypre_ParCSRMatrixOffd(P);
HYPRE_BigInt *col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P);
HYPRE_Real *P_offd_data = hypre_CSRMatrixData(P_offd);
HYPRE_Int *P_offd_i = hypre_CSRMatrixI(P_offd);
HYPRE_Int *P_offd_j = hypre_CSRMatrixJ(P_offd);
HYPRE_BigInt first_col_diag_P = hypre_ParCSRMatrixFirstColDiag(P);
HYPRE_BigInt last_col_diag_P;
HYPRE_Int num_cols_diag_P = hypre_CSRMatrixNumCols(P_diag);
HYPRE_Int num_cols_offd_P = hypre_CSRMatrixNumCols(P_offd);
HYPRE_BigInt *coarse_partitioning = hypre_ParCSRMatrixColStarts(P);
HYPRE_BigInt *RT_partitioning = hypre_ParCSRMatrixColStarts(RT);
hypre_ParCSRMatrix *RAP;
HYPRE_BigInt *col_map_offd_RAP = NULL;
HYPRE_BigInt *new_col_map_offd_RAP = NULL;
hypre_CSRMatrix *RAP_int = NULL;
HYPRE_Real *RAP_int_data;
HYPRE_Int *RAP_int_i;
HYPRE_BigInt *RAP_int_j;
hypre_CSRMatrix *RAP_ext;
HYPRE_Real *RAP_ext_data = NULL;
HYPRE_Int *RAP_ext_i = NULL;
HYPRE_BigInt *RAP_ext_j = NULL;
hypre_CSRMatrix *RAP_diag;
HYPRE_Real *RAP_diag_data;
HYPRE_Int *RAP_diag_i;
HYPRE_Int *RAP_diag_j;
hypre_CSRMatrix *RAP_offd;
HYPRE_Real *RAP_offd_data = NULL;
HYPRE_Int *RAP_offd_i = NULL;
HYPRE_Int *RAP_offd_j = NULL;
HYPRE_Int RAP_size;
HYPRE_Int RAP_ext_size;
HYPRE_Int RAP_diag_size;
HYPRE_Int RAP_offd_size;
HYPRE_Int P_ext_diag_size;
HYPRE_Int P_ext_offd_size;
HYPRE_BigInt first_col_diag_RAP;
HYPRE_BigInt last_col_diag_RAP;
HYPRE_Int num_cols_offd_RAP = 0;
hypre_CSRMatrix *R_diag;
HYPRE_Real *R_diag_data;
HYPRE_Int *R_diag_i;
HYPRE_Int *R_diag_j;
hypre_CSRMatrix *R_offd;
HYPRE_Real *R_offd_data;
HYPRE_Int *R_offd_i;
HYPRE_Int *R_offd_j;
HYPRE_Real *RA_diag_data_array = NULL;
HYPRE_Int *RA_diag_j_array = NULL;
HYPRE_Real *RA_offd_data_array = NULL;
HYPRE_Int *RA_offd_j_array = NULL;
hypre_CSRMatrix *Ps_ext;
HYPRE_Real *Ps_ext_data;
HYPRE_Int *Ps_ext_i;
HYPRE_BigInt *Ps_ext_j;
HYPRE_Real *P_ext_diag_data = NULL;
HYPRE_Int *P_ext_diag_i = NULL;
HYPRE_Int *P_ext_diag_j = NULL;
HYPRE_Real *P_ext_offd_data = NULL;
HYPRE_Int *P_ext_offd_i = NULL;
HYPRE_Int *P_ext_offd_j = NULL;
HYPRE_BigInt *P_big_offd_j = NULL;
HYPRE_BigInt *col_map_offd_Pext;
HYPRE_Int *map_P_to_Pext = NULL;
HYPRE_Int *map_P_to_RAP = NULL;
HYPRE_Int *map_Pext_to_RAP = NULL;
HYPRE_Int *P_marker;
HYPRE_Int **P_mark_array;
HYPRE_Int **A_mark_array;
HYPRE_Int *A_marker;
HYPRE_BigInt *temp;
HYPRE_BigInt n_coarse, n_coarse_RT;
HYPRE_Int square = 1;
HYPRE_Int num_cols_offd_Pext = 0;
HYPRE_Int ic, i, j, k;
HYPRE_Int i1, i2, i3, ii, ns, ne, size, rest;
HYPRE_Int cnt = 0; /*value; */
HYPRE_Int jj1, jj2, jj3, jcol;
HYPRE_Int *jj_count, *jj_cnt_diag, *jj_cnt_offd;
HYPRE_Int jj_counter, jj_count_diag, jj_count_offd;
HYPRE_Int jj_row_begining, jj_row_begin_diag, jj_row_begin_offd;
HYPRE_Int start_indexing = 0; /* start indexing for RAP_data at 0 */
HYPRE_Int num_nz_cols_A;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Real r_entry;
HYPRE_Real r_a_product;
HYPRE_Real r_a_p_product;
HYPRE_Real zero = 0.0;
HYPRE_Int *prefix_sum_workspace;
/*-----------------------------------------------------------------------
* Copy ParCSRMatrix RT into CSRMatrix R so that we have row-wise access
* to restriction .
*-----------------------------------------------------------------------*/
hypre_MPI_Comm_size(comm,&num_procs);
num_threads = hypre_NumThreads();
if (comm_pkg_RT)
{
num_recvs_RT = hypre_ParCSRCommPkgNumRecvs(comm_pkg_RT);
num_sends_RT = hypre_ParCSRCommPkgNumSends(comm_pkg_RT);
send_map_starts_RT =hypre_ParCSRCommPkgSendMapStarts(comm_pkg_RT);
send_map_elmts_RT = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_RT);
}
else if (num_procs > 1)
{
hypre_MatvecCommPkgCreate(RT);
comm_pkg_RT = hypre_ParCSRMatrixCommPkg(RT);
num_recvs_RT = hypre_ParCSRCommPkgNumRecvs(comm_pkg_RT);
num_sends_RT = hypre_ParCSRCommPkgNumSends(comm_pkg_RT);
send_map_starts_RT =hypre_ParCSRCommPkgSendMapStarts(comm_pkg_RT);
send_map_elmts_RT = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_RT);
}
hypre_CSRMatrixTranspose(RT_diag,&R_diag,1);
if (num_cols_offd_RT)
{
hypre_CSRMatrixTranspose(RT_offd,&R_offd,1);
R_offd_data = hypre_CSRMatrixData(R_offd);
R_offd_i = hypre_CSRMatrixI(R_offd);
R_offd_j = hypre_CSRMatrixJ(R_offd);
}
/*-----------------------------------------------------------------------
* Access the CSR vectors for R. Also get sizes of fine and
* coarse grids.
*-----------------------------------------------------------------------*/
R_diag_data = hypre_CSRMatrixData(R_diag);
R_diag_i = hypre_CSRMatrixI(R_diag);
R_diag_j = hypre_CSRMatrixJ(R_diag);
n_coarse = hypre_ParCSRMatrixGlobalNumCols(P);
num_nz_cols_A = num_cols_diag_A + num_cols_offd_A;
n_coarse_RT = hypre_ParCSRMatrixGlobalNumCols(RT);
if (n_coarse != n_coarse_RT)
square = 0;
/*-----------------------------------------------------------------------
* Generate Ps_ext, i.e. portion of P that is stored on neighbor procs
* and needed locally for triple matrix product
*-----------------------------------------------------------------------*/
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_UnorderedIntMap send_map_elmts_RT_inverse_map;
HYPRE_Int *send_map_elmts_starts_RT_aggregated = NULL;
HYPRE_Int *send_map_elmts_RT_aggregated = NULL;
HYPRE_Int send_map_elmts_RT_inverse_map_initialized =
num_sends_RT > 0 && send_map_starts_RT[num_sends_RT] - send_map_starts_RT[0] > 0;
if (send_map_elmts_RT_inverse_map_initialized)
{
hypre_UnorderedIntSet send_map_elmts_set;
hypre_UnorderedIntSetCreate(&send_map_elmts_set, 2*(send_map_starts_RT[num_sends_RT] - send_map_starts_RT[0]), 16*hypre_NumThreads());
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = send_map_starts_RT[0]; i < send_map_starts_RT[num_sends_RT]; i++)
{
HYPRE_Int key = send_map_elmts_RT[i];
hypre_UnorderedIntSetPut(&send_map_elmts_set, key);
}
HYPRE_Int send_map_elmts_unique_size;
HYPRE_Int *send_map_elmts_unique = hypre_UnorderedIntSetCopyToArray(&send_map_elmts_set, &send_map_elmts_unique_size);
hypre_UnorderedIntSetDestroy(&send_map_elmts_set);
hypre_UnorderedIntMapCreate(&send_map_elmts_RT_inverse_map, 2*send_map_elmts_unique_size, 16*hypre_NumThreads());
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = 0; i < send_map_elmts_unique_size; i++)
{
hypre_UnorderedIntMapPutIfAbsent(&send_map_elmts_RT_inverse_map, send_map_elmts_unique[i], i);
}
hypre_TFree(send_map_elmts_unique, HYPRE_MEMORY_HOST);
send_map_elmts_starts_RT_aggregated = hypre_TAlloc(HYPRE_Int, send_map_elmts_unique_size + 1, HYPRE_MEMORY_HOST);
send_map_elmts_RT_aggregated = hypre_TAlloc(HYPRE_Int, send_map_starts_RT[num_sends_RT], HYPRE_MEMORY_HOST);
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = 0; i < send_map_elmts_unique_size; i++)
{
send_map_elmts_starts_RT_aggregated[i] = 0;
}
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = send_map_starts_RT[0]; i < send_map_starts_RT[num_sends_RT]; i++)
{
HYPRE_Int idx = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, send_map_elmts_RT[i]);
#pragma omp atomic
send_map_elmts_starts_RT_aggregated[idx]++;
}
for (i = 0; i < send_map_elmts_unique_size - 1; i++)
{
send_map_elmts_starts_RT_aggregated[i + 1] += send_map_elmts_starts_RT_aggregated[i];
}
send_map_elmts_starts_RT_aggregated[send_map_elmts_unique_size] = send_map_starts_RT[num_sends_RT];
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = send_map_starts_RT[num_sends_RT] - 1; i >= send_map_starts_RT[0]; i--)
{
HYPRE_Int idx = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, send_map_elmts_RT[i]);
HYPRE_Int offset = hypre_fetch_and_add(send_map_elmts_starts_RT_aggregated + idx, -1) - 1;
send_map_elmts_RT_aggregated[offset] = i;
}
}
#endif /* HYPRE_CONCURRENT_HOPSCOTCH */
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] -= hypre_MPI_Wtime();
#endif
if (num_procs > 1)
{
Ps_ext = hypre_ParCSRMatrixExtractBExt(P,A,1);
Ps_ext_data = hypre_CSRMatrixData(Ps_ext);
Ps_ext_i = hypre_CSRMatrixI(Ps_ext);
Ps_ext_j = hypre_CSRMatrixBigJ(Ps_ext);
}
P_ext_diag_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST);
P_ext_offd_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST);
P_ext_diag_i[0] = 0;
P_ext_offd_i[0] = 0;
P_ext_diag_size = 0;
P_ext_offd_size = 0;
last_col_diag_P = first_col_diag_P + (HYPRE_BigInt) num_cols_diag_P - 1;
/*HYPRE_Int prefix_sum_workspace[2*(num_threads + 1)];*/
prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(num_threads + 1), HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j)
#endif /* This threading causes problem, maybe the prefix_sum in combination with BigInt? */
{
HYPRE_Int i_begin, i_end;
hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_A);
HYPRE_Int P_ext_diag_size_private = 0;
HYPRE_Int P_ext_offd_size_private = 0;
for (i = i_begin; i < i_end; i++)
{
for (j=Ps_ext_i[i]; j < Ps_ext_i[i+1]; j++)
if (Ps_ext_j[j] < first_col_diag_P || Ps_ext_j[j] > last_col_diag_P)
P_ext_offd_size_private++;
else
P_ext_diag_size_private++;
}
hypre_prefix_sum_pair(&P_ext_diag_size_private, &P_ext_diag_size, &P_ext_offd_size_private, &P_ext_offd_size, prefix_sum_workspace);
#ifdef HYPRE_USING_OPENMP
#pragma omp master
#endif
{
if (P_ext_diag_size)
{
P_ext_diag_j = hypre_CTAlloc(HYPRE_Int, P_ext_diag_size, HYPRE_MEMORY_HOST);
P_ext_diag_data = hypre_CTAlloc(HYPRE_Real, P_ext_diag_size, HYPRE_MEMORY_HOST);
}
if (P_ext_offd_size)
{
P_ext_offd_j = hypre_CTAlloc(HYPRE_Int, P_ext_offd_size, HYPRE_MEMORY_HOST);
P_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, P_ext_offd_size, HYPRE_MEMORY_HOST);
P_ext_offd_data = hypre_CTAlloc(HYPRE_Real, P_ext_offd_size, HYPRE_MEMORY_HOST);
//temp = hypre_CTAlloc(HYPRE_BigInt, P_ext_offd_size+num_cols_offd_P, HYPRE_MEMORY_HOST);
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i = i_begin; i < i_end; i++)
{
for (j=Ps_ext_i[i]; j < Ps_ext_i[i+1]; j++)
{
HYPRE_BigInt value = Ps_ext_j[j];
if (value < first_col_diag_P || value > last_col_diag_P)
{
//Ps_ext_j[P_ext_offd_size_private] = value;
//temp[P_ext_offd_size_private] = value;
P_big_offd_j[P_ext_offd_size_private] = value;
P_ext_offd_data[P_ext_offd_size_private++] = Ps_ext_data[j];
}
else
{
P_ext_diag_j[P_ext_diag_size_private] = (HYPRE_Int)(Ps_ext_j[j] - first_col_diag_P);
P_ext_diag_data[P_ext_diag_size_private++] = Ps_ext_data[j];
}
}
P_ext_diag_i[i+1] = P_ext_diag_size_private;
P_ext_offd_i[i+1] = P_ext_offd_size_private;
}
} /* omp parallel */
hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Ps_ext);
Ps_ext = NULL;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if (P_ext_offd_size || num_cols_offd_P)
{
hypre_UnorderedBigIntSet found_set;
hypre_UnorderedBigIntSetCreate(&found_set, P_ext_offd_size + num_cols_offd_P, 16*hypre_NumThreads());
#pragma omp parallel private(i)
{
#pragma omp for HYPRE_SMP_SCHEDULE
for (i = 0; i < P_ext_offd_size; i++)
{
//hypre_UnorderedBigIntSetPut(&found_set, Ps_ext_j[i]);
hypre_UnorderedBigIntSetPut(&found_set, P_big_offd_j[i]);
}
#pragma omp for HYPRE_SMP_SCHEDULE
for (i = 0; i < num_cols_offd_P; i++)
{
hypre_UnorderedBigIntSetPut(&found_set, col_map_offd_P[i]);
}
} /* omp parallel */
/* Warning on getting temp right !!!!! */
temp = hypre_UnorderedBigIntSetCopyToArray(&found_set, &num_cols_offd_Pext);
hypre_UnorderedBigIntSetDestroy(&found_set);
hypre_UnorderedBigIntMap col_map_offd_Pext_inverse;
hypre_big_sort_and_create_inverse_map(temp, num_cols_offd_Pext, &col_map_offd_Pext, &col_map_offd_Pext_inverse);
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i=0 ; i < P_ext_offd_size; i++)
//Ps_ext_j[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_Pext_inverse, Ps_ext_j[i]);
P_ext_offd_j[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_Pext_inverse, P_big_offd_j[i]);
if (num_cols_offd_Pext) hypre_UnorderedBigIntMapDestroy(&col_map_offd_Pext_inverse);
}
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
if (P_ext_offd_size || num_cols_offd_P)
{
temp = hypre_CTAlloc(HYPRE_BigInt, P_ext_offd_size+num_cols_offd_P, HYPRE_MEMORY_HOST);
for (i=0; i < P_ext_offd_size; i++)
//Ps_ext_j[i] = temp[i];
//temp[i] = Ps_ext_j[i];
temp[i] = P_big_offd_j[i];
cnt = P_ext_offd_size;
for (i=0; i < num_cols_offd_P; i++)
temp[cnt++] = col_map_offd_P[i];
}
if (cnt)
{
hypre_BigQsort0(temp, 0, cnt-1);
num_cols_offd_Pext = 1;
HYPRE_BigInt value = temp[0];
for (i=1; i < cnt; i++)
{
if (temp[i] > value)
{
value = temp[i];
temp[num_cols_offd_Pext++] = value;
}
}
}
if (num_cols_offd_Pext)
col_map_offd_Pext = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_Pext, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_offd_Pext; i++)
col_map_offd_Pext[i] = temp[i];
if (P_ext_offd_size || num_cols_offd_P)
hypre_TFree(temp, HYPRE_MEMORY_HOST);
/*if (P_ext_offd_size)
P_ext_offd_j = hypre_CTAlloc(HYPRE_Int, P_ext_offd_size, HYPRE_MEMORY_HOST);*/
for (i=0 ; i < P_ext_offd_size; i++)
P_ext_offd_j[i] = hypre_BigBinarySearch(col_map_offd_Pext,
//Ps_ext_j[i],
P_big_offd_j[i],
num_cols_offd_Pext);
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */
if (P_ext_offd_size)
hypre_TFree(P_big_offd_j, HYPRE_MEMORY_HOST);
/*if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Ps_ext);
Ps_ext = NULL;
}*/
if (num_cols_offd_P)
{
map_P_to_Pext = hypre_CTAlloc(HYPRE_Int, num_cols_offd_P, HYPRE_MEMORY_HOST);
cnt = 0;
for (i=0; i < num_cols_offd_Pext; i++)
if (col_map_offd_Pext[i] == col_map_offd_P[cnt])
{
map_P_to_Pext[cnt++] = i;
if (cnt == num_cols_offd_P) break;
}
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] += hypre_MPI_Wtime();
#endif
/*-----------------------------------------------------------------------
 *  First Pass: Determine size of RAP_int and set up RAP_int_i if there
 *  is more than one processor and there are nonzero elements in R_offd
*-----------------------------------------------------------------------*/
P_mark_array = hypre_CTAlloc(HYPRE_Int *, num_threads, HYPRE_MEMORY_HOST);
A_mark_array = hypre_CTAlloc(HYPRE_Int *, num_threads, HYPRE_MEMORY_HOST);
if (num_cols_offd_RT)
{
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_counter,jj_row_begining,A_marker,P_marker) HYPRE_SMP_SCHEDULE
#endif
for (ii = 0; ii < num_threads; ii++)
{
size = num_cols_offd_RT/num_threads;
rest = num_cols_offd_RT - size*num_threads;
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
/*-----------------------------------------------------------------------
* Allocate marker arrays.
*-----------------------------------------------------------------------*/
if (num_cols_offd_Pext || num_cols_diag_P)
{
P_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_cols_diag_P+num_cols_offd_Pext, HYPRE_MEMORY_HOST);
P_marker = P_mark_array[ii];
}
A_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_nz_cols_A, HYPRE_MEMORY_HOST);
A_marker = A_mark_array[ii];
/*-----------------------------------------------------------------------
* Initialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
for (ic = 0; ic < num_cols_diag_P+num_cols_offd_Pext; ic++)
{
P_marker[ic] = -1;
}
for (i = 0; i < num_nz_cols_A; i++)
{
A_marker[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over exterior c-points
*-----------------------------------------------------------------------*/
for (ic = ns; ic < ne; ic++)
{
jj_row_begining = jj_counter;
/*--------------------------------------------------------------------
* Loop over entries in row ic of R_offd.
*--------------------------------------------------------------------*/
for (jj1 = R_offd_i[ic]; jj1 < R_offd_i[ic+1]; jj1++)
{
i1 = R_offd_j[jj1];
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_offd.
*-----------------------------------------------------------------*/
for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)
{
i2 = A_offd_j[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_ext.
*-----------------------------------------------------------*/
for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++)
{
i3 = P_ext_diag_j[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
jj_counter++;
}
}
for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++)
{
i3 = P_ext_offd_j[jj3] + num_cols_diag_P;
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
jj_counter++;
}
}
}
}
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_diag.
*-----------------------------------------------------------------*/
for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)
{
i2 = A_diag_j[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2+num_cols_offd_A] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2+num_cols_offd_A] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_diag.
*-----------------------------------------------------------*/
for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++)
{
i3 = P_diag_j[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
jj_counter++;
}
}
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_offd.
*-----------------------------------------------------------*/
for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++)
{
i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P;
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
jj_counter++;
}
}
}
}
}
}
jj_count[ii] = jj_counter;
}
/*-----------------------------------------------------------------------
* Allocate RAP_int_data and RAP_int_j arrays.
*-----------------------------------------------------------------------*/
for (i = 0; i < num_threads-1; i++)
jj_count[i+1] += jj_count[i];
RAP_size = jj_count[num_threads-1];
RAP_int_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_RT+1, HYPRE_MEMORY_HOST);
RAP_int_data = hypre_CTAlloc(HYPRE_Real, RAP_size, HYPRE_MEMORY_HOST);
RAP_int_j = hypre_CTAlloc(HYPRE_BigInt, RAP_size, HYPRE_MEMORY_HOST);
RAP_int_i[num_cols_offd_RT] = RAP_size;
/*-----------------------------------------------------------------------
* Second Pass: Fill in RAP_int_data and RAP_int_j.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_counter,jj_row_begining,A_marker,P_marker,r_entry,r_a_product,r_a_p_product) HYPRE_SMP_SCHEDULE
#endif
for (ii = 0; ii < num_threads; ii++)
{
size = num_cols_offd_RT/num_threads;
rest = num_cols_offd_RT - size*num_threads;
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
/*-----------------------------------------------------------------------
* Initialize some stuff.
*-----------------------------------------------------------------------*/
if (num_cols_offd_Pext || num_cols_diag_P)
P_marker = P_mark_array[ii];
A_marker = A_mark_array[ii];
jj_counter = start_indexing;
if (ii > 0) jj_counter = jj_count[ii-1];
for (ic = 0; ic < num_cols_diag_P+num_cols_offd_Pext; ic++)
{
P_marker[ic] = -1;
}
for (i = 0; i < num_nz_cols_A; i++)
{
A_marker[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over exterior c-points.
*-----------------------------------------------------------------------*/
for (ic = ns; ic < ne; ic++)
{
jj_row_begining = jj_counter;
RAP_int_i[ic] = jj_counter;
/*--------------------------------------------------------------------
* Loop over entries in row ic of R_offd.
*--------------------------------------------------------------------*/
for (jj1 = R_offd_i[ic]; jj1 < R_offd_i[ic+1]; jj1++)
{
i1 = R_offd_j[jj1];
r_entry = R_offd_data[jj1];
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_offd.
*-----------------------------------------------------------------*/
for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)
{
i2 = A_offd_j[jj2];
r_a_product = r_entry * A_offd_data[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_ext.
*-----------------------------------------------------------*/
for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++)
{
i3 = P_ext_diag_j[jj3];
r_a_p_product = r_a_product * P_ext_diag_data[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
RAP_int_data[jj_counter] = r_a_p_product;
RAP_int_j[jj_counter] = (HYPRE_BigInt)i3 + first_col_diag_P;
jj_counter++;
}
else
{
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++)
{
i3 = P_ext_offd_j[jj3] + num_cols_diag_P;
r_a_p_product = r_a_product * P_ext_offd_data[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
RAP_int_data[jj_counter] = r_a_p_product;
RAP_int_j[jj_counter]
= col_map_offd_Pext[i3-num_cols_diag_P];
jj_counter++;
}
else
{
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
}
/*--------------------------------------------------------------
                  * If i2 is previously visited ( A_marker[i2]=ic ) it yields
* no new entries in RAP and can just add new contributions.
*--------------------------------------------------------------*/
else
{
for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++)
{
i3 = P_ext_diag_j[jj3];
r_a_p_product = r_a_product * P_ext_diag_data[jj3];
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++)
{
i3 = P_ext_offd_j[jj3] + num_cols_diag_P;
r_a_p_product = r_a_product * P_ext_offd_data[jj3];
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
}
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_diag.
*-----------------------------------------------------------------*/
for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)
{
i2 = A_diag_j[jj2];
r_a_product = r_entry * A_diag_data[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2+num_cols_offd_A] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2+num_cols_offd_A] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_diag.
*-----------------------------------------------------------*/
for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++)
{
i3 = P_diag_j[jj3];
r_a_p_product = r_a_product * P_diag_data[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
RAP_int_data[jj_counter] = r_a_p_product;
RAP_int_j[jj_counter] = (HYPRE_BigInt)i3 + first_col_diag_P;
jj_counter++;
}
else
{
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++)
{
i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P;
r_a_p_product = r_a_product * P_offd_data[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
RAP_int_data[jj_counter] = r_a_p_product;
RAP_int_j[jj_counter] =
col_map_offd_Pext[i3-num_cols_diag_P];
jj_counter++;
}
else
{
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
}
/*--------------------------------------------------------------
                * If i2 is previously visited ( A_marker[i2]=ic ) it yields
* no new entries in RAP and can just add new contributions.
*--------------------------------------------------------------*/
else
{
for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++)
{
i3 = P_diag_j[jj3];
r_a_p_product = r_a_product * P_diag_data[jj3];
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++)
{
i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P;
r_a_p_product = r_a_product * P_offd_data[jj3];
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
}
}
}
if (num_cols_offd_Pext || num_cols_diag_P)
hypre_TFree(P_mark_array[ii], HYPRE_MEMORY_HOST);
hypre_TFree(A_mark_array[ii], HYPRE_MEMORY_HOST);
}
RAP_int = hypre_CSRMatrixCreate(num_cols_offd_RT,num_rows_offd_RT,RAP_size);
hypre_CSRMatrixMemoryLocation(RAP_int) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixI(RAP_int) = RAP_int_i;
hypre_CSRMatrixBigJ(RAP_int) = RAP_int_j;
hypre_CSRMatrixData(RAP_int) = RAP_int_data;
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] -= hypre_MPI_Wtime();
#endif
RAP_ext_size = 0;
if (num_sends_RT || num_recvs_RT)
{
void *request;
hypre_ExchangeExternalRowsInit(RAP_int, comm_pkg_RT, &request);
RAP_ext = hypre_ExchangeExternalRowsWait(request);
RAP_ext_i = hypre_CSRMatrixI(RAP_ext);
RAP_ext_j = hypre_CSRMatrixBigJ(RAP_ext);
RAP_ext_data = hypre_CSRMatrixData(RAP_ext);
RAP_ext_size = RAP_ext_i[hypre_CSRMatrixNumRows(RAP_ext)];
}
if (num_cols_offd_RT)
{
hypre_CSRMatrixDestroy(RAP_int);
RAP_int = NULL;
}
RAP_diag_i = hypre_TAlloc(HYPRE_Int, num_cols_diag_RT+1, HYPRE_MEMORY_DEVICE);
RAP_offd_i = hypre_TAlloc(HYPRE_Int, num_cols_diag_RT+1, HYPRE_MEMORY_DEVICE);
first_col_diag_RAP = first_col_diag_P;
last_col_diag_RAP = first_col_diag_P + num_cols_diag_P - 1;
/*-----------------------------------------------------------------------
* check for new nonzero columns in RAP_offd generated through RAP_ext
*-----------------------------------------------------------------------*/
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_UnorderedBigIntMap col_map_offd_RAP_inverse;
if (RAP_ext_size || num_cols_offd_Pext)
{
hypre_UnorderedBigIntSet found_set;
hypre_UnorderedBigIntSetCreate(&found_set, 2*(RAP_ext_size + num_cols_offd_Pext), 16*hypre_NumThreads());
cnt = 0;
#pragma omp parallel private(i)
{
#pragma omp for HYPRE_SMP_SCHEDULE
for (i = 0; i < RAP_ext_size; i++)
{
if (RAP_ext_j[i] < first_col_diag_RAP
|| RAP_ext_j[i] > last_col_diag_RAP)
hypre_UnorderedBigIntSetPut(&found_set, RAP_ext_j[i]);
}
#pragma omp for HYPRE_SMP_SCHEDULE
for (i = 0; i < num_cols_offd_Pext; i++)
{
hypre_UnorderedBigIntSetPut(&found_set, col_map_offd_Pext[i]);
}
} /* omp parallel */
temp = hypre_UnorderedBigIntSetCopyToArray(&found_set, &num_cols_offd_RAP);
hypre_UnorderedBigIntSetDestroy(&found_set);
hypre_big_sort_and_create_inverse_map(temp, num_cols_offd_RAP, &col_map_offd_RAP, &col_map_offd_RAP_inverse);
}
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
if (RAP_ext_size || num_cols_offd_Pext)
{
temp = hypre_CTAlloc(HYPRE_BigInt, RAP_ext_size+num_cols_offd_Pext, HYPRE_MEMORY_HOST);
cnt = 0;
for (i=0; i < RAP_ext_size; i++)
if (RAP_ext_j[i] < first_col_diag_RAP
|| RAP_ext_j[i] > last_col_diag_RAP)
temp[cnt++] = RAP_ext_j[i];
for (i=0; i < num_cols_offd_Pext; i++)
temp[cnt++] = col_map_offd_Pext[i];
if (cnt)
{
hypre_BigQsort0(temp,0,cnt-1);
HYPRE_BigInt value = temp[0];
num_cols_offd_RAP = 1;
for (i=1; i < cnt; i++)
{
if (temp[i] > value)
{
value = temp[i];
temp[num_cols_offd_RAP++] = value;
}
}
}
/* now evaluate col_map_offd_RAP */
if (num_cols_offd_RAP)
col_map_offd_RAP = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_RAP, HYPRE_MEMORY_HOST);
for (i=0 ; i < num_cols_offd_RAP; i++)
col_map_offd_RAP[i] = temp[i];
hypre_TFree(temp, HYPRE_MEMORY_HOST);
}
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */
if (num_cols_offd_P)
{
map_P_to_RAP = hypre_TAlloc(HYPRE_Int, num_cols_offd_P, HYPRE_MEMORY_HOST);
cnt = 0;
for (i=0; i < num_cols_offd_RAP; i++)
if (col_map_offd_RAP[i] == col_map_offd_P[cnt])
{
map_P_to_RAP[cnt++] = i;
if (cnt == num_cols_offd_P) break;
}
}
if (num_cols_offd_Pext)
{
map_Pext_to_RAP = hypre_TAlloc(HYPRE_Int, num_cols_offd_Pext, HYPRE_MEMORY_HOST);
cnt = 0;
for (i=0; i < num_cols_offd_RAP; i++)
if (col_map_offd_RAP[i] == col_map_offd_Pext[cnt])
{
map_Pext_to_RAP[cnt++] = i;
if (cnt == num_cols_offd_Pext) break;
}
}
/*-----------------------------------------------------------------------
* Convert RAP_ext column indices
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < RAP_ext_size; i++)
if (RAP_ext_j[i] < first_col_diag_RAP
|| RAP_ext_j[i] > last_col_diag_RAP)
RAP_ext_j[i] = (HYPRE_BigInt)num_cols_diag_P
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
+(HYPRE_BigInt)hypre_UnorderedBigIntMapGet(&col_map_offd_RAP_inverse, RAP_ext_j[i]);
#else
+(HYPRE_BigInt)hypre_BigBinarySearch(col_map_offd_RAP, RAP_ext_j[i],num_cols_offd_RAP);
#endif
else
RAP_ext_j[i] -= first_col_diag_RAP;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if (num_cols_offd_RAP)
hypre_UnorderedBigIntMapDestroy(&col_map_offd_RAP_inverse);
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] += hypre_MPI_Wtime();
#endif
/* need to allocate new P_marker etc. and make further changes */
/*-----------------------------------------------------------------------
* Initialize some stuff.
*-----------------------------------------------------------------------*/
jj_cnt_diag = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_cnt_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker) HYPRE_SMP_SCHEDULE
#endif
for (ii = 0; ii < num_threads; ii++)
{
size = num_cols_diag_RT/num_threads;
rest = num_cols_diag_RT - size*num_threads;
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
P_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_cols_diag_P+num_cols_offd_RAP, HYPRE_MEMORY_HOST);
A_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_nz_cols_A, HYPRE_MEMORY_HOST);
P_marker = P_mark_array[ii];
A_marker = A_mark_array[ii];
jj_count_diag = start_indexing;
jj_count_offd = start_indexing;
for (ic = 0; ic < num_cols_diag_P+num_cols_offd_RAP; ic++)
{
P_marker[ic] = -1;
}
for (i = 0; i < num_nz_cols_A; i++)
{
A_marker[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over interior c-points.
*-----------------------------------------------------------------------*/
for (ic = ns; ic < ne; ic++)
{
/*--------------------------------------------------------------------
* Set marker for diagonal entry, RAP_{ic,ic}. and for all points
* being added to row ic of RAP_diag and RAP_offd through RAP_ext
*--------------------------------------------------------------------*/
jj_row_begin_diag = jj_count_diag;
jj_row_begin_offd = jj_count_offd;
if (square)
P_marker[ic] = jj_count_diag++;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if (send_map_elmts_RT_inverse_map_initialized)
{
HYPRE_Int i = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, ic);
if (i != -1)
{
for (j = send_map_elmts_starts_RT_aggregated[i]; j < send_map_elmts_starts_RT_aggregated[i + 1]; j++)
{
HYPRE_Int jj = send_map_elmts_RT_aggregated[j];
for (k=RAP_ext_i[jj]; k < RAP_ext_i[jj+1]; k++)
{
jcol = (HYPRE_Int)RAP_ext_j[k];
if (jcol < num_cols_diag_P)
{
if (P_marker[jcol] < jj_row_begin_diag)
{
P_marker[jcol] = jj_count_diag;
jj_count_diag++;
}
}
else
{
if (P_marker[jcol] < jj_row_begin_offd)
{
P_marker[jcol] = jj_count_offd;
jj_count_offd++;
}
}
}
}
} // if (set)
}
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
for (i=0; i < num_sends_RT; i++)
for (j = send_map_starts_RT[i]; j < send_map_starts_RT[i+1]; j++)
if (send_map_elmts_RT[j] == ic)
{
for (k=RAP_ext_i[j]; k < RAP_ext_i[j+1]; k++)
{
jcol = (HYPRE_Int) RAP_ext_j[k];
if (jcol < num_cols_diag_P)
{
if (P_marker[jcol] < jj_row_begin_diag)
{
P_marker[jcol] = jj_count_diag;
jj_count_diag++;
}
}
else
{
if (P_marker[jcol] < jj_row_begin_offd)
{
P_marker[jcol] = jj_count_offd;
jj_count_offd++;
}
}
}
break;
}
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */
/*--------------------------------------------------------------------
* Loop over entries in row ic of R_diag.
*--------------------------------------------------------------------*/
for (jj1 = R_diag_i[ic]; jj1 < R_diag_i[ic+1]; jj1++)
{
i1 = R_diag_j[jj1];
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_offd.
*-----------------------------------------------------------------*/
if (num_cols_offd_A)
{
for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)
{
i2 = A_offd_j[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_ext.
*-----------------------------------------------------------*/
for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++)
{
i3 = P_ext_diag_j[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begin_diag)
{
P_marker[i3] = jj_count_diag;
jj_count_diag++;
}
}
for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++)
{
i3 = map_Pext_to_RAP[P_ext_offd_j[jj3]]+num_cols_diag_P;
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begin_offd)
{
P_marker[i3] = jj_count_offd;
jj_count_offd++;
}
}
}
}
}
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_diag.
*-----------------------------------------------------------------*/
for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)
{
i2 = A_diag_j[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2+num_cols_offd_A] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2+num_cols_offd_A] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_diag.
*-----------------------------------------------------------*/
for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++)
{
i3 = P_diag_j[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begin_diag)
{
P_marker[i3] = jj_count_diag;
jj_count_diag++;
}
}
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_offd.
*-----------------------------------------------------------*/
if (num_cols_offd_P)
{
for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++)
{
i3 = map_P_to_RAP[P_offd_j[jj3]] + num_cols_diag_P;
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begin_offd)
{
P_marker[i3] = jj_count_offd;
jj_count_offd++;
}
}
}
}
}
}
/*--------------------------------------------------------------------
* Set RAP_diag_i and RAP_offd_i for this row.
*--------------------------------------------------------------------*/
/*
RAP_diag_i[ic] = jj_row_begin_diag;
RAP_offd_i[ic] = jj_row_begin_offd;
*/
}
jj_cnt_diag[ii] = jj_count_diag;
jj_cnt_offd[ii] = jj_count_offd;
}
for (i=0; i < num_threads-1; i++)
{
jj_cnt_diag[i+1] += jj_cnt_diag[i];
jj_cnt_offd[i+1] += jj_cnt_offd[i];
}
jj_count_diag = jj_cnt_diag[num_threads-1];
jj_count_offd = jj_cnt_offd[num_threads-1];
RAP_diag_i[num_cols_diag_RT] = jj_count_diag;
RAP_offd_i[num_cols_diag_RT] = jj_count_offd;
/*-----------------------------------------------------------------------
* Allocate RAP_diag_data and RAP_diag_j arrays.
* Allocate RAP_offd_data and RAP_offd_j arrays.
*-----------------------------------------------------------------------*/
RAP_diag_size = jj_count_diag;
if (RAP_diag_size)
{
RAP_diag_data = hypre_CTAlloc(HYPRE_Real, RAP_diag_size, HYPRE_MEMORY_DEVICE);
RAP_diag_j = hypre_CTAlloc(HYPRE_Int, RAP_diag_size, HYPRE_MEMORY_DEVICE);
}
RAP_offd_size = jj_count_offd;
if (RAP_offd_size)
{
RAP_offd_data = hypre_CTAlloc(HYPRE_Real, RAP_offd_size, HYPRE_MEMORY_DEVICE);
RAP_offd_j = hypre_CTAlloc(HYPRE_Int, RAP_offd_size, HYPRE_MEMORY_DEVICE);
}
if (RAP_offd_size == 0 && num_cols_offd_RAP != 0)
{
num_cols_offd_RAP = 0;
hypre_TFree(col_map_offd_RAP, HYPRE_MEMORY_HOST);
}
RA_diag_data_array = hypre_TAlloc(HYPRE_Real, num_cols_diag_A*num_threads, HYPRE_MEMORY_HOST);
RA_diag_j_array = hypre_TAlloc(HYPRE_Int, num_cols_diag_A*num_threads, HYPRE_MEMORY_HOST);
if (num_cols_offd_A)
{
RA_offd_data_array = hypre_TAlloc(HYPRE_Real, num_cols_offd_A*num_threads, HYPRE_MEMORY_HOST);
RA_offd_j_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_A*num_threads, HYPRE_MEMORY_HOST);
}
/*-----------------------------------------------------------------------
* Second Pass: Fill in RAP_diag_data and RAP_diag_j.
* Second Pass: Fill in RAP_offd_data and RAP_offd_j.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker,r_entry,r_a_product,r_a_p_product) HYPRE_SMP_SCHEDULE
#endif
for (ii = 0; ii < num_threads; ii++)
{
size = num_cols_diag_RT/num_threads;
rest = num_cols_diag_RT - size*num_threads;
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
/*-----------------------------------------------------------------------
* Initialize some stuff.
*-----------------------------------------------------------------------*/
P_marker = P_mark_array[ii];
A_marker = A_mark_array[ii];
for (ic = 0; ic < num_cols_diag_P+num_cols_offd_RAP; ic++)
{
P_marker[ic] = -1;
}
for (i = 0; i < num_nz_cols_A ; i++)
{
A_marker[i] = -1;
}
jj_count_diag = start_indexing;
jj_count_offd = start_indexing;
if (ii > 0)
{
jj_count_diag = jj_cnt_diag[ii-1];
jj_count_offd = jj_cnt_offd[ii-1];
}
// temporal matrix RA = R*A
// only need to store one row per thread because R*A and (R*A)*P are fused
// into one loop.
hypre_CSRMatrix RA_diag, RA_offd;
RA_diag.data = RA_diag_data_array + num_cols_diag_A*ii;
RA_diag.j = RA_diag_j_array + num_cols_diag_A*ii;
RA_diag.num_nonzeros = 0;
RA_offd.num_nonzeros = 0;
if (num_cols_offd_A)
{
RA_offd.data = RA_offd_data_array + num_cols_offd_A*ii;
RA_offd.j = RA_offd_j_array + num_cols_offd_A*ii;
}
/*-----------------------------------------------------------------------
* Loop over interior c-points.
*-----------------------------------------------------------------------*/
for (ic = ns; ic < ne; ic++)
{
/*--------------------------------------------------------------------
* Create diagonal entry, RAP_{ic,ic} and add entries of RAP_ext
*--------------------------------------------------------------------*/
jj_row_begin_diag = jj_count_diag;
jj_row_begin_offd = jj_count_offd;
RAP_diag_i[ic] = jj_row_begin_diag;
RAP_offd_i[ic] = jj_row_begin_offd;
HYPRE_Int ra_row_begin_diag = RA_diag.num_nonzeros;
HYPRE_Int ra_row_begin_offd = RA_offd.num_nonzeros;
if (square)
{
P_marker[ic] = jj_count_diag;
RAP_diag_data[jj_count_diag] = zero;
RAP_diag_j[jj_count_diag] = ic;
jj_count_diag++;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if (send_map_elmts_RT_inverse_map_initialized)
{
HYPRE_Int i = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, ic);
if (i != -1)
{
for (j = send_map_elmts_starts_RT_aggregated[i]; j < send_map_elmts_starts_RT_aggregated[i + 1]; j++)
{
HYPRE_Int jj = send_map_elmts_RT_aggregated[j];
for (k=RAP_ext_i[jj]; k < RAP_ext_i[jj+1]; k++)
{
jcol = (HYPRE_Int)RAP_ext_j[k];
if (jcol < num_cols_diag_P)
{
if (P_marker[jcol] < jj_row_begin_diag)
{
P_marker[jcol] = jj_count_diag;
RAP_diag_data[jj_count_diag]
= RAP_ext_data[k];
RAP_diag_j[jj_count_diag] = jcol;
jj_count_diag++;
}
else
RAP_diag_data[P_marker[jcol]]
+= RAP_ext_data[k];
}
else
{
if (P_marker[jcol] < jj_row_begin_offd)
{
P_marker[jcol] = jj_count_offd;
RAP_offd_data[jj_count_offd]
= RAP_ext_data[k];
RAP_offd_j[jj_count_offd]
= jcol-num_cols_diag_P;
jj_count_offd++;
}
else
RAP_offd_data[P_marker[jcol]]
+= RAP_ext_data[k];
}
}
}
} // if (set)
}
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
for (i=0; i < num_sends_RT; i++)
for (j = send_map_starts_RT[i]; j < send_map_starts_RT[i+1]; j++)
if (send_map_elmts_RT[j] == ic)
{
for (k=RAP_ext_i[j]; k < RAP_ext_i[j+1]; k++)
{
jcol = (HYPRE_Int)RAP_ext_j[k];
if (jcol < num_cols_diag_P)
{
if (P_marker[jcol] < jj_row_begin_diag)
{
P_marker[jcol] = jj_count_diag;
RAP_diag_data[jj_count_diag]
= RAP_ext_data[k];
RAP_diag_j[jj_count_diag] = jcol;
jj_count_diag++;
}
else
RAP_diag_data[P_marker[jcol]]
+= RAP_ext_data[k];
}
else
{
if (P_marker[jcol] < jj_row_begin_offd)
{
P_marker[jcol] = jj_count_offd;
RAP_offd_data[jj_count_offd]
= RAP_ext_data[k];
RAP_offd_j[jj_count_offd]
= jcol-num_cols_diag_P;
jj_count_offd++;
}
else
RAP_offd_data[P_marker[jcol]]
+= RAP_ext_data[k];
}
}
break;
}
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */
/*--------------------------------------------------------------------
* Loop over entries in row ic of R_diag and compute row ic of RA.
*--------------------------------------------------------------------*/
for (jj1 = R_diag_i[ic]; jj1 < R_diag_i[ic+1]; jj1++)
{
i1 = R_diag_j[jj1];
r_entry = R_diag_data[jj1];
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_offd.
*-----------------------------------------------------------------*/
if (num_cols_offd_A)
{
for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)
{
i2 = A_offd_j[jj2];
HYPRE_Real a_entry = A_offd_data[jj2];
HYPRE_Int marker = A_marker[i2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (marker < ra_row_begin_offd)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2] = RA_offd.num_nonzeros;
RA_offd.data[RA_offd.num_nonzeros - ra_row_begin_offd] = r_entry * a_entry;
RA_offd.j[RA_offd.num_nonzeros - ra_row_begin_offd] = i2;
RA_offd.num_nonzeros++;
}
/*--------------------------------------------------------------
* If i2 is previously visited ( A_marker[12]=ic ) it yields
* no new entries in RA and can just add new contributions.
*--------------------------------------------------------------*/
else
{
RA_offd.data[marker - ra_row_begin_offd] += r_entry * a_entry;
// JSP: compiler will more likely to generate FMA instructions
// when we don't eliminate common subexpressions of
// r_entry * A_offd_data[jj2] manually.
}
} // loop over entries in row i1 of A_offd
} // num_cols_offd_A
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_diag.
*-----------------------------------------------------------------*/
for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)
{
i2 = A_diag_j[jj2];
HYPRE_Real a_entry = A_diag_data[jj2];
HYPRE_Int marker = A_marker[i2+num_cols_offd_A];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (marker < ra_row_begin_diag)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2+num_cols_offd_A] = RA_diag.num_nonzeros;
RA_diag.data[RA_diag.num_nonzeros - ra_row_begin_diag] = r_entry * a_entry;
RA_diag.j[RA_diag.num_nonzeros - ra_row_begin_diag] = i2;
RA_diag.num_nonzeros++;
}
/*--------------------------------------------------------------
* If i2 is previously visited ( A_marker[12]=ic ) it yields
* no new entries in RA and can just add new contributions.
*--------------------------------------------------------------*/
else
{
RA_diag.data[marker - ra_row_begin_diag] += r_entry * a_entry;
}
} // loop over entries in row i1 of A_diag
} // loop over entries in row ic of R_diag
/*--------------------------------------------------------------------
* Loop over entries in row ic of RA_offd.
*--------------------------------------------------------------------*/
for (jj1 = ra_row_begin_offd; jj1 < RA_offd.num_nonzeros; jj1++)
{
i1 = RA_offd.j[jj1 - ra_row_begin_offd];
r_a_product = RA_offd.data[jj1 - ra_row_begin_offd];
/*-----------------------------------------------------------
* Loop over entries in row i1 of P_ext.
*-----------------------------------------------------------*/
for (jj2 = P_ext_diag_i[i1]; jj2 < P_ext_diag_i[i1+1]; jj2++)
{
i2 = P_ext_diag_j[jj2];
HYPRE_Real p_entry = P_ext_diag_data[jj2];
HYPRE_Int marker = P_marker[i2];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i2} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (marker < jj_row_begin_diag)
{
P_marker[i2] = jj_count_diag;
RAP_diag_data[jj_count_diag] = r_a_product * p_entry;
RAP_diag_j[jj_count_diag] = i2;
jj_count_diag++;
}
else
RAP_diag_data[marker] += r_a_product * p_entry;
}
for (jj2 = P_ext_offd_i[i1]; jj2 < P_ext_offd_i[i1+1]; jj2++)
{
i2 = map_Pext_to_RAP[P_ext_offd_j[jj2]] + num_cols_diag_P;
HYPRE_Real p_entry = P_ext_offd_data[jj2];
HYPRE_Int marker = P_marker[i2];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i2} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (marker < jj_row_begin_offd)
{
P_marker[i2] = jj_count_offd;
RAP_offd_data[jj_count_offd] = r_a_product * p_entry;
RAP_offd_j[jj_count_offd] = i2 - num_cols_diag_P;
jj_count_offd++;
}
else
RAP_offd_data[marker] += r_a_product * p_entry;
}
} // loop over entries in row ic of RA_offd
/*--------------------------------------------------------------------
* Loop over entries in row ic of RA_diag.
*--------------------------------------------------------------------*/
for (jj1 = ra_row_begin_diag; jj1 < RA_diag.num_nonzeros; jj1++)
{
HYPRE_Int i1 = RA_diag.j[jj1 - ra_row_begin_diag];
HYPRE_Real r_a_product = RA_diag.data[jj1 - ra_row_begin_diag];
/*-----------------------------------------------------------------
* Loop over entries in row i1 of P_diag.
*-----------------------------------------------------------------*/
for (jj2 = P_diag_i[i1]; jj2 < P_diag_i[i1+1]; jj2++)
{
i2 = P_diag_j[jj2];
HYPRE_Real p_entry = P_diag_data[jj2];
HYPRE_Int marker = P_marker[i2];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i2} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (marker < jj_row_begin_diag)
{
P_marker[i2] = jj_count_diag;
RAP_diag_data[jj_count_diag] = r_a_product * p_entry;
RAP_diag_j[jj_count_diag] = i2;
jj_count_diag++;
}
else
{
RAP_diag_data[marker] += r_a_product * p_entry;
}
}
if (num_cols_offd_P)
{
for (jj2 = P_offd_i[i1]; jj2 < P_offd_i[i1+1]; jj2++)
{
i2 = map_P_to_RAP[P_offd_j[jj2]] + num_cols_diag_P;
HYPRE_Real p_entry = P_offd_data[jj2];
HYPRE_Int marker = P_marker[i2];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i2} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (marker < jj_row_begin_offd)
{
P_marker[i2] = jj_count_offd;
RAP_offd_data[jj_count_offd] = r_a_product * p_entry;
RAP_offd_j[jj_count_offd] = i2 - num_cols_diag_P;
jj_count_offd++;
}
else
{
RAP_offd_data[marker] += r_a_product * p_entry;
}
}
} // num_cols_offd_P
} // loop over entries in row ic of RA_diag.
} // Loop over interior c-points.
hypre_TFree(P_mark_array[ii], HYPRE_MEMORY_HOST);
hypre_TFree(A_mark_array[ii], HYPRE_MEMORY_HOST);
} // omp parallel for
/* check if really all off-diagonal entries occurring in col_map_offd_RAP
are represented and eliminate if necessary */
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_RAP, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_offd_RAP; i++)
P_marker[i] = -1;
jj_count_offd = 0;
#ifdef HYPRE_USING_ATOMIC
#pragma omp parallel for private(i3) reduction(+:jj_count_offd) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < RAP_offd_size; i++)
{
i3 = RAP_offd_j[i];
#ifdef HYPRE_USING_ATOMIC
if (hypre_compare_and_swap(P_marker + i3, -1, 0) == -1)
{
jj_count_offd++;
}
#else
if (P_marker[i3])
{
P_marker[i3] = 0;
jj_count_offd++;
}
#endif
}
if (jj_count_offd < num_cols_offd_RAP)
{
new_col_map_offd_RAP = hypre_CTAlloc(HYPRE_BigInt, jj_count_offd, HYPRE_MEMORY_HOST);
jj_counter = 0;
for (i=0; i < num_cols_offd_RAP; i++)
if (!P_marker[i])
{
P_marker[i] = jj_counter;
new_col_map_offd_RAP[jj_counter++] = col_map_offd_RAP[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i3) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < RAP_offd_size; i++)
{
i3 = RAP_offd_j[i];
RAP_offd_j[i] = P_marker[i3];
}
num_cols_offd_RAP = jj_count_offd;
hypre_TFree(col_map_offd_RAP, HYPRE_MEMORY_HOST);
col_map_offd_RAP = new_col_map_offd_RAP;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
RAP = hypre_ParCSRMatrixCreate(comm, n_coarse_RT, n_coarse,
RT_partitioning, coarse_partitioning,
num_cols_offd_RAP, RAP_diag_size,
RAP_offd_size);
RAP_diag = hypre_ParCSRMatrixDiag(RAP);
hypre_CSRMatrixI(RAP_diag) = RAP_diag_i;
if (RAP_diag_size)
{
hypre_CSRMatrixData(RAP_diag) = RAP_diag_data;
hypre_CSRMatrixJ(RAP_diag) = RAP_diag_j;
}
RAP_offd = hypre_ParCSRMatrixOffd(RAP);
hypre_CSRMatrixI(RAP_offd) = RAP_offd_i;
if (num_cols_offd_RAP)
{
hypre_CSRMatrixData(RAP_offd) = RAP_offd_data;
hypre_CSRMatrixJ(RAP_offd) = RAP_offd_j;
hypre_ParCSRMatrixColMapOffd(RAP) = col_map_offd_RAP;
}
if (num_procs > 1)
{
/* hypre_GenerateRAPCommPkg(RAP, A); */
hypre_MatvecCommPkgCreate(RAP);
}
*RAP_ptr = RAP;
/*-----------------------------------------------------------------------
* Free R, P_ext and marker arrays.
*-----------------------------------------------------------------------*/
if (keepTranspose)
{
hypre_ParCSRMatrixDiagT(RT) = R_diag;
}
else
{
hypre_CSRMatrixDestroy(R_diag);
}
R_diag = NULL;
if (num_cols_offd_RT)
{
if (keepTranspose)
{
hypre_ParCSRMatrixOffdT(RT) = R_offd;
}
else
{
hypre_CSRMatrixDestroy(R_offd);
}
R_offd = NULL;
}
if (num_sends_RT || num_recvs_RT)
{
hypre_CSRMatrixDestroy(RAP_ext);
RAP_ext = NULL;
}
hypre_TFree(P_mark_array, HYPRE_MEMORY_HOST);
hypre_TFree(A_mark_array, HYPRE_MEMORY_HOST);
hypre_TFree(P_ext_diag_i, HYPRE_MEMORY_HOST);
hypre_TFree(P_ext_offd_i, HYPRE_MEMORY_HOST);
hypre_TFree(jj_cnt_diag, HYPRE_MEMORY_HOST);
hypre_TFree(jj_cnt_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd_P)
{
hypre_TFree(map_P_to_Pext, HYPRE_MEMORY_HOST);
hypre_TFree(map_P_to_RAP, HYPRE_MEMORY_HOST);
}
if (num_cols_offd_Pext)
{
hypre_TFree(col_map_offd_Pext, HYPRE_MEMORY_HOST);
hypre_TFree(map_Pext_to_RAP, HYPRE_MEMORY_HOST);
}
if (P_ext_diag_size)
{
hypre_TFree(P_ext_diag_data, HYPRE_MEMORY_HOST);
hypre_TFree(P_ext_diag_j, HYPRE_MEMORY_HOST);
}
if (P_ext_offd_size)
{
hypre_TFree(P_ext_offd_data, HYPRE_MEMORY_HOST);
hypre_TFree(P_ext_offd_j, HYPRE_MEMORY_HOST);
}
hypre_TFree(RA_diag_data_array, HYPRE_MEMORY_HOST);
hypre_TFree(RA_diag_j_array, HYPRE_MEMORY_HOST);
if (num_cols_offd_A)
{
hypre_TFree(RA_offd_data_array, HYPRE_MEMORY_HOST);
hypre_TFree(RA_offd_j_array, HYPRE_MEMORY_HOST);
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if (send_map_elmts_RT_inverse_map_initialized)
{
hypre_UnorderedIntMapDestroy(&send_map_elmts_RT_inverse_map);
}
hypre_TFree(send_map_elmts_starts_RT_aggregated, HYPRE_MEMORY_HOST);
hypre_TFree(send_map_elmts_RT_aggregated, HYPRE_MEMORY_HOST);
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RAP] += hypre_MPI_Wtime();
#endif
return(0);
}
|
simd-14.c | /* { dg-do run } */
/* { dg-additional-options "-msse2" { target sse2_runtime } } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
/* Runtime test for the OpenMP 'linear' clause on 'simd' and
   'parallel for simd' constructs.  Pattern repeated for each variant:
   - 'b' is declared linear with a step (2 or 3): after the loop it must
     equal its initial value plus step * (number of logical iterations);
   - 'c' is a '+' reduction that counts iterations where 'b' did not hold
     the expected per-iteration linear value, so it must stay 0;
   - the loop index must end with its sequential final value.
   Any mismatch aborts.  */
int
main ()
{
int i, j, b, c = 0;
/* simd, unit-stride loop, linear step 2.  */
i = 4; j = 4; b = 7;
#pragma omp simd linear(b:2) reduction(+:c)
for (i = 0; i < 64; i++)
{
c = c + (b != 7 + 2 * i);
b = b + 2;
}
if (c || i != 64 || b != 7 + 64 * 2)
__builtin_abort ();
/* simd, stride-4 loop, linear step 3 (16 logical iterations).  */
i = 4; j = 4; b = 7;
#pragma omp simd linear(b:3) reduction(+:c)
for (i = 0; i < 64; i += 4)
{
c = c + (b != 7 + i / 4 * 3);
b = b + 3;
}
if (c || i != 64 || b != 7 + 16 * 3)
__builtin_abort ();
/* Same as the first loop, but the index itself is also listed linear.  */
i = 4; j = 4; b = 7;
#pragma omp simd linear(i) linear(b:2) reduction(+:c)
for (i = 0; i < 64; i++)
{
c = c + (b != 7 + 2 * i);
b = b + 2;
}
if (c || i != 64 || b != 7 + 64 * 2)
__builtin_abort ();
/* Stride-4 loop with the index declared linear with matching step 4.  */
i = 4; j = 4; b = 7;
#pragma omp simd linear(i:4) linear(b:3) reduction(+:c)
for (i = 0; i < 64; i += 4)
{
c = c + (b != 7 + i / 4 * 3);
b = b + 3;
}
if (c || i != 64 || b != 7 + 16 * 3)
__builtin_abort ();
/* Collapsed 8x8 nest: linear 'b' must advance per logical iteration of
   the collapsed space (j fastest).  */
i = 4; j = 4; b = 7;
#pragma omp simd collapse (2) linear(b:2) reduction(+:c)
for (i = 0; i < 8; i++)
for (j = 0; j < 8; j++)
{
c = c + (b != 7 + 2 * j + 2 * 8 * i);
b = b + 2;
}
if (c || i != 8 || j != 8 || b != 7 + 64 * 2)
__builtin_abort ();
/* As above, plus explicit lastprivate of both indices.  */
i = 4; j = 4; b = 7;
#pragma omp simd collapse (2) lastprivate (i, j) linear(b:2) reduction(+:c)
for (i = 0; i < 8; i++)
for (j = 0; j < 8; j++)
{
c = c + (b != 7 + 2 * j + 2 * 8 * i);
b = b + 2;
}
if (c || i != 8 || j != 8 || b != 7 + 64 * 2)
__builtin_abort ();
/* Combined parallel-for-simd variants: linear must also compose with
   static,4 chunked scheduling across threads.  */
i = 4; j = 4; b = 7;
#pragma omp parallel for simd schedule (static, 4) linear(b:2) reduction(+:c)
for (i = 0; i < 64; i++)
{
c = c + (b != 7 + 2 * i);
b = b + 2;
}
if (c || i != 64 || b != 7 + 64 * 2)
__builtin_abort ();
i = 4; j = 4; b = 7;
#pragma omp parallel for simd schedule (static, 4) linear(b:3) reduction(+:c)
for (i = 0; i < 64; i += 4)
{
c = c + (b != 7 + i / 4 * 3);
b = b + 3;
}
if (c || i != 64 || b != 7 + 16 * 3)
__builtin_abort ();
i = 4; j = 4; b = 7;
#pragma omp parallel for simd schedule (static, 4) linear(i) linear(b:2) reduction(+:c)
for (i = 0; i < 64; i++)
{
c = c + (b != 7 + 2 * i);
b = b + 2;
}
if (c || i != 64 || b != 7 + 64 * 2)
__builtin_abort ();
i = 4; j = 4; b = 7;
#pragma omp parallel for simd schedule (static, 4) linear(i:4) linear(b:3) reduction(+:c)
for (i = 0; i < 64; i += 4)
{
c = c + (b != 7 + i / 4 * 3);
b = b + 3;
}
if (c || i != 64 || b != 7 + 16 * 3)
__builtin_abort ();
/* Collapsed parallel-for-simd, with and without explicit lastprivate.  */
i = 4; j = 4; b = 7;
#pragma omp parallel for simd lastprivate (i, j) collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)
for (i = 0; i < 8; i++)
for (j = 0; j < 8; j++)
{
c = c + (b != 7 + 2 * j + 2 * 8 * i);
b = b + 2;
}
if (c || i != 8 || j != 8 || b != 7 + 64 * 2)
__builtin_abort ();
i = 4; j = 4; b = 7;
#pragma omp parallel for simd collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)
for (i = 0; i < 8; i++)
for (j = 0; j < 8; j++)
{
c = c + (b != 7 + 2 * j + 2 * 8 * i);
b = b + 2;
}
if (c || i != 8 || j != 8 || b != 7 + 64 * 2)
__builtin_abort ();
return 0;
}
|
task_early_fulfill.c | // RUN: %libomp-compile && env OMP_NUM_THREADS='3' \
// RUN: %libomp-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// Checked gcc 10.1 still does not support detach clause on task construct.
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8, gcc-9, gcc-10
// gcc 11 introduced detach clause, but gomp interface in libomp has no support
// XFAIL: gcc-11, gcc-12
// clang supports detach clause since version 11.
// UNSUPPORTED: clang-10, clang-9, clang-8, clang-7
// icc compiler does not support detach clause.
// UNSUPPORTED: icc
#include "callback.h"
#include <omp.h>
/* OMPT test: an undeferred (if(0)) detachable task fulfills its own
   allow-completion event from inside the task body.  The runtime must then
   report the task's switch-out with prior_task_status=ompt_task_early_fulfill
   (checked by the FileCheck lines accompanying this test).  */
int main() {
#pragma omp parallel
#pragma omp master
  {
    /* Event handle bound to the task below by the detach clause.  */
    omp_event_handle_t event;
/* if(0) forces the task to execute immediately on the encountering thread,
   so omp_fulfill_event runs before the task region itself completes.  */
#pragma omp task detach(event) if (0)
    { omp_fulfill_event(event); }
/* taskwait must not hang: the event was already fulfilled.  */
#pragma omp taskwait
  }
  return 0;
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_schedule'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin:
// CHECK-SAME: parent_task_id=[[PARENT_TASK_ID:[0-9]+]],
// CHECK-SAME: parent_task_frame.exit=[[NULL]],
// CHECK-SAME: parent_task_frame.reenter=0x{{[0-f]+}},
// CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]],
// CHECK-SAME: requested_team_size=3,
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID]],
// CHECK-SAME: parent_task_frame.exit=0x{{[0-f]+}},
// CHECK-SAME: parent_task_frame.reenter=0x{{[0-f]+}},
// CHECK-SAME: new_task_id=[[TASK_ID:[0-9]+]],
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: first_task_id=[[IMPLICIT_TASK_ID]],
// CHECK-SAME: second_task_id=[[TASK_ID]],
// CHECK-SAME: prior_task_status=ompt_task_switch=7
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: first_task_id=[[TASK_ID]],
// CHECK-SAME: second_task_id=18446744073709551615,
// CHECK-SAME: prior_task_status=ompt_task_early_fulfill=5
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: first_task_id=[[TASK_ID]],
// CHECK-SAME: second_task_id=[[IMPLICIT_TASK_ID]],
// CHECK-SAME: prior_task_status=ompt_task_complete=1
|
convolutiondepthwise_3x3_int8.h | // SenseNets is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2018 SenseNets Technology Ltd. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
#if __aarch64__
/* Depthwise 3x3 convolution, stride 1, int8 input -> int32 output, NEON
   (aarch64 path).  One kernel of 9 signed-char weights per channel; channels
   are processed independently in parallel.  Rows are produced two at a time
   (outptr0 / outptr0n) so the middle input rows r1/r2 are reused for both
   output rows; columns are processed 8 at a time with vmull_s8/vmlal_s8
   accumulating into int16x8, then widened to int32 for the store.
   NOTE(review): the 9 s8*s8 products are accumulated in 16-bit lanes before
   widening; worst-case |sum| is 9*127*127 which exceeds INT16_MAX, so
   saturation/wraparound is possible for extreme inputs — presumably
   acceptable for quantized data, but worth confirming against the scalar
   remainder path, which accumulates in int.  */
static void convdw3x3s1_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
/* 9 weights for this channel's 3x3 kernel, laid out row-major.  */
const signed char* kernel = (const signed char *)_kernel + p*9;
int* outptr0 = out;
int* outptr0n = outptr0 + outw;  /* second output row of the pair */
const signed char* img0 = bottom_blob.channel(p);
/* Four consecutive input rows: r0..r2 feed output row i, r1..r3 feed i+1.  */
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w*2;
const signed char* r3 = img0 + w*3;
int i = 0;
/* Broadcast each kernel weight across an 8-lane s8 vector.  */
int8x8_t _k0 = vdup_n_s8(kernel[0]);
int8x8_t _k1 = vdup_n_s8(kernel[1]);
int8x8_t _k2 = vdup_n_s8(kernel[2]);
int8x8_t _k3 = vdup_n_s8(kernel[3]);
int8x8_t _k4 = vdup_n_s8(kernel[4]);
int8x8_t _k5 = vdup_n_s8(kernel[5]);
int8x8_t _k6 = vdup_n_s8(kernel[6]);
int8x8_t _k7 = vdup_n_s8(kernel[7]);
int8x8_t _k8 = vdup_n_s8(kernel[8]);
/* Main loop: two output rows per iteration.  */
for (; i+1 < outh; i+=2)
{
int nn = outw >> 3;      /* 8-wide vector iterations */
int remain = outw & 7;   /* scalar tail columns */
for (; nn >0; nn--)
{
/* Row r0: load 16 pixels, build the three shifted views of the window.  */
int8x8_t _r0 = vld1_s8(r0);
int8x8_t _r0n = vld1_s8(r0+8);
int8x8_t _r01 = vext_s8(_r0, _r0n, 1);
int8x8_t _r02 = vext_s8(_r0, _r0n, 2);
int16x8_t _sum0 = vmull_s8(_r0, _k0);
_sum0 = vmlal_s8(_sum0, _r01, _k1);
_sum0 = vmlal_s8(_sum0, _r02, _k2);
/* Row r1 contributes kernel row 1 to sum0 and kernel row 0 to sum1.  */
int8x8_t _r1 = vld1_s8(r1);
int8x8_t _r1n = vld1_s8(r1+8);
int8x8_t _r11 = vext_s8(_r1, _r1n, 1);
int8x8_t _r12 = vext_s8(_r1, _r1n, 2);
_sum0 = vmlal_s8(_sum0, _r1, _k3);
_sum0 = vmlal_s8(_sum0, _r11, _k4);
_sum0 = vmlal_s8(_sum0, _r12, _k5);
int16x8_t _sum1 = vmull_s8(_r1, _k0);
_sum1 = vmlal_s8(_sum1, _r11, _k1);
_sum1 = vmlal_s8(_sum1, _r12, _k2);
/* Row r2: kernel row 2 for sum0, kernel row 1 for sum1.  */
int8x8_t _r2 = vld1_s8(r2);
int8x8_t _r2n = vld1_s8(r2+8);
int8x8_t _r21 = vext_s8(_r2, _r2n, 1);
int8x8_t _r22 = vext_s8(_r2, _r2n, 2);
_sum0 = vmlal_s8(_sum0, _r2, _k6);
_sum0 = vmlal_s8(_sum0, _r21, _k7);
_sum0 = vmlal_s8(_sum0, _r22, _k8);
_sum1 = vmlal_s8(_sum1, _r2, _k3);
_sum1 = vmlal_s8(_sum1, _r21, _k4);
_sum1 = vmlal_s8(_sum1, _r22, _k5);
/* Row r3: kernel row 2 for sum1 only.  */
int8x8_t _r3 = vld1_s8(r3);
int8x8_t _r3n = vld1_s8(r3+8);
int8x8_t _r31 = vext_s8(_r3, _r3n, 1);
int8x8_t _r32 = vext_s8(_r3, _r3n, 2);
_sum1 = vmlal_s8(_sum1, _r3, _k6);
_sum1 = vmlal_s8(_sum1, _r31, _k7);
_sum1 = vmlal_s8(_sum1, _r32, _k8);
/* Widen the 16-bit accumulators to int32 and store 8 outputs per row.  */
int32x4_t sum0_s32 = vmovl_s16(vget_low_s16(_sum0));
int32x4_t sum0n_s32 = vmovl_s16(vget_high_s16(_sum0));
vst1q_s32(outptr0, sum0_s32);
vst1q_s32(outptr0+4, sum0n_s32);
int32x4_t sum1_s32 = vmovl_s16(vget_low_s16(_sum1));
int32x4_t sum1n_s32 = vmovl_s16(vget_high_s16(_sum1));
vst1q_s32(outptr0n, sum1_s32);
vst1q_s32(outptr0n+4, sum1n_s32);
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
outptr0 += 8;
outptr0n += 8;
}
/* Scalar tail: same two-row computation, one column at a time.  */
for (; remain>0; remain--)
{
//Todo Neon
int sum0 = 0;
int sum0n = 0;
sum0 += (int)r0[0] * kernel[0];
sum0 += (int)r0[1] * kernel[1];
sum0 += (int)r0[2] * kernel[2];
sum0 += (int)r1[0] * kernel[3];
sum0 += (int)r1[1] * kernel[4];
sum0 += (int)r1[2] * kernel[5];
sum0 += (int)r2[0] * kernel[6];
sum0 += (int)r2[1] * kernel[7];
sum0 += (int)r2[2] * kernel[8];
sum0n += (int)r1[0] * kernel[0];
sum0n += (int)r1[1] * kernel[1];
sum0n += (int)r1[2] * kernel[2];
sum0n += (int)r2[0] * kernel[3];
sum0n += (int)r2[1] * kernel[4];
sum0n += (int)r2[2] * kernel[5];
sum0n += (int)r3[0] * kernel[6];
sum0n += (int)r3[1] * kernel[7];
sum0n += (int)r3[2] * kernel[8];
*outptr0 = sum0;
*outptr0n = sum0n;
r0++;
r1++;
r2++;
r3++;
outptr0++;
outptr0n++;
}
/* Advance all pointers past the 2-pixel border plus one full extra row,
   since two input rows were consumed per output-row pair.  */
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr0 += outw;
outptr0n += outw;
}
/* Tail: a single remaining output row (odd outh).  Same scheme with only
   sum0 / rows r0..r2.  */
for (; i < outh; i++)
{
int nn = outw >> 3;
int remain = outw & 7;
for (; nn >0; nn--)
{
int8x8_t _r0 = vld1_s8(r0);
int8x8_t _r0n = vld1_s8(r0+8);
int8x8_t _r01 = vext_s8(_r0, _r0n, 1);
int8x8_t _r02 = vext_s8(_r0, _r0n, 2);
int16x8_t _sum0 = vmull_s8(_r0, _k0);
_sum0 = vmlal_s8(_sum0, _r01, _k1);
_sum0 = vmlal_s8(_sum0, _r02, _k2);
int8x8_t _r1 = vld1_s8(r1);
int8x8_t _r1n = vld1_s8(r1+8);
int8x8_t _r11 = vext_s8(_r1, _r1n, 1);
int8x8_t _r12 = vext_s8(_r1, _r1n, 2);
_sum0 = vmlal_s8(_sum0, _r1, _k3);
_sum0 = vmlal_s8(_sum0, _r11, _k4);
_sum0 = vmlal_s8(_sum0, _r12, _k5);
int8x8_t _r2 = vld1_s8(r2);
int8x8_t _r2n = vld1_s8(r2+8);
int8x8_t _r21 = vext_s8(_r2, _r2n, 1);
int8x8_t _r22 = vext_s8(_r2, _r2n, 2);
_sum0 = vmlal_s8(_sum0, _r2, _k6);
_sum0 = vmlal_s8(_sum0, _r21, _k7);
_sum0 = vmlal_s8(_sum0, _r22, _k8);
int32x4_t sum0_s32 = vmovl_s16(vget_low_s16(_sum0));
int32x4_t sum0n_s32 = vmovl_s16(vget_high_s16(_sum0));
vst1q_s32(outptr0, sum0_s32);
vst1q_s32(outptr0+4, sum0n_s32);
r0 += 8;
r1 += 8;
r2 += 8;
outptr0 += 8;
}
for (; remain>0; remain--)
{
int sum = 0;
sum += (int)r0[0] * kernel[0];
sum += (int)r0[1] * kernel[1];
sum += (int)r0[2] * kernel[2];
sum += (int)r1[0] * kernel[3];
sum += (int)r1[1] * kernel[4];
sum += (int)r1[2] * kernel[5];
sum += (int)r2[0] * kernel[6];
sum += (int)r2[1] * kernel[7];
sum += (int)r2[2] * kernel[8];
*outptr0 = sum;
r0++;
r1++;
r2++;
outptr0++;
}
/* Skip the 2-pixel right border of the input row.  */
r0 += 2;
r1 += 2;
r2 += 2;
}
}
}
// Depthwise 3x3 stride-2 convolution over int8 feature maps (aarch64
// intrinsics path). For each output channel p, channel p of bottom_blob is
// convolved with the p-th 3x3 int8 kernel and the raw 32-bit accumulators
// are written to top_blob (no bias / activation / requantization here).
static void convdw3x3s2_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// stride 2 consumes 2*outw input pixels per output row; tailstep skips the
// unread end of the current input row plus one whole row to reach the next
// row pair.
const int tailstep = w - 2*outw + w;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
// depthwise: one 3x3 filter (9 bytes) per channel
const signed char* kernel = (const signed char*)_kernel + p*9;
int* outptr = out;
const signed char* img = bottom_blob.channel(p);
// three consecutive input rows feed one output row
const signed char* r0 = img;
const signed char* r1 = img + w;
const signed char* r2 = img + w*2;
int i = 0;
// broadcast each kernel byte across an 8-lane vector
int8x8_t _k0 = vdup_n_s8(kernel[0]);
int8x8_t _k1 = vdup_n_s8(kernel[1]);
int8x8_t _k2 = vdup_n_s8(kernel[2]);
int8x8_t _k3 = vdup_n_s8(kernel[3]);
int8x8_t _k4 = vdup_n_s8(kernel[4]);
int8x8_t _k5 = vdup_n_s8(kernel[5]);
int8x8_t _k6 = vdup_n_s8(kernel[6]);
int8x8_t _k7 = vdup_n_s8(kernel[7]);
int8x8_t _k8 = vdup_n_s8(kernel[8]);
for (; i < outh; i++)
{
int nn = outw >> 3; // groups of 8 outputs handled with NEON
int remain = outw & 7; // scalar tail
for (; nn > 0; nn--)
{
// vld2 de-interleaves even/odd pixels: val[0] = x0 x2 x4 ..., val[1] = x1 x3 x5 ...
int8x8x2_t _r0 = vld2_s8(r0);
int8x8x2_t _r0n = vld2_s8(r0+16);
int8x8_t _r00 = _r0.val[0];
int8x8_t _r01 = _r0.val[1];
int8x8_t _r02 = vext_s8(_r00, _r0n.val[0], 1); // x2 x4 ... = third tap
// NOTE(review): nine s8*s8 products are accumulated in 16-bit lanes before
// widening; assumes quantized magnitudes are small enough that the running
// sum cannot overflow int16 -- confirm against the quantization bounds.
int16x8_t _sum = vmull_s8(_r00, _k0);
_sum = vmlal_s8(_sum, _r01, _k1);
_sum = vmlal_s8(_sum, _r02, _k2);
int8x8x2_t _r1 = vld2_s8(r1);
int8x8x2_t _r1n = vld2_s8(r1+16);
int8x8_t _r10 = _r1.val[0];
int8x8_t _r11 = _r1.val[1];
int8x8_t _r12 = vext_s8(_r10, _r1n.val[0], 1);
_sum = vmlal_s8(_sum, _r10, _k3);
_sum = vmlal_s8(_sum, _r11, _k4);
_sum = vmlal_s8(_sum, _r12, _k5);
int8x8x2_t _r2 = vld2_s8(r2);
int8x8x2_t _r2n = vld2_s8(r2+16);
int8x8_t _r20 = _r2.val[0];
int8x8_t _r21 = _r2.val[1];
int8x8_t _r22 = vext_s8(_r20, _r2n.val[0], 1);
_sum = vmlal_s8(_sum, _r20, _k6);
_sum = vmlal_s8(_sum, _r21, _k7);
_sum = vmlal_s8(_sum, _r22, _k8);
// widen the eight 16-bit sums to 32 bits and store
int32x4_t sum0_s32 = vmovl_s16(vget_low_s16(_sum));
int32x4_t sum0n_s32 = vmovl_s16(vget_high_s16(_sum));
vst1q_s32(outptr, sum0_s32);
vst1q_s32(outptr+4, sum0n_s32);
r0 += 16; // 8 outputs * stride 2
r1 += 16;
r2 += 16;
outptr += 8;
}
// scalar fallback for the last outw % 8 columns
for (; remain>0; remain--)
{
int sum = 0;
sum += (int)r0[0] * kernel[0];
sum += (int)r0[1] * kernel[1];
sum += (int)r0[2] * kernel[2];
sum += (int)r1[0] * kernel[3];
sum += (int)r1[1] * kernel[4];
sum += (int)r1[2] * kernel[5];
sum += (int)r2[0] * kernel[6];
sum += (int)r2[1] * kernel[7];
sum += (int)r2[2] * kernel[8];
*outptr = sum;
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
#else // __aarch64__
// Depthwise 3x3 stride-1 convolution over int8 feature maps (ARMv7
// inline-asm path). Processes two output rows per outer iteration (the
// middle input rows r1/r2 are shared between both), 8 outputs per asm loop
// iteration, with a scalar tail for the remaining columns.
// Output is the raw int32 accumulator (no bias / requantization).
static void convdw3x3s1_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
const signed char* kernel = (const signed char *)_kernel + p*9;
// two output-row cursors: row i and row i+1
int* outptr0_s32 = out0;
int* outptr0n_s32 = outptr0_s32 + outw;
const signed char* img0 = bottom_blob.channel(p);
// four consecutive input rows feed the two output rows
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w*2;
const signed char* r3 = img0 + w*3;
int i = 0;
for (; i+1 < outh; i+=2)
{
int nn = outw >> 3;
int remain = outw & 7;
if (nn > 0)
{
// preload the 9 kernel bytes into d26/d27; they stay resident across
// the whole asm loop below (duplicated lane by lane with vdup)
asm volatile(
"vld1.8 {d26-d27}, [%0] \n"
: "=r"(kernel) // %0
: "0"(kernel)
: "cc", "memory"
);
// main loop: each iteration emits 8 int32 sums for row i (sum0, via %1)
// and 8 for row i+1 (sum0n, via %2); rows r1/r2 are reused for both
asm volatile(
"0: \n"
"pld [%3, #128] \n"
"vld1.32 {d0-d1}, [%3] \n"// r0
"add %3, #8 \n"
"vext.8 d2, d0, d1, #1 \n"
"vext.8 d3, d0, d1, #2 \n"
"vdup.s8 d1, d26[0] \n"
"vdup.s8 d30, d26[1] \n"
"vdup.s8 d31, d26[2] \n"
"vmull.s8 q2, d0, d1 \n"// k0
"vmlal.s8 q2, d2, d30 \n"// k1
"vmlal.s8 q2, d3, d31 \n"// k2
"pld [%4, #128] \n"
"vld1.32 {d6-d7}, [%4] \n"// r1
"add %4, #8 \n"
"vext.8 d8, d6, d7, #1 \n"
"vext.8 d9, d6, d7, #2 \n"
"vdup.s8 d1, d26[3] \n"
"vdup.s8 d30, d26[4] \n"
"vdup.s8 d31, d26[5] \n"
"vmlal.s8 q2, d6, d1 \n"// k3
"vmlal.s8 q2, d8, d30 \n"// k4
"vmlal.s8 q2, d9, d31 \n"// k5
"pld [%5, #128] \n"
"vld1.32 {d10-d11}, [%5] \n"// r2
"add %5, #8 \n"
"vext.8 d12, d10, d11, #1 \n"
"vext.8 d13, d10, d11, #2 \n"
"vdup.s8 d1, d26[6] \n"
"vdup.s8 d30, d26[7] \n"
"vdup.s8 d31, d27[0] \n"
"vmlal.s8 q2, d10, d1 \n"// k6
"vmlal.s8 q2, d12, d30 \n"// k7
"vmlal.s8 q2, d13, d31 \n"// k8
"pld [%6, #128] \n"
"vld1.32 {d14-d15}, [%6] \n"// r3
"add %6, #8 \n"
"vext.8 d16, d14, d15, #1 \n"
"vext.8 d17, d14, d15, #2 \n"
"vmovl.s16 q9, d4 \n"
"vmovl.s16 q10, d5 \n"
"vst1.32 {d18-d21}, [%1]! \n"// sum0
"vdup.s8 d1, d26[0] \n"
"vdup.s8 d30, d26[1] \n"
"vdup.s8 d31, d26[2] \n"
"vmull.s8 q2, d6, d1 \n"// k0
"vmlal.s8 q2, d8, d30 \n"// k1
"vmlal.s8 q2, d9, d31 \n"// k2
"vdup.s8 d1, d26[3] \n"
"vdup.s8 d30, d26[4] \n"
"vdup.s8 d31, d26[5] \n"
"vmlal.s8 q2, d10, d1 \n"// k3
"vmlal.s8 q2, d12, d30 \n"// k4
"vmlal.s8 q2, d13, d31 \n"// k5
"vdup.s8 d1, d26[6] \n"
"vdup.s8 d30, d26[7] \n"
"vdup.s8 d31, d27[0] \n"
"vmlal.s8 q2, d14, d1 \n"// k6
"vmlal.s8 q2, d16, d30 \n"// k7
"vmlal.s8 q2, d17, d31 \n"// k8
"vmovl.s16 q9, d4 \n"
"vmovl.s16 q10, d5 \n"
"vst1.32 {d18-d21}, [%2]! \n"// sum0n
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0_s32), // %1
"=r"(outptr0n_s32), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3) // %6
: "0"(nn),
"1"(outptr0_s32),
"2"(outptr0n_s32),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
// scalar tail: one output column of rows i and i+1 per iteration
for (; remain>0; remain--)
{
//Todo Neon
int sum0 = 0;
int sum0n = 0;
sum0 += (int)r0[0] * kernel[0];
sum0 += (int)r0[1] * kernel[1];
sum0 += (int)r0[2] * kernel[2];
sum0 += (int)r1[0] * kernel[3];
sum0 += (int)r1[1] * kernel[4];
sum0 += (int)r1[2] * kernel[5];
sum0 += (int)r2[0] * kernel[6];
sum0 += (int)r2[1] * kernel[7];
sum0 += (int)r2[2] * kernel[8];
sum0n += (int)r1[0] * kernel[0];
sum0n += (int)r1[1] * kernel[1];
sum0n += (int)r1[2] * kernel[2];
sum0n += (int)r2[0] * kernel[3];
sum0n += (int)r2[1] * kernel[4];
sum0n += (int)r2[2] * kernel[5];
sum0n += (int)r3[0] * kernel[6];
sum0n += (int)r3[1] * kernel[7];
sum0n += (int)r3[2] * kernel[8];
*outptr0_s32 = sum0;
*outptr0n_s32 = sum0n;
r0++;
r1++;
r2++;
r3++;
outptr0_s32++;
outptr0n_s32++;
}
// advance two input rows: skip the 2-pixel right border plus one whole row
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
// output cursors already moved by one row; skip the interleaved row
outptr0_s32 += outw;
outptr0n_s32 += outw;
}
// leftover single output row (when outh is odd)
for (; i < outh; i++)
{
int nn = outw >> 3;
int remain = outw & 7;
if (nn > 0)
{
// reload kernel bytes into d26/d27 for the single-row loop
asm volatile(
"vld1.8 {d26-d27}, [%0] \n"
: "=r"(kernel) // %0
: "0"(kernel)
: "cc", "memory"
);
asm volatile(
"0: \n"
"pld [%2, #128] \n"
"vld1.32 {d0-d1}, [%2] \n"// r0
"add %2, #8 \n"
"vext.8 d2, d0, d1, #1 \n"
"vext.8 d3, d0, d1, #2 \n"
"vdup.s8 d1, d26[0] \n"
"vdup.s8 d30, d26[1] \n"
"vdup.s8 d31, d26[2] \n"
"vmull.s8 q2, d0, d1 \n"// k0
"vmlal.s8 q2, d2, d30 \n"// k1
"vmlal.s8 q2, d3, d31 \n"// k2
"pld [%3, #128] \n"
"vld1.32 {d6-d7}, [%3] \n"// r1
"add %3, #8 \n"
"vext.8 d8, d6, d7, #1 \n"
"vext.8 d9, d6, d7, #2 \n"
"vdup.s8 d1, d26[3] \n"
"vdup.s8 d30, d26[4] \n"
"vdup.s8 d31, d26[5] \n"
"vmlal.s8 q2, d6, d1 \n"// k3
"vmlal.s8 q2, d8, d30 \n"// k4
"vmlal.s8 q2, d9, d31 \n"// k5
"pld [%4, #128] \n"
"vld1.32 {d10-d11}, [%4] \n"// r2
"add %4, #8 \n"
"vext.8 d12, d10, d11, #1 \n"
"vext.8 d13, d10, d11, #2 \n"
"vdup.s8 d1, d26[6] \n"
"vdup.s8 d30, d26[7] \n"
"vdup.s8 d31, d27[0] \n"
"vmlal.s8 q2, d10, d1 \n"// k6
"vmlal.s8 q2, d12, d30 \n"// k7
"vmlal.s8 q2, d13, d31 \n"// k8
"vmovl.s16 q9, d4 \n"
"vmovl.s16 q10, d5 \n"
"vst1.32 {d18-d21}, [%1]! \n"// sum0
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0_s32), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr0_s32),
"2"(r0),
"3"(r1),
"4"(r2)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
for (; remain>0; remain--)
{
int sum = 0;
sum += (int)r0[0] * kernel[0];
sum += (int)r0[1] * kernel[1];
sum += (int)r0[2] * kernel[2];
sum += (int)r1[0] * kernel[3];
sum += (int)r1[1] * kernel[4];
sum += (int)r1[2] * kernel[5];
sum += (int)r2[0] * kernel[6];
sum += (int)r2[1] * kernel[7];
sum += (int)r2[2] * kernel[8];
*outptr0_s32 = sum;
r0++;
r1++;
r2++;
outptr0_s32++;
}
// skip the 2-pixel right border to reach the next input row
r0 += 2;
r1 += 2;
r2 += 2;
}
}
}
// Depthwise 3x3 stride-2 convolution over int8 feature maps (ARMv7
// inline-asm path). One output row at a time, 8 outputs per asm iteration,
// scalar tail for the rest. Output is the raw int32 accumulator.
static void convdw3x3s2_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// stride 2 consumes 2*outw pixels per row; jump over the unread remainder
// plus one full (skipped) input row
const int tailstep = w - 2*outw + w;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const signed char* kernel = (const signed char*)_kernel + p*9;
int* outptr_s32 = out;
const signed char* img = bottom_blob.channel(p);
const signed char* r0 = img;
const signed char* r1 = img + w;
const signed char* r2 = img + w*2;
int i = 0;
// broadcast kernel taps; passed into the asm below as %10..%18 ("w" operands)
int8x8_t _k0 = vdup_n_s8(kernel[0]);
int8x8_t _k1 = vdup_n_s8(kernel[1]);
int8x8_t _k2 = vdup_n_s8(kernel[2]);
int8x8_t _k3 = vdup_n_s8(kernel[3]);
int8x8_t _k4 = vdup_n_s8(kernel[4]);
int8x8_t _k5 = vdup_n_s8(kernel[5]);
int8x8_t _k6 = vdup_n_s8(kernel[6]);
int8x8_t _k7 = vdup_n_s8(kernel[7]);
int8x8_t _k8 = vdup_n_s8(kernel[8]);
for (; i < outh; i++)
{
int nn = outw >> 3;
int remain = outw & 7;
if (nn > 0)
{
// vld2 de-interleaves even/odd pixels; three partial 16-bit sums
// (q2/q3/q4) are combined, widened in q7/q8 and stored as 8 x int32
asm volatile(
"0: \n"
"pld [%2, #192] \n"
"vld2.s8 {d0-d1}, [%2]! \n" // r0
"vld2.s8 {d2-d3}, [%2] \n" //
"vext.8 d3, d0, d2, #1 \n"
"vmull.s8 q2, d0, %P10 \n" // k00
"vmull.s8 q3, d1, %P11 \n" // k01
"vmull.s8 q4, d3, %P12 \n" // k02
"veor q7, q0, q0 \n"
"veor q8, q0, q0 \n"
"pld [%3, #192] \n"
"vld2.s8 {d0-d1}, [%3]! \n" // r1
"vld2.s8 {d2-d3}, [%3] \n" //
"vext.8 d3, d0, d2, #1 \n"
"vmlal.s8 q2, d0, %P13 \n" // k03
"vmlal.s8 q3, d1, %P14 \n" // k04
"vmlal.s8 q4, d3, %P15 \n" // k05
"pld [%4, #192] \n"
"vld2.s8 {d0-d1}, [%4]! \n" // r2
"vld2.s8 {d2-d3}, [%4] \n" //
"vext.8 d3, d0, d2, #1 \n"
"vmlal.s8 q2, d0, %P16 \n" // k06
"vmlal.s8 q3, d1, %P17 \n" // k07
"vmlal.s8 q4, d3, %P18 \n" // k08
"vadd.s16 q2, q2, q3 \n"
"vadd.s16 q2, q2, q4 \n"
"vaddw.s16 q7, q7, d4 \n"
"vaddw.s16 q8, q8, d5 \n"
"vst1.32 {d14-d17}, [%1]! \n" // sum
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr_s32), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr_s32),
"2"(r0), // %7
"3"(r1), // %8
"4"(r2), // %9
"w"(_k0), // %10
"w"(_k1), // %11
"w"(_k2), // %12
"w"(_k3), // %13
"w"(_k4), // %14
"w"(_k5), // %15
"w"(_k6), // %16
"w"(_k7), // %17
"w"(_k8) // %18
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q6", "q7", "q8", "q13", "q14"
);
}
// scalar tail, stride 2
for (; remain>0; remain--)
{
int sum = 0;
sum += (int)r0[0] * kernel[0];
sum += (int)r0[1] * kernel[1];
sum += (int)r0[2] * kernel[2];
sum += (int)r1[0] * kernel[3];
sum += (int)r1[1] * kernel[4];
sum += (int)r1[2] * kernel[5];
sum += (int)r2[0] * kernel[6];
sum += (int)r2[1] * kernel[7];
sum += (int)r2[2] * kernel[8];
*outptr_s32 = sum;
r0 += 2;
r1 += 2;
r2 += 2;
outptr_s32++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
#endif
|
middle6r.c | /*
* Date: 11 December 2015
* Contact: Thomas Peyrin - thomas.peyrin@gmail.com
*/
/*
* Simulation of boomerang analysis for Skinny
* Date: March 21, 2020
* Author: Hosein Hadipour
* Contact: hsn.hadipour@gmail.com
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#include <stdbool.h>
// #define DEBUG 1
#define Nthreads 12
// Table that encodes the parameters of the various Skinny versions:
// (block size, key size, number of rounds)
//Skinny-64-64: 32 rounds
//Skinny-64-128: 36 rounds
//Skinny-64-192: 40 rounds
//Skinny-128-128: 40 rounds
//Skinny-128-256: 48 rounds
//Skinny-128-384: 56 rounds
int versions[6][3] = {{64, 64, 32}, {64, 128, 36}, {64, 192, 40}, {128, 128, 40}, {128, 256, 48}, {128, 384, 56}};
// Packing of data is done as follows (state[i][j] stands for row i and column j):
// 0 1 2 3
// 4 5 6 7
// 8 9 10 11
//12 13 14 15
// 4-bit Sbox
const unsigned char sbox_4[16] = {12, 6, 9, 0, 1, 10, 2, 11, 3, 8, 5, 13, 4, 14, 7, 15};
const unsigned char sbox_4_inv[16] = {3, 4, 6, 8, 12, 10, 1, 14, 9, 2, 5, 7, 0, 11, 13, 15};
// 8-bit Sbox
const unsigned char sbox_8[256] = {0x65, 0x4c, 0x6a, 0x42, 0x4b, 0x63, 0x43, 0x6b, 0x55, 0x75, 0x5a, 0x7a, 0x53, 0x73, 0x5b, 0x7b, 0x35, 0x8c, 0x3a, 0x81, 0x89, 0x33, 0x80, 0x3b, 0x95, 0x25, 0x98, 0x2a, 0x90, 0x23, 0x99, 0x2b, 0xe5, 0xcc, 0xe8, 0xc1, 0xc9, 0xe0, 0xc0, 0xe9, 0xd5, 0xf5, 0xd8, 0xf8, 0xd0, 0xf0, 0xd9, 0xf9, 0xa5, 0x1c, 0xa8, 0x12, 0x1b, 0xa0, 0x13, 0xa9, 0x05, 0xb5, 0x0a, 0xb8, 0x03, 0xb0, 0x0b, 0xb9, 0x32, 0x88, 0x3c, 0x85, 0x8d, 0x34, 0x84, 0x3d, 0x91, 0x22, 0x9c, 0x2c, 0x94, 0x24, 0x9d, 0x2d, 0x62, 0x4a, 0x6c, 0x45, 0x4d, 0x64, 0x44, 0x6d, 0x52, 0x72, 0x5c, 0x7c, 0x54, 0x74, 0x5d, 0x7d, 0xa1, 0x1a, 0xac, 0x15, 0x1d, 0xa4, 0x14, 0xad, 0x02, 0xb1, 0x0c, 0xbc, 0x04, 0xb4, 0x0d, 0xbd, 0xe1, 0xc8, 0xec, 0xc5, 0xcd, 0xe4, 0xc4, 0xed, 0xd1, 0xf1, 0xdc, 0xfc, 0xd4, 0xf4, 0xdd, 0xfd, 0x36, 0x8e, 0x38, 0x82, 0x8b, 0x30, 0x83, 0x39, 0x96, 0x26, 0x9a, 0x28, 0x93, 0x20, 0x9b, 0x29, 0x66, 0x4e, 0x68, 0x41, 0x49, 0x60, 0x40, 0x69, 0x56, 0x76, 0x58, 0x78, 0x50, 0x70, 0x59, 0x79, 0xa6, 0x1e, 0xaa, 0x11, 0x19, 0xa3, 0x10, 0xab, 0x06, 0xb6, 0x08, 0xba, 0x00, 0xb3, 0x09, 0xbb, 0xe6, 0xce, 0xea, 0xc2, 0xcb, 0xe3, 0xc3, 0xeb, 0xd6, 0xf6, 0xda, 0xfa, 0xd3, 0xf3, 0xdb, 0xfb, 0x31, 0x8a, 0x3e, 0x86, 0x8f, 0x37, 0x87, 0x3f, 0x92, 0x21, 0x9e, 0x2e, 0x97, 0x27, 0x9f, 0x2f, 0x61, 0x48, 0x6e, 0x46, 0x4f, 0x67, 0x47, 0x6f, 0x51, 0x71, 0x5e, 0x7e, 0x57, 0x77, 0x5f, 0x7f, 0xa2, 0x18, 0xae, 0x16, 0x1f, 0xa7, 0x17, 0xaf, 0x01, 0xb2, 0x0e, 0xbe, 0x07, 0xb7, 0x0f, 0xbf, 0xe2, 0xca, 0xee, 0xc6, 0xcf, 0xe7, 0xc7, 0xef, 0xd2, 0xf2, 0xde, 0xfe, 0xd7, 0xf7, 0xdf, 0xff};
const unsigned char sbox_8_inv[256] = {0xac, 0xe8, 0x68, 0x3c, 0x6c, 0x38, 0xa8, 0xec, 0xaa, 0xae, 0x3a, 0x3e, 0x6a, 0x6e, 0xea, 0xee, 0xa6, 0xa3, 0x33, 0x36, 0x66, 0x63, 0xe3, 0xe6, 0xe1, 0xa4, 0x61, 0x34, 0x31, 0x64, 0xa1, 0xe4, 0x8d, 0xc9, 0x49, 0x1d, 0x4d, 0x19, 0x89, 0xcd, 0x8b, 0x8f, 0x1b, 0x1f, 0x4b, 0x4f, 0xcb, 0xcf, 0x85, 0xc0, 0x40, 0x15, 0x45, 0x10, 0x80, 0xc5, 0x82, 0x87, 0x12, 0x17, 0x42, 0x47, 0xc2, 0xc7, 0x96, 0x93, 0x03, 0x06, 0x56, 0x53, 0xd3, 0xd6, 0xd1, 0x94, 0x51, 0x04, 0x01, 0x54, 0x91, 0xd4, 0x9c, 0xd8, 0x58, 0x0c, 0x5c, 0x08, 0x98, 0xdc, 0x9a, 0x9e, 0x0a, 0x0e, 0x5a, 0x5e, 0xda, 0xde, 0x95, 0xd0, 0x50, 0x05, 0x55, 0x00, 0x90, 0xd5, 0x92, 0x97, 0x02, 0x07, 0x52, 0x57, 0xd2, 0xd7, 0x9d, 0xd9, 0x59, 0x0d, 0x5d, 0x09, 0x99, 0xdd, 0x9b, 0x9f, 0x0b, 0x0f, 0x5b, 0x5f, 0xdb, 0xdf, 0x16, 0x13, 0x83, 0x86, 0x46, 0x43, 0xc3, 0xc6, 0x41, 0x14, 0xc1, 0x84, 0x11, 0x44, 0x81, 0xc4, 0x1c, 0x48, 0xc8, 0x8c, 0x4c, 0x18, 0x88, 0xcc, 0x1a, 0x1e, 0x8a, 0x8e, 0x4a, 0x4e, 0xca, 0xce, 0x35, 0x60, 0xe0, 0xa5, 0x65, 0x30, 0xa0, 0xe5, 0x32, 0x37, 0xa2, 0xa7, 0x62, 0x67, 0xe2, 0xe7, 0x3d, 0x69, 0xe9, 0xad, 0x6d, 0x39, 0xa9, 0xed, 0x3b, 0x3f, 0xab, 0xaf, 0x6b, 0x6f, 0xeb, 0xef, 0x26, 0x23, 0xb3, 0xb6, 0x76, 0x73, 0xf3, 0xf6, 0x71, 0x24, 0xf1, 0xb4, 0x21, 0x74, 0xb1, 0xf4, 0x2c, 0x78, 0xf8, 0xbc, 0x7c, 0x28, 0xb8, 0xfc, 0x2a, 0x2e, 0xba, 0xbe, 0x7a, 0x7e, 0xfa, 0xfe, 0x25, 0x70, 0xf0, 0xb5, 0x75, 0x20, 0xb0, 0xf5, 0x22, 0x27, 0xb2, 0xb7, 0x72, 0x77, 0xf2, 0xf7, 0x2d, 0x79, 0xf9, 0xbd, 0x7d, 0x29, 0xb9, 0xfd, 0x2b, 0x2f, 0xbb, 0xbf, 0x7b, 0x7f, 0xfb, 0xff};
// ShiftAndSwitchRows permutation
const unsigned char P[16] = {0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 12};
const unsigned char P_inv[16] = {0, 1, 2, 3, 5, 6, 7, 4, 10, 11, 8, 9, 15, 12, 13, 14};
// Tweakey permutation
const unsigned char TWEAKEY_P[16] = {9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7};
const unsigned char TWEAKEY_P_inv[16] = {8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1};
// round constants
const unsigned char RC[62] = {
0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, 0x37, 0x2F,
0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, 0x1D, 0x3A, 0x35, 0x2B,
0x16, 0x2C, 0x18, 0x30, 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E,
0x1C, 0x38, 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A,
0x34, 0x29, 0x12, 0x24, 0x08, 0x11, 0x22, 0x04, 0x09, 0x13,
0x26, 0x0c, 0x19, 0x32, 0x25, 0x0a, 0x15, 0x2a, 0x14, 0x28,
0x10, 0x20};
// Output stream for all trace / test-vector printing (opened by the caller).
FILE *fic;
// Seed the C library PRNG from the wall clock plus a caller-supplied offset,
// so that concurrent workers (different offsets) get distinct random streams.
// Prints the chosen seed so an experiment can be reproduced.
void init_prng(int offset) {
    unsigned int seed = 10 * time(NULL) + 11 * offset;
    srand(seed);
    printf("[+] PRNG initialized to 0x%08X\n", seed);
}
// Print a 4x4 cell matrix to the trace file `fic` as a hex string.
// 64-bit block versions pack two 4-bit cells per printed byte, row-major,
// with the even-indexed cell in the high nibble; 128-bit versions print one
// byte per cell.
void display_matrix(unsigned char state[4][4], int ver)
{
int i;
unsigned char input[16];
if (versions[ver][0] == 64)
{
// pair cells 2i (high nibble) and 2i+1 (low nibble) into one byte
for (i = 0; i < 8; i++)
input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
for (i = 0; i < 8; i++)
fprintf(fic, "%02x", input[i]);
}
else if (versions[ver][0] == 128)
{
for (i = 0; i < 16; i++)
input[i] = state[i >> 2][i & 0x3] & 0xFF;
for (i = 0; i < 16; i++)
fprintf(fic, "%02x", input[i]);
}
}
// Print the full cipher state to `fic`: the internal state followed by each
// tweakey array TK1..TKz, where z = key size / block size for this version.
void display_cipher_state(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
int k;
fprintf(fic, "S = ");
display_matrix(state, ver);
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
fprintf(fic, " - TK%i = ", k + 1);
display_matrix(keyCells[k], ver);
}
}
// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state
void AddKey(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
int i, j, k;
unsigned char pos;
unsigned char keyCells_tmp[3][4][4];
// apply the subtweakey to the internal state
// (z = versions[ver][1] / versions[ver][0] tweakey arrays are XORed into
// the top two rows only)
for (i = 0; i <= 1; i++)
{
for (j = 0; j < 4; j++)
{
state[i][j] ^= keyCells[0][i][j];
if (2 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j];
else if (3 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
}
}
// update the subtweakey states with the permutation
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
//application of the TWEAKEY permutation
pos = TWEAKEY_P[j + 4 * i];
keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
}
}
}
// update the subtweakey states with the LFSRs
// (only the two top rows are clocked; TK1 (k==0) is never clocked;
// TK2 uses a left-shifting LFSR, TK3 a right-shifting one, with 4- or
// 8-bit feedback depending on the cell size)
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i <= 1; i++)
{
for (j = 0; j < 4; j++)
{
//application of LFSRs for TK updates
if (k == 1)
{
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
}
else if (k == 2)
{
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j]) & 0x8) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
}
}
}
}
// commit the updated tweakey arrays
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
keyCells[k][i][j] = keyCells_tmp[k][i][j];
}
}
}
}
// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state (inverse function}
void AddKey_inv(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
int i, j, k;
unsigned char pos;
unsigned char keyCells_tmp[3][4][4];
// update the subtweakey states with the permutation
// (inverse order w.r.t. AddKey: un-permute first, then un-clock the LFSRs)
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
//application of the inverse TWEAKEY permutation
pos = TWEAKEY_P_inv[j + 4 * i];
keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
}
}
}
// update the subtweakey states with the LFSRs
// (after un-permuting, the cells clocked by AddKey now sit in rows 2-3,
// so the inverse LFSRs -- shift direction swapped -- are applied there)
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 2; i <= 3; i++)
{
for (j = 0; j < 4; j++)
{
//application of inverse LFSRs for TK updates
if (k == 1)
{
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8) ^ ((keyCells_tmp[k][i][j]) & 0x8);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
}
else if (k == 2)
{
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
}
}
}
}
// commit the rewound tweakey arrays
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
keyCells[k][i][j] = keyCells_tmp[k][i][j];
}
}
}
// apply the subtweakey to the internal state
for (i = 0; i <= 1; i++)
{
for (j = 0; j < 4; j++)
{
state[i][j] ^= keyCells[0][i][j];
if (2 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j];
else if (3 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
}
}
}
// Apply the constants: using a LFSR counter on 6 bits, we XOR the 6 bits to the first 6 bits of the internal state
void AddConstants(unsigned char state[4][4], int r)
{
state[0][0] ^= (RC[r] & 0xf);
state[1][0] ^= ((RC[r] >> 4) & 0x3);
state[2][0] ^= 0x2;
}
// Substitute every cell of the state through the Skinny 4-bit S-box.
void SubCell4(unsigned char state[4][4])
{
    for (int row = 0; row < 4; row++)
    {
        for (int col = 0; col < 4; col++)
        {
            state[row][col] = sbox_4[state[row][col]];
        }
    }
}
// Substitute every cell of the state through the inverse 4-bit S-box.
void SubCell4_inv(unsigned char state[4][4])
{
    for (int row = 0; row < 4; row++)
    {
        for (int col = 0; col < 4; col++)
        {
            state[row][col] = sbox_4_inv[state[row][col]];
        }
    }
}
// Substitute every cell of the state through the Skinny 8-bit S-box.
void SubCell8(unsigned char state[4][4])
{
    for (int row = 0; row < 4; row++)
    {
        for (int col = 0; col < 4; col++)
        {
            state[row][col] = sbox_8[state[row][col]];
        }
    }
}
// Substitute every cell of the state through the inverse 8-bit S-box.
void SubCell8_inv(unsigned char state[4][4])
{
    for (int row = 0; row < 4; row++)
    {
        for (int col = 0; col < 4; col++)
        {
            state[row][col] = sbox_8_inv[state[row][col]];
        }
    }
}
// Permute the 16 state cells with the ShiftRows permutation P:
// cell n of the new state is cell P[n] of the old state.
void ShiftRows(unsigned char state[4][4])
{
    unsigned char shifted[4][4];
    for (int cell = 0; cell < 16; cell++)
    {
        unsigned char src = P[cell];
        shifted[cell >> 2][cell & 0x3] = state[src >> 2][src & 0x3];
    }
    memcpy(state, shifted, sizeof(shifted));
}
// Permute the 16 state cells with the inverse ShiftRows permutation P_inv:
// cell n of the new state is cell P_inv[n] of the old state.
void ShiftRows_inv(unsigned char state[4][4])
{
    unsigned char unshifted[4][4];
    for (int cell = 0; cell < 16; cell++)
    {
        unsigned char src = P_inv[cell];
        unshifted[cell >> 2][cell & 0x3] = state[src >> 2][src & 0x3];
    }
    memcpy(state, unshifted, sizeof(unshifted));
}
// Multiply every state column by the Skinny diffusion matrix M:
// M = [1 0 1 1; 1 0 0 0; 0 1 1 0; 1 0 1 0]
// i.e. column (a,b,c,d) becomes (a^c^d, a, b^c, a^c).
void MixColumn(unsigned char state[4][4])
{
    for (int col = 0; col < 4; col++)
    {
        unsigned char a = state[0][col];
        unsigned char b = state[1][col];
        unsigned char c = state[2][col];
        unsigned char d = state[3][col];
        state[0][col] = (unsigned char)(a ^ c ^ d);
        state[1][col] = a;
        state[2][col] = (unsigned char)(b ^ c);
        state[3][col] = (unsigned char)(a ^ c);
    }
}
// Multiply every state column by M^-1, undoing MixColumn:
// column (a,b,c,d) becomes (b, b^c^d, b^d, a^d).
void MixColumn_inv(unsigned char state[4][4])
{
    for (int col = 0; col < 4; col++)
    {
        unsigned char a = state[0][col];
        unsigned char b = state[1][col];
        unsigned char c = state[2][col];
        unsigned char d = state[3][col];
        state[0][col] = b;
        state[1][col] = (unsigned char)(b ^ c ^ d);
        state[2][col] = (unsigned char)(b ^ d);
        state[3][col] = (unsigned char)(a ^ d);
    }
}
// decryption function of Skinny
// Decrypts `input` in place through r rounds. For 64-bit block versions two
// 4-bit cells are packed per byte (even cell in the high nibble); 128-bit
// versions use one byte per cell. The key schedule is first fast-forwarded
// r rounds (AddKey also advances the tweakey state) so it can be rewound
// with AddKey_inv inside the round loop.
void dec(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
unsigned char state[4][4];
unsigned char dummy[4][4] = {{0}};
unsigned char keyCells[3][4][4];
int i;
memset(keyCells, 0, 48);
// unpack ciphertext and tweakey bytes into 4x4 cell matrices
for (i = 0; i < 16; i++)
{
if (versions[ver][0] == 64)
{
if (i & 1)
{
state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
}
else
{
state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
}
}
else if (versions[ver][0] == 128)
{
state[i >> 2][i & 0x3] = input[i] & 0xFF;
keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
if (versions[ver][1] >= 256)
keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
if (versions[ver][1] >= 384)
keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
}
}
// fast-forward the tweakey schedule to round r; `dummy` absorbs the
// state XORs so only the tweakey arrays advance
for (i = r - 1; i >= 0; i--)
{
AddKey(dummy, keyCells, ver);
}
#ifdef DEBUG
fprintf(fic, "DEC - initial state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
// inverse rounds: undo MixColumn, ShiftRows, AddKey (which also rewinds
// the tweakey), the round constant (XOR, self-inverse), then the S-box
for (i = r - 1; i >= 0; i--)
{
MixColumn_inv(state);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after MixColumn_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
ShiftRows_inv(state);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after ShiftRows_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddKey_inv(state, keyCells, ver);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after AddKey_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddConstants(state, i);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after AddConstants_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
if (versions[ver][0] == 64)
SubCell4_inv(state);
else
SubCell8_inv(state);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after SubCell_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
}
#ifdef DEBUG
fprintf(fic, "DEC - final state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
// repack the recovered plaintext into the caller's buffer
if (versions[ver][0] == 64)
{
for (i = 0; i < 8; i++)
input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
}
else if (versions[ver][0] == 128)
{
for (i = 0; i < 16; i++)
input[i] = state[i >> 2][i & 0x3] & 0xFF;
}
}
// encryption function of Skinny
// Encrypts `input` in place through r rounds. Packing matches dec(): 64-bit
// block versions carry two 4-bit cells per byte (even cell in the high
// nibble); 128-bit versions one byte per cell. Round order: SubCell,
// AddConstants, AddKey (which also advances the tweakey), ShiftRows,
// MixColumn.
void enc(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
unsigned char state[4][4];
unsigned char keyCells[3][4][4];
int i;
memset(keyCells, 0, 48);
// unpack plaintext and tweakey bytes into 4x4 cell matrices
for (i = 0; i < 16; i++)
{
if (versions[ver][0] == 64)
{
if (i & 1)
{
state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
}
else
{
state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
}
}
else if (versions[ver][0] == 128)
{
state[i >> 2][i & 0x3] = input[i] & 0xFF;
keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
if (versions[ver][1] >= 256)
keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
if (versions[ver][1] >= 384)
keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
}
}
#ifdef DEBUG
fprintf(fic, "ENC - initial state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
for (i = 0; i < r; i++)
{
if (versions[ver][0] == 64)
SubCell4(state);
else
SubCell8(state);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after SubCell: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddConstants(state, i);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after AddConstants: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddKey(state, keyCells, ver);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after AddKey: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
ShiftRows(state);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after ShiftRows: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
MixColumn(state);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after MixColumn: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
} //The last subtweakey should not be added
#ifdef DEBUG
fprintf(fic, "ENC - final state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
// repack the ciphertext into the caller's buffer
if (versions[ver][0] == 64)
{
for (i = 0; i < 8; i++)
input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
}
else if (versions[ver][0] == 128)
{
for (i = 0; i < 16; i++)
input[i] = state[i >> 2][i & 0x3] & 0xFF;
}
}
// generate test vectors for all the versions of Skinny
// Writes 9 random (TK, P, C, P') tuples to `fic` for version `ver`, using a
// 10-round reduced cipher (enc/dec called with r = 10, not the version's
// full round count). P' is C decrypted back, so P' == P checks enc/dec
// consistency by eye.
void TestVectors(int ver)
{
unsigned char p[16];
unsigned char c[16];
unsigned char k[48];
int n;
for (n = 1; n < 10; n++)
{
int i;
for (i = 0; i < (versions[ver][0] >> 3); i++)
c[i] = p[i] = rand() & 0xff;
// NOTE(review): this echoes the raw plaintext to stdout, not `fic` --
// looks like leftover debug output; confirm before relying on stdout.
for (i = 0; i < (versions[ver][0] >> 3); i++)
printf("%02x", p[i]);
printf("\n");
for (i = 0; i < (versions[ver][1] >> 3); i++)
k[i] = rand() & 0xff;
fprintf(fic, "TK = ");
for (i = 0; i < (versions[ver][1] >> 3); i++)
fprintf(fic, "%02x", k[i]);
fprintf(fic, "\n");
fprintf(fic, "P = ");
for (i = 0; i < (versions[ver][0] >> 3); i++)
fprintf(fic, "%02x", p[i]);
fprintf(fic, "\n");
enc(c, k, ver, 10);
fprintf(fic, "C = ");
for (i = 0; i < (versions[ver][0] >> 3); i++)
fprintf(fic, "%02x", c[i]);
fprintf(fic, "\n");
dec(c, k, ver, 10);
fprintf(fic, "P' = ");
for (i = 0; i < (versions[ver][0] >> 3); i++)
fprintf(fic, "%02x", c[i]);
fprintf(fic, "\n\n");
}
}
/*
 * Run one boomerang experiment: N3 quartets under the related-key quartet
 * (k1, k1^dk1, k1^dk2, k1^dk1^dk2).
 *
 * r   : number of rounds passed to enc()/dec()
 * ver : index into versions[] (selects block/key size)
 * N3  : number of quartets to try with this key quartet
 * dp  : plaintext difference, dc : ciphertext difference
 * dk1 : top-path key difference, dk2 : bottom-path key difference
 * Returns the number of quartets whose returning difference equals dp.
 */
int boomerang(int r, int ver, int N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
    int i;
    unsigned char p1[16], p2[16];
    /* c3 is folded into the next plaintext below as extra entropy, so it
     * must start defined: previously it was read uninitialized on the
     * first loop iteration (undefined behavior). */
    unsigned char c3[16] = {0};
    unsigned char c4[16];
    unsigned char k1[48], k2[48], k3[48], k4[48];
    /* randomly choose k1 and derive the three related keys */
    for (i = 0; i < (versions[ver][1] >> 3); i++)
        k1[i] = rand() & 0xff;
    for (i = 0; i < (versions[ver][1] >> 3); i++)
        k2[i] = k1[i] ^ dk1[i];
    for (i = 0; i < (versions[ver][1] >> 3); i++)
        k3[i] = k1[i] ^ dk2[i];
    for (i = 0; i < (versions[ver][1] >> 3); i++)
        k4[i] = k2[i] ^ dk2[i];
    int num = 0;
    for (int t = 0; t < N3; t++)
    {
        /* fresh random p1 (previous decryption result mixed in), p2 = p1 ^ dp */
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            p1[i] = (rand() ^ c3[i]) & 0xff;
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            p2[i] = p1[i] ^ dp[i];
        /* top face: encrypt the pair under k1/k2 (enc works in place) */
        enc(p1, k1, ver, r);
        enc(p2, k2, ver, r);
        /* shift both ciphertexts by dc ... */
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            c3[i] = p1[i] ^ dc[i];
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            c4[i] = p2[i] ^ dc[i];
        /* ... and decrypt them back under k3/k4 */
        dec(c3, k3, ver, r);
        dec(c4, k4, ver, r);
        /* the boomerang "returns" if the new plaintext difference is dp */
        bool flag = true;
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            if ((c3[i] ^ c4[i]) != dp[i])
                flag = false;
        if (flag)
        {
            num++;
        }
    }
    return num;
}
/* Runs N1 parallel workers, each performing N2 bunches of N3 boomerang
 * queries for R-round Skinny; prints timing/probability statistics and
 * returns the total number of returning boomerangs. */
double send_boomerangs(int R, int ver, int N1, int N2, int N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
    // Parallel execution
    int NUM[N1];
    int counter;
    printf("#Rounds: %d rounds\n", R);
    /* FIX: compute the total query count in double — N1*N2*N3 evaluated in
     * int overflows for larger deg1/deg2 before the conversion to double. */
    printf("#Total Queries = (#Parallel threads) * (#Bunches per thread) * (#Queries per bunch) = %d * %d * %d = 2^(%f)\n", N1, N2, N3, log((double)N1 * (double)N2 * (double)N3) / log(2));
    clock_t clock_timer;
    double wall_timer;
    clock_timer = clock();
    wall_timer = omp_get_wtime();
    omp_set_num_threads(N1);
#pragma omp parallel for
    for (counter = 0; counter < N1; counter++)
    {
        int num = 0;
        int ID = omp_get_thread_num();
        init_prng(ID); /* per-thread PRNG seed */
        for (int j = 0; j < N2; j++)
        {
            num += boomerang(R, ver, N3, dp, dc, dk1, dk2);
        }
        /* FIX: index by the loop variable, not the thread id. If the runtime
         * grants fewer than N1 threads, thread-id indexing overwrites some
         * NUM slots and leaves others uninitialized before the sum below. */
        NUM[counter] = num;
    }
    printf("%s: %0.4f\n", "time on clock", (double)(clock() - clock_timer) / CLOCKS_PER_SEC);
    printf("%s: %0.4f\n", "time on wall", omp_get_wtime() - wall_timer);
    double sum = 0;
    double sum_temp = 1;
    for (int i = 0; i < N1; i++)
        sum += NUM[i];
    printf("sum = %f\n", sum);
    /* same int-overflow hazard as above: multiply in double */
    sum_temp = ((double)N1 * (double)N2 * (double)N3) / sum;
    printf("2^(-%f)\n\n", log(sum_temp) / log(2));
    printf("##########################\n");
    return sum;
}
/* Parses a hex string (two chars per byte) into the state-difference array
 * dx; the number of bytes is the block size of the chosen Skinny version. */
void convert_hexstr_to_statearray(int ver, char hex_str[], unsigned char dx[16])
{
    for (int i = 0; i < (versions[ver][0] >> 3); i++)
    {
        /* FIX: strtol requires a NUL-terminated string. The original 2-byte
         * buffer was unterminated, so strtol could read past it (UB). */
        char hex[3];
        hex[0] = hex_str[2 * i];
        hex[1] = hex_str[2 * i + 1];
        hex[2] = '\0';
        dx[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
    }
}
/* Parses a hex string (two chars per byte) into the tweakey-difference array
 * dt; the number of bytes is the tweakey size of the chosen Skinny version. */
void convert_hexstr_to_tweakarray(int ver, char hex_str[], unsigned char dt[48])
{
    for (int i = 0; i < (versions[ver][1] >> 3); i++)
    {
        /* FIX: strtol requires a NUL-terminated string. The original 2-byte
         * buffer was unterminated, so strtol could read past it (UB). */
        char hex[3];
        hex[0] = hex_str[2 * i];
        hex[1] = hex_str[2 * i + 1];
        hex[2] = '\0';
        dt[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
    }
}
/* Driver: configures one boomerang-distinguisher experiment (round count,
 * Skinny version, input/output/tweakey differences) and repeats it n times,
 * reporting the average returning probability as a power of two. */
int main()
{
// srand((unsigned)time(NULL)); // Initialization, should only be called once. int r = rand();
// init_prng(1);
// //test all versions of Skinny
// for (i = 0; i < (sizeof(versions) / sizeof(*versions)); i++)
// {
// sprintf(name, "test_vectors_%i_%i.txt", versions[i][0], versions[i][1]);
// fic = fopen(name, "w");
// fprintf(fic, "\n\nSkinny-%i/%i: \n", versions[i][0], versions[i][1]);
// TestVectors(i);
// fclose(fic);
// printf("Generating test vectors for Skinny-%i/%i - saved in file test_vectors_%i_%i.txt \n", versions[i][0], versions[i][1], versions[i][0], versions[i][1]);
// }
/* difference buffers filled from the hex strings below */
unsigned char dp[16];
unsigned char dc[16];
unsigned char dk1[48];
unsigned char dk2[48];
// #######################################################################################################
// #######################################################################################################
// ############################## User must change only the following lines ##############################
int n = 1000; // Number of independet experiments
int R = 6; // Number of rounds
int ver = 4; // Determine the version:
// [0 = Skinny-64-64]
// [1 = Skinny-64-128]
// [2 = Skinny-64-192]
// [3 = Skinny-128-128]
// [4 = Skinny-128-256]
// [5 = Skinny-128-384]
/* hex-encoded differences: plaintext (dp), ciphertext (dc), and the two
 * tweakey differences (dk1, dk2) of the boomerang quartet */
char dp_str[] = "00000000000000000006000000000000";
char dc_str[] = "00000000000000000000000000000000";
char dk1_str[] = "0000000000000000000000000200000000000000000000000000000004000000";
char dk2_str[] = "000000000000000000000000000000f80000000000000000000000000000007f";
// #######################################################################################################
// #######################################################################################################
convert_hexstr_to_statearray(ver, dp_str, dp);
convert_hexstr_to_statearray(ver, dc_str, dc);
convert_hexstr_to_tweakarray(ver, dk1_str, dk1);
convert_hexstr_to_tweakarray(ver, dk2_str, dk2);
//########################## Number of queries #########################
int N1 = Nthreads; // Number of parallel threads : N1
int deg1 = 12;
int deg2 = 12;
int N2 = 1 << deg1; // Number of bunches per thread: N2 = 2^(deg1)
int N3 = 1 << deg2; // Number of queries per bunch: N3 = 2^(deg2)
//################### Number of total queries : N1*N2*N3 ###############
/* accumulate returning boomerangs over all n experiments */
double sum = 0;
for (int i = 0; i < n; i++)
{
sum += send_boomerangs(R, ver, N1, N2, N3, dp, dc, dk1, dk2);
}
/* average probability = sum / (n*N1*N2*N3), reported as -log2 */
printf("\nAverage = 2^(-%0.4f)\n", (log(n) + log(N1) + log(N2) + log(N3) - log(sum))/log(2));
// sum = (double)(n * N1 * N2 * N3) / sum;
// printf("\nAverage = 2^(-%0.2f)\n", log(sum) / log(2));
return 0;
}
|
gather_nd_op_cpu_impl.h | /* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_KERNELS_GATHER_ND_OP_CPU_IMPL_H_
#define TENSORFLOW_CORE_KERNELS_GATHER_ND_OP_CPU_IMPL_H_
// Specialization of GatherNdSlice to CPU
#define EIGEN_USE_THREADS
#include <atomic>
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/gather_nd_op.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/util.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
namespace generator {
// Eigen generator that copies one params slice per output row for gather_nd.
// Invoked once per index row `loc`; IXDIM is the number of index dimensions.
template <typename T, typename Index, int IXDIM>
class GatherNdSliceGenerator {
public:
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE GatherNdSliceGenerator(
const Index slice_size, typename TTypes<Index>::ConstMatrix Tindices,
typename TTypes<T, IXDIM + 1>::ConstTensor Tparams,
typename TTypes<T>::Matrix Tout, std::atomic<Index>* error_loc)
: slice_size_(slice_size),
Tindices_(Tindices),
Tparams_(Tparams),
Tout_(Tout),
error_loc_(error_loc) {}
// Builds the (IXDIM+1)-d index into Tparams for row `loc` of Tindices.
// Returns true if any component is out of bounds for Tparams.
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool GenerateIndices(
const Index loc, Eigen::array<Eigen::DenseIndex, IXDIM + 1>* ix) const {
(*ix)[IXDIM] = 0;
bool out_of_bounds = false;
for (int i = 0; i < IXDIM; ++i) {
const Index ix_i = internal::SubtleMustCopy(Tindices_(loc, i));
(*ix)[i] = ix_i;
out_of_bounds |= !FastBoundsCheck(ix_i, Tparams_.dimension(i));
}
return out_of_bounds;
}
// Copies one slice of Tparams into output row `loc`; on an out-of-bounds
// index, records the offending row in error_loc_ and zero-fills the row.
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE int32
operator()(const Eigen::array<Eigen::DenseIndex, 1>& loc_array) const {
const Index loc = loc_array[0];
Eigen::array<Eigen::DenseIndex, IXDIM + 1> ix;
Eigen::array<Eigen::DenseIndex, 2> ix_out;
ix_out[0] = loc;
ix_out[1] = 0;
const bool out_of_bounds = GenerateIndices(loc, &ix);
if (TF_PREDICT_FALSE(out_of_bounds)) {
error_loc_->store(loc);
std::fill_n(&Tout_(ix_out), slice_size_, T());
} else {
std::copy_n(&Tparams_(ix), slice_size_, &Tout_(ix_out));
}
return static_cast<int32>(0); // Return something...
}
private:
const Index slice_size_;
const typename TTypes<Index>::ConstMatrix Tindices_;
const typename TTypes<T, IXDIM + 1>::ConstTensor Tparams_;
// mutable: operator() is const (Eigen generator contract) but writes output.
mutable typename TTypes<T>::Matrix Tout_;
// Shared across invocations; atomic because the generator may run in parallel.
std::atomic<Index>* error_loc_;
};
} // namespace generator
namespace functor {
// CPU specialization of the GatherNdSlice functor: copies `batch_size`
// slices selected by Tindices from Tparams into Tout.
// Returns -1 on success, or the row of the first out-of-bounds index found.
template <typename T, typename Index, int IXDIM>
struct GatherNdSlice<CPUDevice, T, Index, IXDIM> {
Index operator()(const CPUDevice& d, const Index slice_size,
typename TTypes<int32>::Scalar Tscratch,
typename TTypes<T, IXDIM + 1>::ConstTensor Tparams,
typename TTypes<Index>::ConstMatrix Tindices,
typename TTypes<T>::Matrix Tout) {
std::atomic<Index> error_loc(-1);
const Eigen::DenseIndex batch_size = Tindices.dimension(0);
// Two dimension-spec flavors depending on Eigen feature availability.
#if !defined(EIGEN_HAS_INDEX_LIST)
Eigen::Tensor<Eigen::DenseIndex, 1>::Dimensions reshape_dims{{ 1 }};
Eigen::array<Eigen::DenseIndex, 1> broadcast_dims{{ batch_size }};
#else
Eigen::IndexList<Eigen::type2index<1> > reshape_dims;
Eigen::IndexList<Eigen::DenseIndex> broadcast_dims;
broadcast_dims.set(0, batch_size);
#endif
generator::GatherNdSliceGenerator<T, Index, IXDIM> gather_nd_generator(
slice_size, Tindices, Tparams, Tout, &error_loc);
#if defined(INTEL_MKL) && defined(ENABLE_MKL)
// Eigen implementation below is not highly performant. gather_nd_generator
// does not seem to be called in parallel, leading to very poor performance.
// Additionally, since it uses scalar (Tscratch) to invoke 'generate', it
// needs to go through redundant operations like 'reshape', 'broadcast' and
// 'sum'. OpenMP loop below essentially does same thing as Eigen code, but
// is considerably more efficient.
#pragma omp parallel for
for (Eigen::DenseIndex i = 0; i < batch_size; i++) {
const Eigen::array<Eigen::DenseIndex, 1> loc{i};
gather_nd_generator(loc);
}
#else // INTEL_MKL && ENABLE_MKL
// Reshape/broadcast a scalar to batch_size so the generator runs once per
// row; the sum only forces evaluation, its value is discarded.
Tscratch.device(d) = Tscratch.reshape(reshape_dims)
.broadcast(broadcast_dims)
.generate(gather_nd_generator)
.sum();
#endif // INTEL_MKL && ENABLE_MKL
// error_loc() returns -1 if there's no out-of-bounds index,
// otherwise it returns the location of an OOB index in Tindices.
return error_loc.load();
}
};
// Explicitly instantiates the CPU GatherNdSlice functor for one (T, Index)
// pair at the index depth selected by CPU_PROVIDED_IXDIM.
#define REGISTER_GATHER_ND_FULL(T, Index) \
template Index GatherNdSlice<CPUDevice, T, Index, CPU_PROVIDED_IXDIM>:: \
operator()(const CPUDevice& d, const Index slice_size, \
typename TTypes<int32>::Scalar Tscratch, \
typename TTypes<T, CPU_PROVIDED_IXDIM + 1>::ConstTensor Tparams, \
typename TTypes<Index>::ConstMatrix Tindices, \
typename TTypes<T>::Matrix Tout);
// Instantiates both supported index types for a given element type.
#define REGISTER_GATHER_ND_CPU(type) \
REGISTER_GATHER_ND_FULL(type, int32); \
REGISTER_GATHER_ND_FULL(type, int64)
// Instantiate for all standard and quantized TensorFlow dtypes.
TF_CALL_ALL_TYPES(REGISTER_GATHER_ND_CPU);
TF_CALL_QUANTIZED_TYPES(REGISTER_GATHER_ND_CPU);
} // namespace functor
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_GATHER_ND_OP_CPU_IMPL_H_
|
coordinate_common.h | /*!
* Copyright 2018 by Contributors
* \author Rory Mitchell
*/
#pragma once
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include <limits>
#include "xgboost/data.h"
#include "xgboost/parameter.h"
#include "./param.h"
#include "../gbm/gblinear_model.h"
#include "../common/random.h"
namespace xgboost {
namespace linear {
// Training parameters for the coordinate-descent linear updater.
struct CoordinateParam : public XGBoostParameter<CoordinateParam> {
// top_k = 0 means "use all features" (see describe() below).
int top_k;
DMLC_DECLARE_PARAMETER(CoordinateParam) {
DMLC_DECLARE_FIELD(top_k)
.set_lower_bound(0)
.set_default(0)
.describe("The number of top features to select in 'thrifty' feature_selector. "
"The value of zero means using all the features.");
}
};
/**
* \brief Calculate change in weight for a given feature. Applies l1/l2 penalty normalised by the
* number of training instances.
*
* \param sum_grad The sum gradient.
* \param sum_hess The sum hess.
* \param w The weight.
* \param reg_alpha Unnormalised L1 penalty.
* \param reg_lambda Unnormalised L2 penalty.
*
* \return The weight update.
*/
inline double CoordinateDelta(double sum_grad, double sum_hess, double w,
                              double reg_alpha, double reg_lambda) {
  // Negligible curvature: no reliable update, leave the weight unchanged.
  if (sum_hess < 1e-5f) return 0.0f;
  const double grad_l2 = sum_grad + reg_lambda * w;
  const double hess_l2 = sum_hess + reg_lambda;
  const double newton_target = w - grad_l2 / hess_l2;
  // The sign of the L2-regularised Newton target picks the side of the L1
  // soft-threshold; both branches clamp the step at -w, so the update may
  // drive the weight to zero but not beyond.
  return newton_target >= 0
             ? std::max(-(grad_l2 + reg_alpha) / hess_l2, -w)
             : std::min(-(grad_l2 - reg_alpha) / hess_l2, -w);
}
/**
* \brief Calculate update to bias.
*
* \param sum_grad The sum gradient.
* \param sum_hess The sum hess.
*
* \return The weight update.
*/
inline double CoordinateDeltaBias(double sum_grad, double sum_hess) {
  // Plain (unregularised) Newton step for the bias term.
  const double newton_step = sum_grad / sum_hess;
  return -newton_step;
}
/**
* \brief Get the gradient with respect to a single feature.
*
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param fidx The target feature.
* \param gpair Gradients.
* \param p_fmat The feature matrix.
*
* \return The gradient and diagonal Hessian entry for a given feature.
*/
inline std::pair<double, double> GetGradient(int group_idx, int num_group, int fidx,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat) {
double sum_grad = 0.0, sum_hess = 0.0;
// Walk the CSC column for feature fidx; gradients are stored row-major by
// instance with num_group entries per instance.
for (const auto &batch : p_fmat->GetBatches<CSCPage>()) {
auto col = batch[fidx];
const auto ndata = static_cast<bst_omp_uint>(col.size());
for (bst_omp_uint j = 0; j < ndata; ++j) {
const bst_float v = col[j].fvalue;
auto &p = gpair[col[j].index * num_group + group_idx];
// Skip negative-hessian entries (presumably marking excluded
// instances — same convention is used throughout this file).
if (p.GetHess() < 0.0f) continue;
sum_grad += p.GetGrad() * v;
sum_hess += p.GetHess() * v * v;
}
}
return std::make_pair(sum_grad, sum_hess);
}
/**
* \brief Get the gradient with respect to a single feature. Row-wise multithreaded.
*
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param fidx The target feature.
* \param gpair Gradients.
* \param p_fmat The feature matrix.
*
* \return The gradient and diagonal Hessian entry for a given feature.
*/
inline std::pair<double, double> GetGradientParallel(int group_idx, int num_group, int fidx,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat) {
double sum_grad = 0.0, sum_hess = 0.0;
for (const auto &batch : p_fmat->GetBatches<CSCPage>()) {
auto col = batch[fidx];
const auto ndata = static_cast<bst_omp_uint>(col.size());
// Same accumulation as GetGradient, parallelised over the column entries
// with an OpenMP reduction on both sums.
#pragma omp parallel for schedule(static) reduction(+ : sum_grad, sum_hess)
for (bst_omp_uint j = 0; j < ndata; ++j) {
const bst_float v = col[j].fvalue;
auto &p = gpair[col[j].index * num_group + group_idx];
// negative hessian marks entries to skip (file-wide convention)
if (p.GetHess() < 0.0f) continue;
sum_grad += p.GetGrad() * v;
sum_hess += p.GetHess() * v * v;
}
}
return std::make_pair(sum_grad, sum_hess);
}
/**
* \brief Get the gradient with respect to the bias. Row-wise multithreaded.
*
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param gpair Gradients.
* \param p_fmat The feature matrix.
*
* \return The gradient and diagonal Hessian entry for the bias.
*/
inline std::pair<double, double> GetBiasGradientParallel(int group_idx, int num_group,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat) {
double sum_grad = 0.0, sum_hess = 0.0;
const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);
// Bias column is implicitly all-ones, so no feature values are involved;
// sum the raw gradients over every row, in parallel.
#pragma omp parallel for schedule(static) reduction(+ : sum_grad, sum_hess)
for (bst_omp_uint i = 0; i < ndata; ++i) {
auto &p = gpair[i * num_group + group_idx];
// negative hessian marks entries to skip (file-wide convention)
if (p.GetHess() >= 0.0f) {
sum_grad += p.GetGrad();
sum_hess += p.GetHess();
}
}
return std::make_pair(sum_grad, sum_hess);
}
/**
* \brief Updates the gradient vector with respect to a change in weight.
*
* \param fidx The feature index.
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param dw The change in weight.
* \param in_gpair The gradient vector to be updated.
* \param p_fmat The input feature matrix.
*/
inline void UpdateResidualParallel(int fidx, int group_idx, int num_group,
float dw, std::vector<GradientPair> *in_gpair,
DMatrix *p_fmat) {
// Nothing to do for a zero step.
if (dw == 0.0f) return;
for (const auto &batch : p_fmat->GetBatches<CSCPage>()) {
auto col = batch[fidx];
// update grad value
const auto num_row = static_cast<bst_omp_uint>(col.size());
// Each column entry touches a distinct gpair slot, so no reduction needed.
#pragma omp parallel for schedule(static)
for (bst_omp_uint j = 0; j < num_row; ++j) {
GradientPair &p = (*in_gpair)[col[j].index * num_group + group_idx];
if (p.GetHess() < 0.0f) continue;
// First-order gradient shifts by hess * x * dw; hessian is unchanged.
p += GradientPair(p.GetHess() * col[j].fvalue * dw, 0);
}
}
}
/**
* \brief Updates the gradient vector based on a change in the bias.
*
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param dbias The change in bias.
* \param in_gpair The gradient vector to be updated.
* \param p_fmat The input feature matrix.
*/
inline void UpdateBiasResidualParallel(int group_idx, int num_group, float dbias,
std::vector<GradientPair> *in_gpair,
DMatrix *p_fmat) {
// Nothing to do for a zero step.
if (dbias == 0.0f) return;
const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);
// Every row is affected by the bias; each iteration writes a distinct slot.
#pragma omp parallel for schedule(static)
for (bst_omp_uint i = 0; i < ndata; ++i) {
GradientPair &g = (*in_gpair)[i * num_group + group_idx];
if (g.GetHess() < 0.0f) continue;
// gradient shifts by hess * dbias (implicit feature value of 1)
g += GradientPair(g.GetHess() * dbias, 0);
}
}
/**
* \brief Abstract class for stateful feature selection or ordering
* in coordinate descent algorithms.
*/
class FeatureSelector {
public:
/*! \brief factory method */
static FeatureSelector *Create(int choice);
/*! \brief virtual destructor */
virtual ~FeatureSelector() = default;
/**
* \brief Setting up the selector state prior to looping through features.
*
* Default implementation is a no-op; stateless selectors need no setup.
*
* \param model The model.
* \param gpair The gpair.
* \param p_fmat The feature matrix.
* \param alpha Regularisation alpha.
* \param lambda Regularisation lambda.
* \param param A parameter with algorithm-dependent use.
*/
virtual void Setup(const gbm::GBLinearModel &,
const std::vector<GradientPair> &,
DMatrix *,
float , float , int ) {}
/**
* \brief Select next coordinate to update.
*
* \param iteration The iteration in a loop through features
* \param model The model.
* \param group_idx Zero-based index of the group.
* \param gpair The gpair.
* \param p_fmat The feature matrix.
* \param alpha Regularisation alpha.
* \param lambda Regularisation lambda.
*
* \return The index of the selected feature. -1 indicates none selected.
*/
virtual int NextFeature(int iteration,
const gbm::GBLinearModel &model,
int group_idx,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda) = 0;
};
/**
* \brief Deterministic selection by cycling through features one at a time.
*/
class CyclicFeatureSelector : public FeatureSelector {
public:
// Stateless: simply cycles through feature indices in order.
int NextFeature(int iteration, const gbm::GBLinearModel &model,
int , const std::vector<GradientPair> &,
DMatrix *, float, float) override {
return iteration % model.learner_model_param->num_feature;
}
};
/**
* \brief Similar to Cyclic but with random feature shuffling prior to each update.
* \note Its randomness is controllable by setting a random seed.
*/
class ShuffleFeatureSelector : public FeatureSelector {
public:
void Setup(const gbm::GBLinearModel &model,
const std::vector<GradientPair> &g,
DMatrix *, float, float, int) override {
// Lazily build the identity permutation on first use, then reshuffle
// before every update round.
if (feat_index_.size() == 0) {
feat_index_.resize(model.learner_model_param->num_feature);
std::iota(feat_index_.begin(), feat_index_.end(), 0);
}
std::shuffle(feat_index_.begin(), feat_index_.end(), common::GlobalRandom());
}
int NextFeature(int iteration, const gbm::GBLinearModel &model,
int, const std::vector<GradientPair> &,
DMatrix *, float, float) override {
// Cyclic walk over the shuffled permutation.
return feat_index_[iteration % model.learner_model_param->num_feature];
}
protected:
// Current permutation of feature indices.
std::vector<bst_uint> feat_index_;
};
/**
* \brief A random (with replacement) coordinate selector.
* \note Its randomness is controllable by setting a random seed.
*/
class RandomFeatureSelector : public FeatureSelector {
public:
// Draws a feature uniformly at random (with replacement) each call.
int NextFeature(int, const gbm::GBLinearModel &model,
int, const std::vector<GradientPair> &,
DMatrix *, float, float) override {
return common::GlobalRandom()() % model.learner_model_param->num_feature;
}
};
/**
* \brief Select coordinate with the greatest gradient magnitude.
* \note It has O(num_feature^2) complexity. It is fully deterministic.
*
* \note It allows restricting the selection to top_k features per group with
* the largest magnitude of univariate weight change, by passing the top_k value
* through the `param` argument of Setup(). That would reduce the complexity to
* O(num_feature*top_k).
*/
class GreedyFeatureSelector : public FeatureSelector {
public:
void Setup(const gbm::GBLinearModel &model,
const std::vector<GradientPair> &,
DMatrix *, float, float, int param) override {
top_k_ = static_cast<bst_uint>(param);
const bst_uint ngroup = model.learner_model_param->num_output_group;
// param <= 0 means "no top-k limit"
if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max();
if (counter_.size() == 0) {
counter_.resize(ngroup);
gpair_sums_.resize(model.learner_model_param->num_feature * ngroup);
}
// Reset per-group selection counters at the start of each round.
for (bst_uint gid = 0u; gid < ngroup; ++gid) {
counter_[gid] = 0u;
}
}
int NextFeature(int, const gbm::GBLinearModel &model,
int group_idx, const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda) override {
// k-th selected feature for a group
auto k = counter_[group_idx]++;
// stop after either reaching top-K or going through all the features in a group
if (k >= top_k_ || counter_[group_idx] == model.learner_model_param->num_feature) return -1;
const int ngroup = model.learner_model_param->num_output_group;
const bst_omp_uint nfeat = model.learner_model_param->num_feature;
// Calculate univariate gradient sums
std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.));
for (const auto &batch : p_fmat->GetBatches<CSCPage>()) {
// Parallel over features: each iteration writes a distinct sums slot.
#pragma omp parallel for schedule(static)
for (bst_omp_uint i = 0; i < nfeat; ++i) {
const auto col = batch[i];
const bst_uint ndata = col.size();
auto &sums = gpair_sums_[group_idx * nfeat + i];
for (bst_uint j = 0u; j < ndata; ++j) {
const bst_float v = col[j].fvalue;
auto &p = gpair[col[j].index * ngroup + group_idx];
// negative hessian marks entries to skip (file-wide convention)
if (p.GetHess() < 0.f) continue;
sums.first += p.GetGrad() * v;
sums.second += p.GetHess() * v * v;
}
}
}
// Find a feature with the largest magnitude of weight change
int best_fidx = 0;
double best_weight_update = 0.0f;
for (bst_omp_uint fidx = 0; fidx < nfeat; ++fidx) {
auto &s = gpair_sums_[group_idx * nfeat + fidx];
float dw = std::abs(static_cast<bst_float>(
CoordinateDelta(s.first, s.second, model[fidx][group_idx], alpha, lambda)));
if (dw > best_weight_update) {
best_weight_update = dw;
best_fidx = fidx;
}
}
return best_fidx;
}
protected:
// Maximum selections per group per round (max() when unlimited).
bst_uint top_k_;
// Per-group count of features selected so far in this round.
std::vector<bst_uint> counter_;
// (sum_grad, sum_hess) per (group, feature), laid out group-major.
std::vector<std::pair<double, double>> gpair_sums_;
};
/**
* \brief Thrifty, approximately-greedy feature selector.
*
* \note Prior to cyclic updates, reorders features in descending magnitude of
* their univariate weight changes. This operation is multithreaded and is a
* linear complexity approximation of the quadratic greedy selection.
*
* \note It allows restricting the selection to top_k features per group with
* the largest magnitude of univariate weight change, by passing the top_k value
* through the `param` argument of Setup().
*/
class ThriftyFeatureSelector : public FeatureSelector {
public:
void Setup(const gbm::GBLinearModel &model,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda, int param) override {
top_k_ = static_cast<bst_uint>(param);
// param <= 0 means "no top-k limit"
if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max();
const bst_uint ngroup = model.learner_model_param->num_output_group;
const bst_omp_uint nfeat = model.learner_model_param->num_feature;
// Lazily allocate the per-(group, feature) work buffers once.
if (deltaw_.size() == 0) {
deltaw_.resize(nfeat * ngroup);
sorted_idx_.resize(nfeat * ngroup);
counter_.resize(ngroup);
gpair_sums_.resize(nfeat * ngroup);
}
// Calculate univariate gradient sums
std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.));
for (const auto &batch : p_fmat->GetBatches<CSCPage>()) {
// column-parallel is usually faster than row-parallel
#pragma omp parallel for schedule(static)
for (bst_omp_uint i = 0; i < nfeat; ++i) {
const auto col = batch[i];
const bst_uint ndata = col.size();
for (bst_uint gid = 0u; gid < ngroup; ++gid) {
auto &sums = gpair_sums_[gid * nfeat + i];
for (bst_uint j = 0u; j < ndata; ++j) {
const bst_float v = col[j].fvalue;
auto &p = gpair[col[j].index * ngroup + gid];
// negative hessian marks entries to skip (file-wide convention)
if (p.GetHess() < 0.f) continue;
sums.first += p.GetGrad() * v;
sums.second += p.GetHess() * v * v;
}
}
}
}
// rank by descending weight magnitude within the groups
std::fill(deltaw_.begin(), deltaw_.end(), 0.f);
std::iota(sorted_idx_.begin(), sorted_idx_.end(), 0);
bst_float *pdeltaw = &deltaw_[0];
for (bst_uint gid = 0u; gid < ngroup; ++gid) {
// Calculate univariate weight changes
for (bst_omp_uint i = 0; i < nfeat; ++i) {
auto ii = gid * nfeat + i;
auto &s = gpair_sums_[ii];
deltaw_[ii] = static_cast<bst_float>(CoordinateDelta(
s.first, s.second, model[i][gid], alpha, lambda));
}
// sort in descending order of deltaw abs values
auto start = sorted_idx_.begin() + gid * nfeat;
std::sort(start, start + nfeat,
[pdeltaw](size_t i, size_t j) {
return std::abs(*(pdeltaw + i)) > std::abs(*(pdeltaw + j));
});
counter_[gid] = 0u;
}
}
int NextFeature(int, const gbm::GBLinearModel &model,
int group_idx, const std::vector<GradientPair> &,
DMatrix *, float, float) override {
// k-th selected feature for a group
auto k = counter_[group_idx]++;
// stop after either reaching top-N or going through all the features in a group
if (k >= top_k_ || counter_[group_idx] == model.learner_model_param->num_feature) return -1;
// note that sorted_idx stores the "long" indices
const size_t grp_offset = group_idx * model.learner_model_param->num_feature;
return static_cast<int>(sorted_idx_[grp_offset + k] - grp_offset);
}
protected:
// Maximum selections per group per round (max() when unlimited).
bst_uint top_k_;
// Univariate weight change per (group, feature), group-major layout.
std::vector<bst_float> deltaw_;
// Per-group index permutation sorted by |deltaw| descending.
std::vector<size_t> sorted_idx_;
// Per-group count of features handed out so far this round.
std::vector<bst_uint> counter_;
// (sum_grad, sum_hess) per (group, feature), group-major layout.
std::vector<std::pair<double, double>> gpair_sums_;
};
// Factory: maps a selector enum value to a freshly allocated selector.
// Caller owns the returned pointer. LOG(FATAL) aborts on an unknown choice.
inline FeatureSelector *FeatureSelector::Create(int choice) {
switch (choice) {
case kCyclic:
return new CyclicFeatureSelector();
case kShuffle:
return new ShuffleFeatureSelector();
case kThrifty:
return new ThriftyFeatureSelector();
case kGreedy:
return new GreedyFeatureSelector();
case kRandom:
return new RandomFeatureSelector();
default:
LOG(FATAL) << "unknown coordinate selector: " << choice;
}
// Unreachable after LOG(FATAL); keeps the compiler satisfied.
return nullptr;
}
} // namespace linear
} // namespace xgboost
|
nblist.c | #include <stdio.h>
#include <stddef.h>
#include <stdlib.h>
#include "defreal.h"
#if (defined(_OPENMP) || defined(SPEC_OPENMP)) && !defined(SPEC_SUPPRESS_OPENMP) && !defined(SPEC_AUTO_SUPPRESS_OPENMP)
# undef OPENMP
# define OPENMP
#else
# undef OPENMP
#endif
#ifdef OPENMP
#include <omp.h>
#endif
#if defined(MPI) || defined(SCALAPACK)
#include "mpi.h"
#endif
/* Fundamental NAB types */
typedef int INT_T;
typedef size_t SIZE_T;
/* Necessary function definitions */
/* NOTE(review): these are prototypes only; the definitions live in other
 * translation units of the NAB runtime. */
INT_T *ivector(INT_T, INT_T);
void free_ivector(INT_T*, INT_T, INT_T);
INT_T myroc(INT_T, INT_T, INT_T, INT_T);
INT_T get_mytaskid(void);
INT_T get_numtasks(void);
INT_T get_blocksize(void);
/* Here is the structure for a kd tree node. */
typedef struct kdnode {
/* atom number held at this node */
INT_T n;
/* children: lo = below the splitting plane, hi = above it */
struct kdnode *lo, *hi;
} KDNODE_T;
/***********************************************************************
DOWNHEAP_PAIRS()
************************************************************************/
/*
* The downheap function from Robert Sedgewick's "Algorithms in C++" p. 152,
* corrected for the fact that Sedgewick indexes the heap array from 1 to n
* whereas Java indexes the heap array from 0 to n-1. Note, however, that
* the heap should be indexed conceptually from 1 to n in order that for
* any node k the two children are found at nodes 2*k and 2*k+1. Move down
* the heap, exchanging the node at position k with the larger of its two
* children if necessary and stopping when the node at k is larger than both
* children or the bottom of the heap is reached. Note that it is possible
* for the node at k to have only one child: this case is treated properly.
* A full exchange is not necessary because the variable 'v' is involved in
* the exchanges. The 'while' loop has two exits: one for the case that
* the bottom of the heap is hit, and another for the case that the heap
* condition (the parent is greater than or equal to both children) is
* satisfied somewhere in the interior of the heap.
*
* Used by the heapsort_pairs function which sorts the pair list arrays.
*
* Calling parameters are as follows:
*
* a - array of indices into the atomic coordinate array x
* n - the number of items to be sorted
* k - the exchange node (or element)
*/
static
void downheap_pairs(int *a, int n, int k)
{
    /* Sift the element at (1-based) position k down the max-heap a[0..n-1]
     * until both children are no larger than it. The displaced value is
     * held in `saved` so only one final store is needed per level. */
    int child;
    int saved = a[k - 1];
    while (k <= n / 2) {
        child = 2 * k;
        /* pick the larger of the two children (right child exists if child < n) */
        if (child < n && a[child - 1] < a[child])
            child++;
        if (saved >= a[child - 1])
            break;  /* heap property restored */
        a[k - 1] = a[child - 1];
        k = child;
    }
    a[k - 1] = saved;
}
/***********************************************************************
HEAPSORT_PAIRS()
************************************************************************/
/*
* The heapsort function from Robert Sedgewick's "Algorithms in C++" p. 156,
* corrected for the fact that Sedgewick indexes the heap array from 1 to n
* whereas Java indexes the heap array from 0 to n-1. Note, however, that
* the heap should be indexed conceptually from 1 to n in order that for
* any node k the two children are found at nodes 2*k and 2*k+1. In what
* follows, the 'for' loop heaporders the array in linear time and the
* 'while' loop exchanges the largest element with the last element then
* repairs the heap.
*
* Calling parameters are as follows:
*
* a - array of indices into the atomic coordinate array x
* n - the number of items to be sorted
*
* Used by the nblist function to sort the pair list arrays.
*/
static
void heapsort_pairs(int *a, int n)
{
    /* In-place ascending heapsort of a[0..n-1] (1-based heap arithmetic,
     * as in Sedgewick). The sift-down step is inlined so the routine is
     * self-contained: first heap-order the array in linear time, then
     * repeatedly move the maximum to the end and repair the heap. */
    int k;
    for (k = n / 2; k >= 1; k--) {
        /* sift down from position k over a[0..n-1] */
        int v = a[k - 1], i = k;
        while (i <= n / 2) {
            int j = i + i;
            if (j < n && a[j - 1] < a[j])
                j++;
            if (v >= a[j - 1])
                break;
            a[i - 1] = a[j - 1];
            i = j;
        }
        a[i - 1] = v;
    }
    while (n > 1) {
        /* swap the max (root) with the last element, shrink, repair */
        int tmp = a[0];
        a[0] = a[n - 1];
        a[n - 1] = tmp;
        n--;
        int v = a[0], i = 1;
        while (i <= n / 2) {
            int j = i + i;
            if (j < n && a[j - 1] < a[j])
                j++;
            if (v >= a[j - 1])
                break;
            a[i - 1] = a[j - 1];
            i = j;
        }
        a[i - 1] = v;
    }
}
/***********************************************************************
DOWNHEAP_INDEX()
************************************************************************/
/*
* The downheap function from Robert Sedgewick's "Algorithms in C++" p. 152,
* corrected for the fact that Sedgewick indexes the heap array from 1 to n
* whereas Java indexes the heap array from 0 to n-1. Note, however, that
* the heap should be indexed conceptually from 1 to n in order that for
* any node k the two children are found at nodes 2*k and 2*k+1. Move down
* the heap, exchanging the node at position k with the larger of its two
* children if necessary and stopping when the node at k is larger than both
* children or the bottom of the heap is reached. Note that it is possible
* for the node at k to have only one child: this case is treated properly.
* A full exchange is not necessary because the variable 'v' is involved in
* the exchanges. The 'while' loop has two exits: one for the case that
* the bottom of the heap is hit, and another for the case that the heap
* condition (the parent is greater than or equal to both children) is
* satisfied somewhere in the interior of the heap.
*
* Used by the heapsort_index function which sorts the index arrays indirectly
* by comparing components of the Cartesian coordinates.
*
* Calling parameters are as follows:
*
* a - array of indices into the atomic coordinate array x
* n - the number of items to be sorted
* k - the exchange node (or element)
* x - the atomic coordinate array
* p - the partition (x, y, z or w) on which sorting occurs
* dim - 3 or 4: dimension of the coordinate space
*/
static
void downheap_index(int *a, int n, int k, REAL_T * x, int p, int dim)
{
    /* Sift the index at (1-based) heap position k down until the heap
     * property holds. Comparisons are indirect: index i is keyed by
     * coordinate x[dim*i + p], so the heap orders atom numbers by one
     * Cartesian component without moving the coordinates themselves. */
    int child;
    int saved = a[k - 1];
    while (k <= n / 2) {
        child = 2 * k;
        /* pick the child with the larger coordinate key */
        if (child < n && x[dim * a[child - 1] + p] < x[dim * a[child] + p])
            child++;
        if (x[dim * saved + p] >= x[dim * a[child - 1] + p])
            break;  /* heap property restored */
        a[k - 1] = a[child - 1];
        k = child;
    }
    a[k - 1] = saved;
}
/***********************************************************************
HEAPSORT_INDEX()
************************************************************************/
/*
* The heapsort function from Robert Sedgewick's "Algorithms in C++" p. 156,
* corrected for the fact that Sedgewick indexes the heap array from 1 to n
* whereas Java indexes the heap array from 0 to n-1. Note, however, that
* the heap should be indexed conceptually from 1 to n in order that for
* any node k the two children are found at nodes 2*k and 2*k+1. In what
* follows, the 'for' loop heaporders the array in linear time and the
* 'while' loop exchanges the largest element with the last element then
* repairs the heap.
*
* Calling parameters are as follows:
*
* a - array of indices into the atomic coordinate array x
* n - the number of items to be sorted
* x - the atomic coordinate array
* p - the partition (x, y, z or w) on which sorting occurs
* dim - 3 or 4: dimension of the coordinate space
*
* Used by the nblist function to sort the xn, yn, zn, wn and on arrays.
*/
static
void heapsort_index(int *a, int n, REAL_T * x, int p, int dim)
{
    /* Indirect ascending heapsort: orders the index array a[0..n-1] by the
     * coordinate component p of x (see downheap_index for the keying). */
    int root;
    /* heap-order the array in linear time */
    for (root = n / 2; root >= 1; root--)
        downheap_index(a, n, root, x, p, dim);
    /* repeatedly move the max to the end, shrink the heap, and repair it */
    while (n > 1) {
        int tmp = a[0];
        a[0] = a[n - 1];
        a[n - 1] = tmp;
        downheap_index(a, --n, 1, x, p, dim);
    }
}
/***********************************************************************
BUILDKDTREE()
************************************************************************/
/*
* Build the kd tree by recursively subdividing the atom number
* arrays and adding nodes to the tree. Note that the arrays
* are permuted cyclically as control moves down the tree in
* order that sorting occur on x, y, z and (for 4D) w. Also,
* if it is desired that the kd tree provide a partial atom
* order, the sorting will occur on o, x, y, z and (for 4D) w. The
* temporary array is provided for the copy and partition operation.
*
* Calling parameters are as follows:
*
* xn - x sorted array of atom numbers
* yn - y sorted array of atom numbers
* zn - z sorted array of atom numbers
* wn - w sorted array of atom numbers
* on - ordinal array of atom numbers
* tn - temporary array for atom numbers
* start - first element of array
* end - last element of array
* kdpptr - pointer to pointer to kd tree node next available for allocation
* that - the node currently visited, the equivalent of 'this' in C++
* x - atomic coordinate array
* p - the partition (x, y, z, w or o) on which sorting occurs
* dim - 3 or 4: dimension of the coordinate space
*/
#define SORT_ATOM_NUMBERS
static
void buildkdtree(int *xn, int *yn, int *zn, int *wn, int *on, int *tn,
                 int start, int end, KDNODE_T ** kdpptr, KDNODE_T * that,
                 REAL_T * x, int p, int dim)
{
   /*
    * imedian is assigned only when p == 0; the short-circuit tests below
    * ("(p == 0) && (... < imedian)") guarantee it is never read otherwise.
    */
   int i, middle, imedian, lower, upper;
   REAL_T median;
#if defined(SPEC)
   median = 0.0;
#endif
   /*
    * The partition cycles by dim unless SORT_ATOM_NUMBERS is defined,
    * in which case it cycles by dim+1.  Note that if SORT_ATOM_NUMBERS
    * is defined and the partition equals zero, sorting will occur
    * on the ordinal atom number instead of the atom's cartesian
    * coordinate.
    */
#ifndef SORT_ATOM_NUMBERS
   p %= dim;
#else
   p %= (dim + 1);
#endif
   /* If only one element is passed to this function, add it to the tree. */
   if (end == start) {
      that->n = xn[start];
   }
   /*
    * Otherwise, if two elements are passed to this function, determine
    * whether the first element is the low child or the high child.  Or,
    * if neither element is the low child, choose the second element to
    * be the high child.  Allocate a new KDNODE_T and make it one or the
    * other of the children.
    *
    * Child nodes are "allocated" by bumping the shared next-free-node
    * pointer (*kdpptr) into the preallocated kdtree array.
    */
   else if (end == start + 1) {
      /* Check whether the first element is the low child. */
#ifdef SORT_ATOM_NUMBERS
      if (((p == 0) && (xn[start] < xn[end])) ||
          ((p != 0) && (x[dim * xn[start] + p - 1] <
                        x[dim * xn[end] + p - 1])))
#else
      if (x[dim * xn[start] + p] < x[dim * xn[end] + p])
#endif
      {
         that->n = xn[end];
         (*kdpptr)->n = xn[start];
         (*kdpptr)->lo = NULL;
         (*kdpptr)->hi = NULL;
         that->lo = (*kdpptr)++;
      }
      /* Check whether the second element is the low child. */
#ifdef SORT_ATOM_NUMBERS
      else if (((p == 0) && (xn[start] > xn[end])) ||
               ((p != 0) && (x[dim * xn[start] + p - 1] >
                             x[dim * xn[end] + p - 1])))
#else
      else if (x[dim * xn[start] + p] > x[dim * xn[end] + p])
#endif
      {
         that->n = xn[start];
         (*kdpptr)->n = xn[end];
         (*kdpptr)->lo = NULL;
         (*kdpptr)->hi = NULL;
         that->lo = (*kdpptr)++;
      }
      /* Neither element is the low child so use the second as the high child. */
      else {
         that->n = xn[start];
         (*kdpptr)->n = xn[end];
         (*kdpptr)->lo = NULL;
         (*kdpptr)->hi = NULL;
         that->hi = (*kdpptr)++;
      }
   }
   /* Otherwise, more than two elements are passed to this function. */
   else {
      /*
       * The middle element of the xn array is taken as the element about
       * which the yn and zn arrays will be partitioned.  However, search
       * lower elements of the xn array to ensure that the p values of the
       * atomic coordinate array that correspond to these elements are indeed
       * less than the median value because they may be equal to it.  This
       * approach is consistent with partitioning between < and >=.
       *
       * The search described above is not necessary if SORT_ATOM_NUMBERS is
       * defined and p==0 because in this case sorting occurs on the
       * ordinal atom number instead of the atomic coordinate, and the
       * ordinal atom numbers are all unique.
       */
      middle = (start + end) / 2;
#ifdef SORT_ATOM_NUMBERS
      if (p == 0) {
         imedian = xn[middle];
      } else {
         median = x[dim * xn[middle] + p - 1];
         for (i = middle - 1; i >= start; i--) {
            if (x[dim * xn[i] + p - 1] < median) {
               break;
            } else {
               middle = i;
            }
         }
      }
#else
      median = x[dim * xn[middle] + p];
      for (i = middle - 1; i >= start; i--) {
         if (x[dim * xn[i] + p] < median) {
            break;
         } else {
            middle = i;
         }
      }
#endif
      /* Store the middle element at this kd node. */
      that->n = xn[middle];
      /*
       * Scan the yn array in ascending order and compare the p value of
       * each corresponding element of the atomic coordinate array to the
       * median value.  If the p value is less than the median value, copy
       * the element of the yn array into the lower part of the tn array.
       * If the p value is greater than or equal to the median value, copy
       * the element of the yn array into the upper part of the tn array.
       * The lower part of the tn array begins with the start index, and the
       * upper part of the tn array begins one element above the middle index.
       * At the end of this scan and copy operation, the tn array will have
       * been subdivided into three groups: (1) a group of indices beginning
       * with start and continuing up to but not including middle, which indices
       * point to atoms for which the p value is less than the median value;
       * (2) the middle index that has been stored in this node of the kd tree;
       * and (3) a group of indices beginning one address above middle and
       * continuing up to and including end, which indices point to atoms for
       * which the p value is greater than or equal to the median value.
       *
       * This approach preserves the relative heapsorted order of elements
       * of the atomic coordinate array that correspond to elements of the
       * yn array while those elements are partitioned about the p median.
       *
       * Note: when scanning the yn array, skip the element (i.e., the atom
       * number) that equals the middle element because that atom number has
       * been stored at this node of the kd-tree.
       */
      lower = start - 1;
      upper = middle;
      for (i = start; i <= end; i++) {
         if (yn[i] != xn[middle]) {
#ifdef SORT_ATOM_NUMBERS
            if (((p == 0) && (yn[i] < imedian)) ||
                ((p != 0) && (x[dim * yn[i] + p - 1] < median)))
#else
            if (x[dim * yn[i] + p] < median)
#endif
            {
               tn[++lower] = yn[i];
            } else {
               tn[++upper] = yn[i];
            }
         }
      }
      /*
       * All elements of the yn array between start and end have been copied
       * and partitioned into the tn array, so the yn array is available for
       * elements of the zn array to be copied and partitioned into the yn
       * array, in the same manner in which elements of the yn array were
       * copied and partitioned into the tn array.
       *
       * This approach preserves the relative heapsorted order of elements
       * of the atomic coordinate array that correspond to elements of the
       * zn array while those elements are partitioned about the p median.
       *
       * Note: when scanning the zn array, skip the element (i.e., the atom
       * number) that equals the middle element because that atom number has
       * been stored at this node of the kd-tree.
       */
      lower = start - 1;
      upper = middle;
      for (i = start; i <= end; i++) {
         if (zn[i] != xn[middle]) {
#ifdef SORT_ATOM_NUMBERS
            if (((p == 0) && (zn[i] < imedian)) ||
                ((p != 0) && (x[dim * zn[i] + p - 1] < median)))
#else
            if (x[dim * zn[i] + p] < median)
#endif
            {
               yn[++lower] = zn[i];
            } else {
               yn[++upper] = zn[i];
            }
         }
      }
      /*
       * Execute the following region of code if SORT_ATOM_NUMBERS is defined,
       * or if SORT_ATOM_NUMBERS is not defined and dim==4.
       */
#ifndef SORT_ATOM_NUMBERS
      if (dim == 4)
#endif
      {
         /*
          * All elements of the zn array between start and end have been copied
          * and partitioned into the yn array, so the zn array is available for
          * elements of the wn array to be copied and partitioned into the zn
          * array, in the same manner in which elements of the zn array were
          * copied and partitioned into the yn array.
          *
          * This approach preserves the relative heapsorted order of elements
          * of the atomic coordinate array that correspond to elements of the
          * wn array while those elements are partitioned about the p median.
          *
          * Note: when scanning the wn array, skip the element (i.e., the atom
          * number) that equals the middle element because that atom number has
          * been stored at this node of the kd-tree.
          */
         lower = start - 1;
         upper = middle;
         for (i = start; i <= end; i++) {
            if (wn[i] != xn[middle]) {
#ifdef SORT_ATOM_NUMBERS
               if (((p == 0) && (wn[i] < imedian)) ||
                   ((p != 0) && (x[dim * wn[i] + p - 1] < median)))
#else
               if (x[dim * wn[i] + p] < median)
#endif
               {
                  zn[++lower] = wn[i];
               } else {
                  zn[++upper] = wn[i];
               }
            }
         }
      }
      /*
       * Execute the following region of code if SORT_ATOM_NUMBERS is defined
       * and dim==4.
       */
#ifdef SORT_ATOM_NUMBERS
      if (dim == 4) {
         /*
          * All elements of the wn array between start and end have been copied
          * and partitioned into the zn array, so the wn array is available for
          * elements of the on array to be copied and partitioned into the wn
          * array, in the same manner in which elements of the wn array were
          * copied and partitioned into the zn array.
          *
          * This approach preserves the relative heapsorted order of elements
          * of the atomic coordinate array that correspond to elements of the
          * wn array while those elements are partitioned about the p median.
          *
          * Note: when scanning the on array, skip the element (i.e., the atom
          * number) that equals the middle element because that atom number has
          * been stored at this node of the kd-tree.
          */
         lower = start - 1;
         upper = middle;
         for (i = start; i <= end; i++) {
            if (on[i] != xn[middle]) {
               if (((p == 0) && (on[i] < imedian)) ||
                   ((p != 0) && (x[dim * on[i] + p - 1] < median))) {
                  wn[++lower] = on[i];
               } else {
                  wn[++upper] = on[i];
               }
            }
         }
      }
#endif
      /*
       * Recurse down the lo branch of the tree if the lower group of
       * the tn array is non-null.  Note permutation of the xn, yn, zn, wn,
       * on and tn arrays.  In particular, xn was used for partitioning at
       * this level of the tree.  At one level down the tree, yn (which
       * has been copied into tn) will be used for partitioning.  At two
       * levels down the tree, zn (which has been copied into yn) will
       * be used for partitioning.  If SORT_ATOM_NUMBERS is defined, or if
       * SORT_ATOM_NUMBERS is not defined and dim==4, at three levels down the
       * tree, wn (which has been copied into zn) will be used for partitoning.
       * At four levels down the tree, xn will be used for partitioning.
       * In this manner, partitioning cycles through xn, yn, zn and wn
       * at successive levels of the tree.
       *
       * Note that for 3D the wn array isn't allocated so don't permute it
       * cyclically along with the other arrays in the recursive call.
       *
       * Note also that if SORT_ATOM_NUMBERS isn't defined the on array isn't
       * allocated so don't permute it cyclically in the recursive call.
       */
      if (lower >= start) {
         (*kdpptr)->lo = NULL;
         (*kdpptr)->hi = NULL;
         that->lo = (*kdpptr)++;
#ifndef SORT_ATOM_NUMBERS
         if (dim == 4) {
            buildkdtree(tn, yn, zn, xn, on, wn,
                        start, lower, kdpptr, that->lo, x, p + 1, dim);
         } else {
            buildkdtree(tn, yn, xn, wn, on, zn,
                        start, lower, kdpptr, that->lo, x, p + 1, dim);
         }
#else
         if (dim == 4) {
            buildkdtree(tn, yn, zn, wn, xn, on,
                        start, lower, kdpptr, that->lo, x, p + 1, dim);
         } else {
            buildkdtree(tn, yn, zn, xn, on, wn,
                        start, lower, kdpptr, that->lo, x, p + 1, dim);
         }
#endif
      }
      /*
       * Recurse down the hi branch of the tree if the upper group of
       * the tn array is non-null.  Note permutation of the xn, yn, zn, wn
       * and tn arrays, as explained above for recursion down the lo
       * branch of the tree.
       *
       * Note that for 3D the wn array isn't allocated so don't permute it
       * cyclically along with the other arrays in the recursive call.
       *
       * Note also that if SORT_ATOM_NUMBERS isn't defined the on array isn't
       * allocated so don't permute it cyclically in the recursive call.
       */
      if (upper > middle) {
         (*kdpptr)->lo = NULL;
         (*kdpptr)->hi = NULL;
         that->hi = (*kdpptr)++;
#ifndef SORT_ATOM_NUMBERS
         if (dim == 4) {
            buildkdtree(tn, yn, zn, xn, on, wn,
                        middle + 1, end, kdpptr, that->hi, x, p + 1, dim);
         } else {
            buildkdtree(tn, yn, xn, wn, on, zn,
                        middle + 1, end, kdpptr, that->hi, x, p + 1, dim);
         }
#else
         if (dim == 4) {
            buildkdtree(tn, yn, zn, wn, xn, on,
                        middle + 1, end, kdpptr, that->hi, x, p + 1, dim);
         } else {
            buildkdtree(tn, yn, zn, xn, on, wn,
                        middle + 1, end, kdpptr, that->hi, x, p + 1, dim);
         }
#endif
      }
   }
}
/***********************************************************************
SEARCHKDTREE()
************************************************************************/
/*
* Walk the kd tree and generate the pair lists for the upper and lower
* triangles. The pair lists are partially ordered in descending atom
* number if SORT_ATOM_NUMBERS is defined. Descending order is preferred
* by the subsequent heap sort of the pair lists that will occur if
* HEAP_SORT_PAIRS is defined.
*
* Calling parameters are as follows:
*
* that - the node currently visited, equivalent to 'this' in C++
* x - atomic coordinate array
* p - the partition (x, y, z, w or o) on which sorting occurs
* q - the query atom number
* loindexp - pointer to pair count array index for the lower triangle
* upindexp - pointer to pair count array index for the upper triangle
* lopairlist - the pair list for the lower triangle
* uppairlist - the pair list for the upper triangle
* cut - the cutoff distance
 * cut2 - the square of the cutoff distance
*/
static
void searchkdtree(KDNODE_T * that, REAL_T * x, INT_T p, INT_T q,
                  INT_T * loindexp, INT_T * upindexp,
                  INT_T * lopairlist, INT_T * uppairlist,
                  REAL_T cut, REAL_T cut2, int dim, int *frozen)
{
   REAL_T xij, yij, zij, wij, r2;
   /*
    * The partition cycles by dim unless SORT_ATOM_NUMBERS is defined,
    * in which case it cycles by dim+1.  Note that if SORT_ATOM_NUMBERS
    * is defined and the partition equals zero, sorting has occurred
    * on the ordinal atom number instead of the atom's cartesian
    * coordinate.
    */
#ifndef SORT_ATOM_NUMBERS
   p %= dim;
#else
   p %= (dim + 1);
#endif
   /*
    * Search the high branch of the tree if the atomic coordinate of the
    * query atom plus the cutoff radius is greater than or equal to the
    * atomic coordinate of the kd node atom.
    *
    * If SORT_ATOM_NUMBERS is defined and p==0, always search the high branch.
    */
#ifdef SORT_ATOM_NUMBERS
   if (((p == 0) && (that->hi != NULL)) ||
       ((p != 0) && (that->hi != NULL) &&
        (x[dim * q + p - 1] + cut >= x[dim * that->n + p - 1])))
#else
   if ((that->hi != NULL) &&
       (x[dim * q + p] + cut >= x[dim * that->n + p]))
#endif
   {
      searchkdtree(that->hi, x, p + 1, q, loindexp, upindexp,
                   lopairlist, uppairlist, cut, cut2, dim, frozen);
   }
   /*
    * If the query atom number does not equal the kd tree node atom number
    * and at least one of the two atoms is not frozen, calculate the interatomic
    * distance and add the kd tree node atom to one of the pair lists if the
    * distance is less than the cutoff distance.  The atom belongs on the lower
    * triangle pair list if the atom number is less than the query node atom
    * number.  Otherwise, it belongs on the upper triangle pair list.
    */
   if ((q != that->n) && (!frozen[q] || !frozen[that->n])) {
      xij = x[dim * q + 0] - x[dim * that->n + 0];
      yij = x[dim * q + 1] - x[dim * that->n + 1];
      zij = x[dim * q + 2] - x[dim * that->n + 2];
      r2 = xij * xij + yij * yij + zij * zij;
      if (dim == 4) {
         wij = x[dim * q + 3] - x[dim * that->n + 3];
         r2 += wij * wij;
      }
      /* Compare squared distances (cut2 == cut*cut) so no sqrt is needed. */
      if (r2 < cut2) {
         if (that->n < q) {
            lopairlist[*loindexp] = that->n;
            (*loindexp)++;
         } else {
            uppairlist[*upindexp] = that->n;
            (*upindexp)++;
         }
      }
   }
   /*
    * Search the low branch of the tree if the atomic coordinate of the
    * query atom minus the cutoff radius is less than the atomic coordinate
    * of the kd node atom.
    *
    * If SORT_ATOM_NUMBERS is defined and p==0, always search the low branch.
    */
#ifdef SORT_ATOM_NUMBERS
   if (((p == 0) && (that->lo != NULL)) ||
       ((p != 0) && (that->lo != NULL) &&
        (x[dim * q + p - 1] - cut < x[dim * that->n + p - 1])))
#else
   if ((that->lo != NULL) && (x[dim * q + p] - cut < x[dim * that->n + p]))
#endif
   {
      searchkdtree(that->lo, x, p + 1, q, loindexp, upindexp,
                   lopairlist, uppairlist, cut, cut2, dim, frozen);
   }
}
/***********************************************************************
NBLIST()
************************************************************************/
/*
* Create the non-bonded and non-polar pairlists using a kd-tree.
* The kd-tree nodes are allocated from the kdtree array.
*
* Calling parameters are as follows:
*
* lpears - the number of pairs on the lower triangle pair list
* upears - the number of pairs on the upper triangle pair list
* pearlist - the pair list, contiguous for the upper and lower triangles
* x - atomic coordinate array
* context_PxQ - the ScaLAPACK context
* derivs - the derivative flag: -1 for 2nd derivs, 1 for 1st derivs
* cutoff - the cutoff radius
* natom - number of atoms
* dim - 3 or 4: dimension of the coordinate space
* frozen[] - list of frozen atoms
*
* This function returns the total number of pairs.
*/
INT_T nblist(INT_T * lpears, INT_T * upears, INT_T ** pearlist, REAL_T * x,
             INT_T context_PxQ, INT_T derivs, REAL_T cutoff, int natom,
             int dim, int *frozen)
{
   int i, j, locnt, upcnt, totpair, numthreads, threadnum, blocksize;
   int *xn, *yn, *zn, *wn = NULL, *on, *tn, *lopairlist, *uppairlist;
   REAL_T cutoff2;
   KDNODE_T *kdtree, *kdptr, *root;
#ifdef SCALAPACK
   int myrow, mycol, nprow, npcol, lotot, uptot;
   int *lopearlist, *uppearlist, *divblk, *modrow;
#endif
   /* Square the cutoff distances for use in searchkdtree. */
   cutoff2 = cutoff * cutoff;
   /* Get the block size. */
   blocksize = get_blocksize();
   /* Allocate the kdtree array that must hold one node per atom. */
   if ((kdtree = (KDNODE_T *) malloc(natom * sizeof(KDNODE_T))) == NULL) {
      fprintf(stderr, "Error allocate kdnode array in nbtree!\n");
      exit(1);
   }
   /*
    * Allocate, initialize and sort the arrays that hold the results of the
    * heapsort on x,y,z.  These arrays are used as pointers (via array indices)
    * into the atomic coordinate array x.  Allocate an additional temp array
    * so that the buildkdtree function can cycle through x,y,z.  Also allocate
    * and sort an additional array for the w coordinate if dim==4, and
    * allocate an array for the ordinal atom number if SORT_ATOM_NUMBERS is
    * defined.
    *
    * The temp array and the ordinal atom array are not sorted.
    */
   xn = ivector(0, natom);
   yn = ivector(0, natom);
   zn = ivector(0, natom);
   tn = ivector(0, natom);
   if (dim == 4) {
      wn = ivector(0, natom);
   }
#ifdef SORT_ATOM_NUMBERS
   on = ivector(0, natom);
#endif
   for (i = 0; i < natom; i++) {
      xn[i] = yn[i] = zn[i] = i;
      if (dim == 4) {
         wn[i] = i;
      }
#ifdef SORT_ATOM_NUMBERS
      on[i] = i;
#endif
   }
   heapsort_index(xn, natom, x, 0, dim);
   heapsort_index(yn, natom, x, 1, dim);
   heapsort_index(zn, natom, x, 2, dim);
   if (dim == 4) {
      heapsort_index(wn, natom, x, 3, dim);
   }
   /*
    * Build the kd tree.  For 3D the wn array is ignored because it wasn't
    * allocated.  When SORT_ATOM_NUMBERS is not defined the on array is
    * ignored because it wasn't allocated either.  See the recursive calls
    * to the buildkdtree function from within that function to verify that
    * arrays that are ignored do not participate in the cyclic permutation
    * of arrays in the recursive calls.
    *
    * But if SORT_ATOM_NUMBERS is defined the xn, yn, zn, wn and on array order
    * is permuted in the non-recursive call to the buildkdtree function
    * (below) so that the sort at the root node of the tree occurs on the
    * ordinal atom number.
    */
   kdptr = kdtree;
   root = kdptr++;
   root->lo = NULL;
   root->hi = NULL;
#ifndef SORT_ATOM_NUMBERS
   buildkdtree(xn, yn, zn, wn, on, tn, 0, natom - 1, &kdptr, root, x, 0,
               dim);
#else
   buildkdtree(on, xn, yn, zn, wn, tn, 0, natom - 1, &kdptr, root, x, 0,
               dim);
#endif
   /*
    * Search the kd tree with each atom and record pairs into temporary
    * arrays for the lower and upper triangle pair lists for one atom.
    * Copy the temporary pair lists into a pair list array that is
    * allocated separately for each atom and that contains the lower
    * and upper triangle pair lists contiguously packed.
    *
    * The pairlist array is an array of pair list arrays.
    */
   totpair = 0;
#if !defined(SPEC) || defined(OPENMP)
#pragma omp parallel reduction (+: totpair) \
  private (i, j, locnt, upcnt, lopairlist, uppairlist, threadnum, numthreads)
#endif
   {
      /*
       * Get the thread number and the number of threads for multi-threaded
       * execution under OpenMP, ScaLAPACK or MPI.  These variables are not
       * used for single-threaded execution.
       *
       * If MPI is defined, the manner of assignment of the threadnum
       * and numthreads variables depends upon derivs, as follows.
       *
       * If derivs >= 0, the call to nblist is intended to build
       * a pair list for the first derivative calculation.  Therefore,
       * the threadnum and numthreads variables are assigned in
       * a row cyclic manner that is required for the first derivative
       * calculation.  This row cyclic approach makes optimal use of
       * the MPI tasks in that each task has the minimum number of
       * pair lists for the first derivative calculation.
       *
       * If derivs < 0 the call to nblist is intended to build a pair
       * list for the Born second derivative calculation.  However,
       * the Born second derivative calculation is not parallelized
       * for MPI.  Therefore, the pair list will be fully populated
       * for each MPI task.
       *
       * If OPENMP is defined, the threadnum and numthreads variables
       * are assigned in a row cyclic manner for both the first and
       * second derivative calculations.  Thus for both calculations
       * each OpenMP thread has the minimum number of pair lists.
       *
       * If SCALAPACK is defined, the manner of assignment of the threadnum
       * and numthreads variables depends upon derivs variable, as follows.
       *
       * If derivs > 0, the call to nblist is intended to build
       * a pair list for the first derivative calculation.  Therefore,
       * the threadnum and numthreads variables are assigned in
       * a row cyclic manner that is required for the first derivative
       * calculation.  As in the MPI case, each MPI task has the minimum
       * number of pair list for the first derivative calculation.
       *
       * If derivs < 0, the call to nblist is intended to build a pair
       * list either for the Born second derivative calculation or for
       * the nonbonded second derivative calculation.  For the nonbonded
       * case, the calculation is not parallelized; therefore, the pair
       * list will be fully populated for each ScaLAPACK process.
       *
       * For the Born case, the threadnum and numthreads variables are
       * assigned from the process column and the number of process columns,
       * respectively.  Each row of a particular column receives the same
       * pair list from the search of the kd tree, but thereafter the
       * pair list is culled using the process row and number of process
       * rows so that each row ultimately receives a unique pair list.
       * Thus, although for the Born second derivative calculation each process
       * column receives more pair lists than does each MPI task for the first
       * derivative calculation, the pair lists are shorter, and therefore the
       * total number of pairs that are processed by each process column is the
       * same as in the first derivative calculation, with the following
       * caveat.  For processes that do not lie on the process grid, the
       * process column is -1 which will result in no pair list for that process
       * due to the fact that the myroc function returns 0.  Hence for the
       * second derivative calculation, fewer processes have pair lists than
       * for the first derivative calculation unless the total number of
       * MPI tasks is a square such as 1, 4, 9, et cetera.
       *
       */
#if defined(OPENMP)
      threadnum = omp_get_thread_num();
      numthreads = omp_get_num_threads();
#elif defined(SCALAPACK)
      if (derivs < 0) {
         blacs_gridinfo_(&context_PxQ, &nprow, &npcol, &myrow, &mycol);
         threadnum = mycol;
         numthreads = npcol;
      } else if (derivs > 0) {
         threadnum = get_mytaskid();
         numthreads = get_numtasks();
      } else {
         threadnum = 0;
         numthreads = 1;
      }
#elif defined(MPI)
      if (derivs <= 0) {
         threadnum = 0;
         numthreads = 1;
      } else {
         threadnum = get_mytaskid();
         numthreads = get_numtasks();
      }
#endif
      /*
       * Allocate the temporary arrays for the lower and upper triangles.
       * These arrays must be large enough to hold a maximum size pair
       * list for one atom.  For the ScaLAPACK second derivatives an
       * extra set of temporary arrays is used from which the final pair
       * lists are culled.
       *
       * Also, for ScaLAPACK allocate and initialize lookup tables
       * for division and modulus operations.
       */
      lopairlist = ivector(0, natom);
      uppairlist = ivector(0, natom);
#ifdef SCALAPACK
      if (derivs < 0 && gb) {
         lopearlist = ivector(0, natom);
         uppearlist = ivector(0, natom);
         divblk = ivector(0, natom);
         modrow = ivector(0, natom);
         for (i = 0; i < natom; i++) {
            divblk[i] = i / blocksize;
            modrow[i] = i % nprow;
         }
      }
#endif
      /*
       * Search the kd tree with each atom.  If no pair list array
       * has been allocated and there are pair atoms, allocate a
       * pair list array.  If a pair list array has been allocated
       * but it is too small for the number of pair atoms, deallocate
       * the pair list array and allocate a larger array.  If it is
       * at least 33% larger than is necessary for the pair atoms,
       * deallocate it and allocate a smaller pair list array.  Copy the
       * lower and upper triangle pair lists into the pair list array,
       * packed contiguously with the lower triangle pair list followed
       * by the upper triangle pair list.
       *
       * Explicitly assign threads to loop indices for the following loop,
       * in a manner equivalent to (static, N) scheduling with OpenMP,
       * and identical to the manner in which threads are assigned in
       * nbond, egb and egb2.
       *
       * There is an implied barrier at the end of this OpenMP parallel
       * region.  Because the following loop is the only loop in this parallel
       * region, there is no need for an explicit barrier.  Furthermore,
       * all OpenMP parallelized loops that use the pair list also use
       * loop index to thread mapping that is identical to what is used for
       * this loop.  Hence, no race condition would exist even if OpenMP
       * threads were not synchronized at the end of this parallel region
       * because each thread constructs the pair list that it uses thereafter.
       * This same argument applies to MPI tasks: no synchronization is
       * necessary.
       */
      for (i = 0; i < natom; i++) {
#if defined(OPENMP) || defined(MPI) || defined(SCALAPACK)
         if (!myroc(i, blocksize, numthreads, threadnum))
            continue;
#endif
#ifdef SCALAPACK
         if (derivs < 0 && gb) {
            lotot = uptot = 0;
            searchkdtree(root, x, 0, i, &lotot, &uptot,
                         lopearlist, uppearlist, cutoff, cutoff2, dim,
                         frozen);
         } else {
            locnt = upcnt = 0;
            searchkdtree(root, x, 0, i, &locnt, &upcnt,
                         lopairlist, uppairlist, cutoff, cutoff2, dim,
                         frozen);
         }
#else
         locnt = upcnt = 0;
         searchkdtree(root, x, 0, i, &locnt, &upcnt,
                      lopairlist, uppairlist, cutoff, cutoff2, dim,
                      frozen);
#endif
         /*
          * If SORT_ATOM_NUMBERS is defined, the upper and lower triangle
          * pair lists are partially sorted by ordinal atom number using
          * the kd tree.
          *
          * If HEAP_SORT_PAIRS is defined, sort the upper and lower triangle
          * pair lists using heap sort.
          *
          * If the pair lists are sorted by ordinal atom number using the
          * kd tree, the subsequent heap sort of the pair lists is quicker,
          * but the kd tree sort is not necessary.
          */
#define HEAP_SORT_PAIRS
#ifdef HEAP_SORT_PAIRS
#ifdef SCALAPACK
         if (derivs < 0 && gb) {
            heapsort_pairs(lopearlist, lotot);
            heapsort_pairs(uppearlist, uptot);
         } else {
            heapsort_pairs(lopairlist, locnt);
            heapsort_pairs(uppairlist, upcnt);
         }
#else
         heapsort_pairs(lopairlist, locnt);
         heapsort_pairs(uppairlist, upcnt);
#endif
#endif
         /*
          * For the ScaLAPACK second derivatives cull the pair lists
          * by copying to the final pair lists only those atoms that
          * are active in a particular process row.  Use the lookup
          * tables for a faster form of calls to myroc of the form:
          *
          *   myroc(lopearlist[j], blocksize, npcol, mycol)
          */
#ifdef SCALAPACK
         if (derivs < 0 && gb) {
            locnt = 0;
            for (j = 0; j < lotot; j++) {
               if (myrow >= 0 && modrow[divblk[lopearlist[j]]] == myrow) {
                  lopairlist[locnt++] = lopearlist[j];
               }
            }
            upcnt = 0;
            for (j = 0; j < uptot; j++) {
               if (myrow >= 0 && modrow[divblk[uppearlist[j]]] == myrow) {
                  uppairlist[upcnt++] = uppearlist[j];
               }
            }
         }
#endif
         /* (Re)size this atom's pair list array to fit locnt + upcnt pairs. */
         if ((pearlist[i] == NULL) && (locnt + upcnt > 0)) {
            pearlist[i] = ivector(0, locnt + upcnt);
         } else if ((pearlist[i] != NULL) &&
                    ((locnt + upcnt > lpears[i] + upears[i]) ||
                     (4 * (locnt + upcnt) <
                      3 * (lpears[i] + upears[i])))) {
            free_ivector(pearlist[i], 0, lpears[i] + upears[i]);
            pearlist[i] = ivector(0, locnt + upcnt);
         }
         lpears[i] = locnt;
         upears[i] = upcnt;
         /* Pack the lower triangle list followed by the upper triangle list. */
         for (j = 0; j < locnt; j++) {
            pearlist[i][j] = lopairlist[j];
         }
         for (j = 0; j < upcnt; j++) {
            pearlist[i][locnt + j] = uppairlist[j];
         }
         totpair += locnt + upcnt;
      }
      /*
       * Deallocate the temporary arrays for the lower and upper triangles.
       * For ScaLAPACK deallocate the additional temporary arrays as well as
       * the lookup tables.
       */
      free_ivector(lopairlist, 0, natom);
      free_ivector(uppairlist, 0, natom);
#ifdef SCALAPACK
      if (derivs < 0 && gb) {
         free_ivector(lopearlist, 0, natom);
         free_ivector(uppearlist, 0, natom);
         free_ivector(divblk, 0, natom);
         free_ivector(modrow, 0, natom);
      }
#endif
   }
   /* Free the temporary arrays. */
   free(kdtree);
   free_ivector(xn, 0, natom);
   free_ivector(yn, 0, natom);
   free_ivector(zn, 0, natom);
   free_ivector(tn, 0, natom);
   if (dim == 4) {
      free_ivector(wn, 0, natom);
   }
#ifdef SORT_ATOM_NUMBERS
   free_ivector(on, 0, natom);
#endif
   return totpair;
}
|
pkzip_fmt_plug.c | /* PKZIP patch for john to handle 'old' pkzip passwords (old 'native' format)
*
* Written by Jim Fougeron <jfoug at cox.net> in 2011. No copyright
* is claimed, and the software is hereby placed in the public domain.
* In case this attempt to disclaim copyright and place the software in the
* public domain is deemed null and void, then the software is
* Copyright (c) 2011 Jim Fougeron and it is hereby released to the
* general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
*
*/
#include "arch.h"
#if !AC_BUILT
#define HAVE_LIBZ 1 /* legacy build has -lz in LDFLAGS */
#endif
#if HAVE_LIBZ
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pkzip;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pkzip);
#else
#include <string.h>
#include <zlib.h>
#include "common.h"
#include "misc.h"
#include "formats.h"
#define USE_PKZIP_MAGIC 1
#include "pkzip.h"
#include "pkzip_inffixed.h" // This file is a data file, taken from zlib
#include "loader.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "memdbg.h"
#define FORMAT_LABEL "PKZIP"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1000
#define PLAINTEXT_LENGTH 31
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_SIZE (sizeof(PKZ_SALT*))
#define SALT_ALIGN (sizeof(ARCH_WORD_32))
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 64
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
//#define ZIP_DEBUG 1
//#define ZIP_DEBUG 2
/*
* It is likely that this should be put into the arch.h files for the different systems,
* IF we find a system which operates faster doing the non-table work.
* However, in current testing, it is always faster to use the multiply table. It only
* takes 16kb, and almost always stays in the cache for any system newer than a 386.
*/
#define PKZIP_USE_MULT_TABLE
#if ARCH_LITTLE_ENDIAN
#define KB1 0
#define KB2 3
#else
#define KB1 3
#define KB2 0
#endif
/*
* filename:$pkzip$C*B*[DT*MT{CL*UL*CR*OF*OX}*CT*DL*CS*DA]*$/pkzip$ (deprecated)
* filename:$pkzip2$C*B*[DT*MT{CL*UL*CR*OF*OX}*CT*DL*CS*TC*DA]*$/pkzip2$ (new format, with 2 checksums)
*
* All numeric and 'binary data' fields are stored in hex.
*
* C is the count of hashes present (the array of items, inside the [] C can be 1 to 3.).
* B is number of valid bytes in the checksum (1 or 2). Unix zip is 2 bytes, all others are 1
* ARRAY of data starts here (there will be C array elements)
* DT is a "Data Type enum". This will be 1 2 or 3. 1 is 'partial'. 2 and 3 are full file data (2 is inline, 3 is load from file).
* MT Magic Type enum. 0 is no 'type'. 255 is 'text'. Other types (like MS Doc, GIF, etc), see source.
* NOTE, CL, DL, CRC, OFF are only present if DT != 1
* CL Compressed length of file blob data (includes 12 byte IV).
* UL Uncompressed length of the file.
* CR CRC32 of the 'final' file.
* OF Offset to the PK\x3\x4 record for this file data. If DT==2, then this will be a 0, as it is not needed, all of the data is already included in the line.
* OX Additional offset (past OF), to get to the zip data within the file.
* END OF 'optional' fields.
* CT Compression type (0 or 8) 0 is stored, 8 is imploded.
* DL Length of the DA data.
* CS Checksum from crc32.
* TC Checksum from timestamp
* DA This is the 'data'. It will be hex data if DT==1 or 2. If DT==3, then it is a filename (name of the .zip file).
* END of array items.
* The format string will end with $/pkzip$
*
* NOTE, after some code testing, it has come to show, that the 'magic' may not be needed, or very useful. The problem with it, is IF the file
* ends up NOT starting with any of the magic values, then we will have a false negative, and NEVER be able to crack the zip's password. For now
* we have a #define (right before the #include "pkzip.h"). If that define is uncommented, then pkzip format will be built with magic logic.
* However, right now it is not being built that way.
*
*/
/* Built-in self-test vectors. Each entry is a full hash line (format described above) plus its known password. */
static struct fmt_tests tests[] = {
/* compression of a perl file. We have the same password, same file used twice in a row (pkzip, 1 byte checksum). NOTE, pkzip uses random IV, so both encrypted blobs are different */
{"\
$pkzip$1*1*2*0*e4*1c5*eda7a8de*0*4c*8*e4*eda7*194883130e4c7419bd735c53dec36f0c4b6de6daefea0f507d67ff7256a49b5ea93ccfd9b12f2ee99053ee0b1c9e1c2b88aeaeb6bd4e60094a1ea118785d4ded6dae94\
cade41199330f4f11b37cba7cda5d69529bdfa43e2700ba517bd2f7ff4a0d4b3d7f2559690ec044deb818c44844d6dd50adbebf02cec663ae8ebb0dde05d2abc31eaf6de36a2fc19fda65dd6a7e449f669d1f8c75e9daa0a3f7b\
e8feaa43bf84762d6dbcc9424285a93cedfa3a75dadc11e969065f94fe3991bc23c9b09eaa5318aa29fa02e83b6bee26cafec0a5e189242ac9e562c7a5ed673f599cefcd398617*$/pkzip$", "password" },
{"\
$pkzip$1*1*2*0*e4*1c5*eda7a8de*0*4c*8*e4*eda7*581f798527109cbadfca0b3318435a000be84366caf9723f841a2b13e27c2ed8cdb5628705a98c3fbbfb34552ed498c51a172641bf231f9948bca304a6be2138ab718f\
6a5b1c513a2fb80c49030ff1a404f7bd04dd47c684317adea4107e5d70ce13edc356c60bebd532418e0855428f9dd582265956e39a0b446a10fd8b7ffb2b4af559351bbd549407381c0d2acc270f3bcaffb275cbe2f628cb09e2\
978e87cd023d4ccb50caaa92b6c952ba779980d65f59f664dde2451cc456d435188be59301a5df1b1b4fed6b7509196334556c44208a9d7e2d9e237f591d6c9fc467b408bf0aaa*$/pkzip$", "password" },
/* Now the same file, compressed twice, using unix zip (info-zip), with 2 byte checksums */
{"\
$pkzip$1*2*2*0*e4*1c5*eda7a8de*0*47*8*e4*4bb6*436c9ffa4328870f6272349b591095e1b1126420c3041744650282bc4f575d0d4a5fc5fb34724e6a1cde742192387b9ed749ab5c72cd6bb0206f102e9216538f095fb7\
73661cfde82c2e2a619332998124648bf4cd0da56279f0c297567d9b5d684125ee92920dd513fd18c27afba2a9633614f75d8f8b9a14095e3fafe8165330871287222e6681dd9c0f830cf5d464457b257d0900eed29107fad8af\
3ac4f87cf5af5183ff0516ccd9aeac1186006c8d11b18742dfb526aadbf2906772fbfe8fb18798967fd397a724d59f6fcd4c32736550986d227a6b447ef70585c049a1a4d7bf25*$/pkzip$", "password" },
{"\
$pkzip$1*2*2*0*e4*1c5*eda7a8de*0*47*8*e4*4bb6*436c9ffa4328870f6272349b591095e1b1126420c3041744650282bc4f575d0d4a5fc5fb34724e6a1cde742192387b9ed749ab5c72cd6bb0206f102e9216538f095fb7\
73661cfde82c2e2a619332998124648bf4cd0da56279f0c297567d9b5d684125ee92920dd513fd18c27afba2a9633614f75d8f8b9a14095e3fafe8165330871287222e6681dd9c0f830cf5d464457b257d0900eed29107fad8af\
3ac4f87cf5af5183ff0516ccd9aeac1186006c8d11b18742dfb526aadbf2906772fbfe8fb18798967fd397a724d59f6fcd4c32736550986d227a6b447ef70585c049a1a4d7bf25*$/pkzip$", "password"},
/* now a pkzip archive, with 3 files, 1 byte checksum */
{"\
$pkzip$3*1*1*0*8*24*4001*8986ec4d693e86c1a42c1bd2e6a994cb0b98507a6ec937fe0a41681c02fe52c61e3cc046*1*0*8*24*4003*a087adcda58de2e14e73db0043a4ff0ed3acc6a9aee3985d7cb81d5ddb32b840ea20\
57d9*2*0*e4*1c5*eda7a8de*0*4c*8*e4*eda7*89a792af804bf38e31fdccc8919a75ab6eb75d1fd6e7ecefa3c5b9c78c3d50d656f42e582af95882a38168a8493b2de5031bb8b39797463cb4769a955a2ba72abe48ee75b103\
f93ef9984ae740559b9bd84cf848d693d86acabd84749853675fb1a79edd747867ef52f4ee82435af332d43f0d0bb056c49384d740523fa75b86a6d29a138da90a8de31dbfa89f2f6b0550c2b47c43d907395904453ddf42a665\
b5f7662de170986f89d46d944b519e1db9d13d4254a6b0a5ac02b3cfdd468d7a4965e4af05699a920e6f3ddcedb57d956a6b2754835b14e174070ba6aec4882d581c9f30*$/pkzip$", "3!files"},
/* following are from CMIYC 2012 */
{"$pkzip$1*1*2*0*163*2b5*cd154083*0*26*8*163*cd15*d6b094794b40116a8b387c10159225d776f815b178186e51faf16fa981fddbffdfa22f6c6f32d2f81dab35e141f2899841991f3cb8d53f8ee1f1d85657f7c7a82ebb2d63182803c6beee00e0bf6c72edeeb1b00dc9f07f917bb8544cc0e96ca01503cd0fb6632c296cebe3fb9b64543925daae6b7ea95cfd27c42f6f3465e0ab2c812b9aeeb15209ce3b691f27ea43a7a77b89c2387e31c4775866a044b6da783af8ddb72784ccaff4d9a246db96484e865ea208ade290b0131b4d2dd21f172693e6b5c90f2eb9b67572b55874b6d3a78763212b248629e744c07871a6054e24ef74b6d779e44970e1619df223b4e5a72a189bef40682b62be6fb7f65e087ca6ee19d1ebfc259fa7e3d98f3cb99347689f8360294352accffb146edafa9e91afba1f119f95145738ac366b332743d4ff40d49fac42b8758c43b0af5b60b8a1c63338359ffbff432774f2c92de3f8c49bd4611e134db98e6a3f2cfb148d2b20f75abab6*$/pkzip$", "passwort"},
{"$pkzip$1*1*2*0*163*2b6*46abc149*0*28*8*163*46ab*0f539b23b761a347a329f362f7f1f0249515f000404c77ec0b0ffe06f29140e8fa3e8e5a6354e57f3252fae3d744212d4d425dc44389dd4450aa9a4f2f3c072bee39d6ac6662620812978f7ab166c66e1acb703602707ab2da96bb28033485ec192389f213e48eda8fc7d9dad1965b097fafebfda6703117db90e0295db9a653058cb28215c3245e6e0f6ad321065bf7b8cc5f66f6f2636e0d02ea35a6ba64bbf0191c308098fd836e278abbce7f10c3360a0a682663f59f92d9c2dcfc87cde2aae27ea18a14d2e4a0752b6b51e7a5c4c8c2bab88f4fb0aba27fb20e448655021bb3ac63752fdb01e6b7c99f9223f9e15d71eb1bd8e323f522fc3da467ff0aae1aa17824085d5d6f1cdfc9c7c689cd7cb057005d94ba691f388484cfb842c8775baac220a5490ed945c8b0414dbfc4589254b856aade49f1aa386db86e9fc87e6475b452bd72c5e2122df239f8c2fd462ca54c1a5bddac36918c5f5cf0cc94aa6ee820*$/pkzip$", "Credit11"},
{"$pkzip$1*1*2*0*163*2b6*46abc149*0*26*8*163*46ab*7ea9a6b07ddc9419439311702b4800e7e1f620b0ab8535c5aa3b14287063557b176cf87a800b8ee496643c0b54a77684929cc160869db4443edc44338294458f1b6c8f056abb0fa27a5e5099e19a07735ff73dc91c6b20b05c023b3ef019529f6f67584343ac6d86fa3d12113f3d374b047efe90e2a325c0901598f31f7fb2a31a615c51ea8435a97d07e0bd4d4afbd228231dbc5e60bf1116ce49d6ce2547b63a1b057f286401acb7c21afbb673f3e26bc1b2114ab0b581f039c2739c7dd0af92c986fc4831b6c294783f1abb0765cf754eada132df751cf94cad7f29bb2fec0c7c47a7177dea82644fc17b455ba2b4ded6d9a24e268fcc4545cae73b14ceca1b429d74d1ebb6947274d9b0dcfb2e1ac6f6b7cd2be8f6141c3295c0dbe25b65ff89feb62cb24bd5be33853b88b8ac839fdd295f71e17a7ae1f054e27ba5e60ca03c6601b85c3055601ce41a33127938440600aaa16cfdd31afaa909fd80afc8690aaf*$/pkzip$", "7J0rdan!!"},
/* CMIYC 2013 "pro" hard hash */
{"$pkzip$1*2*2*0*6b*73*8e687a5b*0*46*8*6b*0d9d*636fedc7a78a7f80cda8542441e71092d87d13da94c93848c230ea43fab5978759e506110b77bd4bc10c95bc909598a10adfd4febc0d42f3cd31e4fec848d6f49ab24bb915cf939fb1ce09326378bb8ecafde7d3fe06b6013628a779e017be0f0ad278a5b04e41807ae9fc*$/pkzip$", "c00rslit3!"},
/* http://corkami.googlecode.com/files/ChristmasGIFts.zip (fixed with 2 byte checksums from timestamp, using new $pkzip2$ type) */
{"$pkzip2$3*2*1*2*8*c0*7224*72f6*6195f9f3401076b22f006105c4323f7ac8bb8ebf8d570dc9c7f13ddacd8f071783f6bef08e09ce4f749af00178e56bc948ada1953a0263c706fd39e96bb46731f827a764c9d55945a89b952f0503747703d40ed4748a8e5c31cb7024366d0ef2b0eb4232e250d343416c12c7cbc15d41e01e986857d320fb6a2d23f4c44201c808be107912dbfe4586e3bf2c966d926073078b92a2a91568081daae85cbcddec75692485d0e89994634c71090271ac7b4a874ede424dafe1de795075d2916eae*1*6*8*c0*26ee*461b*944bebb405b5eab4322a9ce6f7030ace3d8ec776b0a989752cf29569acbdd1fb3f5bd5fe7e4775d71f9ba728bf6c17aad1516f3aebf096c26f0c40e19a042809074caa5ae22f06c7dcd1d8e3334243bca723d20875bd80c54944712562c4ff5fdb25be5f4eed04f75f79584bfd28f8b786dd82fd0ffc760893dac4025f301c2802b79b3cb6bbdf565ceb3190849afdf1f17688b8a65df7bc53bc83b01a15c375e34970ae080307638b763fb10783b18b5dec78d8dfac58f49e3c3be62d6d54f9*2*0*2a*1e*4a204eab*ce8*2c*0*2a*4a20*7235*6b6e1a8de47449a77e6f0d126b217d6b2b72227c0885f7dc10a2fb3e7cb0e611c5c219a78f98a9069f30*$/pkzip2$", "123456"},
{NULL} /* end-of-list sentinel */
};
/* these static fields are used in the crypt_all loop, and the cmp_all/cmp_one we */
/* perform the pkzip 'checksum' checking. If we do get a 'hit', then that pass & */
/* salt pair is checked fully within the cmp_exact, where it gets inflated and */
/* checked (possibly also a 'sample TEXT record is done first, as a quick check */
static char (*saved_key)[PLAINTEXT_LENGTH + 1]; /* candidate passwords, stored by set_key(), returned by get_key() */
static u32 *K12; /* 3 pkzip key-state words per candidate (key0/key1/key2), consumed by cmp_exact* */
static PKZ_SALT *salt; /* the currently active salt, installed by set_salt() */
static u8 *chk; /* per-candidate flag: 1 = passed the cheap checksum test in crypt_all */
static int dirty=1; /* set by set_key(); presumably tells crypt_all to rebuild key state - confirm in crypt_all (not in view) */
#if USE_PKZIP_MAGIC
static ZIP_SIGS SIGS[256]; /* file-type magic signatures indexed by magic enum; populated in init() */
#endif
#ifdef PKZIP_USE_MULT_TABLE
/* 16kb lookup table that replaces the pkzip 'decrypt byte' multiply (see comment near top of file) */
static u8 mult_tab[16384];
#define PKZ_MULT(b,w) b^mult_tab[(u16)(w.u)>>2]
#else
/* non-table fallback: compute the decrypt byte directly from key2 */
inline u8 PKZ_MULT(u8 b, MY_WORD w) {u16 t = w.u|2; return b ^ (u8)(((u16)(t*(t^1))>>8)); }
#endif
extern struct fmt_main fmt_pkzip;
/* forward declaration; defined below, used by valid() */
static const char *ValidateZipContents(FILE *in, long offset, u32 offex, int len, u32 crc);
/* Since the pkzip format textual representation is pretty complex, with multiple */
/* 'optional' sections, we have a VERY complete valid. Valid will make SURE that */
/* the format is completely valid. Thus, there is little or no error checking later */
/* in the rest of the code. It 'should' not be needed, and is done here. There is */
/* a little error checking later in the file, for some of the file opening stuff, */
/* since the file can change from the time of this 'valid' call, until when the data */
/* is actually read from the file. */
/* */
/* NOTE, we may want to later make a 'prepare()' function, and do all file loading */
/* there, so that we have a 'complete' format line, with the zip data contained. */
static int valid(char *ciphertext, struct fmt_main *self)
{
c8 *p, *cp, *cpkeep;
int cnt, data_len, ret=0;
u32 crc;
FILE *in;
const char *sFailStr;
long offset;
u32 offex;
int type;
int complen = 0;
int type2 = 0;
/* accept either the legacy "$pkzip$" tag or the newer "$pkzip2$" (2-checksum) tag */
if (strncmp(ciphertext, "$pkzip$", 7)) {
if (!strncmp(ciphertext, "$pkzip2$", 8))
type2 = 1;
else
return ret;
}
/* strtokm() writes NULs into the buffer, so parse a private copy */
cpkeep = strdup(ciphertext);
cp = cpkeep;
p = &cp[7];
if (type2)
++p; /* "$pkzip2$" signature is one byte longer */
/* C field: count of hash blobs on this line */
if ((cp = strtokm(p, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
sFailStr = "Out of data, reading count of hashes field"; goto Bail; }
sscanf(cp, "%x", &cnt);
if (cnt < 1 || cnt > MAX_PKZ_FILES) {
sFailStr = "Count of hashes field out of range"; goto Bail; }
/* B field: number of valid checksum bytes, a single digit '0'..'2' */
if ((cp = strtokm(NULL, "*")) == NULL || cp[0] < '0' || cp[0] > '2' || cp[1]) {
sFailStr = "Number of valid hash bytes empty or out of range"; goto Bail; }
/* one iteration per array element; field layout is documented above */
while (cnt--) {
/* DT field: 1=partial data, 2=full inline data, 3=data in external zip file */
if ((cp = strtokm(NULL, "*")) == NULL || cp[0]<'1' || cp[0]>'3' || cp[1]) {
sFailStr = "Invalid data enumeration type"; goto Bail; }
type = cp[0] - '0';
/* MT field: magic type enum (hex byte) */
if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
sFailStr = "Invalid type enumeration"; goto Bail; }
if (type > 1) {
/* CL/UL/CR/OF/OX fields are only present when DT != 1 */
if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
sFailStr = "Invalid compressed length"; goto Bail; }
sscanf(cp, "%x", &complen);
if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
sFailStr = "Invalid data length value"; goto Bail; }
if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
sFailStr = "Invalid CRC value"; goto Bail; }
sscanf(cp, "%x", &crc);
if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
sFailStr = "Invalid offset length"; goto Bail; }
sscanf(cp, "%lx", &offset);
if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
sFailStr = "Invalid offset length"; goto Bail; }
sscanf(cp, "%x", &offex);
}
/* CT field: compression type, '0' (stored) or '8' (deflated) */
if ((cp = strtokm(NULL, "*")) == NULL || (cp[0] != '0' && cp[0] != '8') || cp[1]) {
sFailStr = "Compression type enumeration"; goto Bail; }
/* DL field: length of the DA blob */
if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
sFailStr = "Invalid data length value"; goto Bail; }
sscanf(cp, "%x", &data_len);
/* CS field: crc-derived checksum, always exactly 4 hex digits */
if ((cp = strtokm(NULL, "*")) == NULL || !ishexlc(cp) || strlen(cp) != 4) {
sFailStr = "invalid checksum value"; goto Bail; }
if (type2) {
/* TC field ($pkzip2$ only): timestamp-derived checksum, 4 hex digits */
if ((cp = strtokm(NULL, "*")) == NULL || !ishexlc(cp) || strlen(cp) != 4) {
sFailStr = "invalid checksum2 value"; goto Bail;}
}
/* DA field: hex blob (DT 1 or 2), or a .zip file name (DT 3) */
if ((cp = strtokm(NULL, "*")) == NULL) goto Bail;
if (type > 1) {
if (type == 3) {
/* DT==3: DA is the zip file name; data_len holds its length */
if ( strlen(cp) != data_len) {
sFailStr = "invalid checksum value"; goto Bail; }
in = fopen(cp, "rb"); /* have to open in bin mode for OS's where this matters, DOS/Win32 */
if (!in) {
/* this error is listed, even if not in pkzip debugging mode. */
/* But not if we're just reading old pot lines */
if (!ldr_in_pot)
fprintf(stderr, "Error loading a pkzip hash line. The ZIP file '%s' could NOT be found\n", cp);
/* NOTE(review): this and the following early returns skip MEM_FREE(cpkeep) - small leak per bad line */
return 0;
}
/* confirm the offset really points at a matching PK\3\4 local file header */
sFailStr = ValidateZipContents(in, offset, offex, complen, crc);
if (*sFailStr) {
/* this error is listed, even if not in pkzip debugging mode. */
fprintf(stderr, "pkzip validation failed [%s] Hash is %s\n", sFailStr, ciphertext);
fclose(in);
return 0;
}
fseek(in, offset+offex, SEEK_SET);
if (complen < 16*1024) {
/* simply load the whole blob */
void *tbuf = mem_alloc(complen);
if (fread(tbuf, 1, complen, in) != complen) {
MEM_FREE(tbuf);
fclose(in);
return 0;
}
data_len = complen;
MEM_FREE(tbuf);
}
fclose(in);
} else {
/* 'inline' data. */
if (complen != data_len) {
sFailStr = "length of full data does not match the salt len"; goto Bail; }
if (!ishexlc(cp) || strlen(cp) != data_len<<1) {
sFailStr = "invalid inline data"; goto Bail; }
}
} else {
/* DT==1: partial data; validate the hex blob is exactly data_len bytes */
if (!ishexlc(cp) || strlen(cp) != data_len<<1) {
sFailStr = "invalid partial data"; goto Bail; }
}
}
/* trailing signature must close the line, with nothing after it */
if ((cp = strtokm(NULL, "*")) == NULL) goto Bail;
if (strtokm(NULL, "") != NULL) goto Bail;
if (type2) ret = !strcmp(cp, "$/pkzip2$");
else ret = !strcmp(cp, "$/pkzip$");
Bail:;
/* NOTE(review): the bare "goto Bail" paths above reach here with sFailStr
 * unset, so under ZIP_DEBUG this fprintf can read an uninitialized pointer */
#ifdef ZIP_DEBUG
if (!ret) fprintf (stderr, "pkzip validation failed [%s] Hash is %s\n", sFailStr, ciphertext);
#endif
MEM_FREE(cpkeep);
return ret;
}
/*
 * Verify that 'offset' inside the open zip file really points at a local
 * file header (PK\x3\x4) matching the values parsed from the hash line:
 * same CRC and compressed length, a known creator version, the encryption
 * flag bit set, a stored/deflated method, and 'offex' equal to the full
 * 30-byte header plus name/extra field lengths.
 * Returns "" on success, else a static English error message.
 */
static const char *ValidateZipContents(FILE *fp, long offset, u32 offex, int _len, u32 _crc) {
	u32 sig, hdr_crc, hdr_complen, hdr_uncomplen;
	u16 ver, flg, meth, mtime, mdate, fname_len, extra_len;

	if (fseek(fp, offset, SEEK_SET) != 0)
		return "Not able to seek to specified offset in the .zip file, to read the zip blob data.";

	sig = fget32LE(fp);
	if (sig != 0x04034b50U)
		return "Compressed zip file offset does not point to start of zip blob";

	/* Pull the remaining fixed-size local-header fields, in file order. */
	ver = fget16LE(fp);
	flg = fget16LE(fp);
	meth = fget16LE(fp);
	mtime = fget16LE(fp);
	mdate = fget16LE(fp);
	hdr_crc = fget32LE(fp);
	hdr_complen = fget32LE(fp);
	hdr_uncomplen = fget32LE(fp);
	fname_len = fget16LE(fp);
	extra_len = fget16LE(fp);
	/* these are read only to advance the stream */
	(void)hdr_uncomplen;
	(void)mtime;
	(void)mdate;

	/* Even if we 'miss', the caller keeps walking. A miss can happen if some other
	 * binary data happens to contain the 0x04034b50 signature (false header hit). */
	if (_crc == hdr_crc && _len == hdr_complen &&
	    (0x14 == ver || 0xA == ver) && (flg & 1) &&
	    (meth == 8 || meth == 0) && offex == 30 + fname_len + extra_len)
		return "";
	return "We could NOT find the internal zip data in this ZIP file";
}
/* Duplicate 'len' bytes of 'p' into JtR's never-freed "tiny" allocation pool. */
static u8 *buf_copy (char *p, int len) {
	u8 *dst = mem_alloc_tiny(len, MEM_ALIGN_NONE);
	return memcpy(dst, p, len);
}
/* Format init: scale the key buffers for OpenMP, allocate per-candidate state,
 * build the decrypt-byte multiply table, and (optionally) the magic-signature table. */
static void init(struct fmt_main *self)
{
#ifdef PKZIP_USE_MULT_TABLE
unsigned short n=0;
#endif
#ifdef _OPENMP
/* standard JtR OMP tuning: widen the key window by threads * OMP_SCALE */
int omp_t;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
K12 = mem_calloc(sizeof(*K12) * 3, self->params.max_keys_per_crypt); /* 3 key words per candidate */
chk = mem_calloc(sizeof(*chk), self->params.max_keys_per_crypt);
/*
 * Precompute the multiply mangling, within several parts of the hash. There is a pattern,
 * 64k entries long. However the exact same value is produced 4 times in a row, every
 * time. Thus, we can build a 16k wide array, and then access the array using this
 * ((val&0xFFFF) >> 2) This is faster on all current HW, since the 16kb array access
 * (and the and/shift) is faster than performing the whole mult, 2 shifts, 2 adds and
 * an and (if the compiler can optimize it to that)
 *
 * There is a # define at the top of this file that turns this OFF. if that define is
 * not set, then these mult's will be done in the crypt_all and decrypt functions
 */
#ifdef PKZIP_USE_MULT_TABLE
for (n = 0; n < 16384; n++)
mult_tab[n] = (((unsigned)(n*4+3) * (n*4+2)) >> 8) & 0xff;
#endif
#if USE_PKZIP_MAGIC
//static char *MagicTypes[]= { "", "DOC", "XLS", "DOT", "XLT", "EXE", "DLL", "ZIP", "BMP", "DIB", "GIF", "PDF", "GZ", "TGZ", "BZ2", "TZ2", "FLV", "SWF", "MP3", NULL };
//static int MagicToEnum[] = {0, 1, 1, 1, 1, 2, 2, 3, 4, 4, 5, 6, 7, 7, 8, 8, 9, 10, 11, 0};
// decent sources of these:
// http://www.garykessler.net/library/file_sigs.html
// http://en.wikipedia.org/wiki/List_of_file_signatures
// http://toorcon.techpathways.com/uploads/headersig.txt
// not available, 2012-12-28)
// archive.org still has a version:
// http://web.archive.org/web/20110725085828/http://toorcon.techpathways.com/uploads/headersig.txt
// there are many more.
//case 1: // DOC/XLS
SIGS[1].magic_signature[0] = (u8*)str_alloc_copy("\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1");
SIGS[1].magic_sig_len[0] = 8;
// buf_copy length is sig_len+1 here (copies the trailing NUL as well)
SIGS[1].magic_signature[1] = buf_copy("\x50\x4B\x03\x04\x14\x00\x06\x00\x08", 10); // a .zip file 'sort of'
SIGS[1].magic_sig_len[1] = 9;
SIGS[1].magic_signature[2] = buf_copy("\x09\x04\x06\x00\x00\x00\x10\x00\xF6\x05\x5C\x00", 13); // older XLS format (office 95)
SIGS[1].magic_sig_len[2] = 12;
SIGS[1].magic_signature[3] = buf_copy("\x09\x02\x06\x00\x00\x00\x10\x00\xB9\x04\x5C\x00", 13); // older XLS v2
SIGS[1].magic_sig_len[3] = 12;
SIGS[1].magic_signature[4] = buf_copy("\x50\x4B\x03\x04\x14\x00\x00\x00\x00\x00", 11); //DOC Star Writer 6.0
SIGS[1].magic_sig_len[4] = 10;
SIGS[1].magic_signature[5] = buf_copy("\x31\xBE\x00\x00\x00\xAB\x00\x00", 9); //DOC MS Word for DOS v6 File
SIGS[1].magic_sig_len[5] = 8;
SIGS[1].magic_signature[6] = (u8*)str_alloc_copy("\x12\x34\x56\x78\x90\xFF"); //DOC MS Word 6.0 File
SIGS[1].magic_sig_len[6] = 6;
SIGS[1].magic_signature[7] = (u8*)str_alloc_copy("\x7F\xFE\x34\x0A"); //MS Word File
SIGS[1].magic_sig_len[7] = 4;
SIGS[1].magic_count = 8;
SIGS[1].max_len = 12;
//case 2: // Win32/DOS exe file MZ
SIGS[2].magic_signature[0] = (u8*)str_alloc_copy("MZ");
SIGS[2].magic_sig_len[0] = 2;
SIGS[2].magic_count = 1;
SIGS[2].max_len = 2;
//case 3: // PKZIP
SIGS[3].magic_signature[0] = (u8*)str_alloc_copy("\x50\x4B\x03\x04");
SIGS[3].magic_sig_len[0] = 4;
SIGS[3].magic_count = 1;
SIGS[3].max_len = 4;
//case 4: // BMP
SIGS[4].magic_signature[0] = (u8*)str_alloc_copy("BM");
SIGS[4].magic_sig_len[0] = 2;
SIGS[4].magic_count = 1;
SIGS[4].max_len = 2;
//case 5: // GIF
SIGS[5].magic_signature[0] = (u8*)str_alloc_copy("GIF87a");
SIGS[5].magic_sig_len[0] = 6;
SIGS[5].magic_signature[1] = (u8*)str_alloc_copy("GIF89a");
SIGS[5].magic_sig_len[1] = 6;
SIGS[5].magic_count = 2;
SIGS[5].max_len = 6;
//case 6: // PDF
SIGS[6].magic_signature[0] = (u8*)str_alloc_copy("%PDF");
SIGS[6].magic_sig_len[0] = 4;
SIGS[6].magic_count = 1;
SIGS[6].max_len = 4;
//case 7: // GZ
SIGS[7].magic_signature[0] = (u8*)str_alloc_copy("\x1F\x8B\x08");
SIGS[7].magic_sig_len[0] = 3;
SIGS[7].magic_count = 1;
SIGS[7].max_len = 3;
//case 8: // BZ2 (there is a 'magic' pi, but byte 4 is 1 to 9, so skip the 'pi')
SIGS[8].magic_signature[0] = (u8*)str_alloc_copy("BZh");
SIGS[8].magic_sig_len[0] = 3;
SIGS[8].magic_signature[1] = (u8*)str_alloc_copy("BZ0");
SIGS[8].magic_sig_len[1] = 3;
SIGS[8].magic_count = 2;
SIGS[8].max_len = 3;
//case 9: // FLV
SIGS[9].magic_signature[0] = (u8*)str_alloc_copy("FLV\x01");
SIGS[9].magic_sig_len[0] = 4;
SIGS[9].magic_count = 1;
SIGS[9].max_len = 4;
//case 10: // SWF
// NOTE(review): "FWS" is only 3 bytes, but sig_len/max_len are 5 - the
// comparison will read 2 bytes past the literal; verify intended length
SIGS[10].magic_signature[0] = (u8*)str_alloc_copy("FWS");
SIGS[10].magic_sig_len[0] = 5;
SIGS[10].magic_count = 1;
SIGS[10].max_len = 5;
//case 11: // MP3
SIGS[11].magic_signature[0] = (u8*)str_alloc_copy("ID3");
SIGS[11].magic_sig_len[0] = 3;
SIGS[11].magic_count = 1;
SIGS[11].max_len = 3;
/* magic 255 means 'text'; no fixed signature, just a scan window */
SIGS[255].max_len = 64;
#endif
}
/* Format teardown: release the per-candidate buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(K12);
	MEM_FREE(chk);
}
/* Install the active salt. The dyna-salt blob packs the three hash data
 * buffers back-to-back in zip_data (one pad byte between them); the H[i].h
 * pointers are not serialized, so they are fixed up lazily on first use. */
static void set_salt(void *_salt) {
	size_t off = 0;

	salt = *((PKZ_SALT **)_salt);
	if (!(salt->H[0].h && salt->H[1].h && salt->H[2].h)) {
		/* 'late' fixup: aim each hash at its slice of zip_data */
		salt->H[0].h = &salt->zip_data[off];
		off += salt->H[0].datlen + 1;
		salt->H[1].h = &salt->zip_data[off];
		off += salt->H[1].datlen + 1;
		salt->H[2].h = &salt->zip_data[off];
	}
}
/* Parse a hash line into a dyna_salt PKZ_SALT blob. Returns a pointer to a
 * static buffer holding the psalt pointer (JtR dyna_salt convention), or 0
 * on file-read failure for DT==3 entries. */
static void *get_salt(char *ciphertext)
{
/* NOTE, almost NO error checking at all in this function. Proper error checking done in valid() */
static union alignment {
unsigned char c[8];
ARCH_WORD_32 a[1];
} a;
unsigned char *salt_p = a.c;
PKZ_SALT *salt, *psalt;
long offset=0;
char *H[3] = {0,0,0};      /* decoded data blob per hash entry */
long ex_len[3] = {0,0,0};  /* byte length of each blob */
u32 offex;
int i, j;
c8 *p, *cp, *cpalloc = (char*)mem_alloc(strlen(ciphertext)+1);
int type2 = 0;
/* Needs word align on REQ_ALIGN systems. May crash otherwise (in the sscanf) */
salt = mem_calloc(1, sizeof(PKZ_SALT));
cp = cpalloc;
strcpy(cp, ciphertext);
/* skip the "$pkzip$" / "$pkzip2$" signature */
if (!strncmp(cp, "$pkzip$", 7))
p = &cp[7];
else {
p = &cp[8];
type2 = 1;
}
cp = strtokm(p, "*");
sscanf(cp, "%x", &(salt->cnt));
cp = strtokm(NULL, "*");
sscanf(cp, "%x", &(salt->chk_bytes));
/* one iteration per hash blob entry; field layout documented at top of file */
for(i = 0; i < salt->cnt; ++i) {
int data_enum;
cp = strtokm(NULL, "*");
data_enum = *cp - '0'; /* DT: 1=partial, 2=inline full, 3=external file */
cp = strtokm(NULL, "*");
#if USE_PKZIP_MAGIC
{
// mingw can't handle %hhx. Use 'normal' %x and assign back to uint_8 var
unsigned jnk;
sscanf(cp, "%x", &jnk);
salt->H[i].magic = (unsigned char)jnk;
}
salt->H[i].pSig = &SIGS[salt->H[i].magic];
#endif
if (data_enum > 1) {
/* CL/UL/CR/OF/OX fields, present only when DT != 1 */
cp = strtokm(NULL, "*");
sscanf(cp, "%x", &(salt->compLen));
cp = strtokm(NULL, "*");
sscanf(cp, "%x", &(salt->deCompLen));
cp = strtokm(NULL, "*");
sscanf(cp, "%x", &(salt->crc32));
cp = strtokm(NULL, "*");
sscanf(cp, "%lx", &offset);
cp = strtokm(NULL, "*");
sscanf(cp, "%x", &offex);
}
cp = strtokm(NULL, "*");
sscanf(cp, "%x", &(salt->H[i].compType));
cp = strtokm(NULL, "*");
sscanf(cp, "%x", &(salt->H[i].datlen));
cp = strtokm(NULL, "*");
/* CS: 4 hex digits folded into the 16-bit checksum */
for (j = 0; j < 4; ++j) {
salt->H[i].c <<= 4;
salt->H[i].c |= atoi16[ARCH_INDEX(cp[j])];
}
if (type2) {
/* TC ($pkzip2$ only): timestamp-derived checksum */
cp = strtokm(NULL, "*");
for (j = 0; j < 4; ++j) {
salt->H[i].c2 <<= 4;
salt->H[i].c2 |= atoi16[ARCH_INDEX(cp[j])];
}
} else
salt->H[i].c2 = salt->H[i].c; // fake out 2nd hash, by copying first hash
cp = strtokm(NULL, "*");
if (data_enum > 1) {
/* if 2 or 3, we have the FULL zip blob for decrypting. */
if (data_enum == 3) {
/* read from file. */
FILE *fp;
fp = fopen(cp, "rb");
if (!fp) {
/* NOTE(review): the early "return 0" paths here leak 'salt' and any H[] already allocated */
fprintf (stderr, "Error opening file for pkzip data: %s\n", cp);
MEM_FREE(cpalloc);
return 0;
}
fseek(fp, offset+offex, SEEK_SET);
if (salt->compLen < 16*1024) {
/* simply load the whole blob */
ex_len[i] = salt->compLen;
H[i] = mem_alloc(salt->compLen);
if (fread(H[i], 1, salt->compLen, fp) != salt->compLen) {
fprintf (stderr, "Error reading zip file for pkzip data: %s\n", cp);
fclose(fp);
MEM_FREE(cpalloc);
return 0;
}
fclose(fp);
salt->H[i].datlen = salt->compLen;
}
else {
/* Only load a small part (to be used in crypt_all), and set the filename in */
/* the salt->fname string, so that cmp_all can open the file, and buffered */
/* read the zip data only when it 'needs' it. */
strnzcpy(salt->fname, (const char *)cp, sizeof(salt->fname));
salt->offset = offset+offex;
ex_len[i] = 384;
H[i] = mem_alloc(384);
if (fread(H[i], 1, 384, fp) != 384) {
fprintf (stderr, "Error reading zip file for pkzip data: %s\n", cp);
fclose(fp);
MEM_FREE(cpalloc);
return 0;
}
fclose(fp);
salt->H[i].datlen = 384;
}
} else {
/* DT==2: inline hex blob, decode to binary */
ex_len[i] = salt->compLen;
H[i] = mem_alloc(salt->compLen);
for (j = 0; j < salt->H[i].datlen; ++j)
H[i][j] = (atoi16[ARCH_INDEX(cp[j*2])]<<4) + atoi16[ARCH_INDEX(cp[j*2+1])];
}
/* we also load this into the 'building' salt */
salt->compType = salt->H[i].compType;
/* Now, set the 'is full zip' flag, so we later process as a zip file. */
salt->H[i].full_zip = 1;
salt->full_zip_idx = i;
} else {
/* DT==1: partial data only; decode the hex blob */
ex_len[i] = salt->H[i].datlen;
H[i] = mem_alloc(salt->H[i].datlen);
for (j = 0; j < salt->H[i].datlen; ++j)
H[i][j] = (atoi16[ARCH_INDEX(cp[j*2])]<<4) + atoi16[ARCH_INDEX(cp[j*2+1])];
}
}
MEM_FREE(cpalloc);
// Ok, we want to add some 'logic' to remove the magic testing, except for specific cases.
// If the only file blobs we have are stored, and long blobs, then we want magic (3 file, 2 byte checksum does not need magic).
// A single 1 byte file, even if deflated, we want to keep magic. (possibly).
j = 0;
for (i = 0; i < salt->cnt; ++i) {
if (salt->H[i].compType == 8) {
if (salt->cnt == 1 && salt->chk_bytes == 1)
j += 10;
else
break;
}
j += 1;
}
// ok, if j == 1, then we 'might' want to use magic. Otherwise, we want to 'clear' all magic values.
if (j >= 20)
j = 0;
if (j && salt->chk_bytes == 2 && salt->cnt > 1)
j = 0; // we do not need to use magic, on 2 or 3 stored 2 byte checksum files. We already have 2^32 or 2^48 in the checksum checking
if (j && salt->chk_bytes == 1 && salt->cnt == 3)
j = 0; // we do not need to use magic, on 3 stored 2 byte checksum files. We already have 2^32 or 2^48 in the checksum checking
if (!j) {
for (i = 0; i < salt->cnt; ++i)
salt->H[i].magic = 0; // remove any 'magic' logic from this hash.
}
/* Build the final dyna_salt: a single allocation holding the salt header plus
 * the data blobs packed back-to-back (1 pad byte between blobs; the H[i].h
 * pointers into this buffer are re-derived by set_salt). */
psalt = mem_calloc(1, sizeof(PKZ_SALT) + ex_len[0]+ex_len[1]+ex_len[2]+2);
memcpy(psalt, salt, sizeof(*salt));
memcpy(psalt->zip_data, H[0], ex_len[0]);
MEM_FREE(H[0]);
if(salt->cnt > 1)
memcpy(psalt->zip_data+ex_len[0]+1, H[1], ex_len[1]);
MEM_FREE(H[1]);
if(salt->cnt > 2)
memcpy(psalt->zip_data+ex_len[0]+ex_len[1]+2, H[2], ex_len[2]);
MEM_FREE(H[2]);
MEM_FREE(salt);
psalt->dsalt.salt_alloc_needs_free = 1; // we used mem_calloc, so JtR CAN free our pointer when done with them.
// set the JtR core linkage stuff for this dyna_salt
memcpy(salt_p, &psalt, sizeof(psalt));
psalt->dsalt.salt_cmp_offset = SALT_CMP_OFF(PKZ_SALT, cnt);
psalt->dsalt.salt_cmp_size = SALT_CMP_SIZE(PKZ_SALT, cnt, full_zip_idx, ex_len[0]+ex_len[1]+ex_len[2]+2);
return salt_p;
}
/* Store a candidate password for slot 'index'; get_key() hands it back later. */
static void set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(saved_key[index]));
	/* flag crypt_all that cached key state is stale */
	dirty = 1;
}
/* Return the candidate password previously stored by set_key(). */
static char *get_key(int index)
{
	return &saved_key[index][0];
}
/* One-candidate check: crypt_all() stores a 1 in chk[] for candidates that
 * survived the cheap checksum test; 'binary' is unused (BINARY_SIZE is 0). */
static int cmp_one(void *binary, int idx)
{
	return (chk[idx] == 1) ? 1 : 0;
}
/* Whole-batch check: non-zero when any candidate passed the checksum test.
 * Summing the 0/1 flags avoids a conditional per candidate. */
static int cmp_all(void *binary, int count)
{
	int idx, hits = 0;

	for (idx = 0; idx < count; idx++)
		hits += chk[idx];
	return hits;
}
/* this function is used by cmp_exact_loadfile. It will load the next
* part of the file then decrypt the data, and return just how many
* bytes were loaded.
*
* This function is 'similar' to an fread(). However, it also decrypts data
*/
/* Read up to 'sizeof_n' bytes of the encrypted blob from 'fp', decrypt them
 * in place with the traditional pkzip stream cipher (keys advanced per byte),
 * and return how many bytes were produced (0 at end of blob or read error). */
static int get_next_decrypted_block(u8 *in, int sizeof_n, FILE *fp, u32 *inp_used, MY_WORD *pkey0, MY_WORD *pkey1, MY_WORD *pkey2) {
u32 new_bytes = sizeof_n, k;
u8 C;
/* we have read all the bytes, we're done */
if (*inp_used >= salt->compLen)
return 0;
if (*inp_used + new_bytes > salt->compLen)
/* this is the last block. Only load the bytes that are left */
new_bytes = salt->compLen - *inp_used;
/* return the correct 'offset', so we can track when the file buffer has been fully read */
*inp_used += new_bytes;
/* read the data */
/* NOTE(review): on a short fread we return 0 but *inp_used was already
 * advanced; callers treat 0 as EOF/failure, so this appears benign - verify */
if (fread(in, 1, new_bytes, fp) != new_bytes)
return 0;
/* decrypt the data bytes (in place, in same buffer). Easy to do, only requires 1 temp character variable. */
for (k = 0; k < new_bytes; ++k) {
/* classic PKWARE key update: crc32 into key0, LCG into key1, crc32 into key2 */
C = PKZ_MULT(in[k],(*pkey2));
pkey0->u = jtr_crc32 (pkey0->u, C);
pkey1->u = (pkey1->u + pkey0->c[KB1]) * 134775813 + 1;
pkey2->u = jtr_crc32 (pkey2->u, pkey1->c[KB2]);
in[k] = C;
}
/* return the number of bytes we read from the file on this read */
return new_bytes;
}
/* Ok, this is the more complex example. Here we have to load the file (which may be HUGE)
* decrypt the bytes from this file, and then inflate that data, and crc the bytes which we
* have inflated from that stream. Then in the end, when we use all input bytes, if we have
* inflated the right amount of data, ended up with a Z_STREAM_END, and the proper sized
* decompression buffer, and the CRC matches, then we know we have the correct password
*
* This function is called from cmp_exact(), when cmp_exact finds out we have to decrypt from
* the stored .zip file.
*
* this code is modifications made to the zpipe.c 'example' code from the zlib web site.
*/
#define CHUNK (64*1024)
/* Full validation against an on-disk zip: stream the encrypted blob from the
 * file, decrypt with candidate 'index''s key state, inflate (or just CRC for
 * stored entries), and compare lengths and CRC32 against the salt's values.
 * Returns 1 on match (or on I/O failure, erring toward "possible hit"), 0 otherwise. */
static int cmp_exact_loadfile(int index)
{
int ret;
u32 have, k;
z_stream strm;
unsigned char in[CHUNK];
unsigned char out[CHUNK];
FILE *fp;
MY_WORD key0, key1, key2;
u8 *b, C;
u32 inp_used, decomp_len=0;
u32 crc = 0xFFFFFFFF;
/* Open the zip file, and 'seek' to the proper offset of the binary zip blob */
/* NOTE(review): the error-message wording below ("We are a possible password...")
 * is garbled English but matches upstream; left untouched here */
fp = fopen(salt->fname, "rb");
if (!fp) {
fprintf (stderr, "\nERROR, the zip file: %s has been removed.\nWe are a possible password has been found, but FULL validation can not be done!\n", salt->fname);
return 1;
}
if (fseek(fp, salt->offset, SEEK_SET)) {
fprintf (stderr, "\nERROR, the zip file: %s fseek() failed.\nWe are a possible password has been found, but FULL validation can not be done!\n", salt->fname);
fclose(fp);
return 1;
}
/* 'seed' the decryption with the IV. We do NOT use these bytes, they simply seed us. */
key0.u = K12[index*3], key1.u = K12[index*3+1], key2.u = K12[index*3+2];
k=12;
if (fread(in, 1, 12, fp) != 12) {
fprintf (stderr, "\nERROR, the zip file: %s fread() failed.\nWe are a possible password has been found, but FULL validation can not be done!\n", salt->fname);
fclose(fp);
return 1;
}
/* run the 12 IV bytes (held in the salt's partial data) through the key schedule */
b = salt->H[salt->full_zip_idx].h;
do {
C = PKZ_MULT(*b++,key2);
key0.u = jtr_crc32 (key0.u, C);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
}
while(--k);
/* this is 'sort of' our file pointer. It is the 'index' into the file's encrypted, compressed data buffer. */
/* we have read the 12 bytes of IV data, and updated our keys. Now we start processing the rest of the bytes */
/* to get the data to inflate, and crc check */
inp_used = 12;
if (salt->H[salt->full_zip_idx].compType == 0) {
// handle a stored blob (we do not have to decrypt it.
int avail_in;
crc = 0xFFFFFFFF;
avail_in = get_next_decrypted_block(in, CHUNK, fp, &inp_used, &key0, &key1, &key2);
while (avail_in) {
for (k = 0; k < avail_in; ++k)
crc = jtr_crc32(crc,in[k]);
avail_in = get_next_decrypted_block(in, CHUNK, fp, &inp_used, &key0, &key1, &key2);
}
fclose(fp);
/* stored data: only the final (inverted) CRC has to match */
return ~crc == salt->crc32;
}
/* allocate inflate state */
strm.zalloc = Z_NULL;
strm.zfree = Z_NULL;
strm.opaque = Z_NULL;
strm.avail_in = 0;
strm.next_in = Z_NULL;
/* -15: raw deflate stream, no zlib header (zip convention) */
ret = inflateInit2(&strm, -15);
if (ret != Z_OK) /* if zlib is hosed, then likely there is no reason at all to continue. Better to exit, and let the user 'fix' the system */
perror("Error, initializing the libz inflateInit2() system\n");
/* decompress until deflate stream ends or end of file */
do {
strm.avail_in = get_next_decrypted_block(in, CHUNK, fp, &inp_used, &key0, &key1, &key2);
if (ferror(fp)) {
inflateEnd(&strm);
fclose(fp);
fprintf (stderr, "\nERROR, the zip file: %s fread() failed.\nWe are a possible password has been found, but FULL validation can not be done!\n", salt->fname);
return 1;
}
if (strm.avail_in == 0)
break;
strm.next_in = in;
/* run inflate() on input until output buffer not full */
do {
strm.avail_out = CHUNK;
strm.next_out = out;
ret = inflate(&strm, Z_NO_FLUSH);
switch (ret) {
case Z_NEED_DICT:
case Z_DATA_ERROR:
case Z_MEM_ERROR:
/* corrupt stream => wrong password */
inflateEnd(&strm);
fclose(fp);
return 0;
}
have = CHUNK - strm.avail_out;
/* now update our crc value */
for (k = 0; k < have; ++k)
crc = jtr_crc32(crc,out[k]);
decomp_len += have;
} while (strm.avail_out == 0);
/* done when inflate() says it's done */
} while (ret != Z_STREAM_END);
/* clean up and return */
inflateEnd(&strm);
fclose(fp);
/* match requires: clean stream end, all input consumed, exact inflated size, and CRC agreement */
return ret == Z_STREAM_END && inp_used == salt->compLen && decomp_len == salt->deCompLen && salt->crc32 == ~crc;
}
/*
 * Final, definitive check for a candidate password.  For checksum-only
 * hashes we must accept (return 1) since no more data exists to test.
 * When the whole encrypted blob is in memory (salt->fname[0] == 0) we
 * decrypt, inflate (unless stored) and CRC it right here; otherwise the
 * heavier file-based path in cmp_exact_loadfile() is used.
 */
static int cmp_exact(char *source, int index)
{
const u8 *b;
u8 C, *decompBuf, *decrBuf, *B;
u32 k, crc;
MY_WORD key0, key1, key2;
z_stream strm;
int ret;
if (salt->H[salt->full_zip_idx].full_zip == 0)
/* we do not have a zip file, this is 'checksum' only
 * POSSIBLY, we should log and output to screen that
 * we are not 100% 'sure' we have the right password!! */
return 1;
#ifdef ZIP_DEBUG
fprintf(stderr, "FULL zip test being done. (pass=%s)\n", saved_key[index]);
#endif
if (salt->fname[0] == 0) {
/* we have the whole zip blob in memory, simply allocate a decrypt buffer, decrypt
 * in one step, crc and be done with it. This is the 'trivial' type. */
decrBuf = mem_alloc(salt->compLen-12);
key0.u = K12[index*3], key1.u = K12[index*3+1], key2.u = K12[index*3+2];
b = salt->H[salt->full_zip_idx].h;
k=12;
/* consume the 12-byte IV, updating the stream keys (output discarded) */
do {
C = PKZ_MULT(*b++,key2);
key0.u = jtr_crc32 (key0.u, C);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
}
while(--k);
B = decrBuf;
k = salt->compLen-12;
/* decrypt the remaining compLen-12 bytes into decrBuf */
do {
C = PKZ_MULT(*b++,key2);
key0.u = jtr_crc32 (key0.u, C);
*B++ = C;
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
} while (--k);
if (salt->H[salt->full_zip_idx].compType == 0) {
// handle a stored blob (decrypted but NOT inflated; just CRC the plaintext).
crc = 0xFFFFFFFF;
for (k = 0; k < salt->compLen-12; ++k)
crc = jtr_crc32(crc,decrBuf[k]);
MEM_FREE(decrBuf);
return ~crc == salt->crc32;
}
strm.zalloc = Z_NULL; strm.zfree = Z_NULL; strm.opaque = Z_NULL; strm.next_in = Z_NULL; strm.avail_in = 0;
ret = inflateInit2(&strm, -15); /* 'raw', since we do not have gzip header, or gzip crc. .ZIP files are 'raw' implode data. */
if (ret != Z_OK)
perror("Error, initializing the libz inflateInit2() system\n");
decompBuf = mem_alloc(salt->deCompLen);
strm.next_in = decrBuf;
strm.avail_in = salt->compLen-12;
strm.avail_out = salt->deCompLen;
strm.next_out = decompBuf;
/* one-shot inflate: the output buffer is sized for the whole member */
ret = inflate(&strm, Z_SYNC_FLUSH);
inflateEnd(&strm);
if (ret != Z_STREAM_END || strm.total_out != salt->deCompLen) {
MEM_FREE(decompBuf);
MEM_FREE(decrBuf);
return 0;
}
crc = 0xFFFFFFFF;
for (k = 0; k < strm.total_out; ++k)
crc = jtr_crc32(crc,decompBuf[k]);
MEM_FREE(decompBuf);
MEM_FREE(decrBuf);
return ~crc == salt->crc32;
}
/* we have a stand alone function to handle this more complex method of
 * loading from file, decrypting, decompressing, and crc'ing the data
 * It is complex enough of a task, to have its own function. */
return cmp_exact_loadfile(index);
}
#if USE_PKZIP_MAGIC
/* Number of UTF-8 continuation bytes implied by a lead byte >= 0xC0,
 * indexed by (lead_byte & 0x3F): 0xC0-0xDF -> 1, 0xE0-0xEF -> 2,
 * 0xF0-0xF7 -> 3, 0xF8-0xFB -> 4, 0xFC-0xFF -> 5. */
const char exBytesUTF8[64] = {
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5
};
/*
 * Checks whether the bytes at 'source' start a well-formed UTF-8 sequence;
 * 'length' is how many bytes are available.  Returns the full sequence
 * length (lead + continuation bytes) when legal, 1 for a plain byte below
 * 0xC0, or -1 when malformed or truncated.  The outer switch intentionally
 * falls through from the last continuation byte down to the lead-byte
 * checks (classic Unicode, Inc. isLegalUTF8() structure).
 */
static int isLegalUTF8_char(const u8 *source, int length) {
u8 a;
int len;
const u8 *srcptr;
if (*source < 0xC0)
return 1;
/* continuation-byte count implied by the lead byte */
len = exBytesUTF8[*source&0x3f];
srcptr = source+len;
if (len+1 > length)
return -1; /* sequence would run past the available buffer */
switch (len) {
default: return -1;
/* Everything else falls through when "true"... */
case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return -1;
case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return -1;
case 2: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return -1;
/* for 3-byte (and longer) forms, reject overlong/surrogate/out-of-range
 * encodings based on the first continuation byte */
switch (*source) {
/* no fall-through in this inner switch */
case 0xE0: if (a < 0xA0) return -1; break;
case 0xED: if (a > 0x9F) return -1; break;
case 0xF0: if (a < 0x90) return -1; break;
case 0xF4: if (a > 0x8F) return -1;
}
case 1: if (*source >= 0x80 && *source < 0xC2) return -1; /* overlong 2-byte form */
}
if (*source > 0xF4) return -1; /* beyond U+10FFFF */
return len+1;
}
/*
 * Heuristic "is this plausible text?" test used for magic==255 candidates.
 * Accepts ASCII with a few control characters, valid UTF-8 sequences, and
 * BOM-prefixed UTF-16/UTF-32 data (in which case only the low bytes are
 * checked).  Returns 1 when the buffer looks like text, 0 otherwise.
 */
static int validate_ascii(const u8 *out, int inplen) {
int i;
int unicode=0; /* 0 = byte text, 1 = utf16 (skip 1 pad byte), 3 = utf32 (skip 3) */
for (i = 0; i < inplen-1; ++i) {
if (out[i] > 0x7E) {
// first check to 'see' if this is a valid utf8 character. If so, let it 'pass'.
if (unicode)
return 0; // in unicode mode, we ONLY handle 'ascii' bytes in the low byte.
if (out[i] > 0xC0) {
int len;
if(i > inplen-4)
return 1; /* too close to the end to validate a full sequence; accept */
len = isLegalUTF8_char(&out[i], 5);
if (len < 0) return 0;
i += (len-1);
}
else {
if (i) {
// check for utf8 BOM \xEF \xBB \xBF
if (out[0] == 0xEF && out[1] == 0xBB && out[2] == 0xBF) {
i = 2;
continue;
}
/* check for Unicode BOM (FF FE for utf16le, FE FF for utf16be, FF FE 00 00 for utf32le, not sure if 00 00 FE FF is utf32be, but likely is) */
if (out[0] == 0xFF && out[1] == 0xFE) {
unicode = 1;
i++;
continue;
}
/* unicode BE bom */
if (out[0] == 0xFE && out[1] == 0xFF) {
unicode = 1;
i += 2;
continue;
}
/* utf32 LE */
/* NOTE(review): unreachable — the utf16le "FF FE" test above already
 * matched this prefix and continued; left as-is to preserve behavior. */
if (out[0] == 0xFF && out[1] == 0xFE && out[2] == 0 && out[3] == 0) {
unicode = 3;
i += 3;
continue;
}
/* utf32 BE bom */
if (out[0] == 0 && out[1] == 0 && out[2] == 0xFE && out[3] == 0xFF) {
unicode = 3;
i += 6;
continue;
}
// allow a 'single' byte > 0x7E as long as bytes following are ascii.
if (out[1] <= 0x7E && out[1] >= 0x20) {
++i;
continue;
}
return 0;
}
}
} else if (out[i] < 0x20) {
/* we do not need to deal with DOS EOF char 0x1a, since we will never have the 'end' of the file */
/* we do allow the ESC character for ANSI files, however, they are frequently also binary, so will fail in other places */
if (out[i]!='\n' && out[i]!='\r' && out[i]!='\t' && out[i]!=0x1B)
return 0;
}
i += unicode; // skip the null bytes
}
return 1;
}
/*
 * Returns 1 as soon as any of the configured magic signatures in pSig
 * matches the start of the decrypted data at p, 0 when none match.
 * (len is currently unused; each signature carries its own length.)
 */
static int CheckSigs(const u8 *p, int len, ZIP_SIGS *pSig) {
	int sig, pos;

	for (sig = 0; sig < pSig->magic_count; ++sig) {
		const u8 *expect = pSig->magic_signature[sig];
		int matched = 1;

		for (pos = 0; pos < pSig->magic_sig_len[sig]; ++pos) {
			if (p[pos] != expect[pos]) {
				matched = 0;
				break;
			}
		}
		if (matched)
			return 1;
	}
	return 0;
}
#endif
/* note, Buf is the 'full' decrypted zip buffer (len bytes long). It DOES contain the first 3 bits, which have already
* been decoded, and have told us we had a code 2 (var table block)
* all done without BITS(), PULLBYTE(), BITSNEEDED() macros. We 'know' the data we need, and we know that we have
* 'enough', so we do not worry about all of the overhead, and validation logic.
*
* In testing, this function catches ALL bad decryptions, except about 1/300 to 1/350. So, it is not too bad.
*/
/*
 * Cheap plausibility check of a deflate dynamic-Huffman (BTYPE=2) block
 * header.  Validates HLIT/HDIST ranges and that the code-length code
 * counts form a neither over-subscribed nor incomplete Huffman set.
 * Returns 1 when the header is plausible, 0 when it proves the decryption
 * is wrong.
 */
MAYBE_INLINE static int check_inflate_CODE2(u8 *next) {
u32 bits, hold, thisget, have, i;
int left;
u32 ncode;
u32 ncount[2]; // ends up being an array of 8 u8 count values. But we can clear it, and later 'check' it with 2 u32 instructions.
u8 *count; // this will point to ncount array. NOTE, this is alignment required 'safe' for Sparc systems or others requiring alignment.
#if (ARCH_LITTLE_ENDIAN==1) && (ARCH_ALLOWS_UNALIGNED==1)
// 'speedup' for x86 type systems. pkzip/inflate was designed here, so why not use it.
hold = *((u32*)next);
#else
hold = *next + (((u32)next[1])<<8) + (((u32)next[2])<<16) + (((u32)next[3])<<24);
#endif
next += 3; // we pre-increment when pulling it in the loop, thus we need to be 1 byte back.
hold >>= 3; // we already processed 3 bits
count = (u8*)ncount;
if (257+(hold&0x1F) > 286) return 0; // nlen, but we do not use it.
hold >>= 5;
if(1+(hold&0x1F) > 30) return 0; // ndist, but we do not use it.
hold >>= 5;
/* HCLEN: number of 3-bit code-length-code lengths that follow */
ncode = 4+(hold&0xF);
hold >>= 4;
// we have 15 bits left.
hold += ((u32)(*++next)) << 15;
hold += ((u32)(*++next)) << 23;
// we now have 31 bits. We need to know this for the loop below.
bits = 31;
// We have 31 bits now, in accum. If we are processing 19 codes, we do 7, then have 10 bits.
// Add 16 more and have 26, then use 21, have 5. Then load 16 more, then eat 15 of them.
have = 0;
ncount[0] = ncount[1] = 0;
/* tally how many code-length codes have each length 0..7, pulling in
 * fresh bytes as the 31-bit accumulator drains */
for (;;) {
if (have+7>ncode)
thisget = ncode-have;
else
thisget = 7;
have += thisget;
bits -= thisget*3;
while (thisget--) {
++count[hold&7];
hold>>=3;
}
if (have == ncode)
break;
hold += ((u32)(*++next)) << bits;
bits += 8;
hold += ((u32)(*++next)) << bits;
bits += 8;
}
count[0] = 0;
if (!ncount[0] && !ncount[1]) return 0; /* if no codes at all, then simply bail, that is invalid */
/* check for an over-subscribed or incomplete set of lengths */
/* this will catch about 319 out of 320 'bad' passwords that */
/* have made it into this function. Note, only 1/4 of the */
/* passwords which pass the checksum, can make it here. Of */
/* those, we drop 319/320 or about that many (a good check!) */
left = 1;
for (i = 1; i <= 7; ++i) {
left <<= 1;
left -= count[i];
if (left < 0)
return 0; /* over-subscribed */
}
if (left > 0)
return 0; /* incomplete set */
return 1; /* Passed this check! */
}
//static code const * const lcode = lenfix;
//static code const * const dcode = distfix;
/* This function handles inflate CODE type 1. This is a 'fixed' table code. We set the fixed table, */
/* and then inflate some data (without writing anything. If we find any BAD lookback data, we can */
/* return a failure. We have 24 bytes of inflate data, and this almost always is more than enough */
/* to turn up an error. If we find we need more, we will do more than 24 */
/*
 * Plausibility check of a deflate fixed-Huffman (BTYPE=1) block: partially
 * inflates 'left' bytes using the fixed lenfix/distfix tables without
 * writing any output, only tracking how much output *would* exist (whave).
 * Returns 0 when an impossible symbol or a too-far-back distance proves
 * the decryption wrong, 1 when the data is exhausted without error.
 */
MAYBE_INLINE static int check_inflate_CODE1(u8 *next, int left) {
u32 whave = 0, op, bits, hold,len;
code here;
#if (ARCH_LITTLE_ENDIAN==1) && (ARCH_ALLOWS_UNALIGNED==1)
// 'speedup' for x86 type systems. pkzip/inflate was designed here, so why not use it.
hold = *((u32*)next);
#else
hold = *next + (((u32)next[1])<<8) + (((u32)next[2])<<16) + (((u32)next[3])<<24);
#endif
next += 3; // we pre-increment when pulling it in the loop, thus we need to be 1 byte back.
left -= 4;
hold >>= 3; // we already processed 3 bits
bits = 32-3;
for (;;) {
/* keep at least 15 bits in the accumulator (max code + extra bits) */
if (bits < 15) {
if (left < 2)
return 1; // we are out of bytes. Return we had no error.
left -= 2;
hold += (u32)(*++next) << bits;
bits += 8;
hold += (u32)(*++next) << bits;
bits += 8;
}
here=lenfix[hold & 0x1FF];
op = (unsigned)(here.bits);
hold >>= op;
bits -= op;
op = (unsigned)(here.op);
if (op == 0) /* literal */
++whave;
else if (op & 16) { /* length base */
len = (unsigned)(here.val);
op &= 15; /* number of extra bits */
if (op) {
if (bits < op) {
if (!left)
return 1; /*we are out of bytes. Return we had no error.*/
--left;
hold += (u32)(*++next) << bits;
bits += 8;
}
len += (unsigned)hold & ((1U << op) - 1);
hold >>= op;
bits -= op;
}
if (bits < 15) {
if (left < 2)
return 1; /*we are out of bytes. Return we had no error.*/
left -= 2;
hold += (u32)(*++next) << bits;
bits += 8;
hold += (u32)(*++next) << bits;
bits += 8;
}
here = distfix[hold & 0x1F];
// dodist:
op = (unsigned)(here.bits);
hold >>= op;
bits -= op;
op = (unsigned)(here.op);
if (op & 16) { /* distance base */
u32 dist = (unsigned)(here.val);
op &= 15; /* number of extra bits */
if (bits < op) {
if (!left)
return 1; /*we are out of bytes. Return we had no error.*/
--left;
hold += (u32)(*++next) << bits;
bits += 8;
if (bits < op) {
if (!left)
return 1; /*we are out of bytes. Return we had no error.*/
--left;
hold += (u32)(*++next) << bits;
bits += 8;
}
}
dist += (unsigned)hold & ((1U << op) - 1);
/* a lookback further than the output produced so far is impossible */
if (dist > whave)
return 0; /*invalid distance too far back*/
hold >>= op;
bits -= op;
//***** start of patched code from Pavel Semjanov (see original code below)
whave += len;
}
else
return 0; /*invalid distance code*/
}
else if (op & 32) {
// end of block [may present in short sequences, but only at the end.] NOTE, we need to find out if we EVER hit the end of a block, at only 24 bytes???
if (left == 0)
return 1;
return 0;
}
else {
return 0; // invalid literal/length code.
}
//***** End of patched code from Pavel
}
}
// original code block (for above), prior to patch from Pavel Semjanov [pavel@semjanov.com]
// this code would be a direct drop in between the comments starting and stopping with //***** above
// also the dodist label was commented out (no longer used).
#if 0
whave += dist;
}
else if ((op & 64) == 0) { /* 2nd level distance code */
here = distfix[here.val + (hold & ((1U << op) - 1))];
goto dodist;
}
else
return 0; /*invalid distance code*/
}
else if (op & 64) {
// 2nd level length code.
//here = lcode[here.val + (hold & ((1U << op) - 1))];
//goto dolen;
// this causes an infinite loop. Also, I VERY seriously doubt, this will EVER happen in the first
// 24 bytes of code. NOTE, there may be problems, in the fact this causes a inf loop!, but for now,
// simply return 0, then debug later.
return 0;
}
else if (op & 32) {
// end of block NOTE, we need to find out if we EVER hit the end of a block, at only 24 bytes???
// It is VERY likely we do SHOULD NOT EVER hit this. If that is the case, return that this block is bogus.
// check next OP (if we have enough bits left), if CODE=3, fail. If code==0, check
return 0;
}
else {
return 0; // invalid literal/length code.
}
#endif
/*
* Crypt_all simply performs the checksum .zip validatation of the data. It performs
* this for ALL hashes provided. If any of them fail to match, then crypt all puts the
* complement of the 'proper' checksum of the first hash into the output. These 2 bytes
* are checked against the binary for this salt/password combination. Thus, if any
* checksum fails, it will never match binary. However, if ALL of the checksums match
* we then put the checksum bytes from the first hash, into our output data. Then, when
* the binary check (cmp_all, cmp_one) is performed, it WILL match. NOTE, this does
* not mean we have found the password. Just that all hashes quick check checksums
* for this password 'work'.
*/
/*
 * Quick checksum-level test of every candidate against every hash of the
 * current salt.  Sets chk[idx]=1 only when ALL per-file checks pass
 * (2-byte checksum, inflate-header plausibility, optional signature/ascii
 * checks, optional partial full-zip inflate); cmp_* then does the real
 * binary compare / full validation.  Returns the unchanged count.
 */
static int crypt_all(int *pcount, struct db_salt *_salt)
{
const int _count = *pcount;
int idx;
#if (ZIP_DEBUG==2)
static int CNT, FAILED, FAILED2;
++CNT;
#endif
// pkzip kinda sucks a little for multi-threading, since there is different amount of work to be
// done, depenging upon the password. Thus, we pack in OMP_MOD passwords into each thread, and
// hopefully some of the differnces will even themselves out in the end. If we have 2 threads
// then thread 1 gets 0 to 127 password, and thread 2 gets 128-256. Once they 'get' their data,
// there should be no mutexing of the runtime data, thus the threads should run fast.
// Also, since we have 'multiple' files in a .zip file (and multiple checksums), we bail as at the
// first time we fail to match checksum. So, there may be some threads which check more checksums.
// Again, hopefully globbing many tests into a threads working set will flatten out these differences.
#ifdef _OPENMP
#pragma omp parallel for private(idx)
#endif
for (idx = 0; idx < _count; ++idx) {
int cur_hash_count = salt->cnt;
int cur_hash_idx = -1;
MY_WORD key0, key1, key2;
u8 C;
const u8 *b;
u8 curDecryBuf[256];
#if USE_PKZIP_MAGIC
u8 curInfBuf[128];
#endif
int k, SigChecked;
u16 e, e2, v1, v2;
z_stream strm;
int ret;
/* use the pwkey for each hash. We mangle on the 12 bytes of IV to what was computed in the pwkey load. */
if (dirty) {
u8 *p = (u8*)saved_key[idx];
/* load the 'pwkey' one time, put it into the K12 array */
key0.u = 0x12345678UL; key1.u = 0x23456789UL; key2.u = 0x34567890UL;
do {
key0.u = jtr_crc32 (key0.u, *p++);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
} while (*p);
K12[idx*3] = key0.u, K12[idx*3+1] = key1.u, K12[idx*3+2] = key2.u;
goto SkipKeyLoadInit;
}
do
{
// 2nd, and later times through the loop, AND if keys are not dirty (i.e. multiple salts
// for the same key load), we do NOT perform the key compute, but instead load the pre-computed
// key data from the array.
key0.u = K12[idx*3], key1.u = K12[idx*3+1], key2.u = K12[idx*3+2];
SkipKeyLoadInit:;
b = salt->H[++cur_hash_idx].h;
k=11;
e = salt->H[cur_hash_idx].c;
e2 = salt->H[cur_hash_idx].c2;
/* decrypt the first 11 IV bytes, only updating the keys */
do
{
C = PKZ_MULT(*b++,key2);
key0.u = jtr_crc32 (key0.u, C);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
}
while(--k);
/* if the hash is a 2 byte checksum type, then check that value first */
/* There is no reason to continue if this byte does not check out. */
if (salt->chk_bytes == 2 && C != (e&0xFF) && C != (e2&0xFF))
goto Failed_Bailout;
/* decrypt byte 12: the high checksum byte */
C = PKZ_MULT(*b++,key2);
#if 1
// https://github.com/magnumripper/JohnTheRipper/issues/467
// Fixed, JimF. Added checksum test for crc32 and timestamp.
if (C != (e>>8) && C != (e2>>8))
goto Failed_Bailout;
#endif
// Now, update the key data (with that last byte.
key0.u = jtr_crc32 (key0.u, C);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
// Ok, we now have validated this checksum. We need to 'do some' extra pkzip validation work.
// What we do here, is to decrypt a little data (possibly only 1 byte), and perform a single
// 'inflate' check (if type is 8). If type is 0 (stored), and we have a signature check, then
// we do that here. Also, if the inflate code is a 0 (stored block), and we do sig check, then
// we can do that WITHOUT having to call inflate. however, if there IS a sig check, we will have
// to call inflate on 'some' data, to get a few bytes (or error code). Also, if this is a type
// 2 or 3, then we do the FULL inflate, CRC check here.
e = 0;
// First, we want to get the inflate CODE byte (the first one).
C = PKZ_MULT(*b++,key2);
SigChecked = 0;
if ( salt->H[cur_hash_idx].compType == 0) {
// handle a stored file.
// We can ONLY deal with these IF we are handling 'magic' testing.
#if USE_PKZIP_MAGIC
// Ok, if we have a signature, check it here, WITHOUT having to call zLib's inflate.
if (salt->H[cur_hash_idx].pSig->max_len) {
int len = salt->H[cur_hash_idx].pSig->max_len;
if (len > salt->H[cur_hash_idx].datlen-12)
len = salt->H[cur_hash_idx].datlen-12;
SigChecked = 1;
curDecryBuf[0] = C;
/* decrypt enough plaintext to cover the longest signature */
for (; e < len;) {
key0.u = jtr_crc32 (key0.u, curDecryBuf[e]);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
curDecryBuf[++e] = PKZ_MULT(*b++,key2);
}
if (salt->H[cur_hash_idx].magic == 255) {
if (!validate_ascii(&curDecryBuf[5], len-5))
goto Failed_Bailout;
} else {
if (!CheckSigs(curDecryBuf, len, salt->H[cur_hash_idx].pSig))
goto Failed_Bailout;
}
}
#endif
continue;
}
#if 1
// https://github.com/magnumripper/JohnTheRipper/issues/467
// Ok, if this is a code 3, we are done.
// Code moved to after the check for stored type. (FIXED) This check was INVALID for a stored type file.
if ( (C & 6) == 6)
goto Failed_Bailout;
#endif
if ( (C & 6) == 0) {
/* deflate 'stored' block (BTYPE=0) */
// Check that checksum2 is 0 or 1. If not, I 'think' we can be done
if (C > 1)
goto Failed_Bailout;
// now get 4 bytes. This is the length. It is made up of 2 16 bit values.
// these 2 values are checksumed, so it is easy to tell if the data is WRONG.
// correct data is u16_1 == (u16_2^0xFFFF)
curDecryBuf[0] = C;
for (e = 0; e <= 4; ) {
key0.u = jtr_crc32 (key0.u, curDecryBuf[e]);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
curDecryBuf[++e] = PKZ_MULT(*b++,key2);
}
v1 = curDecryBuf[1] | (((u16)curDecryBuf[2])<<8);
v2 = curDecryBuf[3] | (((u16)curDecryBuf[4])<<8);
if (v1 != (v2^0xFFFF))
goto Failed_Bailout;
#if USE_PKZIP_MAGIC
// Ok, if we have a signature, check it here, WITHOUT having to call zLib's inflate.
if (salt->H[cur_hash_idx].pSig->max_len) {
int len = salt->H[cur_hash_idx].pSig->max_len + 5;
if (len > salt->H[cur_hash_idx].datlen-12)
len = salt->H[cur_hash_idx].datlen-12;
SigChecked = 1;
for (; e < len;) {
key0.u = jtr_crc32 (key0.u, curDecryBuf[e]);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
curDecryBuf[++e] = PKZ_MULT(*b++,key2);
}
if (salt->H[cur_hash_idx].magic == 255) {
if (!validate_ascii(&curDecryBuf[5], len-5))
goto Failed_Bailout;
} else {
if (!CheckSigs(&curDecryBuf[5], len-5, salt->H[cur_hash_idx].pSig))
goto Failed_Bailout;
}
}
#endif
}
else {
// Ok, now we have handled inflate code type 3 and inflate code 0 (50% of 'random' data)
// We now have the 2 'hard' ones left (fixed table, and variable table)
curDecryBuf[0] = C;
if ((C&6) == 4) { // inflate 'code' 2 (variable table)
#if (ZIP_DEBUG==2)
static unsigned count, found;
++count;
#endif
// we need 4 bytes, + 2, + 4 at most.
for (; e < 10;) {
key0.u = jtr_crc32 (key0.u, curDecryBuf[e]);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
curDecryBuf[++e] = PKZ_MULT(*b++,key2);
}
if (!check_inflate_CODE2(curDecryBuf))
goto Failed_Bailout;
#if (ZIP_DEBUG==2)
fprintf (stderr, "CODE2 Pass=%s count = %u, found = %u\n", saved_key[idx], count, ++found);
#endif
}
else {
/* fixed-table block (BTYPE=1): decrypt up to 36 bytes and dry-run inflate them */
int til;
#if (ZIP_DEBUG==2)
static unsigned count, found;
++count;
#endif
til = 36;
if (salt->H[cur_hash_idx].datlen-12 < til)
til = salt->H[cur_hash_idx].datlen-12;
for (; e < til;) {
key0.u = jtr_crc32 (key0.u, curDecryBuf[e]);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
curDecryBuf[++e] = PKZ_MULT(*b++,key2);
}
if (!check_inflate_CODE1(curDecryBuf, til))
goto Failed_Bailout;
#if (ZIP_DEBUG==2)
fprintf (stderr, "CODE1 Pass=%s count = %u, found = %u\n", saved_key[idx], count, ++found);
#endif
}
}
#if USE_PKZIP_MAGIC
// Ok, now see if we need to check sigs, or do a FULL inflate/crc check.
if (!SigChecked && salt->H[cur_hash_idx].pSig->max_len) {
int til = 180;
if (salt->H[cur_hash_idx].datlen-12 < til)
til = salt->H[cur_hash_idx].datlen-12;
for (; e < til;) {
key0.u = jtr_crc32 (key0.u, curDecryBuf[e]);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
curDecryBuf[++e] = PKZ_MULT(*b++,key2);
}
strm.zalloc = Z_NULL; strm.zfree = Z_NULL; strm.opaque = Z_NULL; strm.next_in = Z_NULL;
strm.avail_in = til;
ret = inflateInit2(&strm, -15); /* 'raw', since we do not have gzip header, or gzip crc. .ZIP files are 'raw' implode data. */
if (ret != Z_OK)
perror("Error, initializing the libz inflateInit2() system\n");
strm.next_in = curDecryBuf;
strm.avail_out = sizeof(curInfBuf);
strm.next_out = curInfBuf;
ret = inflate(&strm, Z_SYNC_FLUSH);
inflateEnd(&strm);
if (ret != Z_OK) {
// we need to handle zips smaller than sizeof curInfBuf. If we find a zip of this
// size, the return is Z_STREAM_END, BUT things are fine.
if (ret == Z_STREAM_END && salt->deCompLen == strm.total_out)
; // things are ok.
else
goto Failed_Bailout;
}
if (!strm.total_out)
goto Failed_Bailout;
ret = salt->H[cur_hash_idx].pSig->max_len;
if (salt->H[cur_hash_idx].magic == 255) {
if (!validate_ascii(curInfBuf, strm.total_out))
goto Failed_Bailout;
} else {
if (strm.total_out < ret)
goto Failed_Bailout;
if (!CheckSigs(curInfBuf, strm.total_out, salt->H[cur_hash_idx].pSig))
goto Failed_Bailout;
}
}
#endif
if (salt->H[cur_hash_idx].full_zip) {
/* for full-zip hashes, try a deeper partial inflate (200 decrypted bytes)
 * before letting cmp_exact do the expensive final validation */
u8 inflateBufTmp[1024];
if (salt->compLen > 240 && salt->H[cur_hash_idx].datlen >= 200) {
for (;e < 200;) {
key0.u = jtr_crc32 (key0.u, curDecryBuf[e]);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
curDecryBuf[++e] = PKZ_MULT(*b++,key2);
}
strm.zalloc = Z_NULL; strm.zfree = Z_NULL; strm.opaque = Z_NULL; strm.next_in = Z_NULL;
strm.avail_in = e;
ret = inflateInit2(&strm, -15); /* 'raw', since we do not have gzip header, or gzip crc. .ZIP files are 'raw' implode data. */
if (ret != Z_OK)
perror("Error, initializing the libz inflateInit2() system\n");
strm.next_in = curDecryBuf;
strm.avail_out = sizeof(inflateBufTmp);
strm.next_out = inflateBufTmp;
ret = inflate(&strm, Z_SYNC_FLUSH);
inflateEnd(&strm);
if (ret != Z_OK) {
#if (ZIP_DEBUG==2)
fprintf(stderr, "fail=%d fail2=%d tot="LLd"\n", ++FAILED, FAILED2, ((long long)CNT)*_count);
#endif
goto Failed_Bailout;
}
}
goto KnownSuccess;
}
}
while(--cur_hash_count);
/* We got a checksum HIT!!!! All hash checksums matched. */
/* We load the proper checksum value for the gethash */
KnownSuccess: ;
chk[idx] = 1;
continue;
Failed_Bailout: ;
/* We load the wrong checksum value for the gethash */
chk[idx] = 0;
}
/* clear the 'dirty' flag. Then on multiple different salt calls, we will not have to */
/* encrypt the passwords again. They will have already been loaded in the K12[] array. */
dirty = 0;
return _count;
}
/*
 * John the Ripper format descriptor: first sub-struct holds the format's
 * static parameters, second wires the functions implemented above into
 * the cracking framework.
 */
struct fmt_main fmt_pkzip = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_DYNA_SALT,
{ NULL },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
fmt_default_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_dyna_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* HAVE_LIBZ */
|
blake2bp-ref.c | /*
BLAKE2 reference source code package - reference C implementations
Written in 2012 by Samuel Neves <sneves@dei.uc.pt>
To the extent possible under law, the author(s) have dedicated all copyright
and related and neighboring rights to this software to the public domain
worldwide. This software is distributed without any warranty.
You should have received a copy of the CC0 Public Domain Dedication along with
this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "blake2.h"
#include "blake2-impl.h"
#define PARALLELISM_DEGREE 4
/* Initialize one leaf state of the 4-way BLAKE2bp tree; 'offset' selects
   which leaf (its node_offset in the tree parameters). */
static inline int blake2bp_init_leaf( blake2b_state *S, uint8_t outlen, uint8_t keylen, uint64_t offset )
{
  blake2b_param param[1];

  /* Zero the whole parameter block, then set only the non-zero fields
     for a leaf node (node_depth stays 0, leaf_length stays 0). */
  memset( param, 0, sizeof( param ) );
  param->digest_length = outlen;
  param->key_length    = keylen;
  param->fanout        = PARALLELISM_DEGREE;
  param->depth         = 2;
  store64( &param->node_offset, offset );
  param->inner_length  = BLAKE2B_OUTBYTES;
  return blake2b_init_param( S, param );
}
/* Initialize the root state of the BLAKE2bp tree (node_depth 1, offset 0). */
static inline int blake2bp_init_root( blake2b_state *S, uint8_t outlen, uint8_t keylen )
{
  blake2b_param param[1];

  /* Zero everything, then set the non-zero fields for the root node. */
  memset( param, 0, sizeof( param ) );
  param->digest_length = outlen;
  param->key_length    = keylen;
  param->fanout        = PARALLELISM_DEGREE;
  param->depth         = 2;
  param->node_depth    = 1; /* root sits one level above the leaves */
  param->inner_length  = BLAKE2B_OUTBYTES;
  return blake2b_init_param( S, param );
}
/* Set up an unkeyed BLAKE2bp state producing 'outlen' bytes of digest.
   Returns 0 on success, -1 on invalid length or sub-init failure. */
int blake2bp_init( blake2bp_state *S, const uint8_t outlen )
{
  size_t i;

  if( outlen == 0 || outlen > BLAKE2B_OUTBYTES ) return -1;

  S->buflen = 0;
  memset( S->buf, 0, sizeof( S->buf ) );

  if( blake2bp_init_root( S->R, outlen, 0 ) < 0 ) return -1;

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    if( blake2bp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1;
  }

  /* Mark the root and the final leaf so finalization sets the last-node bit. */
  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  return 0;
}
/* Set up a keyed BLAKE2bp state: each leaf absorbs one zero-padded block
   containing the key before any message data. Returns 0 / -1. */
int blake2bp_init_key( blake2bp_state *S, const uint8_t outlen, const void *key, const uint8_t keylen )
{
  size_t i;

  if( outlen == 0 || outlen > BLAKE2B_OUTBYTES ) return -1;
  if( key == NULL || keylen == 0 || keylen > BLAKE2B_KEYBYTES ) return -1;

  S->buflen = 0;
  memset( S->buf, 0, sizeof( S->buf ) );

  if( blake2bp_init_root( S->R, outlen, keylen ) < 0 ) return -1;

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    if( blake2bp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1;
  }

  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;

  {
    /* Every leaf hashes the same key block, zero-padded to a full block. */
    uint8_t block[BLAKE2B_BLOCKBYTES];

    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );
    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], block, BLAKE2B_BLOCKBYTES );
    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }
  return 0;
}
/*
 * Absorb 'inlen' bytes of message.  Input is dealt out to the 4 leaves in
 * round-robin BLAKE2B_BLOCKBYTES slices: any previously buffered partial
 * round is completed first, then each leaf (one OpenMP thread per leaf
 * when available) consumes its own stride of the bulk input, and the
 * remainder that does not fill a whole 4-block round is buffered.
 */
int blake2bp_update( blake2bp_state *S, const uint8_t *in, uint64_t inlen )
{
size_t left = S->buflen;
size_t fill = sizeof( S->buf ) - left;
/* complete the buffered round (4 blocks) before touching the new input */
if( left && inlen >= fill )
{
memcpy( S->buf + left, in, fill );
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, BLAKE2B_BLOCKBYTES );
in += fill;
inlen -= fill;
left = 0;
}
/* with OpenMP each leaf is one thread; otherwise a plain serial loop over id__ */
#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
{
#if defined(_OPENMP)
size_t id__ = omp_get_thread_num();
#endif
uint64_t inlen__ = inlen;
const uint8_t *in__ = ( const uint8_t * )in;
/* leaf id__ starts at its own slice and strides 4 blocks at a time */
in__ += id__ * BLAKE2B_BLOCKBYTES;
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
{
blake2b_update( S->S[id__], in__, BLAKE2B_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
}
}
/* buffer whatever is left of an incomplete 4-block round */
in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES );
inlen %= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
if( inlen > 0 )
memcpy( S->buf + left, in, inlen );
S->buflen = left + inlen;
return 0;
}
/* Finish the hash: flush the buffered tail to the leaves, finalize each
   leaf, then hash the leaf digests (in order) through the root. */
int blake2bp_final( blake2bp_state *S, uint8_t *out, const uint8_t outlen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
  size_t i;

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    /* Leaf i owns the i-th BLAKE2B_BLOCKBYTES slice of the shared buffer. */
    size_t off = i * BLAKE2B_BLOCKBYTES;

    if( S->buflen > off )
    {
      size_t take = S->buflen - off;

      if( take > BLAKE2B_BLOCKBYTES ) take = BLAKE2B_BLOCKBYTES;
      blake2b_update( S->S[i], S->buf + off, take );
    }
    blake2b_final( S->S[i], hash[i], BLAKE2B_OUTBYTES );
  }

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( S->R, hash[i], BLAKE2B_OUTBYTES );

  blake2b_final( S->R, out, outlen );
  return 0;
}
/*
 * One-shot BLAKE2bp: hashes 'inlen' bytes of 'in' (optionally keyed) into
 * 'outlen' bytes at 'out'.  Builds 4 leaf states, deals the input to them
 * in round-robin block slices (one OpenMP thread per leaf when available),
 * then hashes the 4 leaf digests through a root state.
 * Returns 0 on success, -1 on bad arguments or sub-init failure.
 */
int blake2bp( uint8_t *out, const void *in, const void *key, uint8_t outlen, uint64_t inlen, uint8_t keylen )
{
uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
blake2b_state S[PARALLELISM_DEGREE][1];
blake2b_state FS[1];
/* Verify parameters */
if ( NULL == in ) return -1;
if ( NULL == out ) return -1;
if ( NULL == key ) keylen = 0; /* NULL key degrades to unkeyed hashing */
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2bp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1;
S[PARALLELISM_DEGREE - 1]->last_node = 1; // mark last node
if( keylen > 0 )
{
/* every leaf absorbs the same zero-padded key block first */
uint8_t block[BLAKE2B_BLOCKBYTES];
memset( block, 0, BLAKE2B_BLOCKBYTES );
memcpy( block, key, keylen );
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2b_update( S[i], block, BLAKE2B_BLOCKBYTES );
secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
}
/* with OpenMP each leaf is one thread; otherwise a plain serial loop over id__ */
#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
{
#if defined(_OPENMP)
size_t id__ = omp_get_thread_num();
#endif
uint64_t inlen__ = inlen;
const uint8_t *in__ = ( const uint8_t * )in;
/* leaf id__ starts at its own block slice and strides 4 blocks at a time */
in__ += id__ * BLAKE2B_BLOCKBYTES;
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
{
blake2b_update( S[id__], in__, BLAKE2B_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
}
/* partial final round: this leaf takes at most one (possibly short) block */
if( inlen__ > id__ * BLAKE2B_BLOCKBYTES )
{
const size_t left = inlen__ - id__ * BLAKE2B_BLOCKBYTES;
const size_t len = left <= BLAKE2B_BLOCKBYTES ? left : BLAKE2B_BLOCKBYTES;
blake2b_update( S[id__], in__, len );
}
blake2b_final( S[id__], hash[id__], BLAKE2B_OUTBYTES );
}
if( blake2bp_init_root( FS, outlen, keylen ) < 0 )
return -1;
FS->last_node = 1; // Mark as last node
/* root hashes the ordered concatenation of the leaf digests */
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2b_update( FS, hash[i], BLAKE2B_OUTBYTES );
blake2b_final( FS, out, outlen );
return 0;
}
#if defined(BLAKE2BP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
/* Known-answer self-test: hash every prefix length of a deterministic buffer
   with a deterministic key and compare each digest against the reference
   table blake2bp_keyed_kat. Prints "ok" and returns 0 on success; prints
   "error" and returns -1 on the first mismatch. */
int main( int argc, char **argv )
{
    uint8_t key[BLAKE2B_KEYBYTES];
    uint8_t buf[KAT_LENGTH];

    /* Deterministic inputs: key[i] = i, buf[i] = i. */
    for( size_t i = 0; i < BLAKE2B_KEYBYTES; ++i ) key[i] = ( uint8_t )i;
    for( size_t i = 0; i < KAT_LENGTH; ++i ) buf[i] = ( uint8_t )i;

    for( size_t len = 0; len < KAT_LENGTH; ++len )
    {
        uint8_t digest[BLAKE2B_OUTBYTES];
        blake2bp( digest, buf, key, BLAKE2B_OUTBYTES, len, BLAKE2B_KEYBYTES );

        if( memcmp( digest, blake2bp_keyed_kat[len], BLAKE2B_OUTBYTES ) != 0 )
        {
            puts( "error" );
            return -1;
        }
    }
    puts( "ok" );
    return 0;
}
#endif
|
VariableSizeMatrix.h | // Copyright (c) 2017, Lawrence Livermore National Security, LLC and
// UT-Battelle, LLC.
// Produced at the Lawrence Livermore National Laboratory and the Oak Ridge
// National Laboratory.
// LLNL-CODE-743438
// All rights reserved.
// This file is part of MGmol. For details, see https://github.com/llnl/mgmol.
// Please also read this link https://github.com/llnl/mgmol/LICENSE
/*!
* Variable size csr/csc matrix used for data transfer operations
*/
#ifndef MGMOL_VARIABLESIZEMATRIX_H_
#define MGMOL_VARIABLESIZEMATRIX_H_
#include "LocalMatrices.h"
#include "SparseRow.h"
#include "SparseRowAndTable.h"
#include "SquareSubMatrix.h"
#include "Table.h"
#include "VariableSizeMatrixInterface.h"

#include <cassert>
#include <cstring>
#include <iostream>
#include <set>
#include <string>
#include <vector>
// typedef enum INSERTMODE {INSERT, ADD} INSERTMODE;
/* define maximum and minimum local matrix size */
#define MAX_MAT_SIZE 10000
#define MIN_MAT_SIZE 10
/* define default tolerance for pruning matrix entries */
#define MAT_TOL 1.0e-14
/* define maximum number of print rows */
#define MAX_PRINT_ROWS 100
/* define default number of print rows for diagnostics */
#define NUM_PRINT_ROWS 5
class DataDistribution;
/* define matrix row datatype */
typedef SparseRow sparserow;
typedef SparseRowAndTable sparserowtab;
/*!
 * Sparse matrix with a variable number of nonzeros per row: one row object
 * (of type T, e.g. SparseRow or SparseRowAndTable) per local row, plus a hash
 * table mapping global row indices to local row indices. Used for data
 * transfer operations.
 */
template <class T>
class VariableSizeMatrix : public VariableSizeMatrixInterface
{
    typedef typename std::vector<T*>::iterator TvecIterator;
    typedef typename std::vector<T*>::const_iterator const_TvecIterator;

    const std::string name_;

    int n_; // the dimension of the matrix
    int nzmax_; // max. nnz in each row
    int totnnz_; // total nnz of matrix
    std::vector<int> lvars_; // Local variables in global indices
    Table* table_; // Hash table for holding global, local index pairs
    std::vector<T*> data_; // one row object per local row

public:
    VariableSizeMatrix(
        const std::string& name, const int alloc_size); // setup data structures
    VariableSizeMatrix(const VariableSizeMatrix& A,
        const bool copy_table = true); // Copy constructor
    template <class T2>
    VariableSizeMatrix(const VariableSizeMatrix<T2>& A,
        const bool copy_table = true); // Copy constructor
    VariableSizeMatrix<T>& operator=(const VariableSizeMatrix<T>& a);

    /* initialize a local row of the local matrix */
    void updateLocalRowSquareMatrix(const int count, const int lrindex,
        const int* const cols, const double* const vals,
        const INSERTMODE
            mode); /* update current local row by considering only column
                      indices for which there are rows in the local matrix. ie.
                      ensure a square matrix is preserved */
    void insertNewRow(const int ncols, const int row, const int* cols,
        const double* vals,
        const bool append); /* Augment current matrix by inserting a new row */

    /* initialize matrix data from square local matrix object */
    void insertMatrixElements(
        const LocalMatrices<MATDTYPE, MemorySpace::Host>& ss,
        const std::vector<std::vector<int>>& global_indexes, const int numst,
        const double tol = MAT_TOL);
    void insertMatrixElements(
        const SquareSubMatrix<MATDTYPE>& ss, const double tol);

    void sparsify(const std::vector<bool>& keeprow);
    void sparsify(const std::vector<int>& gids);

    void print(std::ostream& os, const std::vector<int>& locfcns,
        int nrows = NUM_PRINT_ROWS) const;
    void printMatCSR(const char* fname); /* print CSR matrix */
    void printMatBlock2(const int gid0, const int gid1, std::ostream& os);

    void reset(); /* reset CSR matrix to be reused */
    void clear();
    void setupSparseRows(const std::vector<int>&
            rows); /* reset/ initialize matrix with sparse rows */
    void copyData(const VariableSizeMatrix<T>& A,
        const int n); /* Copy data from matrix A. Copies n rows of A */
    void set2Identity(); /* Set matrix to identity */
    ~VariableSizeMatrix() override; // destructor

    void printMat(const char* fname,
        std::vector<int>& lvec); /* print select rows of CSR matrix */

    template <typename T2>
    double AmultSymBdiag(VariableSizeMatrix<T2>* B, const int row);
    double AmultSymB_ij(VariableSizeMatrix<T>* B, const int row,
        const int col); /* compute ij-th entry of A*B */

    double trace(); /* compute the trace of the matrix */
    double trace(const std::vector<int>&
            rows); /* compute the trace of selected rows of the matrix */
    double getTraceDiagProductWithMat(const std::vector<double>&
            ddiag); /* return sum_i ( ddiag[i]*Mat[i][i] ) */
    void copyDataToArray(int* locvars, int* colidx, double* colvals);

    /* get table value */
    void* getTableValue(const int key) const
    {
        return (*table_).get_value(key);
    }
    /* update/ insert key into table */
    void updateTableValue(const int key) { (*table_).insert(key); }
    /* update/ insert key, value into table */
    void updateTableValue(const int key, const int value)
    {
        (*table_).insert(key, value);
    }

    /* get local size */
    int n() const { return n_; }

    /* get nzmax (largest nnz over all local rows) */
    int nzmax() const
    {
        int nzmax = 0;
        const_TvecIterator it;
        for (it = data_.begin(); it != data_.end(); ++it)
            nzmax = nzmax > (int)(*it)->nnz() ? nzmax : (int)(*it)->nnz();
        return nzmax;
    }
    /* get nzmin (smallest nnz over all local rows) */
    int nzmin() const
    {
        int nzmin = n_;
        const_TvecIterator it;
        for (it = data_.begin(); it != data_.end(); ++it)
            nzmin = nzmin < (int)(*it)->nnz() ? nzmin : (int)(*it)->nnz();
        return nzmin;
    }
    /* accumulate the total nnz of rows begin..end (inclusive).
       NOTE(review): despite the name, this sums the row nnz counts rather
       than taking their maximum -- confirm that is the intent. */
    int getNzmaxSubmat(const int begin, const int end)
    {
        if (end >= n_) return 0;

        int nzmax = 0;
        for (int i = begin; i <= end; i++)
            nzmax += (int)data_[i]->nnz();
        return nzmax;
    }
    /* get totnnz */
    int nnzmat() const { return totnnz_; }
    /* get number of nonzeros for a local row */
    int nnzrow(const int row) const
    {
        if (row >= n_) return 0;
        return (int)data_[row]->nnz();
    }
    /* get global index of local variable */
    int getLocalVariableGlobalIndex(const int lrindex) const
    {
        return lvars_[lrindex];
    }
    /* check whether column col is present on local row lrindex */
    bool isColumnHere(const int lrindex, const int col) const
    {
        // getColumnPosition() returns -1 when the column is absent
        return (data_[lrindex]->getColumnPosition(col) != -1);
    }
    /* set pointer to array of global index of local variables */
    int* rowIndexes() { return &lvars_[0]; }
    /* get (global) column index */
    int getColumnIndex(const int lrindex, const int pos) const
    {
        return data_[lrindex]->getColumnIndex(pos);
    }
    void getColumnIndexes(const int lrindex, std::vector<int>& indexes) const
    {
        indexes = data_[lrindex]->getColumnIndexes();
    }
    void getAllColumnIndexes(std::vector<int>& indexes) const;

    /* get value on local row */
    double getRowEntry(const int lrindex, const int pos) const
    {
        assert(lrindex < n_);
        return data_[lrindex]->getEntryFromPosition(pos);
    }
    void getRowEntries(const int lrindex, std::vector<double>& values) const
    {
        assert(lrindex < n_);
        values = data_[lrindex]->getColumnEntries();
    }
    int getMaxAbsOffDiagonalRowEntry(const int gid, double& value) const;

    int getColumnPos(const int lrindex, const int col)
    {
        return data_[lrindex]->getColumnPosition(col);
    }
    void row_daxpy(
        const int lrindex, const int size, const double alpha, double* y)
    {
        data_[lrindex]->axpy(size, alpha, y);
    }
    Table* getTable() { return table_; }

    /* initialize a local row of the local matrix */
    /* Assumes nnzrow is initially zero - matrix has been reset */
    void initializeLocalRow(
        const int ncols, const int lrindex, const int* cols, const double* vals)
    {
        if (ncols)
        {
            data_[lrindex]->assign(ncols, cols, vals);
/* update local matrix variables */
#ifdef _OPENMP
#pragma omp atomic
#endif
            totnnz_ += ncols;
        }
        return;
    }

    /* Update current local rows by adding or inserting new columns. */
    void updateLocalRow(const int count, const int lrindex,
        const int* const cols, const double* const vals, const INSERTMODE mode)
    {
        totnnz_ += data_[lrindex]->updateRow(count, cols, vals, mode);
        return;
    }
    void updateLocalRowAdd(const int count, const int lrindex,
        const int* const cols, const double* const vals)
    {
        const int newnnz = data_[lrindex]->updateRowAdd(count, cols, vals);
#ifdef _OPENMP
#pragma omp atomic
#endif
        totnnz_ += newnnz;
        return;
    }
    void updateLocalRowInsert(const int count, const int lrindex,
        const int* const cols, const double* const vals)
    {
        totnnz_ += data_[lrindex]->updateRowInsert(count, cols, vals);
        return;
    }

    /* Update current local row by adding or inserting a new column. */
    void updateLocalRow(const int lrindex, const int col, const double val,
        const INSERTMODE mode)
    {
        totnnz_ += data_[lrindex]->updateRow(col, val, mode);
        return;
    }
    void updateLocalRowAdd(const int lrindex, const int col, const double val)
    {
        totnnz_ += data_[lrindex]->updateRowAdd(col, val);
        return;
    }
    void updateLocalRowInsert(
        const int lrindex, const int col, const double val)
    {
        totnnz_ += data_[lrindex]->updateRowInsert(col, val);
        return;
    }

    /* Update current local entry by adding or inserting a new value.
     * Assumes that the local row index and column position of the entry
     * is known.
     */
    void updateLocalEntry(const int lrindex, const int pos, const double val,
        const INSERTMODE mode)
    {
        data_[lrindex]->updateEntry(pos, val, mode);
        return;
    }
    void updateLocalEntryAdd(const int lrindex, const int pos, const double val)
    {
        data_[lrindex]->updateEntryAdd(pos, val);
        return;
    }
    void updateLocalEntryInsert(
        const int lrindex, const int pos, const double val)
    {
        data_[lrindex]->updateEntryInsert(pos, val);
        return;
    }

    /* Insert entry into matrix (thread-safe when compiled with OpenMP) */
    void insertMatrixElement(const int row, const int col, const double val,
        const INSERTMODE mode, const bool append)
    {
#ifdef _OPENMP
#pragma omp critical(insertMatrixElement)
#endif
        {
            /* check if row exists */
            int* rindex = (int*)getTableValue(row);
            if (rindex != nullptr) /* row exists */
            {
                /* insert column */
                updateLocalRow(*rindex, col, val, mode);
            }
            else /* insert new row */
            {
                insertNewRow(1, row, &col, &val, append);
            }
        }
    }

    /* get matrix entry; 0.0 when the global row is not present */
    double get_value(const int row, const int col) const
    {
        double value = 0.0;
        int* rindex = (int*)getTableValue(row);
        if (rindex != nullptr) value = data_[*rindex]->getColumnEntry(col);
        return value;
    }

    /* get matrix entries from a local row = lrindex */
    void getLocalRowValues(const int lrindex, const std::vector<int>& cols,
        std::vector<double>& vals) const
    {
        vals.reserve(cols.size());
        for (std::vector<int>::const_iterator it = cols.begin();
             it != cols.end(); ++it)
        {
            vals.push_back(data_[lrindex]->getColumnEntry(*it));
        }
        assert(vals.size() == cols.size());
    }

    /* get matrix entries from a global row; zeros when the row is absent */
    void getRowValues(const int row, const std::vector<int>& cols,
        std::vector<double>& vals) const
    {
        int* rindex = (int*)getTableValue(row);
        if (rindex != nullptr)
        {
            const int lrindex = *rindex;
            getLocalRowValues(lrindex, cols, vals);
        }
        else
        {
            vals.resize(cols.size());
            memset(&vals[0], 0, vals.size() * sizeof(double));
        }
        assert(vals.size() == cols.size());
    }

    /* get matrix entries from a sorted row (sorts the row data first;
       missing columns yield 0.0) */
    void getSortedRowValues(const int row, const std::vector<int>& cols,
        std::vector<double>& vals) const
    {
        int* rindex = (int*)getTableValue(row);
        if (rindex != nullptr)
        {
            T* data = data_[*rindex];
            vals.reserve(cols.size());
            sort_col_tm_.start();
            data_[*rindex]->sortData();
            sort_col_tm_.stop();
            for (std::vector<int>::const_iterator it = cols.begin();
                 it != cols.end(); ++it)
            {
                int pos = data->getSortedDataColumnPosition(*it);
                if (pos != -1)
                    vals.push_back(data->getEntryFromPosition(pos));
                else
                    vals.push_back(0.0);
            }
        }
        else
        {
            vals.resize(cols.size());
            memset(&vals[0], 0, vals.size() * sizeof(double));
        }
        assert(vals.size() == cols.size());
    }

    /* Scale the row of the CSR matrix (no-op when the row is absent) */
    void scaleRow(const int row, const double coeff)
    {
        int* rindex = (int*)getTableValue(row);
        if (rindex == nullptr) return;
        data_[*rindex]->scale(coeff);
    }

    /* Scale the CSR matrix */
    void scale(const double coeff)
    {
        const int n = n_;
        for (int lrindex = 0; lrindex < n; lrindex++)
            data_[lrindex]->scale(coeff);
    }

    // matrix multiplication operations (locally centered contributions only)
    // flag== true => compute entries for specific nonzero pattern only
    void AmultSymBLocal(VariableSizeMatrix<T>* B, VariableSizeMatrix<T>& C,
        const std::vector<int>& locfcns,
        VariableSizeMatrix<SparseRowAndTable>& pattern, bool flag = true);

    // matrix multiplication operations
    void AmultSymB(VariableSizeMatrix<T>* B, VariableSizeMatrix<T>& C,
        VariableSizeMatrix<SparseRowAndTable>& pattern, bool flag = true);

    const std::vector<int>& lvars() const { return lvars_; }

    // get reference to local row at index rindex
    T& getRow(int rindex) const { return *data_[rindex]; }

    void sortColumnIndexes()
    {
        sort_col_tm_.start();
        for (const_TvecIterator it = data_.begin(); it != data_.end(); ++it)
            (*it)->sortData();
        sort_col_tm_.stop();
    }

    // get pointer to row data
    double* getRowEntries(const int lrindex)
    {
        assert(lrindex < n_);
        return data_[lrindex]->getPtrToColumnEntries();
    }

    void axpy(const double alpha, const VariableSizeMatrix<T>& B);
    void gemv(const double alpha, const std::vector<double>& x,
        const double beta, std::vector<double>& y);

    // compute dot product of matrix row with an array
    double rowDotVec(const int row, const double* x)
    {
        return data_[row]->dotVec(x);
    }
    double pnorm(const int row, const int p) { return data_[row]->pnorm(p); }

    VariableSizeMatrix<T>& operator+=(const VariableSizeMatrix<T>& a)
    {
        axpy(1.0, a);
        return *this;
    }
    VariableSizeMatrix<T>& operator-=(const VariableSizeMatrix<T>& a)
    {
        axpy(-1.0, a);
        return *this;
    }

    std::string name() { return name_; }
};
#endif
|
erotima_2b.c | #include <stdio.h>
#include <math.h>
#include <getopt.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h>
#include <string.h>
#include <cblas.h>
#define MIN_NUM_OF_NEURONS (1L)
#define DEF_NUM_OF_NEURONS (1000L)
#define MIN_NUM_OF_NEIGHBORS (0L)
#define DEF_NUM_OF_NEIGHBORS (300L)
#define DEF_DT (1.0e-04)
#define DEF_MU (1.0)
#define DEF_UTH (0.98)
#define DEF_S_MIN (0.7)
#define DEF_S_MAX (0.7)
#define DEF_SIM_TIME (20L)
#define DEF_TTRANSIENT (-1L)
void print_thread();
/*
 * Long command-line options accepted by main(); each maps to the
 * single-character code handled in main()'s getopt_long() switch
 * (e.g. --dt -> 'a', --n -> 'n').  The all-zero sentinel entry
 * terminates the array, as getopt_long(3) requires.
 */
static struct option long_options[] =
{
    {"dt", required_argument, 0, 'a'},
    {"mu", required_argument, 0, 'b'},
    {"uth", required_argument, 0, 'c'},
    {"time", required_argument, 0, 'd'},
    {"transient", required_argument, 0, 'e'},
    {"s_min", required_argument, 0, 'f'},
    {"s_max", required_argument, 0, 'g'},
    {"n", required_argument, 0, 'n'},
    {"r", required_argument, 0, 'r'},
    {0, 0, 0, 0}
};
/*
 * Simulate n coupled oscillator/neuron units on a ring, each connected to its
 * r neighbors on either side via random weights sigma in [s_min, s_max].
 * The per-step coupling term sigma * u is computed with cblas_dgemv, with the
 * rows of sigma statically partitioned across OpenMP threads.  Writes the
 * state trajectory to "spacetime.out" and estimated frequencies to
 * "omega.out"; prints timing statistics on exit.
 *
 * Fixes vs. the previous version:
 *  - the hand-unrolled initialization loop advanced i ten times per
 *    iteration and wrote past the end of u[] whenever n % 10 != 0;
 *  - rows were silently dropped when n was not divisible by the thread
 *    count (truncated n / total_threads partition);
 *  - per-thread temp_u and all heap buffers were leaked;
 *  - final_sigma allocation was unchecked.
 */
int main(int argc, char *argv[])
{
    FILE *output1, *output2;
    long n, r;
    long i, j;
    long it;
    double divide;
    double dt;
    double tstep;
    long ntstep;
    long sim_time;
    long ttransient;
    long itime;
    double uth;
    double mu;
    double s_min;
    double s_max;
    double *u, *uplus, *sigma, *omega, *omega1;
    double time;
    struct timeval global_start, global_end, IO_start, IO_end;
    double global_usec, IO_usec = 0.0;
    int c, option_index;
    char *end_ptr;

    /* Defaults; each may be overridden by a command-line option. */
    n = DEF_NUM_OF_NEURONS;
    r = DEF_NUM_OF_NEIGHBORS;
    dt = DEF_DT;
    mu = DEF_MU;
    uth = DEF_UTH;
    s_min = DEF_S_MIN;
    s_max = DEF_S_MAX;
    sim_time = DEF_SIM_TIME;
    ttransient = DEF_TTRANSIENT;

    /* Parse and validate command-line options (see long_options above). */
    while (1) {
        c = getopt_long (argc, argv, "+n:r:", long_options, &option_index);
        if (c == -1) {
            break;
        }
        switch (c) {
            case 'a':
                dt = strtod(optarg, &end_ptr);
                if (*end_ptr != '\0') {
                    printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
                    exit(1);
                }
                if (dt <= 0.0) {
                    printf("Option \"%s\": \"dt\" must be larger than zero.\n", long_options[option_index].name);
                    exit(1);
                }
                break;
            case 'b':
                mu = strtod(optarg, &end_ptr);
                if (*end_ptr != '\0') {
                    printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
                    exit(1);
                }
                if (mu <= 0.0) {
                    printf("Option \"%s\": \"mu\" must be larger than zero.\n", long_options[option_index].name);
                    exit(1);
                }
                break;
            case 'c':
                uth = strtod(optarg, &end_ptr);
                if (*end_ptr != '\0') {
                    printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
                    exit(1);
                }
                if (uth <= 0.0) {
                    printf("Option \"%s\": \"uth\" must be larger than zero.\n", long_options[option_index].name);
                    exit(1);
                }
                break;
            case 'd':
                sim_time = strtol(optarg, &end_ptr, 10);
                if (*end_ptr != '\0') {
                    printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
                    exit(1);
                }
                if (sim_time < 1) {
                    printf("Option \"%s\": Total simulation time must be larger than zero.\n", long_options[option_index].name);
                    exit(1);
                }
                break;
            case 'e':
                ttransient = strtol(optarg, &end_ptr, 10);
                if (*end_ptr != '\0') {
                    printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
                    exit(1);
                }
                if (ttransient < 0) {
                    printf("Option \"%s\": \"ttransient\" must be larger or equal than zero.\n", long_options[option_index].name);
                    exit(1);
                }
                break;
            case 'f':
                s_min = strtod(optarg, &end_ptr);
                if (*end_ptr != '\0') {
                    printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
                    exit(1);
                }
                if (s_min <= 0.0) {
                    printf("Option \"%s\": \"s_min\" must be larger than zero.\n", long_options[option_index].name);
                    exit(1);
                }
                break;
            case 'g':
                s_max = strtod(optarg, &end_ptr);
                if (*end_ptr != '\0') {
                    printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
                    exit(1);
                }
                if (s_max <= 0.0) {
                    printf("Option \"%s\": \"s_max\" must be larger than zero.\n", long_options[option_index].name);
                    exit(1);
                }
                break;
            case 'n':
                n = strtol(optarg, &end_ptr, 10);
                if (*end_ptr != '\0') {
                    printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
                    exit(1);
                }
                if (n < MIN_NUM_OF_NEURONS) {
                    printf("Option \"%s\": Number of neurons must be at least %ld.\n", long_options[option_index].name, MIN_NUM_OF_NEURONS);
                    exit(1);
                }
                break;
            case 'r':
                r = strtol(optarg, &end_ptr, 10);
                if (*end_ptr != '\0') {
                    printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
                    exit(1);
                }
                if (r < MIN_NUM_OF_NEIGHBORS) {
                    printf("Option \"%s\": Number of neighbors must be at least %ld.\n", long_options[option_index].name, MIN_NUM_OF_NEIGHBORS);
                    exit(1);
                }
                break;
            case '?':
            default:
                exit(1);
                break;
        }
    }
    if (optind != argc) {
        printf("Unknown option \"%s\".\n", argv[optind]);
        exit(1);
    }
    if (2 * r + 1 > n) {
        printf("Total number of neighbors and reference neuron (2 * %ld + 1 = %ld) cannot exceed number of neurons (%ld).\n", r, 2 * r + 1, n);
        exit(1);
    }
    if (s_min > s_max) {
        printf("s_min (%17.15f) must be smaller or equal than s_max (%17.15f).\n", s_min, s_max);
        exit(1);
    }

    /* NOTE(review): r = 0 is accepted above but makes divide == 0, so the
       coupling term below divides by zero -- confirm intended behavior. */
    divide = (double)(2 * r);
    tstep = 1.0 / dt;
    ntstep = (long)tstep;
    if (ttransient == DEF_TTRANSIENT) {
        ttransient = (sim_time * ntstep) / 2;
    } else {
        ttransient *= ntstep;
    }
    itime = sim_time * ntstep;

    printf("Running simulation with following parameters:\n");
    printf("  Number of neurons   : %ld\n", n);
    printf("  Numger of neighbours: %ld\n", r);
    printf("  Simulation time     : %ld seconds (%ld time steps)\n", sim_time, itime);
    printf("  Transient time      : %ld seconds (%ld time steps)\n", ttransient / ntstep, ttransient);
    printf("  dt                  : %.1e seconds \n", dt);
    printf("  mu                  : %17.15f\n", mu);
    printf("  uth                 : %17.15f\n", uth);
    printf("  s_min               : %17.15f\n", s_min);
    printf("  s_max               : %17.15f\n", s_max);

    output1 = fopen("spacetime.out", "w");
    if (output1 == NULL) {
        printf("Could not open file \"spacetime.out\"");
        exit(1);
    }
    output2 = fopen("omega.out", "w");
    if (output2 == NULL) {
        printf("Could not open file \"omega.out\"");
        exit(1);
    }

    u = (double *)calloc(n, sizeof(double));
    if (u == NULL) {
        printf("Could not allocate memory for \"u\".\n");
        exit(1);
    }
    uplus = (double *)calloc(n, sizeof(double));
    if (uplus == NULL) {
        printf("Could not allocate memory for \"uplus\".\n");
        exit(1);
    }
    sigma = (double *)calloc(n * n, sizeof(double));
    if (sigma == NULL) {
        printf("Could not allocate memory for \"sigma\".\n");
        exit(1);
    }
    omega = (double *)calloc(n, sizeof(double));
    if (omega == NULL) {
        printf("Could not allocate memory for \"omega\".\n");
        exit(1);
    }
    omega1 = (double *)calloc(n, sizeof(double));
    if (omega1 == NULL) {
        printf("Could not allocate memory for \"omega1\".\n");
        exit(1);
    }

    /* Random initial state in [0,1).  (The previous hand-unrolled loop
       incremented i ten times per iteration and wrote out of bounds
       whenever n was not a multiple of 10.) */
    for (i = 0; i < n; i++) {
        u[i] = drand48();
        printf("%ld\t%f\n", i, u[i]);
    }

    /*
       Construct the ring connectivity matrix sigma[n][n]: each row i has
       random weights for its 2r neighbors (with wrap-around at the edges).
    */
    for (i = 0; i < r; i++) {
        for (j = 0; j < i + r + 1; j++) {
            sigma[i * n + j] = s_min + (s_max - s_min) * drand48();
        }
        for (j = n - r + i; j < n; j++) {
            sigma[i * n + j] = s_min + (s_max - s_min) * drand48();
        }
    }
    for (i = r; i < n - r; i++) {
        for (j = 0; j < 2 * r + 1; j++) {
            sigma[i * n + j + i - r] = s_min + (s_max - s_min) * drand48();
        }
    }
    for (i = n - r; i < n; i++) {
        for (j = 0; j < i - n + r + 1; j++) {
            sigma[i * n + j] = s_min + (s_max - s_min) * drand48();
        }
        for (j = i - r; j < n; j++) {
            sigma[i * n + j] = s_min + (s_max - s_min) * drand48();
        }
    }
#if 0
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            printf("%4.1f", sigma[i * n + j]);
        }
        printf("\n");
    }
#endif

    /*
       Temporal iteration.
    */
    gettimeofday(&global_start, NULL);

    /* Sum of row 0 of sigma; assumes every row has the same total coupling
       weight -- TODO confirm this approximation is intended. */
    double semi_sum = 0.0;
    for (i = 0; i < n; i++) {
        semi_sum += sigma[i];
    }

    double *final_sigma = (double *)calloc(n, sizeof(double));
    if (final_sigma == NULL) {
        printf("Could not allocate memory for \"final_sigma\".\n");
        exit(1);
    }
    int total_threads;

#pragma omp parallel private(it, i) firstprivate(n, sigma, dt, mu, semi_sum)
    {
        int thread_id = omp_get_thread_num();
        long first_row, my_rows;

        total_threads = omp_get_num_threads();

        /* Static row partition of sigma; the last thread absorbs the
           remainder so no rows are dropped when n % total_threads != 0. */
        first_row = (n / total_threads) * thread_id;
        my_rows = (thread_id == total_threads - 1) ? n - first_row
                                                   : n / total_threads;

        double *temp_u = (double *)calloc(my_rows, sizeof(double));

        for (it = 0; it < itime; it++) {
            /* Each thread computes its slice of the coupling term sigma*u. */
            cblas_dgemv(CblasRowMajor, CblasNoTrans, my_rows, n, 1.0,
                        sigma + first_row * n, n, u, 1, 0.0, temp_u, 1);
            memcpy(final_sigma + first_row, temp_u,
                   my_rows * sizeof *final_sigma);

            /* All slices of final_sigma must be in place before the update. */
#pragma omp barrier

#pragma omp for schedule(static, 8)
            for (i = 0; i < n; i++) {
#pragma omp atomic write
                uplus[i] = (u[i] + dt * (mu - u[i]))
                    + dt * (final_sigma[i] - semi_sum * u[i]) / divide;
                /* Threshold crossing: reset and, after the transient,
                   count the firing for the frequency estimate. */
                if (uplus[i] > uth) {
#pragma omp atomic write
                    uplus[i] = 0.0;
                    if (it >= ttransient) {
#pragma omp atomic
                        omega1[i] += 1.0;
                    }
                }
            }
#pragma omp barrier
#pragma omp single
            {
                memcpy(u, uplus, n * sizeof *u);
#if !defined(ALL_RESULTS)
                if (it % ntstep == 0) {
#endif
                    printf("Time is %ld\n", it);
                    gettimeofday(&IO_start, NULL);
                    fprintf(output1, "%ld\t", it);
                    for (i = 0; i < n; i++) {
                        fprintf(output1, "%19.15f", u[i]);
                    }
                    fprintf(output1, "\n");
                    time = (double)it * dt;
                    fprintf(output2, "%ld\t", it);
                    for (i = 0; i < n; i++) {
                        omega[i] = 2.0 * M_PI * omega1[i] / (time - ttransient * dt);
                        fprintf(output2, "%19.15f", omega[i]);
                    }
                    fprintf(output2, "\n");
                    gettimeofday(&IO_end, NULL);
                    IO_usec += ((IO_end.tv_sec - IO_start.tv_sec) * 1000000.0 + (IO_end.tv_usec - IO_start.tv_usec));
#if !defined(ALL_RESULTS)
                }
#endif
            }  /* implicit barrier at end of single */
        }
        free(temp_u);  /* was leaked per thread in the previous version */
    }  /* omp parallel */

    gettimeofday(&global_end, NULL);
    global_usec = ((global_end.tv_sec - global_start.tv_sec) * 1000000.0 + (global_end.tv_usec - global_start.tv_usec));
    printf("Time for calculations = %13.6f sec\n", (global_usec - IO_usec) / 1000000.0);
    printf("Time for I/O          = %13.6f sec\n", IO_usec / 1000000.0);
    printf("Total execution time  = %13.6f sec\n", global_usec / 1000000.0);
    fclose(output1);
    fclose(output2);

    /* Release heap buffers (all leaked in the previous version). */
    free(u);
    free(uplus);
    free(sigma);
    free(omega);
    free(omega1);
    free(final_sigma);
    return 0;
}
|
no_wait.c | #include<stdio.h>
#include<omp.h>
/* Demonstrates the effect of `nowait` on a worksharing loop: each thread
   prints its loop iterations and then its greeting immediately, without
   waiting at the end of the loop for the other threads. */
int main() {
#pragma omp parallel
    {
        int id = omp_get_thread_num();

        /* nowait removes the implicit barrier at the end of the for,
           so the greeting below can interleave with other threads'
           loop output. */
#pragma omp for nowait
        for (int iter = 0; iter < 10; iter++)
            printf("Thread : %d | %d\n", id, iter);

        printf("%d -> Hello\n", id);
    }
}
zgeswp.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
/******************************************************************************/
/******************************************************************************/
// Synchronous LAPACK-layout entry point: validates arguments, converts pA to
// tile layout, applies the row/column swaps described by ipiv via the
// asynchronous tile routine, and converts the result back.  Returns
// PlasmaSuccess or a PLASMA error / negative argument index.
int plasma_zgeswp(plasma_enum_t colrow,
                  int m, int n,
                  plasma_complex64_t *pA, int lda, int *ipiv, int incx)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((colrow != PlasmaColumnwise) &&
        (colrow != PlasmaRowwise)) {
        plasma_error("illegal value of colrow");
        return -1;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -5;
    }

    // quick return
    if (imin(n, m) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_geswp(plasma, PlasmaComplexDouble, m, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_general_desc_create() failed");
        return retval;
    }

    // Initialize sequence.
    // (Previously the return value was silently ignored.)
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, &sequence, &request);

        // Call tile async function.
        plasma_omp_zgeswp(colrow, A, ipiv, incx, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(A, pA, lda, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);

    // Return status.
    int status = sequence.status;
    return status;
}
/******************************************************************************/
/******************************************************************************/
// Asynchronous tile-layout variant: validates its arguments and dispatches to
// the parallel kernel plasma_pzgeswp.  Errors are reported through the
// sequence/request pair rather than a return value.
void plasma_omp_zgeswp(plasma_enum_t colrow,
                       plasma_desc_t A, int *ipiv, int incx,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((colrow != PlasmaColumnwise) &&
        (colrow != PlasmaRowwise)) {
        plasma_error("illegal value of colrow");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // NOTE(review): the two guards below hand a NULL sequence/request pointer
    // straight to plasma_request_fail() -- confirm that function tolerates
    // NULL, otherwise these paths dereference a null pointer.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (imin(A.m, A.n) == 0)
        return;

    // Call the parallel function.
    plasma_pzgeswp(colrow, A, ipiv, incx, sequence, request);
}
|
GB_binop__isne_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_int32)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_int32)
// A.*B function (eWiseMult): GB (_AemultB_03__isne_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_int32)
// A*D function (colscale): GB (_AxD__isne_int32)
// D*A function (rowscale): GB (_DxB__isne_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_int32)
// C=scalar+B GB (_bind1st__isne_int32)
// C=scalar+B' GB (_bind1st_tran__isne_int32)
// C=A+scalar GB (_bind2nd__isne_int32)
// C=A'+scalar GB (_bind2nd_tran__isne_int32)
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij != bij)
// Type and operator macros consumed by the template files included below.
#define GB_ATYPE \
    int32_t
#define GB_BTYPE \
    int32_t
#define GB_CTYPE \
    int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator: ISNE returns 1 or 0 in the int32_t result
#define GB_BINOP(z, x, y, i, j) \
    z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0
// op is second
#define GB_OP_IS_SECOND \
    0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISNE || GxB_NO_INT32 || GxB_NO_ISNE_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled for this operator: ISNE is not one of the accumulate ops this
// kernel supports, so the whole definition is compiled out with #if 0.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Thin wrapper: the numeric loop lives in the included template, specialized
// by the GB_* macros defined above for the ISNE/int32 case.
GrB_Info GB (_Cdense_ewise3_noaccum__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// B_ek_slicing/B_ntasks/B_nthreads describe how B's entries are partitioned
// across threads; the template consumes them directly.
GrB_Info GB (_Cdense_accumB__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Thin wrapper: unwraps the untyped scalar and delegates to the template.
GrB_Info GB (_Cdense_accumb__isne_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    // single exit point: the original had a second, unreachable
    // return inside the braces above; it has been removed to match
    // the sibling _Cdense_accumB wrapper.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C shares A's pattern; only the values Cx are computed by the template
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C shares B's pattern; only the values Cx are computed by the template
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// The slicing workspaces are declared here and freed by GB_FREE_WORK; the
// template allocates them as needed for M, A, and B.
GrB_Info GB (_AaddB__isne_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__isne_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for ISNE (commutative), so only the #else branch
    // below is compiled; the flipxy argument is then ignored.
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isne_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx [p] = (x != Bx [p]) for every entry present in B's bitmap.
GrB_Info GB (_bind1st__isne_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arguments
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Bb, p))
        {
            Cx [p] = (x != Bx [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Computes Cx [p] = (Ax [p] != y) for every entry present in A's bitmap.
GrB_Info GB (_bind2nd__isne_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arguments
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Ab, p))
        {
            Cx [p] = (Ax [p] != y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int32_t aij = Ax [pA] ; \
    Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__isne_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for code generated after this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int32_t aij = Ax [pA] ; \
    Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_sgemm_pack4to1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// SGEMM over im2col'd data: pack-4 input channels x weights -> pack-1 output
// channels, using MIPS MSA intrinsics. The input is first repacked into tiles
// of 12/8/4/1 spatial columns so the GEMM inner loops read contiguously.
static void im2col_sgemm_pack4to1_msa(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // Mat bottom_im2col(size, maxk, inch, 4u * 4, 4, opt.workspace_allocator);
    const int size = bottom_im2col.w;   // number of spatial positions (outw*outh)
    const int maxk = bottom_im2col.h;   // kernel_w * kernel_h
    const int inch = bottom_im2col.c;
    const int outch = top_blob.c;
    const float* bias = _bias;
    // Scratch buffer: channels hold 12-wide tiles first, then 8/4/1-wide
    // leftovers, matching the channel-index formula used below.
    Mat tmp;
    if (size >= 12)
        tmp.create(12 * maxk, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + size % 12 % 4, 4u * 4, 4, opt.workspace_allocator);
    else if (size >= 8)
        tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + size % 4, 4u * 4, 4, opt.workspace_allocator);
    else if (size >= 4)
        tmp.create(4 * maxk, inch, size / 4 + size % 4, 4u * 4, 4, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 4u * 4, 4, opt.workspace_allocator);
    {
        // ---- repack phase: transpose pack-4 columns into row-major tiles ----
        int remain_size_start = 0;
        int nn_size = size / 12;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 12;
            float* tmpptr = tmp.channel(i / 12);
            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;
                for (int k = 0; k < maxk; k++)
                {
                    // transpose 4x12
                    v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0);
                    v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0);
                    v4f32 _r2 = (v4f32)__msa_ld_w(img0 + 4 * 2, 0);
                    v4f32 _r3 = (v4f32)__msa_ld_w(img0 + 4 * 3, 0);
                    v4f32 _r4 = (v4f32)__msa_ld_w(img0 + 4 * 4, 0);
                    v4f32 _r5 = (v4f32)__msa_ld_w(img0 + 4 * 5, 0);
                    v4f32 _r6 = (v4f32)__msa_ld_w(img0 + 4 * 6, 0);
                    v4f32 _r7 = (v4f32)__msa_ld_w(img0 + 4 * 7, 0);
                    v4f32 _r8 = (v4f32)__msa_ld_w(img0 + 4 * 8, 0);
                    v4f32 _r9 = (v4f32)__msa_ld_w(img0 + 4 * 9, 0);
                    v4f32 _ra = (v4f32)__msa_ld_w(img0 + 4 * 10, 0);
                    v4f32 _rb = (v4f32)__msa_ld_w(img0 + 4 * 11, 0);
                    // interleave word pairs, then doubleword pairs, to transpose
                    v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
                    v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
                    v4i32 _r89r = __msa_ilvr_w((v4i32)_r9, (v4i32)_r8);
                    v4i32 _r89l = __msa_ilvl_w((v4i32)_r9, (v4i32)_r8);
                    v4i32 _rabr = __msa_ilvr_w((v4i32)_rb, (v4i32)_ra);
                    v4i32 _rabl = __msa_ilvl_w((v4i32)_rb, (v4i32)_ra);
                    v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
                    v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);
                    v2i64 _r89ab_0 = __msa_ilvr_d((v2i64)_rabr, (v2i64)_r89r);
                    v2i64 _r89ab_1 = __msa_ilvl_d((v2i64)_rabr, (v2i64)_r89r);
                    v2i64 _r89ab_2 = __msa_ilvr_d((v2i64)_rabl, (v2i64)_r89l);
                    v2i64 _r89ab_3 = __msa_ilvl_d((v2i64)_rabl, (v2i64)_r89l);
                    __msa_st_w((v4i32)_r0123_0, tmpptr, 0);
                    __msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
                    __msa_st_w((v4i32)_r89ab_0, tmpptr + 4 * 2, 0);
                    __msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 3, 0);
                    __msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 4, 0);
                    __msa_st_w((v4i32)_r89ab_1, tmpptr + 4 * 5, 0);
                    __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 6, 0);
                    __msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 7, 0);
                    __msa_st_w((v4i32)_r89ab_2, tmpptr + 4 * 8, 0);
                    __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 9, 0);
                    __msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 10, 0);
                    __msa_st_w((v4i32)_r89ab_3, tmpptr + 4 * 11, 0);
                    img0 += size * 4;
                    tmpptr += 48;
                }
            }
        }
        remain_size_start += nn_size * 12;
        nn_size = (size - remain_size_start) >> 3;
        // 8-wide tiles
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 8;
            float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;
                for (int k = 0; k < maxk; k++)
                {
                    // transpose 4x8
                    v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0);
                    v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0);
                    v4f32 _r2 = (v4f32)__msa_ld_w(img0 + 4 * 2, 0);
                    v4f32 _r3 = (v4f32)__msa_ld_w(img0 + 4 * 3, 0);
                    v4f32 _r4 = (v4f32)__msa_ld_w(img0 + 4 * 4, 0);
                    v4f32 _r5 = (v4f32)__msa_ld_w(img0 + 4 * 5, 0);
                    v4f32 _r6 = (v4f32)__msa_ld_w(img0 + 4 * 6, 0);
                    v4f32 _r7 = (v4f32)__msa_ld_w(img0 + 4 * 7, 0);
                    v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
                    v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
                    v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
                    v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);
                    __msa_st_w((v4i32)_r0123_0, tmpptr, 0);
                    __msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
                    __msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 2, 0);
                    __msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 3, 0);
                    __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 4, 0);
                    __msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 5, 0);
                    __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 6, 0);
                    __msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 7, 0);
                    img0 += size * 4;
                    tmpptr += 32;
                }
            }
        }
        remain_size_start += nn_size << 3;
        nn_size = (size - remain_size_start) >> 2;
        // 4-wide tiles
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 4;
            float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;
                for (int k = 0; k < maxk; k++)
                {
                    // transpose 4x4
                    v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0);
                    v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0);
                    v4f32 _r2 = (v4f32)__msa_ld_w(img0 + 4 * 2, 0);
                    v4f32 _r3 = (v4f32)__msa_ld_w(img0 + 4 * 3, 0);
                    v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
                    v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
                    __msa_st_w((v4i32)_r0123_0, tmpptr, 0);
                    __msa_st_w((v4i32)_r0123_1, tmpptr + 4, 0);
                    __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 2, 0);
                    __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 3, 0);
                    img0 += size * 4;
                    tmpptr += 16;
                }
            }
        }
        remain_size_start += nn_size << 2;
        // single leftover columns, copied without transposing
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;
                for (int k = 0; k < maxk; k++)
                {
                    v4f32 _val = (v4f32)__msa_ld_w(img0, 0);
                    __msa_st_w((v4i32)_val, tmpptr, 0);
                    img0 += size * 4;
                    tmpptr += 4;
                }
            }
        }
    }
    // ---- GEMM phase: 4 output channels at a time, then the leftovers ----
    int nn_outch = outch / 4;
    int remain_outch_start = nn_outch * 4;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 4;
        float* outptr0 = top_blob.channel(p);
        float* outptr1 = top_blob.channel(p + 1);
        float* outptr2 = top_blob.channel(p + 2);
        float* outptr3 = top_blob.channel(p + 3);
        const float zeros[4] = {0.f};
        const float* biasptr = bias ? bias + p : zeros;
        int i = 0;
        // 12 spatial positions per iteration: 12 accumulators (3 per channel)
        for (; i + 11 < size; i += 12)
        {
            const float* tmpptr = tmp.channel(i / 12);
            const float* kptr0 = kernel.channel(p / 4);
            int nn = inch * maxk * 4; // inch always > 0
            v4i32 _bias = __msa_ld_w(biasptr, 0);
            v4f32 _sum0 = (v4f32)__msa_splati_w(_bias, 0);
            v4f32 _sum1 = (v4f32)__msa_splati_w(_bias, 0);
            v4f32 _sum2 = (v4f32)__msa_splati_w(_bias, 0);
            v4f32 _sum3 = (v4f32)__msa_splati_w(_bias, 1);
            v4f32 _sum4 = (v4f32)__msa_splati_w(_bias, 1);
            v4f32 _sum5 = (v4f32)__msa_splati_w(_bias, 1);
            v4f32 _sum6 = (v4f32)__msa_splati_w(_bias, 2);
            v4f32 _sum7 = (v4f32)__msa_splati_w(_bias, 2);
            v4f32 _sum8 = (v4f32)__msa_splati_w(_bias, 2);
            v4f32 _sum9 = (v4f32)__msa_splati_w(_bias, 3);
            v4f32 _suma = (v4f32)__msa_splati_w(_bias, 3);
            v4f32 _sumb = (v4f32)__msa_splati_w(_bias, 3);
            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 96);
                __builtin_prefetch(kptr0 + 32);
                v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
                v4f32 _val1 = (v4f32)__msa_ld_w(tmpptr + 4, 0);
                v4f32 _val2 = (v4f32)__msa_ld_w(tmpptr + 8, 0);
                v4i32 _w0123 = __msa_ld_w(kptr0, 0);
                _sum0 = __msa_fmadd_w(_sum0, _val0, (v4f32)__msa_splati_w(_w0123, 0));
                _sum1 = __msa_fmadd_w(_sum1, _val1, (v4f32)__msa_splati_w(_w0123, 0));
                _sum2 = __msa_fmadd_w(_sum2, _val2, (v4f32)__msa_splati_w(_w0123, 0));
                _sum3 = __msa_fmadd_w(_sum3, _val0, (v4f32)__msa_splati_w(_w0123, 1));
                _sum4 = __msa_fmadd_w(_sum4, _val1, (v4f32)__msa_splati_w(_w0123, 1));
                _sum5 = __msa_fmadd_w(_sum5, _val2, (v4f32)__msa_splati_w(_w0123, 1));
                _sum6 = __msa_fmadd_w(_sum6, _val0, (v4f32)__msa_splati_w(_w0123, 2));
                _sum7 = __msa_fmadd_w(_sum7, _val1, (v4f32)__msa_splati_w(_w0123, 2));
                _sum8 = __msa_fmadd_w(_sum8, _val2, (v4f32)__msa_splati_w(_w0123, 2));
                _sum9 = __msa_fmadd_w(_sum9, _val0, (v4f32)__msa_splati_w(_w0123, 3));
                _suma = __msa_fmadd_w(_suma, _val1, (v4f32)__msa_splati_w(_w0123, 3));
                _sumb = __msa_fmadd_w(_sumb, _val2, (v4f32)__msa_splati_w(_w0123, 3));
                tmpptr += 12;
                kptr0 += 4;
            }
            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
            __msa_st_w((v4i32)_sum2, outptr0 + 8, 0);
            __msa_st_w((v4i32)_sum3, outptr1, 0);
            __msa_st_w((v4i32)_sum4, outptr1 + 4, 0);
            __msa_st_w((v4i32)_sum5, outptr1 + 8, 0);
            __msa_st_w((v4i32)_sum6, outptr2, 0);
            __msa_st_w((v4i32)_sum7, outptr2 + 4, 0);
            __msa_st_w((v4i32)_sum8, outptr2 + 8, 0);
            __msa_st_w((v4i32)_sum9, outptr3, 0);
            __msa_st_w((v4i32)_suma, outptr3 + 4, 0);
            __msa_st_w((v4i32)_sumb, outptr3 + 8, 0);
            outptr0 += 12;
            outptr1 += 12;
            outptr2 += 12;
            outptr3 += 12;
        }
        // 8 spatial positions per iteration
        for (; i + 7 < size; i += 8)
        {
            const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
            const float* kptr0 = kernel.channel(p / 4);
            int nn = inch * maxk * 4; // inch always > 0
            v4i32 _bias = __msa_ld_w(biasptr, 0);
            v4f32 _sum0 = (v4f32)__msa_splati_w(_bias, 0);
            v4f32 _sum1 = (v4f32)__msa_splati_w(_bias, 0);
            v4f32 _sum2 = (v4f32)__msa_splati_w(_bias, 1);
            v4f32 _sum3 = (v4f32)__msa_splati_w(_bias, 1);
            v4f32 _sum4 = (v4f32)__msa_splati_w(_bias, 2);
            v4f32 _sum5 = (v4f32)__msa_splati_w(_bias, 2);
            v4f32 _sum6 = (v4f32)__msa_splati_w(_bias, 3);
            v4f32 _sum7 = (v4f32)__msa_splati_w(_bias, 3);
            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 64);
                __builtin_prefetch(kptr0 + 32);
                v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
                v4f32 _val1 = (v4f32)__msa_ld_w(tmpptr + 4, 0);
                v4i32 _w0123 = __msa_ld_w(kptr0, 0);
                _sum0 = __msa_fmadd_w(_sum0, _val0, (v4f32)__msa_splati_w(_w0123, 0));
                _sum1 = __msa_fmadd_w(_sum1, _val1, (v4f32)__msa_splati_w(_w0123, 0));
                _sum2 = __msa_fmadd_w(_sum2, _val0, (v4f32)__msa_splati_w(_w0123, 1));
                _sum3 = __msa_fmadd_w(_sum3, _val1, (v4f32)__msa_splati_w(_w0123, 1));
                _sum4 = __msa_fmadd_w(_sum4, _val0, (v4f32)__msa_splati_w(_w0123, 2));
                _sum5 = __msa_fmadd_w(_sum5, _val1, (v4f32)__msa_splati_w(_w0123, 2));
                _sum6 = __msa_fmadd_w(_sum6, _val0, (v4f32)__msa_splati_w(_w0123, 3));
                _sum7 = __msa_fmadd_w(_sum7, _val1, (v4f32)__msa_splati_w(_w0123, 3));
                tmpptr += 8;
                kptr0 += 4;
            }
            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
            __msa_st_w((v4i32)_sum2, outptr1, 0);
            __msa_st_w((v4i32)_sum3, outptr1 + 4, 0);
            __msa_st_w((v4i32)_sum4, outptr2, 0);
            __msa_st_w((v4i32)_sum5, outptr2 + 4, 0);
            __msa_st_w((v4i32)_sum6, outptr3, 0);
            __msa_st_w((v4i32)_sum7, outptr3 + 4, 0);
            outptr0 += 8;
            outptr1 += 8;
            outptr2 += 8;
            outptr3 += 8;
        }
        // 4 spatial positions per iteration
        for (; i + 3 < size; i += 4)
        {
            const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
            const float* kptr0 = kernel.channel(p / 4);
            int nn = inch * maxk * 4; // inch always > 0
            v4i32 _bias = __msa_ld_w(biasptr, 0);
            v4f32 _sum0 = (v4f32)__msa_splati_w(_bias, 0);
            v4f32 _sum1 = (v4f32)__msa_splati_w(_bias, 1);
            v4f32 _sum2 = (v4f32)__msa_splati_w(_bias, 2);
            v4f32 _sum3 = (v4f32)__msa_splati_w(_bias, 3);
            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 32);
                __builtin_prefetch(kptr0 + 32);
                v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
                v4i32 _w0123 = __msa_ld_w(kptr0, 0);
                _sum0 = __msa_fmadd_w(_sum0, _val0, (v4f32)__msa_splati_w(_w0123, 0));
                _sum1 = __msa_fmadd_w(_sum1, _val0, (v4f32)__msa_splati_w(_w0123, 1));
                _sum2 = __msa_fmadd_w(_sum2, _val0, (v4f32)__msa_splati_w(_w0123, 2));
                _sum3 = __msa_fmadd_w(_sum3, _val0, (v4f32)__msa_splati_w(_w0123, 3));
                tmpptr += 4;
                kptr0 += 4;
            }
            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr1, 0);
            __msa_st_w((v4i32)_sum2, outptr2, 0);
            __msa_st_w((v4i32)_sum3, outptr3, 0);
            outptr0 += 4;
            outptr1 += 4;
            outptr2 += 4;
            outptr3 += 4;
        }
        // single spatial position: one vector accumulator covers 4 out channels
        for (; i < size; i++)
        {
            const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
            const float* kptr0 = kernel.channel(p / 4);
            int nn = inch * maxk * 4; // inch always > 0
            v4f32 _sum = (v4f32)__msa_ld_w(biasptr, 0);
            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 8);
                __builtin_prefetch(kptr0 + 32);
                v4f32 _val0 = __msa_fill_w_f32(*tmpptr++);
                v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0);
                _sum = __msa_fmadd_w(_sum, _val0, _w0);
                kptr0 += 4;
            }
            outptr0[0] = _sum[0];
            outptr1[0] = _sum[1];
            outptr2[0] = _sum[2];
            outptr3[0] = _sum[3];
            outptr0 += 1;
            outptr1 += 1;
            outptr2 += 1;
            outptr3 += 1;
        }
    }
    // leftover output channels, one at a time
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = remain_outch_start; p < outch; p++)
    {
        float* outptr0 = top_blob.channel(p);
        const float bias0 = bias ? bias[p] : 0.f;
        int i = 0;
        for (; i + 11 < size; i += 12)
        {
            const float* tmpptr = tmp.channel(i / 12);
            const float* kptr0 = kernel.channel(p / 4 + p % 4);
            int nn = inch * maxk * 4; // inch always > 0
            v4f32 _sum0 = __msa_fill_w_f32(bias0);
            v4f32 _sum1 = __msa_fill_w_f32(bias0);
            v4f32 _sum2 = __msa_fill_w_f32(bias0);
            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 64);
                __builtin_prefetch(kptr0 + 8);
                v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
                v4f32 _val1 = (v4f32)__msa_ld_w(tmpptr + 4, 0);
                v4f32 _val2 = (v4f32)__msa_ld_w(tmpptr + 8, 0);
                v4f32 _w0 = __msa_fill_w_f32(*kptr0);
                _sum0 = __msa_fmadd_w(_sum0, _w0, _val0);
                _sum1 = __msa_fmadd_w(_sum1, _w0, _val1);
                _sum2 = __msa_fmadd_w(_sum2, _w0, _val2);
                tmpptr += 12;
                kptr0 += 1;
            }
            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
            __msa_st_w((v4i32)_sum2, outptr0 + 8, 0);
            outptr0 += 12;
        }
        for (; i + 7 < size; i += 8)
        {
            const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
            const float* kptr0 = kernel.channel(p / 4 + p % 4);
            int nn = inch * maxk * 4; // inch always > 0
            v4f32 _sum0 = __msa_fill_w_f32(bias0);
            v4f32 _sum1 = __msa_fill_w_f32(bias0);
            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 32);
                __builtin_prefetch(kptr0 + 8);
                v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
                v4f32 _val1 = (v4f32)__msa_ld_w(tmpptr + 4, 0);
                v4f32 _w0 = __msa_fill_w_f32(*kptr0);
                _sum0 = __msa_fmadd_w(_sum0, _w0, _val0);
                _sum1 = __msa_fmadd_w(_sum1, _w0, _val1);
                tmpptr += 8;
                kptr0 += 1;
            }
            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
            outptr0 += 8;
        }
        for (; i + 3 < size; i += 4)
        {
            const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
            const float* kptr0 = kernel.channel(p / 4 + p % 4);
            int nn = inch * maxk * 4; // inch always > 0
            v4f32 _sum0 = __msa_fill_w_f32(bias0);
            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 16);
                __builtin_prefetch(kptr0 + 8);
                v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
                v4f32 _w0 = __msa_fill_w_f32(*kptr0);
                _sum0 = __msa_fmadd_w(_sum0, _w0, _val0);
                tmpptr += 4;
                kptr0 += 1;
            }
            __msa_st_w((v4i32)_sum0, outptr0, 0);
            outptr0 += 4;
        }
        for (; i < size; i++)
        {
            const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
            const float* kptr0 = kernel.channel(p / 4 + p % 4);
            int nn = inch * maxk; // inch always > 0
            float sum0 = bias0;
            v4f32 _sum0 = (v4f32)__msa_fill_w(0);
            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 16);
                __builtin_prefetch(kptr0 + 16);
                v4f32 _val0 = (v4f32)__msa_ld_w(tmpptr, 0);
                v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0);
                _sum0 = __msa_fmadd_w(_sum0, _val0, _w0);
                tmpptr += 4;
                kptr0 += 4;
            }
            // NOTE(review): __msa_fhadd_w normally yields a vector result;
            // confirm this scalar horizontal-add accumulation compiles as intended.
            sum0 += __msa_fhadd_w(_sum0);
            outptr0[0] = sum0;
            outptr0 += 1;
        }
    }
}
static void convolution_im2col_sgemm_transform_kernel_pack4to1_msa(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
// interleave
// src = maxk-inch-outch
// dst = pb-pa-maxk-inch/pa-outch/pb
Mat kernel = _kernel.reshape(maxk, inch, outch);
kernel_tm.create(4 * 4 * maxk, inch / 4, outch / 4 + outch % 4);
int q = 0;
for (; q + 3 < outch; q += 4)
{
float* g00 = kernel_tm.channel(q / 4);
for (int p = 0; p + 3 < inch; p += 4)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 4; j++)
{
const float* k00 = kernel.channel(q + j).row(p + i);
g00[0] = k00[k];
g00++;
}
}
}
}
}
for (; q < outch; q++)
{
const Mat k0 = kernel.channel(q);
float* g00 = kernel_tm.channel(q / 4 + q % 4);
for (int p = 0; p + 3 < inch; p += 4)
{
for (int k = 0; k < maxk; k++)
{
for (int j = 0; j < 4; j++)
{
const float* k00 = k0.row(p + j);
g00[0] = k00[k];
g00++;
}
}
}
}
}
// Entry point: im2col the pack-4 input, then run the pack4-to-1 SGEMM.
static void convolution_im2col_sgemm_pack4to1_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;
    const int maxk = kernel_w * kernel_h;
    // im2col: one row of `size` pack-4 values per kernel tap, per channel
    Mat bottom_im2col(size, maxk, inch, 4u * 4, 4, opt.workspace_allocator);
    {
        // elements to skip to go from the end of one output row to the start
        // of the next input row (pack-4, hence the * 4)
        const int gap = (w * stride_h - outw * stride_w) * 4;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            float* ptr = bottom_im2col.channel(p);
            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    // top-left sample of this kernel tap
                    const float* sptr = img.row(dilation_h * u) + dilation_w * v * 4;
                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            v4f32 _val = (v4f32)__msa_ld_w(sptr, 0);
                            __msa_st_w((v4i32)_val, ptr, 0);
                            sptr += stride_w * 4;
                            ptr += 4;
                        }
                        sptr += gap;
                    }
                }
            }
        }
    }
    im2col_sgemm_pack4to1_msa(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
simd4.c | #include <math.h>
/* Smoothing kernel: b[i] = average of a[i] and a[i-1], vectorized with
 * OpenMP simd.  (Signature kept as in the original test file.) */
void main(int n, int m, float *a, float *b)
{
    int i;
    /* Fixes: the simd construct must bind directly to the for loop, not to a
     * compound statement, so the extra braces were removed; the original
     * if(simd:test) clause referenced an undeclared identifier `test` and
     * has been dropped. */
    #pragma omp simd simdlen(8) safelen(8)
    for (i = 1; i < n; i++)
        b[i] = ((a[i] + a[i - 1]) / 2.0);
}
|
pooling_2x2_pack16.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 2x2 stride-2 max pooling over pack-16 (AVX-512) feature maps.
static void pooling2x2s2_max_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // elements to skip from the end of one pooled row to the start of the
    // next input row pair (pack-16)
    const int tailstep = (w - 2 * outw + w) * 16;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        float* outptr = top_blob.channel(q);

        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // reduce the 2x2 window, 16 lanes at a time
                __m512 _top = _mm512_max_ps(_mm512_loadu_ps(r0), _mm512_loadu_ps(r0 + 16));
                __m512 _bottom = _mm512_max_ps(_mm512_loadu_ps(r1), _mm512_loadu_ps(r1 + 16));
                _mm512_storeu_ps(outptr, _mm512_max_ps(_top, _bottom));

                r0 += 32;
                r1 += 32;
                outptr += 16;
            }

            r0 += tailstep;
            r1 += tailstep;
        }
    }
}
|
initialcondition.c | #include <string.h>
#include <math.h>
#include <mpi.h>
#include <gsl/gsl_rng.h>
#include <fastpm/libfastpm.h>
#include <fastpm/logging.h>
#include "pmpfft.h"
/* The following functions fill the gaussian field*/
static void
pmic_fill_gaussian_gadget(PM * pm, FastPMFloat * delta_k, int seed);
static void
pmic_fill_gaussian_fast(PM * pm, FastPMFloat * delta_k, int seed);
static void
pmic_fill_gaussian_slow(PM * pm, FastPMFloat * delta_k, int seed);
/* Fill delta_k with Gaussian white noise using the requested sampling
 * scheme.  Any unrecognized scheme falls back to the gadget scheme,
 * matching the original switch's default branch. */
void
fastpm_ic_fill_gaussiank(PM * pm, FastPMFloat * delta_k, int seed, enum FastPMFillDeltaKScheme scheme)
{
    /* Start from a cleared buffer so any mode not explicitly set stays zero. */
    memset(delta_k, 0, pm_allocsize(pm) * sizeof(delta_k[0]));

    if(scheme == FASTPM_DELTAK_FAST) {
        pmic_fill_gaussian_fast(pm, delta_k, seed);
    } else if(scheme == FASTPM_DELTAK_SLOW) {
        pmic_fill_gaussian_slow(pm, delta_k, seed);
    } else {
        /* FASTPM_DELTAK_GADGET and every other value. */
        pmic_fill_gaussian_gadget(pm, delta_k, seed);
    }
}
/* Closure handed to fastpm_apply_any_transfer(): pairs the user's P(k)
 * callback with the box volume used for normalization. */
struct PofK {
fastpm_fkfunc func; /* P(k) callback */
void * data; /* opaque user data forwarded to func */
double Volume; /* box volume; divides P(k) below */
} ;
/* Transfer amplitude sqrt(P(k) / Volume).
 * NOTE(review): written as a product of two square roots; folding into a
 * single sqrt(P/V) would change floating-point rounding of existing
 * results, so the two-factor form is intentional here. */
static double _powerspec_to_transfer(double k, struct PofK * pk)
{
double f = sqrt(pk->func(k, pk->data));
f *= sqrt(1.0 / pk->Volume);
return f;
}
/* Imprint the target power spectrum onto the white-noise field delta_k by
 * applying the transfer function sqrt(P(k)/Volume) in place. */
void
fastpm_ic_induce_correlation(PM * pm, FastPMFloat * delta_k, fastpm_fkfunc pkfunc, void * data)
{
    struct PofK pk = {
        .func = pkfunc,
        .data = data,
        .Volume = pm->Volume,
    };
    fastpm_apply_any_transfer(pm, delta_k, delta_k, (fastpm_fkfunc) _powerspec_to_transfer, &pk);
}
/* Normalize every complex mode of delta_k to unit amplitude while keeping
 * its phase: (a, b) -> (cos(atan2(b, a)), sin(atan2(b, a))).
 * NOTE(review): the bare `#pragma omp parallel` (no worksharing clause)
 * assumes pm_kiter_init() partitions k-space among threads internally --
 * confirm against the PMKIter implementation. */
void
fastpm_ic_remove_variance(PM * pm, FastPMFloat * delta_k)
{
#pragma omp parallel
{
PMKIter kiter;
for(pm_kiter_init(pm, &kiter);
!pm_kiter_stop(&kiter);
pm_kiter_next(&kiter)) {
double k2 = 0;
int d;
for(d = 0; d < 3; d++) {
k2 += kiter.kk[d][kiter.iabs[d]];
}
/* NOTE(review): k2 is accumulated but never used in this loop --
 * verify whether it can be removed. */
/* https://en.wikipedia.org/wiki/Atan2 */
double a = delta_k[kiter.ind]; /* real part */
double b = delta_k[kiter.ind + 1]; /* imaginary part */
if(a == 0 && b == 0) {
/* A zero mode has no defined phase; leave it at zero. */
delta_k[kiter.ind + 0] = 0;
delta_k[kiter.ind + 1] = 0;
} else {
double phase = atan2(b, a);
delta_k[kiter.ind + 0] = cos(phase);
delta_k[kiter.ind + 1] = sin(phase);
}
}
}
}
/* Draw one seed from `rng` and store it for global cell (i, j) and for the
 * hermitian-mirror coordinates (Nmesh - i, Nmesh - j), restricted to the
 * portion of each quadrant owned by this rank (ORegion).  table[d1][d2]
 * selects the quadrant; out-of-region entries are silently skipped so every
 * rank can replay the same global seeding sequence. */
static inline void
SETSEED(PM * pm, unsigned int * table[2][2], int i, int j, gsl_rng * rng)
{
unsigned int seed = 0x7fffffff * gsl_rng_uniform(rng);
int ii[2] = {i, (pm->Nmesh[0] - i) % pm->Nmesh[0]};
int jj[2] = {j, (pm->Nmesh[1] - j) % pm->Nmesh[1]};
int d1, d2;
/* Convert global mesh indices to rank-local offsets. */
for(d1 = 0; d1 < 2; d1++) {
ii[d1] -= pm->ORegion.start[0];
jj[d1] -= pm->ORegion.start[1];
}
for(d1 = 0; d1 < 2; d1++)
for(d2 = 0; d2 < 2; d2++) {
if( ii[d1] >= 0 &&
ii[d1] < pm->ORegion.size[0] &&
jj[d2] >= 0 &&
jj[d2] < pm->ORegion.size[1]
) {
table[d1][d2][ii[d1] * pm->ORegion.size[1] + jj[d2]] = seed;
}
}
}
/* Fetch the seed stored for global coordinate (i, j) in quadrant (d1, d2).
 * Aborts if the coordinate lies outside this rank's region -- such a lookup
 * indicates a logic error in the caller. */
static inline unsigned int
GETSEED(PM * pm, unsigned int * table[2][2], int i, int j, int d1, int d2)
{
    int row = i - pm->ORegion.start[0];
    int col = j - pm->ORegion.start[1];
    if(row < 0 || row >= pm->ORegion.size[0]) abort();
    if(col < 0 || col >= pm->ORegion.size[1]) abort();
    return table[d1][d2][row * pm->ORegion.size[1] + col];
}
/* Draw one uniform phase in [0, 2*pi) and one uniform amplitude in (0, 1].
 * The amplitude is redrawn while it is exactly zero so callers may take
 * log(*ampl) safely.  The phase is always drawn first, keeping the RNG
 * stream consumption identical to the original. */
static void
SAMPLE(gsl_rng * rng, double * ampl, double * phase)
{
    double a;
    *phase = gsl_rng_uniform(rng) * 2 * M_PI;
    do {
        a = gsl_rng_uniform(rng);
    } while(a == 0);
    *ampl = a;
}
/* Fill delta_k with Gaussian white noise following the GADGET/N-GenIC
 * scheme: one RNG seed per (i, j) mesh column, drawn from a master RNG in a
 * fixed expanding-shell order so the field is independent of the domain
 * decomposition.  Hermitian symmetry on the k = 0 and k = Nmesh/2 planes is
 * enforced by re-drawing the conjugate quadrant's numbers and negating the
 * imaginary part.
 *
 * Fix from review: the four SAMPLE() calls previously passed `&l`, an
 * undeclared identifier, leaving `ampl` unset; they now fill `ampl`. */
static void
pmic_fill_gaussian_gadget(PM * pm, FastPMFloat * delta_k, int seed)
{
    /* Fill delta_k with gadget scheme */
    int d;
    int i, j, k;

    memset(delta_k, 0, sizeof(delta_k[0]) * pm->allocsize);

    gsl_rng * rng = gsl_rng_alloc(gsl_rng_ranlxd1);
    gsl_rng_set(rng, seed);

    unsigned int * seedtable[2][2];
    for(i = 0; i < 2; i ++)
    for(j = 0; j < 2; j ++) {
        seedtable[i][j] = calloc(pm->ORegion.size[0] * pm->ORegion.size[1], sizeof(int));
    }

    /* Seed every cell (and its mirrors) in expanding square shells; this
     * ordering reproduces gadget's seed table exactly. */
    for(i = 0; i < pm->Nmesh[0] / 2; i++) {
        for(j = 0; j < i; j++) SETSEED(pm, seedtable, i, j, rng);
        for(j = 0; j < i + 1; j++) SETSEED(pm, seedtable, j, i, rng);
        for(j = 0; j < i; j++) SETSEED(pm, seedtable, pm->Nmesh[0] - 1 - i, j, rng);
        for(j = 0; j < i + 1; j++) SETSEED(pm, seedtable, pm->Nmesh[1] - 1 - j, i, rng);
        for(j = 0; j < i; j++) SETSEED(pm, seedtable, i, pm->Nmesh[1] - 1 - j, rng);
        for(j = 0; j < i + 1; j++) SETSEED(pm, seedtable, j, pm->Nmesh[0] - 1 - i, rng);
        for(j = 0; j < i; j++) SETSEED(pm, seedtable, pm->Nmesh[0] - 1 - i, pm->Nmesh[1] - 1 - j, rng);
        for(j = 0; j < i + 1; j++) SETSEED(pm, seedtable, pm->Nmesh[1] - 1 - j, pm->Nmesh[0] - 1 - i, rng);
    }
    gsl_rng_free(rng);

    ptrdiff_t irel[3];
    for(i = pm->ORegion.start[0];
        i < pm->ORegion.start[0] + pm->ORegion.size[0];
        i ++) {
        gsl_rng * lower_rng = gsl_rng_alloc(gsl_rng_ranlxd1);
        gsl_rng * this_rng = gsl_rng_alloc(gsl_rng_ranlxd1);

        int ci = pm->Nmesh[0] - i;
        if(ci >= pm->Nmesh[0]) ci -= pm->Nmesh[0];

        for(j = pm->ORegion.start[1];
            j < pm->ORegion.start[1] + pm->ORegion.size[1];
            j ++) {
            /* always pull the gaussian from the lower quadrant plane for k = 0
             * plane*/
            /* always pull the whitenoise from the lower quadrant plane for k = 0
             * plane and k == Nmesh / 2 plane*/
            int d1 = 0, d2 = 0;
            int cj = pm->Nmesh[1] - j;
            if(cj >= pm->Nmesh[1]) cj -= pm->Nmesh[1];

            /* d1, d2 points to the conjugate quandrant */
            if( (ci == i && cj < j)
             || (ci < i && cj != j)
             || (ci < i && cj == j)) {
                d1 = 1;
                d2 = 1;
            }

            unsigned int seed_conj, seed_this;
            /* the lower quadrant generator */
            seed_conj = GETSEED(pm, seedtable, i, j, d1, d2);
            gsl_rng_set(lower_rng, seed_conj);

            seed_this = GETSEED(pm, seedtable, i, j, 0, 0);
            gsl_rng_set(this_rng, seed_this);

            for(k = 0; k <= pm->Nmesh[2] / 2; k ++) {
                int use_conj = (d1 != 0 || d2 != 0) && (k == 0 || k == pm->Nmesh[2] / 2);

                double ampl, phase;
                if(use_conj) {
                    /* on k = 0 and Nmesh/2 plane, we use the lower quadrant generator,
                     * then hermit transform the result if it is nessessary */
                    SAMPLE(this_rng, &ampl, &phase);  /* discarded; keeps both streams in step */
                    SAMPLE(lower_rng, &ampl, &phase);
                } else {
                    SAMPLE(lower_rng, &ampl, &phase); /* discarded; keeps both streams in step */
                    SAMPLE(this_rng, &ampl, &phase);
                }

                ptrdiff_t iabs[3] = {i, j, k};
                ptrdiff_t ip = 0;
                for(d = 0; d < 3; d ++) {
                    irel[d] = iabs[d] - pm->ORegion.start[d];
                    ip += pm->ORegion.strides[d] * irel[d];
                }

                if(irel[2] < 0) continue;
                if(irel[2] >= pm->ORegion.size[2]) continue;

                /* we want two numbers that are of std ~ 1/sqrt(2) */
                ampl = sqrt(- log(ampl));

                (delta_k + 2 * ip)[0] = ampl * cos(phase);
                (delta_k + 2 * ip)[1] = ampl * sin(phase);

                if(use_conj) {
                    /* hermitian conjugate: negate the imaginary part */
                    (delta_k + 2 * ip)[1] *= -1;
                }

                if((pm->Nmesh[0] - iabs[0]) % pm->Nmesh[0] == iabs[0] &&
                   (pm->Nmesh[1] - iabs[1]) % pm->Nmesh[1] == iabs[1] &&
                   (pm->Nmesh[2] - iabs[2]) % pm->Nmesh[2] == iabs[2]) {
                    /* The mode is self conjuguate, thus imaginary mode must be zero */
                    (delta_k + 2 * ip)[1] = 0;
                    (delta_k + 2 * ip)[0] = ampl * cos(phase);
                }

                if(iabs[0] == 0 && iabs[1] == 0 && iabs[2] == 0) {
                    /* the mean is zero */
                    (delta_k + 2 * ip)[0] = 0;
                    (delta_k + 2 * ip)[1] = 0;
                }
            }
        }
        gsl_rng_free(lower_rng);
        gsl_rng_free(this_rng);
    }

    for(i = 0; i < 2; i ++)
    for(j = 0; j < 2; j ++) {
        free(seedtable[i][j]);
    }
/*
    char * fn[1000];
    sprintf(fn, "canvas.dump.f4.%d", pm->ThisTask);
    fwrite(pm->canvas, sizeof(pm->canvas[0]), pm->ORegion.total * 2, fopen(fn, "w"));
*/
}
/* Fast scheme: fill a real-space field g_x with Rayleigh-amplitude /
 * uniform-phase pairs (two std-1 gaussians per pair; see footnote 1) and
 * r2c-transform it into delta_k.  Each rank derives an uncorrelated seed by
 * burning 8 draws of the master stream per preceding rank.
 *
 * Fix from review: `random_generator` was never freed, leaking one gsl_rng
 * per call; it is now released (matching the sibling slow/gadget fillers). */
static void
pmic_fill_gaussian_fast(PM * pm, FastPMFloat * delta_k, int seed)
{
    ptrdiff_t ind;
    int d;

    gsl_rng* random_generator = gsl_rng_alloc(gsl_rng_ranlxd1);

    /* set uncorrelated seeds */
    gsl_rng_set(random_generator, seed);
    for(d = 0; d < pm->ThisTask * 8; d++) {
        seed = 0x7fffffff * gsl_rng_uniform(random_generator);
    }
    gsl_rng_set(random_generator, seed);

    FastPMFloat * g_x = pm_alloc(pm);

    for(ind = 0; ind < pm->IRegion.total; ind += 2) {
        double phase = gsl_rng_uniform(random_generator) * 2 * M_PI;
        double ampl;
        do
            ampl = gsl_rng_uniform(random_generator);
        while(ampl == 0.0);

        /* we need two gaussians of std=1.0 in real space (see footnote 1) */
        ampl = sqrt(-2.0 * log(ampl));
        /* r2c will reduce the variance, so we compensate here. */
        ampl *= sqrt(pm_norm(pm));

        g_x[ind] = ampl * sin(phase);
        g_x[ind + 1] = ampl * cos(phase);
    }

    pm_r2c(pm, g_x, delta_k);
    pm_free(pm, g_x);
    gsl_rng_free(random_generator); /* was leaked before this fix */
}
/* Slow (reference) scheme: iterate over the FULL global mesh on every rank,
 * drawing one (phase, amplitude) pair per point so the random stream is
 * identical regardless of decomposition; only points inside this rank's
 * IRegion are actually stored.  The result is r2c-transformed into delta_k.
 * NOTE(review): unlike the fast scheme, only g_x[ind] is written (one value
 * per point); any padding of the pm_alloc buffer not covered by the loop is
 * presumably zeroed or ignored by pm_r2c -- confirm against pm_alloc. */
static void
pmic_fill_gaussian_slow(PM * pm, FastPMFloat * delta_k, int seed)
{
ptrdiff_t i[3] = {0};
int d;
gsl_rng* random_generator = gsl_rng_alloc(gsl_rng_ranlxd1);
gsl_rng_set(random_generator, seed);
FastPMFloat * g_x = pm_alloc(pm);
for(i[0] = 0; i[0] < pm->Nmesh[0]; i[0]++)
for(i[1] = 0; i[1] < pm->Nmesh[1]; i[1]++)
for(i[2] = 0; i[2] < pm->Nmesh[2]; i[2]++) {
/* Always draw, even for points we do not own, to keep the stream global. */
double phase = gsl_rng_uniform(random_generator) * 2 * M_PI;
double ampl;
do
ampl = gsl_rng_uniform(random_generator);
while(ampl == 0.0);
ptrdiff_t ii[3];
ptrdiff_t ind = 0;
for(d = 0; d < 3; d ++) {
/* Skip points outside this rank's slab. */
if(i[d] < pm->IRegion.start[d]) goto next;
if(i[d] >= pm->IRegion.start[d] + pm->IRegion.size[d]) goto next;
ii[d] = i[d] - pm->IRegion.start[d];
ind += ii[d] * pm->IRegion.strides[d];
}
/* we need two gaussians of std=1.0 in real space */
ampl = sqrt(-2.0 * log(ampl));
/* r2c will reduce the variance, so we compensate here. */
ampl *= sqrt(pm_norm(pm));
g_x[ind] = ampl * sin(phase);
next:
continue;
}
pm_r2c(pm, g_x, delta_k);
pm_free(pm, g_x);
gsl_rng_free(random_generator);
}
/* Footnotes */
/* 1):
* We want delta(k) = delta_real + I delta_imag, where delta_real and
* delta_imag are Gaussian random variables with variance given by
* power spectrum, \sigma^2=P(k). We can obtain this equivalently as
*
* delta(k) = A exp(i phase),
*
* where the phase is random (i.e. sampled from a uniform distribution)
* and the amplitude A follows a Rayleigh distribution (see
* https://en.wikipedia.org/wiki/Rayleigh_distribution). To sample from
* Rayleigh distribution, use inverse transform sampling
* (see https://en.wikipedia.org/wiki/Inverse_transform_sampling), i.e.
* start from uniform random variable in [0,1] and then apply inverse of CDF
* of Rayleigh distribution. From F(A)=CDF(A)=1-e^{-A^2/(2\sigma^2)} we get
* A = \sigma \sqrt{-2 ln(1-CDF)}. So if x is uniform random number in [0,1], then
* A = \sigma \sqrt(-2 ln(x)) follows Rayleigh distribution as desired.
* Here we used x instead of 1-x because this does not make a difference for a
* uniform random number in [0,1]. In the code below, we start with \sigma=1 and
* multiply by sqrt(P(k)) later.
*/
|
GB_binop__pair_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pair_int32)
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pair_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__pair_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_int32)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = 1
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = 1 ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_INT32 || GxB_NO_PAIR_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// NOTE(review): this file is auto-generated (see header); any comment added
// here is lost on regeneration.  For the PAIR operator GB_BINOP sets every
// cij to the constant 1, so the template performs no arithmetic on A or B.
GrB_Info GB (_Cdense_ewise3_noaccum__pair_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// all numerical work happens inside the shared template
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Auto-generated wrapper around the subassign-23 template; B_ek_slicing
// carries the parallel task decomposition computed by the caller.
GrB_Info GB (_Cdense_accumB__pair_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Auto-generated wrapper around the subassign-22 template.
GrB_Info GB (_Cdense_accumb__pair_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the inner block above always returns.
// Harmless generator artifact; do not edit by hand (auto-generated file).
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Auto-generated wrapper around the generic add template.  The GB_WERK
// workspaces are declared here and released by GB_FREE_WORK after the
// template runs (the template may allocate into them).
GrB_Info GB (_AaddB__pair_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
common.c | #include "common.h"
#include "linsys.h"
#define MIN_SCALE (1e-3)
#define MAX_SCALE (1e3)
/* Deep-copy the CSC matrix `src` into freshly allocated storage, returned
 * via *dstp.  Returns 1 on success, 0 on allocation failure.
 *
 * Fix from review: on partial allocation failure the shell and any arrays
 * already allocated are now released via free_A_matrix (whose per-member
 * guards make it safe for partially built matrices) instead of being
 * leaked; *dstp is left untouched on failure. */
abip_int ABIP(copy_A_matrix)
(
    ABIPMatrix **dstp,
    const ABIPMatrix *src
)
{
    abip_int Annz = src->p[src->n];

    ABIPMatrix *A = (ABIPMatrix *)abip_calloc(1, sizeof(ABIPMatrix));
    if (!A)
    {
        return 0;
    }

    A->n = src->n;
    A->m = src->m;

    A->x = (abip_float *)abip_malloc(sizeof(abip_float) * Annz);
    A->i = (abip_int *)abip_malloc(sizeof(abip_int) * Annz);
    A->p = (abip_int *)abip_malloc(sizeof(abip_int) * (src->n + 1));
    if (!A->x || !A->i || !A->p)
    {
        /* release whatever was allocated; was leaked before this fix */
        ABIP(free_A_matrix)(A);
        return 0;
    }

    memcpy(A->x, src->x, sizeof(abip_float) * Annz);
    memcpy(A->i, src->i, sizeof(abip_int) * Annz);
    memcpy(A->p, src->p, sizeof(abip_int) * (src->n + 1));

    *dstp = A;
    return 1;
}
abip_int ABIP(validate_lin_sys)
(
const ABIPMatrix *A
)
{
abip_int i;
abip_int r_max;
abip_int Annz;
if (!A->x || !A->i || !A->p)
{
abip_printf("ERROR: incomplete data!\n");
return -1;
}
for (i = 0; i < A->n; ++i)
{
if (A->p[i] == A->p[i + 1])
{
abip_printf("WARN: the %li-th column empty!\n", (long)i);
}
else if (A->p[i] > A->p[i + 1])
{
abip_printf("ERROR: the column pointers decreases!\n");
return -1;
}
}
Annz = A->p[A->n];
if (((abip_float)Annz / A->m > A->n) || (Annz <= 0))
{
abip_printf("ERROR: the number of nonzeros in A = %li, outside of valid range!\n", (long) Annz);
return -1;
}
r_max = 0;
for (i = 0; i < Annz; ++i)
{
if (A->i[i] > r_max)
{
r_max = A->i[i];
}
}
if (r_max > A->m - 1)
{
abip_printf("ERROR: the number of rows in A is inconsistent with input dimension!\n");
return -1;
}
return 0;
}
/* Release a matrix allocated with the abip_* allocators.  Each member is
 * guarded, so partially constructed matrices are safe to pass.
 * NOTE(review): the guards suggest abip_free may not accept NULL -- confirm
 * before simplifying.  A itself must be non-NULL. */
void ABIP(free_A_matrix)
(
ABIPMatrix *A
)
{
if (A->x)
{
abip_free(A->x);
}
if (A->i)
{
abip_free(A->i);
}
if (A->p)
{
abip_free(A->p);
}
abip_free(A);
}
#if EXTRA_VERBOSE > 0
/* Debug dump of a CSC matrix: per-column entries, per-column norms and the
 * overall norm.  Skipped entirely for matrices with >= 2500 nonzeros to
 * keep the log readable.  Compiled only under EXTRA_VERBOSE. */
static void print_A_matrix
(
const ABIPMatrix *A
)
{
abip_int i;
abip_int j;
if (A->p[A->n] < 2500)
{
abip_printf("\n");
for (i = 0; i < A->n; ++i)
{
abip_printf("Col %li: ", (long)i);
for (j = A->p[i]; j < A->p[i + 1]; j++)
{
abip_printf("A[%li,%li] = %4f, ", (long)A->i[j], (long)i, A->x[j]);
}
abip_printf("norm col = %4f\n", ABIP(norm)(&(A->x[A->p[i]]), A->p[i + 1] - A->p[i]));
}
abip_printf("norm A = %4f\n", ABIP(norm)(A->x, A->p[A->n]));
}
}
#endif
/* Scale A in place: first each column by its (clamped) Euclidean norm,
 * then each row by its (clamped) norm computed on the column-scaled
 * matrix, finally by the global stgs->scale factor.  The scale vectors D
 * (rows) and E (columns), and the mean row/column norms of the scaled
 * matrix, are stored in *scal for later un-normalization.  Ownership of
 * D and E transfers to *scal.
 *
 * Fix from review: removed the Dt/Et buffers, which were allocated here
 * but never used and never freed -- a memory leak on every call. */
void ABIP(_normalize_A)
(
    ABIPMatrix *A,
    const ABIPSettings *stgs,
    ABIPScaling *scal
)
{
    abip_float *D = (abip_float *)abip_malloc(A->m * sizeof(abip_float));
    abip_float *E = (abip_float *)abip_malloc(A->n * sizeof(abip_float));
    abip_float *nms = (abip_float *)abip_calloc(A->m, sizeof(abip_float));

    /* clamp bounds grow with sqrt of the opposite dimension */
    abip_float min_row_scale = MIN_SCALE * SQRTF((abip_float)A->n);
    abip_float max_row_scale = MAX_SCALE * SQRTF((abip_float)A->n);
    abip_float min_col_scale = MIN_SCALE * SQRTF((abip_float)A->m);
    abip_float max_col_scale = MAX_SCALE * SQRTF((abip_float)A->m);

    abip_int i;
    abip_int j;
    abip_int c1;
    abip_int c2;
    abip_float wrk;
    abip_float e;

    #if EXTRA_VERBOSE > 0
    ABIP(timer) normalize_timer;
    ABIP(tic)(&normalize_timer);
    abip_printf("normalizing A\n");
    print_A_matrix(A);
    #endif

    memset(D, 0, A->m * sizeof(abip_float));
    memset(E, 0, A->n * sizeof(abip_float));

    /* pass 1: scale each column by its norm (clamped; tiny norms left unscaled) */
    for (i = 0; i < A->n; ++i)
    {
        c1 = A->p[i + 1] - A->p[i];
        e = ABIP(norm)(&(A->x[A->p[i]]), c1);
        if (e < min_col_scale)
        {
            e = 1;
        }
        else if (e > max_col_scale)
        {
            e = max_col_scale;
        }
        ABIP(scale_array)(&(A->x[A->p[i]]), 1.0 / e, c1);
        E[i] = e;
    }

    /* pass 2: accumulate squared row norms of the column-scaled matrix */
    for (i = 0; i < A->n; ++i)
    {
        c1 = A->p[i];
        c2 = A->p[i + 1];
        for (j = c1; j < c2; ++j)
        {
            wrk = A->x[j];
            D[A->i[j]] += wrk * wrk;
        }
    }
    for (i = 0; i < A->m; ++i)
    {
        D[i] = SQRTF(D[i]);
        if (D[i] < min_row_scale)
        {
            D[i] = 1;
        }
        else if (D[i] > max_row_scale)
        {
            D[i] = max_row_scale;
        }
    }

    /* pass 3: scale each row */
    for (i = 0; i < A->n; ++i)
    {
        for (j = A->p[i]; j < A->p[i + 1]; ++j)
        {
            A->x[j] /= D[A->i[j]];
        }
    }

    /* record mean row norm of the fully scaled matrix */
    for (i = 0; i < A->n; ++i)
    {
        for (j = A->p[i]; j < A->p[i + 1]; ++j)
        {
            wrk = A->x[j];
            nms[A->i[j]] += wrk * wrk;
        }
    }
    scal->mean_norm_row_A = 0.0;
    for (i = 0; i < A->m; ++i)
    {
        scal->mean_norm_row_A += SQRTF(nms[i]) / A->m;
    }
    abip_free(nms);

    /* record mean column norm of the fully scaled matrix */
    scal->mean_norm_col_A = 0.0;
    for (i = 0; i < A->n; ++i)
    {
        c1 = A->p[i + 1] - A->p[i];
        scal->mean_norm_col_A += ABIP(norm)(&(A->x[A->p[i]]), c1) / A->n;
    }

    /* global scaling factor */
    if (stgs->scale != 1)
    {
        ABIP(scale_array)(A->x, stgs->scale, A->p[A->n]);
    }

    scal->D = D;
    scal->E = E;

    #if EXTRA_VERBOSE > 0
    abip_printf("finished normalizing A, time: %1.2e seconds. \n", ABIP(tocq)(&normalize_timer) / 1e3);
    print_A_matrix(A);
    #endif
}
/* Undo the scaling applied by _normalize_A: multiply the row scales D back
 * in, then rescale each column by E[col] while dividing out the global
 * stgs->scale factor. */
void ABIP(_un_normalize_A)
(
    ABIPMatrix *A,
    const ABIPSettings *stgs,
    const ABIPScaling *scal
)
{
    abip_int col;
    abip_int k;
    abip_float *D = scal->D;
    abip_float *E = scal->E;

    for (col = 0; col < A->n; ++col)
    {
        for (k = A->p[col]; k < A->p[col + 1]; ++k)
        {
            A->x[k] *= D[A->i[k]];
        }
    }

    for (col = 0; col < A->n; ++col)
    {
        ABIP(scale_array)(&(A->x[A->p[col]]), E[col] / stgs->scale, A->p[col + 1] - A->p[col]);
    }
}
/* y += A' * x for a CSC matrix A with n columns.  Each output y[j] depends
 * only on column j, so the OpenMP loop needs no atomics; p, c1, c2 and yj
 * are per-iteration temporaries and are declared private. */
void ABIP(_accum_by_Atrans)
(
abip_int n,
abip_float *Ax,
abip_int *Ai,
abip_int *Ap,
const abip_float *x,
abip_float *y
)
{
abip_int p;
abip_int j;
abip_int c1;
abip_int c2;
abip_float yj;
#if EXTRA_VERBOSE > 0
ABIP(timer) mult_by_Atrans_timer;
ABIP(tic)(&mult_by_Atrans_timer);
#endif
#ifdef _OPENMP
#pragma omp parallel for private(p, c1, c2, yj)
#endif
for (j = 0; j < n; j++)
{
yj = y[j]; /* accumulate into a local, write back once */
c1 = Ap[j];
c2 = Ap[j + 1];
for (p = c1; p < c2; p++)
{
yj += Ax[p] * x[Ai[p]];
}
y[j] = yj;
}
#if EXTRA_VERBOSE > 0
abip_printf("mult By A trans time: %1.2e seconds. \n", ABIP(tocq)(&mult_by_Atrans_timer) / 1e3);
#endif
}
/* y += A * x for a CSC matrix A with n columns.  Different columns may hit
 * the same row of y, so the OpenMP variant protects each update with an
 * atomic.
 *
 * Fix from review: the serial fallback loop previously ran UNCONDITIONALLY
 * after the `#ifdef _OPENMP` block, so OpenMP builds accumulated the
 * product twice (doubling y).  The serial loop is now the #else branch. */
void ABIP(_accum_by_A)
(
    abip_int n,
    abip_float *Ax,
    abip_int *Ai,
    abip_int *Ap,
    const abip_float *x,
    abip_float *y
)
{
    abip_int p;
    abip_int j;
    abip_int c1;
    abip_int c2;
    abip_float xj;

    #if EXTRA_VERBOSE > 0
    ABIP(timer) mult_by_A_timer;
    ABIP(tic)(&mult_by_A_timer);
    #endif

#ifdef _OPENMP
#pragma omp parallel for private(p, c1, c2, xj)
    for (j = 0; j < n; j++)
    {
        xj = x[j];
        c1 = Ap[j];
        c2 = Ap[j + 1];
        for (p = c1; p < c2; p++)
        {
            /* rows collide across columns; atomic keeps the update safe */
            #pragma omp atomic
            y[Ai[p]] += Ax[p] * xj;
        }
    }
#else
    for (j = 0; j < n; j++)
    {
        xj = x[j];
        c1 = Ap[j];
        c2 = Ap[j + 1];
        for (p = c1; p < c2; p++)
        {
            y[Ai[p]] += Ax[p] * xj;
        }
    }
#endif

    #if EXTRA_VERBOSE > 0
    abip_printf("mult By A time: %1.2e seconds \n", ABIP(tocq)(&mult_by_A_timer) / 1e3);
    #endif
}
/* Exclusive prefix sum in the CSparse style: p[0..n] receives the running
 * sums of c[0..n-1], each c[k] is overwritten with p[k], and the total is
 * returned.  The running totals are kept in abip_float so the return value
 * degrades gracefully if the count exceeds the abip_int range.  Returns -1
 * if either pointer is NULL. */
abip_float ABIP(cumsum)
(
    abip_int *p,
    abip_int *c,
    abip_int n
)
{
    abip_int k;
    abip_float running = 0;
    abip_float total = 0;

    if (!p || !c)
    {
        return (-1);
    }

    for (k = 0; k < n; k++)
    {
        p[k] = running;
        running += c[k];
        total += c[k];
        c[k] = p[k]; /* mirror the prefix back into c, CSparse convention */
    }

    p[n] = running;
    return total;
}
|
uniform_grid_environment.h | // -----------------------------------------------------------------------------
//
// Copyright (C) 2021 CERN & University of Surrey for the benefit of the
// BioDynaMo collaboration. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef CORE_ENVIRONMENT_UNIFORM_GRID_ENVIRONMENT_H_
#define CORE_ENVIRONMENT_UNIFORM_GRID_ENVIRONMENT_H_
#include <assert.h>
#include <omp.h>
#include <algorithm>
#include <array>
#include <atomic>
#include <cmath>
#include <iostream>
#include <limits>
#include <memory>
#include <mutex>
#ifdef LINUX
#include <parallel/algorithm>
#endif // LINUX
#include <utility>
#include <vector>
#include <morton/morton.h> // NOLINT
#include "core/container/agent_vector.h"
#include "core/container/fixed_size_vector.h"
#include "core/container/inline_vector.h"
#include "core/container/math_array.h"
#include "core/container/parallel_resize_vector.h"
#include "core/environment/environment.h"
#include "core/environment/morton_order.h"
#include "core/functor.h"
#include "core/load_balance_info.h"
#include "core/param/param.h"
#include "core/resource_manager.h"
#include "core/util/log.h"
#include "core/util/spinlock.h"
namespace bdm {
namespace detail {
struct InitializeGPUData;
} // namespace detail
/// A class that represents Cartesian 3D grid
class UniformGridEnvironment : public Environment {
// MechanicalForcesOpCuda needs access to some UniformGridEnvironment private
// members to reconstruct
// the grid on GPU (same for MechanicalForcesOpOpenCL)
friend struct MechanicalForcesOpCuda;
friend struct ::bdm::detail::InitializeGPUData;
friend struct MechanicalForcesOpOpenCL;
friend class SchedulerTest;
public:
/// A single unit cube of the grid
/// A single unit cube of the grid.  Agents in a box form an intrusive
/// singly-linked list threaded through the grid's successors_ vector;
/// the box stores only the head (start_) and the length.  A timestamp
/// lets the grid invalidate all boxes at once: a box whose timestamp_
/// differs from the grid's is logically empty without being cleared.
struct Box {
Spinlock lock_; // guards concurrent AddObject calls on this box
// std::atomic<bool> timestamp_;
uint32_t timestamp_; // last grid timestamp at which this box was filled
/// start value of the linked list of agents inside this box.
/// Next element can be found at `successors_[start_]`
AgentHandle start_;
/// length of the linked list (i.e. number of agents)
/// uint64_t, because sizeof(Box) = 16, for uint16_t and uint64_t
uint16_t length_;
Box() : timestamp_(0), start_(AgentHandle()), length_(0) {}
/// Copy Constructor required for boxes_.resize()
/// Since box values will be overwritten afterwards it forwards to the
/// default ctor
Box(const Box& other) : Box() {}
Box& operator=(const Box& other) {
// start_ = other.start_.load(std::memory_order_relaxed);
// length_ = other.length_.load(std::memory_order_relaxed);
start_ = other.start_;
length_ = other.length_;
return *this;
}
// A stale timestamp means the box was not touched this iteration.
bool IsEmpty(uint64_t grid_timestamp) const {
return grid_timestamp != timestamp_;
}
// Number of agents currently in the box (0 when logically empty).
uint16_t Size(uint64_t grid_timestamp) const {
if (IsEmpty(grid_timestamp)) {
return 0;
}
return length_;
}
/// @brief Adds an agent to this box
///
/// @param[in] agent The object's identifier
/// @param AddObject successors The successors
void AddObject(AgentHandle ah, AgentVector<AgentHandle>* successors,
UniformGridEnvironment* grid) {
std::lock_guard<Spinlock> lock_guard(lock_);
if (timestamp_ != grid->timestamp_) {
// First insertion this iteration: (re)initialize the list.
timestamp_ = grid->timestamp_;
length_ = 1;
start_ = ah;
} else {
// Prepend: new agent becomes the head, old head its successor.
length_++;
(*successors)[ah] = start_;
start_ = ah;
}
}
/// An iterator that iterates over the cells in this box
struct Iterator {
Iterator(UniformGridEnvironment* grid, const Box* box)
: grid_(grid), current_value_(box->start_), countdown_(box->length_) {
// A stale box yields an immediately-exhausted iterator.
if (grid->timestamp_ != box->timestamp_) {
countdown_ = 0;
}
}
bool IsAtEnd() { return countdown_ <= 0; }
Iterator& operator++() {
countdown_--;
if (countdown_ > 0) {
current_value_ = grid_->successors_[current_value_];
}
return *this;
}
AgentHandle operator*() const { return current_value_; }
/// Pointer to the neighbor grid; for accessing the successor_ list
UniformGridEnvironment* grid_;
/// The current agent to be considered
AgentHandle current_value_;
/// The remain number of agents to consider
int countdown_ = 0;
};
Iterator begin() const { // NOLINT
auto* grid = static_cast<UniformGridEnvironment*>(
Simulation::GetActive()->GetEnvironment());
return Iterator(grid, this);
}
};
/// An iterator that iterates over the boxes in this grid
struct NeighborIterator {
explicit NeighborIterator(
const FixedSizeVector<const Box*, 27>& neighbor_boxes,
uint64_t grid_timestamp)
: neighbor_boxes_(neighbor_boxes),
// start iterator from box 0
box_iterator_(neighbor_boxes_[0]->begin()),
grid_timestamp_(grid_timestamp) {
// if first box is empty
if (neighbor_boxes_[0]->IsEmpty(grid_timestamp)) {
ForwardToNonEmptyBox(grid_timestamp);
}
}
bool IsAtEnd() const { return is_end_; }
AgentHandle operator*() const { return *box_iterator_; }
/// Version where empty neighbor boxes are allowed
NeighborIterator& operator++() {
++box_iterator_;
// if iterator of current box has come to an end, continue with next box
if (box_iterator_.IsAtEnd()) {
return ForwardToNonEmptyBox(grid_timestamp_);
}
return *this;
}
private:
/// The 27 neighbor boxes that will be searched for agents
const FixedSizeVector<const Box*, 27>& neighbor_boxes_;
/// The box that shall be considered to iterate over for finding simulation
/// objects
typename Box::Iterator box_iterator_;
uint64_t grid_timestamp_;
/// The id of the box to be considered (i.e. value between 0 - 26)
uint16_t box_idx_ = 0;
/// Flag to indicate that all the neighbor boxes have been searched through
bool is_end_ = false;
/// Forwards the iterator to the next non empty box and returns itself
/// If there are no non empty boxes is_end_ is set to true
NeighborIterator& ForwardToNonEmptyBox(uint64_t grid_timestamp) {
// increment box id until non empty box has been found
while (++box_idx_ < neighbor_boxes_.size()) {
// box is empty or uninitialized (padding box) -> continue
if (neighbor_boxes_[box_idx_]->IsEmpty(grid_timestamp)) {
continue;
}
// a non-empty box has been found
box_iterator_ = neighbor_boxes_[box_idx_]->begin();
return *this;
}
// all remaining boxes have been empty; reached end
is_end_ = true;
return *this;
}
};
/// Enum that determines the degree of adjacency in search neighbor boxes
// todo(ahmad): currently only kHigh is supported (hardcoded 26 several
// places)
enum Adjacency {
kLow, /**< The closest 8 neighboring boxes */
kMedium, /**< The closest 18 neighboring boxes */
kHigh /**< The closest 26 neighboring boxes */
};
explicit UniformGridEnvironment(Adjacency adjacency = kHigh)
: adjacency_(adjacency), lbi_(this) {}
UniformGridEnvironment(UniformGridEnvironment const&) = delete;
void operator=(UniformGridEnvironment const&) = delete;
virtual ~UniformGridEnvironment() {}
/// Clears the grid
void Clear() override {
if (!is_custom_box_length_) {
box_length_ = 1;
}
box_length_squared_ = 1;
num_boxes_axis_ = {{0}};
num_boxes_xy_ = 0;
int32_t inf = std::numeric_limits<int32_t>::max();
grid_dimensions_ = {inf, -inf, inf, -inf, inf, -inf};
threshold_dimensions_ = {inf, -inf};
successors_.clear();
has_grown_ = false;
}
struct AssignToBoxesFunctor : public Functor<void, Agent*, AgentHandle> {
explicit AssignToBoxesFunctor(UniformGridEnvironment* grid) : grid_(grid) {}
void operator()(Agent* agent, AgentHandle ah) override {
const auto& position = agent->GetPosition();
auto idx = grid_->GetBoxIndex(position);
auto box = grid_->GetBoxPointer(idx);
box->AddObject(ah, &(grid_->successors_), grid_);
agent->SetBoxIdx(idx);
}
private:
UniformGridEnvironment* grid_ = nullptr;
};
void SetBoxLength(int32_t bl) {
box_length_ = bl;
is_custom_box_length_ = true;
}
int32_t GetBoxLength() { return box_length_; }
/// Updates the grid, as agents may have moved, added or deleted
void Update() override {
auto* rm = Simulation::GetActive()->GetResourceManager();
if (rm->GetNumAgents() != 0) {
Clear();
timestamp_++;
auto inf = Math::kInfinity;
std::array<double, 6> tmp_dim = {{inf, -inf, inf, -inf, inf, -inf}};
CalcSimDimensionsAndLargestAgent(&tmp_dim);
RoundOffGridDimensions(tmp_dim);
// If the box_length_ is not set manually, we set it to the largest agent
// size
if (!is_custom_box_length_) {
auto los = ceil(GetLargestAgentSize());
assert(
los > 0 &&
"The largest object size was found to be 0. Please check if your "
"cells are correctly initialized.");
box_length_ = los;
}
box_length_squared_ = box_length_ * box_length_;
for (int i = 0; i < 3; i++) {
int dimension_length =
grid_dimensions_[2 * i + 1] - grid_dimensions_[2 * i];
int r = dimension_length % box_length_;
// If the grid is not perfectly divisible along each dimension by the
// resolution, extend the grid so that it is
if (r != 0) {
// std::abs for the case that box_length_ > dimension_length
grid_dimensions_[2 * i + 1] += (box_length_ - r);
} else {
// Else extend the grid dimension with one row, because the outmost
// object lies exactly on the border
grid_dimensions_[2 * i + 1] += box_length_;
}
}
// Pad the grid to avoid out of bounds check when search neighbors
for (int i = 0; i < 3; i++) {
grid_dimensions_[2 * i] -= box_length_;
grid_dimensions_[2 * i + 1] += box_length_;
}
// Calculate how many boxes fit along each dimension
for (int i = 0; i < 3; i++) {
int dimension_length =
grid_dimensions_[2 * i + 1] - grid_dimensions_[2 * i];
assert((dimension_length % box_length_ == 0) &&
"The grid dimensions are not a multiple of its box length");
num_boxes_axis_[i] = dimension_length / box_length_;
}
num_boxes_xy_ = num_boxes_axis_[0] * num_boxes_axis_[1];
total_num_boxes_ = num_boxes_xy_ * num_boxes_axis_[2];
CheckGridGrowth();
// resize boxes_
if (boxes_.size() != total_num_boxes_) {
if (boxes_.capacity() < total_num_boxes_) {
boxes_.reserve(total_num_boxes_ * 2);
}
boxes_.resize(total_num_boxes_);
}
successors_.reserve();
// Assign agents to boxes
auto* param = Simulation::GetActive()->GetParam();
AssignToBoxesFunctor functor(this);
rm->ForEachAgentParallel(param->scheduling_batch_size, functor);
if (param->bound_space) {
int min = param->min_bound;
int max = param->max_bound;
threshold_dimensions_ = {min, max};
}
if (param->thread_safety_mechanism ==
Param::ThreadSafetyMechanism::kAutomatic) {
nb_mutex_builder_->Update();
}
} else {
// There are no agents in this simulation
auto* param = Simulation::GetActive()->GetParam();
bool uninitialized = boxes_.size() == 0;
if (uninitialized && param->bound_space) {
// Simulation has never had any agents
// Initialize grid dimensions with `Param::min_bound` and
// `Param::max_bound`
// This is required for the DiffusionGrid
int min = param->min_bound;
int max = param->max_bound;
grid_dimensions_ = {min, max, min, max, min, max};
threshold_dimensions_ = {min, max};
has_grown_ = true;
} else if (!uninitialized) {
// all agents have been removed in the last iteration
// grid state remains the same, but we have to set has_grown_ to false
// otherwise the DiffusionGrid will attempt to resize
has_grown_ = false;
} else {
Log::Fatal(
"UniformGridEnvironment",
"You tried to initialize an empty simulation without bound space. "
"Therefore we cannot determine the size of the simulation space. "
"Please add agents, or set Param::bound_space, "
"Param::min_bound, and Param::max_bound.");
}
}
}
/// @brief Calculates the squared euclidian distance between two points
/// in 3D
///
/// @param[in] pos1 Position of the first point
/// @param[in] pos2 Position of the second point
///
/// @return The distance between the two points
///
inline double SquaredEuclideanDistance(const Double3& pos1,
const Double3& pos2) const {
const double dx = pos2[0] - pos1[0];
const double dy = pos2[1] - pos1[1];
const double dz = pos2[2] - pos1[2];
return (dx * dx + dy * dy + dz * dz);
}
inline bool WithinSquaredEuclideanDistance(double squared_radius,
const Double3& pos1,
const Double3& pos2) const {
const double dx = pos2[0] - pos1[0];
const double dx2 = dx * dx;
if (dx2 > squared_radius) {
return false;
}
const double dy = pos2[1] - pos1[1];
const double dy2_plus_dx2 = dy * dy + dx2;
if (dy2_plus_dx2 > squared_radius) {
return false;
}
const double dz = pos2[2] - pos1[2];
const double distance = dz * dz + dy2_plus_dx2;
return distance < squared_radius;
}
LoadBalanceInfo* GetLoadBalanceInfo() override {
lbi_.Update();
return &lbi_;
}
/// @brief Applies the given lambda to each neighbor of the specified
/// agent is within the squared radius (i.e. the criteria)
///
/// In simulation code do not use this function directly. Use the same
/// function from the execution context (e.g. `InPlaceExecutionContext`)
///
/// @param[in] lambda The operation as a lambda
/// @param query The query object
/// @param squared_radius The squared search radius (type: double*)
///
void ForEachNeighbor(Functor<void, Agent*, double>& lambda,
const Agent& query, double squared_radius) override {
if (squared_radius > box_length_squared_) {
Log::Fatal(
"UniformGridEnvironment::ForEachNeighbor",
"The requested search radius (", std::sqrt(squared_radius), ")",
" of the neighborhood search exceeds the "
"box length (",
box_length_, "). The resulting neighborhood would be incomplete.");
}
const auto& position = query.GetPosition();
auto idx = query.GetBoxIdx();
FixedSizeVector<const Box*, 27> neighbor_boxes;
GetMooreBoxes(&neighbor_boxes, idx);
auto* rm = Simulation::GetActive()->GetResourceManager();
NeighborIterator ni(neighbor_boxes, timestamp_);
const unsigned batch_size = 64;
uint64_t size = 0;
Agent* agents[batch_size] __attribute__((aligned(64)));
double x[batch_size] __attribute__((aligned(64)));
double y[batch_size] __attribute__((aligned(64)));
double z[batch_size] __attribute__((aligned(64)));
double squared_distance[batch_size] __attribute__((aligned(64)));
auto process_batch = [&]() {
#pragma omp simd
for (uint64_t i = 0; i < size; ++i) {
const double dx = x[i] - position[0];
const double dy = y[i] - position[1];
const double dz = z[i] - position[2];
squared_distance[i] = dx * dx + dy * dy + dz * dz;
}
for (uint64_t i = 0; i < size; ++i) {
if (squared_distance[i] < squared_radius) {
lambda(agents[i], squared_distance[i]);
}
}
size = 0;
};
while (!ni.IsAtEnd()) {
auto ah = *ni;
// increment iterator already here to hide memory latency
++ni;
auto* agent = rm->GetAgent(ah);
if (agent != &query) {
agents[size] = agent;
const auto& pos = agent->GetPosition();
x[size] = pos[0];
y[size] = pos[1];
z[size] = pos[2];
size++;
if (size == batch_size) {
process_batch();
}
}
}
process_batch();
}
/// @brief Return the box index in the one dimensional array of the box
/// that contains the position
///
/// @param[in] position The position of the object
///
/// @return The box index.
///
size_t GetBoxIndex(const Double3& position) const {
std::array<uint64_t, 3> box_coord;
box_coord[0] = (floor(position[0]) - grid_dimensions_[0]) / box_length_;
box_coord[1] = (floor(position[1]) - grid_dimensions_[2]) / box_length_;
box_coord[2] = (floor(position[2]) - grid_dimensions_[4]) / box_length_;
return GetBoxIndex(box_coord);
}
std::array<int32_t, 6> GetDimensions() const override {
return grid_dimensions_;
}
std::array<int32_t, 2> GetDimensionThresholds() const override {
return threshold_dimensions_;
}
void GetNumBoxesAxis(uint32_t* nba) {
nba[0] = num_boxes_axis_[0];
nba[1] = num_boxes_axis_[1];
nba[2] = num_boxes_axis_[2];
}
uint64_t GetNumBoxes() const { return boxes_.size(); }
std::array<uint64_t, 3> GetBoxCoordinates(size_t box_idx) const {
std::array<uint64_t, 3> box_coord;
box_coord[2] = box_idx / num_boxes_xy_;
auto remainder = box_idx % num_boxes_xy_;
box_coord[1] = remainder / num_boxes_axis_[0];
box_coord[0] = remainder % num_boxes_axis_[0];
return box_coord;
}
// NeighborMutex ---------------------------------------------------------
/// This class ensures thread-safety for the InPlaceExecutionContext for the
/// case
/// that an agent modifies its neighbors.
class GridNeighborMutexBuilder : public Environment::NeighborMutexBuilder {
public:
/// The NeighborMutex class is a synchronization primitive that can be
/// used to protect agents data from being simultaneously accessed by
/// multiple threads.
class GridNeighborMutex
: public Environment::NeighborMutexBuilder::NeighborMutex {
public:
GridNeighborMutex(const FixedSizeVector<uint64_t, 27>& mutex_indices,
GridNeighborMutexBuilder* mutex_builder)
: mutex_indices_(mutex_indices), mutex_builder_(mutex_builder) {
// Deadlocks occur if mutliple threads try to acquire the same locks,
// but in different order.
// -> sort to avoid deadlocks - see lock ordering
std::sort(mutex_indices_.begin(), mutex_indices_.end());
}
virtual ~GridNeighborMutex() {}
void lock() override { // NOLINT
for (auto idx : mutex_indices_) {
auto& mutex = mutex_builder_->mutexes_[idx].mutex_;
// acquire lock (and spin if another thread is holding it)
while (mutex.test_and_set(std::memory_order_acquire)) {
}
}
}
void unlock() override { // NOLINT
for (auto idx : mutex_indices_) {
auto& mutex = mutex_builder_->mutexes_[idx].mutex_;
mutex.clear(std::memory_order_release);
}
}
void SetMutexIndices(const FixedSizeVector<uint64_t, 27>& indices) {
mutex_indices_ = indices;
std::sort(mutex_indices_.begin(), mutex_indices_.end());
}
private:
FixedSizeVector<uint64_t, 27> mutex_indices_;
GridNeighborMutexBuilder* mutex_builder_;
};
/// Used to store mutexes in a vector.
/// Always creates a new mutex (even for the copy constructor)
struct MutexWrapper {
MutexWrapper() {}
MutexWrapper(const MutexWrapper&) {}
std::atomic_flag mutex_ = ATOMIC_FLAG_INIT;
};
virtual ~GridNeighborMutexBuilder() {}
void Update() {
auto* grid = static_cast<UniformGridEnvironment*>(
Simulation::GetActive()->GetEnvironment());
mutexes_.resize(grid->GetNumBoxes());
}
NeighborMutex* GetMutex(uint64_t box_idx) override;
private:
/// one mutex for each box in `UniformGridEnvironment::boxes_`
std::vector<MutexWrapper> mutexes_;
};
/// Returns the `NeighborMutexBuilder`. The client use it to create a
/// `NeighborMutex`.
NeighborMutexBuilder* GetNeighborMutexBuilder() override {
return nb_mutex_builder_.get();
}
private:
class LoadBalanceInfoUG : public LoadBalanceInfo {
public:
LoadBalanceInfoUG(UniformGridEnvironment* grid);
virtual ~LoadBalanceInfoUG();
void Update();
void CallHandleIteratorConsumer(
uint64_t start, uint64_t end,
Functor<void, Iterator<AgentHandle>*>& f) const override;
private:
UniformGridEnvironment* grid_;
MortonOrder mo_;
ParallelResizeVector<Box*> sorted_boxes_;
ParallelResizeVector<uint64_t> cummulated_agents_;
struct InitializeVectorFunctor : public Functor<void, Iterator<uint64_t>*> {
UniformGridEnvironment* grid;
uint64_t start;
ParallelResizeVector<Box*>& sorted_boxes;
ParallelResizeVector<uint64_t>& cummulated_agents;
InitializeVectorFunctor(UniformGridEnvironment* grid, uint64_t start,
decltype(sorted_boxes) sorted_boxes,
decltype(cummulated_agents) cummulated_agents);
virtual ~InitializeVectorFunctor();
void operator()(Iterator<uint64_t>* it) override;
};
void AllocateMemory();
void InitializeVectors();
};
/// The vector containing all the boxes in the grid
/// Using parallel resize vector to enable parallel initialization and thus
/// better scalability.
ParallelResizeVector<Box> boxes_;
/// is incremented at each call to Update
/// This is used to decide if boxes should be reinitialized
uint32_t timestamp_ = 0;
/// Length of a Box
int32_t box_length_ = 1;
/// Length of a Box squared
int32_t box_length_squared_ = 1;
/// True when the box length was set manually
bool is_custom_box_length_ = false;
/// Stores the number of Boxes for each axis
std::array<uint64_t, 3> num_boxes_axis_ = {{0}};
/// Number of boxes in the xy plane (=num_boxes_axis_[0] * num_boxes_axis_[1])
size_t num_boxes_xy_ = 0;
/// The total number of boxes in the uniform grid
uint64_t total_num_boxes_ = 0;
/// Implements linked list - array index = key, value: next element
///
/// // Usage
/// AgentHandle current_element = ...;
/// AgentHandle next_element = successors_[current_element];
AgentVector<AgentHandle> successors_;
/// Determines which boxes to search neighbors in (see enum Adjacency)
Adjacency adjacency_;
/// Cube which contains all agents
/// {x_min, x_max, y_min, y_max, z_min, z_max}
std::array<int32_t, 6> grid_dimensions_;
/// Stores the min / max dimension value that need to be surpassed in order
/// to trigger a diffusion grid change
std::array<int32_t, 2> threshold_dimensions_;
LoadBalanceInfoUG lbi_; //!
/// Holds instance of NeighborMutexBuilder.
/// NeighborMutexBuilder is updated if `Param::thread_safety_mechanism`
/// is set to `kAutomatic`
std::unique_ptr<GridNeighborMutexBuilder> nb_mutex_builder_ =
std::make_unique<GridNeighborMutexBuilder>();
void CheckGridGrowth() {
// Determine if the grid dimensions have changed (changed in the sense that
// the grid has grown outwards)
auto min_gd =
*std::min_element(grid_dimensions_.begin(), grid_dimensions_.end());
auto max_gd =
*std::max_element(grid_dimensions_.begin(), grid_dimensions_.end());
if (min_gd < threshold_dimensions_[0]) {
threshold_dimensions_[0] = min_gd;
has_grown_ = true;
}
if (max_gd > threshold_dimensions_[1]) {
Log::Info("UniformGridEnvironment",
"Your agents are getting near the edge of "
"the simulation space. Be aware of boundary conditions that "
"may come into play!");
threshold_dimensions_[1] = max_gd;
has_grown_ = true;
}
}
void RoundOffGridDimensions(const std::array<double, 6>& grid_dimensions) {
grid_dimensions_[0] = floor(grid_dimensions[0]);
grid_dimensions_[2] = floor(grid_dimensions[2]);
grid_dimensions_[4] = floor(grid_dimensions[4]);
grid_dimensions_[1] = ceil(grid_dimensions[1]);
grid_dimensions_[3] = ceil(grid_dimensions[3]);
grid_dimensions_[5] = ceil(grid_dimensions[5]);
}
/// @brief Gets the Moore (i.e adjacent) boxes of the query boxAlso adds
/// the
/// query box.
///
/// @param[out] neighbor_boxes The neighbor boxes
/// @param[in] box_idx The query box
///
void GetMooreBoxes(FixedSizeVector<const Box*, 27>* neighbor_boxes,
size_t box_idx) const {
neighbor_boxes->push_back(GetBoxPointer(box_idx));
// Adjacent 6 (top, down, left, right, front and back)
if (adjacency_ >= kLow) {
neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_xy_));
neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_xy_));
neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_axis_[0]));
neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_axis_[0]));
neighbor_boxes->push_back(GetBoxPointer(box_idx - 1));
neighbor_boxes->push_back(GetBoxPointer(box_idx + 1));
}
// Adjacent 12
if (adjacency_ >= kMedium) {
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_xy_ - num_boxes_axis_[0]));
neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_xy_ - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_axis_[0] - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_xy_ - num_boxes_axis_[0]));
neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_xy_ - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_axis_[0] - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_xy_ + num_boxes_axis_[0]));
neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_xy_ + 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_axis_[0] + 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_xy_ + num_boxes_axis_[0]));
neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_xy_ + 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_axis_[0] + 1));
}
// Adjacent 8
if (adjacency_ >= kHigh) {
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_xy_ - num_boxes_axis_[0] - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_xy_ - num_boxes_axis_[0] + 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_xy_ + num_boxes_axis_[0] - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_xy_ + num_boxes_axis_[0] + 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_xy_ - num_boxes_axis_[0] - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_xy_ - num_boxes_axis_[0] + 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_xy_ + num_boxes_axis_[0] - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_xy_ + num_boxes_axis_[0] + 1));
}
}
/// @brief Gets the box indices of all adjacent boxes. Also adds the
/// query box index.
///
/// @param[out] box_indices Result containing all box indices
/// @param[in] box_idx The query box
///
void GetMooreBoxIndices(FixedSizeVector<uint64_t, 27>* box_indices,
size_t box_idx) const {
box_indices->push_back(box_idx);
// Adjacent 6 (top, down, left, right, front and back)
if (adjacency_ >= kLow) {
box_indices->push_back(box_idx - num_boxes_xy_);
box_indices->push_back(box_idx + num_boxes_xy_);
box_indices->push_back(box_idx - num_boxes_axis_[0]);
box_indices->push_back(box_idx + num_boxes_axis_[0]);
box_indices->push_back(box_idx - 1);
box_indices->push_back(box_idx + 1);
}
// Adjacent 12
if (adjacency_ >= kMedium) {
box_indices->push_back(box_idx - num_boxes_xy_ - num_boxes_axis_[0]);
box_indices->push_back(box_idx - num_boxes_xy_ - 1);
box_indices->push_back(box_idx - num_boxes_axis_[0] - 1);
box_indices->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0]);
box_indices->push_back(box_idx + num_boxes_xy_ - 1);
box_indices->push_back(box_idx + num_boxes_axis_[0] - 1);
box_indices->push_back(box_idx - num_boxes_xy_ + num_boxes_axis_[0]);
box_indices->push_back(box_idx - num_boxes_xy_ + 1);
box_indices->push_back(box_idx - num_boxes_axis_[0] + 1);
box_indices->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0]);
box_indices->push_back(box_idx + num_boxes_xy_ + 1);
box_indices->push_back(box_idx + num_boxes_axis_[0] + 1);
}
// Adjacent 8
if (adjacency_ >= kHigh) {
box_indices->push_back(box_idx - num_boxes_xy_ - num_boxes_axis_[0] - 1);
box_indices->push_back(box_idx - num_boxes_xy_ - num_boxes_axis_[0] + 1);
box_indices->push_back(box_idx - num_boxes_xy_ + num_boxes_axis_[0] - 1);
box_indices->push_back(box_idx - num_boxes_xy_ + num_boxes_axis_[0] + 1);
box_indices->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0] - 1);
box_indices->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0] + 1);
box_indices->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0] - 1);
box_indices->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0] + 1);
}
}
/// Determines current box based on parameter box_idx and adds it together
/// with half of the surrounding boxes to the vector.
/// Legend: C = center, N = north, E = east, S = south, W = west, F = front,
/// B = back
/// For each box pair which is centro-symmetric only one box is taken --
/// e.g. E-W: E, or BNW-FSE: BNW
///
/// (x-axis to the right \ y-axis up)
/// z=1
/// +-----+----+-----+
/// | BNW | BN | BNE |
/// +-----+----+-----+
/// | NW | N | NE |
/// +-----+----+-----+
/// | FNW | FN | FNE |
/// +-----+----+-----+
///
/// z = 0
/// +-----+----+-----+
/// | BW | B | BE |
/// +-----+----+-----+
/// | W | C | E |
/// +-----+----+-----+
/// | FW | F | FE |
/// +-----+----+-----+
///
/// z = -1
/// +-----+----+-----+
/// | BSW | BS | BSE |
/// +-----+----+-----+
/// | SW | S | SE |
/// +-----+----+-----+
/// | FSW | FS | FSE |
/// +-----+----+-----+
///
void GetHalfMooreBoxIndices(FixedSizeVector<size_t, 14>* neighbor_boxes,
size_t box_idx) const {
// C
neighbor_boxes->push_back(box_idx);
// BW
neighbor_boxes->push_back(box_idx + num_boxes_axis_[0] - 1);
// FNW
neighbor_boxes->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0] - 1);
// NW
neighbor_boxes->push_back(box_idx + num_boxes_xy_ - 1);
// BNW
neighbor_boxes->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0] - 1);
// B
neighbor_boxes->push_back(box_idx + num_boxes_axis_[0]);
// FN
neighbor_boxes->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0]);
// N
neighbor_boxes->push_back(box_idx + num_boxes_xy_);
// BN
neighbor_boxes->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0]);
// E
neighbor_boxes->push_back(box_idx + 1);
// BE
neighbor_boxes->push_back(box_idx + num_boxes_axis_[0] + 1);
// FNE
neighbor_boxes->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0] + 1);
// NE
neighbor_boxes->push_back(box_idx + num_boxes_xy_ + 1);
// BNE
neighbor_boxes->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0] + 1);
}
/// @brief Gets the pointer to the box with the given index
///
/// @param[in] index The index of the box
///
/// @return The pointer to the box
///
const Box* GetBoxPointer(size_t index) const { return &(boxes_[index]); }
/// @brief Gets the pointer to the box with the given index
///
/// @param[in] index The index of the box
///
/// @return The pointer to the box
///
Box* GetBoxPointer(size_t index) { return &(boxes_[index]); }
/// Returns the box index in the one dimensional array based on box
/// coordinates in space
///
/// @param box_coord box coordinates in space (x, y, z)
///
/// @return The box index.
///
size_t GetBoxIndex(const std::array<uint64_t, 3>& box_coord) const {
return box_coord[2] * num_boxes_xy_ + box_coord[1] * num_boxes_axis_[0] +
box_coord[0];
}
};
} // namespace bdm
#endif // CORE_ENVIRONMENT_UNIFORM_GRID_ENVIRONMENT_H_
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF EEEEE AAA TTTTT U U RRRR EEEEE %
% F E A A T U U R R E %
% FFF EEE AAAAA T U U RRRR EEE %
% F E A A T U U R R E %
% F EEEEE A A T UUU R R EEEEE %
% %
% %
% MagickCore Image Feature Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/feature.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a n n y E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CannyEdgeImage() uses a multi-stage algorithm to detect a wide range of
% edges in images.
%
% The format of the CannyEdgeImage method is:
%
% Image *CannyEdgeImage(const Image *image,const double radius,
% const double sigma,const double lower_percent,
% const double upper_percent,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the gaussian smoothing filter.
%
% o sigma: the sigma of the gaussian smoothing filter.
%
% o lower_percent: percentage of edge pixels in the lower threshold.
%
% o upper_percent: percentage of edge pixels in the upper threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Per-pixel state used by the Canny edge detector.  TraceEdges() reads
  `intensity` for the hysteresis threshold and reuses `x`/`y` as work-list
  coordinates; `magnitude` and `orientation` are presumably the gradient
  magnitude and quantized direction set elsewhere in the detector -- confirm
  against the gradient stage.
*/
typedef struct _CannyInfo
{
  double
    magnitude,
    intensity;

  int
    orientation;

  ssize_t
    x,
    y;
} CannyInfo;
/*
  Return MagickTrue when the coordinate (x,y) lies inside the image bounds,
  MagickFalse otherwise.
*/
static inline MagickBooleanType IsAuthenticPixel(const Image *image,
  const ssize_t x,const ssize_t y)
{
  MagickBooleanType
    in_columns,
    in_rows;

  in_columns=(x >= 0) && (x < (ssize_t) image->columns) ? MagickTrue :
    MagickFalse;
  in_rows=(y >= 0) && (y < (ssize_t) image->rows) ? MagickTrue : MagickFalse;
  if ((in_columns != MagickFalse) && (in_rows != MagickFalse))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  TraceEdges() implements the hysteresis step of the Canny detector: starting
  from the strong-edge seed pixel (x,y) it marks (sets to QuantumRange) every
  8-connected pixel whose cached gradient intensity is at least
  lower_threshold and that is not yet marked.  Row 0 of canny_cache is reused
  as an explicit stack of CannyInfo entries (`i` is the current stack size),
  which avoids recursion.  Returns MagickFalse on any pixel-cache or matrix
  failure, MagickTrue otherwise.
*/
static MagickBooleanType TraceEdges(Image *edge_image,CacheView *edge_view,
  MatrixInfo *canny_cache,const ssize_t x,const ssize_t y,
  const double lower_threshold,ExceptionInfo *exception)
{
  CannyInfo
    edge,
    pixel;

  MagickBooleanType
    status;

  Quantum
    *q;

  ssize_t
    i;

  /* mark the seed pixel as an edge */
  q=GetCacheViewAuthenticPixels(edge_view,x,y,1,1,exception);
  if (q == (Quantum *) NULL)
    return(MagickFalse);
  *q=QuantumRange;
  status=SyncCacheViewAuthenticPixels(edge_view,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  /* push the seed coordinates onto the stack stored in row 0 of the cache */
  if (GetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  edge.x=x;
  edge.y=y;
  if (SetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  /* iterate until the work stack is empty */
  for (i=1; i != 0; )
  {
    ssize_t
      v;

    /* pop one edge pixel */
    i--;
    status=GetMatrixElement(canny_cache,i,0,&edge);
    if (status == MagickFalse)
      return(MagickFalse);
    /* examine the 8-connected neighborhood of the popped pixel */
    for (v=(-1); v <= 1; v++)
    {
      ssize_t
        u;

      for (u=(-1); u <= 1; u++)
      {
        if ((u == 0) && (v == 0))
          continue;
        if (IsAuthenticPixel(edge_image,edge.x+u,edge.y+v) == MagickFalse)
          continue;
        /*
          Not an edge if gradient value is below the lower threshold.
        */
        q=GetCacheViewAuthenticPixels(edge_view,edge.x+u,edge.y+v,1,1,
          exception);
        if (q == (Quantum *) NULL)
          return(MagickFalse);
        status=GetMatrixElement(canny_cache,edge.x+u,edge.y+v,&pixel);
        if (status == MagickFalse)
          return(MagickFalse);
        /* unmarked weak-edge neighbor -> mark it and push it for expansion */
        if ((GetPixelIntensity(edge_image,q) == 0.0) &&
            (pixel.intensity >= lower_threshold))
          {
            *q=QuantumRange;
            status=SyncCacheViewAuthenticPixels(edge_view,exception);
            if (status == MagickFalse)
              return(MagickFalse);
            edge.x+=u;
            edge.y+=v;
            status=SetMatrixElement(canny_cache,i,0,&edge);
            if (status == MagickFalse)
              return(MagickFalse);
            i++;
          }
      }
    }
  }
  return(MagickTrue);
}
MagickExport Image *CannyEdgeImage(const Image *image,const double radius,
  const double sigma,const double lower_percent,const double upper_percent,
  ExceptionInfo *exception)
{
#define CannyEdgeImageTag "CannyEdge/Image"

  /*
    Canny edge detector in five phases: (1) Gaussian blur to suppress noise,
    (2) per-pixel intensity gradient magnitude and quantized orientation,
    (3) non-maxima suppression along the gradient direction, (4) hysteresis
    threshold estimation from the observed intensity range, and (5) edge
    tracing (TraceEdges) seeded from pixels above the upper threshold.
    Returns a newly allocated edge image, or NULL on failure.
  */
  CacheView
    *edge_view;

  CannyInfo
    element;

  char
    geometry[MagickPathExtent];

  double
    lower_threshold,
    max,
    min,
    upper_threshold;

  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *canny_cache;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Filter out noise.
  */
  (void) FormatLocaleString(geometry,MagickPathExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  edge_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  if (TransformImageColorspace(edge_image,GRAYColorspace,exception) == MagickFalse)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(edge_image,OffAlphaChannel,exception);
  /*
    Find the intensity gradient of the image.
  */
  canny_cache=AcquireMatrixInfo(edge_image->columns,edge_image->rows,
    sizeof(CannyInfo),exception);
  if (canny_cache == (MatrixInfo *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  edge_view=AcquireVirtualCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch a 2-row window (the virtual view pads past the border) for the
      2x2 gradient kernels below.
    */
    p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns+1,2,
      exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      double
        dx,
        dy;

      const Quantum
        *magick_restrict kernel_pixels;

      ssize_t
        v;

      /*
        2x2 finite-difference kernels for the horizontal (Gx) and vertical
        (Gy) gradient components.
      */
      static double
        Gx[2][2] =
        {
          { -1.0, +1.0 },
          { -1.0, +1.0 }
        },
        Gy[2][2] =
        {
          { +1.0, +1.0 },
          { -1.0, -1.0 }
        };

      (void) memset(&pixel,0,sizeof(pixel));
      dx=0.0;
      dy=0.0;
      kernel_pixels=p;
      for (v=0; v < 2; v++)
      {
        ssize_t
          u;

        for (u=0; u < 2; u++)
        {
          double
            intensity;

          intensity=GetPixelIntensity(edge_image,kernel_pixels+u);
          dx+=0.5*Gx[v][u]*intensity;
          dy+=0.5*Gy[v][u]*intensity;
        }
        kernel_pixels+=edge_image->columns+1;
      }
      pixel.magnitude=hypot(dx,dy);
      /*
        Quantize the gradient direction into four bins; the slope cut-offs
        2.414... and 0.414... are tan(67.5 deg) and tan(22.5 deg).
      */
      pixel.orientation=0;
      if (fabs(dx) > MagickEpsilon)
        {
          double
            slope;

          slope=dy/dx;
          if (slope < 0.0)
            {
              if (slope < -2.41421356237)
                pixel.orientation=0;
              else
                if (slope < -0.414213562373)
                  pixel.orientation=1;
                else
                  pixel.orientation=2;
            }
          else
            {
              if (slope > 2.41421356237)
                pixel.orientation=0;
              else
                if (slope > 0.414213562373)
                  pixel.orientation=3;
                else
                  pixel.orientation=2;
            }
        }
      if (SetMatrixElement(canny_cache,x,y,&pixel) == MagickFalse)
        continue;
      p+=GetPixelChannels(edge_image);
    }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Non-maxima suppression, remove pixels that are not considered to be part
    of an edge.
  */
  progress=0;
  (void) GetMatrixElement(canny_cache,0,0,&element);
  max=element.intensity;
  min=element.intensity;
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(edge_view,0,y,edge_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        alpha_pixel,
        beta_pixel,
        pixel;

      /*
        Compare this pixel against its two neighbors along the quantized
        gradient direction.
      */
      (void) GetMatrixElement(canny_cache,x,y,&pixel);
      switch (pixel.orientation)
      {
        case 0:
        default:
        {
          /*
            0 degrees, north and south.
          */
          (void) GetMatrixElement(canny_cache,x,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x,y+1,&beta_pixel);
          break;
        }
        case 1:
        {
          /*
            45 degrees, northwest and southeast.
          */
          (void) GetMatrixElement(canny_cache,x-1,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y+1,&beta_pixel);
          break;
        }
        case 2:
        {
          /*
            90 degrees, east and west.
          */
          (void) GetMatrixElement(canny_cache,x-1,y,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y,&beta_pixel);
          break;
        }
        case 3:
        {
          /*
            135 degrees, northeast and southwest.
          */
          (void) GetMatrixElement(canny_cache,x+1,y-1,&beta_pixel);
          (void) GetMatrixElement(canny_cache,x-1,y+1,&alpha_pixel);
          break;
        }
      }
      /*
        Keep the magnitude only if this pixel is a local maximum along the
        gradient direction; otherwise suppress its intensity to zero.
      */
      pixel.intensity=pixel.magnitude;
      if ((pixel.magnitude < alpha_pixel.magnitude) ||
          (pixel.magnitude < beta_pixel.magnitude))
        pixel.intensity=0;
      (void) SetMatrixElement(canny_cache,x,y,&pixel);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_CannyEdgeImage)
#endif
      {
        /*
          Track the global intensity range for threshold estimation; the
          critical section serializes min/max updates across threads.
        */
        if (pixel.intensity < min)
          min=pixel.intensity;
        if (pixel.intensity > max)
          max=pixel.intensity;
      }
      /*
        Clear the output pixel.  NOTE(review): writes channel 0 only --
        presumably a single intensity channel after the grayscale transform
        above; confirm.
      */
      *q=0;
      q+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(edge_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Estimate hysteresis threshold.
  */
  lower_threshold=lower_percent*(max-min)+min;
  upper_threshold=upper_percent*(max-min)+min;
  /*
    Hysteresis threshold.
  */
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      const Quantum
        *magick_restrict p;

      /*
        Edge if pixel gradient higher than upper threshold.
      */
      p=GetCacheViewVirtualPixels(edge_view,x,y,1,1,exception);
      if (p == (const Quantum *) NULL)
        continue;
      status=GetMatrixElement(canny_cache,x,y,&pixel);
      if (status == MagickFalse)
        continue;
      if ((GetPixelIntensity(edge_image,p) == 0.0) &&
          (pixel.intensity >= upper_threshold))
        status=TraceEdges(edge_image,edge_view,canny_cache,x,y,lower_threshold,
          exception);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          NOTE(review): this loop has no omp parallel pragma, so the atomic
          below is effectively a no-op here; harmless but confirm intent.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CannyEdgeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Free resources.
  */
  canny_cache=DestroyMatrixInfo(canny_cache);
  return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e F e a t u r e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageFeatures() returns features for each channel in the image in
% each of four directions (horizontal, vertical, left and right diagonals)
% for the specified distance. The features include the angular second
% moment, contrast, correlation, sum of squares: variance, inverse difference
% moment, sum average, sum variance, sum entropy, entropy, difference variance,
% difference entropy, information measures of correlation 1, information
% measures of correlation 2, and maximum correlation coefficient. You can
% access the red channel contrast, for example, like this:
%
% channel_features=GetImageFeatures(image,1,exception);
% contrast=channel_features[RedPixelChannel].contrast[0];
%
% Use MagickRelinquishMemory() to free the features buffer.
%
% The format of the GetImageFeatures method is:
%
% ChannelFeatures *GetImageFeatures(const Image *image,
% const size_t distance,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o distance: the distance.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelFeatures *GetImageFeatures(const Image *image,
const size_t distance,ExceptionInfo *exception)
{
typedef struct _ChannelStatistics
{
PixelInfo
direction[4]; /* horizontal, vertical, left and right diagonals */
} ChannelStatistics;
CacheView
*image_view;
ChannelFeatures
*channel_features;
ChannelStatistics
**cooccurrence,
correlation,
*density_x,
*density_xy,
*density_y,
entropy_x,
entropy_xy,
entropy_xy1,
entropy_xy2,
entropy_y,
mean,
**Q,
*sum,
sum_squares,
variance;
PixelPacket
gray,
*grays;
MagickBooleanType
status;
ssize_t
i,
r;
size_t
length;
unsigned int
number_grays;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->columns < (distance+1)) || (image->rows < (distance+1)))
return((ChannelFeatures *) NULL);
length=MaxPixelChannels+1UL;
channel_features=(ChannelFeatures *) AcquireQuantumMemory(length,
sizeof(*channel_features));
if (channel_features == (ChannelFeatures *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(channel_features,0,length*
sizeof(*channel_features));
/*
Form grays.
*/
grays=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*grays));
if (grays == (PixelPacket *) NULL)
{
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
for (i=0; i <= (ssize_t) MaxMap; i++)
{
grays[i].red=(~0U);
grays[i].green=(~0U);
grays[i].blue=(~0U);
grays[i].alpha=(~0U);
grays[i].black=(~0U);
}
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (r=0; r < (ssize_t) image->rows; r++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,r,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
grays[ScaleQuantumToMap(GetPixelRed(image,p))].red=
ScaleQuantumToMap(GetPixelRed(image,p));
grays[ScaleQuantumToMap(GetPixelGreen(image,p))].green=
ScaleQuantumToMap(GetPixelGreen(image,p));
grays[ScaleQuantumToMap(GetPixelBlue(image,p))].blue=
ScaleQuantumToMap(GetPixelBlue(image,p));
if (image->colorspace == CMYKColorspace)
grays[ScaleQuantumToMap(GetPixelBlack(image,p))].black=
ScaleQuantumToMap(GetPixelBlack(image,p));
if (image->alpha_trait != UndefinedPixelTrait)
grays[ScaleQuantumToMap(GetPixelAlpha(image,p))].alpha=
ScaleQuantumToMap(GetPixelAlpha(image,p));
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
grays=(PixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
return(channel_features);
}
(void) memset(&gray,0,sizeof(gray));
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if (grays[i].red != ~0U)
grays[gray.red++].red=grays[i].red;
if (grays[i].green != ~0U)
grays[gray.green++].green=grays[i].green;
if (grays[i].blue != ~0U)
grays[gray.blue++].blue=grays[i].blue;
if (image->colorspace == CMYKColorspace)
if (grays[i].black != ~0U)
grays[gray.black++].black=grays[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
if (grays[i].alpha != ~0U)
grays[gray.alpha++].alpha=grays[i].alpha;
}
/*
Allocate spatial dependence matrix.
*/
number_grays=gray.red;
if (gray.green > number_grays)
number_grays=gray.green;
if (gray.blue > number_grays)
number_grays=gray.blue;
if (image->colorspace == CMYKColorspace)
if (gray.black > number_grays)
number_grays=gray.black;
if (image->alpha_trait != UndefinedPixelTrait)
if (gray.alpha > number_grays)
number_grays=gray.alpha;
cooccurrence=(ChannelStatistics **) AcquireQuantumMemory(number_grays,
sizeof(*cooccurrence));
density_x=(ChannelStatistics *) AcquireQuantumMemory(number_grays+1,
2*sizeof(*density_x));
density_xy=(ChannelStatistics *) AcquireQuantumMemory(number_grays+1,
2*sizeof(*density_xy));
density_y=(ChannelStatistics *) AcquireQuantumMemory(number_grays+1,
2*sizeof(*density_y));
Q=(ChannelStatistics **) AcquireQuantumMemory(number_grays,sizeof(*Q));
sum=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(*sum));
if ((cooccurrence == (ChannelStatistics **) NULL) ||
(density_x == (ChannelStatistics *) NULL) ||
(density_xy == (ChannelStatistics *) NULL) ||
(density_y == (ChannelStatistics *) NULL) ||
(Q == (ChannelStatistics **) NULL) ||
(sum == (ChannelStatistics *) NULL))
{
if (Q != (ChannelStatistics **) NULL)
{
for (i=0; i < (ssize_t) number_grays; i++)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
}
if (sum != (ChannelStatistics *) NULL)
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
if (density_y != (ChannelStatistics *) NULL)
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
if (density_xy != (ChannelStatistics *) NULL)
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
if (density_x != (ChannelStatistics *) NULL)
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
if (cooccurrence != (ChannelStatistics **) NULL)
{
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(
cooccurrence);
}
grays=(PixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
(void) memset(&correlation,0,sizeof(correlation));
(void) memset(density_x,0,2*(number_grays+1)*sizeof(*density_x));
(void) memset(density_xy,0,2*(number_grays+1)*sizeof(*density_xy));
(void) memset(density_y,0,2*(number_grays+1)*sizeof(*density_y));
(void) memset(&mean,0,sizeof(mean));
(void) memset(sum,0,number_grays*sizeof(*sum));
(void) memset(&sum_squares,0,sizeof(sum_squares));
(void) memset(density_xy,0,2*number_grays*sizeof(*density_xy));
(void) memset(&entropy_x,0,sizeof(entropy_x));
(void) memset(&entropy_xy,0,sizeof(entropy_xy));
(void) memset(&entropy_xy1,0,sizeof(entropy_xy1));
(void) memset(&entropy_xy2,0,sizeof(entropy_xy2));
(void) memset(&entropy_y,0,sizeof(entropy_y));
(void) memset(&variance,0,sizeof(variance));
for (i=0; i < (ssize_t) number_grays; i++)
{
cooccurrence[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,
sizeof(**cooccurrence));
Q[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(**Q));
if ((cooccurrence[i] == (ChannelStatistics *) NULL) ||
(Q[i] == (ChannelStatistics *) NULL))
break;
(void) memset(cooccurrence[i],0,number_grays*
sizeof(**cooccurrence));
(void) memset(Q[i],0,number_grays*sizeof(**Q));
}
if (i < (ssize_t) number_grays)
{
for (i--; i >= 0; i--)
{
if (Q[i] != (ChannelStatistics *) NULL)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
if (cooccurrence[i] != (ChannelStatistics *) NULL)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
}
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
grays=(PixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
/*
Initialize spatial dependence matrix.
*/
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
for (r=0; r < (ssize_t) image->rows; r++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
ssize_t
offset,
u,
v;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-(ssize_t) distance,r,image->columns+
2*distance,distance+2,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
p+=distance*GetPixelChannels(image);;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < 4; i++)
{
switch (i)
{
case 0:
default:
{
/*
Horizontal adjacency.
*/
offset=(ssize_t) distance;
break;
}
case 1:
{
/*
Vertical adjacency.
*/
offset=(ssize_t) (image->columns+2*distance);
break;
}
case 2:
{
/*
Right diagonal adjacency.
*/
offset=(ssize_t) ((image->columns+2*distance)-distance);
break;
}
case 3:
{
/*
Left diagonal adjacency.
*/
offset=(ssize_t) ((image->columns+2*distance)+distance);
break;
}
}
u=0;
v=0;
while (grays[u].red != ScaleQuantumToMap(GetPixelRed(image,p)))
u++;
while (grays[v].red != ScaleQuantumToMap(GetPixelRed(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].red++;
cooccurrence[v][u].direction[i].red++;
u=0;
v=0;
while (grays[u].green != ScaleQuantumToMap(GetPixelGreen(image,p)))
u++;
while (grays[v].green != ScaleQuantumToMap(GetPixelGreen(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].green++;
cooccurrence[v][u].direction[i].green++;
u=0;
v=0;
while (grays[u].blue != ScaleQuantumToMap(GetPixelBlue(image,p)))
u++;
while (grays[v].blue != ScaleQuantumToMap(GetPixelBlue(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].blue++;
cooccurrence[v][u].direction[i].blue++;
if (image->colorspace == CMYKColorspace)
{
u=0;
v=0;
while (grays[u].black != ScaleQuantumToMap(GetPixelBlack(image,p)))
u++;
while (grays[v].black != ScaleQuantumToMap(GetPixelBlack(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].black++;
cooccurrence[v][u].direction[i].black++;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
u=0;
v=0;
while (grays[u].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p)))
u++;
while (grays[v].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].alpha++;
cooccurrence[v][u].direction[i].alpha++;
}
}
p+=GetPixelChannels(image);
}
}
grays=(PixelPacket *) RelinquishMagickMemory(grays);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
/*
Normalize spatial dependence matrix.
*/
for (i=0; i < 4; i++)
{
double
normalize;
ssize_t
y;
switch (i)
{
case 0:
default:
{
/*
Horizontal adjacency.
*/
normalize=2.0*image->rows*(image->columns-distance);
break;
}
case 1:
{
/*
Vertical adjacency.
*/
normalize=2.0*(image->rows-distance)*image->columns;
break;
}
case 2:
{
/*
Right diagonal adjacency.
*/
normalize=2.0*(image->rows-distance)*(image->columns-distance);
break;
}
case 3:
{
/*
Left diagonal adjacency.
*/
normalize=2.0*(image->rows-distance)*(image->columns-distance);
break;
}
}
normalize=PerceptibleReciprocal(normalize);
for (y=0; y < (ssize_t) number_grays; y++)
{
ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
cooccurrence[x][y].direction[i].red*=normalize;
cooccurrence[x][y].direction[i].green*=normalize;
cooccurrence[x][y].direction[i].blue*=normalize;
if (image->colorspace == CMYKColorspace)
cooccurrence[x][y].direction[i].black*=normalize;
if (image->alpha_trait != UndefinedPixelTrait)
cooccurrence[x][y].direction[i].alpha*=normalize;
}
}
}
/*
Compute texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
ssize_t
y;
for (y=0; y < (ssize_t) number_grays; y++)
{
ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Angular second moment: measure of homogeneity of the image.
*/
channel_features[RedPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].red*
cooccurrence[x][y].direction[i].red;
channel_features[GreenPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].green*
cooccurrence[x][y].direction[i].green;
channel_features[BluePixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].blue*
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].black*
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].alpha*
cooccurrence[x][y].direction[i].alpha;
/*
Correlation: measure of linear-dependencies in the image.
*/
sum[y].direction[i].red+=cooccurrence[x][y].direction[i].red;
sum[y].direction[i].green+=cooccurrence[x][y].direction[i].green;
sum[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
sum[y].direction[i].black+=cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
sum[y].direction[i].alpha+=cooccurrence[x][y].direction[i].alpha;
correlation.direction[i].red+=x*y*cooccurrence[x][y].direction[i].red;
correlation.direction[i].green+=x*y*
cooccurrence[x][y].direction[i].green;
correlation.direction[i].blue+=x*y*
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
correlation.direction[i].black+=x*y*
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
correlation.direction[i].alpha+=x*y*
cooccurrence[x][y].direction[i].alpha;
/*
Inverse Difference Moment.
*/
channel_features[RedPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].red/((y-x)*(y-x)+1);
channel_features[GreenPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].green/((y-x)*(y-x)+1);
channel_features[BluePixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].blue/((y-x)*(y-x)+1);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].black/((y-x)*(y-x)+1);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].alpha/((y-x)*(y-x)+1);
/*
Sum average.
*/
density_xy[y+x+2].direction[i].red+=
cooccurrence[x][y].direction[i].red;
density_xy[y+x+2].direction[i].green+=
cooccurrence[x][y].direction[i].green;
density_xy[y+x+2].direction[i].blue+=
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_xy[y+x+2].direction[i].black+=
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
density_xy[y+x+2].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
/*
Entropy.
*/
channel_features[RedPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].red*
MagickLog10(cooccurrence[x][y].direction[i].red);
channel_features[GreenPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].green*
MagickLog10(cooccurrence[x][y].direction[i].green);
channel_features[BluePixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].blue*
MagickLog10(cooccurrence[x][y].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].black*
MagickLog10(cooccurrence[x][y].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].alpha*
MagickLog10(cooccurrence[x][y].direction[i].alpha);
/*
Information Measures of Correlation.
*/
density_x[x].direction[i].red+=cooccurrence[x][y].direction[i].red;
density_x[x].direction[i].green+=cooccurrence[x][y].direction[i].green;
density_x[x].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->alpha_trait != UndefinedPixelTrait)
density_x[x].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
if (image->colorspace == CMYKColorspace)
density_x[x].direction[i].black+=
cooccurrence[x][y].direction[i].black;
density_y[y].direction[i].red+=cooccurrence[x][y].direction[i].red;
density_y[y].direction[i].green+=cooccurrence[x][y].direction[i].green;
density_y[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_y[y].direction[i].black+=
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
density_y[y].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
}
mean.direction[i].red+=y*sum[y].direction[i].red;
sum_squares.direction[i].red+=y*y*sum[y].direction[i].red;
mean.direction[i].green+=y*sum[y].direction[i].green;
sum_squares.direction[i].green+=y*y*sum[y].direction[i].green;
mean.direction[i].blue+=y*sum[y].direction[i].blue;
sum_squares.direction[i].blue+=y*y*sum[y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
{
mean.direction[i].black+=y*sum[y].direction[i].black;
sum_squares.direction[i].black+=y*y*sum[y].direction[i].black;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
mean.direction[i].alpha+=y*sum[y].direction[i].alpha;
sum_squares.direction[i].alpha+=y*y*sum[y].direction[i].alpha;
}
}
/*
Correlation: measure of linear-dependencies in the image.
*/
channel_features[RedPixelChannel].correlation[i]=
(correlation.direction[i].red-mean.direction[i].red*
mean.direction[i].red)/(sqrt(sum_squares.direction[i].red-
(mean.direction[i].red*mean.direction[i].red))*sqrt(
sum_squares.direction[i].red-(mean.direction[i].red*
mean.direction[i].red)));
channel_features[GreenPixelChannel].correlation[i]=
(correlation.direction[i].green-mean.direction[i].green*
mean.direction[i].green)/(sqrt(sum_squares.direction[i].green-
(mean.direction[i].green*mean.direction[i].green))*sqrt(
sum_squares.direction[i].green-(mean.direction[i].green*
mean.direction[i].green)));
channel_features[BluePixelChannel].correlation[i]=
(correlation.direction[i].blue-mean.direction[i].blue*
mean.direction[i].blue)/(sqrt(sum_squares.direction[i].blue-
(mean.direction[i].blue*mean.direction[i].blue))*sqrt(
sum_squares.direction[i].blue-(mean.direction[i].blue*
mean.direction[i].blue)));
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].correlation[i]=
(correlation.direction[i].black-mean.direction[i].black*
mean.direction[i].black)/(sqrt(sum_squares.direction[i].black-
(mean.direction[i].black*mean.direction[i].black))*sqrt(
sum_squares.direction[i].black-(mean.direction[i].black*
mean.direction[i].black)));
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].correlation[i]=
(correlation.direction[i].alpha-mean.direction[i].alpha*
mean.direction[i].alpha)/(sqrt(sum_squares.direction[i].alpha-
(mean.direction[i].alpha*mean.direction[i].alpha))*sqrt(
sum_squares.direction[i].alpha-(mean.direction[i].alpha*
mean.direction[i].alpha)));
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
ssize_t
x;
for (x=2; x < (ssize_t) (2*number_grays); x++)
{
/*
Sum average.
*/
channel_features[RedPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].red;
channel_features[GreenPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].green;
channel_features[BluePixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].alpha;
/*
Sum entropy.
*/
channel_features[RedPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].red*
MagickLog10(density_xy[x].direction[i].red);
channel_features[GreenPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].green*
MagickLog10(density_xy[x].direction[i].green);
channel_features[BluePixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].blue*
MagickLog10(density_xy[x].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].black*
MagickLog10(density_xy[x].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].alpha*
MagickLog10(density_xy[x].direction[i].alpha);
/*
Sum variance.
*/
channel_features[RedPixelChannel].sum_variance[i]+=
(x-channel_features[RedPixelChannel].sum_entropy[i])*
(x-channel_features[RedPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].red;
channel_features[GreenPixelChannel].sum_variance[i]+=
(x-channel_features[GreenPixelChannel].sum_entropy[i])*
(x-channel_features[GreenPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].green;
channel_features[BluePixelChannel].sum_variance[i]+=
(x-channel_features[BluePixelChannel].sum_entropy[i])*
(x-channel_features[BluePixelChannel].sum_entropy[i])*
density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].sum_variance[i]+=
(x-channel_features[BlackPixelChannel].sum_entropy[i])*
(x-channel_features[BlackPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].sum_variance[i]+=
(x-channel_features[AlphaPixelChannel].sum_entropy[i])*
(x-channel_features[AlphaPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].alpha;
}
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
ssize_t
y;
for (y=0; y < (ssize_t) number_grays; y++)
{
ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Sum of Squares: Variance
*/
variance.direction[i].red+=(y-mean.direction[i].red+1)*
(y-mean.direction[i].red+1)*cooccurrence[x][y].direction[i].red;
variance.direction[i].green+=(y-mean.direction[i].green+1)*
(y-mean.direction[i].green+1)*cooccurrence[x][y].direction[i].green;
variance.direction[i].blue+=(y-mean.direction[i].blue+1)*
(y-mean.direction[i].blue+1)*cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
variance.direction[i].black+=(y-mean.direction[i].black+1)*
(y-mean.direction[i].black+1)*cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
variance.direction[i].alpha+=(y-mean.direction[i].alpha+1)*
(y-mean.direction[i].alpha+1)*
cooccurrence[x][y].direction[i].alpha;
/*
Sum average / Difference Variance.
*/
density_xy[MagickAbsoluteValue(y-x)].direction[i].red+=
cooccurrence[x][y].direction[i].red;
density_xy[MagickAbsoluteValue(y-x)].direction[i].green+=
cooccurrence[x][y].direction[i].green;
density_xy[MagickAbsoluteValue(y-x)].direction[i].blue+=
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_xy[MagickAbsoluteValue(y-x)].direction[i].black+=
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
density_xy[MagickAbsoluteValue(y-x)].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
/*
Information Measures of Correlation.
*/
entropy_xy.direction[i].red-=cooccurrence[x][y].direction[i].red*
MagickLog10(cooccurrence[x][y].direction[i].red);
entropy_xy.direction[i].green-=cooccurrence[x][y].direction[i].green*
MagickLog10(cooccurrence[x][y].direction[i].green);
entropy_xy.direction[i].blue-=cooccurrence[x][y].direction[i].blue*
MagickLog10(cooccurrence[x][y].direction[i].blue);
if (image->colorspace == CMYKColorspace)
entropy_xy.direction[i].black-=cooccurrence[x][y].direction[i].black*
MagickLog10(cooccurrence[x][y].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
entropy_xy.direction[i].alpha-=
cooccurrence[x][y].direction[i].alpha*MagickLog10(
cooccurrence[x][y].direction[i].alpha);
entropy_xy1.direction[i].red-=(cooccurrence[x][y].direction[i].red*
MagickLog10(density_x[x].direction[i].red*density_y[y].direction[i].red));
entropy_xy1.direction[i].green-=(cooccurrence[x][y].direction[i].green*
MagickLog10(density_x[x].direction[i].green*
density_y[y].direction[i].green));
entropy_xy1.direction[i].blue-=(cooccurrence[x][y].direction[i].blue*
MagickLog10(density_x[x].direction[i].blue*density_y[y].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_xy1.direction[i].black-=(
cooccurrence[x][y].direction[i].black*MagickLog10(
density_x[x].direction[i].black*density_y[y].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_xy1.direction[i].alpha-=(
cooccurrence[x][y].direction[i].alpha*MagickLog10(
density_x[x].direction[i].alpha*density_y[y].direction[i].alpha));
entropy_xy2.direction[i].red-=(density_x[x].direction[i].red*
density_y[y].direction[i].red*MagickLog10(density_x[x].direction[i].red*
density_y[y].direction[i].red));
entropy_xy2.direction[i].green-=(density_x[x].direction[i].green*
density_y[y].direction[i].green*MagickLog10(density_x[x].direction[i].green*
density_y[y].direction[i].green));
entropy_xy2.direction[i].blue-=(density_x[x].direction[i].blue*
density_y[y].direction[i].blue*MagickLog10(density_x[x].direction[i].blue*
density_y[y].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_xy2.direction[i].black-=(density_x[x].direction[i].black*
density_y[y].direction[i].black*MagickLog10(
density_x[x].direction[i].black*density_y[y].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_xy2.direction[i].alpha-=(density_x[x].direction[i].alpha*
density_y[y].direction[i].alpha*MagickLog10(
density_x[x].direction[i].alpha*density_y[y].direction[i].alpha));
}
}
channel_features[RedPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].red;
channel_features[GreenPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].green;
channel_features[BluePixelChannel].variance_sum_of_squares[i]=
variance.direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].alpha;
}
/*
Compute more texture features.
*/
(void) memset(&variance,0,sizeof(variance));
(void) memset(&sum_squares,0,sizeof(sum_squares));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Difference variance.
*/
variance.direction[i].red+=density_xy[x].direction[i].red;
variance.direction[i].green+=density_xy[x].direction[i].green;
variance.direction[i].blue+=density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
variance.direction[i].black+=density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
variance.direction[i].alpha+=density_xy[x].direction[i].alpha;
sum_squares.direction[i].red+=density_xy[x].direction[i].red*
density_xy[x].direction[i].red;
sum_squares.direction[i].green+=density_xy[x].direction[i].green*
density_xy[x].direction[i].green;
sum_squares.direction[i].blue+=density_xy[x].direction[i].blue*
density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
sum_squares.direction[i].black+=density_xy[x].direction[i].black*
density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
sum_squares.direction[i].alpha+=density_xy[x].direction[i].alpha*
density_xy[x].direction[i].alpha;
/*
Difference entropy.
*/
channel_features[RedPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].red*
MagickLog10(density_xy[x].direction[i].red);
channel_features[GreenPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].green*
MagickLog10(density_xy[x].direction[i].green);
channel_features[BluePixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].blue*
MagickLog10(density_xy[x].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].black*
MagickLog10(density_xy[x].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].alpha*
MagickLog10(density_xy[x].direction[i].alpha);
/*
Information Measures of Correlation.
*/
entropy_x.direction[i].red-=(density_x[x].direction[i].red*
MagickLog10(density_x[x].direction[i].red));
entropy_x.direction[i].green-=(density_x[x].direction[i].green*
MagickLog10(density_x[x].direction[i].green));
entropy_x.direction[i].blue-=(density_x[x].direction[i].blue*
MagickLog10(density_x[x].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_x.direction[i].black-=(density_x[x].direction[i].black*
MagickLog10(density_x[x].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_x.direction[i].alpha-=(density_x[x].direction[i].alpha*
MagickLog10(density_x[x].direction[i].alpha));
entropy_y.direction[i].red-=(density_y[x].direction[i].red*
MagickLog10(density_y[x].direction[i].red));
entropy_y.direction[i].green-=(density_y[x].direction[i].green*
MagickLog10(density_y[x].direction[i].green));
entropy_y.direction[i].blue-=(density_y[x].direction[i].blue*
MagickLog10(density_y[x].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_y.direction[i].black-=(density_y[x].direction[i].black*
MagickLog10(density_y[x].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_y.direction[i].alpha-=(density_y[x].direction[i].alpha*
MagickLog10(density_y[x].direction[i].alpha));
}
/*
Difference variance.
*/
channel_features[RedPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].red)-
(variance.direction[i].red*variance.direction[i].red))/
((double) number_grays*number_grays*number_grays*number_grays);
channel_features[GreenPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].green)-
(variance.direction[i].green*variance.direction[i].green))/
((double) number_grays*number_grays*number_grays*number_grays);
channel_features[BluePixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].blue)-
(variance.direction[i].blue*variance.direction[i].blue))/
((double) number_grays*number_grays*number_grays*number_grays);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].black)-
(variance.direction[i].black*variance.direction[i].black))/
((double) number_grays*number_grays*number_grays*number_grays);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].alpha)-
(variance.direction[i].alpha*variance.direction[i].alpha))/
((double) number_grays*number_grays*number_grays*number_grays);
/*
Information Measures of Correlation.
*/
channel_features[RedPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].red-entropy_xy1.direction[i].red)/
(entropy_x.direction[i].red > entropy_y.direction[i].red ?
entropy_x.direction[i].red : entropy_y.direction[i].red);
channel_features[GreenPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].green-entropy_xy1.direction[i].green)/
(entropy_x.direction[i].green > entropy_y.direction[i].green ?
entropy_x.direction[i].green : entropy_y.direction[i].green);
channel_features[BluePixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].blue-entropy_xy1.direction[i].blue)/
(entropy_x.direction[i].blue > entropy_y.direction[i].blue ?
entropy_x.direction[i].blue : entropy_y.direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].black-entropy_xy1.direction[i].black)/
(entropy_x.direction[i].black > entropy_y.direction[i].black ?
entropy_x.direction[i].black : entropy_y.direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].alpha-entropy_xy1.direction[i].alpha)/
(entropy_x.direction[i].alpha > entropy_y.direction[i].alpha ?
entropy_x.direction[i].alpha : entropy_y.direction[i].alpha);
channel_features[RedPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].red-
entropy_xy.direction[i].red)))));
channel_features[GreenPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].green-
entropy_xy.direction[i].green)))));
channel_features[BluePixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].blue-
entropy_xy.direction[i].blue)))));
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].black-
entropy_xy.direction[i].black)))));
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].alpha-
entropy_xy.direction[i].alpha)))));
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
ssize_t
z;
for (z=0; z < (ssize_t) number_grays; z++)
{
ssize_t
y;
ChannelStatistics
pixel;
(void) memset(&pixel,0,sizeof(pixel));
for (y=0; y < (ssize_t) number_grays; y++)
{
ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Contrast: amount of local variations present in an image.
*/
if (((y-x) == z) || ((x-y) == z))
{
pixel.direction[i].red+=cooccurrence[x][y].direction[i].red;
pixel.direction[i].green+=cooccurrence[x][y].direction[i].green;
pixel.direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
pixel.direction[i].black+=cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
pixel.direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
}
/*
Maximum Correlation Coefficient.
*/
if ((fabs(density_x[z].direction[i].red) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].red) > MagickEpsilon))
Q[z][y].direction[i].red+=cooccurrence[z][x].direction[i].red*
cooccurrence[y][x].direction[i].red/density_x[z].direction[i].red/
density_y[x].direction[i].red;
if ((fabs(density_x[z].direction[i].green) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].red) > MagickEpsilon))
Q[z][y].direction[i].green+=cooccurrence[z][x].direction[i].green*
cooccurrence[y][x].direction[i].green/
density_x[z].direction[i].green/density_y[x].direction[i].red;
if ((fabs(density_x[z].direction[i].blue) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].blue) > MagickEpsilon))
Q[z][y].direction[i].blue+=cooccurrence[z][x].direction[i].blue*
cooccurrence[y][x].direction[i].blue/
density_x[z].direction[i].blue/density_y[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
if ((fabs(density_x[z].direction[i].black) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].black) > MagickEpsilon))
Q[z][y].direction[i].black+=cooccurrence[z][x].direction[i].black*
cooccurrence[y][x].direction[i].black/
density_x[z].direction[i].black/density_y[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
if ((fabs(density_x[z].direction[i].alpha) > MagickEpsilon) &&
(fabs(density_y[x].direction[i].alpha) > MagickEpsilon))
Q[z][y].direction[i].alpha+=
cooccurrence[z][x].direction[i].alpha*
cooccurrence[y][x].direction[i].alpha/
density_x[z].direction[i].alpha/
density_y[x].direction[i].alpha;
}
}
channel_features[RedPixelChannel].contrast[i]+=z*z*
pixel.direction[i].red;
channel_features[GreenPixelChannel].contrast[i]+=z*z*
pixel.direction[i].green;
channel_features[BluePixelChannel].contrast[i]+=z*z*
pixel.direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].contrast[i]+=z*z*
pixel.direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].contrast[i]+=z*z*
pixel.direction[i].alpha;
}
/*
Maximum Correlation Coefficient.
Future: return second largest eigenvalue of Q.
*/
channel_features[RedPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
channel_features[GreenPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
channel_features[BluePixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
}
/*
Relinquish resources.
*/
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
for (i=0; i < (ssize_t) number_grays; i++)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
return(channel_features);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H o u g h L i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Use HoughLineImage() in conjunction with any binary edge extracted image (we
% recommend Canny) to identify lines in the image. The algorithm accumulates
% counts for every white pixel for every possible orientation (for angles from
% 0 to 179 in 1 degree increments) and distance from the center of the image to
% the corner (in 1 px increments) and stores the counts in an accumulator
% matrix of angle vs distance. The size of the accumulator is 180x(diagonal/2).
% Next it searches this space for peaks in counts and converts the locations
% of the peaks to slope and intercept in the normal x,y input image space. Use
% the slope/intercepts to find the endpoints clipped to the bounds of the
% image. The lines are then drawn. The counts are a measure of the length of
% the lines.
%
% The format of the HoughLineImage method is:
%
% Image *HoughLineImage(const Image *image,const size_t width,
% const size_t height,const size_t threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width, height: find line pairs as local maxima in this neighborhood.
%
% o threshold: the line count threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
/*
  Rasterize the MVG line primitives referenced by image_info (the temporary
  file produced by HoughLineImage) onto a fresh canvas of the requested
  dimensions.  Returns the rendered image list head, or NULL on failure.
  Ownership of the returned image passes to the caller.
*/
static Image *RenderHoughLines(const ImageInfo *image_info,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define BoundingBox "viewbox"

  DrawInfo
    *draw_info;

  Image
    *image;

  MagickBooleanType
    status;

  /*
    Open image.
  */
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  image->columns=columns;
  image->rows=rows;
  draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL);
  /* scale the canvas when the image resolution differs from the default */
  draw_info->affine.sx=image->resolution.x == 0.0 ? 1.0 : image->resolution.x/
    DefaultResolution;
  draw_info->affine.sy=image->resolution.y == 0.0 ? 1.0 : image->resolution.y/
    DefaultResolution;
  image->columns=(size_t) (draw_info->affine.sx*image->columns);
  image->rows=(size_t) (draw_info->affine.sy*image->rows);
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  if (SetImageBackgroundColor(image,exception) == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Render drawing: read the primitive text either from the file on disk or
    directly from the in-memory blob stream.
  */
  if (GetBlobStreamData(image) == (unsigned char *) NULL)
    draw_info->primitive=FileToString(image->filename,~0UL,exception);
  else
    {
      draw_info->primitive=(char *) AcquireQuantumMemory(1,(size_t)
        GetBlobSize(image)+1);
      if (draw_info->primitive != (char *) NULL)
        {
          (void) memcpy(draw_info->primitive,GetBlobStreamData(image),
            (size_t) GetBlobSize(image));
          draw_info->primitive[GetBlobSize(image)]='\0';
        }
    }
  (void) DrawImage(image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
MagickExport Image *HoughLineImage(const Image *image,const size_t width,
  const size_t height,const size_t threshold,ExceptionInfo *exception)
{
#define HoughLineImageTag "HoughLine/Image"

  CacheView
    *image_view;

  char
    message[MagickPathExtent],
    path[MagickPathExtent];

  const char
    *artifact;

  double
    hough_height;

  Image
    *lines_image = NULL;

  ImageInfo
    *image_info;

  int
    file;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *accumulator;

  PointInfo
    center;

  ssize_t
    y;

  size_t
    accumulator_height,
    accumulator_width,
    line_count;

  /*
    Create the accumulator: one vote counter per (angle,distance) pair, with
    angles sampled in 1 degree steps and distances in 1 pixel steps up to
    half the image diagonal.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  accumulator_width=180;
  hough_height=((sqrt(2.0)*(double) (image->rows > image->columns ?
    image->rows : image->columns))/2.0);
  accumulator_height=(size_t) (2.0*hough_height);
  accumulator=AcquireMatrixInfo(accumulator_width,accumulator_height,
    sizeof(double),exception);
  if (accumulator == (MatrixInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (NullMatrix(accumulator) == MagickFalse)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Populate the accumulator: each bright pixel (intensity above mid-range)
    votes for every (angle,distance) line that passes through it.
  */
  status=MagickTrue;
  progress=0;
  center.x=(double) image->columns/2.0;
  center.y=(double) image->rows/2.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelIntensity(image,p) > (QuantumRange/2.0))
        {
          ssize_t
            i;

          for (i=0; i < 180; i++)
          {
            double
              count,
              radius;

            /* signed distance of (x,y) from the image center along angle i */
            radius=(((double) x-center.x)*cos(DegreesToRadians((double) i)))+
              (((double) y-center.y)*sin(DegreesToRadians((double) i)));
            (void) GetMatrixElement(accumulator,i,(ssize_t)
              MagickRound(radius+hough_height),&count);
            count++;
            (void) SetMatrixElement(accumulator,i,(ssize_t)
              MagickRound(radius+hough_height),&count);
          }
        }
      p+=GetPixelChannels(image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /*
          Fix: report progress under this method's own tag (was mistakenly
          CannyEdgeImageTag; HoughLineImageTag was defined but never used).
        */
        proceed=SetImageProgress(image,HoughLineImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      return((Image *) NULL);
    }
  /*
    Generate line segments from accumulator: write an MVG drawing of each
    accepted peak to a unique temporary file for later rendering.
  */
  file=AcquireUniqueFileResource(path);
  if (file == -1)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      return((Image *) NULL);
    }
  (void) FormatLocaleString(message,MagickPathExtent,
    "# Hough line transform: %.20gx%.20g%+.20g\n",(double) width,
    (double) height,(double) threshold);
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  (void) FormatLocaleString(message,MagickPathExtent,
    "viewbox 0 0 %.20g %.20g\n",(double) image->columns,(double) image->rows);
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  (void) FormatLocaleString(message,MagickPathExtent,
    "# x1,y1 x2,y2 # count angle distance\n");
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  /* default vote threshold is a quarter of the longer image dimension */
  line_count=image->columns > image->rows ? image->columns/4 : image->rows/4;
  if (threshold != 0)
    line_count=threshold;
  for (y=0; y < (ssize_t) accumulator_height; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) accumulator_width; x++)
    {
      double
        count;

      (void) GetMatrixElement(accumulator,x,y,&count);
      if (count >= (double) line_count)
        {
          double
            maxima;

          SegmentInfo
            line;

          ssize_t
            v;

          /*
            Is point a local maxima within the width x height neighborhood?
          */
          maxima=count;
          for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
          {
            ssize_t
              u;

            for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
            {
              if ((u != 0) || (v !=0))
                {
                  (void) GetMatrixElement(accumulator,x+u,y+v,&count);
                  if (count > maxima)
                    {
                      maxima=count;
                      break;
                    }
                }
            }
            /* inner loop broke early on a larger neighbor: stop scanning */
            if (u < (ssize_t) (width/2))
              break;
          }
          (void) GetMatrixElement(accumulator,x,y,&count);
          if (maxima > count)
            continue;
          if ((x >= 45) && (x <= 135))
            {
              /*
                y = (r-x cos(t))/sin(t)
              */
              line.x1=0.0;
              line.y1=((double) (y-(accumulator_height/2.0))-((line.x1-
                (image->columns/2.0))*cos(DegreesToRadians((double) x))))/
                sin(DegreesToRadians((double) x))+(image->rows/2.0);
              line.x2=(double) image->columns;
              line.y2=((double) (y-(accumulator_height/2.0))-((line.x2-
                (image->columns/2.0))*cos(DegreesToRadians((double) x))))/
                sin(DegreesToRadians((double) x))+(image->rows/2.0);
            }
          else
            {
              /*
                x = (r-y cos(t))/sin(t)
              */
              line.y1=0.0;
              line.x1=((double) (y-(accumulator_height/2.0))-((line.y1-
                (image->rows/2.0))*sin(DegreesToRadians((double) x))))/
                cos(DegreesToRadians((double) x))+(image->columns/2.0);
              line.y2=(double) image->rows;
              line.x2=((double) (y-(accumulator_height/2.0))-((line.y2-
                (image->rows/2.0))*sin(DegreesToRadians((double) x))))/
                cos(DegreesToRadians((double) x))+(image->columns/2.0);
            }
          (void) FormatLocaleString(message,MagickPathExtent,
            "line %g,%g %g,%g # %g %g %g\n",line.x1,line.y1,line.x2,line.y2,
            maxima,(double) x,(double) y);
          if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
            status=MagickFalse;
        }
    }
  }
  (void) close(file);
  /*
    Render lines to image canvas, honoring any drawing-related artifacts set
    on the source image.
  */
  image_info=AcquireImageInfo();
  image_info->background_color=image->background_color;
  (void) FormatLocaleString(image_info->filename,MagickPathExtent,"%s",path);
  artifact=GetImageArtifact(image,"background");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"background",artifact);
  artifact=GetImageArtifact(image,"fill");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"fill",artifact);
  artifact=GetImageArtifact(image,"stroke");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"stroke",artifact);
  artifact=GetImageArtifact(image,"strokewidth");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"strokewidth",artifact);
  lines_image=RenderHoughLines(image_info,image->columns,image->rows,exception);
  artifact=GetImageArtifact(image,"hough-lines:accumulator");
  if ((lines_image != (Image *) NULL) &&
      (IsStringTrue(artifact) != MagickFalse))
    {
      Image
        *accumulator_image;

      accumulator_image=MatrixToImage(accumulator,exception);
      if (accumulator_image != (Image *) NULL)
        AppendImageToList(&lines_image,accumulator_image);
    }
  /*
    Free resources.
  */
  accumulator=DestroyMatrixInfo(accumulator);
  image_info=DestroyImageInfo(image_info);
  (void) RelinquishUniqueFileResource(path);
  return(GetFirstImageInList(lines_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M e a n S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MeanShiftImage() delineate arbitrarily shaped clusters in the image. For
% each pixel, it visits all the pixels in the neighborhood specified by
% the window centered at the pixel and excludes those that are outside the
% radius=(window-1)/2 surrounding the pixel. From those pixels, it finds those
% that are within the specified color distance from the current mean, and
% computes a new x,y centroid from those coordinates and a new mean. This new
% x,y centroid is used as the center for a new window. This process iterates
% until it converges and the final mean replaces the (original window
% center) pixel value. It repeats this process for the next pixel, etc.,
% until it processes all pixels in the image. Results are typically better with
% colorspaces other than sRGB. We recommend YIQ, YUV or YCbCr.
%
% The format of the MeanShiftImage method is:
%
% Image *MeanShiftImage(const Image *image,const size_t width,
% const size_t height,const double color_distance,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width, height: find pixels in this neighborhood.
%
% o color_distance: the color distance.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MeanShiftImage(const Image *image,const size_t width,
  const size_t height,const double color_distance,ExceptionInfo *exception)
{
#define MaxMeanShiftIterations 100
#define MeanShiftImageTag "MeanShift/Image"

  CacheView
    *image_view,
    *mean_view,
    *pixel_view;

  Image
    *mean_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Clone the input so converged means can be written to a separate image.
  */
  mean_image=CloneImage(image,0,0,MagickTrue,exception);
  if (mean_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(mean_image,DirectClass,exception) == MagickFalse)
    {
      mean_image=DestroyImage(mean_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  pixel_view=AcquireVirtualCacheView(image,exception);
  mean_view=AcquireAuthenticCacheView(mean_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status,progress) \
    magick_number_threads(mean_image,mean_image,mean_image->rows,1)
#endif
  for (y=0; y < (ssize_t) mean_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mean_view,0,y,mean_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) mean_image->columns; x++)
    {
      PixelInfo
        mean_pixel,
        previous_pixel;

      PointInfo
        mean_location,
        previous_location;

      ssize_t
        i;

      /*
        Seed the mean-shift iteration with the current pixel's color and
        coordinates.
      */
      GetPixelInfo(image,&mean_pixel);
      GetPixelInfoPixel(image,p,&mean_pixel);
      mean_location.x=(double) x;
      mean_location.y=(double) y;
      for (i=0; i < MaxMeanShiftIterations; i++)
      {
        double
          distance,
          gamma;

        PixelInfo
          sum_pixel;

        PointInfo
          sum_location;

        ssize_t
          count,
          v;

        sum_location.x=0.0;
        sum_location.y=0.0;
        GetPixelInfo(image,&sum_pixel);
        previous_location=mean_location;
        previous_pixel=mean_pixel;
        count=0;
        /*
          Average the locations and colors of neighborhood pixels that lie
          within the window and within color_distance of the current mean.
        */
        for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
        {
          ssize_t
            u;

          for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
          {
            /* restrict the rectangular window to a radial neighborhood */
            if ((v*v+u*u) <= (ssize_t) ((width/2)*(height/2)))
              {
                PixelInfo
                  pixel;

                /*
                  NOTE(review): this assignment overwrites the shared status
                  flag from inside the parallel region — confirm intended.
                */
                status=GetOneCacheViewVirtualPixelInfo(pixel_view,(ssize_t)
                  MagickRound(mean_location.x+u),(ssize_t) MagickRound(
                  mean_location.y+v),&pixel,exception);
                distance=(mean_pixel.red-pixel.red)*(mean_pixel.red-pixel.red)+
                  (mean_pixel.green-pixel.green)*(mean_pixel.green-pixel.green)+
                  (mean_pixel.blue-pixel.blue)*(mean_pixel.blue-pixel.blue);
                if (distance <= (color_distance*color_distance))
                  {
                    sum_location.x+=mean_location.x+u;
                    sum_location.y+=mean_location.y+v;
                    sum_pixel.red+=pixel.red;
                    sum_pixel.green+=pixel.green;
                    sum_pixel.blue+=pixel.blue;
                    sum_pixel.alpha+=pixel.alpha;
                    count++;
                  }
              }
          }
        }
        /*
          Recenter the window on the centroid of the accepted pixels; gamma
          is 1/count (0-safe reciprocal).
        */
        gamma=PerceptibleReciprocal(count);
        mean_location.x=gamma*sum_location.x;
        mean_location.y=gamma*sum_location.y;
        mean_pixel.red=gamma*sum_pixel.red;
        mean_pixel.green=gamma*sum_pixel.green;
        mean_pixel.blue=gamma*sum_pixel.blue;
        mean_pixel.alpha=gamma*sum_pixel.alpha;
        /*
          Converged when the combined squared shift in position and in
          (0-255 scaled) color drops to 3.0 or less.
        */
        distance=(mean_location.x-previous_location.x)*
          (mean_location.x-previous_location.x)+
          (mean_location.y-previous_location.y)*
          (mean_location.y-previous_location.y)+
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)*
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)+
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)*
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)+
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue)*
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue);
        if (distance <= 3.0)
          break;
      }
      SetPixelRed(mean_image,ClampToQuantum(mean_pixel.red),q);
      SetPixelGreen(mean_image,ClampToQuantum(mean_pixel.green),q);
      SetPixelBlue(mean_image,ClampToQuantum(mean_pixel.blue),q);
      SetPixelAlpha(mean_image,ClampToQuantum(mean_pixel.alpha),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(mean_image);
    }
    if (SyncCacheViewAuthenticPixels(mean_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MeanShiftImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  mean_view=DestroyCacheView(mean_view);
  pixel_view=DestroyCacheView(pixel_view);
  image_view=DestroyCacheView(image_view);
  return(mean_image);
}
|
header.h | /*--------------------------------------------------------------------
c---------------------------------------------------------------------
c
c header.h
c
c---------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c The following include file is generated automatically by the
c "setparams" utility. It defines
c maxcells: the square root of the maximum number of processors
c problem_size: 12, 64, 102, 162 (for class T, A, B, C)
c dt_default: default time step for this problem size if no
c config file
c niter_default: default number of iterations for this problem size
--------------------------------------------------------------------*/
#ifndef _HEADER_H_
#define _HEADER_H_
#include "npbparams.h"
#include "../math/nas_math.h"
/* Indices of the three 5x5 block-diagonal bands stored in lhs[...][AA|BB|CC]. */
#define AA 0
#define BB 1
#define CC 2
#define BLOCK_SIZE 5
/* COMMON block: global */
static int grid_points[3]; /* grid_points(1:3) */
/* COMMON block: constants */
/* Per-direction diffusion/convection coefficients set once at startup. */
static double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3;
static double dx1, dx2, dx3, dx4, dx5;
static double dy1, dy2, dy3, dy4, dy5;
static double dz1, dz2, dz3, dz4, dz5;
static double dssp, dt;
static double ce[5][13]; /* ce(5,13) */
static double dxmax, dymax, dzmax;
static double xxcon1, xxcon2, xxcon3, xxcon4, xxcon5;
static double dx1tx1, dx2tx1, dx3tx1, dx4tx1, dx5tx1;
static double yycon1, yycon2, yycon3, yycon4, yycon5;
static double dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1;
static double zzcon1, zzcon2, zzcon3, zzcon4, zzcon5;
static double dz1tz1, dz2tz1, dz3tz1, dz4tz1, dz5tz1;
static double dnxm1, dnym1, dnzm1, c1c2, c1c5, c3c4, c1345;
static double conz1, c1, c2, c3, c4, c5, c4dssp, c5dssp, dtdssp;
static double dttx1, dttx2, dtty1, dtty2, dttz1, dttz2;
static double c2dttx1, c2dtty1, c2dttz1, comz1, comz4, comz5, comz6;
static double c3c4tx3, c3c4ty3, c3c4tz3, c2iv, con43, con16;
#define IMAX PROBLEM_SIZE
#define JMAX PROBLEM_SIZE
#define KMAX PROBLEM_SIZE
/*
c to improve cache performance, grid dimensions padded by 1
c for even number sizes only.
*/
/* COMMON block: fields */
/* Flat heap pointers for the field arrays; they are viewed as
   multi-dimensional arrays via the ACAST macros below. */
static double *us_ptr;
static double *vs_ptr;
static double *ws_ptr;
static double *qs_ptr;
static double *rho_i_ptr;
static double *square_ptr;
static double *forcing_ptr;
static double *u_ptr;
static double *rhs_ptr;
static double *lhs_ptr;
static double *fjac_ptr;
static double *njac_ptr;
typedef double s_matrix_t[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
typedef double f_matrix_t[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][5+1];
typedef double u_matrix_t[(IMAX+1)/2*2+1][(JMAX+1)/2*2+1][(KMAX+1)/2*2+1][5];
typedef double rhs_matrix_t[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][5];
typedef double lhs_matrix_t[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][3][5][5];
typedef double jac_matrix_t[IMAX/2*2+1][JMAX/2*2+1][KMAX-1+1][5][5];
/* Reinterpret a flat allocation as a multi-dimensional array type.
   NOTE(review): this cast assumes the pointed-to storage was allocated with
   the exact size/layout of T; it also technically violates strict aliasing —
   confirm the build uses -fno-strict-aliasing or equivalent. */
#define ACAST(T, ptr) (*(T*)ptr)
#define us ACAST(s_matrix_t, us_ptr)
#define vs ACAST(s_matrix_t, vs_ptr)
#define ws ACAST(s_matrix_t, ws_ptr)
#define qs ACAST(s_matrix_t, qs_ptr)
#define rho_i ACAST(s_matrix_t, rho_i_ptr)
#define square ACAST(s_matrix_t, square_ptr)
#define forcing ACAST(f_matrix_t, forcing_ptr)
#define u ACAST(u_matrix_t, u_ptr)
#define rhs ACAST(rhs_matrix_t, rhs_ptr)
#define lhs ACAST(lhs_matrix_t, lhs_ptr)
#define fjac ACAST(jac_matrix_t, fjac_ptr)
#define njac ACAST(jac_matrix_t, njac_ptr)
// static double us[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
// static double vs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
// static double ws[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
// static double qs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
// static double rho_i[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
// static double square[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
// static double forcing[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][5+1];
// static double u[(IMAX+1)/2*2+1][(JMAX+1)/2*2+1][(KMAX+1)/2*2+1][5];
// static double rhs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][5];
// static double lhs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][3][5][5];
/* COMMON block: work_1d */
/* Per-thread 1-D scratch arrays; each OpenMP thread gets a private copy
   via the threadprivate directive below.
   NOTE(review): these are defined without 'static' in a header — if this
   header is included by more than one translation unit, confirm the build
   relies on common-symbol linkage, otherwise this causes duplicate-symbol
   link errors. */
double cuf[PROBLEM_SIZE];
double q[PROBLEM_SIZE];
double ue[PROBLEM_SIZE][5];
double buf[PROBLEM_SIZE][5];
#pragma omp threadprivate(cuf, q, ue, buf)
/*
c to improve cache performance, grid dimensions (first two for these
c to arrays) padded by 1 for even number sizes only.
*/
/* COMMON block: work_lhs */
// static double fjac[IMAX/2*2+1][JMAX/2*2+1][KMAX-1+1][5][5];
// /* fjac(5, 5, 0:IMAX/2*2, 0:JMAX/2*2, 0:KMAX-1) */
// static double njac[IMAX/2*2+1][JMAX/2*2+1][KMAX-1+1][5][5];
/* njac(5, 5, 0:IMAX/2*2, 0:JMAX/2*2, 0:KMAX-1) */
static double tmp1, tmp2, tmp3;
#endif
|
GB_to_nonhyper.c | //------------------------------------------------------------------------------
// GB_to_nonhyper: convert a matrix to non-hypersparse form
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// On input, the matrix may have shallow A->p and A->h content; it is safely
// removed. On output, the matrix is always non-hypersparse (even if out of
// memory). If the input matrix is hypersparse, it is given a new A->p that is
// not shallow. If the input matrix is already non-hypersparse, nothing is
// changed (and in that case A->p remains shallow on output if shallow on
// input). The A->x and A->i content is not changed; it remains in whatever
// shallow/non-shallow state that it had on input).
// If an out-of-memory condition occurs, all content of the matrix is cleared.
// The input matrix may be jumbled; this is not an error condition.
#include "GB.h"
GB_PUBLIC                       // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_to_nonhyper     // convert a matrix to non-hypersparse
(
    GrB_Matrix A,           // matrix to convert to non-hypersparse
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT_MATRIX_OK_OR_JUMBLED (A, "A being converted to nonhyper", GB0) ;
    ASSERT (GB_ZOMBIES_OK (A)) ;

    //--------------------------------------------------------------------------
    // convert A to non-hypersparse form
    //--------------------------------------------------------------------------

    // If A is already non-hypersparse this whole body is skipped and the
    // matrix is returned unchanged (A->p stays shallow if it was shallow).
    if (A->is_hyper)
    {

        //----------------------------------------------------------------------
        // determine the number of threads to use
        //----------------------------------------------------------------------

        int64_t n = A->vdim ;

        GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
        int nthreads = GB_nthreads (n, chunk, nthreads_max) ;
        // over-decompose into 8 tasks per thread for load balance, but never
        // more tasks than vectors, and at least one task
        int ntasks = (nthreads == 1) ? 1 : (8 * nthreads) ;
        ntasks = GB_IMIN (ntasks, n) ;
        ntasks = GB_IMAX (ntasks, 1) ;

        //----------------------------------------------------------------------
        // allocate the new Ap array, of size n+1
        //----------------------------------------------------------------------

        int64_t *GB_RESTRICT Ap_new ;
        GB_MALLOC_MEMORY (Ap_new, n+1, sizeof (int64_t)) ;
        if (Ap_new == NULL)
        {
            // out of memory
            A->is_hyper = false ;    // A is non-hypersparse, but invalid
            GB_PHIX_FREE (A) ;
            return (GB_OUT_OF_MEMORY) ;
        }

        #ifdef GB_DEBUG
        // to ensure all values of Ap_new are assigned below.
        for (int64_t j = 0 ; j <= n ; j++) Ap_new [j] = -99999 ;
        #endif

        //----------------------------------------------------------------------
        // get the old hyperlist
        //----------------------------------------------------------------------

        int64_t nvec = A->nvec ;            // # of vectors in Ah_old
        int64_t *GB_RESTRICT Ap_old = A->p ;   // size nvec+1
        int64_t *GB_RESTRICT Ah_old = A->h ;   // size nvec
        int64_t nvec_nonempty = 0 ;         // recompute A->nvec_nonempty
        int64_t anz = GB_NNZ (A) ;

        //----------------------------------------------------------------------
        // construct the new vector pointers
        //----------------------------------------------------------------------

        // Each task fills a disjoint slice Ap_new [jstart:jend-1]; the
        // per-task nonempty counts are combined by the OpenMP reduction.
        int tid ;
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:nvec_nonempty)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            int64_t jstart, jend, my_nvec_nonempty = 0 ;
            GB_PARTITION (jstart, jend, n, tid, ntasks) ;
            ASSERT (0 <= jstart && jstart <= jend && jend <= n) ;

            // task tid computes Ap_new [jstart:jend-1] from Ap_old, Ah_old.

            // GB_SPLIT_BINARY_SEARCH of Ah_old [0..nvec-1] for jstart:
            // If found is true then Ah_old [k] == jstart.
            // If found is false, and nvec > 0 then
            //    Ah_old [0 ... k-1] < jstart <  Ah_old [k ... nvec-1]
            // Whether or not i is found, if nvec > 0
            //    Ah_old [0 ... k-1] < jstart <= Ah_old [k ... nvec-1]
            // If nvec == 0, then k == 0 and found will be false.  In this
            // case, jstart cannot be compared with any content of Ah_old,
            // since Ah_old is completely empty (Ah_old [0] is invalid).

            int64_t k = 0, pright = nvec-1 ;
            bool found ;
            GB_SPLIT_BINARY_SEARCH (jstart, Ah_old, k, pright, found) ;

            ASSERT (k >= 0 && k <= nvec) ;
            ASSERT (GB_IMPLIES (nvec == 0, !found && k == 0)) ;
            ASSERT (GB_IMPLIES (found, jstart == Ah_old [k])) ;
            ASSERT (GB_IMPLIES (!found && k < nvec, jstart < Ah_old [k])) ;

            // Let jk = Ah_old [k], jlast = Ah_old [k-1], and pk = Ah_old [k].
            // Then Ap_new [jlast+1:jk] must be set to pk.  This must be done
            // for all k = 0:nvec-1.  In addition, the last vector k=nvec-1
            // must be terminated by setting Ap_new [jk+1:n-1] to Ap_old [nvec].
            // A task owns the kth vector if jk is in jstart:jend-1, inclusive.
            // It counts all non-empty vectors that it owns.  However, the task
            // must also set Ap_new [...] = pk for any jlast+1:jk that overlaps
            // jstart:jend-1, even if it does not own that particular vector k.
            // This happens only at the tail end of jstart:jend-1.

            int64_t jlast = (k == 0) ? (-1) : Ah_old [k-1] ;
            jlast = GB_IMAX (jstart-1, jlast) ;

            bool done = false ;

            for ( ; k <= nvec && !done ; k++)
            {

                //--------------------------------------------------------------
                // get the kth vector in Ah_old, which is vector index jk.
                //--------------------------------------------------------------

                // sentinel values (jk = n, pk = anz) terminate the last run
                int64_t jk = (k < nvec) ? Ah_old [k] : n ;
                int64_t pk = (k < nvec) ? Ap_old [k] : anz ;

                //--------------------------------------------------------------
                // determine if this task owns jk
                //--------------------------------------------------------------

                int64_t jfin ;
                if (jk >= jend)
                {
                    // This is the last iteration for this task.  This task
                    // does not own the kth vector.  However, it does own the
                    // vector indices jlast+1:jend-1, and these vectors must
                    // be handled by this task.
                    jfin = jend - 1 ;
                    done = true ;
                }
                else
                {
                    // This task owns the kth vector, which is vector index jk.
                    // Ap must be set to pk for all vector indices jlast+1:jk.
                    jfin = jk ;
                    ASSERT (k >= 0 && k < nvec && nvec > 0) ;
                    if (pk < Ap_old [k+1]) my_nvec_nonempty++ ;
                }

                //--------------------------------------------------------------
                // set Ap_new for this vector
                //--------------------------------------------------------------

                // Ap_new [jlast+1:jk] must be set to pk.  This tasks handles
                // the intersection of jlast+1:jk with jstart:jend-1.

                for (int64_t j = jlast+1 ; j <= jfin ; j++)
                {
                    Ap_new [j] = pk ;
                }

                //--------------------------------------------------------------
                // keep track of the prior vector index
                //--------------------------------------------------------------

                jlast = jk ;
            }
            nvec_nonempty += my_nvec_nonempty ;

            //------------------------------------------------------------------
            // no task owns Ap_new [n] so it is set by the last task
            //------------------------------------------------------------------

            if (tid == ntasks-1)
            {
                ASSERT (jend == n) ;
                Ap_new [n] = anz ;
            }
        }

        // free the old A->p and A->h hyperlist content.
        // this clears A->nvec_nonempty so it must be restored below.
        GB_ph_free (A) ;

        // transplant the new vector pointers; matrix is no longer hypersparse
        A->p = Ap_new ;
        A->h = NULL ;
        A->is_hyper = false ;
        A->nvec = n ;
        A->nvec_nonempty = nvec_nonempty ;
        A->plen = n ;
        A->p_shallow = false ;
        A->h_shallow = false ;
        A->magic = GB_MAGIC ;
        ASSERT (anz == GB_NNZ (A)) ;
        ASSERT (A->nvec_nonempty == GB_nvec_nonempty (A, Context)) ;
    }

    //--------------------------------------------------------------------------
    // A is now in non-hypersparse form
    //--------------------------------------------------------------------------

    ASSERT_MATRIX_OK_OR_JUMBLED (A, "A converted to nonhypersparse", GB0) ;
    ASSERT (!(A->is_hyper)) ;
    return (GrB_SUCCESS) ;
}
|
md5.c | /*
* This is an OpenSSL-compatible implementation of the RSA Data Security, Inc.
* MD5 Message-Digest Algorithm (RFC 1321).
*
* Homepage:
* http://openwall.info/wiki/people/solar/software/public-domain-source-code/md5
*
* Author:
* Alexander Peslyak, better known as Solar Designer <solar at openwall.com>
*
* This software was written by Alexander Peslyak in 2001. No copyright is
* claimed, and the software is hereby placed in the public domain.
* In case this attempt to disclaim copyright and place the software in the
* public domain is deemed null and void, then the software is
* Copyright (c) 2001 Alexander Peslyak and it is hereby released to the
* general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
*
* (This is a heavily cut-down "BSD license".)
*
* This differs from Colin Plumb's older public domain implementation in that
* no exactly 32-bit integer data type is required (any 32-bit or wider
* unsigned integer data type will do), there's no compile-time endianness
* configuration, and the function prototypes match OpenSSL's. No code from
* Colin Plumb's implementation has been reused; this comment merely compares
* the properties of the two independent implementations.
*
* The primary goals of this implementation are portability and ease of use.
* It is meant to be fast, but not as fast as possible. Some known
* optimizations are not included to reduce source code size and avoid
* compile-time configuration.
*/
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#if defined(_OPENMP)
# include <omp.h>
# define MD5_PARALLELISM_DEGREE 4
#endif
#include "md5.h"
#if !defined(S_ISREG) && defined(S_IFMT) && defined(S_IFREG)
#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
#endif
/*
* The basic MD5 functions.
*
* F and G are optimized compared to their RFC 1321 definitions for
* architectures that lack an AND-NOT instruction, just like in Colin Plumb's
* implementation.
*/
#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
#define G(x, y, z) ((y) ^ ((z) & ((x) ^ (y))))
#define H(x, y, z) (((x) ^ (y)) ^ (z))
#define H2(x, y, z) ((x) ^ ((y) ^ (z)))
#define I(x, y, z) ((y) ^ ((x) | ~(z)))
/*
* The MD5 transformation for all four rounds.
*/
#define STEP(f, a, b, c, d, x, t, s) \
(a) += f((b), (c), (d)) + (x) + (t); \
(a) = (((a) << (s)) | (((a) & 0xffffffff) >> (32 - (s)))); \
(a) += (b);
/*
* SET reads 4 input bytes in little-endian byte order and stores them
* in a properly aligned word in host byte order.
*
* The check for little-endian architectures that tolerate unaligned
* memory accesses is just an optimization. Nothing will break if it
* doesn't work.
*/
#if defined(__i386__) || defined(__x86_64__) || defined(__vax__)
#define SET(n) \
(*(MD5_u32plus *)&ptr[(n) * 4])
#define GET(n) \
SET(n)
#else
#define SET(n) \
(ctx->block[(n)] = \
(MD5_u32plus)ptr[(n) * 4] | \
((MD5_u32plus)ptr[(n) * 4 + 1] << 8) | \
((MD5_u32plus)ptr[(n) * 4 + 2] << 16) | \
((MD5_u32plus)ptr[(n) * 4 + 3] << 24))
#define GET(n) \
(ctx->block[(n)])
#endif
/*
* This processes one or more 64-byte data blocks, but does NOT update
* the bit counters. There are no alignment requirements.
*/
/*
 * Core MD5 compression function: processes one or more complete 64-byte
 * blocks from 'data' into the chaining state in 'ctx'.  'size' must be a
 * non-zero multiple of 64.  Returns a pointer just past the last block
 * consumed.  Bit counters in ctx are NOT updated here (MD5_Update does that).
 */
static const void *body(MD5_CTX *ctx, const void *data, unsigned long size)
{
	const unsigned char *ptr;
	MD5_u32plus a, b, c, d;
	MD5_u32plus saved_a, saved_b, saved_c, saved_d;

	ptr = (const unsigned char *)data;

	a = ctx->a;
	b = ctx->b;
	c = ctx->c;
	d = ctx->d;

	do {
		/* Save the chaining values; added back after the 64 steps
		 * (Davies-Meyer feed-forward). */
		saved_a = a;
		saved_b = b;
		saved_c = c;
		saved_d = d;

/* Round 1 */
		STEP(F, a, b, c, d, SET(0), 0xd76aa478, 7)
		STEP(F, d, a, b, c, SET(1), 0xe8c7b756, 12)
		STEP(F, c, d, a, b, SET(2), 0x242070db, 17)
		STEP(F, b, c, d, a, SET(3), 0xc1bdceee, 22)
		STEP(F, a, b, c, d, SET(4), 0xf57c0faf, 7)
		STEP(F, d, a, b, c, SET(5), 0x4787c62a, 12)
		STEP(F, c, d, a, b, SET(6), 0xa8304613, 17)
		STEP(F, b, c, d, a, SET(7), 0xfd469501, 22)
		STEP(F, a, b, c, d, SET(8), 0x698098d8, 7)
		STEP(F, d, a, b, c, SET(9), 0x8b44f7af, 12)
		STEP(F, c, d, a, b, SET(10), 0xffff5bb1, 17)
		STEP(F, b, c, d, a, SET(11), 0x895cd7be, 22)
		STEP(F, a, b, c, d, SET(12), 0x6b901122, 7)
		STEP(F, d, a, b, c, SET(13), 0xfd987193, 12)
		STEP(F, c, d, a, b, SET(14), 0xa679438e, 17)
		STEP(F, b, c, d, a, SET(15), 0x49b40821, 22)

/* Round 2: message words reused via GET (loaded by SET in round 1) */
		STEP(G, a, b, c, d, GET(1), 0xf61e2562, 5)
		STEP(G, d, a, b, c, GET(6), 0xc040b340, 9)
		STEP(G, c, d, a, b, GET(11), 0x265e5a51, 14)
		STEP(G, b, c, d, a, GET(0), 0xe9b6c7aa, 20)
		STEP(G, a, b, c, d, GET(5), 0xd62f105d, 5)
		STEP(G, d, a, b, c, GET(10), 0x02441453, 9)
		STEP(G, c, d, a, b, GET(15), 0xd8a1e681, 14)
		STEP(G, b, c, d, a, GET(4), 0xe7d3fbc8, 20)
		STEP(G, a, b, c, d, GET(9), 0x21e1cde6, 5)
		STEP(G, d, a, b, c, GET(14), 0xc33707d6, 9)
		STEP(G, c, d, a, b, GET(3), 0xf4d50d87, 14)
		STEP(G, b, c, d, a, GET(8), 0x455a14ed, 20)
		STEP(G, a, b, c, d, GET(13), 0xa9e3e905, 5)
		STEP(G, d, a, b, c, GET(2), 0xfcefa3f8, 9)
		STEP(G, c, d, a, b, GET(7), 0x676f02d9, 14)
		STEP(G, b, c, d, a, GET(12), 0x8d2a4c8a, 20)

/* Round 3 */
		STEP(H, a, b, c, d, GET(5), 0xfffa3942, 4)
		STEP(H2, d, a, b, c, GET(8), 0x8771f681, 11)
		STEP(H, c, d, a, b, GET(11), 0x6d9d6122, 16)
		STEP(H2, b, c, d, a, GET(14), 0xfde5380c, 23)
		STEP(H, a, b, c, d, GET(1), 0xa4beea44, 4)
		STEP(H2, d, a, b, c, GET(4), 0x4bdecfa9, 11)
		STEP(H, c, d, a, b, GET(7), 0xf6bb4b60, 16)
		STEP(H2, b, c, d, a, GET(10), 0xbebfbc70, 23)
		STEP(H, a, b, c, d, GET(13), 0x289b7ec6, 4)
		STEP(H2, d, a, b, c, GET(0), 0xeaa127fa, 11)
		STEP(H, c, d, a, b, GET(3), 0xd4ef3085, 16)
		STEP(H2, b, c, d, a, GET(6), 0x04881d05, 23)
		STEP(H, a, b, c, d, GET(9), 0xd9d4d039, 4)
		STEP(H2, d, a, b, c, GET(12), 0xe6db99e5, 11)
		STEP(H, c, d, a, b, GET(15), 0x1fa27cf8, 16)
		STEP(H2, b, c, d, a, GET(2), 0xc4ac5665, 23)

/* Round 4 */
		STEP(I, a, b, c, d, GET(0), 0xf4292244, 6)
		STEP(I, d, a, b, c, GET(7), 0x432aff97, 10)
		STEP(I, c, d, a, b, GET(14), 0xab9423a7, 15)
		STEP(I, b, c, d, a, GET(5), 0xfc93a039, 21)
		STEP(I, a, b, c, d, GET(12), 0x655b59c3, 6)
		STEP(I, d, a, b, c, GET(3), 0x8f0ccc92, 10)
		STEP(I, c, d, a, b, GET(10), 0xffeff47d, 15)
		STEP(I, b, c, d, a, GET(1), 0x85845dd1, 21)
		STEP(I, a, b, c, d, GET(8), 0x6fa87e4f, 6)
		STEP(I, d, a, b, c, GET(15), 0xfe2ce6e0, 10)
		STEP(I, c, d, a, b, GET(6), 0xa3014314, 15)
		STEP(I, b, c, d, a, GET(13), 0x4e0811a1, 21)
		STEP(I, a, b, c, d, GET(4), 0xf7537e82, 6)
		STEP(I, d, a, b, c, GET(11), 0xbd3af235, 10)
		STEP(I, c, d, a, b, GET(2), 0x2ad7d2bb, 15)
		STEP(I, b, c, d, a, GET(9), 0xeb86d391, 21)

		a += saved_a;
		b += saved_b;
		c += saved_c;
		d += saved_d;

		ptr += 64;
	} while (size -= 64);

	ctx->a = a;
	ctx->b = b;
	ctx->c = c;
	ctx->d = d;

	return ptr;
}
/*
 * Reset the context to the RFC 1321 initial chaining values and a zero
 * message length, ready for a fresh digest computation.
 */
void MD5_Init(MD5_CTX *ctx)
{
	/* Zero the 64-bit message-length counter. */
	ctx->lo = 0;
	ctx->hi = 0;

	/* Standard MD5 initialization constants (RFC 1321, section 3.3). */
	ctx->a = 0x67452301;
	ctx->b = 0xefcdab89;
	ctx->c = 0x98badcfe;
	ctx->d = 0x10325476;
}
/*
 * Absorb 'size' bytes of message data into the running hash state.
 * Input is buffered until a full 64-byte block is available; complete
 * blocks are processed immediately by body().
 */
void MD5_Update(MD5_CTX *ctx, const void *data, unsigned long size)
{
	MD5_u32plus lo_before = ctx->lo;
	unsigned long buffered, room;

	/* Advance the message-length counter (lo holds the byte count in
	 * its low 29 bits; MD5_Final converts it to bits by shifting). */
	ctx->lo = (lo_before + size) & 0x1fffffff;
	if (ctx->lo < lo_before)
		ctx->hi++;
	ctx->hi += size >> 29;

	buffered = lo_before & 0x3f;
	if (buffered) {
		room = 64 - buffered;
		if (size < room) {
			/* Still not a full block: stash and wait. */
			memcpy(&ctx->buffer[buffered], data, size);
			return;
		}
		/* Complete the pending block and compress it. */
		memcpy(&ctx->buffer[buffered], data, room);
		data = (const unsigned char *)data + room;
		size -= room;
		body(ctx, ctx->buffer, 64);
	}

	/* Compress whole 64-byte blocks straight from the caller's buffer. */
	if (size >= 64) {
		data = body(ctx, data, size & ~(unsigned long)0x3f);
		size &= 0x3f;
	}

	/* Keep the trailing partial block for next time. */
	memcpy(ctx->buffer, data, size);
}
/*
 * Finish the digest: append the 0x80 pad byte, zero-fill up to the length
 * field, encode the 64-bit message length in bits (little-endian) into the
 * last 8 bytes of the final block, compress it, and emit the 16-byte digest
 * little-endian into 'result'.  The context is wiped afterwards.
 */
void MD5_Final(MD5_CTX *ctx, unsigned char *result)
{
	unsigned long used = ctx->lo & 0x3f;
	unsigned long available;
	int i;

	/* Mandatory padding: a single 1 bit (the 0x80 byte) follows the data. */
	ctx->buffer[used++] = 0x80;
	available = 64 - used;

	/* If fewer than 8 bytes remain for the length field, this block is
	 * padded out and compressed, and a fresh all-zero block is started. */
	if (available < 8) {
		memset(&ctx->buffer[used], 0, available);
		body(ctx, ctx->buffer, 64);
		used = 0;
		available = 64;
	}
	memset(&ctx->buffer[used], 0, available - 8);

	/* Convert the byte count to a bit count, then store lo/hi LE. */
	ctx->lo <<= 3;
	for (i = 0; i < 4; i++) {
		ctx->buffer[56 + i] = ctx->lo >> (8 * i);
		ctx->buffer[60 + i] = ctx->hi >> (8 * i);
	}

	body(ctx, ctx->buffer, 64);

	/* Serialize the four chaining words a, b, c, d little-endian. */
	for (i = 0; i < 4; i++) {
		result[i]      = ctx->a >> (8 * i);
		result[4 + i]  = ctx->b >> (8 * i);
		result[8 + i]  = ctx->c >> (8 * i);
		result[12 + i] = ctx->d >> (8 * i);
	}

	/* Don't leave message-dependent state lying around in memory. */
	memset(ctx, 0, sizeof(*ctx));
}
/*
 * One-shot convenience wrapper: digest 'size' bytes at 'data' and write
 * the 16-byte MD5 into 'result'.
 */
void MD5_Data(const void *data, unsigned long size, unsigned char *result)
{
	MD5_CTX c;

	MD5_Init(&c);
	MD5_Update(&c, data, size);
	MD5_Final(&c, result);
}
static const size_t buflen = 8*1024;
/*
 * Compute the MD5 digest of a regular file.
 *
 * result: receives MD5_OUTBYTES bytes; zeroed on entry so any failure
 *         leaves a well-defined (all-zero) value.
 * Returns 0 on success, -1 if the path does not exist, is not a regular
 * file, is empty, or cannot be opened, or if allocation fails.
 */
int MD5_File(const char *filename, unsigned char *result)
{
	memset(result, 0, MD5_OUTBYTES);
	struct stat st;
	if(stat(filename, &st)!=0 || !S_ISREG(st.st_mode) || st.st_size==0) {
		// file not exist || not regular file || empty file
		return -1;
	}
	FILE *f = fopen(filename, "rb");
	if(f)
	{
		unsigned char *buf = malloc(buflen);
		if(!buf)
		{
			/* Out of memory: fail cleanly instead of passing NULL
			 * to fread (undefined behavior in the original code). */
			fclose(f);
			return -1;
		}
		MD5_CTX ctx;
		MD5_Init(&ctx);
		size_t n;	/* fread returns size_t, not int */
		while ((n = fread(buf, 1, buflen, f)) > 0)
			MD5_Update(&ctx, buf, n);
		MD5_Final(&ctx, result);
		free(buf);
		fclose(f);
		return 0;
	}
	return -1;
}
static const long minlen = 1024*1024;
/*
 * Compute a parallel "MD5-of-MD5s" checksum of a regular file.
 *
 * The file is split into MD5_PARALLELISM_DEGREE contiguous ranges, each
 * range is hashed independently (in parallel when OpenMP is enabled), and
 * the final result is the MD5 of the concatenated per-range digests.
 * NOTE: this value is deliberately NOT the same as the plain MD5 computed
 * by MD5_File(); since files shorter than 'minlen' fall back to MD5_File(),
 * the digest scheme depends on file size — callers must compare like with
 * like.
 *
 * Returns 0 on success, -1 on any stat/open/seek/read/allocation failure
 * (the original code silently hashed uninitialized or stale buffers in
 * those cases).
 */
int MD5_File_Parallel( const char *filename, unsigned char *result )
{
	memset(result, 0, MD5_OUTBYTES);
	struct stat st;
	if(stat(filename, &st)!=0 || !S_ISREG(st.st_mode) || st.st_size==0) {
		// file not exist || not regular file || empty file
		return -1;
	}
	if(st.st_size < minlen) return MD5_File(filename, result);
	const size_t parallel_size = (st.st_size + MD5_PARALLELISM_DEGREE - 1) / MD5_PARALLELISM_DEGREE;
	unsigned char sum[MD5_PARALLELISM_DEGREE][MD5_OUTBYTES];
	MD5_CTX S[MD5_PARALLELISM_DEGREE][1];
	int failed = 0;	/* set by any worker that hits an I/O or OOM error */
#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(MD5_PARALLELISM_DEGREE)
#else
	for( size_t id__ = 0; id__ < MD5_PARALLELISM_DEGREE; ++id__ )
#endif
	{
#if defined(_OPENMP)
		size_t id__ = omp_get_thread_num();
#endif
		int ok = 0;
		FILE *file = fopen(filename, "rb");
		unsigned char *buf = malloc(buflen);
		if(file && buf)
		{
			ok = 1;
			MD5_Init(S[id__]);
			size_t read_pos = id__ * parallel_size;
			size_t read_len = (id__ == MD5_PARALLELISM_DEGREE-1) ? (st.st_size - read_pos) : (parallel_size);
			/* NOTE(review): fseek takes a long; assumes the file fits in
			 * LONG_MAX bytes on this platform — confirm for 32-bit builds. */
			if(fseek(file, (long) read_pos, SEEK_SET) != 0)
				ok = 0;
			while(ok && read_len >= buflen)
			{
				if(fread(buf, buflen, 1, file) != 1) { ok = 0; break; }
				MD5_Update( S[id__], buf, buflen);
				read_len -= buflen;
			}
			if(ok && read_len > 0)
			{
				if(fread(buf, read_len, 1, file) == 1)
					MD5_Update( S[id__], buf, read_len);
				else
					ok = 0;
			}
			MD5_Final( S[id__], sum[id__]);
		}
		if(!ok)
		{
			/* Propagate the failure to the joining code below. */
#if defined(_OPENMP)
#pragma omp atomic write
#endif
			failed = 1;
		}
		free(buf);	/* free(NULL) is a no-op */
		if(file) fclose(file);
	}//end of parallel region / loop over MD5_PARALLELISM_DEGREE
	if(failed) return -1;	/* result stays all-zero on failure */
	/* Combine: digest the concatenation of the per-range digests. */
	MD5_CTX FS[1];
	MD5_Init(FS);
	for( size_t i = 0; i < MD5_PARALLELISM_DEGREE; ++i )
		MD5_Update( FS, sum[i], MD5_OUTBYTES );
	MD5_Final( FS, result );
	return 0;
}
|
GB_binop__plus_fc64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_01__plus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_03__plus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_fc64)
// A*D function (colscale): GB (_AxD__plus_fc64)
// D*A function (rowscale): GB (_DxB__plus_fc64)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_fc64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_fc64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_fc64)
// C=scalar+B GB (_bind1st__plus_fc64)
// C=scalar+B' GB (_bind1st_tran__plus_fc64)
// C=A+scalar GB (_bind2nd__plus_fc64)
// C=A'+scalar GB (_bind2nd_tran__plus_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GB_FC64_add (aij, bij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC64_add (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_FC64 || GxB_NO_PLUS_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; the actual loop lives in the
// included template, specialized by the GB_* macros defined above.
void GB (_Cdense_ewise3_accum__plus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulation).
// Returns GrB_NO_VALUE when this specialization is compiled out.
GrB_Info GB (_Cdense_ewise3_noaccum__plus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// task slicing (B_ek_slicing) computed by the caller.
GrB_Info GB (_Cdense_accumB__plus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed as untyped bytes) into a dense
// matrix C.
GrB_Info GB (_Cdense_accumb__plus_fc64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC64_t
        GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns first.
    // Kept as-is: this file is auto-generated (see header).
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__plus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template writes results directly into C->x
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__plus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template writes results directly into C->x
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B (set union of patterns), using the
// precomputed task list and mapping arrays built by the caller.
GrB_Info GB (_AaddB__plus_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace declared here so GB_FREE_WORK can release it on all paths
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (set intersection of patterns).
GrB_Info GB (_AemultB_01__plus_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// For PLUS the operator is commutative (GB_BINOP_FLIP is 0), so the
// unflipped template is always used regardless of 'flipxy'.
GrB_Info GB (_AemultB_02__plus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult C<M> = A.*B with the PLUS fc64 operator, for
// the case where M is sparse/hyper and both A and B are bitmap/full.
// The loop body comes from the included template.
GrB_Info GB (_AemultB_03__plus_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct, // if true, use M structurally (ignore its values)
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult C=A.*B, C<M>=A.*B, or C<!M>=A.*B with the
// PLUS fc64 operator, where the result C is held as a bitmap.
// The loop body comes from the included template.
GrB_Info GB (_AemultB_bitmap__plus_fc64)
(
GrB_Matrix C,
const int ewise_method, // which bitmap emult variant to apply
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x + Bx [p] for every entry present in B, with the scalar x bound
// as the first operand. Cx and Bx may be aliased; entries are skipped when
// the bitmap Bb marks them absent.
GrB_Info GB (_bind1st__plus_fc64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // only touch positions that Bb reports as present
        if (GBB (Bb, k))
        {
            GxB_FC64_t bij = GBX (Bx, k, false) ;
            Cx [k] = GB_FC64_add (x, bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] + y for every entry present in A, with the scalar y bound
// as the second operand. Cx and Ax may be aliased; entries are skipped when
// the bitmap Ab marks them absent.
GrB_Info GB (_bind2nd__plus_fc64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only touch positions that Ab reports as present
        if (GBB (Ab, k))
        {
            GxB_FC64_t aij = GBX (Ax, k, false) ;
            Cx [k] = GB_FC64_add (aij, y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the transpose template included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_add (x, aij) ; \
}
// C = op (x, A'): transpose A and apply PLUS with x bound as the first
// argument. The traversal logic lives in GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__plus_fc64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// re-establish GB_ATYPE for any generated code that follows; preprocessor
// directives take effect regardless of their position inside the function
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the transpose template included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_add (aij, y) ; \
}
// C = op (A', y): transpose A and apply PLUS with y bound as the second
// argument. The traversal logic lives in GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__plus_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
network.h | // == mojo ====================================================================
//
// Copyright (c) gnawice@gnawice.com. All rights reserved.
// See LICENSE in root folder
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files(the "Software"),
// to deal in the Software without restriction, including without
// limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, and/or sell copies of the Software, and to permit persons to
// whom the Software is furnished to do so, subject to the following
// conditions :
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
// OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
// THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
// ============================================================================
// network.h: The main artificial neural network graph for mojo
// ==================================================================== mojo ==
#pragma once
#include <string>
#include <iostream> // cout
#include <fstream>
#include <sstream>
#include <map>
#include <vector>
#include "layer.h"
#include "solver.h"
#include "activation.h"
#include "cost.h"
// hack for VS2010 to handle c++11 for(:)
#if (_MSC_VER == 1600)
#ifndef __for__
#define __for__ for each
#define __in__ in
#endif
#else
#ifndef __for__
#define __for__ for
#define __in__ :
#endif
#endif
#if defined(MOJO_CV2) || defined(MOJO_CV3)
#ifdef MOJO_CV2
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/contrib/contrib.hpp"
#pragma comment(lib, "opencv_core249")
#pragma comment(lib, "opencv_highgui249")
#pragma comment(lib, "opencv_imgproc249")
#pragma comment(lib, "opencv_contrib249")
#else //#ifdef MOJO_CV3
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#pragma comment(lib, "opencv_world310")
#endif
#endif
#define blocksize 100000
namespace mojo {
#if defined(MOJO_CV2) || defined(MOJO_CV3)
// forward declare these for data augmentation
cv::Mat matrix2cv(const mojo::matrix &m, bool uc8 = false);
mojo::matrix cv2matrix(cv::Mat &m);
mojo::matrix transform(const mojo::matrix in, const int x_center, const int y_center, int out_dim, float theta = 0, float scale = 1.f);
#endif
#ifdef MOJO_PROFILE_LAYERS
#ifdef _WIN32
//* used for profiling layers (QueryPerformanceCounter based, Windows only)
double PCFreq = 0.0;
__int64 CounterStart = 0;
// Starts the high-resolution timer; GetCounter() then reports elapsed ms.
void StartCounter()
{
LARGE_INTEGER li;
if (!QueryPerformanceFrequency(&li)) return; // no high-res timer available
PCFreq = double(li.QuadPart) / 1000.0; // ticks per millisecond
QueryPerformanceCounter(&li);
CounterStart = li.QuadPart;
}
// Milliseconds elapsed since the last StartCounter() call.
double GetCounter()
{
LARGE_INTEGER li;
QueryPerformanceCounter(&li);
return double(li.QuadPart - CounterStart) / PCFreq;
}
#else
// non-Windows builds: profiling stubs that do nothing
void StartCounter(){}
double GetCounter(){return 0;}
#endif
#endif
//*/
// Replaces every occurrence of `from` in `str` with `to`, in place.
// Scanning resumes just past each inserted replacement, so a `to` that
// contains `from` (e.g. replacing "x" with "yx") cannot loop forever.
// An empty `from` is a no-op.
void replace_str(std::string& str, const std::string& from, const std::string& to) {
	if (from.empty())
		return;
	for (size_t pos = str.find(from); pos != std::string::npos; pos = str.find(from, pos)) {
		str.replace(pos, from.length(), to);
		pos += to.length();
	}
}
// Returns half the squared Euclidean distance between `out` and `target`
// (the classic quadratic cost) and, when `best_index` is non-NULL, stores
// the argmax of `out` there (first index wins on ties).
float match_labels(const float *out, const float *target, const int size, int *best_index = NULL)
{
	float energy = 0;
	int arg = 0;
	for (int i = 0; i < size; i++)
	{
		const float d = out[i] - target[i];
		energy += d * d;
		if (out[arg] < out[i]) arg = i;
	}
	if (best_index != NULL) *best_index = arg;
	return 0.5f * energy;
}
// Index of the largest element of `out` (first index wins on ties).
int arg_max(const float *out, const int size)
{
	int best = 0;
	for (int i = 1; i < size; i++)
	{
		if (out[i] > out[best]) best = i;
	}
	return best;
}
//----------------------------------------------------------------------
// network
// - class that holds all the layers and connection information
// - runs forward prediction
class network
{
int _size; // output size
int _thread_count; // determines number of layer sets (copys of layers)
int _internal_thread_count; // used for speeding up convolutions, etc..
static const int MAIN_LAYER_SET = 0;
// training related stuff
int _batch_size; // determines number of dW sets
float _skip_energy_level;
bool _smart_train;
std::vector <float> _running_E;
double _running_sum_E;
cost_function *_cost_function;
solver *_solver;
static const unsigned char BATCH_RESERVED = 1, BATCH_FREE = 0, BATCH_COMPLETE = 2;
static const int BATCH_FILLED_COMPLETE = -2, BATCH_FILLED_IN_PROCESS = -1;
#ifdef MOJO_OMP
// one global lock guarding the mini-batch bookkeeping (batch_open etc.)
omp_lock_t _lock_batch;
void lock_batch() {omp_set_lock(&_lock_batch);}
void unlock_batch() {omp_unset_lock(&_lock_batch);}
void init_lock() {omp_init_lock(&_lock_batch);}
void destroy_lock() {omp_destroy_lock(&_lock_batch);}
int get_thread_num() {return omp_get_thread_num();}
#else
// single-threaded build: locking is a no-op and the thread id is always 0
void lock_batch() {}
void unlock_batch() {}
void init_lock(){}
void destroy_lock() {}
int get_thread_num() {return 0;}
#endif
public:
// training progress stuff
int train_correct;
int train_skipped;
int stuck_counter;
int train_updates;
int train_samples;
int epoch_count;
int max_epochs;
float best_estimated_accuracy;
int best_accuracy_count;
float old_estimated_accuracy;
float estimated_accuracy;
// data augmentation stuff
int use_augmentation; // 0=off, 1=mojo, 2=opencv
int augment_x, augment_y;
int augment_h_flip, augment_v_flip;
mojo::pad_type augment_pad;
float augment_theta;
float augment_scale;
// here we have multiple sets of the layers to allow threading and batch processing
// a separate layer set is needed for each independent thread
std::vector< std::vector<base_layer *>> layer_sets;
std::map<std::string, int> layer_map; // name-to-index of layer for layer management
std::vector<std::pair<std::string, std::string>> layer_graph; // pairs of names of layers that are connected
std::vector<matrix *> W; // these are the weights between/connecting layers
// these sets are needed because we need copies for each item in mini-batch
std::vector< std::vector<matrix>> dW_sets; // only for training, will have _batch_size of these
std::vector< std::vector<matrix>> dbias_sets; // only for training, will have _batch_size of these
std::vector< unsigned char > batch_open; // only for training, will have _batch_size of these
// Constructs an empty network. `opt_name` names the training solver and is
// forwarded to new_solver(). Starts with one layer set, one mini-batch
// slot, all training counters reset, and data augmentation disabled.
network(const char* opt_name=NULL): _thread_count(1), _skip_energy_level(0.f), _batch_size(1)
{
_internal_thread_count=1;
_size=0;
_solver = new_solver(opt_name);
_cost_function = NULL;
layer_sets.resize(1);
dW_sets.resize(_batch_size);
dbias_sets.resize(_batch_size);
batch_open.resize(_batch_size);
_running_sum_E = 0.;
// training progress counters
train_correct = 0;
train_samples = 0;
train_skipped = 0;
epoch_count = 0;
max_epochs = 1000;
train_updates = 0;
estimated_accuracy = 0;
old_estimated_accuracy = 0;
stuck_counter = 0;
best_estimated_accuracy=0;
best_accuracy_count=0;
// data augmentation defaults: off
use_augmentation=0;
augment_x = 0; augment_y = 0; augment_h_flip = 0; augment_v_flip = 0;
augment_pad =mojo::edge;
augment_theta=0; augment_scale=0;
init_lock();
#ifdef USE_AF
af::setDevice(0); // ArrayFire backend: select device 0
af::info();
#endif
}
// Frees all layers and weights, then the cost function and solver,
// and finally the OMP batch lock.
~network()
{
clear();
if (_cost_function) delete _cost_function;
if(_solver) delete _solver;
destroy_lock();
}
// call clear if you want to load a different configuration/model
// Deletes every layer in every layer set and every weight matrix, then
// resets the name->index map and the connection graph.
// Bug fix: the loop previously called layer_sets.clear() (the OUTER vector)
// inside the per-set loop, so only the first set's layers were deleted and
// every other layer set leaked; it now clears layer_sets[i].
void clear()
{
	for (int i = 0; i < (int)layer_sets.size(); i++)
	{
		__for__(auto l __in__ layer_sets[i]) delete l;
		layer_sets[i].clear();
	}
	layer_sets.clear();
	__for__(auto w __in__ W) if (w) delete w;
	W.clear();
	layer_map.clear();
	layer_graph.clear();
}
// output size of the final layer (set by the last push_back)
int out_size() {return _size;}
// Fills w/h/c with the dimensions of the first layer's node.
// Returns false when no layers have been added yet.
bool get_input_size(int *w, int *h, int *c)
{
if(layer_sets[MAIN_LAYER_SET].size()<1) return false;
*w=layer_sets[MAIN_LAYER_SET][0]->node.cols;*h=layer_sets[MAIN_LAYER_SET][0]->node.rows;*c=layer_sets[MAIN_LAYER_SET][0]->node.chans;
return true;
}
// sets up number of layer copies to run over multiple threads
// (grows layer_sets to _thread_count entries; never shrinks; the new
// sets are filled by push_back/connect and kept in sync below)
void build_layer_sets()
{
int layer_cnt = (int)layer_sets.size();
if (layer_cnt<_thread_count) layer_sets.resize(_thread_count);
// ToDo: add shrink back / else if(layer_cnt>_thread_count)
sync_layer_sets();
}
// number of layer sets available for concurrent forward()/train calls
inline int get_thread_count() {return _thread_count;}
// must call this with max thread count before constructing layers
// value <1 will result in thread count = # cores (including hyperthreaded)
void enable_external_threads(int threads = -1)
{
#ifdef MOJO_OMP
if (threads < 1) threads = omp_get_num_procs();
_thread_count = threads;
// only widen OMP's pool when internal threading does not already need more
if(_internal_thread_count<=_thread_count) omp_set_num_threads(_thread_count);
omp_set_nested(1);
#else
if (threads < 1) _thread_count = 1;
else _thread_count = threads;
if (threads > 1) bail("must define MOJO_OMP to used threading");
#endif
// grow the per-thread copies of the layers to match
build_layer_sets();
}
// sets the thread count used inside layers (convolutions etc.);
// value <1 results in one less than the core count (minimum 1)
void enable_internal_threads(int threads = -1)
{
#ifdef MOJO_OMP
if (threads < 1) {threads = omp_get_num_procs(); threads = threads-1;} // one less than core count
if(threads<1) _internal_thread_count=1;
else _internal_thread_count=threads;
omp_set_nested(1);
#else
_internal_thread_count=1; // no OMP: internal threading unavailable
#endif
}
// when using threads, need to get bias data synched between all layer sets,
// call this after bias update in main layer set to copy the bias to the other sets
// (copies every bias value from layer set 0 into layer sets 1..n-1)
void sync_layer_sets()
{
for(int i=1; i<(int)layer_sets.size();i++)
for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++)
for(int k=0; k<layer_sets[MAIN_LAYER_SET][j]->bias.size(); k++)
(layer_sets[i])[j]->bias.x[k]=(layer_sets[MAIN_LAYER_SET])[j]->bias.x[k];
}
// used to add some noise to weights (e.g. to kick training out of a rut);
// each weight matrix gets random noise (fill_random_normal) scaled by 1/size
void heat_weights()
{
__for__(auto w __in__ W)
{
if (!w) continue; // unconnected slots hold NULL
matrix noise(w->cols, w->rows, w->chans);
noise.fill_random_normal(1.f/ noise.size());
*w += noise;
}
}
// subtracts the mean from each weight matrix
// (the previous comment was a copy-paste of heat_weights' and was wrong)
void remove_means()
{
__for__(auto w __in__ W)
if(w) w->remove_mean();
}
// used to push a layer back in the ORDERED list of layers
// if connect_all() is used, then the order of the push_back is used to connect the layers
// when forward or backward propagation, this order is used for the serialized order of calculations
// Layer_name must be unique; returns false when the name already exists.
bool push_back(const char *layer_name, const char *layer_config)
{
	// Bug fix: the old duplicate test `if(layer_map[layer_name])` could not
	// detect a duplicate of the FIRST layer (its stored index is 0, which
	// reads as false) and operator[] default-inserted a stray map entry for
	// every name probed; count() has neither problem.
	if (layer_map.count(layer_name)) return false; // already exists
	base_layer *l = new_layer(layer_name, layer_config);
	// make sure there is a 'set' to add layers to
	if (layer_sets.size() < 1)
	{
		std::vector<base_layer *> layer_set;
		layer_sets.push_back(layer_set);
	}
	// make sure layer_sets are created for each thread
	build_layer_sets();
	layer_map[layer_name] = (int)layer_sets[MAIN_LAYER_SET].size();
	layer_sets[MAIN_LAYER_SET].push_back(l);
	// update as potential last layer - so it sets the out size
	_size = l->fan_size();
	// add other copies needed for threading
	for (int i = 1; i < (int)layer_sets.size(); i++) layer_sets[i].push_back(new_layer(layer_name, layer_config));
	return true;
}
// connect 2 layers together and initialize weights
// top and bottom concepts are reversed from literature
// my 'top' is the input of a forward() pass and the 'bottom' is the output
// perhaps 'top' traditionally comes from the brain model, but my 'top' comes
// from reading order (information flows top to bottom)
void connect(const char *layer_name_top, const char *layer_name_bottom)
{
	size_t i_top = layer_map[layer_name_top];
	size_t i_bottom = layer_map[layer_name_bottom];
	base_layer *l_top = layer_sets[MAIN_LAYER_SET][i_top];
	base_layer *l_bottom = layer_sets[MAIN_LAYER_SET][i_bottom];
	int w_i = (int)W.size();
	matrix *w = l_bottom->new_connection(*l_top, w_i);
	W.push_back(w);
	layer_graph.push_back(std::make_pair(layer_name_top, layer_name_bottom));
	// build the same connection in the other layer sets (threads); the main
	// set's matrix in W is the one used, so the duplicate returned here is freed
	for (int i = 1; i < (int)layer_sets.size(); i++)
	{
		l_top = layer_sets[i][i_top];
		l_bottom = layer_sets[i][i_bottom];
		delete l_bottom->new_connection(*l_top, w_i);
	}
	// we need to let solver prepare space for stateful information
	if (_solver)
	{
		if (w) _solver->push_back(w->cols, w->rows, w->chans);
		else _solver->push_back(1, 1, 1);
	}
	int fan_in = l_bottom->fan_size();
	int fan_out = l_top->fan_size();
	// ToDo: this may be broke when 2 layers connect to one. need to fix (i.e. resnet)
	// after all connections, run through and do weights with correct fan count
	// initialize weights by the receiving layer's activation
	// ToDo: separate and allow users to configure(?)
	if (w && l_bottom->has_weights())
	{
		if (strcmp(l_bottom->p_act->name, "tanh") == 0)
		{
			// xavier : for tanh
			float weight_base = (float)(std::sqrt(6. / ((double)fan_in + (double)fan_out)));
			w->fill_random_uniform(weight_base);
		}
		else if (strcmp(l_bottom->p_act->name, "sigmoid") == 0)
		{
			// xavier (x4) : for sigmoid
			// (fix: the original condition OR'd the identical "sigmoid"
			// comparison with itself; the redundant duplicate was removed -
			// behavior is unchanged)
			float weight_base = 4.f*(float)(std::sqrt(6. / ((double)fan_in + (double)fan_out)));
			w->fill_random_uniform(weight_base);
		}
		else if ((strcmp(l_bottom->p_act->name, "lrelu") == 0) || (strcmp(l_bottom->p_act->name, "relu") == 0)
			|| (strcmp(l_bottom->p_act->name, "vlrelu") == 0) || (strcmp(l_bottom->p_act->name, "elu") == 0))
		{
			// he : for relu-family activations
			float weight_base = (float)(std::sqrt(2. / (double)fan_in));
			w->fill_random_normal(weight_base);
		}
		else
		{
			// lecun : orig
			float weight_base = (float)(std::sqrt(1. / (double)fan_in));
			w->fill_random_uniform(weight_base);
		}
	}
	else if (w) w->fill(0);
}
// automatically connect all layers in the order they were provided
// easy way to go, but can't deal with branch/highway/resnet/inception types of architectures
// (chains layer j to layer j+1 for every adjacent pair)
void connect_all()
{
for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size()-1; j++)
connect(layer_sets[MAIN_LAYER_SET][j]->name.c_str(), layer_sets[MAIN_LAYER_SET][j+1]->name.c_str());
}
// Linear search of the master layer set for a layer with the given name.
// Returns the layer's index, or -1 when no such layer exists.
int get_layer_index(const char *name)
{
	const int count = (int)layer_sets[MAIN_LAYER_SET].size();
	for (int idx = 0; idx < count; idx++)
	{
		if (layer_sets[MAIN_LAYER_SET][idx]->name.compare(name) == 0)
			return idx;
	}
	return -1;
}
// get the list of layers used (but not connection information)
// Returns a human-readable summary: one " index : name : config" entry per
// layer, then the connection pairs, three per line.
std::string get_configuration()
{
std::string str;
std::string space(" ");
std::string symbol(" : ");
// print all layer configs
for (int j = 0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++)
{
// j==0 is special-cased (presumably dtoa(0) misbehaves) - TODO confirm
std::string jstr;
if(j == 0) jstr = "0";
else jstr = dtoa(j);
std::string lname(layer_sets[MAIN_LAYER_SET][j]->name);
std::string lsets(layer_sets[MAIN_LAYER_SET][j]->get_config_string());
str += space + jstr + symbol + lname + symbol + lsets;
}
str += "\n";
// print layer links, three "top-bottom" pairs per line
if (layer_graph.size() <= 0) return str;
for (int j = 0; j < (int)layer_graph.size(); j++)
{
if (j % 3 == 0) str += " ";
if((j % 3 == 1)|| (j % 3 == 2)) str += ", ";
str +=layer_graph[j].first + "-" + layer_graph[j].second;
if (j % 3 == 2) str += "\n";
}
return str;
}
// Runs a forward pass on `in` and returns the index of the strongest output
// (the predicted class). If calling from multiple threads, pass the thread
// index - the internal buffers are not otherwise thread safe; see forward().
int predict_class(const float *in, int _thread_number = -1)
{
	const float *prediction = forward(in, _thread_number);
	return arg_max(prediction, out_size());
}
//----------------------------------------------------------------------------------------------------------
// F O R W A R D
//
// the main forward pass
// if calling over multiple threads, provide the thread index since the interal data is not otherwise thread safe
// train parameter is used to designate the forward pass is used in training (it turns on dropout layers, etc..)
// Runs one forward pass over layer set `_thread_number` and returns a live
// pointer to the last layer's node data (do not free or modify it).
// `_train` is forwarded to the layers (enables training-time behavior such
// as dropout). `in` holds the concatenated data for all input layers.
float* forward(const float *in, int _thread_number=-1, int _train=0)
{
if(_thread_number<0) _thread_number=get_thread_num();
if (_thread_number > _thread_count && _thread_count>0) bail("need to enable threading\n");
// NOTE(review): the check above lets _thread_number == _thread_count
// through; the size check below is what actually protects the indexing.
if (_thread_number >= (int)layer_sets.size()) bail("need to enable threading\n");
// clear nodes to zero & find input layers
std::vector<base_layer *> inputs;
__for__(auto layer __in__ layer_sets[_thread_number])
{
if (dynamic_cast<input_layer*> (layer) != NULL) inputs.push_back(layer);
layer->set_threading(_internal_thread_count);
layer->node.fill(0.f);
}
// copy the (concatenated) input buffer into each input layer in order
const float *in_ptr = in;
__for__(auto layer __in__ inputs)
{
memcpy(layer->node.x, in_ptr, sizeof(float)*layer->node.size());
in_ptr += layer->node.size();
}
// for all layers
__for__(auto layer __in__ layer_sets[_thread_number])
{
// add bias and activate these outputs (they should all be summed up from other branches at this point)
layer->activate_nodes();
// send output signal downstream (note in this code 'top' is input layer, 'bottom' is output - bucking tradition)
__for__ (auto &link __in__ layer->forward_linked_layers)
{
// the 'link' pair was built by connect(): (weight index, downstream layer);
// using the shape of W avoids a list of paired connections
int connection_index = link.first;
base_layer *p_bottom = link.second;
// weight distribution of the signal to layers under it
#ifdef MOJO_PROFILE_LAYERS
StartCounter();
#endif
p_bottom->accumulate_signal(*layer, *W[connection_index], _train);
#ifdef MOJO_PROFILE_LAYERS
std::cout << p_bottom->name << "\t" << GetCounter() << "ms\n";
#endif
}
}
// return pointer to float * result from last layer
return layer_sets[_thread_number][layer_sets[_thread_number].size()-1]->node.x;
}
// printf-style helper: formats into a bounded local buffer and hands the
// result to the ocall that appends to the currently open network file.
// Output longer than BUFSIZ-1 characters is truncated by vsnprintf.
void fprint_networkfile(const char *fmt, ...) {
	char line[BUFSIZ] = { '\0' };
	va_list args;
	va_start(args, fmt);
	vsnprintf(line, BUFSIZ, fmt, args);
	va_end(args);
	ocall_fprint_networkfile(line);
}
//----------------------------------------------------------------------------------------------------------
// W R I T E
//
// write parameters to stream/file
// note that this does not persist intermediate training information that could be needed to 'pickup where you left off'
// Serializes the model (layer definitions, connection graph, biases and
// weights) to `filename` in the mojo01 format via the file ocalls.
// binary=true writes raw float blocks in chunks of `blocksize` floats;
// binary=false writes whitespace-separated text. Always returns true.
// Fix: the layer name (a std::string) was previously passed straight into
// the varargs formatter for "%s", which is undefined behavior; it now goes
// through .c_str().
bool write(char *filename, bool binary = false, bool final = false)
{
	int retocall;
	open_outputnetworkfile(&retocall, filename);
	// header: magic string, then the layer count
	int layer_cnt = (int)layer_sets[MAIN_LAYER_SET].size();
	fprint_networkfile("mojo01\n");
	fprint_networkfile("%d\n", (int)(layer_cnt));
	// layer definitions: name line followed by config string
	for(int j=0; j<(int)layer_sets[0].size(); j++)
		fprint_networkfile("%s\n%s", layer_sets[MAIN_LAYER_SET][j]->name.c_str(), layer_sets[MAIN_LAYER_SET][j]->get_config_string().c_str());
	// connection graph: count, then top/bottom name pairs
	fprint_networkfile("%d\n", (int)layer_graph.size());
	for(int j=0; j<(int)layer_graph.size(); j++)
		fprint_networkfile("%s\n%s\n", layer_graph[j].first.c_str(), layer_graph[j].second.c_str());
	if(binary)
	{
		fprint_networkfile("1\n"); // flags that this is binary data
		// bias data, chunked so each ocall moves at most blocksize floats
		for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++)
			if(layer_sets[MAIN_LAYER_SET][j]->use_bias())
			{
				int breakdown = 0;
				while(breakdown + blocksize < layer_sets[MAIN_LAYER_SET][j]->bias.size())
				{
					ocall_write((char*)layer_sets[MAIN_LAYER_SET][j]->bias.x + breakdown*sizeof(float), blocksize*sizeof(float));
					breakdown += blocksize;
				}
				ocall_write((char*)layer_sets[MAIN_LAYER_SET][j]->bias.x + breakdown*sizeof(float), (layer_sets[MAIN_LAYER_SET][j]->bias.size()-breakdown)*sizeof(float));
			}
		// weight data, same chunking
		for (int j = 0; j < (int)W.size(); j++)
		{
			if (W[j])
			{
				int breakdown = 0;
				while(breakdown + blocksize < W[j]->size())
				{
					ocall_write((char*)W[j]->x + breakdown*sizeof(float), blocksize*sizeof(float));
					breakdown += blocksize;
				}
				ocall_write((char*)W[j]->x + breakdown*sizeof(float), (W[j]->size()-breakdown)*sizeof(float));
			}
		}
	}
	else
	{
		fprint_networkfile("0\n"); // text data follows
		// bias data as text, one line per biased layer
		for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++)
		{
			if (layer_sets[MAIN_LAYER_SET][j]->use_bias())
			{
				for (int k = 0; k < layer_sets[MAIN_LAYER_SET][j]->bias.size(); k++)
					fprint_networkfile("%f ", layer_sets[MAIN_LAYER_SET][j]->bias.x[k]);
				fprint_networkfile("\n");
			}
		}
		// weight data as text, one line per connection
		for(int j=0; j<(int)W.size(); j++)
		{
			if (W[j])
			{
				for (int i = 0; i < W[j]->size(); i++)
					fprint_networkfile("%f ", W[j]->x[i]);
				fprint_networkfile("\n");
			}
		}
	}
	close_outputnetworkfile();
	return true;
}
// read network from a file/stream
// set once getcleanline() sees EOF so callers know to stop reading
bool endoffile;
// Reads the next line from the open network file one character at a time
// (via ocall_fread_networkfile) and returns it without the terminator.
// Recognizes '\n' and "\r\n" endings, and a final line ended by EOF.
std::string getcleanline()
{
std::string s;
for (;;) {
char c;
ocall_fread_networkfile(&c);
switch (c) {
case '\n':
return s;
case '\r':
// consume the '\n' expected after '\r' in a "\r\n" pair
char cc; ocall_fread_networkfile(&cc);
if (cc == '\n')
return s;
// NOTE(review): when the char after '\r' is NOT '\n', control falls
// through into the EOF case below - endoffile is set spuriously, the
// '\r' may be appended to s, and cc is dropped. A bare '\r' should
// probably just end the line; confirm against the file format.
case EOF:
endoffile = true;
// Also handle the case when the last line has no line ending
if (s.empty())
return s;
default:
s += (char)c;
}
}
}
//----------------------------------------------------------------------------------------------------------
// R E A D
//
// Parses a model from the already-open network file. Three formats are
// recognized from the first line:
//  - "mojo01": layer count, layer defs, connection graph, then bias/weight
//    data (binary or text, chosen by a flag line)
//  - "mojo:" : a bare list of layer definitions, auto-connected in order
//  - legacy  : like mojo01 but without the magic line
// Returns true on completion; layer sets are synchronized at the end.
// Fix: two printf calls logged the current line by passing a std::string
// to "%s" (undefined behavior); they now pass s.c_str().
bool read()
{
	std::string s;
	s = getcleanline();
	int layer_count;
	int version = 0;
	if (s.compare("mojo01")==0)
	{
		s = getcleanline();
		layer_count = atoi(s.c_str());
		version = 1;
		printf("version = 1, layer_count: %d, line: %s\n", layer_count, s.c_str());
	}
	else if (s.find("mojo:") == 0)
	{
		// headerless format: every remaining non-empty, non-comment line is
		// one layer definition; layers are connected in the order given
		version = -1;
		int cnt = 1;
		while (!endoffile)
		{
			s = getcleanline();
			if (s.empty()) continue;
			if(s[0]=='#') continue;
			push_back(dtoa(cnt), s.c_str());
			printf("layer %d: %s\n", cnt, s.c_str());
			cnt++;
		}
		connect_all();
		// copies batch=0 stuff to other batches
		sync_layer_sets();
		return true;
	}
	else
	{
		layer_count = atoi(s.c_str());
		printf("layer_count: %d, line: %s\n", layer_count, s.c_str());
	}
	// read layer definitions: name line followed by config line
	std::string layer_name;
	std::string layer_def;
	for (auto i=0; i<layer_count; i++)
	{
		layer_name = getcleanline();
		layer_def = getcleanline();
		push_back(layer_name.c_str(), layer_def.c_str());
		printf("%s: %s\n", layer_name.c_str(), layer_def.c_str());
	}
	// read the connection graph; a non-positive count means "connect in order"
	int graph_count;
	ocall_getint(&graph_count);
	end_this_line();
	if (graph_count <= 0)
	{
		connect_all();
	}
	else
	{
		std::string layer_name1;
		std::string layer_name2;
		for (auto i=0; i<graph_count; i++)
		{
			layer_name1= getcleanline();
			layer_name2 = getcleanline();
			printf("%d: %s", i, layer_name1.c_str());
			printf("\t%s", layer_name2.c_str());
			printf("\n");
			connect(layer_name1.c_str(), layer_name2.c_str());
		}
	}
	int binary;
	s=getcleanline(); // flag line: 1 = raw binary floats, 0 = text floats
	binary = atoi(s.c_str());
	printf("binary: %d\n", binary);
	if(binary==1)
	{
		// biases, chunked so each ocall reads at most blocksize floats
		for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++)
			if (layer_sets[MAIN_LAYER_SET][j]->use_bias())
			{
				int breakdown = 0;
				while(breakdown + blocksize < layer_sets[MAIN_LAYER_SET][j]->bias.size())
				{
					ocall_read((char*)layer_sets[MAIN_LAYER_SET][j]->bias.x + breakdown*sizeof(float), blocksize*sizeof(float));
					breakdown += blocksize;
				}
				ocall_read((char*)layer_sets[MAIN_LAYER_SET][j]->bias.x + breakdown*sizeof(float), (layer_sets[MAIN_LAYER_SET][j]->bias.size()-breakdown)*sizeof(float));
			}
		// weights, same chunking
		for (int j = 0; j < (int)W.size(); j++)
		{
			if (W[j])
			{
				printf("loading weight for %d-th layer: %d\n", j, W[j]->size());
				int breakdown = 0;
				while(breakdown + blocksize < W[j]->size())
				{
					ocall_read((char*)W[j]->x + breakdown*sizeof(float), blocksize*sizeof(float));
					breakdown += blocksize;
				}
				ocall_read((char*)W[j]->x + breakdown*sizeof(float), (W[j]->size()-breakdown)*sizeof(float));
			}else
				printf("loading weight for %d-th layer: 0\n", j);
		}
	}
	else if(binary==0)// text version
	{
		// biases as whitespace-separated floats, one line per biased layer
		for(int j=0; j<layer_count; j++)
		{
			if (layer_sets[MAIN_LAYER_SET][j]->use_bias())
			{
				for (int k = 0; k < layer_sets[MAIN_LAYER_SET][j]->bias.size(); k++)
				{
					ocall_getfloat(&layer_sets[MAIN_LAYER_SET][j]->bias.x[k]);
				}
				end_this_line();
			}
		}
		// weights as whitespace-separated floats, one line per connection
		for (auto j=0; j<(int)W.size(); j++)
		{
			if (W[j])
			{
				for (int i = 0; i < W[j]->size(); i++)
					ocall_getfloat(&W[j]->x[i]);
				end_this_line();
			}
		}
	}
	// copies batch=0 stuff to other batches
	sync_layer_sets();
	return true;
}
// Opens a model file through the enclave ocall layer, parses it with read(),
// and closes the file again.  Returns false when the file cannot be opened,
// otherwise the parse result.
bool read(char *filename)
{
	//std::ifstream fs(filename.c_str(),std::ios::binary);
	int open_status;
	open_networkfile(&open_status, filename);
	if (open_status != 0) return false;  // could not open the network file
	endoffile = false;
	const bool parsed_ok = read();       // parse the stream we just opened
	close_networkfile();
	return parsed_ok;
}
// bool read(const char *filename) { return read(std::string(filename)); }
#ifndef MOJO_NO_TRAINING // this is surely broke by now and will need to be fixed
// ===========================================================================
// training part
// ===========================================================================
// resets the state of all batches to 'free' state
//void reset_mini_batch() { memset(batch_open.data(), BATCH_FREE, batch_open.size()); }
// Marks every mini-batch slot as available again (state BATCH_FREE).
void reset_mini_batch()
{
	const int slot_count = (int)batch_open.size();
	for (int slot = 0; slot < slot_count; slot++)
		batch_open[slot] = BATCH_FREE;  // one byte per slot, same effect as the old per-element memset
}
// sets up number of mini batches (storage for sets of weight deltas)
// Configures storage for batch_cnt mini-batch slots (weight-delta and
// bias-delta sets plus the per-slot state array) and frees them all.
// A count below 1 is clamped to 1.
void set_mini_batch_size(int batch_cnt)
{
	const int slots = (batch_cnt < 1) ? 1 : batch_cnt;
	_batch_size = slots;
	dW_sets.resize(slots);
	dbias_sets.resize(slots);
	batch_open.resize(slots);
	reset_mini_batch();
}
int get_mini_batch_size() { return _batch_size; } // number of slots configured via set_mini_batch_size()
// return index of next free batch
// or returns -2 (BATCH_FILLED_COMPLETE) if no free batches - all complete (need a sync call)
// or returns -1 (BATCH_FILLED_IN_PROCESS) if no free batches - some still in progress (must wait to see if one frees)
// Returns the index of the first free mini-batch slot, or
// BATCH_FILLED_IN_PROCESS (-1) when some slots are still reserved, or
// BATCH_FILLED_COMPLETE (-2) when every slot is complete (caller must sync).
// Caller is expected to hold the batch lock.
int get_next_open_batch()
{
	int reserved = 0;
	int filled = 0;
	for (int i = 0; i < (int)batch_open.size(); i++)
	{
		if (batch_open[i] == BATCH_FREE) return i;
		if (batch_open[i] == BATCH_RESERVED) reserved++;
		if (batch_open[i] == BATCH_COMPLETE) filled++;
	}
	if (reserved > 0) return BATCH_FILLED_IN_PROCESS; // all filled but waiting on reserved slots
	if (filled == (int)batch_open.size()) return BATCH_FILLED_COMPLETE; // all filled and complete
	bail("threading error"); // should not get here unless threading problem
	// Defensive: bail() may not be declared noreturn; falling off the end of a
	// value-returning function is undefined behavior, so return a sane sentinel.
	return BATCH_FILLED_IN_PROCESS;
}
//----------------------------------------------------------------------------------------------------------
// s y n c m i n i b a t c h
//
// apply all weights to first set of dW, then apply to model weights
// Folds every COMPLETE slot's deltas into set [0], applies them to the model
// weights via the solver, then frees all slots.  Must not run while any slot
// is still RESERVED (a worker is mid-backprop against the current weights).
void sync_mini_batch()
{
	// need to ensure no batches in progress (reserved)
	int next = get_next_open_batch();
	if (next == BATCH_FILLED_IN_PROCESS) bail("thread lock");
	int layer_cnt = (int)layer_sets[MAIN_LAYER_SET].size();
	base_layer *layer;
	// sum contributions
	for (int k = layer_cnt - 1; k >= 0; k--)
	{
		layer = layer_sets[MAIN_LAYER_SET][k];
		__for__(auto &link __in__ layer->backward_linked_layers)
		{
			int w_index = (int)link.first;
			// if batch free, then make sure it is zero'd out because we will increment dW set [0]
			if (batch_open[0] == BATCH_FREE) dW_sets[0][w_index].fill(0);
			for (int b = 1; b< _batch_size; b++)
			{
				if (batch_open[b] == BATCH_COMPLETE) dW_sets[0][w_index] += dW_sets[b][w_index];
			}
		}
		if (dynamic_cast<convolution_layer*> (layer) != NULL) continue;
		// bias stuff... that needs to be fixed for conv layers perhaps
		if (batch_open[0] == BATCH_FREE) dbias_sets[0][k].fill(0);
		for (int b = 1; b< _batch_size; b++)
		{
			if (batch_open[b] == BATCH_COMPLETE) dbias_sets[0][k] += dbias_sets[b][k];
		}
	}
	// update weights
	for (int k = layer_cnt - 1; k >= 0; k--)
	{
		layer = layer_sets[MAIN_LAYER_SET][k];
		__for__(auto &link __in__ layer->backward_linked_layers)
		{
			int w_index = (int)link.first;
			// NOTE(review): indexes dW_sets with MAIN_LAYER_SET where the accumulation
			// loop above used [0]; this is only correct if MAIN_LAYER_SET == 0 -- confirm.
			if (dW_sets[MAIN_LAYER_SET][w_index].size() > 0)
				if(W[w_index]) _solver->increment_w(W[w_index], w_index, dW_sets[MAIN_LAYER_SET][w_index]); // -- 10%
		}
		layer->update_bias(dbias_sets[0][k], _solver->learning_rate);
	}
	// prepare to start mini batch over
	reset_mini_batch();
	train_updates++; // could have no updates .. so this is not exact
	sync_layer_sets();
}
// reserve_next.. is used to reserve a space in the minibatch for the existing training sample
// Reserves one mini-batch slot for the calling training thread, returning its
// index.  If all slots are complete it synchronizes (applies the deltas) and
// retries; if some are still in progress it spin-waits (1 ms sleep per probe).
// Takes and releases the batch lock internally.
int reserve_next_batch()
{
	lock_batch();
	int my_batch_index = -3;
	while (my_batch_index < 0)
	{
		my_batch_index = get_next_open_batch();
		if (my_batch_index >= 0) // valid index
		{
			batch_open[my_batch_index] = BATCH_RESERVED;
			unlock_batch();
			return my_batch_index;
		}
		else if (my_batch_index == BATCH_FILLED_COMPLETE) // all index are complete
		{
			sync_mini_batch(); // resets _batch_index to 0
			my_batch_index = get_next_open_batch();
			batch_open[my_batch_index] = BATCH_RESERVED;
			unlock_batch();
			return my_batch_index;
		}
		// need to wait for ones in progress to finish
		unlock_batch();
		mojo_sleep(1);
		lock_batch();
	}
	return -3; // unreachable: loop only exits via the returns above
}
// Small accessors for training knobs.  The solver-backed ones bail() when no
// solver has been set; the smart-train ones just read/write member state.
float get_learning_rate() {if(!_solver) bail("set solver"); return _solver->learning_rate;}
void set_learning_rate(float alpha) {if(!_solver) bail("set solver"); _solver->learning_rate=alpha;}
void reset_solver() {if(!_solver) bail("set solver"); _solver->reset();}
bool get_smart_training() {return _smart_train;}
void set_smart_training(bool _use_train) { _smart_train = _use_train;}
float get_smart_train_level() { return _skip_energy_level; }
void set_smart_train_level(float _level) { _skip_energy_level = _level; }
// Epoch budget; values <= 0 are clamped to 1.
void set_max_epochs(int max_e) { if (max_e <= 0) max_e = 1; max_epochs = max_e; }
int get_epoch() { return epoch_count; }
// goal here is to update the weights W.
// use w_new = w_old - alpha dE/dw
// E = sum: 1/2*||y-target||^2
// note y = f(x*w)
// dE = (target-y)*dy/dw = (target-y)*df/dw = (target-y)*df/dx* dx/dw = (target-y) * df * y_prev
// similarly for cross entropy
// ===========================================================================
// training part
// ===========================================================================
// Enables random training-time augmentation (mode 1): translation by up to
// +/-translate_x / +/-translate_y pixels and optional horizontal/vertical
// flips, padding shifted-in pixels per 'padding'.  Rotation/scale are disabled.
void set_random_augmentation(int translate_x, int translate_y,
	int flip_h, int flip_v, mojo::pad_type padding = mojo::edge)
{
	use_augmentation = 1;
	augment_x = translate_x;
	augment_y = translate_y;
	augment_h_flip = flip_h;
	augment_v_flip = flip_v;
	augment_pad = padding;
	augment_theta = 0; // no rotation in this overload
	augment_scale = 0; // no scaling in this overload
}
// Enables random training-time augmentation (mode 2): same translation/flip
// options as mode 1 plus rotation up to +/-rotation_deg degrees and scaling up
// to +/-scale (rotation/scale are applied only when OpenCV support is built in).
void set_random_augmentation(int translate_x, int translate_y,
	int flip_h, int flip_v, float rotation_deg, float scale, mojo::pad_type padding = mojo::edge)
{
	use_augmentation = 2;
	augment_x = translate_x;
	augment_y = translate_y;
	augment_h_flip = flip_h;
	augment_v_flip = flip_v;
	augment_pad = padding;
	augment_theta = rotation_deg;
	augment_scale = scale;
}
// call before starting training for current epoch
// Begins a training epoch: installs the cost function, zeroes the per-epoch
// counters, and — when smart training has stalled (4+ epochs without a new
// best accuracy) — halves the learning rate, flooring it at 1e-6.
void start_epoch(std::string loss_function="mse")
{
	_cost_function=new_cost_function(loss_function);
	train_correct = 0;
	train_skipped = 0;
	train_updates = 0;
	train_samples = 0;
	if (epoch_count == 0) reset_solver(); // first epoch: fresh solver state
	// accuracy not improving .. slow learning
	if(_smart_train && (best_accuracy_count > 4))
	{
		stuck_counter++;
		set_learning_rate((0.5f)*get_learning_rate());
		if (get_learning_rate() < 0.000001f)
		{
			// heat_weights();
			set_learning_rate(0.000001f);
			stuck_counter++;// end of the line.. so speed up end
		}
		best_accuracy_count = 0;
	}
	old_estimated_accuracy = estimated_accuracy;
	estimated_accuracy = 0;
	//_skip_energy_level = 0.05;
	_running_sum_E = 0; // running loss accumulator used by update_smart_train()
}
// time to stop?
// True when training should stop: epoch budget exhausted, or learning has
// stalled (2 stuck x 4 non-best-accuracy epochs = 8 misses in a row).
bool elvis_left_the_building()
{
	return (epoch_count > max_epochs) || (stuck_counter > 3);
}
// call after putting all training samples through this epoch
// Finishes an epoch: flushes any partially-filled mini-batch, updates the
// estimated accuracy and the stall counters, and returns true when training
// should stop (see elvis_left_the_building()).
bool end_epoch()
{
	// run leftovers through mini-batch
	sync_mini_batch();
	epoch_count++;
	// estimate accuracy of validation run
	// NOTE(review): if no samples were trained this epoch, train_samples is 0
	// and this is a divide-by-zero (inf/NaN for float) -- confirm callers
	// always push at least one sample per epoch.
	estimated_accuracy = 100.f*train_correct / train_samples;
	if (train_correct > best_estimated_accuracy)
	{
		best_estimated_accuracy = (float)train_correct; // stores a raw count, not a percentage
		best_accuracy_count = 0;
		stuck_counter = 0;
	}
	else best_accuracy_count++;
	return elvis_left_the_building();
}
// if smart training was thinking about exiting, calling reset will make it think everything is OK
// Clears all evidence of stalled training so smart-train stops considering
// an early exit.
void reset_smart_training()
{
	best_estimated_accuracy = 0;
	best_accuracy_count = 0;
	stuck_counter = 0;
}
//----------------------------------------------------------------------------------------------------------
// u p d a t e _ s m a r t _ t r a i n
//
// Records one training sample's loss E and correctness, and — every 1000
// samples when smart training is on — recomputes _skip_energy_level, the loss
// threshold below which easy samples are skipped.  The whole body is one OMP
// critical section because it mutates shared counters and _running_E.
void update_smart_train(const float E, bool correct)
{
#ifdef MOJO_OMP
	#pragma omp critical
#endif
	{
		train_samples++;
		if (correct) train_correct++;
		if (_smart_train)
		{
			_running_E.push_back(E);
			_running_sum_E += E;
			const int SMART_TRAIN_SAMPLE_SIZE = 1000;
			int s = (int)_running_E.size();
			if (s >= SMART_TRAIN_SAMPLE_SIZE)
			{
				_running_sum_E /= (double)s; // mean loss over the window
				std::sort(_running_E.begin(), _running_E.end());
				// scale mean loss into a fraction of samples to keep training on
				float top_fraction = (float)_running_sum_E*10.f; //10.
				const float max_fraction = 0.75f;
				const float min_fraction = 0.075f;// 0.03f;
				if (top_fraction > max_fraction) top_fraction = max_fraction;
				if (top_fraction < min_fraction) top_fraction = min_fraction;
				// pick the loss at that percentile as the new skip threshold
				int index = s - 1 - (int)(top_fraction*(s - 1));
				if (_running_E[index] > 0) _skip_energy_level = _running_E[index];
				_running_sum_E = 0;
				_running_E.clear();
			}
		}
		if (E > 0 && E < _skip_energy_level)
		{
			//std::cout << "E=" << E;
			train_skipped++;
		}
	} // omp critical
}
// finish back propogation through the hidden layers
// Completes backpropagation through the hidden layers for one sample: the
// output-layer deltas must already be set by the caller.  Propagates deltas
// top-down, accumulates dW/dbias into this thread's reserved mini-batch slot,
// marks the slot COMPLETE, and triggers sync_mini_batch() when it was the last
// open slot.
void backward_hidden(const int my_batch_index, const int thread_number)
{
	const int layer_cnt = (int)layer_sets[thread_number].size();
	const int last_layer_index = layer_cnt - 1;
	base_layer *layer;// = layer_sets[thread_number][last_layer_index];
	// update hidden layers
	// start at lower layer and push information up to previous layer
	// handle dropout first
	for (int k = last_layer_index; k >= 0; k--)
	{
		layer = layer_sets[thread_number][k];
		// all the signals should be summed up to this layer by now, so we go through and take the grad of activiation
		int nodes = layer->node.size();
		// already did last layer, so skip it
		if (k< last_layer_index)
			for (int i = 0; i< nodes; i++)
				layer->delta.x[i] *= layer->df(layer->node.x, i, nodes);
		// now pass that signal upstream
		__for__(auto &link __in__ layer->backward_linked_layers) // --- 50% of time this loop
		{
			base_layer *p_top = link.second;
			// note all the delta[connections[i].second] should have been calculated by time we get here
			layer->distribute_delta(*p_top, *W[link.first]);
		}
	}
	// update weights - shouldn't matter the direction we update these
	// we can stay in backwards direction...
	// it was not faster to combine distribute_delta and increment_w into the same loop
	int size_W = (int)W.size();
	dW_sets[my_batch_index].resize(size_W);
	dbias_sets[my_batch_index].resize(layer_cnt);
	for (int k = last_layer_index; k >= 0; k--)
	{
		layer = layer_sets[thread_number][k];
		__for__(auto &link __in__ layer->backward_linked_layers)
		{
			base_layer *p_top = link.second;
			int w_index = (int)link.first;
			//if (dynamic_cast<max_pooling_layer*> (layer) != NULL) continue;
			layer->calculate_dw(*p_top, dW_sets[my_batch_index][w_index]);// --- 20%
			// moved this out to sync_mini_batch();
			//_solver->increment_w( W[w_index],w_index, dW_sets[_batch_index][w_index]); // -- 10%
		}
		if (dynamic_cast<convolution_layer*> (layer) != NULL) continue;
		dbias_sets[my_batch_index][k] = layer->delta;
	}
	// if all batches finished, update weights
	lock_batch();
	batch_open[my_batch_index] = BATCH_COMPLETE;
	int next_index = get_next_open_batch();
	if (next_index == BATCH_FILLED_COMPLETE) // all complete
		sync_mini_batch(); // resets _batch_index to 0
	unlock_batch();
}
// Builds the (possibly augmented) input vector for one sample.  'in' holds the
// concatenated raw values of all input layers; the result is a flat matrix of
// the same total size.  When augmentation is enabled, one random draw (SGX
// hardware RNG) decides scale/rotation/flips/shift for this sample, applied
// identically to every input layer.
mojo::matrix make_input(float *in, const int _thread_number)
{
	mojo::matrix augmented_input;// = auto_augmentation();
	std::vector<base_layer *> inputs;
	int in_size = 0;
	__for__(auto layer __in__ layer_sets[_thread_number])
	{
		if (dynamic_cast<input_layer*> (layer) != NULL)
		{
			inputs.push_back(layer);
			in_size += layer->node.size();
		}
	}
	if (use_augmentation > 0)
	{
		augmented_input.resize(in_size, 1, 1);
		unsigned int randint[6];
		sgx_read_rand((unsigned char *)randint, sizeof(int)*6);
		float s = ((float)(randint[0] % 101) / 50.f - 1.f)*augment_scale; // random scale in [-scale, +scale]
		float t = ((float)(randint[1] % 101) / 50.f - 1.f)*augment_theta; // random rotation in [-theta, +theta]
		//float s = ((float)(rand() % 101) / 50.f - 1.f)*augment_scale;
		//float t = ((float)(rand() % 101) / 50.f - 1.f)*augment_theta;
		bool flip_h = ((randint[2] % 2)*augment_h_flip) ? true: false;
		bool flip_v = ((randint[3] % 2)*augment_v_flip) ? true: false;
		int shift_x = (randint[4] % (augment_x * 2 + 1)) - augment_x;
		int shift_y = (randint[5] % (augment_y * 2 + 1)) - augment_y;
		int offset = 0;
		__for__(auto layer __in__ inputs)
		{
			//memcpy(layer->node.x, in_ptr, sizeof(float)*layer->node.size());
			//in_ptr += layer->node.size();
			// copy input to matrix type
			mojo::matrix m(layer->node.cols, layer->node.rows, layer->node.chans, in + offset);
			if (m.rows > 1 && m.cols > 1) // only 2-D inputs get spatial augmentation
			{
#if defined(MOJO_CV2) || defined(MOJO_CV3)
				if ((augment_theta > 0 || augment_scale > 0))
					m = transform(m, m.cols / 2, m.rows / 2, m.cols, t, 1 + s);
#endif
				// NOTE(review): flip_v maps to flip_cols and flip_h to flip_rows here,
				// which is the opposite pairing of train_class() -- confirm which is intended.
				if (flip_v)m = m.flip_cols();
				if (flip_h) m = m.flip_rows();
				mojo::matrix aug = m.shift(shift_x, shift_y, augment_pad);
				memcpy(augmented_input.x + offset, aug.x, sizeof(float)*aug.size());
				offset += aug.size();
			}
			else
			{
				// 1-D input: pass through untouched
				memcpy(augmented_input.x + offset, m.x, sizeof(float)*m.size());
				offset += m.size();
			}
		}
		// input = augmented_input.x;
	}
	else
	{
		augmented_input.resize(in_size, 1, 1);
		memcpy(augmented_input.x, in, sizeof(float)*in_size);
	}
	return augmented_input;
}
//----------------------------------------------------------------------------------------------------------
// T R A I N C L A S S
//
// after starting epoch, call this to train against a class label
// label_index must be 0 to out_size()-1
// for thread safety, you must pass in the thread_index if calling from different threads
// Trains the network on one sample against a class label (0..out_size()-1).
// Returns true when a backprop update was queued, false when the sample was
// skipped (smart training), rejected, or no batch slot could be reserved.
// For thread safety pass a distinct _thread_number per calling thread.
bool train_class(float *in, int label_index, int _thread_number = -1)
{
	if (_solver == NULL) bail("set solver");
	if (_thread_number < 0) _thread_number = get_thread_num();
	if (_thread_number > _thread_count) bail("call allow_threads()");
	const int thread_number = _thread_number;
	/*
	mojo::matrix augmented_input = make_input(in, thread_number);
	/*/
	float *input = in;
	mojo::matrix augmented_input;
	if (use_augmentation > 0)
	{
		//augment_h_flip = flip_h;
		//augment_v_flip = flip_v;
		// copy input to matrix type
		mojo::matrix m(layer_sets[thread_number][0]->node.cols, layer_sets[thread_number][0]->node.rows, layer_sets[thread_number][0]->node.chans, in);
#if defined(MOJO_CV2) || defined(MOJO_CV3)
		if (augment_theta > 0 || augment_scale > 0)
		{
			float s = ((float)(rand() % 101) / 50.f - 1.f)*augment_scale;
			float t = ((float)(rand() % 101) / 50.f - 1.f)*augment_theta;
			m = transform(m, m.cols / 2, m.rows / 2, m.cols, t, 1+s);
		}
#endif
		unsigned int randint[4];
		// BUG FIX: was sgx_read_rand(..., 4), which filled only 4 of the 16 bytes
		// and left randint[1..3] uninitialized (length is in bytes, cf. the
		// sizeof(int)*6 usage in make_input()).
		sgx_read_rand((unsigned char *)randint, sizeof(randint));
		if (augment_h_flip)
			if ((randint[0] % 2) == 0)
				m = m.flip_cols();
		if (augment_v_flip)
			if ((randint[1] % 2) == 0)
				m = m.flip_rows();
		augmented_input = m.shift((randint[2] % (augment_x * 2 + 1)) - augment_x, (randint[3] % (augment_y * 2 + 1)) - augment_y, augment_pad);
		input = augmented_input.x;
	}
	//*/
	// get next free mini_batch slot
	// this is tied to the current state of the model
	int my_batch_index = reserve_next_batch();
	// out of data or an error if index is negative
	if (my_batch_index < 0) return false;
	// run through forward to get nodes activated
	forward(input, thread_number, 1);
	// set all deltas to zero
	__for__(auto layer __in__ layer_sets[thread_number]) layer->delta.fill(0.f);
	int layer_cnt = (int)layer_sets[thread_number].size();
	// calc delta for last layer to prop back up through network
	// d = (target-out)* grad_activiation(out)
	const int last_layer_index = layer_cnt - 1;
	base_layer *layer = layer_sets[thread_number][last_layer_index];
	const int layer_node_size = layer->node.size();
	if (dynamic_cast<dropout_layer*> (layer) != NULL) bail("can't have dropout on last layer");
	float E = 0;
	int max_j_out = 0;
	int max_j_target = label_index;
	// a vector mapping the label index to the desired target output node values:
	// 0/1 targets for sigmoid/softmax-style outputs, -1/+1 otherwise
	std::vector<float> target;
	if((std::string("sigmoid").compare(layer->p_act->name) == 0) || (std::string("softmax").compare(layer->p_act->name) == 0)|| (std::string("brokemax").compare(layer->p_act->name) == 0))
		target = std::vector<float>(layer_node_size, 0);
	else
		target = std::vector<float>(layer_node_size, -1);
	if(label_index>=0 && label_index<layer_node_size) target[label_index] = 1;
	// because of numerator/denominator cancellations which prevent a divide by zero issue,
	// we need to handle some things special on output layer
	float cost_activation_type = 0;
	if ((std::string("sigmoid").compare(layer->p_act->name) == 0) &&
		(std::string("cross_entropy").compare(_cost_function->name) == 0))
		cost_activation_type = 1;
	else if ((std::string("softmax").compare(layer->p_act->name) == 0) &&
		(std::string("cross_entropy").compare(_cost_function->name) == 0))
		cost_activation_type = 1;
	else if ((std::string("tanh").compare(layer->p_act->name) == 0) &&
		(std::string("cross_entropy").compare(_cost_function->name) == 0))
		cost_activation_type = 4;
	for (int j = 0; j < layer_node_size; j++)
	{
		if(cost_activation_type>0)
			layer->delta.x[j] = cost_activation_type*(layer->node.x[j]- target[j]);
		else
			layer->delta.x[j] = _cost_function->d_cost(layer->node.x[j], target[j])*layer->df(layer->node.x, j, layer_node_size);
		// pick best response
		if (layer->node.x[max_j_out] < layer->node.x[j]) max_j_out = j;
		// for better E maybe just look at 2 highest scores so zeros don't dominate
		E += mse::cost(layer->node.x[j], target[j]);
	}
	E /= (float)layer_node_size;
	// check for NAN (NaN compares unequal to itself)
	if (E != E) bail("network blew up - try lowering learning rate\n");
	// critical section in here, blocking update
	bool match = false;
	if ((max_j_target == max_j_out)) match = true;
	update_smart_train(E, match);
	// smart training: easy, already-correct samples free their slot and skip backprop
	if (E>0 && E<_skip_energy_level && _smart_train && match)
	{
		lock_batch();
		batch_open[my_batch_index] = BATCH_FREE;
		unlock_batch();
		return false; // return without doing training
	}
	backward_hidden(my_batch_index, thread_number);
	return true;
}
//----------------------------------------------------------------------------------------------------------
// T R A I N T A R G E T
//
// after starting epoch, call this to train against a target vector
// for thread safety, you must pass in the thread_index if calling from different threads
// if positive=1, goal is to minimize the distance between in and target
bool train_target(float *in, float *target, int positive=1, int _thread_number = -1)
{
if (_solver == NULL) bail("set solver");
if (_thread_number < 0) _thread_number = get_thread_num();
if (_thread_number > _thread_count) bail("need to enable OMP");
const int thread_number = _thread_number;
mojo::matrix augmented_input = make_input(in, thread_number);
float *input = augmented_input.x;
// get next free mini_batch slot
// this is tied to the current state of the model
int my_batch_index = reserve_next_batch();
// out of data or an error if index is negative
if (my_batch_index < 0) return false;
// run through forward to get nodes activated
float *out=forward(in, thread_number, 1);
// set all deltas to zero
__for__(auto layer __in__ layer_sets[thread_number]) layer->delta.fill(0.f);
int layer_cnt = (int)layer_sets[thread_number].size();
// calc delta for last layer to prop back up through network
// d = (target-out)* grad_activiation(out)
const int last_layer_index = layer_cnt - 1;
base_layer *layer = layer_sets[thread_number][last_layer_index];
const int layer_node_size = layer->node.size();
if (dynamic_cast<dropout_layer*> (layer) != NULL) bail("can't have dropout on last layer");
float E = 0;
int max_j_out = 0;
//int max_j_target = label_index;
// was passing this in, but may as well just create it on the fly
// a vector mapping the label index to the desired target output node values
// all -1 except target node 1
// std::vector<float> target;
//if ((std::string("sigmoid").compare(layer->p_act->name) == 0) || (std::string("softmax").compare(layer->p_act->name) == 0))
// target = std::vector<float>(layer_node_size, 0);
// else
// target = std::vector<float>(layer_node_size, -1);
// if (label_index >= 0 && label_index<layer_node_size) target[label_index] = 1;
const float grad_fudge = 1.0f;
// because of numerator/demoninator cancellations which prevent a divide by zero issue,
// we need to handle some things special on output layer
float cost_activation_type = 0;
if ((std::string("sigmoid").compare(layer->p_act->name) == 0) &&
(std::string("cross_entropy").compare(_cost_function->name) == 0))
cost_activation_type = 1;
else if ((std::string("softmax").compare(layer->p_act->name) == 0) &&
(std::string("cross_entropy").compare(_cost_function->name) == 0))
cost_activation_type = 1;
else if ((std::string("brokemax").compare(layer->p_act->name) == 0) &&
(std::string("cross_entropy").compare(_cost_function->name) == 0))
cost_activation_type = 1;
else if ((std::string("tanh").compare(layer->p_act->name) == 0) &&
(std::string("cross_entropy").compare(_cost_function->name) == 0))
cost_activation_type = 4;
for (int j = 0; j < layer_node_size; j++)
{
if (positive) // want to minimize distance
{
if (cost_activation_type > 0)
layer->delta.x[j] = grad_fudge*cost_activation_type*(layer->node.x[j] - target[j]);
else
layer->delta.x[j] = grad_fudge*_cost_function->d_cost(layer->node.x[j], target[j])*layer->df(layer->node.x, j, layer_node_size);
}
else
{
if (cost_activation_type > 0)
layer->delta.x[j] = grad_fudge*cost_activation_type*(1.f-abs(layer->node.x[j] - target[j]));
else
layer->delta.x[j] = grad_fudge*(1.f-abs(_cost_function->d_cost(layer->node.x[j], target[j])))*layer->df(layer->node.x, j, layer_node_size);
}
// pick best response
if (layer->node.x[max_j_out] < layer->node.x[j]) max_j_out = j;
// for better E maybe just look at 2 highest scores so zeros don't dominate
// L2 distance x 2
E += mse::cost(layer->node.x[j], target[j]);
}
E /= (float)layer_node_size;
// check for NAN
if (E != E) bail("network blew up - try lowering learning rate\n");
// critical section in here, blocking update
bool match = false;
// FIxME if ((max_j_target == max_j_out)) match = true;
if (E < 0.01 && positive) match = true;
else if (E > 0.1 && !positive) match = true;
update_smart_train(E, match);
if (E>0 && E<_skip_energy_level && _smart_train && match)
{
lock_batch();
batch_open[my_batch_index] = BATCH_FREE;
unlock_batch();
return false; // return without doing training
}
backward_hidden(my_batch_index, thread_number);
return true;
}
#else
// MOJO_NO_TRAINING build: inference-only; the training API collapses to no-op stubs.
float get_learning_rate() {return 0;}
void set_learning_rate(float alpha) {}
void train(float *in, float *target){}
void reset() {}
float get_smart_train_level() {return 0;}
void set_smart_train_level(float _level) {}
bool get_smart_train() { return false; }
void set_smart_train(bool _use) {}
#endif
};
}
|
ex2.c | #include <stdlib.h>
#include <omp.h>
#include <stdio.h>
#include <math.h>
#define N 1000
#define CHUNKSIZE 4
int * scan(int * num, int len);
/* Reads k, then 2^k integers, runs the parallel prefix scan and prints the
 * result.  Returns 0 on success, 1 on bad input. */
int main(int argc, char *argv[]) {
	int i = 0;
	int read;
	int k = 0;
	printf("Enter the number k to set the length of the array(len=2^k):\n");
	/* check scanf and bound k: num[] is a stack VLA, so cap 2^k at ~1M ints */
	if (scanf("%d", &read) != 1 || read < 0 || read > 20) {
		fprintf(stderr, "invalid k (expected 0..20)\n");
		return 1;
	}
	k = read;
	printf("Ok, enter %d values:\n", (int)pow(2, k));
	int len = (int)pow(2, k);
	int num[len];
	while (i < len) {
		if (scanf("%d", &read) != 1) {
			fprintf(stderr, "invalid value\n");
			return 1;
		}
		num[i++] = read;
	}
	int *s = scan(num, len);
	for (int j = 0; j < len; j++) {
		printf("s[%d]=%d\n", j, s[j]);
	}
	/* scan() returns num itself when len==1; only heap results are freed */
	if (len > 1) free(s);
	return 0;
}
/* Recursive parallel prefix sum (inclusive scan) over num[0..len-1].
 * len must be a power of two.  Returns num itself when len==1, otherwise a
 * heap array of length len owned by the caller (free it when len>1). */
int * scan(int * num, int len){
	if(len==1){
		return num;
	}
	int chunk = CHUNKSIZE;
	int i=0;
	int * s = malloc(len * sizeof(int));
	if (s == NULL) {                  /* check allocation before writing */
		fprintf(stderr, "out of memory\n");
		exit(1);
	}
	int y[(int)(len/2)];
	int z[(int)(len/2)];
	for(i=0; i<len; i++){
		s[i]=num[i];
	}
	/* pairwise sums: y[i] = num[2i] + num[2i+1] */
	#pragma omp parallel shared(y,num) private(i)
	{
		#pragma omp for schedule(static,chunk)
		for (i=0; i < (int)(len/2); i++){
			y[i]=num[2*i]+num[(2*i)+1];
		}
	}
	int * ret=scan(y, len/2);
	/* combine: odd positions take the half-length scan directly,
	 * even positions add their own element to the previous partial sum */
	#pragma omp parallel shared(s,z,num) private(i)
	{
		#pragma omp for schedule(static,chunk)
		for (i=0; i < (int)(len/2); i++){
			z[i]=ret[i];
		}
		#pragma omp for schedule(static,chunk)
		for (i=0; i < len; i++){
			if(i%2==1){
				s[i]=z[(int)i/2];
			}else if(i==0){
				s[i]=num[i];
			}else{
				s[i]=z[(int)((i-1)/2)]+num[i];
			}
		}
	} /* end of parallel region */
	/* BUG FIX: the recursive result leaked.  scan(y, 1) returns y (stack),
	 * but deeper calls return heap memory that must be freed here. */
	if (len/2 > 1) free(ret);
	return s;
}
|
nestedpar1.c | #include<omp.h>
#include <stdio.h>
/* Intentionally empty output stub; kept so callers still compile/link. */
void paroutput(char* s)
{
	(void)s; /* silence unused-parameter warning */
}
/* Demonstrates nested parallel regions around a single construct: inner
 * parallel regions before, inside, and after an omp single block. */
int main(void)
{
#ifdef _OPENMP
	/* enable nested parallelism (deprecated in OpenMP 5.0 in favor of
	 * omp_set_max_active_levels, but still widely supported) */
	omp_set_nested(1);
#endif
	#pragma omp parallel
	{
		#pragma omp parallel
		printf("before single.\n");
		/* only one thread of the outer team executes this block */
		#pragma omp single
		{
			#pragma omp parallel
			printf("Inside single.\n");
		}
		#pragma omp parallel
		printf("after single.\n");
	}
	return 0;
}
|
libgomp.h | /* Copyright (C) 2005-2017 Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@redhat.com>.
This file is part of the GNU Offloading and Multi Processing Library
(libgomp).
Libgomp is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* This file contains data types and function declarations that are not
part of the official OpenACC or OpenMP user interfaces. There are
declarations in here that are part of the GNU Offloading and Multi
Processing ABI, in that the compiler is required to know about them
and use them.
The convention is that the all caps prefix "GOMP" is used group items
that are part of the external ABI, and the lower case prefix "gomp"
is used group items that are completely private to the library. */
#ifndef LIBGOMP_H
#define LIBGOMP_H 1
#ifndef _LIBGOMP_CHECKING_
/* Define to 1 to perform internal sanity checks. */
#define _LIBGOMP_CHECKING_ 0
#endif
#include "config.h"
#include "gstdint.h"
#include "libgomp-plugin.h"
#ifdef HAVE_PTHREAD_H
#include <pthread.h>
#endif
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
/* Needed for memset in priority_queue.c. */
#if _LIBGOMP_CHECKING_
# ifdef STRING_WITH_STRINGS
# include <string.h>
# include <strings.h>
# else
# ifdef HAVE_STRING_H
# include <string.h>
# else
# ifdef HAVE_STRINGS_H
# include <strings.h>
# endif
# endif
# endif
#endif
#ifdef HAVE_ATTRIBUTE_VISIBILITY
# pragma GCC visibility push(hidden)
#endif
/* If we were a C++ library, we'd get this from <std/atomic>. */
enum memmodel
{
  MEMMODEL_RELAXED = 0,	/* matches C11 memory_order_relaxed */
  MEMMODEL_CONSUME = 1,	/* matches C11 memory_order_consume */
  MEMMODEL_ACQUIRE = 2,	/* matches C11 memory_order_acquire */
  MEMMODEL_RELEASE = 3,	/* matches C11 memory_order_release */
  MEMMODEL_ACQ_REL = 4,	/* matches C11 memory_order_acq_rel */
  MEMMODEL_SEQ_CST = 5	/* matches C11 memory_order_seq_cst */
};
/* alloc.c */
extern void *gomp_malloc (size_t) __attribute__((malloc));
extern void *gomp_malloc_cleared (size_t) __attribute__((malloc));
extern void *gomp_realloc (void *, size_t);
/* Avoid conflicting prototypes of alloca() in system headers by using
GCC's builtin alloca(). */
#define gomp_alloca(x) __builtin_alloca(x)
/* error.c */
extern void gomp_vdebug (int, const char *, va_list);
extern void gomp_debug (int, const char *, ...)
__attribute__ ((format (printf, 2, 3)));
#define gomp_vdebug(KIND, FMT, VALIST) \
do { \
if (__builtin_expect (gomp_debug_var, 0)) \
(gomp_vdebug) ((KIND), (FMT), (VALIST)); \
} while (0)
#define gomp_debug(KIND, ...) \
do { \
if (__builtin_expect (gomp_debug_var, 0)) \
(gomp_debug) ((KIND), __VA_ARGS__); \
} while (0)
extern void gomp_verror (const char *, va_list);
extern void gomp_error (const char *, ...)
__attribute__ ((format (printf, 1, 2)));
extern void gomp_vfatal (const char *, va_list)
__attribute__ ((noreturn));
extern void gomp_fatal (const char *, ...)
__attribute__ ((noreturn, format (printf, 1, 2)));
struct gomp_task;
struct gomp_taskgroup;
struct htab;
#include "priority_queue.h"
#include "sem.h"
#include "mutex.h"
#include "bar.h"
#include "simple-bar.h"
#include "ptrlock.h"
/* This structure contains the data to control one work-sharing construct,
either a LOOP (FOR/DO) or a SECTIONS. */
/* Loop schedule selected by the SCHEDULE clause (or OMP_SCHEDULE for
   GFS_RUNTIME).  */
enum gomp_schedule_type
{
  GFS_RUNTIME,	/* schedule(runtime): resolved at run time */
  GFS_STATIC,
  GFS_DYNAMIC,
  GFS_GUIDED,
  GFS_AUTO,
  /* NOTE(review): the following three values are not in upstream libgomp;
     presumably local extensions for heterogeneous/hierarchical scheduling --
     confirm against this tree's loop.c.  */
  GFS_HETPROBE,
  GFS_HIERARCHY_STATIC,
  GFS_HIERARCHY_DYNAMIC,
};
/* Per-workshare bookkeeping for DOACROSS loops (ordered(n) with
   depend(sink)/depend(source)).  Hangs off gomp_work_share::doacross.  */
struct gomp_doacross_work_share
{
  union {
    /* chunk_size copy, as ws->chunk_size is multiplied by incr for
       GFS_DYNAMIC.  */
    long chunk_size;
    /* Likewise, but for ull implementation.  */
    unsigned long long chunk_size_ull;
    /* For schedule(static,0) this is the number
       of iterations assigned to the last thread, i.e. number of
       iterations / number of threads.  */
    long q;
    /* Likewise, but for ull implementation.  */
    unsigned long long q_ull;
  };
  /* Size of each array entry (padded to cache line size).  */
  unsigned long elt_sz;
  /* Number of dimensions in sink vectors.  */
  unsigned int ncounts;
  /* True if the iterations can be flattened.  */
  bool flattened;
  /* Actual array (of elt_sz sized units), aligned to cache line size.
     This is indexed by team_id for GFS_STATIC and outermost iteration
     / chunk_size for other schedules.  */
  unsigned char *array;
  /* These two are only used for schedule(static,0).  */
  /* This one is number of iterations % number of threads.  */
  long t;
  union {
    /* And this one is cached t * (q + 1).  */
    long boundary;
    /* Likewise, but for the ull implementation.  */
    unsigned long long boundary_ull;
  };
  /* Array of shift counts for each dimension if they can be flattened.  */
  unsigned int shift_counts[];
};
/* State for one work-sharing construct (a LOOP (FOR/DO) or a SECTIONS),
   shared by all threads of the team executing it.  */
struct gomp_work_share
{
  /* This member records the SCHEDULE clause to be used for this construct.
     The user specification of "runtime" will already have been resolved.
     If this is a SECTIONS construct, this value will always be DYNAMIC.  */
  enum gomp_schedule_type sched;

  int mode;

  union {
    struct {
      /* This is the chunk_size argument to the SCHEDULE clause.  */
      long chunk_size;

      /* This is the iteration end point.  If this is a SECTIONS construct,
	 this is the number of contained sections.  */
      long end;

      /* This is the iteration step.  If this is a SECTIONS construct, this
	 is always 1.  */
      long incr;
    };

    struct {
      /* The same as above, but for the unsigned long long loop variants.  */
      unsigned long long chunk_size_ull;
      unsigned long long end_ull;
      unsigned long long incr_ull;
    };
  };

  union {
    /* This is a circular queue that details which threads will be allowed
       into the ordered region and in which order.  When a thread allocates
       iterations on which it is going to work, it also registers itself at
       the end of the array.  When a thread reaches the ordered region, it
       checks to see if it is the one at the head of the queue.  If not, it
       blocks on its RELEASE semaphore.  */
    unsigned *ordered_team_ids;

    /* This is a pointer to DOACROSS work share data.  */
    struct gomp_doacross_work_share *doacross;
  };

  /* This is the number of threads that have registered themselves in
     the circular queue ordered_team_ids.  */
  unsigned ordered_num_used;

  /* This is the team_id of the currently acknowledged owner of the ordered
     section, or -1u if the ordered section has not been acknowledged by
     any thread.  This is distinguished from the thread that is *allowed*
     to take the section next.  */
  unsigned ordered_owner;

  /* This is the index into the circular queue ordered_team_ids of the
     current thread that's allowed into the ordered reason.  */
  unsigned ordered_cur;

  /* This is a chain of allocated gomp_work_share blocks, valid only
     in the first gomp_work_share struct in the block.  */
  struct gomp_work_share *next_alloc;

  /* The above fields are written once during workshare initialization,
     or related to ordered worksharing.  Make sure the following fields
     are in a different cache line.  */

  /* This lock protects the update of the following members.  */
  gomp_mutex_t lock __attribute__((aligned (64)));

  /* This is the count of the number of threads that have exited the work
     share construct.  If the construct was marked nowait, they have moved on
     to other work; otherwise they're blocked on a barrier.  The last member
     of the team to exit the work share construct must deallocate it.  */
  unsigned threads_completed;

  union {
    /* This is the next iteration value to be allocated.  In the case of
       GFS_STATIC loops, this the iteration start point and never changes.  */
    long next;

    /* The same, but with unsigned long long type.  */
    unsigned long long next_ull;

    /* This is the returned data structure for SINGLE COPYPRIVATE.  */
    void *copyprivate;
  };

  union {
    /* Link to gomp_work_share struct for next work sharing construct
       encountered after this one.  */
    gomp_ptrlock_t next_ws;

    /* gomp_work_share structs are chained in the free work share cache
       through this.  */
    struct gomp_work_share *next_free;
  };

  /* If only few threads are in the team, ordered_team_ids can point
     to this array which fills the padding at the end of this struct.  */
  unsigned inline_ordered_team_ids[0];
};
/* This structure contains all of the thread-local data associated with
a thread team. This is the data that must be saved when a thread
encounters a nested PARALLEL construct. */
struct gomp_team_state
{
/* This is the team of which the thread is currently a member. */
struct gomp_team *team;
/* This is the work share construct which this thread is currently
processing. Recall that with NOWAIT, not all threads may be
processing the same construct. */
struct gomp_work_share *work_share;
/* This is the previous work share construct or NULL if there wasn't any.
When all threads are done with the current work sharing construct,
the previous one can be freed. The current one can't, as its
next_ws field is used. */
struct gomp_work_share *last_work_share;
/* This is the ID of this thread within the team. This value is
guaranteed to be between 0 and N-1, where N is the number of
threads in the team. */
unsigned team_id;
/* Nesting level. */
unsigned level;
/* Active nesting level. Only active parallel regions are counted. */
unsigned active_level;
/* Place-partition-var, offset and length into gomp_places_list array. */
unsigned place_partition_off;
unsigned place_partition_len;
#ifdef HAVE_SYNC_BUILTINS
/* Number of single stmts encountered. */
unsigned long single_count;
#endif
/* For GFS_RUNTIME loops that resolved to GFS_STATIC, this is the
trip number through the loop. So first time a particular loop
is encountered this number is 0, the second time through the loop
is 1, etc. This is unused when the compiler knows in advance that
the loop is statically scheduled. */
unsigned long static_trip;
};
struct target_mem_desc;
/* These are the OpenMP 4.0 Internal Control Variables described in
section 2.3.1. Those described as having one copy per task are
stored within the structure; those described as having one copy
for the whole program are (naturally) global variables. */
struct gomp_task_icv
{
  /* nthreads-var: default number of threads for parallel regions.  */
  unsigned long nthreads_var;
  /* run-sched-var: schedule used when schedule(runtime) is requested.  */
  enum gomp_schedule_type run_sched_var;
  /* Chunk size that accompanies RUN_SCHED_VAR.  */
  int run_sched_chunk_size;
  /* default-device-var: default device for target constructs.  */
  int default_device_var;
  /* thread-limit-var: upper bound on threads in the contention group.  */
  unsigned int thread_limit_var;
  /* dyn-var: whether dynamic adjustment of thread counts is enabled.  */
  bool dyn_var;
  /* nest-var: whether nested parallelism is enabled.  */
  bool nest_var;
  /* bind-var: thread affinity (proc_bind) policy.  */
  char bind_var;
  /* Internal ICV.  */
  struct target_mem_desc *target_data;
};
extern struct gomp_task_icv gomp_global_icv;
#ifndef HAVE_SYNC_BUILTINS
extern gomp_mutex_t gomp_managed_threads_lock;
extern gomp_mutex_t popcorn_tid_lock;
#endif
extern unsigned long gomp_max_active_levels_var;
extern bool gomp_cancel_var;
extern int gomp_max_task_priority_var;
extern unsigned long long gomp_spin_count_var, gomp_throttled_spin_count_var;
extern unsigned long gomp_available_cpus, gomp_managed_threads;
extern unsigned long *gomp_nthreads_var_list, gomp_nthreads_var_list_len;
extern char *gomp_bind_var_list;
extern unsigned long gomp_bind_var_list_len;
extern void **gomp_places_list;
extern unsigned long gomp_places_list_len;
extern unsigned int gomp_num_teams_var;
extern int gomp_debug_var;
extern int goacc_device_num;
extern char *goacc_device_type;
/* Popcorn profiling machinery. */
extern bool popcorn_profiling;
extern const char *popcorn_prof_fn;
extern FILE *popcorn_prof_fp;
enum gomp_task_kind
{
/* Implicit task. */
GOMP_TASK_IMPLICIT,
/* Undeferred task. */
GOMP_TASK_UNDEFERRED,
/* Task created by GOMP_task and waiting to be run. */
GOMP_TASK_WAITING,
/* Task currently executing or scheduled and about to execute. */
GOMP_TASK_TIED,
/* Used for target tasks that have vars mapped and async run started,
but not yet completed. Once that completes, they will be readded
into the queues as GOMP_TASK_WAITING in order to perform the var
unmapping. */
GOMP_TASK_ASYNC_RUNNING
};
struct gomp_task_depend_entry
{
/* Address of dependency. */
void *addr;
struct gomp_task_depend_entry *next;
struct gomp_task_depend_entry *prev;
/* Task that provides the dependency in ADDR. */
struct gomp_task *task;
/* Depend entry is of type "IN". */
bool is_in;
bool redundant;
bool redundant_out;
};
/* Growable vector of tasks that depend on a given task.  */
struct gomp_dependers_vec
{
  /* Number of elements currently stored in ELEM.  */
  size_t n_elem;
  /* Allocated capacity of ELEM.  */
  size_t allocated;
  /* Flexible array of dependent tasks.  */
  struct gomp_task *elem[];
};
/* Used when in GOMP_taskwait or in gomp_task_maybe_wait_for_dependencies. */
struct gomp_taskwait
{
bool in_taskwait;
bool in_depend_wait;
/* Number of tasks we are waiting for. */
size_t n_depend;
gomp_sem_t taskwait_sem;
};
/* This structure describes a "task" to be run by a thread.  */
struct gomp_task
{
  /* Parent of this task.  */
  struct gomp_task *parent;
  /* Children of this task.  */
  struct priority_queue children_queue;
  /* Taskgroup this task belongs in.  */
  struct gomp_taskgroup *taskgroup;
  /* Tasks that depend on this task.  */
  struct gomp_dependers_vec *dependers;
  /* Hash table over the DEPEND entries -- presumably keyed by dependency
     address; heap-allocated and released by gomp_finish_task.  */
  struct htab *depend_hash;
  /* Wait state when a thread blocks in GOMP_taskwait or in
     gomp_task_maybe_wait_for_dependencies (see struct gomp_taskwait).  */
  struct gomp_taskwait *taskwait;
  /* Number of items in DEPEND.  */
  size_t depend_count;
  /* Number of tasks this task depends on.  Once this counter reaches
     0, we have no unsatisfied dependencies, and this task can be put
     into the various queues to be scheduled.  */
  size_t num_dependees;
  /* Priority of this task.  */
  int priority;
  /* The priority node for this task in each of the different queues.
     We put this here to avoid allocating space for each priority
     node.  Then we play offsetof() games to convert between pnode[]
     entries and the gomp_task in which they reside.  */
  struct priority_node pnode[3];
  /* ICVs in effect for this task (returned by gomp_icv).  */
  struct gomp_task_icv icv;
  /* Task body and the argument it is invoked with.  */
  void (*fn) (void *);
  void *fn_data;
  /* Lifecycle state of the task (see enum gomp_task_kind).  */
  enum gomp_task_kind kind;
  /* NOTE(review): presumably true when executing within a tied task
     region -- confirm against task.c.  */
  bool in_tied_task;
  /* Presumably set for tasks created with the final clause in effect --
     TODO confirm.  */
  bool final_task;
  /* NOTE(review): looks related to C++ firstprivate copy construction
     ordering -- confirm against task.c.  */
  bool copy_ctors_done;
  /* Set for undeferred tasks with unsatisfied dependencies which
     block further execution of their parent until the dependencies
     are satisfied.  */
  bool parent_depends_on;
  /* Dependencies provided and/or needed for this task.  DEPEND_COUNT
     is the number of items available.  */
  struct gomp_task_depend_entry depend[];
};
/* This structure describes a single #pragma omp taskgroup.  */
struct gomp_taskgroup
{
  /* Enclosing taskgroup; taskgroups form a stack via this link.  */
  struct gomp_taskgroup *prev;
  /* Queue of tasks that belong in this taskgroup.  */
  struct priority_queue taskgroup_queue;
  /* Set while a thread waits for the taskgroup's tasks to complete
     (cf. gomp_taskwait.in_taskwait).  */
  bool in_taskgroup_wait;
  /* True once this taskgroup has been cancelled.  */
  bool cancelled;
  /* Semaphore the waiting thread blocks on.  */
  gomp_sem_t taskgroup_sem;
  /* Number of child tasks that have not yet completed.  */
  size_t num_children;
};
/* Various state of OpenMP async offloading tasks. */
enum gomp_target_task_state
{
GOMP_TARGET_TASK_DATA,
GOMP_TARGET_TASK_BEFORE_MAP,
GOMP_TARGET_TASK_FALLBACK,
GOMP_TARGET_TASK_READY_TO_RUN,
GOMP_TARGET_TASK_RUNNING,
GOMP_TARGET_TASK_FINISHED
};
/* This structure describes a target task.  */
struct gomp_target_task
{
  /* Device descriptor this task offloads to.  */
  struct gomp_device_descr *devicep;
  /* Function to run (offloaded kernel or host fallback).  */
  void (*fn) (void *);
  /* Number of entries in the HOSTADDRS/SIZES/KINDS mapping arrays.  */
  size_t mapnum;
  /* Sizes of the mapped objects.  */
  size_t *sizes;
  /* Mapping kinds of the mapped objects.  */
  unsigned short *kinds;
  /* Flag bits for this target region.  */
  unsigned int flags;
  /* Current lifecycle state (see enum gomp_target_task_state).  */
  enum gomp_target_task_state state;
  /* Target memory descriptor holding the mapped variables.  */
  struct target_mem_desc *tgt;
  /* The gomp_task this target task is attached to.  */
  struct gomp_task *task;
  /* Team of the encountering thread.  */
  struct gomp_team *team;
  /* Device-specific target arguments.  */
  void **args;
  /* Host addresses of the mapped objects (flexible array).  */
  void *hostaddrs[];
};
/* This structure describes a "team" of threads. These are the threads
that are spawned by a PARALLEL constructs, as well as the work sharing
constructs that the team encounters. */
struct gomp_team
{
/* This is the number of threads in the current team. */
unsigned nthreads;
/* This is number of gomp_work_share structs that have been allocated
as a block last time. */
unsigned work_share_chunk;
/* This is the saved team state that applied to a master thread before
the current thread was created. */
struct gomp_team_state prev_ts;
/* This semaphore should be used by the master thread instead of its
"native" semaphore in the thread structure. Required for nested
parallels, as the master is a member of two teams. */
gomp_sem_t master_release;
/* This points to an array with pointers to the release semaphore
of the threads in the team. */
gomp_sem_t **ordered_release;
/* List of work shares on which gomp_fini_work_share hasn't been
called yet. If the team hasn't been cancelled, this should be
equal to each thr->ts.work_share, but otherwise it can be a possibly
long list of workshares. */
struct gomp_work_share *work_shares_to_free;
/* List of gomp_work_share structs chained through next_free fields.
This is populated and taken off only by the first thread in the
team encountering a new work sharing construct, in a critical
section. */
struct gomp_work_share *work_share_list_alloc;
/* List of gomp_work_share structs freed by free_work_share. New
entries are atomically added to the start of the list, and
alloc_work_share can safely only move all but the first entry
to work_share_list alloc, as free_work_share can happen concurrently
with alloc_work_share. */
struct gomp_work_share *work_share_list_free;
#ifdef HAVE_SYNC_BUILTINS
/* Number of simple single regions encountered by threads in this
team. */
unsigned long single_count;
#else
/* Mutex protecting addition of workshares to work_share_list_free. */
gomp_mutex_t work_share_list_free_lock;
#endif
/* This barrier is used for most synchronization of the team. */
gomp_barrier_t barrier;
/* Initial work shares, to avoid allocating any gomp_work_share
structs in the common case. */
struct gomp_work_share work_shares[8];
gomp_mutex_t task_lock;
/* Scheduled tasks. */
struct priority_queue task_queue;
/* Number of all GOMP_TASK_{WAITING,TIED} tasks in the team. */
unsigned int task_count;
/* Number of GOMP_TASK_WAITING tasks currently waiting to be scheduled. */
unsigned int task_queued_count;
/* Number of GOMP_TASK_{WAITING,TIED} tasks currently running
directly in gomp_barrier_handle_tasks; tasks spawned
from e.g. GOMP_taskwait or GOMP_taskgroup_end don't count, even when
that is called from a task run from gomp_barrier_handle_tasks.
task_running_count should be always <= team->nthreads,
and if current task isn't in_tied_task, then it will be
even < team->nthreads. */
unsigned int task_running_count;
int work_share_cancelled;
int team_cancelled;
/* This array contains structures for implicit tasks. */
struct gomp_task implicit_task[];
};
/* This structure contains all data that is private to libgomp and is
allocated per thread. */
struct gomp_thread
{
/* This is the function that the thread should run upon launch. */
void (*fn) (void *data);
void *data;
/* This is the current team state for this thread. The ts.team member
is NULL only if the thread is idle. */
struct gomp_team_state ts;
/* This is the task that the thread is currently executing. */
struct gomp_task *task;
/* This semaphore is used for ordered loops. */
gomp_sem_t release;
/* Place this thread is bound to plus one, or zero if not bound
to any place. */
unsigned int place;
/* User pthread thread pool */
struct gomp_thread_pool *thread_pool;
/* Popcorn's TID, basically this thread's number out of the total number of
threads created by the runtime over the lifetime of the application. */
int popcorn_created_tid;
/* Node ID on which this thread is executing in Popcorn. */
int popcorn_nid;
/* Reduction method for variables currently being reduced. */
int reduction_method;
/* Time stamp for this thread's probe start. */
struct timespec probe_start;
};
struct gomp_thread_pool
{
/* This array manages threads spawned from the top level, which will
return to the idle loop once the current PARALLEL construct ends. */
struct gomp_thread **threads;
unsigned threads_size;
unsigned threads_used;
/* The last team is used for non-nested teams to delay their destruction to
make sure all the threads in the team move on to the pool's barrier before
the team's barrier is destroyed. */
struct gomp_team *last_team;
/* Number of threads running in this contention group. */
unsigned long threads_busy;
/* This barrier holds and releases threads waiting in thread pools. */
gomp_simple_barrier_t threads_dock;
};
/* Kind of construct a cancellation applies to.  The values are distinct
   bits so several kinds can be tested with a single mask.  */
enum gomp_cancel_kind
{
  GOMP_CANCEL_PARALLEL = 1,
  GOMP_CANCEL_LOOP = 2,
  /* FOR (C/C++) and DO (Fortran) denote the same loop construct.  */
  GOMP_CANCEL_FOR = GOMP_CANCEL_LOOP,
  GOMP_CANCEL_DO = GOMP_CANCEL_LOOP,
  GOMP_CANCEL_SECTIONS = 4,
  GOMP_CANCEL_TASKGROUP = 8
};
/* ... and here is that TLS data.  */
#if defined __nvptx__
/* NVPTX has no conventional TLS; per-thread data lives in a shared
   array indexed by the hardware thread id (%tid.y).  */
extern struct gomp_thread *nvptx_thrs __attribute__((shared));
static inline struct gomp_thread *gomp_thread (void)
{
  int tid;
  asm ("mov.u32 %0, %%tid.y;" : "=r" (tid));
  return nvptx_thrs + tid;
}
#elif defined HAVE_TLS || defined USE_EMUTLS
/* Native (or emulated) thread-local storage.  */
extern __thread struct gomp_thread gomp_tls_data;
static inline struct gomp_thread *gomp_thread (void)
{
  return &gomp_tls_data;
}
#else
/* Fallback: pthread-specific data when no TLS is available.  */
extern pthread_key_t gomp_tls_key;
static inline struct gomp_thread *gomp_thread (void)
{
  return pthread_getspecific (gomp_tls_key);
}
#endif
extern struct gomp_task_icv *gomp_new_icv (void);
/* Here's how to access the current copy of the ICVs. */
static inline struct gomp_task_icv *gomp_icv (bool write)
{
struct gomp_task *task = gomp_thread ()->task;
if (task)
return &task->icv;
else if (write)
return gomp_new_icv ();
else
return &gomp_global_icv;
}
#ifdef LIBGOMP_USE_PTHREADS
/* The attributes to be used during thread creation. */
extern pthread_attr_t gomp_thread_attr;
extern pthread_key_t gomp_thread_destructor;
#endif
/* Function prototypes. */
/* affinity.c */
extern void gomp_init_affinity (void);
#ifdef LIBGOMP_USE_PTHREADS
extern void gomp_init_thread_affinity (pthread_attr_t *, unsigned int);
#endif
extern void **gomp_affinity_alloc (unsigned long, bool);
extern void gomp_affinity_init_place (void *);
extern bool gomp_affinity_add_cpus (void *, unsigned long, unsigned long,
long, bool);
extern bool gomp_affinity_remove_cpu (void *, unsigned long);
extern bool gomp_affinity_copy_place (void *, void *, long);
extern bool gomp_affinity_same_place (void *, void *);
extern bool gomp_affinity_finalize_place_list (bool);
extern bool gomp_affinity_init_level (int, unsigned long, bool);
extern void gomp_affinity_print_place (void *);
extern void gomp_get_place_proc_ids_8 (int, int64_t *);
extern bool popcorn_affinity_init_nodes (unsigned long *, unsigned long, bool);
extern bool popcorn_affinity_init_nodes_uniform (unsigned long, bool);
extern bool popcorn_affinity_init_node_ratings (unsigned long *, unsigned long,
bool);
/* iter.c */
extern bool gomp_iter_is_last (long);
extern bool gomp_iter_is_last_ull (unsigned long long);
extern int gomp_iter_static_next (long *, long *);
extern bool gomp_iter_dynamic_next_locked (long *, long *);
extern bool gomp_iter_dynamic_next_locked_ws (long *, long *,
struct gomp_work_share *);
extern bool gomp_iter_dynamic_next_locked_raw (long *, long *,
struct gomp_work_share *,
long);
extern bool gomp_iter_guided_next_locked (long *, long *);
#ifdef HAVE_SYNC_BUILTINS
extern bool gomp_iter_dynamic_next (long *, long *);
extern bool gomp_iter_dynamic_next_ws (long *, long *,
struct gomp_work_share *);
extern bool gomp_iter_dynamic_next_raw (long *, long *,
struct gomp_work_share *,
long);
extern bool gomp_iter_guided_next (long *, long *);
#endif
/* iter_ull.c */
extern int gomp_iter_ull_static_next (unsigned long long *,
unsigned long long *);
extern bool gomp_iter_ull_dynamic_next_locked (unsigned long long *,
unsigned long long *);
extern bool gomp_iter_ull_dynamic_next_locked_ws (unsigned long long *,
unsigned long long *,
struct gomp_work_share *);
extern bool gomp_iter_ull_dynamic_next_locked_raw (unsigned long long *,
unsigned long long *,
struct gomp_work_share *,
unsigned long long);
extern bool gomp_iter_ull_guided_next_locked (unsigned long long *,
unsigned long long *);
#if defined HAVE_SYNC_BUILTINS && defined __LP64__
extern bool gomp_iter_ull_dynamic_next (unsigned long long *,
unsigned long long *);
extern bool gomp_iter_ull_dynamic_next_ws (unsigned long long *,
unsigned long long *,
struct gomp_work_share *);
extern bool gomp_iter_ull_dynamic_next_raw (unsigned long long *,
unsigned long long *,
struct gomp_work_share *,
unsigned long long);
extern bool gomp_iter_ull_guided_next (unsigned long long *,
unsigned long long *);
#endif
/* ordered.c */
extern void gomp_ordered_first (void);
extern void gomp_ordered_last (void);
extern void gomp_ordered_next (void);
extern void gomp_ordered_static_init (void);
extern void gomp_ordered_static_next (void);
extern void gomp_ordered_sync (void);
extern void gomp_doacross_init (unsigned, long *, long);
extern void gomp_doacross_ull_init (unsigned, unsigned long long *,
unsigned long long);
/* parallel.c */
extern unsigned gomp_resolve_num_threads (unsigned, unsigned);
/* proc.c (in config/) */
extern int gomp_parse_cpuinfo(void);
extern void gomp_init_num_threads (void);
extern unsigned gomp_dynamic_max_threads (void);
/* task.c */
extern void gomp_init_task (struct gomp_task *, struct gomp_task *,
struct gomp_task_icv *);
extern void gomp_end_task (void);
extern void gomp_barrier_handle_tasks (gomp_barrier_state_t);
extern void gomp_task_maybe_wait_for_dependencies (void **);
extern bool gomp_create_target_task (struct gomp_device_descr *,
void (*) (void *), size_t, void **,
size_t *, unsigned short *, unsigned int,
void **, void **,
enum gomp_target_task_state);
/* Release per-task heap resources once TASK has completed.  Only the
   dependency hash table is separately allocated; the task block itself
   (including the trailing depend[] array) is freed by the caller.
   Specifier order fixed to the conventional "static inline void".  */
static inline void
gomp_finish_task (struct gomp_task *task)
{
  if (__builtin_expect (task->depend_hash != NULL, 0))
    free (task->depend_hash);
}
/* team.c */
extern struct gomp_team *gomp_new_team (unsigned);
extern void gomp_team_start (void (*) (void *), void *, unsigned,
unsigned, struct gomp_team *);
extern void gomp_team_end (void);
extern void gomp_free_thread (void *);
/* target.c */
extern void gomp_init_targets_once (void);
extern int gomp_get_num_devices (void);
extern bool gomp_target_task_fn (void *);
/* Splay tree definitions. */
typedef struct splay_tree_node_s *splay_tree_node;
typedef struct splay_tree_s *splay_tree;
typedef struct splay_tree_key_s *splay_tree_key;
struct target_var_desc {
/* Splay key. */
splay_tree_key key;
/* True if data should be copied from device to host at the end. */
bool copy_from;
/* True if data always should be copied from device to host at the end. */
bool always_copy_from;
/* Relative offset against key host_start. */
uintptr_t offset;
/* Actual length. */
uintptr_t length;
};
struct target_mem_desc {
/* Reference count. */
uintptr_t refcount;
/* All the splay nodes allocated together. */
splay_tree_node array;
/* Start of the target region. */
uintptr_t tgt_start;
/* End of the targer region. */
uintptr_t tgt_end;
/* Handle to free. */
void *to_free;
/* Previous target_mem_desc. */
struct target_mem_desc *prev;
/* Number of items in following list. */
size_t list_count;
/* Corresponding target device descriptor. */
struct gomp_device_descr *device_descr;
/* List of target items to remove (or decrease refcount)
at the end of region. */
struct target_var_desc list[];
};
/* Special value for refcount - infinity. */
#define REFCOUNT_INFINITY (~(uintptr_t) 0)
/* Special value for refcount - tgt_offset contains target address of the
artificial pointer to "omp declare target link" object. */
#define REFCOUNT_LINK (~(uintptr_t) 1)
struct splay_tree_key_s {
/* Address of the host object. */
uintptr_t host_start;
/* Address immediately after the host object. */
uintptr_t host_end;
/* Descriptor of the target memory. */
struct target_mem_desc *tgt;
/* Offset from tgt->tgt_start to the start of the target object. */
uintptr_t tgt_offset;
/* Reference count. */
uintptr_t refcount;
/* Pointer to the original mapping of "omp declare target link" object. */
splay_tree_key link_key;
};
/* Ordering for splay tree keys (host address ranges).  Two empty ranges
   always compare equal; disjoint ranges order by address; overlapping
   ranges compare equal so lookups find the covering mapping.  */
static inline int
splay_compare (splay_tree_key x, splay_tree_key y)
{
  bool x_empty = x->host_start == x->host_end;
  bool y_empty = y->host_start == y->host_end;
  if (x_empty && y_empty)
    return 0;
  return (x->host_end <= y->host_start) ? -1
	 : (x->host_start >= y->host_end) ? 1 : 0;
}
#include "splay-tree.h"
typedef struct acc_dispatch_t
{
/* This is a linked list of data mapped using the
acc_map_data/acc_unmap_data or "acc enter data"/"acc exit data" pragmas.
Unlike mapped_data in the goacc_thread struct, unmapping can
happen out-of-order with respect to mapping. */
/* This is guarded by the lock in the "outer" struct gomp_device_descr. */
struct target_mem_desc *data_environ;
/* Execute. */
__typeof (GOMP_OFFLOAD_openacc_exec) *exec_func;
/* Async cleanup callback registration. */
__typeof (GOMP_OFFLOAD_openacc_register_async_cleanup)
*register_async_cleanup_func;
/* Asynchronous routines. */
__typeof (GOMP_OFFLOAD_openacc_async_test) *async_test_func;
__typeof (GOMP_OFFLOAD_openacc_async_test_all) *async_test_all_func;
__typeof (GOMP_OFFLOAD_openacc_async_wait) *async_wait_func;
__typeof (GOMP_OFFLOAD_openacc_async_wait_async) *async_wait_async_func;
__typeof (GOMP_OFFLOAD_openacc_async_wait_all) *async_wait_all_func;
__typeof (GOMP_OFFLOAD_openacc_async_wait_all_async)
*async_wait_all_async_func;
__typeof (GOMP_OFFLOAD_openacc_async_set_async) *async_set_async_func;
/* Create/destroy TLS data. */
__typeof (GOMP_OFFLOAD_openacc_create_thread_data) *create_thread_data_func;
__typeof (GOMP_OFFLOAD_openacc_destroy_thread_data)
*destroy_thread_data_func;
/* NVIDIA target specific routines. */
struct {
__typeof (GOMP_OFFLOAD_openacc_cuda_get_current_device)
*get_current_device_func;
__typeof (GOMP_OFFLOAD_openacc_cuda_get_current_context)
*get_current_context_func;
__typeof (GOMP_OFFLOAD_openacc_cuda_get_stream) *get_stream_func;
__typeof (GOMP_OFFLOAD_openacc_cuda_set_stream) *set_stream_func;
} cuda;
} acc_dispatch_t;
/* Various state of the accelerator device. */
enum gomp_device_state
{
GOMP_DEVICE_UNINITIALIZED,
GOMP_DEVICE_INITIALIZED,
GOMP_DEVICE_FINALIZED
};
/* This structure describes accelerator device.
It contains name of the corresponding libgomp plugin, function handlers for
interaction with the device, ID-number of the device, and information about
mapped memory. */
struct gomp_device_descr
{
/* Immutable data, which is only set during initialization, and which is not
guarded by the lock. */
/* The name of the device. */
const char *name;
/* Capabilities of device (supports OpenACC, OpenMP). */
unsigned int capabilities;
/* This is the ID number of device among devices of the same type. */
int target_id;
/* This is the TYPE of device. */
enum offload_target_type type;
/* Function handlers. */
__typeof (GOMP_OFFLOAD_get_name) *get_name_func;
__typeof (GOMP_OFFLOAD_get_caps) *get_caps_func;
__typeof (GOMP_OFFLOAD_get_type) *get_type_func;
__typeof (GOMP_OFFLOAD_get_num_devices) *get_num_devices_func;
__typeof (GOMP_OFFLOAD_init_device) *init_device_func;
__typeof (GOMP_OFFLOAD_fini_device) *fini_device_func;
__typeof (GOMP_OFFLOAD_version) *version_func;
__typeof (GOMP_OFFLOAD_load_image) *load_image_func;
__typeof (GOMP_OFFLOAD_unload_image) *unload_image_func;
__typeof (GOMP_OFFLOAD_alloc) *alloc_func;
__typeof (GOMP_OFFLOAD_free) *free_func;
__typeof (GOMP_OFFLOAD_dev2host) *dev2host_func;
__typeof (GOMP_OFFLOAD_host2dev) *host2dev_func;
__typeof (GOMP_OFFLOAD_dev2dev) *dev2dev_func;
__typeof (GOMP_OFFLOAD_can_run) *can_run_func;
__typeof (GOMP_OFFLOAD_run) *run_func;
__typeof (GOMP_OFFLOAD_async_run) *async_run_func;
/* Splay tree containing information about mapped memory regions. */
struct splay_tree_s mem_map;
/* Mutex for the mutable data. */
gomp_mutex_t lock;
/* Current state of the device. OpenACC allows to move from INITIALIZED state
back to UNINITIALIZED state. OpenMP allows only to move from INITIALIZED
to FINALIZED state (at program shutdown). */
enum gomp_device_state state;
/* OpenACC-specific data and functions. */
/* This is mutable because of its mutable data_environ and target_data
members. */
acc_dispatch_t openacc;
};
/* Kind of the pragma, for which gomp_map_vars () is called. */
enum gomp_map_vars_kind
{
GOMP_MAP_VARS_OPENACC,
GOMP_MAP_VARS_TARGET,
GOMP_MAP_VARS_DATA,
GOMP_MAP_VARS_ENTER_DATA
};
extern void gomp_acc_insert_pointer (size_t, void **, size_t *, void *);
extern void gomp_acc_remove_pointer (void *, bool, int, int);
extern struct target_mem_desc *gomp_map_vars (struct gomp_device_descr *,
size_t, void **, void **,
size_t *, void *, bool,
enum gomp_map_vars_kind);
extern void gomp_unmap_vars (struct target_mem_desc *, bool);
extern void gomp_init_device (struct gomp_device_descr *);
extern void gomp_free_memmap (struct splay_tree_s *);
extern void gomp_unload_device (struct gomp_device_descr *);
/* work.c */
extern void gomp_init_work_share (struct gomp_work_share *, bool, unsigned);
extern void gomp_fini_work_share (struct gomp_work_share *);
extern bool gomp_work_share_start (bool);
extern void gomp_work_share_end (void);
extern bool gomp_work_share_end_cancel (void);
extern void gomp_work_share_end_nowait (void);
/* Signal that initialization of the current work share has finished by
   publishing it through the previous work share's next_ws pointer-lock,
   releasing any threads spinning there.  No-op for the first work share
   of a team (no predecessor exists).  */
static inline void
gomp_work_share_init_done (void)
{
  struct gomp_thread *self = gomp_thread ();
  if (__builtin_expect (self->ts.last_work_share == NULL, 0))
    return;
  gomp_ptrlock_set (&self->ts.last_work_share->next_ws,
		    self->ts.work_share);
}
#ifdef HAVE_ATTRIBUTE_VISIBILITY
# pragma GCC visibility pop
#endif
/* Now that we're back to default visibility, include the globals. */
#include "libgomp_g.h"
/* Include omp.h by parts. */
#include "omp-lock.h"
#define _LIBGOMP_OMP_LOCK_DEFINED 1
#include "omp.h.in"
#if !defined (HAVE_ATTRIBUTE_VISIBILITY) \
|| !defined (HAVE_ATTRIBUTE_ALIAS) \
|| !defined (HAVE_AS_SYMVER_DIRECTIVE) \
|| !defined (PIC) \
|| !defined (HAVE_SYMVER_SYMBOL_RENAMING_RUNTIME_SUPPORT)
# undef LIBGOMP_GNU_SYMBOL_VERSIONING
#endif
#ifdef LIBGOMP_GNU_SYMBOL_VERSIONING
extern void gomp_init_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_init_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
# define strong_alias(fn, al) \
extern __typeof (fn) al __attribute__ ((alias (#fn)));
# define omp_lock_symver(fn) \
__asm (".symver g" #fn "_30, " #fn "@@OMP_3.0"); \
__asm (".symver g" #fn "_25, " #fn "@OMP_1.0");
#else
# define gomp_init_lock_30 omp_init_lock
# define gomp_destroy_lock_30 omp_destroy_lock
# define gomp_set_lock_30 omp_set_lock
# define gomp_unset_lock_30 omp_unset_lock
# define gomp_test_lock_30 omp_test_lock
# define gomp_init_nest_lock_30 omp_init_nest_lock
# define gomp_destroy_nest_lock_30 omp_destroy_nest_lock
# define gomp_set_nest_lock_30 omp_set_nest_lock
# define gomp_unset_nest_lock_30 omp_unset_nest_lock
# define gomp_test_nest_lock_30 omp_test_nest_lock
#endif
#ifdef HAVE_ATTRIBUTE_VISIBILITY
# define attribute_hidden __attribute__ ((visibility ("hidden")))
#else
# define attribute_hidden
#endif
#ifdef HAVE_ATTRIBUTE_ALIAS
# define ialias_ulp ialias_str1(__USER_LABEL_PREFIX__)
# define ialias_str1(x) ialias_str2(x)
# define ialias_str2(x) #x
# define ialias(fn) \
extern __typeof (fn) gomp_ialias_##fn \
__attribute__ ((alias (#fn))) attribute_hidden;
# define ialias_redirect(fn) \
extern __typeof (fn) fn __asm__ (ialias_ulp "gomp_ialias_" #fn) attribute_hidden;
# define ialias_call(fn) gomp_ialias_ ## fn
#else
# define ialias(fn)
# define ialias_redirect(fn)
# define ialias_call(fn) fn
#endif
/* Helper for priority_node_to_task() and task_to_priority_node():
   return the byte offset from a struct gomp_task to its priority_node
   entry for queue TYPE.  */
static inline size_t
priority_queue_offset (enum priority_queue_type type)
{
  size_t idx = (size_t) (int) type;
  return offsetof (struct gomp_task, pnode[idx]);
}
/* Recover the gomp_task that embeds priority NODE of queue TYPE by
   walking back from the pnode[] member to the start of the struct.  */
static inline struct gomp_task *
priority_node_to_task (enum priority_queue_type type,
		       struct priority_node *node)
{
  char *base = (char *) node;
  base -= priority_queue_offset (type);
  return (struct gomp_task *) base;
}
/* Inverse of priority_node_to_task(): locate TASK's embedded
   priority_node for queue TYPE.  */
static inline struct priority_node *
task_to_priority_node (enum priority_queue_type type,
		       struct gomp_task *task)
{
  char *base = (char *) task;
  base += priority_queue_offset (type);
  return (struct priority_node *) base;
}
#define NS( ts ) ((ts.tv_sec * 1000000000ULL) + ts.tv_nsec)
#define ELAPSED( start, end ) (NS(end) - NS(start))
/* Time parallel sections & related statistics */
#define _TIME_PARALLEL 1
#if defined _TIME_PARALLEL || defined _TIME_BARRIER
# include <time.h>
# include <debug/log.h>
#endif
/* kmp.c */
extern float popcorn_probe_percent;
/* hierarchy.c */
extern bool popcorn_log_statistics;
extern size_t popcorn_max_probes;
extern const char *popcorn_prime_region;
extern int popcorn_preferred_node;
extern void popcorn_init_workshare_cache(size_t);
extern bool popcorn_distributed ();
extern bool popcorn_finished ();
extern bool popcorn_hybrid_barrier ();
extern bool popcorn_hybrid_reduce ();
extern bool popcorn_het_workshare ();
extern void popcorn_set_distributed (bool);
extern void popcorn_set_finished (bool);
extern void popcorn_set_hybrid_barrier (bool);
extern void popcorn_set_hybrid_reduce (bool);
extern void popcorn_set_het_workshare (bool);
extern void popcorn_get_page_faults (unsigned long long *,
unsigned long long *);
extern void hierarchy_hybrid_barrier_final (int, const char *);
/* Shorthand to select between hierarchical & normal barriers.  In
   Popcorn hybrid mode, use the per-node hierarchical final barrier;
   otherwise fall through to the regular team barrier.  */
static inline void gomp_team_barrier_wait_final_select (gomp_barrier_t *bar)
{
  if (!popcorn_hybrid_barrier ())
    {
      gomp_team_barrier_wait_final (bar);
      return;
    }
  struct gomp_thread *self = gomp_thread ();
  hierarchy_hybrid_barrier_final (self->popcorn_nid, "End parallel");
}
/* Shorthand to select between simple-barrier variants: in Popcorn
   hybrid mode avoid spinning, otherwise use the default wait.  */
static inline void gomp_simple_barrier_wait_select (gomp_simple_barrier_t *bar)
{
  /* TODO make hierarchical nospin */
  if (popcorn_hybrid_barrier ())
    gomp_simple_barrier_wait_nospin (bar);
  else
    gomp_simple_barrier_wait (bar);
}
#endif /* LIBGOMP_H */
|
OscarIntegration.h | #pragma once
#include <algorithm>
#include <execution>
#include <iostream>
#include <mutex>
#include <random>
#include <unordered_map>
#include <unordered_set>
#include <thread>
#include <path_finder/storage/CellIdStore.h>
#include <path_finder/graphs/CHGraph.h>
namespace pathFinder {
inline namespace std_threads_variant {
class OscarIntegrator {
public:
  /// Compute and persist the OSCAR cell ids for every edge of @p graph.
  ///
  /// Phase 1 (parallel): regular (non-shortcut) edges.  Worker threads
  /// explore the graph depth-first, claiming nodes through an atomic bitmap
  /// so no node is processed twice, and buffer their results to amortize
  /// locking of the CellIdStore.
  /// Phase 2 (serial): shortcut edges.  A shortcut's cell id set is the
  /// sorted union of its two children's sets; shortcuts are resolved
  /// bottom-up once both children have their cell ids.
  ///
  /// @param graph        contraction-hierarchy graph to process
  /// @param cellIdStore  destination for the computed edge -> cell id sets
  /// @param edge2CellIds callable mapping an edge (two lat/lng pairs plus a
  ///                     hint) to its cell ids; copied once per worker
  /// @param numThreads   number of worker threads used in phase 1
  template <typename GeoPoint, typename CellIdsForEdge>
  static void writeCellIdsForEdges(const CHGraph &graph, CellIdStore &cellIdStore, CellIdsForEdge edge2CellIds, std::size_t numThreads = 1) {
    // State shared between all workers.
    struct State {
      const CHGraph &graph;
      CellIdStore &cellIdStore;
      CellIdsForEdge & edge2CellIds;
      std::vector<std::atomic<uint64_t>> finishedNodes; // bitmap: 1 bit per claimed node
      std::atomic<std::size_t> progress{0};             // number of claimed nodes
      std::size_t edgeProgress{0};                      // edges flushed so far (guarded by cellIdStoreLock)
      const std::size_t numberOfNodes;
      std::mutex cellIdStoreLock;                       // guards cellIdStore and edgeProgress
      State(const CHGraph &graph, CellIdStore &cellIdStore, CellIdsForEdge & edge2CellIds) :
        graph(graph),
        cellIdStore(cellIdStore),
        edge2CellIds(edge2CellIds),
        finishedNodes(graph.getNumberOfNodes() / 64 + 1),
        numberOfNodes(graph.getNumberOfNodes())
      {
        for(auto & x : finishedNodes) {
          x = 0;
        }
      }
      // Atomically claim nodeId; returns false if another worker already owns it.
      bool takeNode(NodeId nodeId) {
        std::size_t chunk = nodeId / 64;
        std::size_t bit = nodeId % 64;
        uint64_t flag = static_cast<uint64_t>(1) << bit;
        uint64_t prev = finishedNodes.at(chunk).fetch_or(flag, std::memory_order_relaxed);
        if (prev & flag) { //already taken
          return false;
        }
        auto p = progress.fetch_add(1, std::memory_order_relaxed);
        if (p%1000 == 0) {
          std::cout << '\xd' << p << "/" << numberOfNodes << "=" << double(p)/numberOfNodes*100 << "%" << std::flush;
        }
        return true;
      }
    } state(graph, cellIdStore, edge2CellIds);
    //A worker explores the graph depth-first
    //For each node we store the hint given by the cellId operator and reuse it when backtracking
    //Since multiple workers work in parallel we have to make sure that a node is not visited twice
    //This is tracked in the finishedNodes vector
    //If the node queue is empty then we choose a new random node and try to explore from there
    //If the node is already taken then we sample all nodes from the beginning to make sure that each node is visited
    struct Worker {
      // One stack frame of the depth-first search: the edge range of a node,
      // an iterator into it, and the cell-id hint carried from the parent.
      struct DFSElement {
        decltype(graph.edgesFor(0, EdgeDirection::FORWARD)) edges;
        decltype(graph.edgesFor(0, EdgeDirection::FORWARD).begin()) it;
        typename CellIdsForEdge::Hint hint;
        DFSElement(decltype(edges) const & edges, decltype(hint) hint) :
          edges(edges),
          it(edges.begin()),
          hint(hint)
        {}
      };
      State *state;
      CellIdsForEdge edge2CellIds; // per-worker copy; cheap hint reuse, no sharing
      std::vector<DFSElement> stack;
      std::default_random_engine rndgen;
      std::uniform_int_distribution<uint32_t> nodeIdRnd;
      std::vector< std::pair<std::size_t, std::vector<uint32_t>> > buffer; //edgeId -> cellIds
      std::size_t apxBufferSizeInBytes{0};
      Worker(State *state) :
        state(state),
        edge2CellIds(state->edge2CellIds),
        nodeIdRnd(0, state->numberOfNodes-1)
      {}
      ~Worker() {
        flush(); // make sure nothing buffered is lost when the worker exits
      }
      void operator()() {
        while (true) {
          if (state->progress >= state->numberOfNodes) {
            break;
          }
          // first try a random nodeId if that fails, sample all
          NodeId nid = nodeIdRnd(rndgen);
          if (!state->takeNode(nid)) { //try all from the beginning
            nid = state->numberOfNodes;
            for(std::size_t i(0), s(state->finishedNodes.size()); i < s && nid >= state->numberOfNodes; ++i) {
              uint64_t tmp = state->finishedNodes[i].load(std::memory_order_relaxed);
              while(tmp != std::numeric_limits<uint64_t>::max() && nid >= state->numberOfNodes) {
                tmp = ~tmp; // now set bits mark *free* nodes
                static_assert(std::is_same<unsigned long, decltype(tmp)>::value);
                //tmp cannot be 0 otherwise we wouldn't be here
                nid = i*64 + 63-__builtin_clzl(tmp);
                if (!state->takeNode(nid)) {
                  nid = state->numberOfNodes;
                  tmp = state->finishedNodes[i].load(std::memory_order_relaxed);
                }
              }
            }
            if (nid >= state->numberOfNodes) {
              //No node found, this means that all were processed during search for a new one
              flush();
              assert(state->progress == state->numberOfNodes);
              break;
            }
          }
          assert(nid < state->numberOfNodes);
          {
            auto node = state->graph.getNode(nid);
            typename CellIdsForEdge::Hint fh;
            //Get the face hint for this node
            edge2CellIds(node.latLng.lat, node.latLng.lng, node.latLng.lat, node.latLng.lng, fh);
            stack.emplace_back(state->graph.edgesFor(nid, EdgeDirection::FORWARD), fh);
          }
          while (stack.size()) {
            //Check if we're at the end of our edge list
            if (stack.back().it == stack.back().edges.end()) {
              stack.pop_back();
              continue;
            }
            //expand next edge
            auto edge = *stack.back().it;
            ++stack.back().it; //move to next edge immediately in case we need to skip this edge
            if (edge.child1.has_value()) { //skip shortcut edges
              continue;
            }
            const auto sourceNode = state->graph.getNode(edge.source);
            const auto targetNode = state->graph.getNode(edge.target);
            auto hint = stack.back().hint;
            auto edgePos = state->graph.getEdgePosition(edge, EdgeDirection::FORWARD);
            if (edgePos) {
              buffer.emplace_back(
                edgePos.value(),
                edge2CellIds( sourceNode.latLng.lat, sourceNode.latLng.lng,
                              targetNode.latLng.lat, targetNode.latLng.lng,
                              hint
                )
              );
              // Account for the entry we just appended.  This must only run
              // when something was actually pushed: the previous code updated
              // the size estimate unconditionally, dereferencing buffer.back()
              // even when the buffer could be empty (undefined behavior) or
              // double-counting the previous entry.
              apxBufferSizeInBytes += sizeof(typename std::decay_t<decltype(buffer)>::value_type) + buffer.back().second.size()*sizeof(uint32_t);
            }
            else {
              std::cerr << "BUG: Edge " << edge << " has no forward position" << std::endl;
            }
            //check if we can descend into the node
            if (state->takeNode(targetNode.id)) {
              stack.emplace_back(state->graph.edgesFor(targetNode.id, EdgeDirection::FORWARD), hint);
            }
            //check if we need to flush our buffer
            if (apxBufferSizeInBytes > 128*1024*1024) {
              flush();
            }
          }
        }
      }
      // Move all buffered (edge position, cell ids) pairs into the store
      // under the shared lock, then reset the buffer.
      void flush() {
        if (buffer.size()) {
          std::lock_guard<std::mutex> lck(state->cellIdStoreLock);
          for(auto & x : buffer) {
            state->cellIdStore.storeCellIds(x.first, std::move(x.second));
          }
          state->edgeProgress += buffer.size();
          buffer.clear();
          apxBufferSizeInBytes = 0;
        }
      }
    };
    std::cout << "Computing cell ids for regular edges..." << std::endl;
    if (numThreads > 1) {
      std::vector<std::thread> threads;
      threads.reserve(numThreads);
      for(std::size_t i(0); i < numThreads; ++i) {
        threads.emplace_back(Worker(&state));
      }
      for(auto & x : threads) {
        x.join();
      }
    }
    else {
      Worker w(&state);
      w();
    }
    std::cout << "\nFound " << state.edgeProgress << " regular edges out of a total of " << graph.getNumberOfEdges() << std::endl;
    const auto &edges = graph.getEdges();
    std::unordered_map<uint32_t, uint8_t> pendingChildren; //shortcut -> num unfinished children
    std::unordered_multimap<uint32_t, uint32_t> edgeParents; //maps from shortcut-children->shortcuts, note that an edge may have multiple parents
    std::unordered_set<uint32_t> edgesWithChildren; //contains all shortcuts for which both children have their cellids computed
    //first get all shortcuts and set them as parent of their respective children
    std::cout << "Computing shortcut dependency tree..." << std::flush;
    for (uint32_t i(0), s(graph.getNumberOfEdges()); i < s; ++i) {
      const auto &edge = edges[i];
      if (edge.child1.has_value()) {
        assert(edge.child1 != edge.child2);
        edgeParents.emplace(edge.child1.value(), i);
        edgeParents.emplace(edge.child2.value(), i);
        pendingChildren[i] = 2;
      }
      else {
        // Regular edge: subtract it from the phase-1 count so the assert
        // below verifies every regular edge was flushed exactly once.
        state.edgeProgress -= 1;
      }
    }
    std::cout << "done" << std::endl;
    assert(!state.edgeProgress);
    //Find shortcuts that have a regular edge as a parent
    std::cout << "Computing cell ids for shortcut edges..." << std::flush;
    for (uint32_t i(0), s(graph.getNumberOfEdges()); i < s; ++i) {
      auto const & edge = edges[i];
      if (edge.child1.has_value()) {
        continue;
      }
      if (!edgeParents.count(i)) {
        continue;
      }
      auto parents = edgeParents.equal_range(i);
      for(auto it(parents.first); it != parents.second; ++it) {
        auto parent = it->second;
        auto & x = pendingChildren.at(parent);
        x -= 1;
        assert(x <= 2);
        if (x == 0) { //our parent has all of its children set
          edgesWithChildren.insert(parent);
          pendingChildren.erase(parent);
        }
      }
    }
    //now compute the union of the cellids of the children
    //We do this recursivley kind of bottom up, but the unordered_set defines the actual order
    //however this should not reduce the performance since the edges have static dependencies and
    //thus the work to be done does not depend on the order we do it (apart from cache issues)
    while (edgesWithChildren.size()) {
      uint32_t edgeId = *edgesWithChildren.begin();
      auto const & edge = edges[edgeId];
      //get the cellids of the children, these are sorted, thus we can use std::set_union
      auto c1cids = cellIdStore.getCellIds(edge.child1.value());
      auto c2cids = cellIdStore.getCellIds(edge.child2.value());
      assert(std::is_sorted(c1cids.begin(), c1cids.end()));
      assert(std::is_sorted(c2cids.begin(), c2cids.end()));
      std::vector<std::decay_t<decltype(c2cids)>::value_type> ecids;
      std::set_union(c1cids.begin(), c1cids.end(), c2cids.begin(), c2cids.end(), std::back_inserter(ecids));
      cellIdStore.storeCellIds(edgeId, std::move(ecids));
      //take care of parent
      auto parents = edgeParents.equal_range(edgeId);
      for(auto it(parents.first); it != parents.second; ++it) {
        auto parent = it->second;
        auto & x = pendingChildren.at(parent);
        x -= 1;
        assert(x <= 2);
        if (x == 0) { //our parent has all of its children set
          edgesWithChildren.insert(parent);
          pendingChildren.erase(parent);
        }
      }
      //remove ourself
      edgesWithChildren.erase(edgeId);
    }
    std::cout << "done" << std::endl;
    if (pendingChildren.size()) {
      throw std::runtime_error("Could not compute all shortcut cellids");
    }
    cellIdStore.shrink_to_fit();
  }
};
}
namespace omp_variant {
class OscarIntegrator {
public:
  /// OpenMP variant: compute and persist cell ids for all edges of @p graph.
  ///
  /// Phase 1 parallelizes over nodes (16 threads); each regular edge's cell
  /// ids are computed from its endpoint coordinates and stored under an
  /// `omp critical` section.  Phase 2 runs serially and derives each
  /// shortcut's cell ids as the sorted, deduplicated union of the regular
  /// edges it unpacks to.
  ///
  /// @param graph        contraction-hierarchy graph to process
  /// @param cellIdStore  destination for the computed edge -> cell id sets
  /// @param store        key/value backend each per-thread CellIdsForEdge
  ///                     instance is constructed from
  template <typename GeoPoint, typename CellIdsForEdge, typename KVStore>
  static void writeCellIdsForEdges(const CHGraph &graph, CellIdStore &cellIdStore,
                                   KVStore &store) {
    const auto& edges = graph.getEdges();
    int progress = 0;
#pragma omp parallel for default(none) shared(edges, graph, cellIdStore, store, progress, std::cout) num_threads(16)
    for (int i = 0; i < graph.getNumberOfNodes(); ++i) {
      // One cell-id operator per thread/iteration; it must not be shared.
      CellIdsForEdge cellIdsForEdge(store);
      for(const auto& edge : graph.edgesFor(i, EdgeDirection::FORWARD)) {
        if (edge.child1.has_value()) { // shortcuts are handled in phase 2
          continue;
        }
        std::vector<uint32_t> cellIds;
        const auto sourceNode = graph.getNode(edge.source);
        const auto targetNode = graph.getNode(edge.target);
        GeoPoint sourcePoint;
        sourcePoint.lat() = sourceNode.latLng.lat;
        sourcePoint.lon() = sourceNode.latLng.lng;
        GeoPoint targetPoint;
        targetPoint.lat() = targetNode.latLng.lat;
        targetPoint.lon() = targetNode.latLng.lng;
        try {
          auto cellIdsEdge = cellIdsForEdge(sourcePoint, targetPoint);
          cellIds.insert(cellIds.end(), cellIdsEdge.begin(), cellIdsEdge.end());
        } catch (const std::exception &) {
          // Best effort: an edge the cell-id operator cannot handle simply
          // gets an empty cell id set.  (Previously bound an unused `e`,
          // which triggers -Wunused-variable.)
        }
        // Drop the "invalid cell" sentinel (uint32_t max).
        cellIds.erase(std::remove(cellIds.begin(), cellIds.end(), 4294967295), cellIds.end());
#pragma omp critical
        {
          cellIdStore.storeCellIds(graph.getEdgePosition(edge, EdgeDirection::FORWARD).value(), cellIds);
          ++progress;
        }
      }
    }
    // Phase 2 (serial): a shortcut's cell ids are the union of the cell ids
    // of the regular edges on the path it represents.
    for (int i = 0; i < graph.getNumberOfEdges(); ++i) {
      const auto &edge = edges[i];
      if (edge.child1.has_value()) {
        const auto fullEdges = graph.getPathFromShortcut(edge, 0);
        std::vector<size_t> fullEdgeIds;
        fullEdgeIds.reserve(fullEdges.size());
        for (const auto fullEdge : fullEdges) {
          fullEdgeIds.emplace_back(graph.getEdgePosition(fullEdge, EdgeDirection::FORWARD).value());
        }
        auto fullCellIds = cellIdStore.getCellIds(fullEdgeIds);
        sort(fullCellIds.begin(), fullCellIds.end());
        (fullCellIds).erase(unique(fullCellIds.begin(), fullCellIds.end()), fullCellIds.end());
        cellIdStore.storeCellIds(i, fullCellIds);
        ++progress;
        if (progress % 1000 == 0)
          std::cout << "progress: " << progress << "/" << graph.getNumberOfEdges() << '\n';
      }
    }
    cellIdStore.shrink_to_fit();
  }
};
}
} // namespace pathFinder
|
Stmt.h | //===- Stmt.h - Classes for representing statements -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
namespace llvm {
class FoldingSetNodeID;
} // namespace llvm
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class AddrLabelExpr;
class LabelDecl;
class ODRHash;
class PrinterHelper;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
class StringLiteral;
class Token;
class VarDecl;
class WhereClause;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class alignas(void *) Stmt {
public:
enum StmtClass {
NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
};
// Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
friend class ASTStmtReader;
friend class ASTStmtWriter;
void *operator new(size_t bytes) noexcept {
llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
}
void operator delete(void *data) noexcept {
llvm_unreachable("Stmts cannot be released with regular 'delete'.");
}
//===--- Statement bitfields classes ---===//
class StmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class Stmt;
/// The statement class.
unsigned sClass : 8;
};
enum { NumStmtBits = 8 };
class NullStmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class NullStmt;
unsigned : NumStmtBits;
/// True if the null statement was preceded by an empty macro, e.g:
/// @code
/// #define CALL(x)
/// CALL(0);
/// @endcode
unsigned HasLeadingEmptyMacro : 1;
/// The location of the semi-colon.
SourceLocation SemiLoc;
};
class CompoundStmtBitfields {
friend class ASTStmtReader;
friend class CompoundStmt;
unsigned : NumStmtBits;
unsigned NumStmts : 32 - NumStmtBits;
/// The location of the opening "{".
SourceLocation LBraceLoc;
};
class LabelStmtBitfields {
friend class LabelStmt;
unsigned : NumStmtBits;
SourceLocation IdentLoc;
};
class AttributedStmtBitfields {
friend class ASTStmtReader;
friend class AttributedStmt;
unsigned : NumStmtBits;
/// Number of attributes.
unsigned NumAttrs : 32 - NumStmtBits;
/// The location of the attribute.
SourceLocation AttrLoc;
};
class IfStmtBitfields {
friend class ASTStmtReader;
friend class IfStmt;
unsigned : NumStmtBits;
/// True if this if statement is a constexpr if.
unsigned IsConstexpr : 1;
/// True if this if statement has storage for an else statement.
unsigned HasElse : 1;
/// True if this if statement has storage for a variable declaration.
unsigned HasVar : 1;
/// True if this if statement has storage for an init statement.
unsigned HasInit : 1;
/// The location of the "if".
SourceLocation IfLoc;
};
class SwitchStmtBitfields {
friend class SwitchStmt;
unsigned : NumStmtBits;
/// True if the SwitchStmt has storage for an init statement.
unsigned HasInit : 1;
/// True if the SwitchStmt has storage for a condition variable.
unsigned HasVar : 1;
/// If the SwitchStmt is a switch on an enum value, records whether all
/// the enum values were covered by CaseStmts. The coverage information
/// value is meant to be a hint for possible clients.
unsigned AllEnumCasesCovered : 1;
/// The location of the "switch".
SourceLocation SwitchLoc;
};
class WhileStmtBitfields {
friend class ASTStmtReader;
friend class WhileStmt;
unsigned : NumStmtBits;
/// True if the WhileStmt has storage for a condition variable.
unsigned HasVar : 1;
/// The location of the "while".
SourceLocation WhileLoc;
};
class DoStmtBitfields {
friend class DoStmt;
unsigned : NumStmtBits;
/// The location of the "do".
SourceLocation DoLoc;
};
class ForStmtBitfields {
friend class ForStmt;
unsigned : NumStmtBits;
/// The location of the "for".
SourceLocation ForLoc;
};
class GotoStmtBitfields {
friend class GotoStmt;
friend class IndirectGotoStmt;
unsigned : NumStmtBits;
/// The location of the "goto".
SourceLocation GotoLoc;
};
class ContinueStmtBitfields {
friend class ContinueStmt;
unsigned : NumStmtBits;
/// The location of the "continue".
SourceLocation ContinueLoc;
};
class BreakStmtBitfields {
friend class BreakStmt;
unsigned : NumStmtBits;
/// The location of the "break".
SourceLocation BreakLoc;
};
class ReturnStmtBitfields {
friend class ReturnStmt;
unsigned : NumStmtBits;
/// True if this ReturnStmt has storage for an NRVO candidate.
unsigned HasNRVOCandidate : 1;
/// The location of the "return".
SourceLocation RetLoc;
};
class SwitchCaseBitfields {
friend class SwitchCase;
friend class CaseStmt;
unsigned : NumStmtBits;
/// Used by CaseStmt to store whether it is a case statement
/// of the form case LHS ... RHS (a GNU extension).
unsigned CaseStmtIsGNURange : 1;
/// The location of the "case" or "default" keyword.
SourceLocation KeywordLoc;
};
//===--- Expression bitfields classes ---===//
class ExprBitfields {
friend class ASTStmtReader; // deserialization
friend class AtomicExpr; // ctor
friend class BlockDeclRefExpr; // ctor
friend class CallExpr; // ctor
friend class CXXConstructExpr; // ctor
friend class CXXDependentScopeMemberExpr; // ctor
friend class CXXNewExpr; // ctor
friend class CXXUnresolvedConstructExpr; // ctor
friend class DeclRefExpr; // computeDependence
friend class DependentScopeDeclRefExpr; // ctor
friend class DesignatedInitExpr; // ctor
friend class Expr;
friend class InitListExpr; // ctor
friend class ObjCArrayLiteral; // ctor
friend class ObjCDictionaryLiteral; // ctor
friend class ObjCMessageExpr; // ctor
friend class OffsetOfExpr; // ctor
friend class OpaqueValueExpr; // ctor
friend class OverloadExpr; // ctor
friend class ParenListExpr; // ctor
friend class PseudoObjectExpr; // ctor
friend class ShuffleVectorExpr; // ctor
unsigned : NumStmtBits;
unsigned ValueKind : 2;
unsigned ObjectKind : 3;
unsigned /*ExprDependence*/ Dependent : llvm::BitWidth<ExprDependence>;
};
enum { NumExprBits = NumStmtBits + 5 + llvm::BitWidth<ExprDependence> };
class ConstantExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class ConstantExpr;
unsigned : NumExprBits;
/// The kind of result that is tail-allocated.
unsigned ResultKind : 2;
/// The kind of Result as defined by APValue::Kind.
unsigned APValueKind : 4;
/// When ResultKind == RSK_Int64, true if the tail-allocated integer is
/// unsigned.
unsigned IsUnsigned : 1;
/// When ResultKind == RSK_Int64. the BitWidth of the tail-allocated
/// integer. 7 bits because it is the minimal number of bits to represent a
/// value from 0 to 64 (the size of the tail-allocated integer).
unsigned BitWidth : 7;
/// When ResultKind == RSK_APValue, true if the ASTContext will cleanup the
/// tail-allocated APValue.
unsigned HasCleanup : 1;
/// True if this ConstantExpr was created for immediate invocation.
unsigned IsImmediateInvocation : 1;
};
class PredefinedExprBitfields {
friend class ASTStmtReader;
friend class PredefinedExpr;
unsigned : NumExprBits;
/// The kind of this PredefinedExpr. One of the enumeration values
/// in PredefinedExpr::IdentKind.
unsigned Kind : 4;
/// True if this PredefinedExpr has a trailing "StringLiteral *"
/// for the predefined identifier.
unsigned HasFunctionName : 1;
/// The location of this PredefinedExpr.
SourceLocation Loc;
};
class DeclRefExprBitfields {
friend class ASTStmtReader; // deserialization
friend class DeclRefExpr;
unsigned : NumExprBits;
unsigned HasQualifier : 1;
unsigned HasTemplateKWAndArgsInfo : 1;
unsigned HasFoundDecl : 1;
unsigned HadMultipleCandidates : 1;
unsigned RefersToEnclosingVariableOrCapture : 1;
unsigned NonOdrUseReason : 2;
/// The location of the declaration name itself.
SourceLocation Loc;
};
class FloatingLiteralBitfields {
friend class FloatingLiteral;
unsigned : NumExprBits;
unsigned Semantics : 3; // Provides semantics for APFloat construction
unsigned IsExact : 1;
};
class StringLiteralBitfields {
friend class ASTStmtReader;
friend class StringLiteral;
unsigned : NumExprBits;
/// The kind of this string literal.
/// One of the enumeration values of StringLiteral::StringKind.
unsigned Kind : 3;
/// The width of a single character in bytes. Only values of 1, 2,
/// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
/// the target + string kind to the appropriate CharByteWidth.
unsigned CharByteWidth : 3;
unsigned IsPascal : 1;
/// The number of concatenated token this string is made of.
/// This is the number of trailing SourceLocation.
unsigned NumConcatenated;
};
class CharacterLiteralBitfields {
friend class CharacterLiteral;
unsigned : NumExprBits;
unsigned Kind : 3;
};
enum { NumBoundsCheckKindBits = 2 };
class UnaryOperatorBitfields {
friend class UnaryOperator;
unsigned : NumExprBits;
unsigned Opc : 5;
unsigned CanOverflow : 1;
//
/// This is only meaningful for operations on floating point
/// types when additional values need to be in trailing storage.
/// It is 0 otherwise.
unsigned HasFPFeatures : 1;
unsigned BoundsCheckKind : NumBoundsCheckKindBits;
SourceLocation Loc;
};
class UnaryExprOrTypeTraitExprBitfields {
friend class UnaryExprOrTypeTraitExpr;
unsigned : NumExprBits;
unsigned Kind : 3;
unsigned IsType : 1; // true if operand is a type, false if an expression.
};
class ArrayOrMatrixSubscriptExprBitfields {
friend class ArraySubscriptExpr;
friend class MatrixSubscriptExpr;
unsigned : NumExprBits;
unsigned BoundsCheckKind : NumBoundsCheckKindBits;
SourceLocation RBracketLoc;
};
class ArraySubscriptExprBitfields {
friend class ArraySubscriptExpr;
unsigned : NumExprBits;
unsigned BoundsCheckKind : NumBoundsCheckKindBits;
SourceLocation RBracketLoc;
};
class CallExprBitfields {
friend class CallExpr;
unsigned : NumExprBits;
unsigned NumPreArgs : 1;
/// True if the callee of the call expression was found using ADL.
unsigned UsesADL : 1;
/// Padding used to align OffsetToTrailingObjects to a byte multiple.
unsigned : 24 - 2 - NumExprBits;
/// The offset in bytes from the this pointer to the start of the
/// trailing objects belonging to CallExpr. Intentionally byte sized
/// for faster access.
unsigned OffsetToTrailingObjects : 8;
};
enum { NumCallExprBits = 32 };
class MemberExprBitfields {
friend class ASTStmtReader;
friend class MemberExpr;
unsigned : NumExprBits;
/// IsArrow - True if this is "X->F", false if this is "X.F".
unsigned IsArrow : 1;
/// True if this member expression used a nested-name-specifier to
/// refer to the member, e.g., "x->Base::f", or found its member via
/// a using declaration. When true, a MemberExprNameQualifier
/// structure is allocated immediately after the MemberExpr.
unsigned HasQualifierOrFoundDecl : 1;
/// True if this member expression specified a template keyword
/// and/or a template argument list explicitly, e.g., x->f<int>,
/// x->template f, x->template f<int>.
/// When true, an ASTTemplateKWAndArgsInfo structure and its
/// TemplateArguments (if any) are present.
unsigned HasTemplateKWAndArgsInfo : 1;
/// True if this member expression refers to a method that
/// was resolved from an overloaded set having size greater than 1.
unsigned HadMultipleCandidates : 1;
/// Value of type NonOdrUseReason indicating why this MemberExpr does
/// not constitute an odr-use of the named declaration. Meaningful only
/// when naming a static member.
unsigned NonOdrUseReason : 2;
/// This is the location of the -> or . in the expression.
SourceLocation OperatorLoc;
};
class CastExprBitfields {
friend class CastExpr;
friend class ImplicitCastExpr;
unsigned : NumExprBits;
unsigned Kind : 6;
unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.
unsigned BoundsSafeInterface : 1;
/// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
/// here. ([implimits] Direct and indirect base classes [16384]).
unsigned BasePathSize;
};
class BinaryOperatorBitfields {
friend class BinaryOperator;
unsigned : NumExprBits;
unsigned Opc : 6;
/// This is only meaningful for operations on floating point
/// types when additional values need to be in trailing storage.
/// It is 0 otherwise.
unsigned HasFPFeatures : 1;
SourceLocation OpLoc;
};
class InitListExprBitfields {
friend class InitListExpr;
unsigned : NumExprBits;
/// Whether this initializer list originally had a GNU array-range
/// designator in it. This is a temporary marker used by CodeGen.
unsigned HadArrayRangeDesignator : 1;
};
class ParenListExprBitfields {
friend class ASTStmtReader;
friend class ParenListExpr;
unsigned : NumExprBits;
/// The number of expressions in the paren list.
unsigned NumExprs;
};
class GenericSelectionExprBitfields {
friend class ASTStmtReader;
friend class GenericSelectionExpr;
unsigned : NumExprBits;
/// The location of the "_Generic".
SourceLocation GenericLoc;
};
class PseudoObjectExprBitfields {
friend class ASTStmtReader; // deserialization
friend class PseudoObjectExpr;
unsigned : NumExprBits;
// These don't need to be particularly wide, because they're
// strictly limited by the forms of expressions we permit.
unsigned NumSubExprs : 8;
unsigned ResultIndex : 32 - 8 - NumExprBits;
};
class SourceLocExprBitfields {
friend class ASTStmtReader;
friend class SourceLocExpr;
unsigned : NumExprBits;
/// The kind of source location builtin represented by the SourceLocExpr.
/// Ex. __builtin_LINE, __builtin_FUNCTION, ect.
unsigned Kind : 2;
};
class StmtExprBitfields {
friend class ASTStmtReader;
friend class StmtExpr;
unsigned : NumExprBits;
/// The number of levels of template parameters enclosing this statement
/// expression. Used to determine if a statement expression remains
/// dependent after instantiation.
unsigned TemplateDepth;
};
//===--- C++ Expression bitfields classes ---===//
class CXXOperatorCallExprBitfields {
friend class ASTStmtReader;
friend class CXXOperatorCallExpr;
unsigned : NumCallExprBits;
/// The kind of this overloaded operator. One of the enumerator
/// value of OverloadedOperatorKind.
unsigned OperatorKind : 6;
};
class CXXRewrittenBinaryOperatorBitfields {
friend class ASTStmtReader;
friend class CXXRewrittenBinaryOperator;
unsigned : NumCallExprBits;
unsigned IsReversed : 1;
};
class CXXBoolLiteralExprBitfields {
friend class CXXBoolLiteralExpr;
unsigned : NumExprBits;
/// The value of the boolean literal.
unsigned Value : 1;
/// The location of the boolean literal.
SourceLocation Loc;
};
class CXXNullPtrLiteralExprBitfields {
friend class CXXNullPtrLiteralExpr;
unsigned : NumExprBits;
/// The location of the null pointer literal.
SourceLocation Loc;
};
class CXXThisExprBitfields {
friend class CXXThisExpr;
unsigned : NumExprBits;
/// Whether this is an implicit "this".
unsigned IsImplicit : 1;
/// The location of the "this".
SourceLocation Loc;
};
class CXXThrowExprBitfields {
friend class ASTStmtReader;
friend class CXXThrowExpr;
unsigned : NumExprBits;
/// Whether the thrown variable (if any) is in scope.
unsigned IsThrownVariableInScope : 1;
/// The location of the "throw".
SourceLocation ThrowLoc;
};
class CXXDefaultArgExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultArgExpr;
unsigned : NumExprBits;
/// The location where the default argument expression was used.
SourceLocation Loc;
};
class CXXDefaultInitExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultInitExpr;
unsigned : NumExprBits;
/// The location where the default initializer expression was used.
SourceLocation Loc;
};
class CXXScalarValueInitExprBitfields {
friend class ASTStmtReader;
friend class CXXScalarValueInitExpr;
unsigned : NumExprBits;
SourceLocation RParenLoc;
};
class CXXNewExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class CXXNewExpr;
unsigned : NumExprBits;
/// Was the usage ::new, i.e. is the global new to be used?
unsigned IsGlobalNew : 1;
/// Do we allocate an array? If so, the first trailing "Stmt *" is the
/// size expression.
unsigned IsArray : 1;
/// Should the alignment be passed to the allocation function?
unsigned ShouldPassAlignment : 1;
/// If this is an array allocation, does the usual deallocation
/// function for the allocated type want to know the allocated size?
unsigned UsualArrayDeleteWantsSize : 1;
/// What kind of initializer do we have? Could be none, parens, or braces.
/// In storage, we distinguish between "none, and no initializer expr", and
/// "none, but an implicit initializer expr".
unsigned StoredInitializationStyle : 2;
/// True if the allocated type was expressed as a parenthesized type-id.
unsigned IsParenTypeId : 1;
/// The number of placement new arguments.
unsigned NumPlacementArgs;
};
class CXXDeleteExprBitfields {
friend class ASTStmtReader;
friend class CXXDeleteExpr;
unsigned : NumExprBits;
/// Is this a forced global delete, i.e. "::delete"?
unsigned GlobalDelete : 1;
/// Is this the array form of delete, i.e. "delete[]"?
unsigned ArrayForm : 1;
/// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
/// applied to pointer-to-array type (ArrayFormAsWritten will be false
/// while ArrayForm will be true).
unsigned ArrayFormAsWritten : 1;
/// Does the usual deallocation function for the element type require
/// a size_t argument?
unsigned UsualArrayDeleteWantsSize : 1;
/// Location of the expression.
SourceLocation Loc;
};
class TypeTraitExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class TypeTraitExpr;
unsigned : NumExprBits;
/// The kind of type trait, which is a value of a TypeTrait enumerator.
unsigned Kind : 8;
/// If this expression is not value-dependent, this indicates whether
/// the trait evaluated true or false.
unsigned Value : 1;
/// The number of arguments to this type trait. According to [implimits]
/// 8 bits would be enough, but we require (and test for) at least 16 bits
/// to mirror FunctionType.
unsigned NumArgs;
};
/// Per-node bits for DependentScopeDeclRefExpr. The leading unnamed
/// bitfield reserves the bits already used by ExprBitfields so that this
/// layout can overlay it in the union below.
class DependentScopeDeclRefExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class DependentScopeDeclRefExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
};
/// Per-node bits for CXXConstructExpr: construction flags, the
/// construction kind, and the expression's location.
class CXXConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXConstructExpr;
unsigned : NumExprBits;
unsigned Elidable : 1;
unsigned HadMultipleCandidates : 1;
unsigned ListInitialization : 1;
unsigned StdInitListInitialization : 1;
unsigned ZeroInitialization : 1;
/// Value of a construction-kind enumeration (3 bits wide); see
/// CXXConstructExpr for the enumerators.
unsigned ConstructionKind : 3;
SourceLocation Loc;
};
/// Per-node bits for ExprWithCleanups.
class ExprWithCleanupsBitfields {
friend class ASTStmtReader; // deserialization
friend class ExprWithCleanups;
unsigned : NumExprBits;
// When false, it must not have side effects.
unsigned CleanupsHaveSideEffects : 1;
/// Number of cleanup objects; sized to exactly fill the rest of the
/// 32-bit word shared with ExprBitfields and the flag above.
unsigned NumObjects : 32 - 1 - NumExprBits;
};
/// Per-node bits for CXXUnresolvedConstructExpr.
class CXXUnresolvedConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXUnresolvedConstructExpr;
unsigned : NumExprBits;
/// The number of arguments used to construct the type.
unsigned NumArgs;
};
/// Per-node bits for CXXDependentScopeMemberExpr.
class CXXDependentScopeMemberExprBitfields {
friend class ASTStmtReader;
friend class CXXDependentScopeMemberExpr;
unsigned : NumExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether this member expression has info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// See getFirstQualifierFoundInScope() and the comment listing
/// the trailing objects.
unsigned HasFirstQualifierFoundInScope : 1;
/// The location of the '->' or '.' operator.
SourceLocation OperatorLoc;
};
/// Per-node bits shared by OverloadExpr and its subclasses
/// (UnresolvedLookupExpr, UnresolvedMemberExpr below).
class OverloadExprBitfields {
friend class ASTStmtReader;
friend class OverloadExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// Padding used by the derived classes to store various bits. If you
/// need to add some data here, shrink this padding and add your data
/// above. NumOverloadExprBits also needs to be updated.
unsigned : 32 - NumExprBits - 1;
/// The number of results.
unsigned NumResults;
};
/// Number of bits consumed so far by OverloadExprBitfields; the derived
/// classes' layouts start their own bits after this offset.
enum { NumOverloadExprBits = NumExprBits + 1 };
/// Per-node bits for UnresolvedLookupExpr; overlays OverloadExprBitfields.
class UnresolvedLookupExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedLookupExpr;
unsigned : NumOverloadExprBits;
/// True if these lookup results should be extended by
/// argument-dependent lookup if this is the operand of a function call.
unsigned RequiresADL : 1;
/// True if these lookup results are overloaded. This is pretty trivially
/// rederivable if we urgently need to kill this field.
unsigned Overloaded : 1;
};
static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
"UnresolvedLookupExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
/// Per-node bits for UnresolvedMemberExpr; overlays OverloadExprBitfields.
class UnresolvedMemberExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedMemberExpr;
unsigned : NumOverloadExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether the lookup results contain an unresolved using declaration.
unsigned HasUnresolvedUsing : 1;
};
static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
"UnresolvedMemberExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
/// Per-node bits for CXXNoexceptExpr.
class CXXNoexceptExprBitfields {
friend class ASTStmtReader;
friend class CXXNoexceptExpr;
unsigned : NumExprBits;
unsigned Value : 1;
};
/// Per-node bits for SubstNonTypeTemplateParmExpr.
class SubstNonTypeTemplateParmExprBitfields {
friend class ASTStmtReader;
friend class SubstNonTypeTemplateParmExpr;
unsigned : NumExprBits;
/// The location of the non-type template parameter reference.
SourceLocation NameLoc;
};
/// Per-node bits for LambdaExpr.
class LambdaExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class LambdaExpr;
unsigned : NumExprBits;
/// The default capture kind, which is a value of type
/// LambdaCaptureDefault.
unsigned CaptureDefault : 2;
/// Whether this lambda had an explicit parameter list vs. an
/// implicit (and empty) parameter list.
unsigned ExplicitParams : 1;
/// Whether this lambda had the result type explicitly specified.
unsigned ExplicitResultType : 1;
/// The number of captures.
unsigned NumCaptures : 16;
};
/// Per-node bits for RequiresExpr.
class RequiresExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class RequiresExpr;
unsigned : NumExprBits;
unsigned IsSatisfied : 1;
SourceLocation RequiresKWLoc;
};
//===--- C++ Coroutines TS bitfields classes ---===//
/// Per-node bits for CoawaitExpr.
class CoawaitExprBitfields {
friend class CoawaitExpr;
unsigned : NumExprBits;
unsigned IsImplicit : 1;
};
//===--- Obj-C Expression bitfields classes ---===//
/// Per-node bits for ObjCIndirectCopyRestoreExpr.
class ObjCIndirectCopyRestoreExprBitfields {
friend class ObjCIndirectCopyRestoreExpr;
unsigned : NumExprBits;
unsigned ShouldCopy : 1;
};
//===--- Clang Extensions bitfields classes ---===//
/// Per-node bits for OpaqueValueExpr.
class OpaqueValueExprBitfields {
friend class ASTStmtReader;
friend class OpaqueValueExpr;
unsigned : NumExprBits;
/// The OVE is a unique semantic reference to its source expression if this
/// bit is set to true.
unsigned IsUnique : 1;
SourceLocation Loc;
};
/// Width of the Kind field in BoundsExprBitfields below.
enum { NumBoundsExprKindBits = 3 };
/// Per-node bits for BoundsExpr.
class BoundsExprBitfields {
friend class BoundsExpr;
unsigned : NumExprBits;
unsigned Kind : NumBoundsExprKindBits;
unsigned IsCompilerGenerated : 1;
};
/// NOTE(review): declared here but no corresponding Kind field exists in
/// InteropTypeExprBitfields below -- presumably consumed elsewhere; verify.
enum { NumInteropTypeExprKindBits = 1 };
/// Per-node bits for InteropTypeExpr.
class InteropTypeExprBitfields {
friend class InteropTypeExpr;
unsigned : NumExprBits;
unsigned IsCompilerGenerated : 1;
};
/// The union of all the per-class bitfield layouts above. Exactly one
/// member is meaningful for a given node (chosen by the node's dynamic
/// class); every layout begins with the same leading anonymous bits, so
/// the common StmtBits (and, for expressions, ExprBits) fields can be
/// read regardless of which member was written.
union {
// Same order as in StmtNodes.td.
// Statements
StmtBitfields StmtBits;
NullStmtBitfields NullStmtBits;
CompoundStmtBitfields CompoundStmtBits;
LabelStmtBitfields LabelStmtBits;
AttributedStmtBitfields AttributedStmtBits;
IfStmtBitfields IfStmtBits;
SwitchStmtBitfields SwitchStmtBits;
WhileStmtBitfields WhileStmtBits;
DoStmtBitfields DoStmtBits;
ForStmtBitfields ForStmtBits;
GotoStmtBitfields GotoStmtBits;
ContinueStmtBitfields ContinueStmtBits;
BreakStmtBitfields BreakStmtBits;
ReturnStmtBitfields ReturnStmtBits;
SwitchCaseBitfields SwitchCaseBits;
// Expressions
ExprBitfields ExprBits;
ConstantExprBitfields ConstantExprBits;
PredefinedExprBitfields PredefinedExprBits;
DeclRefExprBitfields DeclRefExprBits;
FloatingLiteralBitfields FloatingLiteralBits;
StringLiteralBitfields StringLiteralBits;
CharacterLiteralBitfields CharacterLiteralBits;
UnaryOperatorBitfields UnaryOperatorBits;
UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
ArrayOrMatrixSubscriptExprBitfields ArrayOrMatrixSubscriptExprBits;
ArraySubscriptExprBitfields ArraySubscriptExprBits;
CallExprBitfields CallExprBits;
MemberExprBitfields MemberExprBits;
CastExprBitfields CastExprBits;
BinaryOperatorBitfields BinaryOperatorBits;
InitListExprBitfields InitListExprBits;
ParenListExprBitfields ParenListExprBits;
GenericSelectionExprBitfields GenericSelectionExprBits;
PseudoObjectExprBitfields PseudoObjectExprBits;
SourceLocExprBitfields SourceLocExprBits;
// GNU Extensions.
StmtExprBitfields StmtExprBits;
// C++ Expressions
CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits;
CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
CXXThisExprBitfields CXXThisExprBits;
CXXThrowExprBitfields CXXThrowExprBits;
CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
CXXNewExprBitfields CXXNewExprBits;
CXXDeleteExprBitfields CXXDeleteExprBits;
TypeTraitExprBitfields TypeTraitExprBits;
DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
CXXConstructExprBitfields CXXConstructExprBits;
ExprWithCleanupsBitfields ExprWithCleanupsBits;
CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
OverloadExprBitfields OverloadExprBits;
UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
CXXNoexceptExprBitfields CXXNoexceptExprBits;
SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
LambdaExprBitfields LambdaExprBits;
RequiresExprBitfields RequiresExprBits;
// C++ Coroutines TS expressions
CoawaitExprBitfields CoawaitBits;
BoundsExprBitfields BoundsExprBits;
InteropTypeExprBitfields InteropTypeExprBits;
// Obj-C Expressions
ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
// Clang Extensions
OpaqueValueExprBitfields OpaqueValueExprBits;
};
public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
void* operator new(size_t bytes, const ASTContext& C,
unsigned alignment = 8);
/// Convenience overload: forwards to the ASTContext-reference form.
void* operator new(size_t bytes, const ASTContext* C,
unsigned alignment = 8) {
return operator new(bytes, *C, alignment);
}
/// Placement new: construct the node in caller-provided storage.
void *operator new(size_t bytes, void *mem) noexcept { return mem; }
/// The matching operator deletes are intentionally empty; see the
/// allocation comment above.
void operator delete(void *, const ASTContext &, unsigned) noexcept {}
void operator delete(void *, const ASTContext *, unsigned) noexcept {}
void operator delete(void *, size_t) noexcept {}
void operator delete(void *, void *) noexcept {}
public:
/// A placeholder type used to construct an empty shell of a
/// type, that will be filled in later (e.g., by some
/// de-serialization).
struct EmptyShell {};
protected:
/// Iterator for iterating over Stmt * arrays that contain only T *.
///
/// This is needed because AST nodes use Stmt* arrays to store
/// references to children (to be compatible with StmtIterator).
template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
struct CastIterator
: llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
std::random_access_iterator_tag, TPtr> {
using Base = typename CastIterator::iterator_adaptor_base;
CastIterator() : Base(nullptr) {}
CastIterator(StmtPtr *I) : Base(I) {}
/// Dereference casts the stored Stmt* down to T*, mapping null to null.
typename Base::value_type operator*() const {
return cast_or_null<T>(*this->I);
}
};
/// Const iterator for iterating over Stmt * arrays that contain only T *.
template <typename T>
using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;
using ExprIterator = CastIterator<Expr>;
using ConstExprIterator = ConstCastIterator<Expr>;
private:
/// Whether statistic collection is enabled.
static bool StatisticsEnabled;
protected:
/// Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
// Stmts are neither default-constructible, copyable, nor movable;
// they are allocated via the operator new overloads above.
Stmt() = delete;
Stmt(const Stmt &) = delete;
Stmt(Stmt &&) = delete;
Stmt &operator=(const Stmt &) = delete;
Stmt &operator=(Stmt &&) = delete;
Stmt(StmtClass SC) {
// Guard the size/alignment invariants the bitfield union relies on.
static_assert(sizeof(*this) <= 8,
"changing bitfields changed sizeof(Stmt)");
static_assert(sizeof(*this) % alignof(void *) == 0,
"Insufficient alignment!");
StmtBits.sClass = SC;
if (StatisticsEnabled) Stmt::addStmtClass(SC);
}
/// Return the dynamic class of this statement.
StmtClass getStmtClass() const {
return static_cast<StmtClass>(StmtBits.sClass);
}
/// Human-readable name of this node's dynamic class.
const char *getStmtClassName() const;
/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
SourceRange getSourceRange() const LLVM_READONLY;
SourceLocation getBeginLoc() const LLVM_READONLY;
SourceLocation getEndLoc() const LLVM_READONLY;
// global temp stats (until we have a per-module visitor)
static void addStmtClass(const StmtClass s);
static void EnableStatistics();
static void PrintStats();
/// Dumps the specified AST fragment and all subtrees to
/// \c llvm::errs().
void dump() const;
void dump(raw_ostream &OS, const ASTContext &Context) const;
/// \return Unique reproducible object identifier
int64_t getID(const ASTContext &Context) const;
/// dumpColor - same as dump(), but forces color highlighting.
void dumpColor() const;
/// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
/// back to its original source language syntax.
void dumpPretty(const ASTContext &Context) const;
void printPretty(raw_ostream &OS, PrinterHelper *Helper,
const PrintingPolicy &Policy, unsigned Indentation = 0,
StringRef NewlineSymbol = "\n",
const ASTContext *Context = nullptr) const;
/// Pretty-prints in JSON format.
void printJson(raw_ostream &Out, PrinterHelper *Helper,
const PrintingPolicy &Policy, bool AddQuotes) const;
/// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
/// works on systems with GraphViz (Mac OS X) or dot+gv installed.
void viewAST() const;
/// Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *IgnoreContainers(bool IgnoreCaptured = false);
/// Non-const overload implemented in terms of the const one.
const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
}
const Stmt *stripLabelLikeStatements() const;
Stmt *stripLabelLikeStatements() {
return const_cast<Stmt*>(
const_cast<const Stmt*>(this)->stripLabelLikeStatements());
}
/// Child Iterators: All subclasses must implement 'children'
/// to permit easy iteration over the substatements/subexpessions of an
/// AST node. This permits easy iteration over all nodes in the AST.
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;
using child_range = llvm::iterator_range<child_iterator>;
using const_child_range = llvm::iterator_range<const_child_iterator>;
child_range children();
const_child_range children() const {
auto Children = const_cast<Stmt *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_iterator child_begin() { return children().begin(); }
child_iterator child_end() { return children().end(); }
const_child_iterator child_begin() const { return children().begin(); }
const_child_iterator child_end() const { return children().end(); }
/// Produce a unique representation of the given statement.
///
/// \param ID once the profiling operation is complete, will contain
/// the unique representation of the given statement.
///
/// \param Context the AST context in which the statement resides
///
/// \param Canonical whether the profile should be based on the canonical
/// representation of this statement (e.g., where non-type template
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical) const;
/// Calculate a unique representation for a statement that is
/// stable across compiler invocations.
///
/// \param ID profile information will be stored in ID.
///
/// \param Hash an ODRHash object which will be called where pointers would
/// have been used in the Profile function.
void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
/// The group of declarations this statement carries.
DeclGroupRef DG;
/// Source extent of the declaration statement.
SourceLocation StartLoc, EndLoc;
public:
DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
: Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}
/// Build an empty declaration statement.
explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}
/// isSingleDecl - This method returns true if this DeclStmt refers
/// to a single Decl.
bool isSingleDecl() const { return DG.isSingleDecl(); }
const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
Decl *getSingleDecl() { return DG.getSingleDecl(); }
const DeclGroupRef getDeclGroup() const { return DG; }
DeclGroupRef getDeclGroup() { return DG; }
void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }
void setStartLoc(SourceLocation L) { StartLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DeclStmtClass;
}
// Iterators over subexpressions.
child_range children() {
return child_range(child_iterator(DG.begin(), DG.end()),
child_iterator(DG.end(), DG.end()));
}
const_child_range children() const {
auto Children = const_cast<DeclStmt *>(this)->children();
return const_child_range(Children);
}
// Iteration over the declarations in the group.
using decl_iterator = DeclGroupRef::iterator;
using const_decl_iterator = DeclGroupRef::const_iterator;
using decl_range = llvm::iterator_range<decl_iterator>;
using decl_const_range = llvm::iterator_range<const_decl_iterator>;
decl_range decls() { return decl_range(decl_begin(), decl_end()); }
decl_const_range decls() const {
return decl_const_range(decl_begin(), decl_end());
}
decl_iterator decl_begin() { return DG.begin(); }
decl_iterator decl_end() { return DG.end(); }
const_decl_iterator decl_begin() const { return DG.begin(); }
const_decl_iterator decl_end() const { return DG.end(); }
using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;
reverse_decl_iterator decl_rbegin() {
return reverse_decl_iterator(decl_end());
}
reverse_decl_iterator decl_rend() {
return reverse_decl_iterator(decl_begin());
}
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
private:
/// Optional where-clause attached to this statement; null when absent.
WhereClause *WClause;
public:
NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
: Stmt(NullStmtClass), WClause(nullptr) {
NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
setSemiLoc(L);
}
/// Build an empty null statement.
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty),
WClause(nullptr) {}
/// The ';' location is stored in NullStmtBits.
SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }
bool hasLeadingEmptyMacro() const {
return NullStmtBits.HasLeadingEmptyMacro;
}
SourceLocation getBeginLoc() const { return getSemiLoc(); }
SourceLocation getEndLoc() const { return getSemiLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == NullStmtClass;
}
// A null statement has no children.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
void setWhereClause(WhereClause *WC) { WClause = WC; }
WhereClause *getWhereClause() const { return WClause; }
};
// The kind of Checked C checking to do in a scope.
// The enumerators are distinct powers of two -- presumably so they can be
// combined as a bitmask; TODO(review) confirm against the users of this enum.
enum class CheckedScopeKind {
/// No checking.
Unchecked = 0x1,
/// Check properties for bounds safety.
Bounds = 0x2,
/// Check properties for bounds safety and preventing type confusion.
BoundsAndTypes = 0x4
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final : public Stmt,
private llvm::TrailingObjects<CompoundStmt, Stmt *> {
friend class ASTStmtReader;
friend TrailingObjects;
/// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
SourceLocation RBraceLoc;
// Written checked scope specifier.
unsigned WrittenCSS : 2;
// Inferred checked scope specifier, using information from parent
// scope also.
unsigned CSS : 2;
// Checked scope keyword (_Checked / _Unchecked) location.
SourceLocation CSSLoc;
// Checked scope modifier (_Bounds_only) location.
SourceLocation CSMLoc;
// Private: clients construct through Create() so the trailing Stmt*
// array can be allocated alongside the node.
CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB,
CheckedScopeSpecifier WrittenCSS = CSS_None,
CheckedScopeSpecifier CSS = CSS_Unchecked,
SourceLocation CSSLoc = SourceLocation(),
SourceLocation CSMLoc = SourceLocation());
explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty),
WrittenCSS(CSS_None), CSS(CSS_Unchecked), CSSLoc(), CSMLoc() {}
// Fills the trailing Stmt* storage; see Create()/deserialization.
void setStmts(ArrayRef<Stmt *> Stmts);
public:
static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt*> Stmts,
SourceLocation LB, SourceLocation RB,
CheckedScopeSpecifier WrittenCSS = CSS_None,
CheckedScopeSpecifier CSS = CSS_Unchecked,
SourceLocation CSSLoc = SourceLocation(),
SourceLocation CSMLoc = SourceLocation());
// Build an empty compound statement with a location.
explicit CompoundStmt(SourceLocation Loc)
: Stmt(CompoundStmtClass), RBraceLoc(Loc),
WrittenCSS(CSS_None), CSS(CSS_Unchecked), CSSLoc(Loc), CSMLoc(Loc) {
CompoundStmtBits.NumStmts = 0;
CompoundStmtBits.LBraceLoc = Loc;
}
// Build an empty compound statement.
static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);
bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
unsigned size() const { return CompoundStmtBits.NumStmts; }
/// The checked-scope specifier as written in source.
CheckedScopeSpecifier getWrittenCheckedSpecifier() const {
return (CheckedScopeSpecifier) WrittenCSS;
}
/// The effective (inferred) checked-scope specifier.
CheckedScopeSpecifier getCheckedSpecifier() const {
return (CheckedScopeSpecifier) CSS;
}
void setWrittenCheckedSpecifiers(CheckedScopeSpecifier NS) { WrittenCSS = NS; }
void setCheckedSpecifiers(CheckedScopeSpecifier NS) { CSS = NS; }
bool isCheckedScope() const { return CSS != CSS_Unchecked; }
// Iteration over the statements in the body (trailing storage).
using body_iterator = Stmt **;
using body_range = llvm::iterator_range<body_iterator>;
body_range body() { return body_range(body_begin(), body_end()); }
body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
body_iterator body_end() { return body_begin() + size(); }
Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }
Stmt *body_back() {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
using const_body_iterator = Stmt *const *;
using body_const_range = llvm::iterator_range<const_body_iterator>;
body_const_range body() const {
return body_const_range(body_begin(), body_end());
}
const_body_iterator body_begin() const {
return getTrailingObjects<Stmt *>();
}
const_body_iterator body_end() const { return body_begin() + size(); }
const Stmt *body_front() const {
return !body_empty() ? body_begin()[0] : nullptr;
}
const Stmt *body_back() const {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
using reverse_body_iterator = std::reverse_iterator<body_iterator>;
reverse_body_iterator body_rbegin() {
return reverse_body_iterator(body_end());
}
reverse_body_iterator body_rend() {
return reverse_body_iterator(body_begin());
}
using const_reverse_body_iterator =
std::reverse_iterator<const_body_iterator>;
const_reverse_body_iterator body_rbegin() const {
return const_reverse_body_iterator(body_end());
}
const_reverse_body_iterator body_rend() const {
return const_reverse_body_iterator(body_begin());
}
// Get the Stmt that StmtExpr would consider to be the result of this
// compound statement. This is used by StmtExpr to properly emulate the GCC
// compound expression extension, which ignores trailing NullStmts when
// getting the result of the expression.
// i.e. ({ 5;;; })
// ^^ ignored
// If we don't find something that isn't a NullStmt, just return the last
// Stmt.
Stmt *getStmtExprResult() {
for (auto *B : llvm::reverse(body())) {
if (!isa<NullStmt>(B))
return B;
}
return body_back();
}
const Stmt *getStmtExprResult() const {
return const_cast<CompoundStmt *>(this)->getStmtExprResult();
}
SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getEndLoc() const { return RBraceLoc; }
SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getRBracLoc() const { return RBraceLoc; }
SourceLocation getCheckedSpecifierLoc() const { return CSSLoc; }
SourceLocation getSpecifierModifierLoc() const { return CSMLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CompoundStmtClass;
}
// Iterators
child_range children() { return child_range(body_begin(), body_end()); }
const_child_range children() const {
return const_child_range(body_begin(), body_end());
}
};
// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
/// The location of the ":".
SourceLocation ColonLoc;
// The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
// SourceLocation KeywordLoc;
/// A pointer to the following CaseStmt or DefaultStmt class,
/// used by SwitchStmt.
SwitchCase *NextSwitchCase = nullptr;
SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
: Stmt(SC), ColonLoc(ColonLoc) {
setKeywordLoc(KWLoc);
}
/// Build an empty switch case; locations are filled in later.
SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }
SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
// Defined out of line below, once CaseStmt and DefaultStmt are complete.
inline Stmt *getSubStmt();
const Stmt *getSubStmt() const {
return const_cast<SwitchCase *>(this)->getSubStmt();
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
inline SourceLocation getEndLoc() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass ||
T->getStmtClass() == DefaultStmtClass;
}
};
/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
: public SwitchCase,
private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
friend TrailingObjects;
// CaseStmt is followed by several trailing objects, some of which optional.
// Note that it would be more convenient to put the optional trailing objects
// at the end but this would impact children().
// The trailing objects are in order:
//
// * A "Stmt *" for the LHS of the case statement. Always present.
//
// * A "Stmt *" for the RHS of the case statement. This is a GNU extension
// which allow ranges in cases statement of the form LHS ... RHS.
// Present if and only if caseStmtIsGNURange() is true.
//
// * A "Stmt *" for the substatement of the case statement. Always present.
//
// * A SourceLocation for the location of the ... if this is a case statement
// with a range. Present if and only if caseStmtIsGNURange() is true.
enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
enum { NumMandatoryStmtPtr = 2 };
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + caseStmtIsGNURange();
}
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return caseStmtIsGNURange();
}
// Offsets of the LHS / RHS / sub-statement slots in the trailing Stmt*
// array; the RHS slot exists only for GNU range cases, which shifts the
// sub-statement slot by one.
unsigned lhsOffset() const { return LhsOffset; }
unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }
/// Build a case statement assuming that the storage for the
/// trailing objects has been properly allocated.
CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
SourceLocation ellipsisLoc, SourceLocation colonLoc)
: SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
// Handle GNU case statements of the form LHS ... RHS.
bool IsGNURange = rhs != nullptr;
SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
setLHS(lhs);
// The sub-statement is attached later via setSubStmt().
setSubStmt(nullptr);
if (IsGNURange) {
setRHS(rhs);
setEllipsisLoc(ellipsisLoc);
}
}
/// Build an empty switch case statement.
explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
: SwitchCase(CaseStmtClass, Empty) {
SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
}
public:
/// Build a case statement.
static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
SourceLocation caseLoc, SourceLocation ellipsisLoc,
SourceLocation colonLoc);
/// Build an empty case statement.
static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);
/// True if this case statement is of the form case LHS ... RHS, which
/// is a GNU extension. In this case the RHS can be obtained with getRHS()
/// and the location of the ellipsis can be obtained with getEllipsisLoc().
bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }
SourceLocation getCaseLoc() const { return getKeywordLoc(); }
void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }
/// Get the location of the ... in a case statement of the form LHS ... RHS.
SourceLocation getEllipsisLoc() const {
return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
: SourceLocation();
}
/// Set the location of the ... in a case statement of the form LHS ... RHS.
/// Assert that this case statement is of this form.
void setEllipsisLoc(SourceLocation L) {
assert(
caseStmtIsGNURange() &&
"setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
*getTrailingObjects<SourceLocation>() = L;
}
Expr *getLHS() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
const Expr *getLHS() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
void setLHS(Expr *Val) {
getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
/// The RHS exists only for GNU range cases; null otherwise.
Expr *getRHS() {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
const Expr *getRHS() const {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
void setRHS(Expr *Val) {
assert(caseStmtIsGNURange() &&
"setRHS but this is not a case stmt of the form LHS ... RHS!");
getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
const Stmt *getSubStmt() const {
return getTrailingObjects<Stmt *>()[subStmtOffset()];
}
void setSubStmt(Stmt *S) {
getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
// Handle deeply nested case statements with iteration instead of recursion.
const CaseStmt *CS = this;
while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
CS = CS2;
return CS->getSubStmt()->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass;
}
// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
};
/// DefaultStmt - Represents the default label of a switch; see SwitchCase.
class DefaultStmt : public SwitchCase {
Stmt *SubStmt;
public:
DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
: SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}
/// Build an empty default statement.
explicit DefaultStmt(EmptyShell Empty)
: SwitchCase(DefaultStmtClass, Empty) {}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *S) { SubStmt = S; }
/// The 'default' keyword location is stored in SwitchCaseBits.
SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return SubStmt->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == DefaultStmtClass;
}
// Iterators
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
};
/// Dispatch to the end location of the concrete SwitchCase subclass.
/// classof() guarantees only CaseStmt and DefaultStmt reach here.
SourceLocation SwitchCase::getEndLoc() const {
  switch (getStmtClass()) {
  case CaseStmtClass:
    return cast<CaseStmt>(this)->getEndLoc();
  case DefaultStmtClass:
    return cast<DefaultStmt>(this)->getEndLoc();
  default:
    llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
  }
}
/// Dispatch to the sub-statement accessor of the concrete subclass.
/// classof() guarantees only CaseStmt and DefaultStmt reach here.
Stmt *SwitchCase::getSubStmt() {
  switch (getStmtClass()) {
  case CaseStmtClass:
    return cast<CaseStmt>(this)->getSubStmt();
  case DefaultStmtClass:
    return cast<DefaultStmt>(this)->getSubStmt();
  default:
    llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
  }
}
/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
using Stmt::Stmt;
private:
/// Optional where-clause attached to this statement; null when absent.
/// Default-initialized to null: the inherited Stmt constructors never
/// touch this member, so without the initializer getWhereClause() could
/// read an indeterminate pointer (cf. NullStmt, which nulls its WClause
/// in every constructor).
WhereClause *WClause = nullptr;
public:
const Expr *getExprStmt() const;
/// Non-const overload implemented in terms of the const one.
Expr *getExprStmt() {
const ValueStmt *ConstThis = this;
return const_cast<Expr*>(ConstThis->getExprStmt());
}
static bool classof(const Stmt *T) {
return T->getStmtClass() >= firstValueStmtConstant &&
T->getStmtClass() <= lastValueStmtConstant;
}
void setWhereClause(WhereClause *WC) { WClause = WC; }
WhereClause *getWhereClause() const { return WClause; }
};
/// LabelStmt - Represents a label, which has a substatement. For example:
/// foo: return;
class LabelStmt : public ValueStmt {
/// The declaration this label introduces/refers to.
LabelDecl *TheDecl;
/// The labeled statement.
Stmt *SubStmt;
public:
/// Build a label statement.
LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
: ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
setIdentLoc(IL);
}
/// Build an empty label statement.
explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}
/// The identifier's location is stored in LabelStmtBits.
SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }
LabelDecl *getDecl() const { return TheDecl; }
void setDecl(LabelDecl *D) { TheDecl = D; }
const char *getName() const;
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *SS) { SubStmt = SS; }
SourceLocation getBeginLoc() const { return getIdentLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == LabelStmtClass;
}
};
/// Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
/// [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
    : public ValueStmt,
      private llvm::TrailingObjects<AttributedStmt, const Attr *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  // The wrapped statement. The attributes themselves live in a trailing
  // array of "const Attr *" whose count is AttributedStmtBits.NumAttrs.
  Stmt *SubStmt;

  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
                 Stmt *SubStmt)
      : ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
    AttributedStmtBits.NumAttrs = Attrs.size();
    AttributedStmtBits.AttrLoc = Loc;
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  // Empty-shell constructor used during deserialization; attribute slots
  // are nulled here and filled in later by the AST reader.
  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
      : ValueStmt(AttributedStmtClass, Empty) {
    AttributedStmtBits.NumAttrs = NumAttrs;
    AttributedStmtBits.AttrLoc = SourceLocation{};
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  const Attr *const *getAttrArrayPtr() const {
    return getTrailingObjects<const Attr *>();
  }
  const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

  // Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
  ArrayRef<const Attr *> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
  }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  // Source range: from the attribute to the end of the wrapped statement.
  SourceLocation getBeginLoc() const { return getAttrLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  // The only child is the wrapped statement.
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};
/// IfStmt - This represents an if/then/else.
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // IfStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing
  // objects at then end but this would change the order of the children.
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact a "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //   Always present.
  //
  // * A "Stmt *" for the else statement.
  //   Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //   Present if and only if hasElseStorage().
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  enum { NumMandatoryStmtPtr = 2 };

  // Number of "Stmt *" trailing objects: two mandatory slots (condition and
  // then-statement) plus one per optional piece that is actually present.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }

  // A trailing SourceLocation exists only when there is an else clause.
  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return hasElseStorage();
  }

  // Offsets into the "Stmt *" trailing array. Optional pieces that are
  // absent shift everything after them down by one slot.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }

  /// Build an if/then/else statement.
  IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
         VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else);

  /// Build an empty if/then/else statement.
  explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);

public:
  /// Create an IfStmt.
  static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
                        bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
                        Stmt *Then, SourceLocation EL = SourceLocation(),
                        Stmt *Else = nullptr);

  /// Create an empty IfStmt optionally with storage for an else statement,
  /// condition variable and init expression.
  static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
                             bool HasInit);

  /// True if this IfStmt has the storage for an init statement.
  bool hasInitStorage() const { return IfStmtBits.HasInit; }

  /// True if this IfStmt has storage for a variable declaration.
  bool hasVarStorage() const { return IfStmtBits.HasVar; }

  /// True if this IfStmt has storage for an else statement.
  bool hasElseStorage() const { return IfStmtBits.HasElse; }

  // The condition is stored as a "Stmt *" slot but is always an Expr.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }

  const Stmt *getThen() const {
    return getTrailingObjects<Stmt *>()[thenOffset()];
  }

  void setThen(Stmt *Then) {
    getTrailingObjects<Stmt *>()[thenOffset()] = Then;
  }

  // Returns null when no else storage was allocated for this node.
  Stmt *getElse() {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  const Stmt *getElse() const {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  void setElse(Stmt *Else) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    getTrailingObjects<Stmt *>()[elseOffset()] = Else;
  }

  /// Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<IfStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable for this if statement.
  /// The if statement must have storage for the condition variable.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  // Returns null when no init storage was allocated for this node.
  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This if statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
  void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }

  SourceLocation getElseLoc() const {
    return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
                            : SourceLocation();
  }

  void setElseLoc(SourceLocation ElseLoc) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    *getTrailingObjects<SourceLocation>() = ElseLoc;
  }

  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  /// If this is an 'if constexpr', determine which substatement will be taken.
  /// Otherwise, or if the condition is value-dependent, returns None.
  Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;

  bool isObjCAvailabilityCheck() const;

  SourceLocation getBeginLoc() const { return getIfLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // The statement ends with its else branch when present, otherwise with
    // its then branch.
    if (getElse())
      return getElse()->getEndLoc();
    return getThen()->getEndLoc();
  }

  // Iterators over subexpressions. The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};
/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase;

  // SwitchStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  // Two mandatory "Stmt *" slots (condition and body) plus one per
  // optional piece that is present.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }

  // Offsets into the "Stmt *" trailing array; absent optional pieces
  // shift the later slots down.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond);

  /// Build a empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond);

  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);

  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

  // The condition is stored as a "Stmt *" slot but is always an Expr.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }

  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  // Returns null when no init storage was allocated for this node.
  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  /// case 0: break;
  /// // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }

  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }

  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }

  // Pushes SC onto the front of the intrusive case list; cases therefore
  // appear in reverse order of addition.
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }

  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // Fall back to the condition's end when the body is null.
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};
/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  SourceLocation LParenLoc, RParenLoc;

  // Offsets into the "Stmt *" trailing array; when the optional condition
  // variable is absent, the condition and body shift down one slot.
  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  // Two mandatory "Stmt *" slots (condition and body) plus the optional
  // condition-variable slot.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL, SourceLocation LParenLoc,
            SourceLocation RParenLoc);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL,
                           SourceLocation LParenLoc, SourceLocation RParenLoc);

  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);

  /// True if this WhileStmt has storage for a condition variable.
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }

  // The condition is stored as a "Stmt *" slot but is always an Expr.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }

  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }

  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};
/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  // Fixed-size child array indexed by these constants. The condition slot
  // is stored as a "Stmt *" but is in fact an "Expr *".
  enum { BODY, COND, END_EXPR };
  Stmt *SubExprs[END_EXPR];

  SourceLocation WhileLoc;
  SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
  /// Build a do-while statement. \p DL, \p WL and \p RP are the locations
  /// of the 'do' keyword, the 'while' keyword, and the closing ')'.
  DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
    setCond(Cond);
    setBody(Body);
    setDoLoc(DL);
  }

  /// Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }
  void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }

  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *Body) { SubExprs[BODY] = Body; }

  // The 'do' keyword location lives in the Stmt base's bit-fields.
  SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
  void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  // Source range: from the 'do' keyword to the closing ')' of the condition.
  SourceLocation getBeginLoc() const { return getDoLoc(); }
  SourceLocation getEndLoc() const { return getRParenLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  // Fixed-size child array. COND and INC are stored as "Stmt *" but are in
  // fact "Expr *"; CONDVAR is a "DeclStmt *" when present.
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

  Stmt *getInit() { return SubExprs[INIT]; }

  /// Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Expr *getInc()  { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }

  const Stmt *getInit() const { return SubExprs[INIT]; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Expr *getInc()  const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  const Stmt *getBody() const { return SubExprs[BODY]; }

  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  // The 'for' keyword location lives in the Stmt base's bit-fields.
  SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
  void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  // Source range: from the 'for' keyword to the end of the body.
  SourceLocation getBeginLoc() const { return getForLoc(); }
  SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};
/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;
  SourceLocation LabelLoc;

public:
  /// Build a goto statement targeting \p label. \p GL is the location of
  /// the 'goto' keyword, \p LL the location of the label name. The keyword
  /// location is kept in the GotoStmt bit-fields of the Stmt base.
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
    GotoStmtBits.GotoLoc = GL;
  }

  /// Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }

  void setLabel(LabelDecl *D) { Label = D; }
  LabelDecl *getLabel() const { return Label; }

  void setLabelLoc(SourceLocation L) { LabelLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }

  // The statement spans from the 'goto' keyword through the label name.
  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const { return getLabelLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators: a direct goto has no child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
  SourceLocation StarLoc; // Location of the '*' before the target expression.
  Stmt *Target;           // Stored as "Stmt *" but is in fact an "Expr *".

public:
  /// Build an indirect goto. The 'goto' keyword location is kept in the
  /// GotoStmt bit-fields of the Stmt base.
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
      : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
    setTarget(target);
    setGotoLoc(gotoLoc);
  }

  /// Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
      : Stmt(IndirectGotoStmtClass, Empty) {}

  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
  const Expr *getTarget() const {
    return reinterpret_cast<const Expr *>(Target);
  }
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
  }

  // Source range: from the 'goto' keyword to the end of the target expression.
  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators: the only child is the target expression.
  child_range children() { return child_range(&Target, &Target + 1); }

  const_child_range children() const {
    return const_child_range(&Target, &Target + 1);
  }
};
/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
  /// Build a 'continue' statement; \p CL is the keyword location, stored
  /// in the ContinueStmt bit-fields of the Stmt base.
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
    ContinueStmtBits.ContinueLoc = CL;
  }

  /// Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }
  SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }

  // A 'continue' occupies exactly the keyword itself.
  SourceLocation getBeginLoc() const { return getContinueLoc(); }
  SourceLocation getEndLoc() const { return getContinueLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators: 'continue' has no child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
  /// Build a 'break' statement; \p BL is the keyword location, stored in
  /// the BreakStmt bit-fields of the Stmt base.
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
    BreakStmtBits.BreakLoc = BL;
  }

  /// Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

  void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }
  SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }

  // A 'break' occupies exactly the keyword itself.
  SourceLocation getBeginLoc() const { return getBreakLoc(); }
  SourceLocation getEndLoc() const { return getBreakLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators: 'break' has no child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// ReturnStmt - This represents a return, optionally of an expression:
/// return;
/// return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void. We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final
    : public Stmt,
      private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
  friend TrailingObjects;

  /// The return expression.
  Stmt *RetExpr; // Stored as "Stmt *" but is in fact an "Expr *"; may be null.

  // ReturnStmt is followed optionally by a trailing "const VarDecl *"
  // for the NRVO candidate. Present if and only if hasNRVOCandidate().

  /// True if this ReturnStmt has storage for an NRVO candidate.
  bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }

  unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
    return hasNRVOCandidate();
  }

  /// Build a return statement.
  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);

  /// Build an empty return statement.
  explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);

public:
  /// Create a return statement.
  static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
                            const VarDecl *NRVOCandidate);

  /// Create an empty return statement, optionally with
  /// storage for an NRVO candidate.
  static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);

  Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
  const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }

  /// Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const {
    return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
                              : nullptr;
  }

  /// Set the variable that might be used for the named return value
  /// optimization. The return statement must have storage for it,
  /// which is the case if and only if hasNRVOCandidate() is true.
  void setNRVOCandidate(const VarDecl *Var) {
    assert(hasNRVOCandidate() &&
           "This return statement has no storage for an NRVO candidate!");
    *getTrailingObjects<const VarDecl *>() = Var;
  }

  SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
  void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }

  SourceLocation getBeginLoc() const { return getReturnLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // A bare "return;" ends at the keyword itself.
    return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators: the optional return expression is the only child.
  child_range children() {
    if (RetExpr)
      return child_range(&RetExpr, &RetExpr + 1);
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    if (RetExpr)
      return const_child_range(&RetExpr, &RetExpr + 1);
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
///
/// It stores the properties common to both flavors of inline assembly:
/// the 'asm' keyword location, simple/volatile flags, operand counts and a
/// flat operand array (outputs first, then inputs).
class AsmStmt : public Stmt {
protected:
friend class ASTStmtReader;
/// Location of the 'asm' keyword.
SourceLocation AsmLoc;
/// True if the assembly statement does not have any input or output
/// operands.
bool IsSimple;
/// If true, treat this inline assembly as having side effects.
/// This assembly statement should not be optimized, deleted or moved.
bool IsVolatile;
unsigned NumOutputs;
unsigned NumInputs;
unsigned NumClobbers;
/// Flat operand array owned by the derived class: indices
/// [0, NumOutputs) are outputs, [NumOutputs, NumOutputs+NumInputs) inputs.
Stmt **Exprs = nullptr;
AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
: Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
NumOutputs(numoutputs), NumInputs(numinputs),
NumClobbers(numclobbers) {}
public:
/// Build an empty inline-assembly statement.
explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}
SourceLocation getAsmLoc() const { return AsmLoc; }
void setAsmLoc(SourceLocation L) { AsmLoc = L; }
bool isSimple() const { return IsSimple; }
void setSimple(bool V) { IsSimple = V; }
bool isVolatile() const { return IsVolatile; }
void setVolatile(bool V) { IsVolatile = V; }
// NOTE: the base class reports invalid (empty) source locations; the
// derived GCCAsmStmt/MSAsmStmt provide the real ranges.
SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
SourceLocation getEndLoc() const LLVM_READONLY { return {}; }
//===--- Asm String Analysis ---===//
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
unsigned getNumOutputs() const { return NumOutputs; }
/// getOutputConstraint - Return the constraint string for the specified
/// output operand. All output constraints are known to be non-empty (either
/// '=' or '+').
StringRef getOutputConstraint(unsigned i) const;
/// isOutputPlusConstraint - Return true if the specified output constraint
/// is a "+" constraint (which is both an input and an output) or false if it
/// is an "=" constraint (just an output).
bool isOutputPlusConstraint(unsigned i) const {
return getOutputConstraint(i)[0] == '+';
}
const Expr *getOutputExpr(unsigned i) const;
/// getNumPlusOperands - Return the number of output operands that have a "+"
/// constraint.
unsigned getNumPlusOperands() const;
//===--- Input operands ---===//
unsigned getNumInputs() const { return NumInputs; }
/// getInputConstraint - Return the specified input constraint. Unlike output
/// constraints, these can be empty.
StringRef getInputConstraint(unsigned i) const;
const Expr *getInputExpr(unsigned i) const;
//===--- Other ---===//
unsigned getNumClobbers() const { return NumClobbers; }
StringRef getClobber(unsigned i) const;
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass ||
T->getStmtClass() == MSAsmStmtClass;
}
// Input expr iterators: inputs follow the outputs in the flat Exprs array.
using inputs_iterator = ExprIterator;
using const_inputs_iterator = ConstExprIterator;
using inputs_range = llvm::iterator_range<inputs_iterator>;
using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;
inputs_iterator begin_inputs() {
return &Exprs[0] + NumOutputs;
}
inputs_iterator end_inputs() {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }
const_inputs_iterator begin_inputs() const {
return &Exprs[0] + NumOutputs;
}
const_inputs_iterator end_inputs() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_const_range inputs() const {
return inputs_const_range(begin_inputs(), end_inputs());
}
// Output expr iterators: outputs start at index 0 of Exprs.
using outputs_iterator = ExprIterator;
using const_outputs_iterator = ConstExprIterator;
using outputs_range = llvm::iterator_range<outputs_iterator>;
using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;
outputs_iterator begin_outputs() {
return &Exprs[0];
}
outputs_iterator end_outputs() {
return &Exprs[0] + NumOutputs;
}
outputs_range outputs() {
return outputs_range(begin_outputs(), end_outputs());
}
const_outputs_iterator begin_outputs() const {
return &Exprs[0];
}
const_outputs_iterator end_outputs() const {
return &Exprs[0] + NumOutputs;
}
outputs_const_range outputs() const {
return outputs_const_range(begin_outputs(), end_outputs());
}
// Children are all operand expressions (outputs then inputs).
child_range children() {
return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
const_child_range children() const {
return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
};
/// This represents a GCC inline-assembly statement extension.
///
/// The Names/Constraints arrays are indexed like Exprs: outputs first, then
/// inputs; label identifiers (for "asm goto") follow the inputs in Names.
class GCCAsmStmt : public AsmStmt {
friend class ASTStmtReader;
/// Location of the closing ')' of the asm statement.
SourceLocation RParenLoc;
StringLiteral *AsmStr;
// FIXME: If we wanted to, we could allocate all of these in one big array.
StringLiteral **Constraints = nullptr;
StringLiteral **Clobbers = nullptr;
IdentifierInfo **Names = nullptr;
/// Number of goto labels; non-zero only for "asm goto" statements.
unsigned NumLabels = 0;
public:
GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
bool isvolatile, unsigned numoutputs, unsigned numinputs,
IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
StringLiteral *asmstr, unsigned numclobbers,
StringLiteral **clobbers, unsigned numlabels,
SourceLocation rparenloc);
/// Build an empty inline-assembly statement.
explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
//===--- Asm String Analysis ---===//
const StringLiteral *getAsmString() const { return AsmStr; }
StringLiteral *getAsmString() { return AsmStr; }
void setAsmString(StringLiteral *E) { AsmStr = E; }
/// AsmStringPiece - this is part of a decomposed asm string specification
/// (for use with the AnalyzeAsmString function below). An asm string is
/// considered to be a concatenation of these parts.
class AsmStringPiece {
public:
enum Kind {
String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
Operand // Operand reference, with optional modifier %c4.
};
private:
Kind MyKind;
std::string Str;
unsigned OperandNo;
// Source range for operand references.
CharSourceRange Range;
public:
AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
SourceLocation End)
: MyKind(Operand), Str(S), OperandNo(OpNo),
Range(CharSourceRange::getCharRange(Begin, End)) {}
bool isString() const { return MyKind == String; }
bool isOperand() const { return MyKind == Operand; }
const std::string &getString() const { return Str; }
unsigned getOperandNo() const {
assert(isOperand());
return OperandNo;
}
CharSourceRange getRange() const {
assert(isOperand() && "Range is currently used only for Operands.");
return Range;
}
/// getModifier - Get the modifier for this operand, if present. This
/// returns '\0' if there was no modifier.
char getModifier() const;
};
/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
/// it into pieces. If the asm string is erroneous, emit errors and return
/// true, otherwise return false. This handles canonicalization and
/// translation of strings from GCC syntax to LLVM IR syntax, and handles
/// flattening of named references like %[foo] to Operand AsmStringPiece's.
unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
const ASTContext &C, unsigned &DiagOffs) const;
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
/// Symbolic name of output i, or null if it has none.
IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }
StringRef getOutputName(unsigned i) const {
if (IdentifierInfo *II = getOutputIdentifier(i))
return II->getName();
return {};
}
StringRef getOutputConstraint(unsigned i) const;
const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
return Constraints[i];
}
StringLiteral *getOutputConstraintLiteral(unsigned i) {
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
// Inputs are stored after the outputs in Names/Constraints, hence
// the "+ NumOutputs" offsets below.
IdentifierInfo *getInputIdentifier(unsigned i) const {
return Names[i + NumOutputs];
}
StringRef getInputName(unsigned i) const {
if (IdentifierInfo *II = getInputIdentifier(i))
return II->getName();
return {};
}
StringRef getInputConstraint(unsigned i) const;
const StringLiteral *getInputConstraintLiteral(unsigned i) const {
return Constraints[i + NumOutputs];
}
StringLiteral *getInputConstraintLiteral(unsigned i) {
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
}
//===--- Labels ---===//
bool isAsmGoto() const {
return NumLabels > 0;
}
unsigned getNumLabels() const {
return NumLabels;
}
// Label identifiers come after the outputs and inputs in Names.
IdentifierInfo *getLabelIdentifier(unsigned i) const {
return Names[i + NumOutputs + NumInputs];
}
AddrLabelExpr *getLabelExpr(unsigned i) const;
StringRef getLabelName(unsigned i) const;
using labels_iterator = CastIterator<AddrLabelExpr>;
using const_labels_iterator = ConstCastIterator<AddrLabelExpr>;
using labels_range = llvm::iterator_range<labels_iterator>;
using labels_const_range = llvm::iterator_range<const_labels_iterator>;
labels_iterator begin_labels() {
return &Exprs[0] + NumOutputs + NumInputs;
}
labels_iterator end_labels() {
return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
}
labels_range labels() {
return labels_range(begin_labels(), end_labels());
}
const_labels_iterator begin_labels() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
const_labels_iterator end_labels() const {
return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
}
labels_const_range labels() const {
return labels_const_range(begin_labels(), end_labels());
}
private:
// Used by deserialization to fill in all operand arrays at once.
void setOutputsAndInputsAndClobbers(const ASTContext &C,
IdentifierInfo **Names,
StringLiteral **Constraints,
Stmt **Exprs,
unsigned NumOutputs,
unsigned NumInputs,
unsigned NumLabels,
StringLiteral **Clobbers,
unsigned NumClobbers);
public:
//===--- Other ---===//
/// getNamedOperand - Given a symbolic operand reference like %[foo],
/// translate this into a numeric value needed to reference the same operand.
/// This returns -1 if the operand name is invalid.
int getNamedOperand(StringRef SymbolicName) const;
StringRef getClobber(unsigned i) const;
StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
const StringLiteral *getClobberStringLiteral(unsigned i) const {
return Clobbers[i];
}
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass;
}
};
/// This represents a Microsoft inline-assembly statement extension.
///
/// Unlike GCCAsmStmt, the asm string, constraints and clobbers are plain
/// StringRefs (copied into ASTContext memory by initialize()), and the
/// statement records the raw token stream of the asm block.
class MSAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation LBraceLoc, EndLoc;
StringRef AsmStr;
unsigned NumAsmToks = 0;
Token *AsmToks = nullptr;
StringRef *Constraints = nullptr;
StringRef *Clobbers = nullptr;
public:
MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
SourceLocation lbraceloc, bool issimple, bool isvolatile,
ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
ArrayRef<StringRef> constraints,
ArrayRef<Expr*> exprs, StringRef asmstr,
ArrayRef<StringRef> clobbers, SourceLocation endloc);
/// Build an empty MS-style inline-assembly statement.
explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}
SourceLocation getLBraceLoc() const { return LBraceLoc; }
void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
/// True if the asm block was written with braces: __asm { ... }.
bool hasBraces() const { return LBraceLoc.isValid(); }
unsigned getNumAsmToks() { return NumAsmToks; }
Token *getAsmToks() { return AsmToks; }
//===--- Asm String Analysis ---===//
StringRef getAsmString() const { return AsmStr; }
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
StringRef getOutputConstraint(unsigned i) const {
assert(i < NumOutputs);
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
// Input constraints are stored after the output constraints.
StringRef getInputConstraint(unsigned i) const {
assert(i < NumInputs);
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
}
//===--- Other ---===//
ArrayRef<StringRef> getAllConstraints() const {
return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
}
ArrayRef<StringRef> getClobbers() const {
return llvm::makeArrayRef(Clobbers, NumClobbers);
}
ArrayRef<Expr*> getAllExprs() const {
return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
NumInputs + NumOutputs);
}
StringRef getClobber(unsigned i) const { return getClobbers()[i]; }
private:
// Copies the given data into ASTContext-owned storage.
void initialize(const ASTContext &C, StringRef AsmString,
ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);
public:
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == MSAsmStmtClass;
}
child_range children() {
return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
const_child_range children() const {
return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
};
/// Represents a Windows SEH __except handler: the filter expression and the
/// handler block of a __try/__except statement.
class SEHExceptStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
/// Location of the '__except' keyword.
SourceLocation Loc;
// Children[FILTER_EXPR] is the filter expression, Children[BLOCK] the body.
Stmt *Children[2];
enum { FILTER_EXPR, BLOCK };
SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}
public:
static SEHExceptStmt* Create(const ASTContext &C,
SourceLocation ExceptLoc,
Expr *FilterExpr,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }
SourceLocation getExceptLoc() const { return Loc; }
SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }
Expr *getFilterExpr() const {
return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
}
CompoundStmt *getBlock() const {
return cast<CompoundStmt>(Children[BLOCK]);
}
child_range children() {
return child_range(Children, Children+2);
}
const_child_range children() const {
return const_child_range(Children, Children + 2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHExceptStmtClass;
}
};
/// Represents a Windows SEH __finally block attached to a __try statement.
class SEHFinallyStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
/// Location of the '__finally' keyword.
SourceLocation Loc;
Stmt *Block;
SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}
public:
static SEHFinallyStmt* Create(const ASTContext &C,
SourceLocation FinallyLoc,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }
SourceLocation getFinallyLoc() const { return Loc; }
SourceLocation getEndLoc() const { return Block->getEndLoc(); }
CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }
child_range children() {
return child_range(&Block,&Block+1);
}
const_child_range children() const {
return const_child_range(&Block, &Block + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHFinallyStmtClass;
}
};
/// Represents a Windows SEH __try statement together with its handler,
/// which is either a SEHExceptStmt or a SEHFinallyStmt.
class SEHTryStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
bool IsCXXTry;
SourceLocation TryLoc;
// Children[TRY] is the guarded block, Children[HANDLER] the handler.
Stmt *Children[2];
enum { TRY = 0, HANDLER = 1 };
SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
SourceLocation TryLoc,
Stmt *TryBlock,
Stmt *Handler);
explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}
public:
static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }
SourceLocation getTryLoc() const { return TryLoc; }
SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }
bool getIsCXXTry() const { return IsCXXTry; }
CompoundStmt* getTryBlock() const {
return cast<CompoundStmt>(Children[TRY]);
}
Stmt *getHandler() const { return Children[HANDLER]; }
/// Returns 0 if not defined
SEHExceptStmt *getExceptHandler() const;
SEHFinallyStmt *getFinallyHandler() const;
child_range children() {
return child_range(Children, Children+2);
}
const_child_range children() const {
return const_child_range(Children, Children + 2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHTryStmtClass;
}
};
/// Represents a __leave statement.
///
/// A __leave statement has no children; it only records the location of the
/// '__leave' keyword, which serves as both begin and end location.
class SEHLeaveStmt : public Stmt {
SourceLocation LeaveLoc;
public:
explicit SEHLeaveStmt(SourceLocation LL)
: Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}
/// Build an empty __leave statement.
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}
SourceLocation getLeaveLoc() const { return LeaveLoc; }
void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }
SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHLeaveStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
///   compute();
/// }
/// @endcode
///
/// Storage layout: the object is followed in memory by NumCaptures capture
/// initialization expressions plus the captured statement (see
/// getStoredStmts()), and then by the Capture descriptors
/// (see getStoredCaptures()).
class CapturedStmt : public Stmt {
public:
/// The different capture forms: by 'this', by reference, capture for
/// variable-length array type etc.
enum VariableCaptureKind {
VCK_This,
VCK_ByRef,
VCK_ByCopy,
VCK_VLAType,
};
/// Describes the capture of either a variable, or 'this', or
/// variable-length array type.
class Capture {
// Variable pointer plus the capture kind packed into the low bits.
llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
SourceLocation Loc;
public:
friend class ASTStmtReader;
/// Create a new capture.
///
/// \param Loc The source location associated with this capture.
///
/// \param Kind The kind of capture (this, ByRef, ...).
///
/// \param Var The variable being captured, or null if capturing this.
Capture(SourceLocation Loc, VariableCaptureKind Kind,
VarDecl *Var = nullptr);
/// Determine the kind of capture.
VariableCaptureKind getCaptureKind() const;
/// Retrieve the source location at which the variable or 'this' was
/// first used.
SourceLocation getLocation() const { return Loc; }
/// Determine whether this capture handles the C++ 'this' pointer.
bool capturesThis() const { return getCaptureKind() == VCK_This; }
/// Determine whether this capture handles a variable (by reference).
bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }
/// Determine whether this capture handles a variable by copy.
bool capturesVariableByCopy() const {
return getCaptureKind() == VCK_ByCopy;
}
/// Determine whether this capture handles a variable-length array
/// type.
bool capturesVariableArrayType() const {
return getCaptureKind() == VCK_VLAType;
}
/// Retrieve the declaration of the variable being captured.
///
/// This operation is only valid if this capture captures a variable.
VarDecl *getCapturedVar() const;
};
private:
/// The number of variable captured, including 'this'.
unsigned NumCaptures;
/// The pointer part is the implicit outlined function and the
/// int part is the captured region kind, 'CR_Default' etc.
llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;
/// The record for captured variables, a RecordDecl or CXXRecordDecl.
RecordDecl *TheRecordDecl = nullptr;
/// Construct a captured statement.
CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);
/// Construct an empty captured statement.
CapturedStmt(EmptyShell Empty, unsigned NumCaptures);
// Trailing Stmt* storage: NumCaptures init exprs, then the captured stmt.
Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }
Stmt *const *getStoredStmts() const {
return reinterpret_cast<Stmt *const *>(this + 1);
}
Capture *getStoredCaptures() const;
void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }
public:
friend class ASTStmtReader;
static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
CapturedRegionKind Kind,
ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits,
CapturedDecl *CD, RecordDecl *RD);
static CapturedStmt *CreateDeserialized(const ASTContext &Context,
unsigned NumCaptures);
/// Retrieve the statement being captured.
Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }
/// Retrieve the outlined function declaration.
CapturedDecl *getCapturedDecl();
const CapturedDecl *getCapturedDecl() const;
/// Set the outlined function declaration.
void setCapturedDecl(CapturedDecl *D);
/// Retrieve the captured region kind.
CapturedRegionKind getCapturedRegionKind() const;
/// Set the captured region kind.
void setCapturedRegionKind(CapturedRegionKind Kind);
/// Retrieve the record declaration for captured variables.
const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }
/// Set the record declaration for captured variables.
void setCapturedRecordDecl(RecordDecl *D) {
assert(D && "null RecordDecl");
TheRecordDecl = D;
}
/// True if this variable has been captured.
bool capturesVariable(const VarDecl *Var) const;
/// An iterator that walks over the captures.
using capture_iterator = Capture *;
using const_capture_iterator = const Capture *;
using capture_range = llvm::iterator_range<capture_iterator>;
using capture_const_range = llvm::iterator_range<const_capture_iterator>;
capture_range captures() {
return capture_range(capture_begin(), capture_end());
}
capture_const_range captures() const {
return capture_const_range(capture_begin(), capture_end());
}
/// Retrieve an iterator pointing to the first capture.
capture_iterator capture_begin() { return getStoredCaptures(); }
const_capture_iterator capture_begin() const { return getStoredCaptures(); }
/// Retrieve an iterator pointing past the end of the sequence of
/// captures.
capture_iterator capture_end() const {
return getStoredCaptures() + NumCaptures;
}
/// Retrieve the number of captures, including 'this'.
unsigned capture_size() const { return NumCaptures; }
/// Iterator that walks over the capture initialization arguments.
using capture_init_iterator = Expr **;
using capture_init_range = llvm::iterator_range<capture_init_iterator>;
/// Const iterator that walks over the capture initialization
/// arguments.
using const_capture_init_iterator = Expr *const *;
using const_capture_init_range =
llvm::iterator_range<const_capture_init_iterator>;
capture_init_range capture_inits() {
return capture_init_range(capture_init_begin(), capture_init_end());
}
const_capture_init_range capture_inits() const {
return const_capture_init_range(capture_init_begin(), capture_init_end());
}
/// Retrieve the first initialization argument.
capture_init_iterator capture_init_begin() {
return reinterpret_cast<Expr **>(getStoredStmts());
}
const_capture_init_iterator capture_init_begin() const {
return reinterpret_cast<Expr *const *>(getStoredStmts());
}
/// Retrieve the iterator pointing one past the last initialization
/// argument.
capture_init_iterator capture_init_end() {
return capture_init_begin() + NumCaptures;
}
const_capture_init_iterator capture_init_end() const {
return capture_init_begin() + NumCaptures;
}
// Source range is delegated to the captured statement itself.
SourceLocation getBeginLoc() const LLVM_READONLY {
return getCapturedStmt()->getBeginLoc();
}
SourceLocation getEndLoc() const LLVM_READONLY {
return getCapturedStmt()->getEndLoc();
}
SourceRange getSourceRange() const LLVM_READONLY {
return getCapturedStmt()->getSourceRange();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CapturedStmtClass;
}
child_range children();
const_child_range children() const;
};
} // namespace clang
#endif // LLVM_CLANG_AST_STMT_H
|
GB_unop__acosh_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__acosh_fp32_fp32
// op(A') function: GB_unop_tran__acosh_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = acoshf (aij)
// These macros instantiate the generic templates (GB_unop_transpose.c etc.)
// for the acosh operator on fp32 input and fp32 output.
// A (input) entry type
#define GB_ATYPE \
float
// C (output) entry type
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// access the output entry at position p
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = acoshf (x) ;
// casting (trivial here: float to float)
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = acoshf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ACOSH || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Entrywise Cx [k] = acoshf (Ax [k]) over anz entries, parallelized with
// OpenMP. Both arrays are float, so the cast is a no-op. Returns
// GrB_NO_VALUE when the operator/type is disabled at compile time.
GrB_Info GB_unop_apply__acosh_fp32_fp32
(
    float *Cx,          // Cx and Ax may be aliased
    const float *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // cast (no-op) then apply: same sequence as the generated template
        Cx [k] = acoshf (Ax [k]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The actual loop lives in the shared template GB_unop_transpose.c, which is
// specialized via the GB_* macros defined above and included textually here.
GrB_Info GB_unop_tran__acosh_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// phase 2: fill C (phase 1, counting, is done elsewhere)
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
salted_sha1_fmt_plug.c | /*
* generic salted-sha1 support for LDAP style password storage
*
* Copyright (c) 2003 Simon Marechal, salt length fixes (c) 2012 magnum
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_saltedsha;
#elif FMT_REGISTERS_H
john_register_one(&fmt_saltedsha);
#else
#include <string.h>
#include "misc.h"
#include "formats.h"
#include "arch.h"
#include "options.h"
#include "johnswap.h"
#include "salted_sha1_common.h"
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA1)
#endif
#include "simd-intrinsics.h"
#include "common.h"
#include "sha.h"
#include "base64_convert.h"
#ifdef _OPENMP
#ifdef SIMD_COEF_64
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include <omp.h>
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Salted-SHA1"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH (55-MAX_SALT_LEN)
#define BINARY_ALIGN 4
#define SALT_SIZE (MAX_SALT_LEN + sizeof(unsigned int))
#define SALT_ALIGN 4
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#define FMT_IS_BE
#include "common-simd-getpos.h"
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
/* Decoded salt: raw bytes plus their length. The union lets salt_hash()
 * read the first 32 bits of the salt directly as an integer. Layout must
 * stay in sync with SALT_SIZE/SALT_ALIGN above. */
struct s_salt
{
unsigned int len;
union {
unsigned char c[MAX_SALT_LEN+1];
uint32_t w32;
} data;
};
/* Salt selected by set_salt() for the next crypt_all() call. */
static struct s_salt *saved_salt;
#ifdef SIMD_COEF_32
/* SIMD path: keys are stored pre-interleaved in SHA-1 input-block layout. */
static uint32_t (*saved_key)[SHA_BUF_SIZ*NBKEYS];
static uint32_t (*crypt_key)[BINARY_SIZE/4*NBKEYS];
static unsigned int *saved_len;
/* Salt length used in the previous crypt_all(); lets set_onesalt() zero
 * only the bytes a longer previous salt may have left behind. */
static int last_salt_size;
#else
/* Scalar path: plain C strings and one digest per candidate. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_key)[BINARY_SIZE / 4];
static unsigned int *saved_len;
#endif
/* Allocate per-candidate buffers, scaling key counts for OpenMP.
 * Note the order: min_keys is multiplied by the thread count only, while
 * max_keys is additionally multiplied by OMP_SCALE. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
#ifndef SIMD_COEF_32
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_key));
#else
/* SIMD buffers hold NBKEYS candidates per element and must be aligned. */
saved_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS,
sizeof(*saved_key), MEM_ALIGN_SIMD);
crypt_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS,
sizeof(*crypt_key), MEM_ALIGN_SIMD);
#endif
saved_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_len));
}
/* Release every buffer allocated by init(). The three buffers are
 * independent, so the release order does not matter. */
static void done(void)
{
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
	MEM_FREE(crypt_key);
}
/* Decode the base64 digest part of a "{SSHA}..." hash into raw bytes.
 * Returns a pointer to a static (lazily allocated) buffer; on the SIMD
 * little-endian path the digest words are byte-swapped to match the
 * layout SIMDSHA1body produces. */
static void * get_binary(char *ciphertext) {
static char *realcipher;
if (!realcipher) realcipher = mem_alloc_tiny(CIPHERTEXT_LENGTH, MEM_ALIGN_WORD);
ciphertext += NSLDAP_MAGIC_LENGTH;
memset(realcipher, 0, BINARY_SIZE);
base64_convert(ciphertext, e_b64_mime, strlen(ciphertext), realcipher, e_b64_raw, CIPHERTEXT_LENGTH, flg_Base64_DONOT_NULL_TERMINATE, 0);
#if defined(SIMD_COEF_32) && ARCH_LITTLE_ENDIAN==1
alter_endianity((unsigned char *)realcipher, BINARY_SIZE);
#endif
return (void *)realcipher;
}
#define SET_SAVED_LEN
#include "common-simd-setkey32.h"
/*
 * Decode the base64 payload of an NSLDAP-style hash and extract the salt:
 * the decoded bytes are digest||salt, so everything past the first
 * BINARY_SIZE bytes is salt. Returns a pointer to a static struct s_salt.
 */
static void * get_salt(char * ciphertext)
{
	static struct s_salt cursalt;
	char decoded[BINARY_SIZE + MAX_SALT_LEN];
	int b64_len;

	ciphertext += NSLDAP_MAGIC_LENGTH;
	memset(decoded, 0, sizeof(decoded));
	memset(&cursalt, 0, sizeof(cursalt));
	b64_len = strlen(ciphertext);
	/* salt length = total decoded length minus the 20-byte digest */
	cursalt.len = base64_convert(ciphertext, e_b64_mime, b64_len, decoded,
	                             e_b64_raw, BINARY_SIZE + MAX_SALT_LEN,
	                             flg_Base64_DONOT_NULL_TERMINATE, 0) - BINARY_SIZE;
	memcpy(cursalt.data.c, decoded + BINARY_SIZE, cursalt.len);
	return &cursalt;
}
/* Quick scan: does the first 32-bit word of any computed digest match the
 * target binary? The braceless for wraps a single #ifdef-selected if; on
 * the SIMD path the index expression unpicks the interleaved digest layout
 * (5 words per digest, SIMD_COEF_32 lanes). */
static int cmp_all(void *binary, int count) {
unsigned int index;
for (index = 0; index < count; index++)
#ifdef SIMD_COEF_32
if (((uint32_t *) binary)[0] == ((uint32_t*)crypt_key)[(index&(SIMD_COEF_32-1)) + index/SIMD_COEF_32*5*SIMD_COEF_32])
#else
if ( ((uint32_t*)binary)[0] == ((uint32_t*)&(crypt_key[index][0]))[0] )
#endif
return 1;
return 0;
}
/* Nothing left to verify beyond cmp_one(): the full digest was already
 * compared, so this always reports a confirmed match. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Compare the full digest of candidate 'index' against the target binary.
 * SIMD path walks all 5 digest words through the interleaved crypt_key
 * layout; scalar path is a plain memcmp. */
static int cmp_one(void * binary, int index)
{
#ifdef SIMD_COEF_32
int i;
for (i = 0; i < BINARY_SIZE/sizeof(uint32_t); i++)
if (((uint32_t *) binary)[i] != ((uint32_t*)crypt_key)[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*5*SIMD_COEF_32+i*SIMD_COEF_32])
return 0;
return 1;
#else
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
#endif
}
/* Remember the salt chosen by the cracker core for the next crypt_all(). */
static void set_salt(void *salt) {
saved_salt = salt;
}
#ifdef SIMD_COEF_32
inline static void set_onesalt(int index)
{
unsigned int i, idx=index%NBKEYS;
unsigned char *sk = (unsigned char*)&saved_key[index/NBKEYS];
for (i=0;i<saved_salt->len;++i)
sk[GETPOS(i+saved_len[index], idx)] = saved_salt->data.c[i];
sk[GETPOS(i+saved_len[index], idx)] = 0x80;
while (++i <= last_salt_size)
sk[GETPOS(i+saved_len[index], idx)] = 0;
((unsigned int*)sk)[15*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + idx/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = (saved_salt->len + saved_len[index])<<3;
}
#endif
/* Hash SHA1(key . salt) for all 'count' candidates. With OpenMP the loop
 * header lives inside the #ifdef and the brace block below is its body,
 * striding NBKEYS at a time on the SIMD path; without OpenMP the block
 * runs once with index == 0 (SIMD still processes NBKEYS keys per call). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#ifdef SIMD_COEF_32
int inc = NBKEYS;
#else
int inc = 1;
#endif
#pragma omp parallel for
for (index=0; index < count; index += inc)
#endif
{
#ifdef SIMD_COEF_32
unsigned int i;
for (i=0;i<NBKEYS;i++)
set_onesalt(i+index);
SIMDSHA1body(saved_key[index/NBKEYS], crypt_key[index/NBKEYS], NULL, SSEi_MIXED_IN);
#else
SHA_CTX ctx;
SHA1_Init( &ctx );
SHA1_Update( &ctx, (unsigned char *) saved_key[index], strlen( saved_key[index] ) );
SHA1_Update( &ctx, (unsigned char *) saved_salt->data.c, saved_salt->len);
SHA1_Final( (unsigned char *)crypt_key[index], &ctx);
#endif
}
#ifdef SIMD_COEF_32
/* remember how much salt we wrote so the next salt can zero the excess */
last_salt_size = saved_salt->len;
#endif
return count;
}
#define COMMON_GET_HASH_SIMD32 5
#define COMMON_GET_HASH_VAR crypt_key
#include "common-get-hash.h"
static int salt_hash(void *salt)
{
struct s_salt * mysalt = salt;
return mysalt->data.w32 & (SALT_HASH_SIZE - 1);
}
/* Format descriptor registered with John the Ripper: first the parameter
 * block, then the method table wiring up the functions defined above. */
struct fmt_main fmt_saltedsha = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
/* NOTE(review): presumably the minimum plaintext length field — confirm
 * against struct fmt_params in formats.h */
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD,
{ NULL },
{ NSLDAP_MAGIC },
salted_sha1_common_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
salted_sha1_common_valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
/* get_hash_0..6 are generated by the shared header below */
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
activation.h | // Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MACE_OPS_ACTIVATION_H_
#define MACE_OPS_ACTIVATION_H_
#include <algorithm>
#include <cmath>
#include <string>
#include "mace/core/types.h"
#include "mace/ops/arm/activation_neon.h"
#include "mace/utils/logging.h"
namespace mace {
namespace ops {
// Identifiers for the activation functions implemented below.
enum ActivationType {
  NOOP = 0,       // pass-through (output untouched)
  RELU = 1,
  RELUX = 2,      // ReLU additionally clamped above by relux_max_limit
  PRELU = 3,      // per-channel slope, see PReLUActivation()
  TANH = 4,
  SIGMOID = 5,
  LEAKYRELU = 6,
};
// Map an activation name (e.g. "RELU") to its ActivationType value.
// Unknown names abort via LOG(FATAL); the trailing return exists only
// to satisfy the compiler.
// Takes the name by const reference to avoid copying the string on
// every call (source-compatible with all existing callers).
inline ActivationType StringToActivationType(const std::string &type) {
  if (type == "RELU") {
    return ActivationType::RELU;
  } else if (type == "RELUX") {
    return ActivationType::RELUX;
  } else if (type == "PRELU") {
    return ActivationType::PRELU;
  } else if (type == "TANH") {
    return ActivationType::TANH;
  } else if (type == "SIGMOID") {
    return ActivationType::SIGMOID;
  } else if (type == "NOOP") {
    return ActivationType::NOOP;
  } else if (type == "LEAKYRELU") {
    return ActivationType::LEAKYRELU;
  } else {
    LOG(FATAL) << "Unknown activation type: " << type;
  }
  return ActivationType::NOOP;
}
// Apply the activation `type` element-wise to input_ptr[0..size) and
// write the result to output_ptr.  relux_max_limit is the upper clamp
// for RELUX and the negative-side slope for LEAKYRELU.  Half precision
// is rejected (the float specialization below handles NEON paths).
template <typename T>
void DoActivation(const T *input_ptr,
                  T *output_ptr,
                  const index_t size,
                  const ActivationType type,
                  const float relux_max_limit) {
  MACE_CHECK(DataTypeToEnum<T>::value != DataType::DT_HALF);
  switch (type) {
    case NOOP:
      break;
    case RELU:
#pragma omp parallel for schedule(runtime)
      for (index_t i = 0; i < size; ++i) {
        output_ptr[i] = std::max(input_ptr[i], static_cast<T>(0));
      }
      break;
    case RELUX:
#pragma omp parallel for schedule(runtime)
      for (index_t i = 0; i < size; ++i) {
        output_ptr[i] = std::min(std::max(input_ptr[i], static_cast<T>(0)),
                                 static_cast<T>(relux_max_limit));
      }
      break;
    case TANH:
#pragma omp parallel for schedule(runtime)
      for (index_t i = 0; i < size; ++i) {
        output_ptr[i] = std::tanh(input_ptr[i]);
      }
      break;
    case SIGMOID:
#pragma omp parallel for schedule(runtime)
      for (index_t i = 0; i < size; ++i) {
        output_ptr[i] = 1 / (1 + std::exp(-input_ptr[i]));
      }
      break;
    case LEAKYRELU:
#pragma omp parallel for schedule(runtime)
      for (index_t i = 0; i < size; ++i) {
        // Leaky ReLU: identity for x > 0, slope * x for x <= 0.
        // Fixed: previous code computed max(x, 0) * slope, which zeroed
        // all negatives and rescaled positives, and disagreed with the
        // LeakyReluNeon path in the float specialization.
        output_ptr[i] =
            std::max(input_ptr[i], static_cast<T>(0)) +
            std::min(input_ptr[i], static_cast<T>(0)) * relux_max_limit;
      }
      break;
    default:
      LOG(FATAL) << "Unknown activation type: " << type;
  }
}
// Float specialization: RELU, RELUX and LEAKYRELU delegate to the
// hand-written NEON kernels; TANH and SIGMOID have no NEON kernel here
// and fall back to scalar loops parallelized with OpenMP.
template<>
inline void DoActivation(const float *input_ptr,
                         float *output_ptr,
                         const index_t size,
                         const ActivationType type,
                         const float relux_max_limit) {
  switch (type) {
    case NOOP:
      break;
    case RELU:
      ReluNeon(input_ptr, size, output_ptr);
      break;
    case RELUX:
      ReluxNeon(input_ptr, relux_max_limit, size, output_ptr);
      break;
    case TANH:
#pragma omp parallel for schedule(runtime)
      for (index_t i = 0; i < size; ++i) {
        output_ptr[i] = std::tanh(input_ptr[i]);
      }
      break;
    case SIGMOID:
#pragma omp parallel for schedule(runtime)
      for (index_t i = 0; i < size; ++i) {
        output_ptr[i] = 1 / (1 + std::exp(-input_ptr[i]));
      }
      break;
    case LEAKYRELU:
      // relux_max_limit doubles as the leaky slope here.
      LeakyReluNeon(input_ptr, relux_max_limit, size, output_ptr);
      break;
    default:
      LOG(FATAL) << "Unknown activation type: " << type;
  }
}
template <typename T>
void PReLUActivation(const T *input_ptr,
const index_t outer_size,
const index_t input_chan,
const index_t inner_size,
const T *alpha_ptr,
T *output_ptr) {
#pragma omp parallel for collapse(3) schedule(runtime)
for (index_t i = 0; i < outer_size; ++i) {
for (index_t chan_idx = 0; chan_idx < input_chan; ++chan_idx) {
for (index_t j = 0; j < inner_size; ++j) {
index_t idx = i * input_chan * inner_size + chan_idx * inner_size + j;
if (input_ptr[idx] < 0) {
output_ptr[idx] = input_ptr[idx] * alpha_ptr[chan_idx];
} else {
output_ptr[idx] = input_ptr[idx];
}
}
}
}
}
} // namespace ops
} // namespace mace
#endif // MACE_OPS_ACTIVATION_H_
|
nsincompressible.h | //*****************************************************************************
// Title : src/equation/nsincompressible.h
// Author : Tanabe Yuta
// Date : 2021/10/07
// Copyright : (C)2021 TanabeYuta
//*****************************************************************************
#pragma once
namespace PANSLBM2 {
namespace NSin {
// Function of updating macroscopic values of NS for 2D
template<class T, template<class>class P>
void Macro(T &_rho, T &_ux, T &_uy, const T *_f0, const T *_f, int _idx) {
_rho = _f0[_idx];
_ux = T();
_uy = T();
for (int c = 1; c < P<T>::nc; ++c) {
T f = _f[P<T>::IndexF(_idx, c)];
_rho += f;
_ux += P<T>::cx[c]*f;
_uy += P<T>::cy[c]*f;
}
}
// Function of getting equilibrium of NS for 2D
template<class T, template<class>class P>
void Equilibrium(T *_feq, T _rho, T _ux, T _uy) {
T rhouu = _rho - 1.5*(_ux*_ux + _uy*_uy);
for (int c = 0; c < P<T>::nc; ++c) {
T ciu = P<T>::cx[c]*_ux + P<T>::cy[c]*_uy;
_feq[c] = P<T>::ei[c]*(3.0*ciu + 4.5*ciu*ciu + rhouu);
}
}
// Function of applying external force of NS with Brinkman model for 2D
template<class T, template<class>class P>
void ExternalForceBrinkman(T _rho, T _ux, T _uy, T _alpha, T *_f, int _idx) {
T coef = 3.0*_alpha*_rho/(_rho + _alpha);
for (int c = 1; c < P<T>::nc; ++c) {
_f[P<T>::IndexF(_idx, c)] -= coef*P<T>::ei[c]*(P<T>::cx[c]*_ux + P<T>::cy[c]*_uy);
}
}
        // Fused macro-update + BGK collision sweep over every lattice
        // site of _p.  omega is the BGK relaxation rate derived from
        // the viscosity; when _issave is set the per-site macroscopic
        // values are also written to _rho/_ux/_uy.
        template<class T, template<class>class P>
        void MacroCollide(P<T>& _p, T *_rho, T *_ux, T *_uy, T _viscosity, bool _issave = false) {
            T omega = 1.0/(3.0*_viscosity + 0.5), iomega = 1.0 - omega, feq[P<T>::nc];
            // feq is a per-thread scratch array, hence private().
            #pragma omp parallel for private(feq)
            for (int idx = 0; idx < _p.nxyz; ++idx) {
                // Update macro
                T rho, ux, uy;
                Macro<T, P>(rho, ux, uy, _p.f0, _p.f, idx);
                // Save macro if need
                if (_issave) {
                    _rho[idx] = rho;
                    _ux[idx] = ux;
                    _uy[idx] = uy;
                }
                // Collide: relax every population toward equilibrium.
                Equilibrium<T, P>(feq, rho, ux, uy);
                _p.f0[idx] = iomega*_p.f0[idx] + omega*feq[0];
                for (int c = 1; c < P<T>::nc; ++c) {
                    int idxf = P<T>::IndexF(idx, c);
                    _p.f[idxf] = iomega*_p.f[idxf] + omega*feq[c];
                }
            }
        }
        // Like MacroCollide but with the Brinkman drag force applied
        // between the macro update and the collision.  _alpha[idx] is
        // the per-site Brinkman coefficient.
        template<class T, template<class>class P>
        void MacroBrinkmanCollide(P<T>& _p, T *_rho, T *_ux, T *_uy, T _viscosity, const T *_alpha, bool _issave = false) {
            T omega = 1.0/(3.0*_viscosity + 0.5), iomega = 1.0 - omega, feq[P<T>::nc];
            // feq is a per-thread scratch array, hence private().
            #pragma omp parallel for private(feq)
            for (int idx = 0; idx < _p.nxyz; ++idx) {
                // Update macro
                T rho, ux, uy;
                Macro<T, P>(rho, ux, uy, _p.f0, _p.f, idx);
                // External force with Brinkman model
                ExternalForceBrinkman<T, P>(rho, ux, uy, _alpha[idx], _p.f, idx);
                // NOTE(review): Macro is intentionally re-evaluated here so
                // the saved values and the collision see the force-corrected
                // velocity — confirm this matches the intended scheme.
                Macro<T, P>(rho, ux, uy, _p.f0, _p.f, idx);
                // Save macro if need
                if (_issave) {
                    _rho[idx] = rho;
                    _ux[idx] = ux;
                    _uy[idx] = uy;
                }
                // Collide
                Equilibrium<T, P>(feq, rho, ux, uy);
                _p.f0[idx] = iomega*_p.f0[idx] + omega*feq[0];
                for (int c = 1; c < P<T>::nc; ++c) {
                    int idxf = P<T>::IndexF(idx, c);
                    _p.f[idxf] = iomega*_p.f[idxf] + omega*feq[c];
                }
            }
        }
// Function of setting initial condition of NS for 2D
template<class T, template<class>class P>
void InitialCondition(P<T>& _p, const T *_rho, const T *_ux, const T *_uy) {
T feq[P<T>::nc];
for (int idx = 0; idx < _p.nxyz; ++idx) {
Equilibrium<T, P>(feq, _rho[idx], _ux[idx], _uy[idx]);
_p.f0[idx] = feq[0];
for (int c = 1; c < P<T>::nc; ++c) {
_p.f[P<T>::IndexF(idx, c)] = feq[c];
}
}
}
// Function of setting boundary condition of NS set U for 2D
template<class T, template<class>class P, class Fv0, class Fv1, class Ff>
void BoundaryConditionSetU(P<T>& _p, Fv0 _uxbc, Fv1 _uybc, Ff _bctype) {
// On xmin
if (_p.PEx == 0) {
for (int j = 0; j < _p.ny; ++j) {
if (_bctype(0 + _p.offsetx, j + _p.offsety)) {
int idx = _p.Index(0, j);
T ux = _uxbc(0 + _p.offsetx, j + _p.offsety), uy = _uybc(0 + _p.offsetx, j + _p.offsety);
_p.f[P<T>::IndexF(idx, 1)] = _p.f[P<T>::IndexF(idx, 3)] + 2.0*ux/3.0;
_p.f[P<T>::IndexF(idx, 5)] = _p.f[P<T>::IndexF(idx, 7)] + ux/6.0 - 0.5*(_p.f[P<T>::IndexF(idx, 2)] - _p.f[P<T>::IndexF(idx, 4)] - uy);
_p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 6)] + ux/6.0 + 0.5*(_p.f[P<T>::IndexF(idx, 2)] - _p.f[P<T>::IndexF(idx, 4)] - uy);
}
}
}
// On xmax
if (_p.PEx == _p.mx - 1) {
for (int j = 0; j < _p.ny; ++j) {
if (_bctype((_p.nx - 1) + _p.offsetx, j + _p.offsety)) {
int idx = _p.Index(_p.nx - 1, j);
T ux = _uxbc((_p.nx - 1) + _p.offsetx, j + _p.offsety), uy = _uybc((_p.nx - 1) + _p.offsetx, j + _p.offsety);
_p.f[P<T>::IndexF(idx, 3)] = _p.f[P<T>::IndexF(idx, 1)] - 2.0*ux/3.0;
_p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 5)] - ux/6.0 + 0.5*(_p.f[P<T>::IndexF(idx, 2)] - _p.f[P<T>::IndexF(idx, 4)] - uy);
_p.f[P<T>::IndexF(idx, 6)] = _p.f[P<T>::IndexF(idx, 8)] - ux/6.0 - 0.5*(_p.f[P<T>::IndexF(idx, 2)] - _p.f[P<T>::IndexF(idx, 4)] - uy);
}
}
}
// On ymin
if (_p.PEy == 0) {
for (int i = 0; i < _p.nx; ++i) {
if (_bctype(i + _p.offsetx, 0 + _p.offsety)) {
int idx = _p.Index(i, 0);
T ux = _uxbc(i + _p.offsetx, 0 + _p.offsety), uy = _uybc(i + _p.offsetx, 0 + _p.offsety);
_p.f[P<T>::IndexF(idx, 2)] = _p.f[P<T>::IndexF(idx, 4)] + 2.0*uy/3.0;
_p.f[P<T>::IndexF(idx, 5)] = _p.f[P<T>::IndexF(idx, 7)] + uy/6.0 - 0.5*(_p.f[P<T>::IndexF(idx, 1)] - _p.f[P<T>::IndexF(idx, 3)] - ux);
_p.f[P<T>::IndexF(idx, 6)] = _p.f[P<T>::IndexF(idx, 8)] + uy/6.0 + 0.5*(_p.f[P<T>::IndexF(idx, 1)] - _p.f[P<T>::IndexF(idx, 3)] - ux);
}
}
}
// On ymax
if (_p.PEy == _p.my - 1) {
for (int i = 0; i < _p.nx; ++i) {
if (_bctype(i + _p.offsetx, (_p.ny - 1) + _p.offsety)) {
int idx = _p.Index(i, _p.ny - 1);
T rho0 = (_p.f0[idx] + _p.f[P<T>::IndexF(idx, 1)] + _p.f[P<T>::IndexF(idx, 3)] + 2.0*(_p.f[P<T>::IndexF(idx, 2)] + _p.f[P<T>::IndexF(idx, 5)] + _p.f[P<T>::IndexF(idx, 6)]))/(1.0 + _uybc(i + _p.offsetx, (_p.ny - 1) + _p.offsety));
T mx = 0.5*(_p.f[P<T>::IndexF(idx, 1)] - _p.f[P<T>::IndexF(idx, 3)] - rho0*_uxbc(i + _p.offsetx, (_p.ny - 1) + _p.offsety));
T my = rho0*_uybc(i + _p.offsetx, (_p.ny - 1) + _p.offsety)/6.0;
T ux = _uxbc(i + _p.offsetx, (_p.ny - 1) + _p.offsety), uy = _uybc(i + _p.offsetx, (_p.ny - 1) + _p.offsety);
_p.f[P<T>::IndexF(idx, 4)] = _p.f[P<T>::IndexF(idx, 2)] - 2.0*uy/3.0;
_p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 5)] - uy/6.0 + 0.5*(_p.f[P<T>::IndexF(idx, 1)] - _p.f[P<T>::IndexF(idx, 3)] - ux);
_p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 6)] - uy/6.0 - 0.5*(_p.f[P<T>::IndexF(idx, 1)] - _p.f[P<T>::IndexF(idx, 3)] - ux);
}
}
}
}
// Function of setting boundary condition of NS set rho for 2D
template<class T, template<class>class P, class Fv0, class Fv1, class Ff>
void BoundaryConditionSetRho(P<T>& _p, Fv0 _rhobc, Fv1 _usbc, Ff _bctype) {
// On xmin
if (_p.PEx == 0) {
for (int j = 0; j < _p.ny; ++j) {
if (_bctype(0 + _p.offsetx, j + _p.offsety)) {
int idx = _p.Index(0, j);
T ux = _rhobc(0 + _p.offsetx, j + _p.offsety) - (_p.f0[idx] + _p.f[P<T>::IndexF(idx, 2)] + _p.f[P<T>::IndexF(idx, 4)] + 2.0*(_p.f[P<T>::IndexF(idx, 3)] + _p.f[P<T>::IndexF(idx, 6)] + _p.f[P<T>::IndexF(idx, 7)]));
T uy = _usbc(0 + _p.offsetx, j + _p.offsety);
_p.f[P<T>::IndexF(idx, 1)] = _p.f[P<T>::IndexF(idx, 3)] + 2.0*ux/3.0;
_p.f[P<T>::IndexF(idx, 5)] = _p.f[P<T>::IndexF(idx, 7)] + ux/6.0 - 0.5*(_p.f[P<T>::IndexF(idx, 2)] - _p.f[P<T>::IndexF(idx, 4)] - uy);
_p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 6)] + ux/6.0 + 0.5*(_p.f[P<T>::IndexF(idx, 2)] - _p.f[P<T>::IndexF(idx, 4)] - uy);
}
}
}
// On xmax
if (_p.PEx == _p.mx - 1) {
for (int j = 0; j < _p.ny; ++j) {
if (_bctype((_p.nx - 1) + _p.offsetx, j + _p.offsety)) {
int idx = _p.Index(_p.nx - 1, j);
T ux = -_rhobc((_p.nx - 1) + _p.offsetx, j + _p.offsety) + (_p.f0[idx] + _p.f[P<T>::IndexF(idx, 2)] + _p.f[P<T>::IndexF(idx, 4)] + 2.0*(_p.f[P<T>::IndexF(idx, 1)] + _p.f[P<T>::IndexF(idx, 5)] + _p.f[P<T>::IndexF(idx, 8)]));
T uy = _usbc((_p.nx - 1) + _p.offsetx, j + _p.offsety);
_p.f[P<T>::IndexF(idx, 3)] = _p.f[P<T>::IndexF(idx, 1)] - 2.0*ux/3.0;
_p.f[P<T>::IndexF(idx, 6)] = _p.f[P<T>::IndexF(idx, 8)] - ux/6.0 - 0.5*(_p.f[P<T>::IndexF(idx, 2)] - _p.f[P<T>::IndexF(idx, 4)] - uy);
_p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 5)] - ux/6.0 + 0.5*(_p.f[P<T>::IndexF(idx, 2)] - _p.f[P<T>::IndexF(idx, 4)] - uy);
}
}
}
// On ymin
if (_p.PEy == 0) {
for (int i = 0; i < _p.nx; ++i) {
if (_bctype(i + _p.offsetx, 0 + _p.offsety)) {
int idx = _p.Index(i, 0);
T ux = _usbc(i + _p.offsetx, 0 + _p.offsety);
T uy = _rhobc(i + _p.offsetx, 0 + _p.offsety) - (_p.f0[idx] + _p.f[P<T>::IndexF(idx, 1)] + _p.f[P<T>::IndexF(idx, 3)] + 2.0*(_p.f[P<T>::IndexF(idx, 4)] + _p.f[P<T>::IndexF(idx, 7)] + _p.f[P<T>::IndexF(idx, 8)]));
_p.f[P<T>::IndexF(idx, 2)] = _p.f[P<T>::IndexF(idx, 4)] + 2.0*uy/3.0;
_p.f[P<T>::IndexF(idx, 5)] = _p.f[P<T>::IndexF(idx, 7)] + uy/6.0 - 0.5*(_p.f[P<T>::IndexF(idx, 1)] - _p.f[P<T>::IndexF(idx, 3)] - ux);
_p.f[P<T>::IndexF(idx, 6)] = _p.f[P<T>::IndexF(idx, 8)] + uy/6.0 + 0.5*(_p.f[P<T>::IndexF(idx, 1)] - _p.f[P<T>::IndexF(idx, 3)] - ux);
}
}
}
// On ymax
if (_p.PEy == _p.my - 1) {
for (int i = 0; i < _p.nx; ++i) {
if (_bctype(i + _p.offsetx, (_p.ny - 1) + _p.offsety)) {
int idx = _p.Index(i, _p.ny - 1);
T ux = _usbc(i + _p.offsetx, (_p.ny - 1) + _p.offsety);
T uy = -_rhobc(i + _p.offsetx, (_p.ny - 1) + _p.offsety) + (_p.f0[idx] + _p.f[P<T>::IndexF(idx, 1)] + _p.f[P<T>::IndexF(idx, 3)] + 2.0*(_p.f[P<T>::IndexF(idx, 2)] + _p.f[P<T>::IndexF(idx, 5)] + _p.f[P<T>::IndexF(idx, 6)]));
_p.f[P<T>::IndexF(idx, 4)] = _p.f[P<T>::IndexF(idx, 2)] - 2.0*uy/3.0;
_p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 5)] - uy/6.0 + 0.5*(_p.f[P<T>::IndexF(idx, 1)] - _p.f[P<T>::IndexF(idx, 3)] - ux);
_p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 6)] - uy/6.0 - 0.5*(_p.f[P<T>::IndexF(idx, 1)] - _p.f[P<T>::IndexF(idx, 3)] - ux);
}
}
}
}
}
} |
dctz-comp-lib.c | /**
* @file dctz-comp-lib.c
* @author Seung Woo Son
* @date July 2019
* @brief DCTZ compression library routine
* (C) 2019 University of Massachuetts Lowell.
See LICENSE in top-level directory.
*/
#include <stdlib.h>
#include <memory.h>
#include <string.h>
#ifdef TIME_DEBUG
#include <sys/time.h>
#endif /* TIME_DEBUG */
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <pthread.h>
#include "zlib.h"
#include "dctz.h"
#include "dct.h"
#define DEF_MEM_LEVEL 8
/* File-scope scratch pointers usable in either precision.  a_x holds
 * the per-block DCT coefficients inside dctz_compress(); the member
 * read (.d or .f) follows var->datatype. */
union
{
  float *f;
  double *d;
} a, a_x;
void *compress_thread (void *arg)
{
z_stream *defstream = (z_stream *)arg;
#ifdef DEBUG
printf("compress started ...\n");
#endif
deflate(defstream, Z_FINISH);
#ifdef DEBUG
printf("done! compression...\n");
#endif
uLong ret = defstream->total_out;
deflateEnd(defstream);
pthread_exit((void *)ret);
}
/*
 * Compress N elements of var->buf (double or float per var->datatype)
 * into var_z->buf.  Pipeline: scale by 10^sf -> block-wise DCT
 * (BLK_SZ) -> quantize AC coefficients into NBINS error-bounded bins
 * (out-of-range coefficients kept exactly in AC_exact) -> deflate the
 * bin indices, DC terms and exact AC terms concurrently on three
 * pthreads -> pack header + the three compressed streams.
 * *outSize receives the total packed size.  Returns 1.
 *
 * Fixes vs. previous revision:
 *  - OpenMP `shared(bs.sf.d)` / `shared(bs.sf.f)` removed: data-sharing
 *    clause list items must be variable names, a struct member there is
 *    a compile error under -fopenmp.
 *  - non-USE_TRUNCATE AC_exact/DC assignments used `cond ? a | b` (a
 *    missing `:` plus bitwise OR on doubles) and read the wrong union
 *    member for FLOAT data; both corrected to proper conditionals.
 *  - fopen() of the bin_index dump is now checked.
 */
int dctz_compress(t_var *var, int N, size_t *outSize, t_var *var_z, double error_bound)
{
  int i, j, nblk, rem;
#ifdef TIME_DEBUG
  struct timeval start_t, end_t, gstart_t;
  double sf_t, dct_t, DC_AC_t, zlib_t, comp_t, malloc_t, genbin_t;
#endif
  double *bin_maxes, *bin_center, bin_width, range_min, range_max;
  t_bin_id *bin_index, *bin_indexz, *bin_indexz2;
#ifdef USE_TRUNCATE
  float *DC, *DCz, *DCz2, *AC_exact, *AC_exactz, *AC_exactz2;
#else
  double *DC, *DCz, *DCz2, *AC_exact, *AC_exactz, *AC_exactz2;
#endif
  struct header h;
  t_bstat bs;
  size_t type_size = 0;
#ifdef USE_QTABLE
  double *qtable; /* Quantizer Table */
#endif

  if (var->datatype == DOUBLE)
    type_size = sizeof(double);
  else /* FLOAT */
    type_size = sizeof(float);

  /* scratch buffer receiving the DCT coefficients */
  if (var->datatype == DOUBLE) {
    if (NULL == (a_x.d = (double *)malloc(N*type_size))) {
      fprintf(stderr, "Out of memory: a_x\n");
      exit(1);
    }
  }
  else { /* FLOAT */
    if (NULL == (a_x.f = (float *)malloc(N*type_size))) {
      fprintf(stderr, "Out of memory: a_x\n");
      exit(1);
    }
  }

  if (error_bound < 1E-6) {
    /* fixed: report on stderr with a newline, like the other fatal paths */
    fprintf(stderr, "ERROR BOUND is not acceptable\n");
    exit(1);
  }

  /* TODO: bin_maxes and bin_center should be double or float? */
  if (NULL == (bin_maxes = (double *)malloc(NBINS*sizeof(double)))) {
    fprintf(stderr, "Out of memory: bin_maxes\n");
    exit(1);
  }
  if (NULL == (bin_center = (double *)malloc(NBINS*sizeof(double)))) {
    fprintf(stderr, "Out of memory: bin_center\n");
    exit(1);
  }
#ifdef DEBUG
  for (i=0; i<BLK_SZ; i++) { /* show the first block */
    printf("a[%d] = %e\n", i, var->buf.d[i]);
    if (i%BLK_SZ == 0 && i != 0) printf("\n");
  }
#endif
#ifdef USE_QTABLE
  /* Start of Initialize Quantizer Table */
  if (NULL == (qtable = (double *)malloc(BLK_SZ*sizeof(double)))) {
    fprintf(stderr, "Out of memory: qtable\n");
    exit(1);
  }
  for (i=0; i<BLK_SZ; i++) {
    qtable[i] = 0.0;
  }
  /* with a qtable the index stream can grow up to 2*N entries */
  if (NULL == (bin_index = (t_bin_id *)malloc(2*N*sizeof(t_bin_id)))) {
    fprintf(stderr, "Out of memory: bin_index[]\n");
    exit(1);
  }
  memset(bin_index, 0, sizeof(t_bin_id)*2*N);
#ifdef DEBUG
  for (i=0; i<BLK_SZ; i++) {
    printf("qtable[%d] = %e\n", i, qtable[i]);
  }
#endif
  /* End of Initialize Quantizer Table */
#else
  if (NULL == (bin_index = (t_bin_id *)malloc(N*sizeof(t_bin_id)))) {
    fprintf(stderr, "Out of memory: bin_index[]\n");
    exit(1);
  }
  memset(bin_index, 0, sizeof(t_bin_id)*N);
#endif /* USE_QTABLE */
#ifdef TIME_DEBUG
  gettimeofday(&start_t, NULL);
  gstart_t = start_t;
#endif

  /* determine scaling factor */
  calc_data_stat(var, &bs, N);
  if (var->datatype == DOUBLE) {
#ifdef DEBUG
    printf("scaling factor = %f\n", bs.sf.d);
#endif
    double xscale = pow(10, bs.sf.d);
    /* apply scaling factor */
    if (bs.sf.d != 1.0) {
#ifdef _OPENMP
      /* fixed: shared(bs.sf.d) was invalid OpenMP (struct member in a
         data-sharing clause); xscale/var/N are shared by default */
#pragma omp parallel for private(i)
#endif
      for (i=0; i<N; i++)
        var->buf.d[i] /= xscale;
    }
  }
  else { /* FLOAT */
#ifdef DEBUG
    printf("scaling factor = %f\n", bs.sf.f);
#endif
    float xscale = pow(10, bs.sf.f);
    /* apply scaling factor */
    if (bs.sf.f != 1.0) {
#ifdef _OPENMP
      /* fixed: shared(bs.sf.f) was invalid OpenMP, see above */
#pragma omp parallel for private(i)
#endif
      for (i=0; i<N; i++)
        var->buf.f[i] /= xscale;
    }
  }
#ifdef TIME_DEBUG
  gettimeofday(&end_t, NULL);
  sf_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
  gettimeofday(&start_t, NULL);
#endif

  /* DCT over decomposed blocks */
  nblk = CEIL(N, BLK_SZ);
  rem = N % BLK_SZ;
#ifdef DEBUG
  printf("\nnumber of blocks = %d, remainder = %d\n", nblk, rem);
#endif
#ifdef USE_TRUNCATE
  if (NULL == (DC = (float *)malloc(nblk*sizeof(float)))) {
    fprintf(stderr, "Out of memory: DC[]\n");
    exit(1);
  }
#else
  if (NULL == (DC = (double *)malloc(nblk*sizeof(double)))) {
    fprintf(stderr, "Out of memory: DC[]\n");
    exit(1);
  }
#endif
#ifdef USE_TRUNCATE
  if (NULL == (DCz = (float *)malloc(nblk*sizeof(float)))) {
    fprintf(stderr, "Out of memory: DCz[]\n");
    exit(1);
  }
  memset(DCz, 0, sizeof(float)*nblk); /* TODO: is it necessary? */
#else
  if (NULL == (DCz = (double *)malloc(nblk*sizeof(double)))) {
    fprintf(stderr, "Out of memory: DCz[]\n");
    exit(1);
  }
#endif
  if (NULL == (bin_indexz = (t_bin_id *)malloc(N*sizeof(t_bin_id)))) {
    fprintf(stderr, "Out of memory: bin_indexz[]\n");
    exit(1);
  }
  memset(bin_indexz, 0, sizeof(t_bin_id)*N);
#ifdef TIME_DEBUG
  gettimeofday(&end_t, NULL);
  malloc_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
  gettimeofday(&start_t, NULL);
#endif

  if (var->datatype == DOUBLE)
    gen_bins(bs.min.d, bs.max.d, bin_maxes, bin_center, NBINS, error_bound);
  else /* FLOAT */
    gen_bins(bs.min.f, bs.max.f, bin_maxes, bin_center, NBINS, error_bound);
  int half = NBINS/2;
  bin_width = error_bound*2*BRSF;
  range_min = -(half*2+1)*(error_bound*BRSF);
  range_max = (half*2+1)*(error_bound*BRSF);
#ifdef TIME_DEBUG
  gettimeofday(&end_t, NULL);
  genbin_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
  gettimeofday(&start_t, NULL);
#endif
#ifdef USE_TRUNCATE
  if (NULL == (AC_exact = (float *)malloc(N*sizeof(float)))) {
    fprintf(stderr, "Out of memory: AC_exact\n");
    exit(1);
  }
  memset(AC_exact, 0, sizeof(float)*N); /* TODO: is it necessary? */
#else
  if (NULL == (AC_exact = (double *)malloc(N*sizeof(double)))) {
    fprintf(stderr, "Out of memory: AC_exact\n");
    exit(1);
  }
  memset(AC_exact, 0, sizeof(double)*N); /* TODO: is it necessary? */
#endif
#ifdef USE_TRUNCATE
  if (NULL == (AC_exactz = (float *)malloc(N*sizeof(float)))) {
    fprintf(stderr, "Out of memory: AC_exactz[]\n");
    exit(1);
  }
  memset(AC_exactz, 0, sizeof(float)*N); /* TODO: is it necessary? */
#else
  if (NULL == (AC_exactz = (double *)malloc(N*sizeof(double)))) {
    fprintf(stderr, "Out of memory: AC_exactz[]\n");
    exit(1);
  }
  memset(AC_exactz, 0, sizeof(double)*N); /* TODO: is it necessary? */
#endif

  if (var->datatype == DOUBLE)
    dct_init(BLK_SZ);
  else /* FLOAT */
    dct_init_f(BLK_SZ);

  int tot_AC_exact_count = 0;
  /* DCT block decomposed */
  for (i=0; i<nblk; i++) { /* for each decomposed blk */
    int l_blk_sz = ((i==nblk-1)&&(rem!=0))?rem:BLK_SZ;
    /* the last, partial block needs a DCT plan of its own size */
    if ((i==nblk-1) && (rem!=0)) {
      if (var->datatype == DOUBLE) {
        dct_finish();
        dct_init(rem);
      }
      else { /* FLOAT */
        dct_finish_f();
        dct_init_f(rem);
      }
    }
    if (var->datatype == DOUBLE)
      dct_fftw(var->buf.d+i*BLK_SZ, a_x.d+i*BLK_SZ, l_blk_sz, nblk);
    else /* FLOAT */
      dct_fftw_f(var->buf.f+i*BLK_SZ, a_x.f+i*BLK_SZ, l_blk_sz, nblk);
#ifdef DEBUG
    printf("block %d: after DCT:\n", i);
    for (j=0; j<BLK_SZ && (i<3); j++){ /* show the first block only */
      printf("a_x[%d] = %e \n", i*BLK_SZ+j, a_x.d[i*BLK_SZ+j]);
    }
    printf("\n");
#endif
#ifdef USE_TRUNCATE
    DC[i] = (float)(var->datatype == DOUBLE ? a_x.d[i*BLK_SZ] : a_x.f[i*BLK_SZ]); /* save DC component in truncated*/
#else
    /* fixed: pick the union member matching the datatype (was always a_x.d) */
    DC[i] = (var->datatype == DOUBLE ? a_x.d[i*BLK_SZ] : (double)a_x.f[i*BLK_SZ]); /* save DC component */
#endif
    bin_index[i*BLK_SZ] = NBINS; /* DC slot: store as it is */
    for (j=1; j<l_blk_sz; j++) {
      if (var->datatype == DOUBLE) {
        double item = a_x.d[i*BLK_SZ+j];
        t_bin_id bin_id;
        if (item < range_min || item > range_max) {
          bin_id = NBINS; /* outlier: kept exactly later */
#ifdef USE_QTABLE
          /* The Start of Making Quantizer Table -QT applied to block coefficients */
          if (fabs(item) >= qtable[j])
            qtable[j] = fabs(item);
#endif /* USE_QTABLE */
        }
        else
          bin_id = (t_bin_id)((item-range_min)/bin_width);
#ifdef DEBUG
        printf("bin_id = %d\n", bin_id);
#endif
        bin_index[i*BLK_SZ+j] = bin_id;
#ifdef DEBUG
        printf("a_x[%d]=%e => %d\n", i*BLK_SZ+j, (double)item, bin_id);
#endif
      } /* DOUBLE */
      else { /* FLOAT */
        float item = a_x.f[i*BLK_SZ+j];
        t_bin_id bin_id;
        if (item < range_min || item > range_max) {
          bin_id = NBINS;
#ifdef USE_QTABLE
          /* The Start of Making Quantizer Table -QT applied to block coefficients */
          if (fabs(item) >= qtable[j])
            qtable[j] = fabs(item);
#endif /* USE_QTABLE */
        }
        else
          bin_id = (t_bin_id)((item-range_min)/bin_width);
#ifdef DEBUG
        printf("bin_id = %d\n", bin_id);
#endif
        bin_index[i*BLK_SZ+j] = bin_id;
#ifdef DEBUG
        printf("a_x[%d]=%e => %d\n", i*BLK_SZ+j, (float)item, bin_id);
#endif
      } /* else: FLOAT */
    }
    /* The End of of Making Quantizer Table */
  }

  if (var->datatype == DOUBLE)
    dct_finish();
  else /* FLOAT */
    dct_finish_f();
#ifdef DCT_FILE_DEBUG
  FILE *fp = fopen("dct_result.bin", "w+");
  if (var->datatype == DOUBLE)
    fwrite(a_x.d, sizeof(double), N, fp);
  else /* FLOAT */
    fwrite(a_x.f, sizeof(float), N, fp);
  fclose(fp);
#endif
#ifdef USE_QTABLE
#ifdef DEBUG
  printf("Quantizer Table:\n");
  for (j=0; j<BLK_SZ ; j++) { /* Show Quantizer Table */
    printf("before qtable[%d] = %e \n", j, qtable[j]);
  }
#endif
  /* avoid dividing by tiny maxima in the normalization pass below */
  for (j=1; j<BLK_SZ; j++) {
    //if (qtable[j] < bin_maxes[NBINS-1]) {
    if (qtable[j] < 1.0) {
      qtable[j] = 1.0;
    }
  }
#ifdef DEBUG
  printf("Quantizer Table:\n");
  for (j=0; j<BLK_SZ ; j++) { /* Show Quantizer Table */
    printf("after qtable[%d] = %e \n", j, qtable[j]);
  }
#endif
#endif

  /* second pass: route each outlier either into a renormalized bin
     (USE_QTABLE) or into the exact AC stream */
  unsigned int k = N;
  double qt_factor = (NBINS == 255 ? 10.0 : 2000.0);
  for (i=0; i<nblk; i++) {
    int l_blk_sz = ((i==nblk-1)&&(rem != 0))?rem:BLK_SZ;
    for (j=1; j<l_blk_sz; j++) {
      t_bin_id bin_id;
      bin_id = bin_index[i*BLK_SZ+j];
      if (bin_id == NBINS) {
#ifdef USE_QTABLE
        double item = (var->datatype == DOUBLE) ? a_x.d[i*BLK_SZ+j] : a_x.f[i*BLK_SZ+j]; /* in case of FLOAT, it will be cast to double */
        /* if out of bin area, normalize it to the area from range_max/range_min to range_max/range_min +/- error_bound */
        if (item < range_min) {
          item = (item/qtable[j])*error_bound*qt_factor + range_min;
        } else if (item > range_max) {
          item = (item/qtable[j])*error_bound*qt_factor + range_max;
        }
        if (var->datatype == DOUBLE)
          a_x.d[i*BLK_SZ+j] = item; /* update a_x with updated value */
        else /* FLOAT */
          a_x.f[i*BLK_SZ+j] = item; /* update a_x with updated value */
        if (item < range_min || item > range_max) {
          bin_id = NBINS;
#ifdef USE_TRUNCATE
          AC_exact[tot_AC_exact_count++] = (float)(var->datatype == DOUBLE ? a_x.d[i*BLK_SZ+j] : a_x.f[i*BLK_SZ+j]);
#else
          /* fixed: was `? ... | ...` (missing ':', bitwise OR on doubles) */
          AC_exact[tot_AC_exact_count++] = (double)(var->datatype == DOUBLE ? a_x.d[i*BLK_SZ+j] : a_x.f[i*BLK_SZ+j]);
#endif /* USE_TRUNCATE */
        }
        else
          bin_id = (t_bin_id)((item-range_min)/bin_width);
        bin_index[k++] = bin_id; /* appended past the first N entries */
#ifdef DEBUG
        printf("a_x[%d]=%e => %d\n", i*BLK_SZ+j, (double)item, bin_id);
#endif /* DEBUG */
#else
#ifdef USE_TRUNCATE
        AC_exact[tot_AC_exact_count++] = (float)(var->datatype == DOUBLE ? a_x.d[i*BLK_SZ+j] : a_x.f[i*BLK_SZ+j]);
#else
        /* fixed: was `? ... | ...` (missing ':', bitwise OR on doubles) */
        AC_exact[tot_AC_exact_count++] = (double)(var->datatype == DOUBLE ? a_x.d[i*BLK_SZ+j] : a_x.f[i*BLK_SZ+j]); /* in case of FLOAT, casting to double makes sense? */
#endif
#endif /* USE_QTABLE */
      }
    }
  }
#ifdef DEBUG
  printf("total AC_exact_count = %d\n", tot_AC_exact_count);
#endif
#ifdef TIME_DEBUG
  gettimeofday(&end_t, NULL);
  dct_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
  gettimeofday(&start_t, NULL);
#endif
  free(bin_maxes);
#ifdef DEBUG
  int bin_freq[NBINS+1] = {0};
  i=0;
  while (i < N) {
    bin_freq[(int)bin_index[i++]]++;
  }
  printf("i=%d\n", i);
  int sum = 0;
  printf("bin_freq: ");
  for (i=0; i<NBINS+1; i++) {
    printf("%d, ", bin_freq[i]);
    sum += bin_freq[i];
  }
  printf("sum=%d\n", sum);
#endif
#ifdef TIME_DEBUG
  gettimeofday(&end_t, NULL);
  DC_AC_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
  gettimeofday(&start_t, NULL);
#endif

  /* side-channel dump of the raw bin indices; fixed: check fopen() */
  char bin_index_file[640];
  FILE *fp_index;
  sprintf(bin_index_file, "bin_index.bin");
  fp_index = fopen(bin_index_file, "wb");
  if (fp_index != NULL) {
    fwrite(bin_index, N, 1, fp_index);
    fclose(fp_index);
  }
  else
    fprintf(stderr, "Warning: cannot write %s\n", bin_index_file);
#ifdef DEBUG
  printf("tot_AC_exact_count=%d\n", tot_AC_exact_count);
#ifdef USE_QTABLE
  printf("bin_index before compression = %lu\n", k*sizeof(t_bin_id));
#else
  printf("bin_index before compression = %lu\n", N*sizeof(t_bin_id));
#endif
#ifdef USE_TRUNCATE
  printf("DC before compression = %lu\n", nblk*sizeof(float));
  printf("AC_exact before compression = %lu\n", tot_AC_exact_count*sizeof(float));
#else
  printf("DC before compression = %lu\n", nblk*sizeof(double));
  printf("AC_exact before compression = %lu\n", tot_AC_exact_count*sizeof(double));
#endif
#endif

  /* compress the three streams concurrently, one pthread each */
  pthread_t thread[3];
  pthread_attr_t attr; /* thread attributes (left at defaults) */
  /* set defaults (not all pthread implementations default to joinable) */
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
  /* setup for compress */
  z_stream defstream[3];
  defstream[0].zalloc = Z_NULL;
  defstream[0].zfree = Z_NULL;
  defstream[0].opaque = Z_NULL;
  /* compress bin_index */
#ifdef USE_QTABLE
  uLong ucompSize_binindex = k*sizeof(t_bin_id);
#else
  uLong ucompSize_binindex = N*sizeof(t_bin_id);
#endif
  uLong compSize_binindex = compressBound(ucompSize_binindex);
  int windowBits = 14;
  deflateInit2(&defstream[0], 1, Z_DEFLATED, windowBits, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
  defstream[0].avail_in = ucompSize_binindex;
  defstream[0].next_in = (Bytef *)bin_index;
  defstream[0].avail_out = compSize_binindex;
  defstream[0].next_out = (Bytef *)bin_indexz;
  defstream[0].data_type = Z_UNKNOWN; /* Z_ASCII, Z_BINARY, Z_UNKNOWN */
  if (pthread_create(&thread[0], &attr, compress_thread, (void *)&defstream[0])) {
    fprintf(stderr, "Error creating thread\n");
    exit(0);
  }
  /* compress DC */
  defstream[1].zalloc = Z_NULL;
  defstream[1].zfree = Z_NULL;
  defstream[1].opaque = Z_NULL;
#ifdef USE_TRUNCATE
  uLong ucompSize_DC = nblk*sizeof(float);
  uLong compSize_DC = compressBound(ucompSize_DC);
#else
  uLong ucompSize_DC = nblk*sizeof(double);
  uLong compSize_DC = compressBound(ucompSize_DC);
#endif
  deflateInit2(&defstream[1], 1, Z_DEFLATED, windowBits, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
  defstream[1].avail_in = ucompSize_DC;
  defstream[1].next_in = (Bytef *)DC;
  defstream[1].avail_out = compSize_DC;
  defstream[1].next_out = (Bytef *)DCz;
  defstream[1].data_type = Z_UNKNOWN;
  if (pthread_create(&thread[1], &attr, compress_thread, (void *)&defstream[1])) {
    fprintf(stderr, "Error creating thread\n");
    exit(0);
  }
  /* compress AC_exact */
  defstream[2].zalloc = Z_NULL;
  defstream[2].zfree = Z_NULL;
  defstream[2].opaque = Z_NULL;
#ifdef USE_TRUNCATE
  uLong ucompSize_AC_exact = N*sizeof(float);
  uLong compSize_AC_exact = compressBound(ucompSize_AC_exact);
#else
  uLong ucompSize_AC_exact = N*sizeof(double);
  uLong compSize_AC_exact = compressBound(ucompSize_AC_exact);
#endif
  deflateInit2(&defstream[2], 1, Z_DEFLATED, windowBits, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
  defstream[2].avail_in = ucompSize_AC_exact;
  defstream[2].next_in = (Bytef *)AC_exact;
  defstream[2].avail_out = compSize_AC_exact;
  defstream[2].next_out = (Bytef *)AC_exactz;
  defstream[2].data_type = Z_UNKNOWN;
  if (pthread_create(&thread[2], &attr, compress_thread, (void *)&defstream[2])) {
    fprintf(stderr, "Error creating thread\n");
    exit(0);
  }
#ifdef USE_TRUNCATE
  //uLong ucompSize_AC_exact = tot_AC_exact_count*sizeof(float);
  // uLong compSize_AC_exact = compressBound(ucompSize_AC_exact);
#else
  //uLong ucompSize_AC_exact = tot_AC_exact_count*sizeof(double);
  // uLong compSize_AC_exact = compressBound(ucompSize_AC_exact);
#endif

  /* collect actual compressed sizes from the workers */
  void *ret;
  for (i=0; i<3; i++) {
    pthread_join(thread[i], &ret);
#ifdef DEBUG
    printf("thread %d joined\n", i);
#endif
    switch (i) {
    case 0:
      compSize_binindex = (uLong)ret;
      break;
    case 1:
      compSize_DC = (uLong)ret;
      break;
    case 2:
      compSize_AC_exact = (uLong)ret;
      break;
    }
  }
  pthread_attr_destroy(&attr);
#if 0
  compSize_binindex = defstream[0].total_out; /* update with actual size */
  deflateEnd(&defstream[0]);
  compSize_DC = defstream[1].total_out; /* update with actual size */
  deflateEnd(&defstream[1]);
  compSize_AC_exact_count = defstream[2].total_out; /* update with actual size */
  deflateEnd(&defstream[2]);
#endif

  /* shrink the output buffers to the actual compressed sizes */
  bin_indexz2 = (t_bin_id*)realloc(bin_indexz, compSize_binindex); /* TODO: check error */
#ifdef SIZE_DEBUG
  printf("Compressed bin_index size is: %lu\n", compSize_binindex);
#endif
  DCz2 = realloc(DCz, compSize_DC); /* TODO: check error */
#ifdef SIZE_DEBUG
  printf("Compressed DC size is: %lu\n", compSize_DC);
#endif
  AC_exactz2 = realloc(AC_exactz, compSize_AC_exact); /* TODO: check error */
#ifdef SIZE_DEBUG
  printf("Compressed AC_exact size is: %lu\n", compSize_AC_exact);
#endif
#ifdef TIME_DEBUG
  gettimeofday(&end_t, NULL);
  double comp_rate;
  zlib_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
  comp_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(gstart_t.tv_sec*1000000 + gstart_t.tv_usec));
  comp_rate = (N*sizeof(double)/(double)(1024*1024))/(comp_t/1000000);
  printf("sf_t=%f(s), dct_t=%f(s), zlib_t(compress)=%f(s)\n", sf_t/1000000, dct_t/1000000, zlib_t/1000000);
  printf("malloc_t=%f(s), genbin=%f(s), DC_AC_t=%f(s)\n", malloc_t/1000000, genbin_t/1000000, DC_AC_t/1000000);
  printf("comp_time = %f (s), compression rate = %f (MB/s)\n", comp_t/1000000, comp_rate);
#endif

  /* pack header followed by the three compressed streams */
  *outSize = sizeof(struct header) + compSize_binindex + compSize_DC + compSize_AC_exact;
  h.datatype = var->datatype;
  h.num_elements = N;
  h.error_bound = error_bound;
  h.tot_AC_exact_count = tot_AC_exact_count;
  if (var->datatype == DOUBLE)
    h.scaling_factor.d = bs.sf.d;
  else /* FLOAT */
    h.scaling_factor.f = bs.sf.f;
  h.bindex_sz_compressed = compSize_binindex;
  h.DC_sz_compressed = compSize_DC;
  h.AC_exact_sz_compressed = compSize_AC_exact;
#ifdef USE_QTABLE
  h.bindex_count = k;
#endif
  //h.AC_exact_count_sz_compressed = compSize_AC_exact_count;
  unsigned char *cur_p;
  if (var->datatype == DOUBLE)
    cur_p = (unsigned char *)(var_z->buf.d);
  else
    cur_p = (unsigned char *)(var_z->buf.f);
  memcpy(cur_p, &h, sizeof(struct header));
  cur_p += sizeof(struct header);
  memcpy(cur_p, bin_indexz2, compSize_binindex);
  cur_p += compSize_binindex;
  //memcpy (cur_p, AC_exact_countz2, compSize_AC_exact_count);
  //cur_p += compSize_AC_exact_count;
  memcpy(cur_p, DCz2, compSize_DC);
  cur_p += compSize_DC;
  memcpy(cur_p, AC_exactz2, compSize_AC_exact);
#ifdef USE_QTABLE
  cur_p += compSize_AC_exact;
  memcpy(cur_p, qtable, BLK_SZ*sizeof(double)); /* raw qtable appended uncompressed */
#endif /* USE_QTABLE */

  /* release scratch buffers */
  if (var->datatype == DOUBLE)
    free(a_x.d);
  else /* FLOAT */
    free(a_x.f);
  free(DC);
  free(DCz2);
  free(bin_center);
  //free(AC_exact_count);
  //free(AC_exact_countz2);
  free(AC_exact);
  free(AC_exactz2);
  free(bin_index);
  free(bin_indexz2);
#ifdef USE_QTABLE
  free(qtable);
#endif
#ifndef SIZE_DEBUG
  printf("outSize = %zu\n", *outSize);
#endif
  return(1);
}
|
Synchronization.h | //===- Synchronization.h - OpenMP synchronization utilities ------- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//
#ifndef OMPTARGET_DEVICERTL_SYNCHRONIZATION_H
#define OMPTARGET_DEVICERTL_SYNCHRONIZATION_H
#include "Types.h"
namespace _OMP {
namespace synchronize {
/// Initialize the synchronization machinery. Must be called by all threads.
void init(bool IsSPMD);
/// Synchronize all threads in a warp identified by \p Mask.
void warp(LaneMaskTy Mask);
/// Synchronize all threads in a block.
void threads();
/// Synchronizing threads is allowed even if they all hit different instances of
/// `synchronize::threads()`. However, `synchronize::threadsAligned()` is more
/// restrictive in that it requires all threads to hit the same instance. The
/// noinline is removed by the openmp-opt pass and helps to preserve the
/// information till then.
///{
#pragma omp begin assumes ext_aligned_barrier
/// Synchronize all threads in a block; they are all reaching the same
/// instruction (hence all threads in the block are "aligned").
__attribute__((noinline)) void threadsAligned();
#pragma omp end assumes
///}
} // namespace synchronize
namespace fence {
/// Memory fence with \p Ordering semantics for the team.
void team(int Ordering);
/// Memory fence with \p Ordering semantics for the contention group.
void kernel(int Ordering);
/// Memory fence with \p Ordering semantics for the system.
void system(int Ordering);
} // namespace fence
namespace atomic {
/// Atomically load \p Addr with \p Ordering semantics.
uint32_t load(uint32_t *Addr, int Ordering);
/// Atomically store \p V to \p Addr with \p Ordering semantics.
void store(uint32_t *Addr, uint32_t V, int Ordering);
/// Atomically increment \p *Addr and wrap at \p V with \p Ordering semantics.
uint32_t inc(uint32_t *Addr, uint32_t V, int Ordering);
/// Atomically add \p V to \p *Addr with \p Ordering semantics.
uint32_t add(uint32_t *Addr, uint32_t V, int Ordering);
/// Atomically add \p V to \p *Addr with \p Ordering semantics.
uint64_t add(uint64_t *Addr, uint64_t V, int Ordering);
} // namespace atomic
} // namespace _OMP
#endif
|
mandel-omp-task.c | /*
* Sequential Mandelbrot program
*
* This program computes and displays all or part of the Mandelbrot
* set. By default, it examines all points in the complex plane
* that have both real and imaginary parts between -2 and 2.
* Command-line parameters allow zooming in on a specific part of
* this range.
*
* Usage:
* mandel [-i maxiter -c x0 y0 -s size -w windowsize]
* where
* maxiter denotes the maximum number of iterations at each point -- by default 1000
* x0, y0, and size specify the range to examine (a square
* centered at (x0 + iy0) of size 2*size by 2*size -- by default,
* a square of size 4 by 4 centered at the origin)
*        windowsize denotes the size of the image (display window) to compute
*
* Input: none, except the optional command-line arguments
* Output: a graphical display as described in Wilkinson & Allen,
* displayed using the X Window system, plus text output to
* standard output showing the above parameters, plus execution
* time in seconds.
*
* Code based on the original code from Web site for Wilkinson and Allen's
* text on parallel programming:
* http://www.cs.uncc.edu/~abw/parallel/par_prog/
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif
#include <sys/time.h>
/* Return the current wall-clock time in microseconds since the Unix epoch,
 * as a double (used by the START/STOP_COUNT_TIME macros). */
double getusec_() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec * 1e6 + (double)now.tv_usec;
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6fs\n",(_m), stamp);
/* Default values for things. */
#define N 2 /* size of problem space (x, y from -N to N) */
#define NPIXELS 800 /* size of display window in pixels */
int row, col; // variables used to traverse the problem space
/* Structure definition for complex numbers */
typedef struct {
double real, imag;
} complex;
#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h" /* has setup(), interact() */
#endif
/* Compute the Mandelbrot escape count for every pixel of a height x width
 * grid, in parallel: one OpenMP task per pixel, created by a single thread
 * inside a parallel region. With _DISPLAY_ the point is drawn to an X11
 * window (serialized by a critical section); otherwise the iteration count
 * is stored in output[row][col]. */
void mandelbrot(int height,
int width,
double real_min,
double imag_min,
double scale_real,
double scale_imag,
int maxiter,
#if _DISPLAY_
int setup_return,
Display *display,
Window win,
GC gc,
double scale_color,
double min_color)
#else
int ** output)
#endif
{
/* Calculate points and save/display */
#pragma omp parallel
#pragma omp single
for (int row = 0; row < height; ++row) {
for (int col = 0; col < width; ++col) {
/* firstprivate captures the current (row, col) pair in the task */
#pragma omp task firstprivate(row, col)
{
complex z, c;
z.real = z.imag = 0;
/* Scale display coordinates to actual region */
c.real = real_min + ((double) col * scale_real);
c.imag = imag_min + ((double) (height-1-row) * scale_imag);
/* height-1-row so y axis displays
* with larger values at top
*/
/* Calculate z0, z1, .... until divergence or maximum iterations */
int k = 0;
double lengthsq, temp;
do {
/* z = z^2 + c, done component-wise on (real, imag) */
temp = z.real*z.real - z.imag*z.imag + c.real;
z.imag = 2*z.real*z.imag + c.imag;
z.real = temp;
lengthsq = z.real*z.real + z.imag*z.imag;
++k;
} while (lengthsq < (N*N) && k < maxiter); /* diverged once |z|^2 >= N*N */
#if _DISPLAY_
/* Scale color and display point */
long color = (long) ((k-1) * scale_color) + min_color;
if (setup_return == EXIT_SUCCESS) {
/* X11 calls are not thread-safe here; serialize drawing */
#pragma omp critical
{
XSetForeground (display, gc, color);
XDrawPoint (display, win, gc, col, row);
}
}
#else
output[row][col]=k;
#endif
}
}
/* NOTE(review): this taskwait is inside the row loop, so the generating
 * thread waits for each row's tasks before creating the next row's.
 * Moving it after the outer loop would allow more task overlap --
 * confirm whether per-row synchronization is intentional. */
#pragma omp taskwait
}
}
/* Parse options, compute the Mandelbrot set via mandelbrot(), then either
 * display it (X11 build) or optionally write the raw int image to disk.
 * Fixes: the output file stream was never closed, the output image rows
 * (and row-pointer array) were leaked, the fwrite result was compared to a
 * signed int, allocations were unchecked, and the non-display build fell
 * off the end of main without an explicit return. */
int main(int argc, char *argv[]) {
    int maxiter = 1000;            /* maximum iterations per point */
    double real_min;
    double real_max;
    double imag_min;
    double imag_max;
    int width = NPIXELS;           /* dimensions of display window / image */
    int height = NPIXELS;
    double size=N, x0 = 0, y0 = 0; /* region: square of half-size `size` at (x0, y0) */
#if _DISPLAY_
    Display *display;
    Window win;
    GC gc;
    int setup_return;
    long min_color = 0, max_color = 0;
    double scale_color;
#else
    int ** output;
    FILE *fp = NULL;
#endif
    double scale_real, scale_imag;
    /* Process command-line arguments */
    for (int i=1; i<argc; i++) {
        if (strcmp(argv[i], "-i")==0) {
            maxiter = atoi(argv[++i]);
        }
        else if (strcmp(argv[i], "-w")==0) {
            width = atoi(argv[++i]);
            height = width;  /* image is always square */
        }
        else if (strcmp(argv[i], "-s")==0) {
            size = atof(argv[++i]);
        }
#if !_DISPLAY_
        else if (strcmp(argv[i], "-o")==0) {
            if((fp=fopen("parallel.out", "wb"))==NULL) {
                fprintf(stderr, "Unable to open file\n");
                return EXIT_FAILURE;
            }
        }
#endif
        else if (strcmp(argv[i], "-c")==0) {
            x0 = atof(argv[++i]);
            y0 = atof(argv[++i]);
        }
        else {
#if _DISPLAY_
            fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
            fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
            fprintf(stderr, " -o to write computed image to disk (default no file generated)\n");
#endif
            fprintf(stderr, " -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
            fprintf(stderr, " -w to specify the size of the display window (default 800x800 pixels)\n");
#else
            fprintf(stderr, " -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
            fprintf(stderr, " -c to specify the center x0+iy0 of the square to compute (default origin)\n");
            fprintf(stderr, " -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
            return EXIT_FAILURE;
        }
    }
    real_min = x0 - size;
    real_max = x0 + size;
    imag_min = y0 - size;
    imag_max = y0 + size;
    /* Produce text output */
    fprintf(stdout, "\n");
    fprintf(stdout, "Mandelbrot program\n");
    fprintf(stdout, "center = (%g, %g), size = %g\n",
            (real_max + real_min)/2, (imag_max + imag_min)/2,
            (real_max - real_min)/2);
    fprintf(stdout, "maximum iterations = %d\n", maxiter);
    fprintf(stdout, "\n");
#if _DISPLAY_
    /* Initialize for graphical display */
    setup_return =
        setup(width, height, &display, &win, &gc, &min_color, &max_color);
    if (setup_return != EXIT_SUCCESS) {
        fprintf(stderr, "Unable to initialize display, continuing\n");
        return EXIT_FAILURE;
    }
#else
    /* Allocate the output image; fail cleanly on OOM (fix: was unchecked) */
    output = malloc(height*sizeof(int *));
    if (output == NULL) {
        fprintf(stderr, "Unable to allocate output image\n");
        return EXIT_FAILURE;
    }
    for (int row = 0; row < height; ++row) {
        output[row] = malloc(width*sizeof(int));
        if (output[row] == NULL) {
            fprintf(stderr, "Unable to allocate output image\n");
            return EXIT_FAILURE;
        }
    }
#endif
    /* Compute factors to scale computational region to window */
    scale_real = (double) (real_max - real_min) / (double) width;
    scale_imag = (double) (imag_max - imag_min) / (double) height;
#if _DISPLAY_
    /* Compute factor for color scaling */
    scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif
    /* Start timing */
    double stamp;
    START_COUNT_TIME;
#if _DISPLAY_
    mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
               setup_return, display, win, gc, scale_color, min_color);
#else
    mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
               output);
#endif
    /* End timing */
    STOP_COUNT_TIME("Total execution time");
    /* Be sure all output is written */
#if _DISPLAY_
    if (setup_return == EXIT_SUCCESS) {
        XFlush (display);
    }
#else
    if (fp != NULL)
    {
        for (int row = 0; row < height; ++row)
            /* fix: fwrite returns size_t; compare against an unsigned width */
            if(fwrite(output[row], sizeof(int), width, fp) != (size_t)width) {
                fprintf(stderr, "Output file not written correctly\n");
            }
        /* fix: stream was never closed (buffered data could be lost) */
        fclose(fp);
    }
    /* fix: image rows and the row-pointer array were leaked */
    for (int row = 0; row < height; ++row)
        free(output[row]);
    free(output);
#endif
#if _DISPLAY_
    /* Wait for user response, then exit program */
    if (setup_return == EXIT_SUCCESS) {
        interact(display, &win, width, height,
                 real_min, real_max, imag_min, imag_max);
    }
#endif
    /* fix: non-display build previously fell off the end of main */
    return EXIT_SUCCESS;
}
|
pvmt-OpenMP.c | #include <stdlib.h>
#include <stdio.h>
#include<time.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
//#define PRINT_ALL
//#define VECTOR_GLOBAL
#define VECTOR_DYNAMIC
#ifdef VECTOR_GLOBAL
#define MAX 32768 //=2^10
double v[MAX], m[MAX][MAX], r[MAX];
#endif
/* Map a schedule letter (S/D/G/A) to the corresponding omp_sched_t value.
 * Any other character prints an error message and terminates the program. */
omp_sched_t charToSchedType (char c) {
    switch (c) {
    case 'S':
        return omp_sched_static;
    case 'D':
        return omp_sched_dynamic;
    case 'G':
        return omp_sched_guided;
    case 'A':
        return omp_sched_auto;
    default:
        printf(" Error en tipo de schedule.\n"
               " Puede ser (S)tatic - (D)ynamic - (G)uided - (A)uto\n");
        exit(-1);
    }
}
/* Return a human-readable name for an OpenMP schedule kind.
 * Fix: `ret` was left uninitialized when `type` matched none of the four
 * known kinds (e.g. a monotonic-modified value), so the function returned
 * an indeterminate pointer (undefined behavior). Default to "Unknown". */
char* printEnum (omp_sched_t type) {
char * ret = "Unknown"; /* defensive default for unrecognized kinds */
if (type == omp_sched_static)
ret = "Static";
else if (type == omp_sched_dynamic)
ret = "Dynamic";
else if (type == omp_sched_guided)
ret = "Guided";
else if (type == omp_sched_auto)
ret = "Auto";
return ret;
}
/* Triangular matrix-vector product r = v * M (M upper-triangular with
 * alternating-sign columns), parallelized with OpenMP and a user-selected
 * runtime schedule. Usage: prog <S|D|G|A> <chunk> <iterations>.
 * Fixes: (1) `sched_type` (an omp_sched_t enum) was compared against the
 * character literals 'S'/'G', which can never match, so the default chunk
 * was always 1; (2) the NULL check for v/m/r ran AFTER the loop that
 * dereferences m[i], and the per-row mallocs were unchecked; (3) each row
 * of m was leaked (only the pointer array was freed). */
int main(int argc,char** argv){
    if (argc<4){
        printf("Error en nº de parámetros. Ejecución:\n %s <Tipo de schedule> <chunk> <iteraciones>\n", argv[0]);
        printf("El schedule puede ser (S)tatic - (D)ynamic - (G)uided - (A)uto\n");
        printf("Introducir chunk = 0 para tomar el valor por defecto según el schedueling.\n");
        exit(-1);
    }
    omp_sched_t sched_type = charToSchedType(argv[1][0]);
    int chunk = atoi(argv[2]);
    if (chunk == 0) {
        /* fix: compare against the enum values, not 'S'/'G' char codes */
        if (sched_type == omp_sched_static || sched_type == omp_sched_guided)
            /* NOTE(review): omp_get_num_threads() returns 1 outside a
             * parallel region; omp_get_max_threads() may be what was
             * intended here -- confirm. */
            chunk = omp_get_num_threads();
        else
            chunk = 1;
    } else if (chunk < 0) {
        chunk = 1;
        printf("\n Valor de chunk negativo. Queda fijado a 1.\n");
    }
    /* NOTE(review): N is unsigned, so a negative argv[3] wraps to a huge
     * value instead of tripping the N < 1 check below -- confirm intent. */
    unsigned int N = atoi(argv[3]); // Máximo N =2^32 -1=4294967295 (sizeof(unsigned int) = 4 B)
    if (N < 1) {
        printf("Error - Número de iteraciones negativo.\n");
        exit(-1);
    }
    struct timespec cgt1,cgt2;
    double ncgt; //para tiempo de ejecución
    int i, j;
    omp_set_schedule(sched_type, chunk);
    /*
    // Comprobamos que hemos fijado bien el schedueling y el chunk
    omp_get_schedule(&sched_type, &chunk);
    printf("\n run-shed-var: Schedule %s --- Chunk = %d\n",
    printEnum(sched_type), chunk);
    */
#ifdef VECTOR_GLOBAL
    if (N>MAX)
        N=MAX;
    printf("\n Número de iteraciones refijado al máximo posible: %d\n", N);
#endif
#ifdef VECTOR_DYNAMIC
    double *v, **m, *r;
    v = (double*) malloc(N*sizeof(double)); // malloc necesita el tamaño en bytes
    m = (double**) malloc(N*sizeof(double*)); //si no hay espacio suficiente malloc devuelve NULL
    r = (double*) malloc(N*sizeof(double));
    /* fix: check BEFORE the row loop below dereferences m[i] */
    if ((v==NULL) || (m==NULL) || (r==NULL)) {
        printf("Error en la reserva de espacio para los vectores\n");
        exit(-2);
    }
    for (i=0; i<N; i++) {
        m[i] = (double*) malloc(N*sizeof(double));
        if (m[i] == NULL) { /* fix: row allocations were unchecked */
            printf("Error en la reserva de espacio para los vectores\n");
            exit(-2);
        }
    }
#endif
    //Inicializar vector y matriz
#pragma omp parallel for private(i)
    for (j=0; j<N; j++) {
        v[j] = 2.5;
        m[0][j] = 1.1;
        for (i=1; i<=j; i++)
            m[i][j] = -m[i-1][j];  /* alternate sign down the column */
        for (; i<N; i++)
            m[i][j] = 0;           /* strictly-lower part is zero */
    }
    //Comprobamos la incialización
#ifdef PRINT_ALL
    printf("\n Vector:\n");
    for (i=0; i<N; i++) {
        printf("\t%0.1f", v[i]);
    }
    printf("\n\n Matriz: \n");
    for (i=0; i<N; i++) {
        for (j=0; j<N; j++)
            printf("\t%0.1f", m[i][j]);
        printf("\n\n");
    }
#endif
    clock_gettime(CLOCK_REALTIME,&cgt1);
    //Calcular el producto
    double sum;
#pragma omp parallel for private(sum, i)
    for (j=0; j<N; j++) {
        sum = 0;
        for (i=0; i<=j; i++)
            sum += v[i]*m[i][j];
        r[j] = sum;
    }
    clock_gettime(CLOCK_REALTIME,&cgt2);
    ncgt = (double) (cgt2.tv_sec - cgt1.tv_sec) +
           (double) ((cgt2.tv_nsec - cgt1.tv_nsec)/(1.e+9));
    //Imprimir resultado del producto
    printf("\n Resultado:\n");
#ifdef PRINT_ALL
    for (i=0; i<N; i++) {
        printf("\t%0.2f", r[i]);
    }
    printf("\n");
#else
    printf("Primer valor: %0.1f \t Último valor: %0.1f \n", r[0], r[N-1]);
#endif
    printf("\n Tiempo de ejecución(s): %11.9f\n\n", ncgt);
#ifdef VECTOR_DYNAMIC
    free(v); // libera el espacio reservado para v
    /* fix: free each row of m before the row-pointer array (was leaked) */
    for (i=0; i<N; i++)
        free(m[i]);
    free(m); // libera el espacio reservado para m
    free(r);
#endif
    return 0;
}
|
GB_binop__lxor_bool.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_bool)
// A.*B function (eWiseMult): GB (_AemultB_08__lxor_bool)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_bool)
// A.*B function (eWiseMult): GB (_AemultB_04__lxor_bool)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_bool)
// A*D function (colscale): GB (_AxD__lxor_bool)
// D*A function (rowscale): GB (_DxB__lxor_bool)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_bool)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_bool)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_bool)
// C=scalar+B GB (_bind1st__lxor_bool)
// C=scalar+B' GB (_bind1st_tran__lxor_bool)
// C=A+scalar GB (_bind2nd__lxor_bool)
// C=A'+scalar GB (_bind2nd_tran__lxor_bool)
// C type: bool
// A type: bool
// A pattern? 0
// B type: bool
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
bool aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
bool bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_BOOL || GxB_NO_LXOR_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop body comes from the
// shared template, specialized by the GB_* macros defined earlier (cij = aij != bij).
void GB (_Cdense_ewise3_noaccum__lxor_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// pre-sliced task list in B_ek_slicing. Returns GrB_NO_VALUE when this
// operator/type combination is compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_accumB__lxor_bool)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed as untyped GB_void) into a dense
// matrix C. Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_accumb__lxor_bool)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above already returned. Harmless
// artifact of the code generator (this file is auto-generated; do not edit
// by hand, fix the Generator/ source instead).
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, writing the
// result values into C->x via the colscale template.
GrB_Info GB (_AxD__lxor_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, writing the
// result values into C->x via the rowscale template.
GrB_Info GB (_DxB__lxor_bool)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (optionally masked by M, complemented, or structural).
// The heavy lifting is in GB_add_template.c, specialized by GB_BINOP
// (z = (x != y)). Workspace declared here is freed by GB_FREE_WORKSPACE.
GrB_Info GB (_AaddB__lxor_bool)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
bool alpha_scalar ;
bool beta_scalar ;
// for eWiseUnion, alpha/beta are the values used where A or B has no entry
if (is_eWiseUnion)
{
alpha_scalar = (*((bool *) alpha_scalar_in)) ;
beta_scalar = (*((bool *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B with optional mask, where C is
// sparse/hypersparse; body generated from GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__lxor_bool)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. For LXOR the operator is commutative (GB_BINOP_FLIP is 0),
// so the flipxy argument needs no special handling here.
GrB_Info GB (_AemultB_02__lxor_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full; body generated from GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__lxor_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where C is bitmap; body generated
// from GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__lxor_bool)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op(x, Bx): apply z = (x != bij) with the scalar bound to the first
// argument, over all bnz entries of B, in parallel.
GrB_Info GB (_bind1st__lxor_bool)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
bool x = (*((bool *) x_input)) ;
bool *Bx = (bool *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ; // skip entries GBB reports absent in Bb
bool bij = GBX (Bx, p, false) ;
Cx [p] = (x != bij) ; // logical XOR of two bools
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op(Ax, y): apply z = (aij != y) with the scalar bound to the second
// argument, over all anz entries of A, in parallel.
GrB_Info GB (_bind2nd__lxor_bool)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
bool y = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ; // skip entries GBB reports absent in Ab
bool aij = GBX (Ax, p, false) ;
Cx [p] = (aij != y) ; // logical XOR of two bools
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
// C = op(x, A'): transpose A and apply z = (x != aij) via the GB_CAST_OP
// macro defined just above, using the shared unop-transpose template.
GrB_Info GB (_bind1st_tran__lxor_bool)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
// C = op(A', y): transpose A and apply z = (aij != y) via the GB_CAST_OP
// macro defined just above, using the shared unop-transpose template.
GrB_Info GB (_bind2nd_tran__lxor_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
com_qmp.c | /****************** com_qmp.c *****************************************/
/* Communications routines for the SU3 program
MIMD version 7.
This file is communications-scheme dependent.
QMP version - allegedly machine independent
*/
/* Modifications
5/09/02 officially split from com_mpi.c and modified for QMP
4/20/02 added start_general_gather_field C.D.
10/15/01 condensed and modified to use multiple gathers - JCO
1/30/00 combined with Schroedinger functional and
32 sublattice versions - UMH
11/27/98 Corrected g_wvectorsumfloat and made independent of su3.h. C.D.
9/02/97 Revised to allow gathers from temporary fields. neighbor[]
is now list of indices, add start/restart_gather_field D.T.
8/05/97 ANSI prototyping for all routines C.D.
10/05/96 Moved parallel I/O wrappers to io_ansi.c C.D.
9/23/96 Explicit void types for modules with empty returns C.D.
9/20/96 Added restore/save_checkpoint C.D.
9/20/96 Improved sort_site_list C.D.
9/20/96 Added broadcast_bytes and wrappers for system-dependent
parallel file system calls C.D.
*/
/*
Exported Functions:
initialize_machine() does any machine dependent setup at the
very beginning.
normal_exit() closes communications and exits
terminate() halts program abruptly and exits
machine_type() returns string describing communications architecture
mynode() returns node number of this node.
numnodes() returns number of nodes
g_sync() provides a synchronization point for all nodes.
g_floatsum() sums a floating point number over all nodes.
g_intsum() sums an integer over all nodes
g_vecfloatsum() sums a vector of floats over all nodes
g_doublesum() sums a double over all nodes.
g_vecdoublesum() sums a vector of doubles over all nodes.
g_longdoublesum() sums a long double over all nodes.
g_veclongdoublesum() sums a vector of long doubles over all nodes.
g_complexsum() sums a single precision complex number over all nodes.
g_veccomplexsum() sums a vector of single precision complex numbers
over all nodes.
g_dcomplexsum() sums a double precision complex number over all nodes.
g_vecdcomplexsum() sums a vector of double_complex over all nodes
g_wvectorsumfloat() sums a single precision wilson vector over all nodes.
g_xor32() finds global exclusive or of 32-bit word
g_floatmax() finds maximum floating point number over all nodes.
g_doublemax() finds maximum double over all nodes.
broadcast_float() broadcasts a single precision number from
node 0 to all nodes.
broadcast_double() broadcasts a double precision number
broadcast_complex() broadcasts a single precision complex number
broadcast_dcomplex() broadcasts a double precision complex number
broadcast_bytes() broadcasts a number of bytes
send_integer() sends an integer to one other node
receive_integer() receives an integer
send_field() sends a field to one other node.
get_field() receives a field from some other node.
dclock() returns a double precision time, with arbitrary zero
time_stamp() print wall clock time with message
get_utc_datetime() get GM time as ASCII string
sort_eight_gathers() sorts eight contiguous gathers from order
XUP,XDOWN,YUP,YDOWN... to XUP,YUP,...XDOWN,YDOWN...
make_nn_gathers() makes all necessary lists for communications with
nodes containing neighbor sites.
make_gather() calculates and stores necessary communications lists
for a given gather mapping
declare_gather_site() creates a message tag that defines specific details
of a gather to be used later
declare_gather_field() creates a message tag that defines specific
details of a gather from field to be used later
prepare_gather() optional call that allocates buffers for a previously
declared gather. will automatically be called from
do_gather() if not done before.
do_gather() executes a previously declared gather
wait_gather() waits for receives to finish, insuring that the
data has actually arrived.
cleanup_gather() frees all the buffers that were allocated, WHICH
MEANS THAT THE GATHERED DATA MAY SOON DISAPPEAR.
accumulate_gather() combines gathers into single message tag
declare_accumulate_gather_site() does declare_gather() and accumulate_gather()
in single step.
declare_accumulate_gather_field() does declare_gather_field() and
accumulate_gather() in single step.
start_gather_site() older function which does declare/prepare/do_gather
in a single step
start_gather_field() older function which does
declare/prepare/do_gather_field
restart_gather_site() older function which is obsoleted by do_gather()
restart_gather_field() older function which is obsoleted by do_gather()
start_general_gather_site() starts asynchronous sends and receives required
to gather fields at arbitrary displacement.
start_general_gather_field() starts asynchronous sends and receives
required to gather neighbors from a temporary
array of fields.
wait_general_gather() waits for receives to finish, insuring that the
data has actually arrived, and sets pointers to
received data.
cleanup_general_gather() frees all the buffers that were allocated, WHICH
MEANS THAT THE GATHERED DATA MAY SOON DISAPPEAR.
myjobid() The index number of this job
numjobs() Number of jobs in multijob execution
jobgeom() Dimensions of the multijob layout. Product = numjobs
ionodegeom() Dimensions of the I/O partition layout. Product =
number of files.
nodegeom() Allocated dimensions of the nodes.
*/
#include <time.h>
#include <qmp.h>
#include "generic_includes.h"
#include <ctype.h>
#include "../include/config.h"
#include "../include/openmp_defs.h"
#define NOWHERE -1 /* Not an index in array of fields */
#if 0
/* message types used here */
#define SEND_INTEGER_ID 1 /* send an integer to one other node */
#define SEND_FIELD_ID 2 /* id of field sent from one node to another */
#define GENERAL_GATHER_ID 3 /* id used by general_gather routines */
#define GATHER_BASE_ID 4 /* ids greater than or equal to this are used
by the gather routines */
/* macro to compute the message id */
#define GATHER_ID(x) (GATHER_BASE_ID+(x))
#endif
/* If we want to do our own checksums */
#ifdef COM_CRC
u_int32type crc32(u_int32type crc, const unsigned char *buf, size_t len);
#define CRCBYTES 8
#else
#define CRCBYTES 0
#endif
/* hacks needed to unify even/odd and 32 sublattice cases */
#ifdef N_SUBL32
#define NUM_SUBL 32
#undef FORSOMEPARITY
#define FORSOMEPARITY FORSOMESUBLATTICE
#undef FORSOMEPARITY_OMP
#define FORSOMEPARITY_OMP FORSOMESUBLATTICE_OMP
#else
#define NUM_SUBL 2
#endif
/* Precision */
#if PRECISION == 1
#define QMP_sum_Real QMP_sum_float
#define QMP_sum_Real_array QMP_sum_float_array
#define QMP_max_Real QMP_max_float
#else
#define QMP_sum_Real QMP_sum_double
#define QMP_sum_Real_array QMP_sum_double_array
#define QMP_max_Real QMP_max_double
#endif
/**********************************************************************
* INTERNAL DATA TYPES *
**********************************************************************/
/* "comlink" is the basic structure used in gathering neighboring sites.
Each node will maintain one such structure for each direction for each
(other) node that contains sites that are neighbors of the sites on
this node. For example, if the XUP neighbors of sites on this node
are found on two other nodes, then this node will maintain a linked
list of two comlink structures for gathering from the XUP direction.
*/
typedef struct comlink {
struct comlink *nextcomlink; /* pointer to next in list, NULL if last */
int othernode; /* number of the node to which we connect */
int n_subl_connected[NUM_SUBL+1];
/* Number of sites on this node that have neighbors on other node connected
by this "comlink" of certain parity of the receiver.
The indices 0..NUM_SUBL-1 refer to a specific parity and the
index NUM_SUBL refers to all parities */
int *sitelist[NUM_SUBL+1];
/* Address of list of indices of a certain receiver parity whose
neighbors are found through this comlink. The index is the same as for
n_subl_connected above. */
/* Different comlink structures may point to the same list.
For example, the receive list for one gather may be a send list for
the opposite gather. Do not free a sitelist without checking for sharing. */
} comlink;
#if 0
/* Linked list type to store id offsets for the sender.
Needed to match the id that receiver is expecting */
typedef struct id_list_t {
int id_offset; /* id offset */
struct id_list_t *next; /* linked list */
} id_list_t;
#endif
/* structure to hold all necessary info for a gather */
typedef struct gather_t {
int *neighbor; /* per-site: local index of the neighbor when it is on this
node, or NOWHERE when the neighbor is off-node */
comlink *neighborlist; /* comlink for receiving messages */
comlink *neighborlist_send; /* comlink for sending messages */
// id_list_t *id_list; /* list of id offsets for sending */
int n_recv_msgs, n_send_msgs; /* number of messages to receive and send */
// int offset_increment; /* total number of message ids used for this gather */
} gather_t;
/* structure to keep track of details of a declared gather */
typedef struct gmem_t {
char *mem; /* source (destination) address for send (receive) */
int size; /* size of sent field in bytes */
int stride; /* stride of source/destination field */
int num; /* number of sites in sitelist */
int *sitelist; /* sites gathered to/from; presumably local lattice
indices as built by make_send_receive_list — verify
against the do_gather implementation */
struct gmem_t *next; /* linked list */
} gmem_t;
/* Structure to keep track of outstanding sends and receives */
typedef struct {
int msg_node; /* node sending or receiving message */
// int id_offset; /* id offset for this message */
int msg_size; /* size of message in bytes */
QMP_mem_t *qmp_mem; /* QMP allocation backing msg_buf */
char *msg_buf; /* address of buffer malloc'd for message */
gmem_t *gmem; /* linked list explaining detailed usage for buffer */
QMP_msgmem_t mm; /* QMP descriptor for msg_buf */
QMP_msghandle_t mh; /* message handle returned by system call */
} msg_sr_t;
/* structure to store declared gathers
this is the actual structure used internally
it has the same name as the typedef which contains this structure which
the user sees */
struct msg_tag {
#ifdef CRC_DEBUG
int index;
#endif
int prepared; /* nonzero once buffers have been allocated (prepare_gather) */
// int *ids; /* array of message ids used in gather */
// int nids; /* number of message ids used in gather */
int nrecvs; /* number of messages to receive in gather */
int nsends; /* number of messages to send in gather */
msg_sr_t *recv_msgs; /* array of messages to receive */
msg_sr_t *send_msgs; /* array of messages to send */
QMP_msghandle_t *mhrecvlist, *mhsendlist;
QMP_msghandle_t mhrecv, mhsend;
};
/***************************************************
* Global variables for the communications stuff *
***************************************************/
/* message ids for gather encode a sequence number for the gather
so that if several gathers are going at once, you can read
the message corresponding to the right one. */
/* for computing message id in gather */
/* not needed anymore, but may be used for a check later */
//static int id_offset; /* label gathers by round-robin */
//static int num_gather_ids; /* number of id offsets allowed */
/* keep track of used ids */
//static int *id_array;
/* array storing gather setup info */
static gather_t *gather_array;
/* Number of gathers (mappings) that have been set up */
static int n_gathers, gather_array_len;
static size_t mem_align=QMP_ALIGN_DEFAULT;
static int mem_flags=(QMP_MEM_COMMS);
static int *ionodegeomvals = NULL;
/**********************************************************************
* MISCELLANEOUS UTILITY FUNCTIONS *
**********************************************************************/
/*
** Scan argv for "tag" and parse what follows it.
** On return:
**   *first, *last = argv index range spanning the tag and its arguments,
**                   or -1/-1 if the tag was not found;
**   *c            = the single non-numeric string argument following the
**                   tag, if any, else NULL;
**   *a            = malloc'd array of the integer arguments following the
**                   tag, if any, else NULL.  Caller owns and must free it.
*/
static void
get_arg(int argc, char **argv, char *tag, int *first, int *last,
        char **c, int **a)
{
  int i;
  *first = -1;
  *last = -1;
  *c = NULL;
  *a = NULL;
  for(i=1; i<argc; i++) {
    if(strcmp(argv[i], tag)==0) {
      *first = i;
      /* A following token that does not start with a digit is taken as a
         single string argument.  Cast to unsigned char: passing a plain
         (possibly negative) char to isdigit is undefined behavior. */
      if( ((i+1)<argc) && !(isdigit((unsigned char)argv[i+1][0])) ) {
        *c = argv[i+1];
        *last = i+1;
      } else {
        /* consume the run of integer tokens following the tag */
        while( (++i<argc) && isdigit((unsigned char)argv[i][0]) );
        *last = i-1;
        int n = *last - *first;
        if(n) {
          int j;
          *a = (int *) malloc(n*sizeof(int));
          /* original code did not check malloc; on failure leave *a NULL
             rather than dereferencing it */
          if(*a != NULL) {
            for(j=0; j<n; j++) {
              (*a)[j] = atoi(argv[*first+1+j]);
            }
          }
        }
      }
    }
  }
}
/*
** Delete entries argv[first..last] from the argument list, shifting the
** remaining tail down and shrinking *argc accordingly.  A negative
** "first" (tag not found) leaves the arguments untouched.
*/
static void
remove_from_args(int *argc, char ***argv, int first, int last)
{
  if(first < 0) return;           /* nothing was matched: no-op */
  int count = last - first + 1;   /* how many entries disappear */
  int src;
  for(src = last + 1; src < *argc; src++) {
    (*argv)[src - count] = (*argv)[src];
  }
  *argc -= count;
}
static void
process_ionodes_flag(int *argc, char ***argv){
int nio;
int first, last, *a = NULL;
char *c = NULL;
char myname[] = "process_ionodes_flag";
/* process -ionodes a[0] a[1] a[2] a[3] flag */
/* This option allows the allocated machine to be subdivided into
independent I/O partitions for QIO partfile format. This option
requires a defined mesh topology (usually through the -qmp-geom
option) */
/* The integer a[i] specifies the number of divisions of the ith
geom dimension */
/* The default a[i] = 1 for all i implies no subdivision */
get_arg(*argc, *argv, "-ionodes", &first, &last, &c, &a);
/* a string argument after -ionodes is not understood: fatal */
if( c ) {
printf("%s: unknown argument to -ionodes: %s\n", myname, c);
terminate(1);
}
nio = last - first;
if(nio) {
int i;
/* ownership of the malloc'd array "a" transfers to the file-scope
ionodegeomvals, later returned by ionodegeom() */
ionodegeomvals = a;
/* Check sanity of job partition divisions */
if(nodegeom() == NULL){
fprintf(stderr, "-ionodes requires -qmp-geom\n");
terminate(1);
}
if(nio!=4) {
printf("%s: allocated number dimensions %d != ionode dimensions %d\n",
myname, 4, nio);
terminate(1);
}
for(i=0; i<nio; i++){
if(ionodegeomvals[i]<=0){
/* NOTE(review): this prints a diagnostic but does NOT call
terminate(1), unlike the other sanity checks above — confirm
whether continuing with a nonpositive division is intended */
printf("%s: ionode division[%i] = %d <= 0\n", myname,
i, ionodegeomvals[i]);
}
}
}
/* strip the -ionodes arguments so later parsing does not see them */
remove_from_args(argc, argv, first, last);
}
/**********************************************************************
* BASIC COMMUNICATIONS FUNCTIONS *
**********************************************************************/
/*
** Machine initialization
*/
/*
** Machine initialization.
** Starts QMP message passing, verifies the 32-bit integer type
** configuration, processes the -ionodes command-line flag, and resets
** the gather tables.  Must be called before any other routine here.
*/
void
initialize_machine(int *argc, char ***argv)
{
  QMP_status_t i;
  QMP_thread_level_t provide;

  i = QMP_init_msg_passing(argc, argv, QMP_THREAD_SINGLE, &provide);
  if(i!=0) {
    printf("%s\n", QMP_error_string(i));
    printf("com_qmp: Initialize QMP failed.\n");
    fflush(stdout);
    exit(i);
  }

  /* check if 32 bit int is set correctly */
#ifdef SHORT_IS_32BIT
  if(sizeof(unsigned short)!=4) {
    /* cast sizeof (a size_t) to int for %d — passing size_t to %d is
       undefined behavior; the #else branch below already did this */
    printf("node %d: SHORT_IS_32BIT is set but sizeof(unsigned short)=%d\n",
           mynode(), (int)sizeof(unsigned short));
    terminate(1);
  }
#else
  if(sizeof(unsigned int)!=4) {
    printf("node %d: SHORT_IS_32BIT is not set but sizeof(unsigned int)=%d\n",
           mynode(), (int)sizeof(unsigned int));
    terminate(1);
  }
#endif

  process_ionodes_flag(argc, argv);

#if 0
  num_gather_ids = 257 - GATHER_BASE_ID;
  if(num_gather_ids>1024) num_gather_ids = 1024;

  id_offset = 0;
  id_array = (int *)malloc(num_gather_ids*sizeof(int));
  for(i=0; i<num_gather_ids; ++i) id_array[i] = 0;
#endif

  /* no gathers exist until make_nn_gathers / make_gather are called */
  n_gathers = 0;
  gather_array_len = 0;
  gather_array = NULL;
}
/*
** version of normal exit for multinode processes
*/
void
normal_exit(int status)
{
/* announce the exit, then synchronize all nodes before shutting
   down QMP so no node exits while others are still communicating */
time_stamp("exit");
fflush(stdout);
g_sync();
QMP_finalize_msg_passing();
fflush(stdout);
exit(status);
}
/*
** UTC time as ASCII string
*/
/*
** UTC time as ASCII string.
** Writes at most 64 bytes (including the terminating NUL) into
** time_string; the caller must supply a buffer of at least 64 bytes.
** The trailing newline that asctime() appends is removed.
** NOTE: gmtime()/asctime() use static storage and are not re-entrant.
*/
void
get_utc_datetime(char *time_string)
{
  time_t time_stamp;
  struct tm *gmtime_stamp;
  size_t len;

  time(&time_stamp);
  gmtime_stamp = gmtime(&time_stamp);
  /* snprintf always NUL-terminates; the former strncpy could leave the
     buffer unterminated if the source string filled all 64 bytes */
  snprintf(time_string, 64, "%s", asctime(gmtime_stamp));
  /* Remove trailing end-of-line character (guard against empty string) */
  len = strlen(time_string);
  if(len > 0 && time_string[len - 1] == '\n')
    time_string[len - 1] = '\0';
}
/*
** version of exit for multinode processes -- kill all nodes
*/
void
terminate(int status)
{
/* error exit: report the failing node and status, then stop.
   Because of the g_sync below, all nodes must eventually reach
   terminate for the program to actually exit. */
time_stamp("termination");
printf("Termination: node %d, status = %d\n", this_node, status);
fflush(stdout);
g_sync(); /* Added for multijob operation. Is this desirable? */
exit(status);
}
/*
** Tell what kind of machine we are on
*/
/* Human-readable name of this communications layer.  The returned
   pointer refers to static storage; callers must not modify or free it. */
static char name[] = "QMP (portable)";

char *
machine_type(void)
{
  return name;
}
/* Thin query wrappers around the QMP topology API. */
/*
** Return my node number
*/
int
mynode(void)
{
return QMP_get_node_number();
}
/*
** Return number of nodes
*/
int
numnodes(void)
{
return QMP_get_number_of_nodes();
}
/*
** Return my jobid
*/
int
myjobid(void)
{
return QMP_get_job_number();
}
/*
** Return number of jobs
*/
int
numjobs(void)
{
return QMP_get_number_of_jobs();
}
/*
** Return the job geometry
*/
int const *
jobgeom(void)
{
return QMP_get_job_geometry();
}
/*
** Return the ionode geometry.
** Points at the internal array filled in by the -ionodes flag
** (process_ionodes_flag); NULL when the flag was not given.
** Callers must not free the returned pointer.
*/
int *
ionodegeom(void)
{
return ionodegeomvals;
}
/*
** Return the allocated dimensions (node geometry) if a grid is being used
*/
int const *
nodegeom(void)
{
return QMP_get_allocated_dimensions();
}
/*
** Synchronize all nodes (collective: every node must call this)
*/
void
g_sync(void)
{
QMP_barrier();
}
/*
** Sum signed integer over all nodes.
** Collective call; on return *ipt holds the global sum on every node.
*/
void
g_intsum(int *ipt)
{
QMP_sum_int(ipt);
}
/*
** Sum unsigned 32-bit integer type
*/
/* Binary operation: in-place accumulate; unsigned arithmetic, so
   overflow wraps mod 2^32, which is well-defined */
static void
sum_u32(void *inout, void *in ){
*(u_int32type *)inout += *(u_int32type *)in;
}
/* Collective reduction using the user-supplied operation above */
void
g_uint32sum(u_int32type *pt)
{
QMP_binary_reduction(pt, sizeof(u_int32type), sum_u32);
}
/* Global reduction wrappers.  Each is a collective call: every node
   supplies its local value(s) and on return the argument holds the
   global sum.  Complex types are reduced as 2 reals per complex
   number; a wilson_vector is reduced as 24 Reals. */
/*
** Sum double over all nodes
*/
void
g_doublesum(double *dpt)
{
QMP_sum_double(dpt);
}
/*
** Sum a vector of doubles over all nodes
*/
void
g_vecdoublesum(double *dpt, int ndoubles)
{
QMP_sum_double_array(dpt, ndoubles);
}
/*
** Sum long double over all nodes
*/
void
g_longdoublesum(long double *dpt)
{
QMP_sum_long_double(dpt);
}
/*
** Sum a vector of long doubles over all nodes
*/
void
g_veclongdoublesum(long double *dpt, int count)
{
QMP_sum_long_double_array(dpt, count);
}
/*
** Sum float over all nodes
*/
void
g_floatsum(Real *fpt)
{
QMP_sum_Real(fpt);
}
/*
** Sum a vector of floats over all nodes
*/
void
g_vecfloatsum(Real *fpt, int length)
{
QMP_sum_Real_array(fpt, length);
}
/*
** Sum complex over all nodes (reduced as 2 Reals)
*/
void
g_complexsum(complex *cpt)
{
QMP_sum_Real_array((Real *)cpt, 2);
}
/*
** Sum a vector of complex over all nodes
*/
void
g_veccomplexsum(complex *cpt, int ncomplex)
{
QMP_sum_Real_array((Real *)cpt, 2*ncomplex);
}
/*
** Sum double_complex over all nodes (reduced as 2 doubles)
*/
void
g_dcomplexsum(double_complex *cpt)
{
QMP_sum_double_array((double *)cpt, 2);
}
/*
** Sum a vector of double_complex over all nodes
*/
void
g_vecdcomplexsum(double_complex *cpt, int ncomplex)
{
QMP_sum_double_array((double *)cpt, 2*ncomplex);
}
/*
** Sum wilson_vector over all nodes (reduced as 24 Reals)
*/
void
g_wvectorsumfloat(wilson_vector *wvpt)
{
QMP_sum_Real_array((Real *)wvpt, 24);
}
/*
** Global exclusive or acting on u_int32type.
** Widens to unsigned long because QMP only provides an xor on ulong,
** then truncates back; safe since xor acts bitwise.
*/
void
g_xor32(u_int32type *pt)
{
unsigned long work;
work = (unsigned long)*pt;
QMP_xor_ulong(&work);
*pt = (u_int32type)work;
}
/*
** Find maximum of generic floats over all nodes (collective call)
*/
void
g_floatmax(Real *fpt)
{
QMP_max_Real(fpt);
}
/*
** Find maximum of double over all nodes (collective call)
*/
void
g_doublemax(double *dpt)
{
QMP_max_double(dpt);
}
/* Broadcast wrappers: collective calls that copy node 0's value into
   every node's buffer. */
/*
** Broadcast floating point number from node zero
*/
void
broadcast_float(Real *fpt)
{
QMP_broadcast(fpt, sizeof(Real));
}
/*
** Broadcast double precision floating point number from node zero
*/
void
broadcast_double(double *dpt)
{
QMP_broadcast(dpt, sizeof(double));
}
/*
** Broadcast single precision complex number from node zero
*/
void
broadcast_complex(complex *cpt)
{
QMP_broadcast(cpt, 2*sizeof(Real));
}
/*
** Broadcast double precision complex number from node zero
*/
void
broadcast_dcomplex(double_complex *cpt)
{
QMP_broadcast(cpt, 2*sizeof(double));
}
/*
** Broadcast bytes from node 0 to all others
*/
void
broadcast_bytes(char *buf, int size)
{
QMP_broadcast(buf, size);
}
/******************************
* SEND AND RECEIVE INTEGER *
******************************/
/* These are blocking point-to-point transfers: each send must be paired
   with the matching receive call on the destination node. Return values
   of QMP_start/QMP_wait are not checked here. */
/*
** Send an integer to one other node
** This is to be called only by the node doing the sending
*/
void
send_integer(int tonode, int *address)
{
QMP_msgmem_t mm;
QMP_msghandle_t mh;
mm = QMP_declare_msgmem(address, sizeof(int));
mh = QMP_declare_send_to(mm, tonode, 0);
QMP_start(mh);
QMP_wait(mh);
QMP_free_msghandle(mh);
QMP_free_msgmem(mm);
}
/*
** Receive an integer from another node
*/
void
receive_integer(int fromnode, int *address)
{
QMP_msgmem_t mm;
QMP_msghandle_t mh;
mm = QMP_declare_msgmem(address, sizeof(int));
mh = QMP_declare_receive_from(mm, fromnode, 0);
QMP_start(mh);
QMP_wait(mh);
QMP_free_msghandle(mh);
QMP_free_msgmem(mm);
}
/****************************
* SEND AND RECEIVE FIELD *
****************************/
/*
** send_field is to be called only by the node doing the sending
*/
void
send_field(char *buf, int size, int tonode)
{
QMP_msgmem_t mm;
QMP_msghandle_t mh;
mm = QMP_declare_msgmem(buf, size);
mh = QMP_declare_send_to(mm, tonode, 0);
QMP_start(mh);
QMP_wait(mh);
QMP_free_msghandle(mh);
QMP_free_msgmem(mm);
}
/*
** get_field is to be called only by the node to which the field was sent
*/
void
get_field(char *buf, int size, int fromnode)
{
QMP_msgmem_t mm;
QMP_msghandle_t mh;
mm = QMP_declare_msgmem(buf, size);
mh = QMP_declare_receive_from(mm, fromnode, 0);
QMP_start(mh);
QMP_wait(mh);
QMP_free_msghandle(mh);
QMP_free_msgmem(mm);
}
/*********************
* TIMING ROUTINES *
*********************/
/*
** Double precision CPU time in seconds
*/
/*
** Double precision CPU time in seconds.
** Measures processor time via clock(), not wall-clock time.
*/
double
dclock_cpu(void)
{
  clock_t fine;  /* was "long": clock_t may be wider than long, which
                    could silently truncate the tick count */
  fine = clock();
  return( ((double)fine)/CLOCKS_PER_SEC);
}
/*
** Double precision wall clock time in seconds.
** Uses gettimeofday when available; otherwise falls back to CPU time,
** which differs from wall time under multithreading or when blocked.
*/
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
double dclock(void){
struct timeval tp;
gettimeofday(&tp,NULL);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
#else
double dclock(void){
return dclock_cpu();
}
#endif
/*
** Print time stamp
*/
void
time_stamp(char *msg)
{
/* print "msg: <local time>" on node 0 only; other nodes are silent.
   (The local variable shadows the time_t typedef'd name on purpose;
   ctime's result already ends in a newline.) */
time_t time_stamp;
if(mynode()==0){
time(&time_stamp);
printf("%s: %s\n", msg, ctime(&time_stamp));
fflush(stdout);
}
}
/**********************************************************************
* FUNCTIONS USED FOR GATHERS *
**********************************************************************/
/*
** sort a list of eight gather_t structures into the order we want for the
** nearest neighbor gathers: XUP,YUP,ZUP,TUP,TDOWN,ZDOWN,YDOWN,XDOWN,
** starting from the index for the first pointer
*/
void
sort_eight_gathers(int index)
{
/* make_gather(..., WANT_INVERSE) stores each direction's forward gather
   immediately followed by its inverse, i.e. entry 2*i is the forward
   gather for direction i and 2*i+1 is its inverse.  Redistribute them
   so that gather_array[index+XUP..TUP] are the forward directions and
   gather_array[index+OPP_DIR(i)] the corresponding backward ones. */
gather_t tt[8];
int i;
for(i=0; i<8; i++) memcpy(&tt[i], &gather_array[index+i], sizeof(gather_t));
for(i=XUP; i<=TUP; i++) {
memcpy(&gather_array[index+i], &tt[2*i], sizeof(gather_t));
memcpy(&gather_array[index+OPP_DIR(i)], &tt[2*i+1], sizeof(gather_t));
}
}
/*
** utility function for finding coordinates of neighbor
** This version for use by make_gather for nearest neighbor gathers
*/
static void
neighbor_coords_special(
int x, int y, int z, int t, /* coordinates of site */
int *dirpt, /* direction (eg XUP); passed by pointer to match the
generic gather mapping signature (make_gather passes &i) */
int fb, /* "forwards/backwards" */
int *x2p, int *y2p, int *z2p, int *t2p)
/* pointers to coordinates of neighbor */
{
int dir;
/* a backwards request is just the forward map in the opposite direction */
dir = (fb==FORWARDS) ? *dirpt : OPP_DIR(*dirpt);
/* start from the site itself; only one coordinate changes below */
*x2p = x; *y2p = y; *z2p = z; *t2p = t;
switch(dir) {
/* periodic wrap-around in each lattice direction */
case XUP : *x2p = (x+1)%nx; break;
case XDOWN : *x2p = (x+nx-1)%nx; break;
case YUP : *y2p = (y+1)%ny; break;
case YDOWN : *y2p = (y+ny-1)%ny; break;
case ZUP : *z2p = (z+1)%nz; break;
case ZDOWN : *z2p = (z+nz-1)%nz; break;
case TUP : *t2p = (t+1)%nt; break;
case TDOWN : *t2p = (t+nt-1)%nt; break;
default: printf("BOTCH: bad direction\n"); terminate(1);
}
}
/*
** Set up "comlink" structures needed by nearest neighbor gather routines.
** make_lattice() must be called first.
*/
void
make_nn_gathers(void)
{
int i, gather_parity;
/* the nearest-neighbor gathers must occupy slots 0..7 of gather_array,
   so no other gather may have been made yet */
if(n_gathers!=0) {
printf("error: make_nn_gathers must come before any make_gather\n");
terminate(1);
}
gather_array_len = 8;
gather_array = (gather_t *)malloc(gather_array_len*sizeof(gather_t));
if(gather_array==NULL) {
printf("error: not enough room for gather_array in make_nn_gathers\n");
terminate(1);
}
/* with any odd lattice dimension the +-1 wrap-around mixes parities,
   so the gather only SWITCHes parity when all dimensions are even */
if((nx&1)||(ny&1)||(nz&1)||(nt&1)) gather_parity = SCRAMBLE_PARITY;
else gather_parity = SWITCH_PARITY;
for(i=XUP; i<=TUP; i++)
make_gather( neighbor_coords_special, &i, WANT_INVERSE,
ALLOW_EVEN_ODD, gather_parity );
/* Sort into the order we want for nearest neighbor gathers,
so you can use XUP, XDOWN, etc. as argument in calling them. */
sort_eight_gathers( 0 );
}
/**********************************************************************
* FUNCTIONS USED TO MAKE GATHERS *
**********************************************************************/
#define RECEIVE 0
#define SEND 1
/*
** Sublattice (parity) label of the site at (x,y,z,t).
** Without N_SUBL32 this is the usual even/odd checkerboard parity;
** with N_SUBL32 it is a 32-sublattice label built from the individual
** coordinate parities plus the parity of the halved coordinates.
*/
static int
parity_function(int x, int y, int z, int t)
{
#ifndef N_SUBL32
  /* low bit of the coordinate sum; XOR of the low bits is identical */
  return (x ^ y ^ z ^ t) & 1;
#else
  int label = (x % 2) + 2*(y % 2) + 4*(z % 2) + 8*(t % 2);
  label += 16*((x/2 + y/2 + z/2 + t/2) % 2);
  return label;
#endif
}
/*
** copy a linked list of comlinks, switching send and receive parity
*/
static comlink *
copy_list_switch(comlink *old_compt, int *send_subl)
{
/* Duplicate the comlink list node by node, relabelling each receiver
   sublattice r_subl as the sender sublattice send_subl[r_subl].
   The copy is shallow: sitelist pointers are shared with the original
   list, as permitted by the comlink comments above.
   NOTE(review): malloc results are not checked here. */
comlink *firstpt, *compt;
int r_subl, s_subl;
if(old_compt==NULL) return(NULL);
firstpt = compt = (comlink *)malloc( sizeof(comlink) );
do{
compt->othernode = old_compt->othernode;
for(r_subl=0; r_subl<NUM_SUBL; r_subl++) {
s_subl = send_subl[r_subl];
compt->n_subl_connected[s_subl] = old_compt->n_subl_connected[r_subl];
compt->sitelist[s_subl] = old_compt->sitelist[r_subl];
}
/* slot NUM_SUBL covers all parities and needs no relabelling */
compt->n_subl_connected[NUM_SUBL] = old_compt->n_subl_connected[NUM_SUBL];
compt->sitelist[NUM_SUBL] = old_compt->sitelist[NUM_SUBL];
/* allocate the next copy only if the original list continues */
if( old_compt->nextcomlink != NULL)
compt->nextcomlink = (comlink *)malloc( sizeof(comlink) );
else compt->nextcomlink = NULL;
old_compt = old_compt->nextcomlink;
compt = compt->nextcomlink;
} while( old_compt!=NULL );
return(firstpt);
}
/*
** sort a list of sites according to the order of the sites on the
** node with which they communicate
*/
static void
sort_site_list(
int n, /* number of elements in list */
int *list, /* pointer to list */
void (*func)(int, int, int, int, int *, int, int *, int *, int *, int *),
/* function which defines mapping */
int *args, /* arguments to pass to function */
int forw_back) /* look forwards or backwards in map */
{
register int j,k,in1,in2,flag;
register site *s;
int x,y,z,t;
int *key;
if(n==0) return;
key = (int *)malloc(n*sizeof(int));
if(key == NULL) {
printf("sort_site_list(%d): no room for key\n",mynode());
terminate(1);
}
/* Construct sort key: for each local site, the index its mapped
   partner occupies on the remote node, so both sides traverse the
   message buffer in the same order */
for(j=0; j<n; j++) {
s = &(lattice[list[j]]);
func(s->x,s->y,s->z,s->t,args,forw_back,&x,&y,&z,&t);
key[j] = node_index(x,y,z,t);
}
/* bubble sort (O(n^2)), if this takes too long fix it later;
   key and list are permuted in lockstep, with an early exit when a
   full pass makes no swaps */
for(j = n-1; j>0; j--) {
flag=0;
for(k=0; k<j; k++){
in1 = key[k];
in2 = key[k+1];
if(in1>in2){
flag=1;
key[k] = in2;
key[k+1] = in1;
in1 = list[k];
list[k] = list[k+1];
list[k+1] = in1;
}
}
if(flag==0)break;
}
free(key);
}
/*
** make comlink for send or receive
*/
static comlink *
make_send_receive_list(
void (*func)(int, int, int, int, int *, int, int *, int *, int *, int *),
/* function which defines sites to gather from */
int *args, /* list of arguments, to be passed to function */
int want_even_odd, /* ALLOW_EVEN_ODD or NO_EVEN_ODD */
int forw_back, /* FORWARDS or BACKWARDS */
int send_recv, /* SEND or RECEIVE list */
int *n_msgs) /* returns number of messages in list */
{
/* Build the linked list of comlinks for one gather direction in three
   passes: (1) count off-node neighbors per remote node and sublattice,
   (2) allocate one comlink per remote node with nonzero count,
   (3) fill in the site lists, then sort them so both ends of each
   message agree on the buffer ordering. */
int i,j,subl; /* scratch */
site *s; /* scratch */
int x,y,z,t; /* coordinates */
int *sbuf[NUM_SUBL]; /* to be malloc'd */
int *tbuf; /* to be malloc'd */
comlink **combuf; /* to be malloc'd, remember where comlinks are */
comlink *compt,**comptpt;
comlink *firstpt;
/* make temporary buffers of numnodes() integers to count numbers of
neighbors in each sublattice on each node */
for(subl=0; subl<NUM_SUBL; subl++) {
sbuf[subl] = (int *)malloc( numnodes()*sizeof(int) );
/* clear neighbor_numbers */
for(i=0; i<numnodes(); i++) sbuf[subl][i] = 0;
}
tbuf = (int *)malloc( numnodes()*sizeof(int) );
for(i=0; i<numnodes(); i++) tbuf[i] = 0;
combuf = (comlink **)malloc( numnodes()*sizeof(comlink *) );
/* PASS 1: scan sites in lattice and count off-node neighbors */
FORALLSITES(i,s) {
/* find coordinates, node, and sublattice of receiving site */
if( send_recv==RECEIVE ) {
func( s->x, s->y, s->z, s->t, args, forw_back, &x, &y, &z, &t );
subl = parity_function(s->x,s->y,s->z,s->t);
}
else { /* SEND: invert the mapping to find who we send to */
func( s->x, s->y, s->z, s->t, args, -forw_back, &x, &y, &z, &t );
subl = parity_function(x,y,z,t);
}
j = node_number(x,y,z,t);
/* if site is off node, increment neighbor_counter */
if( j != mynode() ) {
++tbuf[j];
if(want_even_odd==NO_EVEN_ODD) subl = 0;
++sbuf[subl][j];
}
}
*n_msgs = 0;
firstpt = NULL;
comptpt = &firstpt;
/* PASS 2: for each neighbor_counter that is nonzero, create a comlink.
   NOTE(review): the comlink and sitelist mallocs are unchecked. */
for(j=0; j<numnodes(); j++) {
if( j==mynode() ) continue; /* not for local node */
if( tbuf[j]==0 ) continue; /* no neighbors on this node */
compt = (comlink *)malloc( sizeof(comlink) );
*comptpt = compt;
combuf[j] = compt; /* to make it easy to find again */
compt->nextcomlink = NULL; /* currently terminates list */
compt->othernode = j;
compt->n_subl_connected[NUM_SUBL] = tbuf[j];
for(subl=0; subl<NUM_SUBL; subl++) {
compt->n_subl_connected[subl] = sbuf[subl][j];
}
/* one contiguous allocation; the per-sublattice lists are slices
   of it, laid out consecutively */
compt->sitelist[0] = compt->sitelist[NUM_SUBL] =
(int *)malloc( tbuf[j]*sizeof(int) );
for(subl=1; subl<NUM_SUBL; subl++)
compt->sitelist[subl] = (compt->sitelist[subl-1]) + sbuf[subl-1][j];
/* sitelist[...] must be filled in later */
comptpt = &(compt->nextcomlink); /* linked list, if we
extend it this will get address of next comlink. */
++(*n_msgs);
}
/* clear neighbor_numbers, to be used as counters now */
for(subl=0; subl<NUM_SUBL; subl++) {
for(i=0; i<numnodes(); i++) sbuf[subl][i] = 0;
}
/* PASS 3: scan sites in node again, filling in the site lists */
FORALLSITES(i,s){
/* find coordinates, node, and sublattice of receiving site */
if( send_recv==RECEIVE ){
func( s->x, s->y, s->z, s->t, args, forw_back, &x,&y,&z,&t);
subl = parity_function(s->x,s->y,s->z,s->t);
}
else { /* SEND */
func( s->x, s->y, s->z, s->t, args, -forw_back, &x,&y,&z,&t);
subl = parity_function(x,y,z,t);
}
j = node_number(x,y,z,t);
/* if neighbor is offnode, add to list in appropriate comlink */
if( j != mynode() ){
if(want_even_odd==NO_EVEN_ODD) subl = 0;
combuf[j]->sitelist[subl][sbuf[subl][j]] = i;
++sbuf[subl][j];
}
}
/* sort the lists of links according to the ordering of their
even neighbors in the lower numbered node. The list of sites
on the lower numbered node is already in order. */
for(compt=firstpt; compt != NULL; compt=compt->nextcomlink) {
if(compt->othernode > this_node)continue;
/* this is lower numbered node, so don't sort */
if( send_recv==RECEIVE ) i = forw_back;
else i = -forw_back;
for(subl=0; subl<NUM_SUBL; subl++)
sort_site_list( compt->n_subl_connected[subl],
compt->sitelist[subl], func, args, i );
}
/* free temporary storage */
free(combuf);
free(tbuf);
for(subl=0; subl<NUM_SUBL; subl++) free(sbuf[subl]);
return(firstpt);
}
#if 0
/*
** determine tag offsets needed by sender
*/
static id_list_t *
make_id_list(
comlink *recv, /* neighborlist */
int n_recv, /* number of receives */
comlink *send) /* neighborlist_send */
{
int i, *buf;
id_list_t *tol_top, *tol, **tol_next;
QMP_msgmem_t *smm, rmm;
QMP_msghandle_t *smh, rmh;
buf = (int *)malloc(n_recv*sizeof(int));
smm = (QMP_msgmem_t *)malloc(n_recv*sizeof(QMP_msgmem_t));
smh = (QMP_msghandle_t *)malloc(n_recv*sizeof(QMP_msghandle_t));
for(i=0; recv!=NULL; ++i, recv=recv->nextcomlink) {
buf[i] = i;
smm[i] = QMP_declare_msgmem(&buf[i], sizeof(int));
smh[i] = QMP_declare_send_to(smm[i], recv->othernode, 0);
QMP_start(smh[i]);
}
if(i!=n_recv) {printf("error i!=n_recv\n"); terminate(1);}
tol_next = &tol_top;
while(send!=NULL) {
tol = *tol_next = (id_list_t *)malloc(sizeof(id_list_t));
rmm = QMP_declare_msgmem(&i, sizeof(int));
rmh = QMP_declare_receive_from(rmm, send->othernode, 0);
QMP_start(rmh);
QMP_wait(rmh);
QMP_free_msghandle(rmh);
QMP_free_msgmem(rmm);
tol->id_offset = i;
tol_next = &(tol->next);
send = send->nextcomlink;
}
*tol_next = NULL;
for(i=0; i<n_recv; ++i) {
QMP_wait(smh[i]);
QMP_free_msghandle(smh[i]);
QMP_free_msgmem(smm[i]);
}
free(smh);
free(smm);
free(buf);
return tol_top;
}
/*
** determine max number of ids needed for gather
*/
static int
get_max_receives(int n_recv)
{
double temp;
temp = n_recv;
QMP_max_double(&temp);
return (int)(temp+0.5);
}
#endif
/*
** add another gather to the list of tables
*/
int
make_gather(
void (*func)(int, int, int, int, int *, int, int *, int *, int *, int *),
/* function which defines sites to gather from */
int *args, /* list of arguments, to be passed to function */
int inverse, /* OWN_INVERSE, WANT_INVERSE, or NO_INVERSE */
int want_even_odd, /* ALLOW_EVEN_ODD or NO_EVEN_ODD */
int parity_conserve) /* {SAME,SWITCH,SCRAMBLE}_PARITY */
{
int i,j,subl; /* scratch */
site *s; /* scratch */
int dir; /* direction */
int x,y,z,t; /* coordinates */
int *send_subl; /* sublist of sender for a given receiver */
/* we will have one or two more gathers */
if( inverse==WANT_INVERSE ) n_gathers += 2;
else n_gathers += 1;
if(n_gathers>gather_array_len) {
gather_array_len = n_gathers;
/* lengthen gather array to add more gathers */
gather_array =
(gather_t *)realloc(gather_array, gather_array_len*sizeof(gather_t));
}
dir = n_gathers - 1; /* index of gather we are working on */
gather_array[dir].neighbor = (int *)malloc( sites_on_node*sizeof(int) );
if( gather_array[dir].neighbor==NULL ) {
printf("make_gather: NODE %d: no room for neighbor vector\n",this_node);
terminate(1);
}
if( inverse==WANT_INVERSE ) {
dir = n_gathers - 2; /* index of gather we are working on */
gather_array[dir].neighbor = (int *)malloc( sites_on_node*sizeof(int) );
if( gather_array[dir].neighbor==NULL ) {
printf("make_gather: NODE %d no room for neighbor vector\n",this_node);
terminate(1);
}
}
if( want_even_odd==ALLOW_EVEN_ODD && parity_conserve!=SCRAMBLE_PARITY ) {
send_subl = (int *)malloc(NUM_SUBL*sizeof(int));
if(send_subl==NULL){
printf("NODE %d: no room for send_subl\n",this_node);
terminate(1);
}
for(subl=0; subl<NUM_SUBL; subl++) send_subl[subl] = NOWHERE;
} else {
send_subl = NULL;
}
/* Check to see if mapping has advertised parity and inverse properties */
/* Also check to see if it returns legal values for coordinates */
FORALLSITES(i,s) {
/* find coordinates of neighbor who sends us data */
func( s->x, s->y, s->z, s->t, args, FORWARDS, &x,&y,&z,&t);
if( x<0 || y<0 || z<0 || t<0 || x>=nx || y>=ny || z>=nz || t>=nt){
printf("DUMMY! Your gather mapping does not stay in lattice\n");
printf("It mapped %d %d %d %d to %d %d %d %d\n",
s->x,s->y,s->z,s->t,x,y,z,t);
terminate(1);
}
if(parity_conserve!=SCRAMBLE_PARITY) {
int r_subl, s_subl;
r_subl = parity_function(s->x,s->y,s->z,s->t);
s_subl = parity_function(x,y,z,t);
if( want_even_odd==ALLOW_EVEN_ODD ) {
if( send_subl[r_subl] == NOWHERE ) {
send_subl[r_subl] = s_subl;
}
else if( send_subl[r_subl] != s_subl ){
printf("DUMMY! Your gather mixes up sublattices: %d vs %d\n",
send_subl[r_subl], s_subl);
printf("on mapping %i %i %i %i -> %i %i %i %i\n",
s->x,s->y,s->z,s->t, x,y,z,t);
terminate(1);
}
}
if( parity_conserve==SAME_PARITY && s_subl!=r_subl ){
printf("DUMMY! Your gather mapping does not obey claimed parity");
printf(", namely SAME_PARITY\n");
printf("It mapped %d %d %d %d with %d to %d %d %d %d with %d\n",
s->x,s->y,s->z,s->t,r_subl,x,y,z,t,s_subl);
terminate(1);
}
if( parity_conserve==SWITCH_PARITY && s_subl==r_subl ){
printf("DUMMY! Your gather mapping does not obey claimed parity");
printf(", namely SWITCH_PARITY\n");
printf("It mapped %d %d %d %d with %d to %d %d %d %d with %d\n",
s->x,s->y,s->z,s->t,r_subl,x,y,z,t,s_subl);
terminate(1);
}
if( inverse==OWN_INVERSE ) {
int x2,y2,z2,t2;
func( x, y, z, t, args, FORWARDS, &x2,&y2,&z2,&t2);
if( s->x!=x2 || s->y!=y2 || s->z!=z2 || s->t!=t2 ) {
printf("DUMMY! Your gather mapping is not its own inverse\n");
printf("It's square mapped %d %d %d %d to %d %d %d %d\n",
s->x,s->y,s->z,s->t,x2,y2,z2,t2);
terminate(1);
}
}
}
}
/* RECEIVE LISTS: */
/* Fill in pointers to sites which are on this node, NOWHERE if
they are off-node */
FORALLSITES_OMP(i,s,private(x,y,z,t,j)){
/* find coordinates of neighbor who sends us data */
func( s->x, s->y, s->z, s->t, args, FORWARDS, &x,&y,&z,&t);
j = node_number(x,y,z,t); /* node for neighbor site */
/* if neighbor is on node, set up pointer */
if( j == mynode() ) gather_array[dir].neighbor[i] = node_index(x,y,z,t);
else gather_array[dir].neighbor[i] = NOWHERE;
} END_LOOP_OMP;
/* make lists of sites which get data from other nodes. */
gather_array[dir].neighborlist =
make_send_receive_list( func, args, want_even_odd, FORWARDS, RECEIVE,
&gather_array[dir].n_recv_msgs );
/* SEND LISTS: */
/* Now make lists of sites to which we send */
/* Under some conditions, if mapping is its own inverse we can use
the lists we have already made */
if( inverse==OWN_INVERSE &&
( want_even_odd!=ALLOW_EVEN_ODD || parity_conserve!=SCRAMBLE_PARITY ) ) {
if( want_even_odd==NO_EVEN_ODD || parity_conserve==SAME_PARITY ) {
gather_array[dir].neighborlist_send = gather_array[dir].neighborlist;
gather_array[dir].n_send_msgs = gather_array[dir].n_recv_msgs;
} else {
gather_array[dir].neighborlist_send =
copy_list_switch( gather_array[dir].neighborlist, send_subl );
gather_array[dir].n_send_msgs = gather_array[dir].n_recv_msgs;
}
} else {
/* Make new linked list of comlinks for send lists */
gather_array[dir].neighborlist_send =
make_send_receive_list( func, args, want_even_odd, FORWARDS, SEND,
&gather_array[dir].n_send_msgs );
} /* End general case for send lists */
#if 0
gather_array[dir].id_list =
make_id_list( gather_array[dir].neighborlist,
gather_array[dir].n_recv_msgs,
gather_array[dir].neighborlist_send );
gather_array[dir].offset_increment =
get_max_receives( gather_array[dir].n_recv_msgs );
#endif
if( inverse != WANT_INVERSE ) {
free(send_subl);
return(dir);
}
/******************
* INVERSE GATHER *
******************/
/* Now, if necessary, make inverse gather */
/* In most cases, we can use the same lists as the gather, in one
form or another. Of course, by the time you get to here
you know that inverse = WANT_INVERSE */
dir++; /* inverse gather has direction one more than original */
/* Always set up pointers to sites on this node */
/* scan sites in lattice */
FORALLSITES(i,s) {
/* find coordinates of neighbor who sends us data */
func( s->x, s->y, s->z, s->t, args, BACKWARDS, &x,&y,&z,&t);
j = node_number(x,y,z,t); /* node for neighbor site */
/* if neighbor is on node, set up pointer */
if( j == mynode() ) gather_array[dir].neighbor[i] = node_index(x,y,z,t);
else gather_array[dir].neighbor[i] = NOWHERE;
}
if( parity_conserve==SAME_PARITY || want_even_odd==NO_EVEN_ODD ) {
/* Use same comlinks as inverse gather, switching send and receive.
Nearest neighbor gathers are an example of this case. */
gather_array[dir].neighborlist = gather_array[dir-1].neighborlist_send;
gather_array[dir].neighborlist_send = gather_array[dir-1].neighborlist;
gather_array[dir].n_recv_msgs = gather_array[dir-1].n_send_msgs;
gather_array[dir].n_send_msgs = gather_array[dir-1].n_recv_msgs;
} else if( parity_conserve==SWITCH_PARITY ) {
/* make new comlinks, but use same lists as inverse gather, switching
send and receive, switching even and odd. */
gather_array[dir].neighborlist =
copy_list_switch( gather_array[dir-1].neighborlist_send, send_subl );
gather_array[dir].neighborlist_send =
copy_list_switch( gather_array[dir-1].neighborlist, send_subl );
gather_array[dir].n_recv_msgs = gather_array[dir-1].n_send_msgs;
gather_array[dir].n_send_msgs = gather_array[dir-1].n_recv_msgs;
} else { /* general case. Really only get here if ALLOW_EVEN_ODD
and SCRAMBLE_PARITY */
/* RECEIVE LISTS */
gather_array[dir].neighborlist =
make_send_receive_list( func, args, want_even_odd, BACKWARDS, RECEIVE,
&gather_array[dir].n_recv_msgs );
/* SEND LISTS */
gather_array[dir].neighborlist_send =
make_send_receive_list( func, args, want_even_odd, BACKWARDS, SEND,
&gather_array[dir].n_send_msgs );
} /* End making new lists for inverse gather */
#if 0
gather_array[dir].id_list =
make_id_list( gather_array[dir].neighborlist,
gather_array[dir].n_recv_msgs,
gather_array[dir].neighborlist_send );
gather_array[dir].offset_increment =
get_max_receives(gather_array[dir].n_recv_msgs);
#endif
free(send_subl);
return(dir-1);
}
/**********************************************************************
* GATHER ROUTINES *
**********************************************************************
declare_strided_gather() returns a pointer to msg_tag which will
be used as input to subsequent prepare_gather() (optional), do_gather(),
wait_gather() and cleanup_gather() calls.
This handles gathers from both the site structure and arrays of
fields and is not called directly by the user. Instead they should
call declare_gather_site() or declare_gather_field().
prepare_gather() allocates buffers needed for the gather. This call is
optional since it will automatically be called from do_gather() if
not explicitly called before.
do_gather() starts the actual gather. This may be repeated after a
wait_gather() to repeat the exact same gather.
wait_gather() waits for the gather to finish.
cleanup_gather() frees memory allocated for the gather including the msg_tag.
example:
msg_tag *tag;
tag = declare_gather_site( F_OFFSET(phi), sizeof(su3_vector), XUP,
EVEN, gen_pt[0] );
prepare_gather(tag); ** this step is optional **
do_gather(tag);
** do other stuff, but don't modify tag or gen_pt[0] **
wait_gather(tag);
** gen_pt[0][i] now contains the address of the phi
vector (or a copy thereof) on the neighbor of site i in the
XUP direction for all even sites i.
Do whatever you want with it here, but don't modify tag or
gen_pt[0].
Do modify the source field phi. **
do_gather(tag);
** do other stuff **
wait_gather(tag);
** gen_pt[0][i] now contains the address of the modified phi.
The restart-wait may be repeated as often as desired. **
cleanup_gather(tag);
** subsequent calls will overwrite the gathered fields. but if you
don't clean up, you will eventually run out of space **
*/
/*
** returns msg_tag containing details for specific gather
** handles gathers from both the site structure and arrays of fields
*/
/* Declare (but do not start) a gather along the pre-built neighbor tables
   for direction 'index'.  On-node neighbors are resolved immediately by
   writing their addresses into 'dest'; off-node neighbors get a receive
   descriptor (and the matching send descriptors are built for the sites
   this node must ship out).  Returns a malloc'd msg_tag to be passed to
   prepare_gather()/do_gather()/wait_gather()/cleanup_gather(). */
msg_tag *
declare_strided_gather(
void *field, /* source buffer aligned to desired field */
int stride, /* bytes between fields in source buffer */
int size, /* size in bytes of the field (eg sizeof(su3_vector))*/
int index, /* direction to gather from. eg XUP - index into
neighbor tables */
int subl, /* subl of sites whose neighbors we gather.
It is EVENANDODD, if all sublattices are done. */
char ** dest) /* one of the vectors of pointers */
{
int i; /* scratch */
site *s; /* scratch pointer to site */
msg_tag *mtag; /* message tag structure we will return a pointer to */
msg_sr_t *mrecv, *msend; /* arrays for send and receive lists */
gmem_t *gmem;
comlink *compt; /* pointer to current comlink */
gather_t *gt; /* pointer to current gather */
//id_list_t *idl;
gt = &gather_array[index];
/* set pointers in sites whose neighbors are on this node. (If all
neighbors are on this node, this is the only thing done.) */
if(subl==EVENANDODD) {
FORALLSITES_OMP(i,s,){ if(gt->neighbor[i] != NOWHERE){
dest[i] = ((char *)field) + gt->neighbor[i]*stride;
}} END_LOOP_OMP;
} else {
FORSOMEPARITY_OMP(i,s,subl,){ if(gt->neighbor[i] != NOWHERE){
dest[i] = ((char *)field) + gt->neighbor[i]*stride;
}} END_LOOP_OMP;
}
/* From here on 'subl' is re-used as an index into the per-sublattice
   comlink tables (0=EVEN, 1=ODD, 2=EVENANDODD without N_SUBL32). */
#ifndef N_SUBL32
switch(subl) {
case EVEN: subl = 0; break;
case ODD: subl = 1; break;
case EVENANDODD: subl = 2; break;
default: printf("ERROR: bad sublattice\n"); terminate(subl);
}
#else
if(subl==EVENANDODD) subl = NUM_SUBL;
#endif
/* allocate the message tag */
/* NOTE(review): malloc result is not checked here, unlike the
   mrecv/msend allocations below. */
mtag = (msg_tag *)malloc(sizeof(msg_tag));
#ifdef CRC_DEBUG
mtag->index = index;
#endif
mtag->prepared = 0; /* buffers/handles are created later by prepare_gather() */
#if 0
mtag->nids = gt->offset_increment;
mtag->ids = NULL;
#endif
/* allocate a buffer for the msg_sr_t's. This is dynamically allocated
because there may be an arbitrary number of gathers in progress
in any direction. */
/* count receive messages: one per comlink with sites in this sublattice */
mtag->nrecvs = 0;
for( compt = gt->neighborlist; compt != NULL;
compt = compt->nextcomlink ) {
if(compt->n_subl_connected[subl]!=0) mtag->nrecvs++;
}
if( mtag->nrecvs==0 ) mrecv = NULL;
else {
mrecv = (msg_sr_t *)malloc(mtag->nrecvs*sizeof(msg_sr_t));
if(mrecv==NULL) {
printf("NO ROOM for mrecv, node %d\n", mynode());
terminate(1);
}
}
mtag->recv_msgs = mrecv;
/* count send messages the same way, over the send comlink list */
mtag->nsends = 0;
for( compt = gt->neighborlist_send; compt != NULL;
compt = compt->nextcomlink ) {
if(compt->n_subl_connected[subl]!=0) mtag->nsends++;
}
if( mtag->nsends==0 ) msend = NULL;
else {
msend = (msg_sr_t *)malloc(mtag->nsends*sizeof(msg_sr_t));
if(msend==NULL) {
printf("NO ROOM for msend, node %d\n", mynode());
terminate(1);
}
}
mtag->send_msgs = msend;
/* for each node which has neighbors of my sites */
/* receive side: the gmem target is the 'dest' pointer array itself,
   so prepare_gather() can aim each dest[site] into the receive buffer */
for( i=0, compt = gt->neighborlist; compt != NULL;
compt = compt->nextcomlink ) {
if(compt->n_subl_connected[subl]==0) continue;
mrecv[i].msg_node = compt->othernode;
//mrecv[i].id_offset = i;
mrecv[i].msg_size = size*compt->n_subl_connected[subl];
mrecv[i].msg_buf = NULL;
gmem = (gmem_t *)malloc(sizeof(gmem_t));
mrecv[i].gmem = gmem;
gmem->num = compt->n_subl_connected[subl];
gmem->sitelist = compt->sitelist[subl];
gmem->mem = (char *)dest;
gmem->stride = sizeof(char *);
gmem->size = size;
gmem->next = NULL;
i++;
}
/* for each node whose neighbors I have */
/* send side: the gmem source is the user's field buffer with its stride */
//idl = gt->id_list;
for( i=0, compt = gt->neighborlist_send; compt != NULL;
compt = compt->nextcomlink/*, idl = idl->next*/ ) {
if(compt->n_subl_connected[subl]==0) continue;
msend[i].msg_node = compt->othernode;
//msend[i].id_offset = idl->id_offset;
msend[i].msg_size = size*compt->n_subl_connected[subl];
msend[i].msg_buf = NULL;
gmem = (gmem_t *)malloc(sizeof(gmem_t));
msend[i].gmem = gmem;
gmem->num = compt->n_subl_connected[subl];
gmem->sitelist = compt->sitelist[subl];
gmem->mem = (char *)field;
gmem->stride = stride;
gmem->size = size;
gmem->next = NULL;
i++;
}
return mtag;
}
/*
** allocate buffers for gather
*/
/* Allocate receive/send buffers and QMP message handles for a declared
   gather, and aim the dest[] pointers of off-node sites into the receive
   buffers.  Called automatically from do_gather() if not called first.
   Errors out if the tag was already prepared. */
void
prepare_gather(msg_tag *mtag)
{
  int i, j/*, nids*/;
  //int *ids;
  msg_sr_t *mrecv,*msend;
  gmem_t *gmem;
  char *tpt;

  if(mtag->prepared) {
    printf("error: already prepared\n");
    terminate(1);
  }
  mtag->prepared = 1;
#if 0
  nids = mtag->nids;
  if(nids!=0) {
    mtag->ids = ids = (int *)malloc(nids*sizeof(int));
    for(i=0, j=id_offset; i<nids; i++, j=(j+1)%num_gather_ids) {
      /* find next available type */
      while(id_array[j]!=0) {
	j = (j+1)%num_gather_ids;
	if(j==id_offset) {
	  printf("error: not enough message ids\n");
	  terminate(1);
	}
      }
      ids[i] = j;
      id_array[j] = 1;
    }
    id_offset = j;
  }
#endif
  /* allocate the list of receive message handles */
  if( mtag->nrecvs == 0 )
    mtag->mhrecvlist = NULL;
  else{
    mtag->mhrecvlist = (QMP_msghandle_t *)malloc(mtag->nrecvs*sizeof(QMP_msghandle_t));
    if(mtag->mhrecvlist == NULL){
      printf("NO ROOM for mrecv, node %d\n", mynode());
      terminate(1);
    }
  }
  mrecv = mtag->recv_msgs;
  /* for each node which has neighbors of my sites: allocate the receive
     buffer and point each destination site pointer into it */
  for(i=0; i<mtag->nrecvs; ++i) {
    mrecv[i].qmp_mem =
      QMP_allocate_aligned_memory( mrecv[i].msg_size+CRCBYTES, mem_align, mem_flags );
    if(mrecv[i].qmp_mem==NULL) {
      printf("NO ROOM for mrecv, node %d\n", mynode());
      terminate(1);
    }
    tpt = (char *) QMP_get_memory_pointer(mrecv[i].qmp_mem);
    mrecv[i].msg_buf = tpt;
#ifdef CRC_DEBUG
    memset(tpt, '\0', mrecv[i].msg_size+CRCBYTES);
#endif
    mrecv[i].mm = QMP_declare_msgmem(mrecv[i].msg_buf, mrecv[i].msg_size+CRCBYTES);
    //mrecv[i].mh = QMP_declare_receive_from(mrecv[i].mm, mrecv[i].msg_node, 0);
    mtag->mhrecvlist[i] = QMP_declare_receive_from(mrecv[i].mm, mrecv[i].msg_node, 0);
    /* set pointers in sites to correct location */
    gmem = mrecv[i].gmem;
    do {
      /* BUG FIX: the original loop carried tpt across iterations
	 (tpt+=gmem->size in the loop increment) while declaring it
	 private in the OpenMP clause, so each thread read an
	 uninitialized copy and the value of tpt after the loop was
	 unspecified.  Compute each destination from the loop index
	 instead; the sequential result is identical. */
#ifdef OMP
#pragma omp parallel for private(j)
#endif
      for(j=0; j<gmem->num; ++j) {
	((char **)gmem->mem)[gmem->sitelist[j]] = tpt + (size_t)j*gmem->size;
      }
      tpt += (size_t)gmem->num*gmem->size;
    } while((gmem=gmem->next)!=NULL);
  }
  /* collapse multiple receives into one handle for QMP_start/QMP_wait */
  if(mtag->nrecvs==1) {
    mtag->mhrecv = mtag->mhrecvlist[0];
  } else if(mtag->nrecvs>1) {
    mtag->mhrecv = QMP_declare_multiple( mtag->mhrecvlist, mtag->nrecvs );
  }
  /* allocate the list of send message handles */
  if(mtag->nsends == 0)
    mtag->mhsendlist = NULL;
  else{
    mtag->mhsendlist = (QMP_msghandle_t *)malloc(mtag->nsends*sizeof(QMP_msghandle_t));
    if(mtag->mhsendlist == NULL){
      printf("NO ROOM for msend, node %d\n", mynode());
      terminate(1);
    }
  }
  msend = mtag->send_msgs;
  /* for each node whose neighbors I have: allocate the send buffer */
  for(i=0; i<mtag->nsends; ++i) {
    msend[i].qmp_mem =
      QMP_allocate_aligned_memory( msend[i].msg_size+CRCBYTES, mem_align, mem_flags );
    if(msend[i].qmp_mem==NULL) {
      printf("NO ROOM for msg_buf, node %d\n",mynode());
      terminate(1);
    }
    msend[i].msg_buf = (char *) QMP_get_memory_pointer(msend[i].qmp_mem);
    msend[i].mm = QMP_declare_msgmem(msend[i].msg_buf, msend[i].msg_size+CRCBYTES);
    //msend[i].mh = QMP_declare_send_to(msend[i].mm, msend[i].msg_node, 0);
    mtag->mhsendlist[i] = QMP_declare_send_to(msend[i].mm, msend[i].msg_node, 0);
  }
  if(mtag->nsends==1) {
    mtag->mhsend = mtag->mhsendlist[0];
  } else if(mtag->nsends>1) {
    mtag->mhsend = QMP_declare_multiple( mtag->mhsendlist, mtag->nsends );
  }
}
/*
** actually execute the gather
*/
/* Start the actual gather: post the receives, pack the send buffers from
   the source field, (optionally append a CRC), and start the sends.
   May be repeated after wait_gather() to re-run the identical gather. */
void
do_gather(msg_tag *mtag) /* previously returned by start_gather_site */
{
  int i,j; /* scratch */
  char *tpt; /* scratch pointer in buffers */
  msg_sr_t *mbuf;
  gmem_t *gmem;

  if(!mtag->prepared) prepare_gather(mtag);
#if 0
  mbuf = mtag->recv_msgs;
  /* for each node which has neighbors of my sites */
  for(i=0; i<mtag->nrecvs; i++) {
    /* post receive */
    QMP_start(mbuf[i].mh);
  }
#endif
  if(mtag->nrecvs>0) QMP_start(mtag->mhrecv);
  mbuf = mtag->send_msgs;
  /* for each node whose neighbors I have */
  for(i=0; i<mtag->nsends; ++i) {
    /* gather data into the buffer */
    tpt = mbuf[i].msg_buf;
    gmem = mbuf[i].gmem;
    do {
      /* BUG FIX: the original loop advanced tpt in the loop increment
	 while declaring it private in the OpenMP clause, so each thread
	 copied into an uninitialized pointer.  Address the buffer by
	 loop index instead; sequential behavior is unchanged. */
#ifdef OMP
#pragma omp parallel for private(j)
#endif
      for(j=0; j<gmem->num; ++j) {
	memcpy( tpt + (size_t)j*gmem->size,
		gmem->mem + gmem->sitelist[j]*gmem->stride, gmem->size );
      }
      tpt += (size_t)gmem->num*gmem->size;
    } while((gmem=gmem->next)!=NULL);
    /* start the send */
#ifdef COM_CRC
    {
      int msg_size;
      char *crc_pt;
      u_int32type *crc;
      /* append a CRC32 of the payload at the end of the message */
      tpt = mbuf[i].msg_buf;
      msg_size = mbuf[i].msg_size;
      crc_pt = tpt + msg_size;
      crc = (u_int32type *)crc_pt;
      *crc = crc32(0, tpt, msg_size );
#ifdef CRC_DEBUG
      {
	char filename[128];
	FILE *dump;
	sprintf(filename,"/tmp/send.%d.to.%d.dir%d.msg%d",
		mynode(),mbuf[i].msg_node,mtag->index,i);
	dump = fopen(filename,"w");
	fwrite(tpt, 1, msg_size + CRCBYTES, dump);
	fclose(dump);
      }
#endif
    }
#endif
    //QMP_start(mbuf[i].mh);
  }
  if(mtag->nsends>0) QMP_start(mtag->mhsend);
}
/*
** wait for gather to finish
*/
/* Block until the receives and sends of a started gather complete.
   With COM_CRC, also verify the checksum appended to each received
   message and abort (on all nodes, with CRC_DEBUG) on mismatch. */
void
wait_gather(msg_tag *mtag)
{
#ifdef COM_CRC
  int i;
  int fail = 0;
#endif
  /* wait for all receive messages */
#if 0
  for(i=0; i<mtag->nrecvs; i++) {
    QMP_wait( mtag->recv_msgs[i].mh );
  }
#endif
  if(mtag->nrecvs>0) QMP_wait( mtag->mhrecv );
  /* wait for all send messages */
#if 0
  for(i=0; i<mtag->nsends; i++) {
    QMP_wait( mtag->send_msgs[i].mh );
  }
#endif
  if(mtag->nsends>0) QMP_wait( mtag->mhsend );
  /* BUG FIX: was "#if COM_CRC", inconsistent with the "#ifdef COM_CRC"
     guard above; "#if" on a macro defined with no value is a
     preprocessing error, so the two guards could disagree. */
#ifdef COM_CRC
  /* Verify the checksums received */
  for(i=0; i<mtag->nrecvs; i++) {
    {
      u_int32type crcgot;
      msg_sr_t *mbuf;
      char *tpt;
      int msg_size;
      char *crc_pt;
      u_int32type *crc;
      mbuf = mtag->recv_msgs;
      tpt = mbuf[i].msg_buf;
      msg_size = mbuf[i].msg_size;
      /* sender stored its CRC32 immediately after the payload */
      crc_pt = tpt + msg_size;
      crc = (u_int32type *)crc_pt;
      crcgot = crc32(0, tpt, msg_size );
      if(*crc != crcgot){
	fprintf(stderr,
		"Node %d received checksum %x != node %d sent checksum %x\n",
		mynode(),*crc, mbuf[i].msg_node, crcgot);
	fflush(stderr); /* BUG FIX: was fflush(stdout); message goes to stderr */
	fail = 1;
#ifdef CRC_DEBUG
	{
	  char filename[128];
	  FILE *dump;
	  sprintf(filename,"/tmp/receive.%d.from.%d.dir%d.msg%d",mynode(),
		  mbuf[i].msg_node,mtag->index,i);
	  dump = fopen(filename,"w");
	  fwrite(tpt, 1, msg_size + CRCBYTES, dump);
	  fclose(dump);
	}
#endif
      }
    }
  }
#ifdef CRC_DEBUG
  QMP_sum_int(&fail);
#endif
  if(fail > 0)terminate(1);
#endif
}
/*
** free buffers associated with message tag
*/
/* Release everything owned by a gather tag: QMP message handles, message
   memory, the gmem descriptor chains, the handle/descriptor arrays, and
   finally the tag itself. */
void
cleanup_gather(msg_tag *mtag)
{
  int k;
  gmem_t *link, *follow;

#if 0
  if(mtag->ids!=NULL)
    for(k=0; k<mtag->nids; ++k) id_array[mtag->ids[k]] = 0;
#endif
  /* receive side: handle first, then per-message memory and gmem chain */
  if(mtag->nrecvs>0) QMP_free_msghandle( mtag->mhrecv );
  for(k=0; k<mtag->nrecvs; k++) {
    //QMP_free_msghandle( mtag->recv_msgs[k].mh );
    //QMP_free_msghandle( mtag->mhrecvlist[k] );
    QMP_free_msgmem( mtag->recv_msgs[k].mm );
    QMP_free_memory( mtag->recv_msgs[k].qmp_mem );
    link = mtag->recv_msgs[k].gmem;
    while(link!=NULL) {
      follow = link->next;
      free(link);
      link = follow;
    }
  }
  /* send side: same pattern */
  if(mtag->nsends>0) QMP_free_msghandle( mtag->mhsend );
  for(k=0; k<mtag->nsends; k++) {
    //QMP_free_msghandle( mtag->send_msgs[k].mh );
    //QMP_free_msghandle( mtag->mhsendlist[k] );
    QMP_free_msgmem( mtag->send_msgs[k].mm );
    QMP_free_memory( mtag->send_msgs[k].qmp_mem );
    link = mtag->send_msgs[k].gmem;
    while(link!=NULL) {
      follow = link->next;
      free(link);
      link = follow;
    }
  }
  /* bookkeeping arrays, then the tag */
  if(mtag->nrecvs>0) {
    free(mtag->mhrecvlist);
    free(mtag->recv_msgs);
  }
  if(mtag->nsends>0) {
    free(mtag->mhsendlist);
    free(mtag->send_msgs);
  }
  //free(mtag->ids);
  free(mtag);
}
/***********************************************************************
* Convenience Routines for Gathers *
***********************************************************************/
/*
** declare gather with a field offset
*/
/* Declare a gather whose source is a member of the site structure; the
   stride between successive sites is therefore sizeof(site). */
msg_tag *
declare_gather_site(
field_offset field, /* which field? Some member of structure "site" */
int size, /* size in bytes of the field (eg sizeof(su3_vector))*/
int index, /* direction to gather from. eg XUP - index into
neighbor tables */
int parity, /* parity of sites whose neighbors we gather.
one of EVEN, ODD or EVENANDODD. */
char ** dest) /* one of the vectors of pointers */
{
  char *base = (char *)lattice + field; /* member address within site 0 */
  return declare_strided_gather( base, sizeof(site), size, index, parity,
				 dest );
}
/*
** old style gather routine which declares and starts in one call
*/
/* Old-style convenience routine: declare, prepare and start a site-struct
   gather in a single call.  Returns the tag for wait/cleanup. */
msg_tag *
start_gather_site(
field_offset field, /* which field? Some member of structure "site" */
int size, /* size in bytes of the field (eg sizeof(su3_vector))*/
int index, /* direction to gather from. eg XUP - index into
neighbor tables */
int parity, /* parity of sites whose neighbors we gather.
one of EVEN, ODD or EVENANDODD. */
char ** dest) /* one of the vectors of pointers */
{
  msg_tag *tag = declare_strided_gather( (char *)lattice + field,
					 sizeof(site), size, index, parity,
					 dest );
  prepare_gather(tag);
  do_gather(tag);
  return tag;
}
/*
** old style routine used to restart a previously waited gather
** this function is now deprecated and users should call do_gather()
** instead
*/
/* Old-style routine to restart a previously waited site-struct gather.
   Deprecated: callers should use do_gather() directly.  Performs sanity
   checks (when there are sends) that the caller passed the same field,
   stride and size as the original declaration, then restarts. */
void
restart_gather_site(
field_offset field, /* which field? Some member of structure "site" */
int size, /* size in bytes of the field (eg sizeof(su3_vector))*/
int index, /* direction to gather from. eg XUP - index into
neighbor tables */
int parity, /* parity of sites whose neighbors we gather.
one of EVEN, ODD or EVENANDODD. */
char ** dest, /* one of the vectors of pointers */
msg_tag *mtag) /* previously returned by start_gather_site */
{
  msg_sr_t *mbuf;

  if(mtag->nsends!=0) mbuf = mtag->send_msgs;
  else mbuf = NULL;

  /* sanity checks for improper usage */
  /* (the original repeated the field check twice - copy/paste duplicate;
     removed) */
  if(mbuf!=NULL) {
    if(((char *)lattice+field)!=mbuf->gmem->mem) {
      printf("error: wrong field in restart gather\n");
      terminate(1);
    }
    if(sizeof(site)!=mbuf->gmem->stride) {
      printf("error: wrong stride in restart gather\n");
      terminate(1);
    }
    if(size!=mbuf->gmem->size) {
      printf("error: wrong size in restart gather\n");
      terminate(1);
    }
  }
  do_gather(mtag);
}
/*****************************
* gather routines from an array of fields *
*****************************/
/*
** declares a gather from an array of fields
*/
/* Declare a gather from a contiguous array of fields: since elements are
   packed back-to-back, the stride equals the element size. */
msg_tag *
declare_gather_field(
void * field, /* which field? Pointer returned by malloc() */
int size, /* size in bytes of the field (eg sizeof(su3_vector))*/
int index, /* direction to gather from. eg XUP - index into
neighbor tables */
int parity, /* parity of sites whose neighbors we gather.
one of EVEN, ODD or EVENANDODD. */
char ** dest) /* one of the vectors of pointers */
{
  const int field_stride = size; /* contiguous array: stride == size */
  return declare_strided_gather( (char *)field, field_stride, size, index,
				 parity, dest );
}
/*
** old style gather routine which declares and starts in one call
*/
/* Old-style convenience routine: declare, prepare and start a field-array
   gather in a single call.  Returns the tag for wait/cleanup. */
msg_tag *
start_gather_field(
void * field, /* which field? Pointer returned by malloc() */
int size, /* size in bytes of the field (eg sizeof(su3_vector))*/
int index, /* direction to gather from. eg XUP - index into
neighbor tables */
int parity, /* parity of sites whose neighbors we gather.
one of EVEN, ODD or EVENANDODD. */
char ** dest) /* one of the vectors of pointers */
{
  msg_tag *tag = declare_strided_gather( (char *)field, size, size, index,
					 parity, dest );
  prepare_gather(tag);
  do_gather(tag);
  return tag;
}
/*
** old style routine used to restart a previously waited gather
** this function is now deprecated and users should call do_gather()
** instead
*/
/* Old-style routine to restart a previously waited field-array gather.
   Deprecated: callers should use do_gather() directly.  Performs sanity
   checks (when there are sends) that the caller passed the same field
   and size as the original declaration; for field gathers stride==size,
   so the stride check compares against 'size'.  Then restarts. */
void
restart_gather_field(
void *field, /* which field? Pointer returned by malloc() */
int size, /* size in bytes of the field (eg sizeof(su3_vector))*/
int index, /* direction to gather from. eg XUP - index into
neighbor tables */
int parity, /* parity of sites whose neighbors we gather.
one of EVEN, ODD or EVENANDODD. */
char ** dest, /* one of the vectors of pointers */
msg_tag *mtag) /* previously returned by start_gather_field */
{
  msg_sr_t *mbuf;

  if(mtag->nsends!=0) mbuf = mtag->send_msgs;
  else mbuf = NULL;

  /* sanity checks for improper usage */
  /* (the original repeated the field check twice - copy/paste duplicate;
     removed) */
  if(mbuf!=NULL) {
    if(field!=mbuf->gmem->mem) {
      printf("error: wrong field in restart gather\n");
      terminate(1);
    }
    if(size!=mbuf->gmem->stride) {
      printf("error: wrong stride in restart gather\n");
      terminate(1);
    }
    if(size!=mbuf->gmem->size) {
      printf("error: wrong size in restart gather\n");
      terminate(1);
    }
  }
  do_gather(mtag);
}
/**********************************************************************
* MULTI-GATHER ROUTINES *
**********************************************************************
accumulate_gather(msg_tag **mtag, msg_tag *tag)
Joins declared gathers together under a single msg_tag.
The second argument (tag) would be merged with the first (mtag).
If mtag is NULL then this just copies tag into mtag.
declare_accumulate_gather_site() declares and joins gathers.
example:
msg_tag *tag1, *tag2, *mtag;
tag1 = declare_gather_site( F_OFFSET(phi), sizeof(su3_vector), XUP,
EVEN, gen_pt1 );
tag2 = declare_gather_site( F_OFFSET(phi), sizeof(su3_vector), XDOWN,
EVEN, gen_pt2 );
mtag = NULL;
accumulate_gather( &mtag, tag1 );
accumulate_gather( &mtag, tag2 );
prepare_gather( mtag ); ** optional **
do_gather( mtag );
wait_gather( mtag );
** stuff **
do_gather( tag1 ); ** this is valid as long as the combined gather
wait_gather( tag1 ); (mtag) has been waited on **
** stuff **
do_gather( mtag );
wait_gather( mtag );
cleanup_gather( mtag );
cleanup_gather( tag1 );
cleanup_gather( tag2 );
Note that mtag must be set to NULL first in this case.
If there is no need to use the single gathers alone one could do:
msg_tag *mtag;
mtag = NULL;
declare_accumulate_gather_site( &mtag, F_OFFSET(phi), sizeof(su3_vector), XUP,
EVEN, gen_pt1 );
declare_accumulate_gather_site( &mtag, F_OFFSET(phi), sizeof(su3_vector), XDOWN,
EVEN, gen_pt2 );
prepare_gather( mtag ); ** optional **
do_gather( mtag );
wait_gather( mtag );
** stuff **
do_gather( mtag );
wait_gather( mtag );
cleanup_gather( mtag );
one could also replace
mtag = NULL;
declare_accumulate_gather_site( &mtag, F_OFFSET(phi), sizeof(su3_vector), XUP,
EVEN, gen_pt1 );
with
mtag = declare_gather_site( F_OFFSET(phi), sizeof(su3_vector), XUP,
EVEN, gen_pt1 );
since they do the same thing, however the first form is a bit more uniform
in the given example.
*/
/*
** helper function to copy the gmem_t structure
*/
/* Duplicate the gmem_t chain 'src' and append the copy to the end of the
   chain rooted at *dest (which may be empty).  The copied chain is
   NULL-terminated.  Aborts on allocation failure. */
static void
copy_gmem(gmem_t **dest, gmem_t *src)
{
  /* walk to the tail of the destination chain */
  while(*dest!=NULL) dest = &((*dest)->next);
  /* clone each source link onto the tail */
  do {
    gmem_t *copy = (gmem_t *)malloc(sizeof(gmem_t));
    if(copy==NULL) {
      printf("error copy_gmem malloc node:%i\n",mynode());
      terminate(1);
    }
    memcpy(copy, src, sizeof(gmem_t));
    *dest = copy;
    dest = &(copy->next);
    src = src->next;
  } while(src!=NULL);
  *dest = NULL; /* terminate the appended chain */
}
/*
** helper function that merges a source msg_sr_t structure into the dest
*/
/* Merge the 'nsrc' message descriptors in 'src' into the array *dest
   (length *ndest): descriptors for a node already present are merged
   (sizes added, gmem chains appended); descriptors for new nodes are
   appended.  *dest is reallocated and *ndest updated. */
static void
add_msgt(msg_sr_t **dest, int *ndest, msg_sr_t *src, int nsrc/*, int nids*/)
{
  int i, j, n, k;

  /* count source nodes that already appear in dest */
  n = 0;
  for(i=0; i<nsrc; ++i) {
    for(j=0; j<*ndest; ++j) {
      if((*dest)[j].msg_node==src[i].msg_node) {
	++n;
	break;
      }
    }
  }
  n = *ndest + nsrc - n; /* final number of distinct nodes */
  *dest = (msg_sr_t *)realloc(*dest, n*sizeof(msg_sr_t));
  if(*dest==NULL) {
    printf("error add_msgt malloc node:%i\n",mynode());
    terminate(1);
  }
  /* BUG FIX: the original indexed new entries at (*dest)[*ndest+i],
     which runs past the end of the reallocated array whenever some (but
     not all) source nodes match existing entries.  Use a separate
     append cursor that only advances for genuinely new nodes. */
  k = *ndest;
  for(i=0; i<nsrc; ++i) {
    for(j=0; j<*ndest; ++j) {
      if((*dest)[j].msg_node==src[i].msg_node) break;
    }
    if(j<*ndest) {
      /* same node: merge sizes and append the gmem chain */
      (*dest)[j].msg_size += src[i].msg_size;
      copy_gmem(&((*dest)[j].gmem), src[i].gmem);
    } else {
      /* new node: append a fresh descriptor */
      (*dest)[k].msg_node = src[i].msg_node;
      //(*dest)[k].id_offset = nids + src[i].id_offset;
      (*dest)[k].msg_size = src[i].msg_size;
      (*dest)[k].msg_buf = NULL;
      (*dest)[k].gmem = NULL;
      copy_gmem(&((*dest)[k].gmem), src[i].gmem);
      ++k;
    }
  }
  *ndest = n;
}
/*
** merges already declared gather
*/
/* Merge an already-declared gather 'mtag' into the combined gather
   **mmtag.  If *mmtag is NULL a fresh (empty, unprepared) tag is
   allocated first.  Only the declared descriptors are merged; the
   combined tag must still go through prepare_gather()/do_gather(). */
void
accumulate_gather(msg_tag **mmtag, msg_tag *mtag)
{
  msg_tag *amtag;

  if(*mmtag==NULL) {
    amtag = (msg_tag *)malloc(sizeof(msg_tag));
    if(amtag==NULL) {
      printf("error accumulate_gather malloc node:%i\n",mynode());
      terminate(1);
    }
    /* BUG FIX: 'prepared' was left uninitialized, so the later
       prepare_gather()/do_gather() test of mtag->prepared read
       indeterminate memory.  Initialize it like declare_strided_gather
       does. */
    amtag->prepared = 0;
    //amtag->nids = 0;
    //amtag->ids = NULL;
    amtag->nrecvs = 0;
    amtag->recv_msgs = NULL;
    amtag->nsends = 0;
    amtag->send_msgs = NULL;
    *mmtag = amtag;
  } else {
    amtag = *mmtag;
  }
  add_msgt( &(amtag->recv_msgs), &(amtag->nrecvs),
	    mtag->recv_msgs, mtag->nrecvs/*, amtag->nids*/ );
  add_msgt( &(amtag->send_msgs), &(amtag->nsends),
	    mtag->send_msgs, mtag->nsends/*, amtag->nids*/ );
  //amtag->nids += mtag->nids;
}
/*
** declares and merges gather
** handles both the site structure and an array of fields
*/
/* Declare a strided gather and fold it into the combined tag **mmtag.
   A first declaration simply becomes the combined tag; later ones are
   merged into it and their temporary tags released. */
static void
declare_accumulate_strided_gather(
msg_tag **mmtag, /* tag to accumulate gather into */
void *field, /* which field? Some member of structure "site" */
int stride, /* bytes between fields in source buffer */
int size, /* size in bytes of the field (eg sizeof(su3_vector))*/
int index, /* direction to gather from. eg XUP - index into
neighbor tables */
int parity, /* parity of sites whose neighbors we gather.
one of EVEN, ODD or EVENANDODD. */
char ** dest) /* one of the vectors of pointers */
{
  msg_tag *newtag;

  newtag = declare_strided_gather( field, stride, size, index, parity, dest );
  if(*mmtag!=NULL) {
    /* merge into the existing combined tag, then drop the temporary */
    accumulate_gather( mmtag, newtag );
    cleanup_gather( newtag );
  } else {
    /* first gather: it becomes the combined tag itself */
    *mmtag = newtag;
  }
}
/*
** declares and merges gather from field offset
*/
/* Declare a site-struct gather and fold it into the combined tag. */
void
declare_accumulate_gather_site(
msg_tag **mmtag,
field_offset field, /* which field? Some member of structure "site" */
int size, /* size in bytes of the field (eg sizeof(su3_vector))*/
int index, /* direction to gather from. eg XUP - index into
neighbor tables */
int parity, /* parity of sites whose neighbors we gather.
one of EVEN, ODD or EVENANDODD. */
char ** dest) /* one of the vectors of pointers */
{
  char *base = (char *)lattice + field; /* member address within site 0 */
  declare_accumulate_strided_gather( mmtag, base, sizeof(site), size, index,
				     parity, dest );
}
/*
** declares and merges gather from an array of fields
*/
/* Declare a field-array gather and fold it into the combined tag;
   contiguous arrays use stride == element size. */
void
declare_accumulate_gather_field(
msg_tag **mmtag,
void * field, /* which field? Pointer returned by malloc() */
int size, /* size in bytes of the field (eg sizeof(su3_vector))*/
int index, /* direction to gather from. eg XUP - index into
neighbor tables */
int parity, /* parity of sites whose neighbors we gather.
one of EVEN, ODD or EVENANDODD. */
char ** dest) /* one of the vectors of pointers */
{
  const int field_stride = size; /* contiguous array: stride == size */
  declare_accumulate_strided_gather( mmtag, field, field_stride, size, index,
				     parity, dest );
}
/**********************************************************************
* GENERAL GATHER ROUTINES *
**********************************************************************
start_general_gather_site() returns a msg_tag which will
be used as input to subsequent wait_general_gather() and
cleanup_general_gather() calls.
usage: tag = start_general_gather_site( source, size, displacement, parity, dest)
example:
msg_tag *tag;
int disp[4];
disp[XUP]=1; disp[YUP]= -1; disp[ZUP] = disp[TUP] = 0;
tag = start_general_gather_site( F_OFFSET(phi), sizeof(su3_vector), disp,
EVEN, gen_pt[0] );
** do other stuff **
wait_general_gather(tag);
** gen_pt[0][i] now contains the address of the phi
vector (or a copy thereof) on the neighbor of site i in the
XUP direction for all even sites i.
Do whatever you want with it here.
**
cleanup_general_gather(tag);
** subsequent calls will overwrite the gathered fields. but if you
don't clean up, you will eventually run out of space **
*/
/* Module-scope state for the general gather.  Only ONE general gather may
   be in progress at any time; g_gather_flag enforces this. */
struct msg_tmp { int node, count; }; /* temporary structure for keeping track
of messages to be sent or received */
static struct msg_tmp *to_nodes, *from_nodes; /* arrays for messages */
static int g_gather_flag=0; /* flag to tell if general gather in progress */
static int tsize; /* size of entry in messages =2*sizeof(int)+size */
static char ** tdest; /* tdest is copy of dest */
/* from_nodes, tsize and tdest are global because they are set in
start_general_gather_site() and used in wait_general_gather(). This
works because we allow only one general_gather in progress at a
time. */
#ifndef N_SUBL32
/* Start a general gather: fetch the field displaced by an arbitrary
   4-vector 'displacement' into 'dest'.  On-node neighbors are resolved
   directly; for off-node data this builds per-node send/receive lists
   on the fly (no precomputed neighbor tables), packs each datum as
   [site index, pad, payload], posts receives and starts sends.
   Completed by wait_general_gather(); only one may be in flight. */
msg_tag *
start_general_strided_gather(
char *field, /* source buffer aligned to desired field */
int stride, /* bytes between fields in source buffer */
int size, /* size in bytes of the field (eg sizeof(su3_vector))*/
int *displacement, /* displacement to gather from. four components */
int parity, /* parity of sites to which we gather.
one of EVEN, ODD or EVENANDODD. */
char ** dest) /* one of the vectors of pointers */
{
register int i,j; /* scratch */
register site *s; /* scratch pointer to site */
register char *tpt; /* scratch pointer in buffers */
int nsites; /* number of sites in this receive or send */
int disp_parity; /* parity of displacement vector */
int send_parity; /* parity of sites that may be sent */
int tx,ty,tz,tt; /* temporary coordinates */
int othernode; /* node sent to or received from */
msg_sr_t *mrecv,*msend;
msg_tag *mtag; /* message tag, to be returned */
int n_send_msgs, n_recv_msgs;
/* check for gather already in progress */
if(g_gather_flag!=0){
printf("ERROR: node %d, two general_gathers() at once!\n", mynode());
terminate(1);
}
n_recv_msgs = n_send_msgs = 0;
tsize = 2*sizeof(int)+size;
/* Use 2*sizeof int so pointer will be aligned to double word */
tdest = dest;
/* find parity of sites that may be sent */
if( (displacement[XUP]+displacement[YUP]+displacement[ZUP]+
displacement[TUP])%2 == 0 ) disp_parity = EVEN;
else disp_parity = ODD;
switch(parity) {
case EVEN:
if( disp_parity==EVEN ) send_parity = EVEN;
else send_parity = ODD;
break;
case ODD:
if( disp_parity==EVEN ) send_parity = ODD;
else send_parity = EVEN;
break;
default: /* EVENANDODD */
if(parity!=EVENANDODD) {
printf("ERROR: bad parity\n");
terminate(parity);
}
send_parity = EVENANDODD;
break;
}
/* set pointers in sites whose neighbors are on this node. (If all
neighbors are on this node, this is the only thing done.) Make
list of nodes from whom we expect messages */
FORALLSITES is not used here: only sites of the requested parity receive.
FORSOMEPARITY(i,s,parity){
if(displacement[XUP]!=0) tx = (s->x + displacement[XUP] + nx)%nx;
else tx = s->x;
if(displacement[YUP]!=0) ty = (s->y + displacement[YUP] + ny)%ny;
else ty = s->y;
if(displacement[ZUP]!=0) tz = (s->z + displacement[ZUP] + nz)%nz;
else tz = s->z;
if(displacement[TUP]!=0) tt = (s->t + displacement[TUP] + nt)%nt;
else tt = s->t;
othernode = node_number(tx,ty,tz,tt);
if( othernode==this_node ) {
dest[i] = field + node_index(tx,ty,tz,tt) * stride;
}
else{
/* tally one more datum expected from 'othernode', growing the
   from_nodes list if this is a new node */
for(j=0;j<n_recv_msgs;j++) if(from_nodes[j].node==othernode) break;
if(j < n_recv_msgs) {
from_nodes[j].count++;
}
else {
if(n_recv_msgs==0) {
from_nodes = (struct msg_tmp *)malloc( sizeof(struct msg_tmp) );
from_nodes[0].node = othernode;
from_nodes[0].count = 1;
n_recv_msgs++;
}
else{
/* NOTE(review): realloc result is not checked; an OOM here would
   crash on the following store */
from_nodes = (struct msg_tmp *)
realloc( from_nodes, (n_recv_msgs+1)*sizeof(struct msg_tmp) );
from_nodes[j].node = othernode;
from_nodes[j].count = 1;
n_recv_msgs++;
}
}
}
} END_LOOP;
/* scan sites of parity we are sending, make list of nodes to which
we must send messages and the number of messages to each. */
FORSOMEPARITY(i,s,send_parity) {
if(displacement[XUP]!=0) tx = (s->x - displacement[XUP] + nx)%nx;
else tx = s->x;
if(displacement[YUP]!=0) ty = (s->y - displacement[YUP] + ny)%ny;
else ty = s->y;
if(displacement[ZUP]!=0) tz = (s->z - displacement[ZUP] + nz)%nz;
else tz = s->z;
if(displacement[TUP]!=0) tt = (s->t - displacement[TUP] + nt)%nt;
else tt = s->t;
othernode = node_number(tx,ty,tz,tt);
if( othernode != this_node ) {
for(j=0;j<n_send_msgs;j++) if(to_nodes[j].node==othernode) break;
if(j < n_send_msgs) {
to_nodes[j].count++;
}
else {
if(n_send_msgs==0) {
to_nodes = (struct msg_tmp *)malloc(sizeof(struct msg_tmp));
to_nodes[0].node = othernode;
to_nodes[0].count = 1;
n_send_msgs++;
}
else{
to_nodes = (struct msg_tmp *)
realloc( to_nodes, (n_send_msgs+1)*sizeof(struct msg_tmp) );
to_nodes[j].node = othernode;
to_nodes[j].count = 1;
n_send_msgs++;
}
}
}
} END_LOOP;
/* NOTE(review): this malloc is unchecked, and mtag->prepared is not set;
   general-gather tags are presumably not routed through prepare_gather()
   - confirm against wait_general_gather()/cleanup_general_gather() */
mtag = (msg_tag *)malloc(sizeof(msg_tag));
if( n_recv_msgs==0 ) mrecv = NULL;
else {
mrecv = (msg_sr_t *)malloc(n_recv_msgs*sizeof(msg_sr_t));
if(mrecv==NULL) {
printf("NO ROOM for mrecv, node %d\n",mynode());
terminate(1);
}
}
if( n_send_msgs==0 ) msend = NULL;
else {
msend = (msg_sr_t *)malloc(n_send_msgs*sizeof(msg_sr_t));
if(msend==NULL) {
printf("NO ROOM for msend, node %d\n",mynode());
terminate(1);
}
}
mtag->recv_msgs = mrecv;
mtag->send_msgs = msend;
mtag->nrecvs = n_recv_msgs;
mtag->nsends = n_send_msgs;
/* for each node which has neighbors of my sites */
for(i=0; i<n_recv_msgs; i++) {
/* allocate buffer to receive neighbors */
nsites = from_nodes[i].count;
mrecv[i].msg_node = from_nodes[i].node;
mrecv[i].msg_size = nsites*tsize;
mrecv[i].msg_buf = (char *)malloc( nsites*tsize );
if(mrecv[i].msg_buf==NULL){
printf("NO ROOM for msg_buf, node %d\n",mynode());
terminate(1);
}
/* post receive */
mrecv[i].mm = QMP_declare_msgmem(mrecv[i].msg_buf, mrecv[i].msg_size);
mrecv[i].mh = QMP_declare_receive_from(mrecv[i].mm, from_nodes[i].node, 0);
QMP_start(mrecv[i].mh);
}
/* for each node whose neighbors I have */
for(i=0; i<n_send_msgs; i++) {
/* Allocate buffer to gather data. */
tpt=(char *)malloc( to_nodes[i].count*tsize );
if(tpt==NULL) {
printf("NO ROOM for tpt, node %d\n",mynode());
terminate(1);
}
msend[i].msg_node = to_nodes[i].node;
msend[i].msg_size = to_nodes[i].count*tsize;
msend[i].msg_buf = tpt;
}
/* reset to_node counters */
/* counts are re-used below as running write offsets into each buffer */
for(i=0; i<n_send_msgs; i++) to_nodes[i].count = 0;
/* gather data into the buffers. Each entry in the buffers consists
of the index of the site to which the data is sent, followed by
the actual data */
FORSOMEPARITY(i, s, send_parity) {
tx = (s->x - displacement[XUP] + nx)%nx;
ty = (s->y - displacement[YUP] + ny)%ny;
tz = (s->z - displacement[ZUP] + nz)%nz;
tt = (s->t - displacement[TUP] + nt)%nt;
othernode = node_number(tx,ty,tz,tt);
if( othernode != this_node ) {
for(j=0; j<n_send_msgs; j++) if(to_nodes[j].node==othernode) break;
tpt = msend[j].msg_buf + to_nodes[j].count*tsize;
*(int *)tpt = node_index(tx,ty,tz,tt);
/* index of site on other node */
memcpy( tpt+2*sizeof(int), field+i*stride, size);
to_nodes[j].count++;
}
} END_LOOP;
/* start the sends */
for(i=0; i<n_send_msgs; i++) {
msend[i].mm = QMP_declare_msgmem(msend[i].msg_buf, msend[i].msg_size);
msend[i].mh = QMP_declare_send_to(msend[i].mm, to_nodes[i].node, 0);
QMP_start(msend[i].mh);
}
/* free temporary arrays */
/* from_nodes is kept: wait_general_gather() still needs it */
if(n_send_msgs>0) free(to_nodes);
/* mark gather in progress and return */
g_gather_flag = 1;
return mtag;
}
#else /* N_SUBL32 */
/*
** Start a general gather of "field" (element size "size", "stride" bytes
** between elements in the source buffer) from sites displaced by the
** four-component "displacement", for sublattice "subl" (or EVENANDODD).
** Returns a msg_tag to be passed to wait_general_gather() and then
** cleanup_general_gather().  This is the N_SUBL32 (32-sublattice) variant.
** NOTE(review): tsize, tdest, from_nodes and to_nodes appear to be
** file-scope state shared with wait_general_gather() — confirm; only one
** general gather may be in flight at a time (guarded by g_gather_flag).
*/
msg_tag *
start_general_strided_gather(
char *field, /* source buffer aligned to desired field */
int stride, /* bytes between fields in source buffer */
int size, /* size in bytes of the field (eg sizeof(su3_vector))*/
int *displacement, /* displacement to gather from. four components */
int subl, /* subl of sites whose neighbors we gather.
It is EVENANDODD, if all sublattices are done. */
char ** dest) /* one of the vectors of pointers */
{
register int i,j; /* scratch */
register site *s; /* scratch pointer to site */
register char *tpt; /* scratch pointer in buffers */
int nsites; /* number of sites in this receive or send */
int send_subl; /* sublattice of sites that may be sent */
int tx,ty,tz,tt; /* temporary coordinates */
int othernode; /* node sent to or received from */
msg_sr_t *mrecv,*msend;
msg_tag *mtag; /* message tag, to be returned */
int n_send_msgs, n_recv_msgs;
/* check for gather already in progress */
if(g_gather_flag!=0) {
printf("ERROR: node %d, two general_gathers() at once!\n", mynode());
terminate(1);
}
n_recv_msgs = n_send_msgs = 0;
/* each buffer entry: site index plus the field data */
tsize = 2*sizeof(int)+size;
/* Use 2*sizeof int so pointer will be aligned to double word */
tdest = dest;
/* find sublattice of sites that may be sent */
/* This is not needed for EVENANDODD */
send_subl = subl;
if( subl != EVENANDODD ) {
/* Displacments by multiples of 4 in any direction does
not change sublattice */
tx = displacement[XUP]%4;
ty = displacement[YUP]%4;
tz = displacement[ZUP]%4;
tt = displacement[TUP]%4;
/* walk the sublattice one hop at a time along each axis */
if( tx < 0 ) {
for(i=0;i<(-tx);i++) send_subl = neighsubl[send_subl][XDOWN];
}
else
for(i=0;i<tx;i++) send_subl = neighsubl[send_subl][XUP];
if( ty < 0 ) {
for(i=0;i<(-ty);i++) send_subl = neighsubl[send_subl][YDOWN];
}
else
for(i=0;i<ty;i++) send_subl = neighsubl[send_subl][YUP];
if( tz < 0 ) {
for(i=0;i<(-tz);i++) send_subl = neighsubl[send_subl][ZDOWN];
}
else
for(i=0;i<tz;i++) send_subl = neighsubl[send_subl][ZUP];
if( tt < 0 ) {
for(i=0;i<(-tt);i++) send_subl = neighsubl[send_subl][TDOWN];
}
else
for(i=0;i<tt;i++) send_subl = neighsubl[send_subl][TUP];
}
/* set pointers in sites whose neighbors are on this node. (If all
neighbors are on this node, this is the only thing done.) Make
list of nodes from whom we expect messages */
if( subl == EVENANDODD ) {
FORALLSITES(i,s) {
if(displacement[XUP]!=0) tx = (s->x + displacement[XUP] + nx)%nx;
else tx = s->x;
if(displacement[YUP]!=0) ty = (s->y + displacement[YUP] + ny)%ny;
else ty = s->y;
if(displacement[ZUP]!=0) tz = (s->z + displacement[ZUP] + nz)%nz;
else tz = s->z;
if(displacement[TUP]!=0) tt = (s->t + displacement[TUP] + nt)%nt;
else tt = s->t;
othernode = node_number(tx,ty,tz,tt);
if( othernode==this_node ) {
dest[i] = field + node_index(tx,ty,tz,tt) * stride;
}
else{
for(j=0;j<n_recv_msgs;j++) if(from_nodes[j].node==othernode) break;
if(j < n_recv_msgs) {
from_nodes[j].count++;
}
else {
if(n_recv_msgs==0) {
from_nodes = (struct msg_tmp *)malloc( sizeof(struct msg_tmp) );
from_nodes[0].node = othernode;
from_nodes[0].count = 1;
n_recv_msgs++;
}
else{
/* j == n_recv_msgs here: the search loop fell through,
so from_nodes[j] is the newly grown slot */
from_nodes = (struct msg_tmp *)
realloc( from_nodes, (n_recv_msgs+1)*sizeof(struct msg_tmp) );
from_nodes[j].node = othernode;
from_nodes[j].count = 1;
n_recv_msgs++;
}
}
}
}
}
else {
/* same as above, restricted to one sublattice */
FORSOMESUBLATTICE(i,s,subl) {
if(displacement[XUP]!=0) tx = (s->x + displacement[XUP] + nx)%nx;
else tx = s->x;
if(displacement[YUP]!=0) ty = (s->y + displacement[YUP] + ny)%ny;
else ty = s->y;
if(displacement[ZUP]!=0) tz = (s->z + displacement[ZUP] + nz)%nz;
else tz = s->z;
if(displacement[TUP]!=0) tt = (s->t + displacement[TUP] + nt)%nt;
else tt = s->t;
othernode = node_number(tx,ty,tz,tt);
if( othernode==this_node ) {
dest[i] = field + node_index(tx,ty,tz,tt) * stride;
}
else {
for(j=0;j<n_recv_msgs;j++) if(from_nodes[j].node==othernode) break;
if(j < n_recv_msgs) {
from_nodes[j].count++;
}
else {
if(n_recv_msgs==0) {
from_nodes = (struct msg_tmp *)malloc( sizeof(struct msg_tmp) );
from_nodes[0].node = othernode;
from_nodes[0].count = 1;
n_recv_msgs++;
}
else{
from_nodes = (struct msg_tmp *)
realloc( from_nodes, (n_recv_msgs+1)*sizeof(struct msg_tmp) );
from_nodes[j].node = othernode;
from_nodes[j].count = 1;
n_recv_msgs++;
}
}
}
}
}
/* scan sites of sublattice we are sending, make list of nodes to which
we must send messages and the number of messages to each. */
if( subl == EVENANDODD ) {
FORALLSITES(i,s) {
/* note the MINUS: source sites are displaced the opposite way */
if(displacement[XUP]!=0) tx = (s->x - displacement[XUP] + nx)%nx;
else tx = s->x;
if(displacement[YUP]!=0) ty = (s->y - displacement[YUP] + ny)%ny;
else ty = s->y;
if(displacement[ZUP]!=0) tz = (s->z - displacement[ZUP] + nz)%nz;
else tz = s->z;
if(displacement[TUP]!=0) tt = (s->t - displacement[TUP] + nt)%nt;
else tt = s->t;
othernode = node_number(tx,ty,tz,tt);
if( othernode != this_node ) {
for(j=0;j<n_send_msgs;j++) if(to_nodes[j].node==othernode) break;
if(j < n_send_msgs) {
to_nodes[j].count++;
}
else {
if(n_send_msgs==0) {
to_nodes = (struct msg_tmp *)malloc(sizeof(struct msg_tmp));
to_nodes[0].node = othernode;
to_nodes[0].count = 1;
n_send_msgs++;
}
else {
to_nodes = (struct msg_tmp *)
realloc( to_nodes, (n_send_msgs+1)*sizeof(struct msg_tmp) );
to_nodes[j].node = othernode;
to_nodes[j].count = 1;
n_send_msgs++;
}
}
}
}
}
else {
FORSOMESUBLATTICE(i,s,send_subl) {
if(displacement[XUP]!=0) tx = (s->x - displacement[XUP] + nx)%nx;
else tx = s->x;
if(displacement[YUP]!=0) ty = (s->y - displacement[YUP] + ny)%ny;
else ty = s->y;
if(displacement[ZUP]!=0) tz = (s->z - displacement[ZUP] + nz)%nz;
else tz = s->z;
if(displacement[TUP]!=0) tt = (s->t - displacement[TUP] + nt)%nt;
else tt = s->t;
othernode = node_number(tx,ty,tz,tt);
if( othernode != this_node ) {
for(j=0;j<n_send_msgs;j++) if(to_nodes[j].node==othernode) break;
if(j < n_send_msgs) {
to_nodes[j].count++;
}
else {
if(n_send_msgs==0) {
to_nodes = (struct msg_tmp *)malloc(sizeof(struct msg_tmp));
to_nodes[0].node = othernode;
to_nodes[0].count = 1;
n_send_msgs++;
}
else {
to_nodes = (struct msg_tmp *)
realloc( to_nodes, (n_send_msgs+1)*sizeof(struct msg_tmp) );
to_nodes[j].node = othernode;
to_nodes[j].count = 1;
n_send_msgs++;
}
}
}
}
}
/* NOTE(review): this malloc result is not checked, unlike the
mrecv/msend allocations below */
mtag = (msg_tag *)malloc(sizeof(msg_tag));
if( n_recv_msgs==0 ) mrecv = NULL;
else {
mrecv = (msg_sr_t *)malloc( n_recv_msgs*sizeof(msg_sr_t) );
if(mrecv==NULL) {
printf("NO ROOM for mrecv, node %d\n",mynode());
terminate(1);
}
}
if( n_send_msgs==0 ) msend=NULL;
else {
msend = (msg_sr_t *)malloc( n_send_msgs*sizeof(msg_sr_t) );
if(msend==NULL) {
printf("NO ROOM for msend, node %d\n",mynode());
terminate(1);
}
}
mtag->recv_msgs = mrecv;
mtag->send_msgs = msend;
mtag->nrecvs = n_recv_msgs;
mtag->nsends = n_send_msgs;
/* for each node which has neighbors of my sites */
for(i=0; i<n_recv_msgs; i++) {
/* allocate buffer to receive neighbors */
nsites = from_nodes[i].count;
mrecv[i].msg_node = from_nodes[i].node;
mrecv[i].msg_size = nsites*tsize;
mrecv[i].msg_buf = (char *)malloc( nsites*tsize );
if(mrecv[i].msg_buf==NULL){
printf("NO ROOM for msg_buf, node %d\n",mynode());
terminate(1);
}
/* post receive */
mrecv[i].mm = QMP_declare_msgmem(mrecv[i].msg_buf, mrecv[i].msg_size);
mrecv[i].mh = QMP_declare_receive_from(mrecv[i].mm, from_nodes[i].node, 0);
QMP_start(mrecv[i].mh);
}
/* for each node whose neighbors I have */
for(i=0; i<n_send_msgs; i++) {
/* Allocate buffer to gather data. */
tpt = (char *)malloc( to_nodes[i].count*tsize );
if(tpt==NULL) {
printf("NO ROOM for tpt, node %d\n",mynode());
terminate(1);
}
msend[i].msg_node = to_nodes[i].node;
msend[i].msg_size = to_nodes[i].count*tsize;
msend[i].msg_buf = tpt;
}
/* reset to_node counters */
for(i=0; i<n_send_msgs; i++) to_nodes[i].count = 0;
/* gather data into the buffers. Each entry in the buffers consists
of the index of the site to which the data is sent, followed by
the actual data */
if( subl == EVENANDODD ) {
FORALLSITES(i, s) {
tx = (s->x - displacement[XUP] + nx)%nx;
ty = (s->y - displacement[YUP] + ny)%ny;
tz = (s->z - displacement[ZUP] + nz)%nz;
tt = (s->t - displacement[TUP] + nt)%nt;
othernode = node_number(tx,ty,tz,tt);
if( othernode != this_node ) {
for(j=0; j<n_send_msgs; j++) if(to_nodes[j].node==othernode) break;
tpt = msend[j].msg_buf + to_nodes[j].count*tsize;
*(int *)tpt = node_index(tx,ty,tz,tt);
/* index of site on other node */
memcpy( tpt+2*sizeof(int), field+i*stride, size);
to_nodes[j].count++;
}
}
}
else {
FORSOMESUBLATTICE(i, s, send_subl) {
tx = (s->x - displacement[XUP] + nx)%nx;
ty = (s->y - displacement[YUP] + ny)%ny;
tz = (s->z - displacement[ZUP] + nz)%nz;
tt = (s->t - displacement[TUP] + nt)%nt;
othernode = node_number(tx,ty,tz,tt);
if( othernode != this_node ) {
for(j=0; j<n_send_msgs; j++) if(to_nodes[j].node==othernode) break;
tpt = msend[j].msg_buf + to_nodes[j].count*tsize;
*(int *)tpt = node_index(tx,ty,tz,tt);
/* index of site on other node */
memcpy( tpt+2*sizeof(int), field+i*stride, size);
to_nodes[j].count++;
}
}
}
/* start the sends */
for(i=0; i<n_send_msgs; i++) {
msend[i].mm = QMP_declare_msgmem(msend[i].msg_buf, msend[i].msg_size);
msend[i].mh = QMP_declare_send_to(msend[i].mm, to_nodes[i].node, 0);
QMP_start(msend[i].mh);
}
/* free temporary arrays */
/* from_nodes is kept: wait_general_gather still needs the counts */
if( n_send_msgs > 0) free(to_nodes);
/* mark gather in progress and return */
g_gather_flag = 1;
return mtag;
}
#endif /* N_SUBL32 */
/*
** Start a general gather of a member of the site structure.  Thin
** wrapper: in the site-major lattice layout the distance between
** successive copies of a field is sizeof(site).
*/
msg_tag *
start_general_gather_site(
field_offset field, /* which field? Some member of structure "site" */
int size, /* size in bytes of the field (eg sizeof(su3_vector))*/
int *displacement, /* displacement to gather from. four components */
int parity, /* parity of sites to which we gather.
one of EVEN, ODD or EVENANDODD. */
char ** dest) /* one of the vectors of pointers */
{
char *base = ((char *)lattice) + field;
return start_general_strided_gather( base, sizeof(site), size,
displacement, parity, dest );
}
/*
** Start a general gather of a field allocated with malloc().  Such
** fields are densely packed, so the stride equals the element size.
*/
msg_tag *
start_general_gather_field(
void * field, /* which field? Pointer returned by malloc() */
int size, /* size in bytes of the field (eg sizeof(su3_vector))*/
int *displacement, /* displacement to gather from. four components */
int parity, /* parity of sites to which we gather.
one of EVEN, ODD or EVENANDODD. */
char ** dest) /* one of the vectors of pointers */
{
char *src = (char *)field;
return start_general_strided_gather( src, size, size, displacement,
parity, dest );
}
/*
** wait for a general gather to complete
*/
/*
** Wait for a general gather to complete: block on each posted receive,
** then point tdest[] entries at the data inside the receive buffers.
*/
void
wait_general_gather(msg_tag *mtag)
{
int imsg, jsite;
char *entry;
g_gather_flag = 0;
for(imsg=0; imsg<mtag->nrecvs; imsg++) {
QMP_wait( mtag->recv_msgs[imsg].mh );
/* each buffer entry: int site index (padded to 2 ints), then data */
for(jsite=0; jsite<from_nodes[imsg].count; jsite++) {
entry = mtag->recv_msgs[imsg].msg_buf + jsite*tsize;
tdest[ *(int *)entry ] = entry + 2*sizeof(int);
}
}
/* from_nodes was allocated by start_general_strided_gather */
if(mtag->nrecvs > 0) free(from_nodes);
}
/*
** free memory associated with general gather
*/
/*
** Free all memory and QMP handles associated with a general gather.
** Receives have already completed in wait_general_gather(); sends may
** still be in flight, so they are waited on before release.
*/
void
cleanup_general_gather(msg_tag *mtag)
{
int n;
/* release every receive handle, msgmem and buffer */
for(n=0; n<mtag->nrecvs; n++) {
QMP_free_msghandle( mtag->recv_msgs[n].mh );
QMP_free_msgmem( mtag->recv_msgs[n].mm );
free( mtag->recv_msgs[n].msg_buf );
}
/* drain and release every send */
for(n=0; n<mtag->nsends; n++) {
QMP_wait( mtag->send_msgs[n].mh );
QMP_free_msghandle( mtag->send_msgs[n].mh );
QMP_free_msgmem( mtag->send_msgs[n].mm );
free( mtag->send_msgs[n].msg_buf );
}
/* finally the tag itself (free(NULL) is a no-op for empty lists) */
free(mtag->recv_msgs);
free(mtag->send_msgs);
free(mtag);
}
#ifdef COM_CRC
/*
** compute crc32 checksum
*/
/* Taken from the GNU CVS distribution and
modified for SciDAC use C. DeTar 10/11/2003
and MILC use 5/3/2005 */
/* crc32.c -- compute the CRC-32 of a data stream
* Copyright (C) 1995-1996 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
/* Copyright notice reproduced from zlib.h -- (C. DeTar)
version 1.0.4, Jul 24th, 1996.
Copyright (C) 1995-1996 Jean-loup Gailly and Mark Adler
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Jean-loup Gailly Mark Adler
gzip@prep.ai.mit.edu madler@alumni.caltech.edu
The data format used by the zlib library is described by RFCs (Request for
Comments) 1950 to 1952 in the files ftp://ds.internic.net/rfc/rfc1950.txt
(zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format).
*/
typedef u_int32type uLong; /* At least 32 bits */
typedef unsigned char Byte;
typedef Byte Bytef;
typedef uLong uLongf;
#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
#define local static
#ifdef DYNAMIC_CRC_TABLE
local int crc_table_empty = 1;
local uLongf crc_table[256];
local void make_crc_table OF((void));
/*
Generate a table for a byte-wise 32-bit CRC calculation on the polynomial:
x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x+1.
Polynomials over GF(2) are represented in binary, one bit per coefficient,
with the lowest powers in the most significant bit. Then adding polynomials
is just exclusive-or, and multiplying a polynomial by x is a right shift by
one. If we call the above polynomial p, and represent a byte as the
polynomial q, also with the lowest power in the most significant bit (so the
byte 0xb1 is the polynomial x^7+x^3+x+1), then the CRC is (q*x^32) mod p,
where a mod b means the remainder after dividing a by b.
This calculation is done using the shift-register method of multiplying and
taking the remainder. The register is initialized to zero, and for each
incoming bit, x^32 is added mod p to the register if the bit is a one (where
x^32 mod p is p+x^32 = x^26+...+1), and the register is multiplied mod p by
x (which is shifting right by one and adding x^32 mod p if the bit shifted
out is a one). We start with the highest power (least significant bit) of
q and repeat for all eight bits of q.
The table is simply the CRC of all possible eight bit values. This is all
the information needed to generate CRC's on data a byte at a time for all
combinations of CRC register values and incoming bytes.
*/
local void
make_crc_table()
{
uLong c;
int n, k;
uLong poly; /* polynomial exclusive-or pattern */
/* terms of polynomial defining this crc (except x^32): */
static Byte p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26};
/* make exclusive-or pattern from polynomial (0xedb88320L) */
poly = 0L;
for (n = 0; n < sizeof(p)/sizeof(Byte); n++)
poly |= 1L << (31 - p[n]);
for (n = 0; n < 256; n++)
{
c = (uLong)n;
for (k = 0; k < 8; k++)
c = c & 1 ? poly ^ (c >> 1) : c >> 1;
crc_table[n] = c;
}
crc_table_empty = 0;
}
#else
/* ========================================================================
* Table of CRC-32's of all single-byte values (made by make_crc_table)
*/
/* Precomputed CRC-32 table for the reflected polynomial 0xedb88320.
   (Fix: entry 0x68ddb3f8 previously carried a lowercase 'l' suffix,
   easily misread as the digit 1; all suffixes are now uppercase 'L'.) */
local uLongf crc_table[256] = {
0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL,
0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL,
0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L,
0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L,
0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L,
0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L,
0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L,
0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL,
0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
0x2d02ef8dL
};
#endif
/* =========================================================================
* This function can be used by asm versions of crc32()
*/
/* Expose the CRC table (building it lazily in the dynamic-table build);
   usable by asm implementations of crc32(). */
uLongf *get_crc_table()
{
#ifdef DYNAMIC_CRC_TABLE
  if (crc_table_empty)
    make_crc_table();
#endif
  return (uLongf *)crc_table;
}
/* ========================================================================= */
#define DO1(buf) crc = crc_table[((int)crc ^ (*buf++)) & 0xff] ^ (crc >> 8);
#define DO2(buf) DO1(buf); DO1(buf);
#define DO4(buf) DO2(buf); DO2(buf);
#define DO8(buf) DO4(buf); DO4(buf);
/* ========================================================================= */
/*
** Update a running CRC-32 with the bytes buf[0..len-1].  A NULL buffer
** returns the initial value 0, matching the zlib convention.
*/
u_int32type
crc32(u_int32type crc, const unsigned char *buf, size_t len)
{
  if (buf == Z_NULL) return 0L;
#ifdef DYNAMIC_CRC_TABLE
  if (crc_table_empty)
    make_crc_table();
#endif
  /* pre-condition the register; post-condition it again on return */
  crc ^= 0xffffffffL;
  /* eight bytes per iteration, then mop up the remainder one by one */
  for ( ; len >= 8 ; len -= 8)
  {
    DO8(buf);
  }
  while (len > 0)
  {
    DO1(buf);
    --len;
  }
  return crc ^ 0xffffffffL;
}
#endif
|
GB_binop__lor_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__lor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__lor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__lor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_uint64)
// A*D function (colscale): GB (_AxD__lor_uint64)
// D*A function (rowscale): GB (_DxB__lor_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_uint64)
// C=scalar+B GB (_bind1st__lor_uint64)
// C=scalar+B' GB (_bind1st_tran__lor_uint64)
// C=A+scalar GB (_bind2nd__lor_uint64)
// C=A'+scalar GB (_bind2nd_tran__lor_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = ((aij != 0) || (bij != 0))
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) || (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_UINT64 || GxB_NO_LOR_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense; the actual loop lives in the
// included template, specialized by the GB_* macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__lor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, using the
// precomputed slicing of B across B_ntasks tasks / B_nthreads threads.
GrB_Info GB (_Cdense_accumB__lor_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.  p_bwork points at
// the scalar, passed type-erased and reinterpreted as uint64_t here.
// (Fix: removed the unreachable duplicate "return (GrB_SUCCESS)" that
// followed the inner block, which already returns unconditionally.)
GrB_Info GB (_Cdense_accumb__lor_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D; C->x is
// written directly, with the loop supplied by the colscale template.
GrB_Info GB (_AxD__lor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D; the loop is
// supplied by the rowscale template, writing C->x directly.
GrB_Info GB (_DxB__lor_uint64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B (optionally masked by M or !M) with op LOR on uint64;
// the numerical work is done by the included add template.
GrB_Info GB (_AaddB__lor_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slicing workspaces used inside the template; released by GB_FREE_WORK
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (optionally masked) where C is sparse/hypersparse;
// method 08 of the emult family, driven by the included meta template.
GrB_Info GB (_AemultB_08__lor_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#> = A.*B when A is sparse/hyper and B is bitmap/full
// (emult method 02); GB_BINOP_FLIP selects at compile time whether a
// flipped variant of the operator is needed.
GrB_Info GB (_AemultB_02__lor_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
// (LOR is commutative, so GB_BINOP_FLIP is 0 for this file.)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full (emult method 04); work is parallelized over slices of M.
GrB_Info GB (_AemultB_04__lor_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (optionally masked) where the result C is bitmap;
// the loop is supplied by the bitmap emult template.
GrB_Info GB (_AemultB_bitmap__lor_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x || Bx [p]) for each of the bnz positions of B, binding
// the scalar x as the first operand of LOR.
GrB_Info GB (_bind1st__lor_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
uint64_t *Cx = (uint64_t *) Cx_output ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// only positions present in the bitmap Bb are computed
if (GBB (Bb, p))
{
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = ((x != 0) || (bij != 0)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] || y) for each of the anz positions of A, binding
// the scalar y as the second operand of LOR.
GrB_Info GB (_bind2nd__lor_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only positions present in the bitmap Ab are computed
if (GBB (Ab, p))
{
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = ((aij != 0) || (y != 0)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) || (aij != 0)) ; \
}
// C = op (x, A'): transpose A and apply LOR with scalar x bound first;
// the element-wise work comes from GB_CAST_OP via the transpose template.
GrB_Info GB (_bind1st_tran__lor_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the file (generated pattern)
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel used by the GB_unop_transpose.c
// template included below: z = (aij LOR y), written to Cx [pC].
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) || (y != 0)) ; \
}
// C = op (A', y): transpose A and apply LOR with y bound as the 2nd argument
GrB_Info GB (_bind2nd_tran__lor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// y is the bound scalar, constant for the entire operation
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
taskwait-depend.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// taskwait with depend clause was introduced with gcc-9
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8
// icc does not yet support taskwait with depend clause
// XFAIL: icc
// support for taskwait with depend clause introduced in clang-14
// UNSUPPORTED: clang-5, clang-6, clang-7, clang-8, clang-9, clang-10, clang-11, clang-12, clang-13
#include "callback.h"
#include <omp.h>
int main() {
// x is the dependence object: the explicit task writes it (depend(out))
// and the taskwait below waits on it (depend(in)).
int x = 0;
#pragma omp parallel num_threads(2)
{
#pragma omp master
{
print_ids(0);
// log the address of x so the CHECK lines can match it against the
// dependence address reported by the OMPT callbacks
printf("%" PRIu64 ": address of x: %p\n", ompt_get_thread_data()->value,
&x);
// explicit task with an out-dependence on x
#pragma omp task depend(out : x)
{ x++; }
print_fuzzy_address(1);
// taskwait with depend clause: reported via OMPT as an undeferred,
// mergeable taskwait-task with an in-dependence on x
#pragma omp taskwait depend(in: x)
print_fuzzy_address(2);
}
}
return 0;
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_dependences'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_depende
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// make sure initial data pointers are null
// CHECK-NOT: 0: new_task_data initially not null
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_implicit_task_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT:0x[0-f]+]],
// CHECK-SAME: reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: address of x: [[ADDRX:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]],
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: new_task_id=[[FIRST_TASK:[0-f]+]],
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}},
// CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences:
// CHECK-SAME: task_id=[[FIRST_TASK]], deps=[([[ADDRX]],
// CHECK-SAME: ompt_dependence_type_inout)], ndeps=1
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]],
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: new_task_id=[[SECOND_TASK:[0-f]+]],
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}},
// CHECK-SAME: task_type=ompt_task_taskwait|ompt_task_undeferred|
// CHECK-SAME: ompt_task_mergeable=1207959568, has_dependences=yes
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences:
// CHECK-SAME: task_id=[[SECOND_TASK]], deps=[([[ADDRX]],
// CHECK-SAME: ompt_dependence_type_in)], ndeps=1
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_end: task_id=[[SECOND_TASK]]
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two struct timeval values.
 *
 * Normalizes *y in place while borrowing/carrying (as the classic glibc
 * example does), guaranteeing result->tv_usec ends up non-negative.
 * Returns 1 when the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds so that y->tv_usec <= x->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }

    /* Carry whole seconds when the microsecond gap exceeds one second. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* tv_usec is now certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
/* Driver: allocates the 3D grids, runs the order-4 (25-point) wave stencil
 * TESTS times, and reports the best wall-clock time. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* Default problem size (16^3 interior plus 4-cell halo on each side,
   * 4 time steps).  The previous version read Nx/Ny/Nz/Nt uninitialized
   * when too few command-line arguments were given, which is UB. */
  Nx = Ny = Nz = 16 + 8;
  Nt = 4;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Allocate the two time planes A[0]/A[1] and the coefficient field roc2.
   * (The old code malloc'd a one-element placeholder for roc2 and then
   * immediately overwrote the pointer, leaking it.) */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2;
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  roc2 = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 24;
  tile_size[3] = 256;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;

  /* Initialize every cell of both time planes and of roc2.  The old loops
   * started at index 1 and never touched A[1], so the stencil below read
   * uninitialized memory (halo indices 0..3 and the whole A[1] plane). */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  /* num_threads is consumed by the PRINT_RESULTS reporting macro. */
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* Order-4 central-difference Laplacian coefficients. */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            /* Leapfrog update: the (t+1) plane is read (previous step)
             * and overwritten in place. */
            A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                coef0* A[t%2][i  ][j  ][k  ] +
                coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                       A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                       A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                       A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                       A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                       A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                       A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                       A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                       A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (including tile_size and A, previously leaked)
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  free(roc2);
  free(tile_size);
  return 0;
}
|
GB_subref_slice.c | //------------------------------------------------------------------------------
// GB_subref_slice: construct coarse/fine tasks for C = A(I,J)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Determine the tasks for computing C=A(I,J). The matrix C has Cnvec vectors,
// and these are divided into coarse and fine tasks. A coarse task will
// compute one or more whole vectors of C. A fine task operates on a slice of
// a single vector of C. The slice can be done by the # of entries in the
// corresponding vector of A, or by the list of indices I, depending on how the
// work is done for that method.
// The (kC)th vector will access A(imin:imax,kA) in Ai,Ax [pA:pA_end-1], where
// pA = Ap_start [kC] and pA_end = Ap_end [kC].
// The computation of each vector C(:,kC) = A(I,kA) is by done using one of 12
// different cases, depending on the vector, as determined by GB_subref_method.
// Not all vectors in C are computed using the same method.
// Note that J can have duplicates. kC is unique (0:Cnvec-1) but the
// corresponding vector kA in A may repeat, if J has duplicates. Duplicates in
// J are not exploited, since the coarse/fine tasks are constructed by slicing
// the list of vectors Ch of size Cnvec, not the vectors of A.
// Compare this function with GB_ewise_slice, which constructs coarse/fine
// tasks for the eWise operations (C=A+B, C=A.*B, and C<M>=Z).
#define GB_FREE_WORK \
{ \
GB_FREE (Coarse) ; \
GB_FREE (Cwork) ; \
}
#define GB_FREE_ALL \
{ \
GB_FREE_WORK ; \
GB_FREE (TaskList) ; \
GB_FREE (Mark) ; \
GB_FREE (Inext) ; \
}
#include "GB_subref.h"
GrB_Info GB_subref_slice
(
// output:
GB_task_struct **p_TaskList, // array of structs, of size max_ntasks
int *p_max_ntasks, // size of TaskList
int *p_ntasks, // # of tasks constructed
int *p_nthreads, // # of threads for subref operation
bool *p_post_sort, // true if a final post-sort is needed
int64_t *GB_RESTRICT *p_Mark, // for I inverse, if needed; size avlen
int64_t *GB_RESTRICT *p_Inext, // for I inverse, if needed; size nI
int64_t *p_nduplicates, // # of duplicates, if I inverse computed
// from phase0:
const int64_t *GB_RESTRICT Ap_start, // location of A(imin:imax,kA)
const int64_t *GB_RESTRICT Ap_end,
const int64_t Cnvec, // # of vectors of C
const bool need_qsort, // true if C must be sorted
const int Ikind, // GB_ALL, GB_RANGE, GB_STRIDE or GB_LIST
const int64_t nI, // length of I
const int64_t Icolon [3], // for GB_RANGE and GB_STRIDE
// original input:
const int64_t avlen, // A->vlen
const int64_t anz, // nnz (A)
const GrB_Index *I,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (p_TaskList != NULL) ;
ASSERT (p_max_ntasks != NULL) ;
ASSERT (p_ntasks != NULL) ;
ASSERT (p_nthreads != NULL) ;
ASSERT (p_post_sort != NULL) ;
ASSERT (p_Mark != NULL) ;
ASSERT (p_Inext != NULL) ;
ASSERT (p_nduplicates != NULL) ;
ASSERT ((Cnvec > 0) == (Ap_start != NULL)) ;
ASSERT ((Cnvec > 0) == (Ap_end != NULL)) ;
// clear the outputs so the caller sees NULL on any early error return
(*p_TaskList) = NULL ;
(*p_Mark ) = NULL ;
(*p_Inext ) = NULL ;
int64_t *GB_RESTRICT Mark = NULL ;
int64_t *GB_RESTRICT Inext = NULL ;
int64_t *GB_RESTRICT Cwork = NULL ;
int64_t *GB_RESTRICT Coarse = NULL ; // size ntasks1+1
int ntasks1 = 0 ;
GrB_Info info ;
//--------------------------------------------------------------------------
// determine # of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// allocate the initial TaskList
//--------------------------------------------------------------------------
// Allocate the TaskList to hold at least 2*ntask0 tasks. It will grow
// later, if needed. Usually, 64*nthreads_max is enough, but in a few cases
// fine tasks can cause this number to be exceeded. If that occurs,
// TaskList is reallocated.
// When the mask is present, it is often fastest to break the work up
// into tasks, even when nthreads_max is 1.
GB_task_struct *GB_RESTRICT TaskList = NULL ;
int max_ntasks = 0 ;
int ntasks0 = (nthreads_max == 1) ? 1 : (32 * nthreads_max) ;
GB_REALLOC_TASK_LIST (TaskList, ntasks0, max_ntasks) ;
//--------------------------------------------------------------------------
// determine if I_inverse can be constructed
//--------------------------------------------------------------------------
// I_inverse_ok is true if I might be inverted. If false, then I will not
// be inverted. I can be inverted only if the workspace for the inverse
// does not exceed nnz(A). Note that if I was provided on input as an
// explicit list, but consists of a contiguous range imin:imax, then Ikind
// is now GB_LIST and the list I is ignored.
// If I_inverse_ok is true, the inverse of I might still not be needed.
// need_I_inverse becomes true if any C(:,kC) = A (I,kA) computation
// requires I inverse.
int64_t I_inverse_limit = GB_IMAX (4096, anz) ;
bool I_inverse_ok = (Ikind == GB_LIST &&
((nI > avlen / 256) || ((nI + avlen) < I_inverse_limit))) ;
bool need_I_inverse = false ;
bool post_sort = false ;
// increment of the index list I, used for GB_RANGE and GB_STRIDE
int64_t iinc = Icolon [GxB_INC] ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
Cwork = GB_MALLOC (Cnvec+1, int64_t) ;
if (Cwork == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// estimate the work required for each vector of C
//--------------------------------------------------------------------------
int nthreads_for_Cwork = GB_nthreads (Cnvec, chunk, nthreads_max) ;
int64_t kC ;
#pragma omp parallel for num_threads(nthreads_for_Cwork) schedule(static) \
reduction(||:need_I_inverse)
for (kC = 0 ; kC < Cnvec ; kC++)
{
// jC is the (kC)th vector of C = A(I,J)
// int64_t jC = (Ch == NULL) ? kC : Ch [kC] ;
// C(:,kC) = A(I,kA) will be constructed
int64_t pA = Ap_start [kC] ;
int64_t pA_end = Ap_end [kC] ;
int64_t alen = pA_end - pA ; // nnz (A (imin:imax,j))
int64_t work ; // amount of work for C(:,kC) = A (I,kA)
bool this_needs_I_inverse ; // true if this vector needs I inverse
// ndupl in I not yet known; it is found when I is inverted. For
// now, assume I has no duplicate entries. All that is needed for now
// is the work required for each C(:,kC), and whether or not I inverse
// must be created. The # of duplicates has no impact on the I inverse
// decision, and a minor effect on the work (which is ignored).
GB_subref_method (&work, &this_needs_I_inverse, alen, avlen,
Ikind, nI, I_inverse_ok, need_qsort, iinc, 0) ;
// log the result
need_I_inverse = need_I_inverse || this_needs_I_inverse ;
Cwork [kC] = work ;
}
//--------------------------------------------------------------------------
// replace Cwork with its cumulative sum
//--------------------------------------------------------------------------
GB_cumsum (Cwork, Cnvec, NULL, nthreads_for_Cwork) ;
// cwork = total work for computing all of C = A(I,J)
double cwork = (double) Cwork [Cnvec] ;
//--------------------------------------------------------------------------
// determine # of threads and tasks to use for C=A(I,J)
//--------------------------------------------------------------------------
int nthreads = GB_nthreads (cwork, chunk, nthreads_max) ;
ntasks1 = (nthreads == 1) ? 1 : (32 * nthreads) ;
double target_task_size = cwork / (double) (ntasks1) ;
target_task_size = GB_IMAX (target_task_size, chunk) ;
//--------------------------------------------------------------------------
// invert I if required
//--------------------------------------------------------------------------
int64_t ndupl = 0 ;
if (need_I_inverse)
{
GB_OK (GB_I_inverse (I, nI, avlen, &Mark, &Inext, &ndupl, Context)) ;
ASSERT (Mark != NULL) ;
ASSERT (Inext != NULL) ;
}
//--------------------------------------------------------------------------
// check for quick return for a single task
//--------------------------------------------------------------------------
if (Cnvec == 0 || ntasks1 == 1)
{
// construct a single coarse task that computes all of C
TaskList [0].kfirst = 0 ;
TaskList [0].klast = Cnvec-1 ;
// free workspace and return result
GB_FREE_WORK ;
(*p_TaskList ) = TaskList ;
(*p_max_ntasks ) = max_ntasks ;
(*p_ntasks ) = (Cnvec == 0) ? 0 : 1 ;
(*p_nthreads ) = 1 ;
(*p_post_sort ) = false ;
(*p_Mark ) = Mark ;
(*p_Inext ) = Inext ;
(*p_nduplicates) = ndupl ;
return (GrB_SUCCESS) ;
}
//--------------------------------------------------------------------------
// slice the work into coarse tasks
//--------------------------------------------------------------------------
if (!GB_pslice (&Coarse, Cwork, Cnvec, ntasks1))
{
// out of memory
GB_FREE_ALL ;
return (GB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// construct all tasks, both coarse and fine
//--------------------------------------------------------------------------
int ntasks = 0 ;
for (int t = 0 ; t < ntasks1 ; t++)
{
//----------------------------------------------------------------------
// coarse task computes C (:,k:klast)
//----------------------------------------------------------------------
int64_t k = Coarse [t] ;
int64_t klast = Coarse [t+1] - 1 ;
if (k >= Cnvec)
{
//------------------------------------------------------------------
// all tasks have been constructed
//------------------------------------------------------------------
break ;
}
else if (k < klast)
{
//------------------------------------------------------------------
// coarse task has 2 or more vectors
//------------------------------------------------------------------
// This is a non-empty coarse-grain task that does two or more
// entire vectors of C, vectors k:klast, inclusive.
GB_REALLOC_TASK_LIST (TaskList, ntasks + 1, max_ntasks) ;
TaskList [ntasks].kfirst = k ;
TaskList [ntasks].klast = klast ;
ntasks++ ;
}
else
{
//------------------------------------------------------------------
// coarse task has 0 or 1 vectors
//------------------------------------------------------------------
// As a coarse-grain task, this task is empty or does a single
// vector, k. Vector k must be removed from the work done by this
// and any other coarse-grain task, and split into one or more
// fine-grain tasks.
for (int tt = t ; tt < ntasks1 ; tt++)
{
// remove k from the initial slice tt
if (Coarse [tt] == k)
{
// remove k from task tt
Coarse [tt] = k+1 ;
}
else
{
// break, k not in task tt
break ;
}
}
//------------------------------------------------------------------
// determine the # of fine-grain tasks to create for vector k
//------------------------------------------------------------------
double ckwork = Cwork [k+1] - Cwork [k] ;
int nfine = ckwork / target_task_size ;
nfine = GB_IMAX (nfine, 1) ;
// make the TaskList bigger, if needed
GB_REALLOC_TASK_LIST (TaskList, ntasks + nfine, max_ntasks) ;
//------------------------------------------------------------------
// create the fine-grain tasks
//------------------------------------------------------------------
if (nfine == 1)
{
//--------------------------------------------------------------
// this is a single coarse task for all of vector k
//--------------------------------------------------------------
TaskList [ntasks].kfirst = k ;
TaskList [ntasks].klast = k ;
ntasks++ ;
}
else
{
//--------------------------------------------------------------
// slice vector k into nfine fine tasks
//--------------------------------------------------------------
// There are two kinds of fine tasks, depending on the method
// used to compute C(:,kC) = A(I,kA). If the method iterates
// across all entries in A(imin:imax,kA), then those entries
// are sliced (of size alen). Three methods (1, 2, and 6)
// iterate across all entries in I instead (of size nI).
int64_t pA = Ap_start [k] ;
int64_t pA_end = Ap_end [k] ;
int64_t alen = pA_end - pA ; // nnz (A (imin:imax,j))
int method = GB_subref_method (NULL, NULL, alen, avlen,
Ikind, nI, I_inverse_ok, need_qsort, iinc, ndupl) ;
if (method == 10)
{
// multiple fine tasks operate on a single vector C(:,kC)
// using method 10, and so a post-sort is needed.
post_sort = true ;
}
if (method == 1 || method == 2 || method == 6)
{
// slice I for this task
nfine = GB_IMIN (nfine, nI) ;
nfine = GB_IMAX (nfine, 1) ;
for (int tfine = 0 ; tfine < nfine ; tfine++)
{
// flag this as a fine task, and record the method.
// Methods 1, 2, and 6 slice I, not A(:,kA)
TaskList [ntasks].kfirst = k ;
TaskList [ntasks].klast = -method ;
// do not partition A(:,kA)
TaskList [ntasks].pA = pA ;
TaskList [ntasks].pA_end = pA_end ;
// partition I for this task
GB_PARTITION (TaskList [ntasks].pB,
TaskList [ntasks].pB_end, nI, tfine, nfine) ;
// unused
TaskList [ntasks].pM = -1 ;
TaskList [ntasks].pM_end = -1 ;
// no post sort
TaskList [ntasks].len = 0 ;
ntasks++ ;
}
}
else
{
// slice A(:,kA) for this task
nfine = GB_IMIN (nfine, alen) ;
nfine = GB_IMAX (nfine, 1) ;
bool reverse = (method == 8 || method == 9) ;
for (int tfine = 0 ; tfine < nfine ; tfine++)
{
// flag this as a fine task, and record the method.
// These methods slice A(:,kA). Methods 8 and 9
// must do so in reverse order.
TaskList [ntasks].kfirst = k ;
TaskList [ntasks].klast = -method ;
// partition the items for this task
GB_PARTITION (TaskList [ntasks].pA,
TaskList [ntasks].pA_end, alen,
(reverse) ? (nfine-tfine-1) : tfine, nfine) ;
TaskList [ntasks].pA += pA ;
TaskList [ntasks].pA_end += pA ;
// do not partition I
TaskList [ntasks].pB = 0 ;
TaskList [ntasks].pB_end = nI ;
// unused
TaskList [ntasks].pM = -1 ;
TaskList [ntasks].pM_end = -1 ;
// flag the task that does the post sort
TaskList [ntasks].len = (tfine == 0 && method == 10) ;
ntasks++ ;
}
}
}
}
}
ASSERT (ntasks > 0) ;
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORK ;
(*p_TaskList ) = TaskList ;
(*p_max_ntasks ) = max_ntasks ;
(*p_ntasks ) = ntasks ;
(*p_nthreads ) = nthreads ;
(*p_post_sort ) = post_sort ;
(*p_Mark ) = Mark ;
(*p_Inext ) = Inext ;
(*p_nduplicates) = ndupl ;
return (GrB_SUCCESS) ;
}
|
SubmodularFunction.h | #ifndef EXEMCL_SUBM_FUNCTION_H
#define EXEMCL_SUBM_FUNCTION_H
#include <src/io/DataTypes.h>
#include <thread>
#include <utility>
namespace exemcl {
/**
* Submodular functions represent a special kind of set function, which map subsets (usually denoted as \f$S\f$) of some ground set
* (denoted by \f$V\f$) to a positive real value (sometimes called the "utility"), whilst maintaining a property of diminishing returns.
*
* Formally, we have the ground set \f$V \subset \mathbb{R} \f$ and a function \f$f: \mathcal{P}(V) \rightarrow \mathbb{R}^+\f$.
* The function \f$f\f$ is now submodular, iff \f$\Delta_f(e \mid A) \geq \Delta_f(e \mid B)\f$ for arbitrary \f$A \subseteq B \subseteq V\f$ and \f$e \in V \setminus B\f$.
* The vector \f$e\f$ is sometimes called the "marginal element". \f$\Delta_f\f$ represents the discrete derivative \f$\Delta_f(e | S) := f(S \cup \left\lbrace e \right\rbrace
* - f(S))\f$.
*
* This (abstract) class provides an interface to implementing submodular functions of any kind.
*/
class SubmodularFunction {
public:
/**
* Provides a base constructor, which updates the worker count of the submodular function.
* @param workerCount The number of workers to employ (defaults to -1, i.e. all available cores).
*/
SubmodularFunction(int workerCount = -1) {
setWorkerCount(workerCount);
}
/**
* Calculates the submodular function value for a set.
*
* @param S Set of vectors, to calculate the submodular function for.
* @return The submodular function value \f$f(S)\f$.
*/
virtual double operator()(const MatrixX<double>& S) const = 0;
/**
* Calculates the submodular function value for a set.
*
* @param S Set of vectors, to calculate the submodular function for.
* @return The submodular function value \f$f(S)\f$.
*/
virtual double operator()(const MatrixX<double>& S) = 0;
/**
* Calculates the marginal gain of the submodular function, w.r.t to \f$S\f$ and a marginal element \f$e\f$.
* @param S Set of vectors, to calculate the submodular function for.
* @param elem A marginal element.
* @return The marginal gain of the \f$f(S) - f(S \cup \left\{elem\right\})\f$
*/
virtual double operator()(const MatrixX<double>& S, VectorXRef<double> elem) const {
if (S.cols() == elem.size()) {
std::unique_ptr<MatrixX<double>> S_elem = std::make_unique<MatrixX<double>>(S);
S_elem->conservativeResize(S.rows() + 1, Eigen::NoChange_t());
S_elem->row(S.rows()) << elem.transpose();
// Call subsequent operators.
auto marginalGain = operator()(*S_elem) - operator()(S);
// Return value.
return marginalGain;
} else
throw std::runtime_error("SubmodularFunction::operator(): The number of columns in matrix `S` and the number of elements in vector `elem` do not match ("
+ std::to_string(S.cols()) + " vs. " + std::to_string(elem.size()) + ").");
}
/**
* Calculates the marginal gain of the submodular function, w.r.t to \f$S\f$ and a marginal element \f$e\f$.
* @param S Set of vectors, to calculate the submodular function for.
* @param elem A marginal element.
* @return The marginal gain of the \f$f(S) - f(S \cup \left\{elem\right\})\f$
*/
virtual double operator()(const MatrixX<double>& S, VectorXRef<double> elem) {
return ((const SubmodularFunction*) (this))->operator()(S, elem);
}
/**
* Calculates the submodular function for more than one set.
*
* @param S_multi A set of sets \f$ S = \left\{S_1, ..., S_n\right\}\f$, which should be evaluated using the submodular function.
* @return A set of utility values \f$\left\{f(S_1), ..., f(S_n)\right\}\f$.
*/
virtual std::vector<double> operator()(const std::vector<MatrixX<double>>& S_multi) const {
// Construct vector for storing utilities.
std::vector<double> utilities;
utilities.resize(S_multi.size());
// Calculate utilities.
#pragma omp parallel for num_threads(_workerCount)
for (unsigned long i = 0; i < S_multi.size(); i++)
utilities[i] = operator()(S_multi[i]);
// Return value.
return utilities;
};
/**
* Calculates the submodular function for more than one set.
*
* @param S_multi A set of sets \f$ S = \left\{S_1, ..., S_n\right\}\f$, which should be evaluated using the submodular function.
* @return A set of utility values \f$\left\{f(S_1), ..., f(S_n)\right\}\f$.
*/
virtual std::vector<double> operator()(const std::vector<MatrixX<double>>& S_multi) {
return ((const SubmodularFunction*) (this))->operator()(S_multi);
};
/**
* Calculates the marginal gain for a multi set and a marginal element.
*
* @param S_multi A set of sets \f$ S = \left\{S_1, ..., S_n\right\} \f$, which should be evaluated using the submodular function.
* @param elem A marginal element \f$ e \f$.
* @return A set of marginal gain values \f$\Delta_f(e | S_1), ..., \Delta_f(e | S_n) \f$.
*/
virtual std::vector<double> operator()(const std::vector<MatrixX<double>>& S_multi, VectorXRef<double> elem) const {
auto S_multi_elem = std::make_unique<std::vector<MatrixX<double>>>();
S_multi_elem->reserve(S_multi.size());
// Create a new S_multi set, but include the marginal vector.
for (auto& S_elem : S_multi) {
S_multi_elem->push_back(S_elem);
S_multi_elem->back().conservativeResize(S_elem.rows() + 1, Eigen::NoChange_t());
S_multi_elem->back().row(S_elem.rows()) << elem.transpose();
}
// Evaluate S_multi_elem and S_multi.
auto utilityS_multi = operator()(S_multi);
auto utilityS_multi_elem = operator()(*S_multi_elem);
// Calculate the difference between the utilities of S_multi_elem and S_multi.
std::vector<double> marginalGains;
marginalGains.resize(S_multi.size());
for (unsigned long i = 0; i < utilityS_multi.size(); i++)
marginalGains[i] = utilityS_multi_elem[i] - utilityS_multi[i];
return marginalGains;
}
/**
* Calculates the marginal gain for a multi set and a marginal element.
*
* @param S_multi A set of sets \f$ S = \left\{S_1, ..., S_n\right\} \f$, which should be evaluated using the submodular function.
* @param elem A marginal element \f$ e \f$.
* @return A set of marginal gain values \f$\Delta_f(e | S_1), ..., \Delta_f(e | S_n) \f$.
*/
virtual std::vector<double> operator()(const std::vector<MatrixX<double>>& S_multi, VectorXRef<double> elem) {
return ((const SubmodularFunction*) (this))->operator()(S_multi, elem);
}
/**
* Calculates the marginal gain for a single set \f$S\f$ and a set of marginal vectors.
*
* @param S Set of vectors, which should be used to calculate the marginal value in conjunction with `elems`.
* @param elems A set of marginal vectors \f$ \left\{e_1, ..., e_n \right\}\f$.
* @return A set of marginal gain values \f$\Delta_f(e_1 | S), ..., \Delta_f(e_n | S) \f$.
*/
virtual std::vector<double> operator()(const MatrixX<double>& S, std::vector<VectorXRef<double>> elems) const {
// Create a vector, which will hold {S u e_1}, ..., {S u e_n}
auto S_elems = std::make_unique<std::vector<MatrixX<double>>>(elems.size(), S);
// Build {S u e_1}, ..., {S u e_n}.
for (unsigned int i = 0; i < elems.size(); i++) {
auto& elem = elems[i];
(*S_elems)[i].conservativeResize(S.rows() + 1, Eigen::NoChange_t());
(*S_elems)[i].row(S.rows()) << elem.transpose();
}
// Evaluate S.
auto S_funcValue = operator()(S);
// Evaluate all S with elems.
auto S_elems_funcValue = operator()(*S_elems);
// Create a result vector.
std::vector<double> gains;
gains.resize(elems.size());
// Fill the results.
for (unsigned int i = 0; i < elems.size(); i++)
gains[i] = S_elems_funcValue[i] - S_funcValue;
return gains;
}
/**
* Calculates the marginal gain for a single set \f$S\f$ and a set of marginal vectors.
*
* @param S Set of vectors, which should be used to calculate the marginal value in conjunction with `elems`.
* @param elems A set of marginal vectors \f$ \left\{e_1, ..., e_n \right\}\f$.
* @return A set of marginal gain values \f$\Delta_f(e_1 | S), ..., \Delta_f(e_n | S) \f$.
*/
virtual std::vector<double> operator()(const MatrixX<double>& S, std::vector<VectorXRef<double>> elems) {
return ((const SubmodularFunction*) (this))->operator()(std::move(S), std::move(elems));
}
/**
* Returns the worker count, which is currently assigned to this submodular function.
* @return Worker count.
*/
virtual unsigned int getWorkerCount() const {
return _workerCount;
};
/**
* Updates the worker count for the submodular function. If the supplied value is below one,
* the function will try to update the worker count to the number of cores available to the
* program.
*
* @param workerCount New worker count.
*/
virtual void setWorkerCount(int workerCount) {
if (workerCount >= 1)
_workerCount = workerCount;
else {
auto suggestedThreads = std::thread::hardware_concurrency();
_workerCount = suggestedThreads > 0 ? suggestedThreads : 1;
}
}
/**
* Limits the usable memory by this class. Must be overridden by the implementing class and yields an exception otherwise.
* @param memoryLimit Memory limit (in byte).
*/
virtual void setMemoryLimit(long memoryLimit) {
throw std::runtime_error("SubmodularFunction::setMemoryLimit: Not implemented.");
}
/**
 * Virtual destructor, so derived submodular functions are destroyed
 * correctly when deleted through a base-class pointer.
 */
virtual ~SubmodularFunction() = default;
protected:
// Number of worker threads used for evaluation; defaults to sequential (1).
unsigned int _workerCount = 1;
};
}
#endif // EXEMCL_SUBM_FUNCTION_H |
test.c | #include <stdio.h>
#include <omp.h>
omp_lock_t my_lock;
int main(int argc, char **argv)
{
int i, thread_id;
int global_nloops, private_nloops;
global_nloops = 0;
/* Demo 1: reduce per-thread counts into global_nloops using a critical
 * section.  private_nloops and thread_id are thread-private; the loop
 * index i is implicitly privatized by the omp for construct. */
#pragma omp parallel private(private_nloops, thread_id)
{
private_nloops = 0;
thread_id = omp_get_thread_num();
#pragma omp for
for (i=0; i<100000; ++i)
{
++private_nloops;
}
/* Only one thread at a time may update the shared sum. */
#pragma omp critical
{
printf("Thread %d adding its iterations (%d) to the sum (%d)...\n",
thread_id, private_nloops, global_nloops);
global_nloops += private_nloops;
printf("CRITICAL ...total nloops now equals %d.\n", global_nloops);
}
}
//==================================================================================
/* Demo 2: the same reduction, but the mutual exclusion is done with an
 * explicit OpenMP lock instead of a critical construct. */
global_nloops = 0;
omp_init_lock(&my_lock);
#pragma omp parallel private(private_nloops, thread_id)
{
private_nloops = 0;
thread_id = omp_get_thread_num();
#pragma omp for
for (i=0; i<100000; ++i)
{
++private_nloops;
}
/* Acquire the lock around the shared update; the braces are purely
 * cosmetic — the lock, not the block, provides the exclusion. */
omp_set_lock(&my_lock);
{
printf("Thread %d adding its iterations (%d) to the sum (%d)...\n",
thread_id, private_nloops, global_nloops);
global_nloops += private_nloops;
printf("LOCK ...total nloops now equals %d.\n", global_nloops);
}
omp_unset_lock(&my_lock);
}
omp_destroy_lock(&my_lock);
return 0;
}
|
GB_unaryop__lnot_fp32_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp32_fp64
// op(A') function: GB_tran__lnot_fp32_fp64
// C type: float
// A type: double
// cast: float cij = (float) aij
// unaryop: cij = !(aij != 0)
// type of the input matrix A entries
#define GB_ATYPE \
double
// type of the output matrix C entries
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// access the output entry at position p
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Computes Cx [p] = (float) !(Ax [p] != 0) for p = 0..anz-1, in parallel.
// Returns GrB_NO_VALUE if this operator/type combination is disabled at
// compile time (see GB_DISABLE above), GrB_SUCCESS otherwise.
GrB_Info GB_unop__lnot_fp32_fp64
(
float *restrict Cx,             // output array, float
const double *restrict Ax,      // input array, double
int64_t anz,                    // number of entries
int nthreads                    // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose kernel body lives in the included
// template GB_unaryop_transpose.c, specialized by the GB_* macros above.
// Returns GrB_NO_VALUE if this operator/type combination is disabled.
GrB_Info GB_tran__lnot_fp32_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
depend-1.c | /* { dg-do compile } */
/* { dg-options "-fopenmp" } */
extern int a[][10], a2[][10];
int b[10], c[10][2], d[10], e[10], f[10];
int b2[10], c2[10][2], d2[10], e2[10], f2[10];
int k[10], l[10], m[10], n[10], o;
int *p;
void bar (void);
int t[10];
#pragma omp threadprivate (t)
/* Compile-only testsuite function: each task's depend clause exercises one
 * array-section diagnostic.  The dg-error comments are test expectations and
 * must stay on the same line as the construct they check; the second half
 * (a2..j2) lists well-formed sections that must compile silently. */
void
foo (int g[3][10], int h[4][8], int i[2][10], int j[][9],
int g2[3][10], int h2[4][8], int i2[2][10], int j2[][9])
{
#pragma omp task depend(in: bar[2:5]) /* { dg-error "is not a variable" } */
;
#pragma omp task depend(out: t[2:5])
;
#pragma omp task depend(inout: k[0.5:]) /* { dg-error "low bound \[^\n\r]* of array section does not have integral type" } */
;
#pragma omp task depend(in: l[:7.5f]) /* { dg-error "length \[^\n\r]* of array section does not have integral type" } */
;
#pragma omp task depend(out: m[p:]) /* { dg-error "low bound \[^\n\r]* of array section does not have integral type" } */
;
#pragma omp task depend(inout: n[:p]) /* { dg-error "length \[^\n\r]* of array section does not have integral type" } */
;
#pragma omp task depend(in: o[2:5]) /* { dg-error "does not have pointer or array type" } */
;
#pragma omp task depend(out: a[:][2:4]) /* { dg-error "array type length expression must be specified" } */
;
#pragma omp task depend(inout: b[-1:]) /* { dg-error "negative low bound in array section" } */
;
#pragma omp task depend(inout: c[:-3][1:1]) /* { dg-error "negative length in array section" } */
;
#pragma omp task depend(in: d[11:]) /* { dg-error "low bound \[^\n\r]* above array section size" } */
;
#pragma omp task depend(out: e[:11]) /* { dg-error "length \[^\n\r]* above array section size" } */
;
#pragma omp task depend(out: f[1:10]) /* { dg-error "high bound \[^\n\r]* above array section size" } */
;
#pragma omp task depend(in: g[:][2:4]) /* { dg-error "for array function parameter length expression must be specified" } */
;
#pragma omp task depend(in: h[2:2][-1:]) /* { dg-error "negative low bound in array section" } */
;
#pragma omp task depend(inout: h[:1][:-3]) /* { dg-error "negative length in array section" } */
;
#pragma omp task depend(out: i[:1][11:]) /* { dg-error "low bound \[^\n\r]* above array section size" } */
;
#pragma omp task depend(in: j[3:4][:10]) /* { dg-error "length \[^\n\r]* above array section size" } */
;
#pragma omp task depend(out: j[30:10][5:5]) /* { dg-error "high bound \[^\n\r]* above array section size" } */
;
#pragma omp task depend(out: a2[:3][2:4])
;
#pragma omp task depend(inout: b2[0:])
;
#pragma omp task depend(inout: c2[:3][1:1])
;
#pragma omp task depend(in: d2[9:])
;
#pragma omp task depend(out: e2[:10])
;
#pragma omp task depend(out: f2[1:9])
;
#pragma omp task depend(in: g2[:2][2:4])
;
#pragma omp task depend(in: h2[2:2][0:])
;
#pragma omp task depend(inout: h2[:1][:3])
;
#pragma omp task depend(out: i2[:1][9:])
;
#pragma omp task depend(in: j2[3:4][:9])
;
#pragma omp task depend(out: j2[30:10][5:4])
;
}
|
fac_cf_coarsen.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
/******************************************************************************
* OpenMP Problems
*
* Are private static arrays a problem?
*
******************************************************************************/
#include "_hypre_sstruct_ls.h"
#include "fac.h"
/* MapStencilRank: map a 3-d stencil offset (components in {-1,0,1}) to a
   unique rank in 0..26.  A component of -1 is encoded as 2, so
   rank = ii + 3*jj + 9*kk and the zero offset maps to rank 0. */
#define MapStencilRank(stencil, rank) \
{ \
HYPRE_Int ii,jj,kk; \
ii = hypre_IndexX(stencil); \
jj = hypre_IndexY(stencil); \
kk = hypre_IndexZ(stencil); \
if (ii==-1) \
ii=2; \
if (jj==-1) \
jj=2; \
if (kk==-1) \
kk=2; \
rank = ii + 3*jj + 9*kk; \
}
/* InverseMapStencilRank: inverse of MapStencilRank — decode a rank in 0..26
   back into the stencil offset, mapping encoded 2 back to -1. */
#define InverseMapStencilRank(rank, stencil) \
{ \
HYPRE_Int ij,ii,jj,kk; \
ij = (rank%9); \
ii = (ij%3); \
jj = (ij-ii)/3; \
kk = (rank-3*jj-ii)/9; \
if (ii==2) \
ii= -1; \
if (jj==2) \
jj= -1; \
if (kk==2) \
kk= -1; \
hypre_SetIndex3(stencil, ii, jj, kk); \
}
/* AbsStencilShape: L1 norm of the stencil offset (0 = centre, 1 = face
   neighbour, 2 = edge neighbour, 3 = corner neighbour). */
#define AbsStencilShape(stencil, abs_shape) \
{ \
HYPRE_Int ii,jj,kk; \
ii = hypre_IndexX(stencil); \
jj = hypre_IndexY(stencil); \
kk = hypre_IndexZ(stencil); \
abs_shape= hypre_abs(ii) + hypre_abs(jj) + hypre_abs(kk); \
}
/*--------------------------------------------------------------------------
* hypre_AMR_CFCoarsen: Coarsens the CF interface to get the stencils
* reaching into a coarsened fbox. Also sets the centre coefficient of CF
* interface nodes to have "preserved" row sum.
*
* On entry, fac_A already has all the coefficient values of the cgrid
* chunks that are not underlying a fbox. Note that A & fac_A have the
* same grid & graph. Therefore, we will use A's grid & graph.
*
* ASSUMING ONLY LIKE-VARIABLES COUPLE THROUGH CF CONNECTIONS.
*--------------------------------------------------------------------------*/
/* Coarsens the coarse/fine (C/F) interface stencil connections of A into
 * fac_A (see the banner comment above).  Parameters:
 *   A              - composite matrix; its grid/graph are used throughout.
 *   fac_A          - output matrix receiving the coarsened CF coefficients.
 *   refine_factors - per-dimension refinement factor between levels.
 *   level          - fine level index; the coarse part is level-1.
 * Returns 0.  Assumes only like variables couple across CF connections. */
HYPRE_Int
hypre_AMR_CFCoarsen( hypre_SStructMatrix * A,
hypre_SStructMatrix * fac_A,
hypre_Index refine_factors,
HYPRE_Int level )
{
MPI_Comm comm = hypre_SStructMatrixComm(A);
hypre_SStructGraph *graph = hypre_SStructMatrixGraph(A);
HYPRE_Int graph_type = hypre_SStructGraphObjectType(graph);
hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph);
HYPRE_Int nUventries = hypre_SStructGraphNUVEntries(graph);
HYPRE_IJMatrix ij_A = hypre_SStructMatrixIJMatrix(A);
HYPRE_Int matrix_type= hypre_SStructMatrixObjectType(A);
HYPRE_Int ndim = hypre_SStructMatrixNDim(A);
hypre_SStructPMatrix *A_pmatrix;
hypre_StructMatrix *smatrix_var;
hypre_StructStencil *stencils;
HYPRE_Int stencil_size;
hypre_Index stencil_shape_i;
hypre_Index loop_size;
hypre_Box refined_box;
HYPRE_Real **a_ptrs;
hypre_Box *A_dbox;
HYPRE_Int part_crse= level-1;
HYPRE_Int part_fine= level;
hypre_BoxManager *fboxman;
hypre_BoxManEntry **boxman_entries, *boxman_entry;
HYPRE_Int nboxman_entries;
hypre_Box boxman_entry_box;
hypre_BoxArrayArray ***fgrid_cinterface_extents;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
hypre_Box *cgrid_box;
hypre_Index node_extents;
hypre_Index stridec, stridef;
hypre_BoxArrayArray *cinterface_arrays;
hypre_BoxArray *cinterface_array;
hypre_Box *fgrid_cinterface;
HYPRE_Int centre;
HYPRE_Int ci, fi, boxi;
HYPRE_Int max_stencil_size= 27;
HYPRE_Int falseV= 0;
HYPRE_Int trueV = 1;
HYPRE_Int found;
HYPRE_Int *stencil_ranks, *rank_stencils;
HYPRE_Int rank, startrank;
HYPRE_Real *vals;
HYPRE_Int i, j, iA;
HYPRE_Int nvars, var1;
hypre_Index lindex, zero_index;
hypre_Index index1, index2;
hypre_Index index_temp;
hypre_SStructUVEntry *Uventry;
HYPRE_Int nUentries, cnt1;
HYPRE_Int box_array_size;
HYPRE_Int *ncols, *rows, *cols;
HYPRE_Int *temp1, *temp2;
HYPRE_Int myid;
hypre_MPI_Comm_rank(comm, &myid);
hypre_SetIndex3(zero_index, 0, 0, 0);
hypre_BoxInit(&refined_box, ndim);
hypre_BoxInit(&boxman_entry_box, ndim);
/*--------------------------------------------------------------------------
 * Task: Coarsen the CF interface connections of A into fac_A so that
 * fac_A will have the stencil coefficients extending into a coarsened
 * fbox. The centre coefficient is constructed to preserve the row sum.
 *--------------------------------------------------------------------------*/
/* The global-rank offset depends on the matrix object type (ghost-aware
 * for HYPRE_SSTRUCT, plain for HYPRE_PARCSR). */
if (graph_type == HYPRE_SSTRUCT)
{
startrank = hypre_SStructGridGhstartRank(grid);
}
if (graph_type == HYPRE_PARCSR)
{
startrank = hypre_SStructGridStartRank(grid);
}
/*--------------------------------------------------------------------------
 * Fine grid strides by the refinement factors.
 *--------------------------------------------------------------------------*/
hypre_SetIndex3(stridec, 1, 1, 1);
for (i= 0; i< ndim; i++)
{
stridef[i]= refine_factors[i];
}
for (i= ndim; i< 3; i++)
{
stridef[i]= 1;
}
/*--------------------------------------------------------------------------
 * Determine the c/f interface index boxes: fgrid_cinterface_extents.
 * These are between fpart= level and cpart= (level-1). The
 * fgrid_cinterface_extents are indexed by cboxes, but fboxes that
 * abutt a given cbox must be considered. Moreover, for each fbox,
 * we can have a c/f interface from a number of different stencil
 * directions- i.e., we have a boxarrayarray for each cbox, each
 * fbox leading to a boxarray.
 *
 * Algo.: For each cbox:
 * 1) refine & stretch by a unit in each dimension.
 * 2) boxman_intersect with the fgrid boxman to get all fboxes contained
 * or abutting this cbox.
 * 3) get the fgrid_cinterface_extents for each of these fboxes.
 *
 * fgrid_cinterface_extents[var1][ci]
 *--------------------------------------------------------------------------*/
A_pmatrix= hypre_SStructMatrixPMatrix(fac_A, part_crse);
nvars = hypre_SStructPMatrixNVars(A_pmatrix);
fgrid_cinterface_extents= hypre_TAlloc(hypre_BoxArrayArray **, nvars);
for (var1= 0; var1< nvars; var1++)
{
fboxman= hypre_SStructGridBoxManager(grid, part_fine, var1);
stencils= hypre_SStructPMatrixSStencil(A_pmatrix, var1, var1);
cgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A_pmatrix), var1);
cgrid_boxes= hypre_StructGridBoxes(cgrid);
fgrid_cinterface_extents[var1]= hypre_TAlloc(hypre_BoxArrayArray *,
hypre_BoxArraySize(cgrid_boxes));
hypre_ForBoxI(ci, cgrid_boxes)
{
cgrid_box= hypre_BoxArrayBox(cgrid_boxes, ci);
hypre_StructMapCoarseToFine(hypre_BoxIMin(cgrid_box), zero_index,
refine_factors, hypre_BoxIMin(&refined_box));
hypre_SetIndex3(index1, refine_factors[0]-1, refine_factors[1]-1,
refine_factors[2]-1);
hypre_StructMapCoarseToFine(hypre_BoxIMax(cgrid_box), index1,
refine_factors, hypre_BoxIMax(&refined_box));
/*------------------------------------------------------------------------
 * Stretch the refined_box so that a BoxManIntersect will get abutting
 * fboxes.
 *------------------------------------------------------------------------*/
for (i= 0; i< ndim; i++)
{
hypre_BoxIMin(&refined_box)[i]-= 1;
hypre_BoxIMax(&refined_box)[i]+= 1;
}
hypre_BoxManIntersect(fboxman, hypre_BoxIMin(&refined_box),
hypre_BoxIMax(&refined_box), &boxman_entries,
&nboxman_entries);
fgrid_cinterface_extents[var1][ci]= hypre_BoxArrayArrayCreate(nboxman_entries, ndim);
/*------------------------------------------------------------------------
 * Get the fgrid_cinterface_extents using var1-var1 stencil (only like-
 * variables couple).
 *------------------------------------------------------------------------*/
if (stencils != NULL)
{
for (i= 0; i< nboxman_entries; i++)
{
hypre_BoxManEntryGetExtents(boxman_entries[i],
hypre_BoxIMin(&boxman_entry_box),
hypre_BoxIMax(&boxman_entry_box));
hypre_CFInterfaceExtents2(&boxman_entry_box, cgrid_box, stencils, refine_factors,
hypre_BoxArrayArrayBoxArray(fgrid_cinterface_extents[var1][ci], i) );
}
}
hypre_TFree(boxman_entries);
} /* hypre_ForBoxI(ci, cgrid_boxes) */
} /* for (var1= 0; var1< nvars; var1++) */
/*--------------------------------------------------------------------------
 * STEP 1:
 * ADJUST THE ENTRIES ALONG THE C/F BOXES SO THAT THE COARSENED
 * C/F CONNECTION HAS THE APPROPRIATE ROW SUM.
 * WE ARE ASSUMING ONLY LIKE VARIABLES COUPLE.
 *--------------------------------------------------------------------------*/
for (var1= 0; var1< nvars; var1++)
{
cgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A_pmatrix), var1);
cgrid_boxes= hypre_StructGridBoxes(cgrid);
stencils= hypre_SStructPMatrixSStencil(A_pmatrix, var1, var1);
/*----------------------------------------------------------------------
 * Extract only where variables couple.
 *----------------------------------------------------------------------*/
if (stencils != NULL)
{
stencil_size= hypre_StructStencilSize(stencils);
/*------------------------------------------------------------------
 * stencil_ranks[i] = rank of stencil entry i.
 * rank_stencils[i] = stencil entry of rank i.
 *
 * These are needed in collapsing the unstructured connections to
 * a stencil connection.
 *------------------------------------------------------------------*/
stencil_ranks= hypre_TAlloc(HYPRE_Int, stencil_size);
rank_stencils= hypre_TAlloc(HYPRE_Int, max_stencil_size);
for (i= 0; i< max_stencil_size; i++)
{
rank_stencils[i]= -1;
if (i < stencil_size)
{
stencil_ranks[i]= -1;
}
}
for (i= 0; i< stencil_size; i++)
{
hypre_CopyIndex(hypre_StructStencilElement(stencils, i), stencil_shape_i);
MapStencilRank(stencil_shape_i, j);
stencil_ranks[i]= j;
rank_stencils[stencil_ranks[i]] = i;
}
/* rank 0 is the zero offset, i.e. the diagonal (centre) entry */
centre= rank_stencils[0];
smatrix_var = hypre_SStructPMatrixSMatrix(A_pmatrix, var1, var1);
a_ptrs = hypre_TAlloc(HYPRE_Real *, stencil_size);
hypre_ForBoxI(ci, cgrid_boxes)
{
cgrid_box= hypre_BoxArrayBox(cgrid_boxes, ci);
cinterface_arrays= fgrid_cinterface_extents[var1][ci];
A_dbox= hypre_BoxArrayBox(hypre_StructMatrixDataSpace(smatrix_var), ci);
/*-----------------------------------------------------------------
 * Ptrs to the correct data location.
 *-----------------------------------------------------------------*/
for (i= 0; i< stencil_size; i++)
{
hypre_CopyIndex(hypre_StructStencilElement(stencils, i), stencil_shape_i);
a_ptrs[i]= hypre_StructMatrixExtractPointerByIndex(smatrix_var,
ci,
stencil_shape_i);
}
/*-------------------------------------------------------------------
 * Loop over the c/f interface boxes and set the centre to be the row
 * sum. Coarsen the c/f connection and set the centre to preserve
 * the row sum of the composite operator along the c/f interface.
 *-------------------------------------------------------------------*/
hypre_ForBoxArrayI(fi, cinterface_arrays)
{
cinterface_array= hypre_BoxArrayArrayBoxArray(cinterface_arrays, fi);
box_array_size = hypre_BoxArraySize(cinterface_array);
for (boxi= stencil_size; boxi< box_array_size; boxi++)
{
fgrid_cinterface= hypre_BoxArrayBox(cinterface_array, boxi);
hypre_CopyIndex(hypre_BoxIMin(fgrid_cinterface), node_extents);
hypre_BoxGetSize(fgrid_cinterface, loop_size);
hypre_BoxLoop1Begin(ndim, loop_size,
A_dbox, node_extents, stridec, iA);
#if 0 /* Are private static arrays a problem? */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iA,lindex,i,index_temp,boxman_entry,rank,found,Uventry,nUentries,temp1,cnt1,ncols,rows,cols,temp2,vals,index2,index1,j) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop1For(iA)
{
hypre_BoxLoopGetIndex(lindex);
for (i= 0; i< stencil_size; i++)
{
if (i != centre)
{
a_ptrs[centre][iA]+= a_ptrs[i][iA];
}
}
/*-----------------------------------------------------------------
 * Search for unstructured connections for this coarse node. Need
 * to compute the index of the node. We will "collapse" the
 * unstructured connections to the appropriate stencil entry. Thus
 * we need to serch for the stencil entry.
 *-----------------------------------------------------------------*/
index_temp[0]= node_extents[0] + lindex[0];
index_temp[1]= node_extents[1] + lindex[1];
index_temp[2]= node_extents[2] + lindex[2];
hypre_SStructGridFindBoxManEntry(grid, part_crse, index_temp, var1,
&boxman_entry);
hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, index_temp, &rank,
matrix_type);
if (nUventries > 0)
{
found= falseV;
if ((rank-startrank) >= hypre_SStructGraphIUVEntry(graph, 0) &&
(rank-startrank) <= hypre_SStructGraphIUVEntry(graph, nUventries-1))
{
found= trueV;
}
}
/*-----------------------------------------------------------------
 * The graph has Uventries only if (nUventries > 0). Therefore,
 * check this. Only like variables contribute to the row sum.
 *-----------------------------------------------------------------*/
if (nUventries > 0 && found == trueV)
{
Uventry= hypre_SStructGraphUVEntry(graph, rank-startrank);
if (Uventry != NULL)
{
nUentries= hypre_SStructUVEntryNUEntries(Uventry);
/*-----------------------------------------------------------
 * extract only the connections to level part_fine and the
 * correct variable.
 *-----------------------------------------------------------*/
temp1= hypre_CTAlloc(HYPRE_Int, nUentries);
cnt1= 0;
for (i=0; i< nUentries; i++)
{
if (hypre_SStructUVEntryToPart(Uventry, i) == part_fine
&& hypre_SStructUVEntryToVar(Uventry, i) == var1)
{
temp1[cnt1++]= i;
}
}
ncols= hypre_TAlloc(HYPRE_Int, cnt1);
rows = hypre_TAlloc(HYPRE_Int, cnt1);
cols = hypre_TAlloc(HYPRE_Int, cnt1);
temp2= hypre_TAlloc(HYPRE_Int, cnt1);
vals = hypre_CTAlloc(HYPRE_Real, cnt1);
for (i= 0; i< cnt1; i++)
{
ncols[i]= 1;
rows[i] = rank;
cols[i] = hypre_SStructUVEntryToRank(Uventry, temp1[i]);
/* determine the stencil connection pattern */
hypre_StructMapFineToCoarse(
hypre_SStructUVEntryToIndex(Uventry, temp1[i]),
zero_index, stridef, index2);
hypre_SubtractIndexes(index2, index_temp,
ndim, index1);
MapStencilRank(index1, temp2[i]);
/* zero off this stencil connection into the fbox */
if (temp2[i] < max_stencil_size)
{
j= rank_stencils[temp2[i]];
if (j >= 0)
{
a_ptrs[j][iA]= 0.0;
}
}
} /* for (i= 0; i< cnt1; i++) */
hypre_TFree(temp1);
HYPRE_IJMatrixGetValues(ij_A, cnt1, ncols, rows, cols, vals);
for (i= 0; i< cnt1; i++)
{
a_ptrs[centre][iA]+= vals[i];
}
hypre_TFree(ncols);
hypre_TFree(rows);
hypre_TFree(cols);
/* compute the connection to the coarsened fine box */
for (i= 0; i< cnt1; i++)
{
if (temp2[i] < max_stencil_size)
{
j= rank_stencils[temp2[i]];
if (j >= 0)
{
a_ptrs[j][iA]+= vals[i];
}
}
}
hypre_TFree(vals);
hypre_TFree(temp2);
/* centre connection which preserves the row sum */
for (i= 0; i< stencil_size; i++)
{
if (i != centre)
{
a_ptrs[centre][iA]-= a_ptrs[i][iA];
}
}
} /* if (Uventry != NULL) */
} /* if (nUventries > 0) */
}
hypre_BoxLoop1End(iA);
} /* for (boxi= stencil_size; boxi< box_array_size; boxi++) */
} /* hypre_ForBoxArrayI(fi, cinterface_arrays) */
} /* hypre_ForBoxI(ci, cgrid_boxes) */
hypre_TFree(a_ptrs);
hypre_TFree(stencil_ranks);
hypre_TFree(rank_stencils);
} /* if (stencils != NULL) */
} /* end var1 */
/* Tear down the per-variable, per-cbox interface extents built above. */
for (var1= 0; var1< nvars; var1++)
{
cgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A_pmatrix), var1);
cgrid_boxes= hypre_StructGridBoxes(cgrid);
hypre_ForBoxI(ci, cgrid_boxes)
{
hypre_BoxArrayArrayDestroy(fgrid_cinterface_extents[var1][ci]);
}
hypre_TFree(fgrid_cinterface_extents[var1]);
}
hypre_TFree(fgrid_cinterface_extents);
return 0;
}
|
morn_image_transform.c | /*
Copyright (C) 2019-2020 JingWeiZhangHuai <jingweizhanghuai@163.com>
Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
#include "morn_image.h"
#define SRC(CN,X,Y) ((src)->data[CN][Y][X])
/* Builds the inverse-mapping grid for an image warp.  For every destination
 * pixel (i,j), x_func/y_func give the sub-pixel source coordinate.  The
 * integer parts are stored in gridx/gridy; the 4-bit bilinear weights are
 * packed into w (high nibble = x weight, low nibble = y weight, each 0..15).
 * Source coordinates outside the valid image area are marked with DFLT. */
void TransformGrid(MImage *src,float (*x_func)(int,int,void *),float (*y_func)(int,int,void *),void *para,MTable *gridx,MTable *gridy,MTable *w)
{
int i, j;
int dst_height = w->row;
int dst_width = w->col;
// printf("dst_height=%d,dst_width=%d\n",dst_height,dst_width);
// #pragma omp parallel for
for(j=0;j<dst_height;j++)for(i=0;i<dst_width;i++)
{
float ly = y_func(i,j,para);
float lx = x_func(i,j,para);
// nudge coordinates down by a tiny epsilon so exact-integer results
// truncate to the lower cell instead of straddling a cell boundary
if(lx > 0.00001f) lx -= 0.00001f;
if(ly > 0.00001f) ly -= 0.00001f;
short x_locate = (short)lx;
short y_locate = (short)ly;
// NOTE(review): the last bound test uses lx where the others use the
// truncated x_locate — presumably intentional, but worth confirming
if((y_locate<ImageY1(src))||(y_locate>=ImageY2(src)-1)||(x_locate<ImageX1(src,y_locate))||(lx>=ImageX2(src,y_locate)-1))
{
gridx->dataS16[j][i] = DFLT;
gridy->dataS16[j][i] = DFLT;
continue;
}
gridx->dataS16[j][i] = x_locate;
gridy->dataS16[j][i] = y_locate;
// quantize the fractional parts to 0..15 and pack both into one byte
x_locate = 15-(short)((lx-(float)x_locate)*15.0f+0.5f);
y_locate = 15-(short)((ly-(float)y_locate)*15.0f+0.5f);
w->dataU8[j][i] = (x_locate<<4)+y_locate;
}
}
/* Resamples src into dst using the precomputed inverse-mapping grids from
 * TransformGrid.  mode selects interpolation: bilinear (INTERPOLATION_CACL0)
 * or nearest-neighbour (INTERPOLATION_CACL1); pixels whose grid entry is
 * negative (DFLT) are written as 0.  One specialized loop is emitted per
 * (channel count, mode) combination.
 * NOTE(review): the stray trailing backslashes inside the first four loop
 * bodies are leftover macro line-continuations; they splice adjacent
 * statement lines, which is harmless here but confusing. */
void GridInterpolation(MImage *src,MImage *dst,MTable *gridx,MTable *gridy,MTable *w,int mode)
{
int i, j;
int height = dst->height;
int width = dst->width;
unsigned char **s0=src->data[0];unsigned char **s1=src->data[1];unsigned char **s2=src->data[2];unsigned char **s3=src->data[3];
unsigned char **d0=dst->data[0];unsigned char **d1=dst->data[1];unsigned char **d2=dst->data[2];unsigned char **d3=dst->data[3];
/* collapse mode to a boolean: 1 = nearest-neighbour, 0 = bilinear */
mode = ((mode | MORN_NEAREST) == MORN_NEAREST);
/* bilinear blend of the 2x2 neighbourhood using the packed 0..15 weights */
#define INTERPOLATION_CACL0(C) do{\
d##C[j][i]=((s##C[y][x]*wx+s##C[y][x+1]*(15-wx))*wy+(s##C[y+1][x]*wx+s##C[y+1][x+1]*(15-wx))*(15-wy)+112)/225;\
}while(0)
/* nearest neighbour: round each axis by comparing its weight against 8 */
#define INTERPOLATION_CACL1(C) do{\
d##C[j][i]=s##C[(wy<8)?y+1:y][(wx<8)?x+1:x];\
}while(0)
// printf("src->channel=%d,height=%d,width=%d\n",src->channel,height,width);
if((src->channel==1)&&(!mode))
{
// #pragma omp parallel for
for(j=0;j<height;j++)for(i=0;i<width;i++)
{
int x = gridx->dataS16[j][i]; if(x<0) {d0[j][i] = 0; continue;} \
int y = gridy->dataS16[j][i];/*if(y<0) {d0[j][i] = 0; continue;}*/\
int wx=(w->dataU8[j][i]>> 4);int wy=(w->dataU8[j][i]&0x0F);\
INTERPOLATION_CACL0(0);
}
return;
}
if((src->channel==1)&&(mode))
{
// #pragma omp parallel for
for(j=0;j<height;j++)for(i=0;i<width;i++)
{
int x = gridx->dataS16[j][i]; if(x<0) {d0[j][i] = 0; continue;} \
int y = gridy->dataS16[j][i];/*if(y<0) {d0[j][i] = 0; continue;}*/\
int wx=(w->dataU8[j][i]>> 4);int wy=(w->dataU8[j][i]&0x0F);\
INTERPOLATION_CACL1(0);
}
return;
}
if((src->channel==2)&&(!mode))
{
// #pragma omp parallel for
for(j=0;j<height;j++)for(i=0;i<width;i++)
{
int x = gridx->dataS16[j][i]; if(x<0) {d0[j][i] = 0;d1[j][i] = 0; continue;} \
int y = gridy->dataS16[j][i];/*if(y<0) {d0[j][i] = 0;d1[j][i] = 0; continue;}*/\
int wx=(w->dataU8[j][i]>> 4);int wy=(w->dataU8[j][i]&0x0F);\
INTERPOLATION_CACL0(0);INTERPOLATION_CACL0(1);
}
return;
}
if((src->channel==2)&&(mode))
{
// #pragma omp parallel for
for(j=0;j<height;j++)for(i=0;i<width;i++)
{
int x = gridx->dataS16[j][i]; if(x<0) {d0[j][i] = 0;d1[j][i] = 0; continue;} \
int y = gridy->dataS16[j][i];/*if(y<0) {d0[j][i] = 0;d1[j][i] = 0; continue;}*/\
int wx=(w->dataU8[j][i]>> 4);int wy=(w->dataU8[j][i]&0x0F);\
INTERPOLATION_CACL1(0);INTERPOLATION_CACL1(1);
}
return;
}
if((src->channel==3)&&(!mode))
{
// #pragma omp parallel for
for(j=0;j<height;j++)for(i=0;i<width;i++)
{
int x = gridx->dataS16[j][i]; if(x<0) {d0[j][i] = 0;d1[j][i] = 0;d2[j][i] = 0; continue;}
int y = gridy->dataS16[j][i];/*if(y<0) {d0[j][i] = 0;d1[j][i] = 0;d2[j][i] = 0; continue;}*/
int wx=(w->dataU8[j][i]>> 4);int wy=(w->dataU8[j][i]&0x0F);
INTERPOLATION_CACL0(0);INTERPOLATION_CACL0(1);INTERPOLATION_CACL0(2);
}
return;
}
if((src->channel==3)&&(mode))
{
// #pragma omp parallel for
for(j=0;j<height;j++)for(i=0;i<width;i++)
{
int x = gridx->dataS16[j][i]; if(x<0) {d0[j][i] = 0;d1[j][i] = 0;d2[j][i] = 0; continue;}
int y = gridy->dataS16[j][i];/*if(y<0) {d0[j][i] = 0;d1[j][i] = 0;d2[j][i] = 0; continue;}*/
int wx=(w->dataU8[j][i]>> 4);int wy=(w->dataU8[j][i]&0x0F);
INTERPOLATION_CACL1(0);INTERPOLATION_CACL1(1);INTERPOLATION_CACL1(2);
}
return;
}
if((src->channel==4)&&(!mode))
{
// #pragma omp parallel for
for(j=0;j<height;j++)for(i=0;i<width;i++)
{
int x = gridx->dataS16[j][i]; if(x<0) {d0[j][i] = 0;d1[j][i] = 0;d2[j][i] = 0;d3[j][i] = 0; continue;}
int y = gridy->dataS16[j][i];/*if(y<0) {d0[j][i] = 0;d1[j][i] = 0;d2[j][i] = 0;d3[j][i] = 0; continue;}*/
int wx=(w->dataU8[j][i]>> 4);int wy=(w->dataU8[j][i]&0x0F);
INTERPOLATION_CACL0(0);INTERPOLATION_CACL0(1);INTERPOLATION_CACL0(2);INTERPOLATION_CACL0(3);
}
return;
}
if((src->channel==4)&&(mode))
{
// #pragma omp parallel for
for(j=0;j<height;j++)for(i=0;i<width;i++)
{
int x = gridx->dataS16[j][i]; if(x<0) {d0[j][i] = 0;d1[j][i] = 0;d2[j][i] = 0;d3[j][i] = 0; continue;}
int y = gridy->dataS16[j][i];/*if(y<0) {d0[j][i] = 0;d1[j][i] = 0;d2[j][i] = 0;d3[j][i] = 0; continue;}*/
int wx=(w->dataU8[j][i]>> 4);int wy=(w->dataU8[j][i]&0x0F);
INTERPOLATION_CACL1(0);INTERPOLATION_CACL1(1);INTERPOLATION_CACL1(2);INTERPOLATION_CACL1(3);
}
return;
}
}
/* Cached state for mImageCoordinateTransform, keyed on the source image's
 * handle: the grids are rebuilt only when the output size or the mapping
 * functions change. */
struct HandleImageCoordinateTransform
{
int height;                      // cached destination height
int width;                       // cached destination width
float (*x_func)(int,int,void *); // x mapping used to build the grids
float (*y_func)(int,int,void *); // y mapping used to build the grids
MTable *lx;                      // integer source x per destination pixel
MTable *ly;                      // integer source y per destination pixel
MTable *w;                       // packed bilinear weights
};
/* Handle destructor: releases the cached transform tables.
 * Fix: the original released info->ly under the check `info->lx != NULL`
 * (copy-paste bug) — it could dereference/release ly based on lx's state.
 * Each table is now guarded by its own pointer. */
void endImageCoordinateTransform(void *handle)
{
struct HandleImageCoordinateTransform *info = (struct HandleImageCoordinateTransform *)handle;
if(info->lx != NULL) mTableRelease(info->lx);
if(info->ly != NULL) mTableRelease(info->ly);
if(info->w  != NULL) mTableRelease(info->w );
}
#define HASH_ImageCoordinateTransform 0x5f44c7bc
/* Warps src into dst through arbitrary per-pixel coordinate functions.
 * x_func/y_func map a destination pixel (i,j) to its source coordinate;
 * mode selects nearest vs. bilinear (see GridInterpolation).  The mapping
 * grids are cached on src's handle and rebuilt only when the destination
 * size or the mapping functions change.  If dst is NULL/invalid or equals
 * src, the result is written back into src. */
void mImageCoordinateTransform(MImage *src,MImage *dst,float (*x_func)(int,int,void *),float (*y_func)(int,int,void *),void *para,int mode)
{
mException(INVALID_IMAGE(src),EXIT,"invalid input");
MImage *p=dst;
// in-place or no destination: render into a temporary image
if(INVALID_POINTER(dst)||(dst==src))
dst = mImageCreate(src->channel,src->height,src->width,NULL);
int width; if(dst->width <=0) width = src->width; else width = dst->width;
int height; if(dst->height<=0) height= src->height; else height= dst->height;
mImageRedefine(dst,src->channel,height,width,dst->data);
memcpy(&(dst->info),&(src->info),sizeof(MInfo));
MHandle *hdl=mHandle(src,ImageCoordinateTransform);
struct HandleImageCoordinateTransform *handle = (struct HandleImageCoordinateTransform *)(hdl->handle);
// rebuild the cached grids only when size or mapping functions changed
if((hdl->valid == 0)||(handle->height!=height)||(handle->width!=width)||(handle->x_func!=x_func)||(handle->y_func!=y_func))
{
handle->height = height;
handle->width = width;
handle->x_func = x_func;
handle->y_func = y_func;
if(handle->lx==NULL) handle->lx=mTableCreate(height,width,sizeof(short),NULL);
else mTableRedefine(handle->lx,height,width,sizeof(short),NULL);
if(handle->ly==NULL) handle->ly=mTableCreate(height,width,sizeof(short),NULL);
else mTableRedefine(handle->ly,height,width,sizeof(short),NULL);
if(handle->w ==NULL) handle->w =mTableCreate(height,width,sizeof(unsigned char),NULL);
else mTableRedefine(handle->w ,height,width,sizeof(unsigned char),NULL);
TransformGrid(src,x_func,y_func,para,handle->lx,handle->ly,handle->w);
}
GridInterpolation(src,dst,handle->lx,handle->ly,handle->w,mode);
// in-place case: swap the rendered temporary into src, then free it
if(p!=dst)
{
mImageExchange(src,dst);
mImageRelease(dst);
}
hdl->valid = 1;
}
/* Solves for the 8 perspective (homography) parameters mapping destination
 * points pd[0..3] to source points ps[0..3].  Each point pair contributes
 * two rows to an 8x8 linear system (stored in an 8x9 augmented matrix);
 * the solution is written to para[0..7] for use by Perspective_x/_y. */
void PerspectivePara(MImagePoint *ps,MImagePoint *pd,float *para)
{
MMatrix *mat = mMatrixCreate(8,9,NULL,DFLT);
for(int n=0,j=0;n<4;n=n+1,j=j+2)
{
mat->data[j][0] = pd[n].x; mat->data[j+1][0] = 0.0f;
mat->data[j][1] = pd[n].y; mat->data[j+1][1] = 0.0f;
mat->data[j][2] = 1.0f; mat->data[j+1][2] = 0.0f;
mat->data[j][3] = 0.0f; mat->data[j+1][3] = pd[n].x;
mat->data[j][4] = 0.0f; mat->data[j+1][4] = pd[n].y;
mat->data[j][5] = 0.0f; mat->data[j+1][5] = 1.0f;
mat->data[j][6] = 0.0f-pd[n].x*ps[n].x; mat->data[j+1][6] = 0.0f-pd[n].x*ps[n].y;
mat->data[j][7] = 0.0f-pd[n].y*ps[n].x; mat->data[j+1][7] = 0.0f-pd[n].y*ps[n].y;
mat->data[j][8] = 0.0f-ps[n].x; mat->data[j+1][8] = 0.0f-ps[n].y;
}
mLinearEquation(mat,para);
mMatrixRelease(mat);
}
/* Perspective mapping, x component: given a destination pixel (u,v) and the
 * 8 homography coefficients in para (from PerspectivePara), return the
 * corresponding source x coordinate. */
float Perspective_x(int u,int v,void *para)
{
    const float *coef = (const float *)para;
    float numerator   = coef[0]*u + coef[1]*v + coef[2];
    float denominator = coef[6]*u + coef[7]*v + 1.0f;
    return numerator/denominator;
}
/* Perspective mapping, y component: given a destination pixel (u,v) and the
 * 8 homography coefficients in para (from PerspectivePara), return the
 * corresponding source y coordinate. */
float Perspective_y(int u,int v,void *para)
{
    const float *coef = (const float *)para;
    float numerator   = coef[3]*u + coef[4]*v + coef[5];
    float denominator = coef[6]*u + coef[7]*v + 1.0f;
    return numerator/denominator;
}
/* Cached state for mImagePerspectiveCorrection: the homography grids are
 * rebuilt only when the output size or the four source/destination control
 * points change. */
struct HandleImagePerspectiveCorrection
{
int height;        // cached destination height
int width;         // cached destination width
MImagePoint ps[4]; // source quadrilateral corners used for the cached grids
MImagePoint pd[4]; // destination quadrilateral corners
MTable *lx;        // integer source x per destination pixel
MTable *ly;        // integer source y per destination pixel
MTable *w;         // packed bilinear weights
};
/* Handle destructor: releases the cached perspective tables.
 * Fix: the original guarded the release of info->ly with `info->lx != NULL`
 * (copy-paste bug, same as endImageCoordinateTransform); each table is now
 * checked by its own pointer. */
void endImagePerspectiveCorrection(void *handle)
{
struct HandleImagePerspectiveCorrection *info = (struct HandleImagePerspectiveCorrection *)handle;
if(info->lx != NULL) mTableRelease(info->lx);
if(info->ly != NULL) mTableRelease(info->ly);
if(info->w  != NULL) mTableRelease(info->w );
}
#define HASH_ImagePerspectiveCorrection 0xdd819d48
/* Perspective-correct src into dst: the source quad ps[0..3] is warped onto
 * the destination quad pd[0..3] (both default to the full image rectangles
 * when NULL).  The backward-mapping tables are cached on src via mHandle
 * and rebuilt only when the quads or the output size change; mode selects
 * the interpolation used by GridInterpolation.
 * dst may be NULL or equal to src for in-place operation. */
void mImagePerspectiveCorrection(MImage *src,MImage *dst,MImagePoint *ps,MImagePoint *pd,int mode)
{
    mException(INVALID_IMAGE(src),EXIT,"invalid source image");
    MImage *p = dst;
    /* in-place (or NULL dst): render into a temporary, swapped back at the end */
    if(INVALID_POINTER(dst)||(dst==src))
        dst = mImageCreate(src->channel,src->height,src->width,NULL);
    else if((dst->height<=0)||(dst->width<=0))
        mImageRedefine(dst,src->channel,src->height,src->width,dst->data);
    else
        mImageRedefine(dst,src->channel,DFLT,DFLT,dst->data);
    memcpy(&(dst->info),&(src->info),sizeof(MInfo));
    int height = dst->height;
    int width = dst->width;
    MImagePoint p_s[4],p_d[4];
    /* default quads: the full source / destination rectangles */
    if(ps==NULL)
    {
        ps=p_s;
        mPoint(ps+0,         0,          0);
        mPoint(ps+1,src->width,          0);
        mPoint(ps+2,src->width,src->height);
        mPoint(ps+3,         0,src->height);
    }
    if(pd==NULL)
    {
        pd=p_d;
        mPoint(pd+0,    0,     0);
        mPoint(pd+1,width,     0);
        mPoint(pd+2,width,height);
        mPoint(pd+3,    0,height);
    }
    MHandle *hdl=mHandle(src,ImagePerspectiveCorrection);
    struct HandleImagePerspectiveCorrection *handle = (struct HandleImagePerspectiveCorrection *)(hdl->handle);
    /* rebuild the cached lookup tables only when the transform changed */
    if((hdl->valid == 0)
    ||(memcmp(ps,handle->ps,4*sizeof(MImagePoint))!=0)
    ||(memcmp(pd,handle->pd,4*sizeof(MImagePoint))!=0)
    ||(handle->height != height)
    ||(handle->width != width))
    {
        float para[8];
        PerspectivePara(ps,pd,para);
        if(handle->lx==NULL) handle->lx=mTableCreate(height,width,sizeof(short),NULL);
        else mTableRedefine(handle->lx,height,width,sizeof(short),NULL);
        if(handle->ly==NULL) handle->ly=mTableCreate(height,width,sizeof(short),NULL);
        else mTableRedefine(handle->ly,height,width,sizeof(short),NULL);
        if(handle->w ==NULL) handle->w =mTableCreate(height,width,sizeof(unsigned char),NULL);
        else mTableRedefine(handle->w ,height,width,sizeof(unsigned char),NULL);
        TransformGrid(src,Perspective_x,Perspective_y,para,handle->lx,handle->ly,handle->w);
        memcpy(handle->ps,ps,4*sizeof(MImagePoint));
        memcpy(handle->pd,pd,4*sizeof(MImagePoint));
        handle->height = height;
        handle->width = width;
    }
    GridInterpolation(src,dst,handle->lx,handle->ly,handle->w,mode);
    if(p!=dst)
    {
        /* in-place path: move the result back into the caller's image */
        mImageExchange(src,dst);
        mImageRelease(dst);
    }
    hdl->valid = 1;
}
/* Map destination pixel (u,v) to its source x-coordinate using the
 * 6-parameter affine transform in para (float[6]): x = a*u + b*v + c. */
float Affine_x(int u,int v,void *para)
{
    const float *c = (const float *)para;
    return c[0]*u + c[1]*v + c[2];
}
/* Map destination pixel (u,v) to its source y-coordinate using the
 * 6-parameter affine transform in para (float[6]): y = d*u + e*v + f. */
float Affine_y(int u,int v,void *para)
{
    const float *c = (const float *)para;
    return c[3]*u + c[4]*v + c[5];
}
/* Per-image cache for mImageAffineCorrection, stored via mHandle so
 * repeated calls with the same anchor triples and size skip the rebuild. */
struct HandleImageAffineCorrection
{
    int height;        /* dst size the tables were built for */
    int width;
    MImagePoint ps[3]; /* source anchor triple of the cached transform */
    MImagePoint pd[3]; /* destination anchor triple of the cached transform */
    MTable *lx;        /* coordinate table filled by TransformGrid (short) */
    MTable *ly;        /* coordinate table filled by TransformGrid (short) */
    MTable *w;         /* weight table filled by TransformGrid (unsigned char) */
};
/* mHandle destructor for mImageAffineCorrection: release the cached
 * coordinate tables (lx, ly) and the interpolation-weight table (w). */
void endImageAffineCorrection(void *handle)
{
    struct HandleImageAffineCorrection *info = (struct HandleImageAffineCorrection *)handle;
    if(info->lx != NULL) mTableRelease(info->lx);
    /* fixed: original guarded this release with info->lx, not info->ly
     * (copy-paste bug shared by the sibling end*Correction destructors) */
    if(info->ly != NULL) mTableRelease(info->ly);
    if(info->w  != NULL) mTableRelease(info->w );
}
#define HASH_ImageAffineCorrection 0x1670806b
/* Affine-correct src into dst: the three source anchors ps[0..2] are mapped
 * onto the three destination anchors pd[0..2].  The 6 affine parameters are
 * solved in closed form (Cramer's rule on the anchor differences) and the
 * backward-mapping tables are cached on src via mHandle.
 * NOTE(review): unlike mImagePerspectiveCorrection there is no NULL fallback
 * for ps/pd here — both must point to 3 valid points; confirm callers.
 * dst may be NULL or equal to src for in-place operation. */
void mImageAffineCorrection(MImage *src,MImage *dst,MImagePoint *ps,MImagePoint *pd,int mode)
{
    mException(INVALID_IMAGE(src),EXIT,"invalid source image");
    MImage *p = dst;
    /* in-place (or NULL dst): render into a temporary, swapped back at the end */
    if(INVALID_POINTER(dst)||(dst==src))
        dst = mImageCreate(src->channel,src->height,src->width,NULL);
    else if((dst->height<=0)||(dst->width<=0))
        mImageRedefine(dst,src->channel,src->height,src->width,dst->data);
    else
        mImageRedefine(dst,src->channel,DFLT,DFLT,dst->data);
    memcpy(&(dst->info),&(src->info),sizeof(MInfo));
    int height = dst->height;
    int width = dst->width;
    MHandle *hdl=mHandle(src,ImageAffineCorrection);
    struct HandleImageAffineCorrection *handle = (struct HandleImageAffineCorrection *)(hdl->handle);
    /* rebuild the cached lookup tables only when the transform changed */
    if((hdl->valid == 0)
    ||(memcmp(ps,handle->ps,3*sizeof(MImagePoint))!=0)
    ||(memcmp(pd,handle->pd,3*sizeof(MImagePoint))!=0)
    ||(handle->height != height)
    ||(handle->width != width))
    {
        float para[6];
        /* destination-anchor differences form the 2x2 system matrix */
        float a1 = pd[1].x - pd[0].x;
        float a2 = pd[2].x - pd[0].x;
        float b1 = pd[1].y - pd[0].y;
        float b2 = pd[2].y - pd[0].y;
        float c = (a1*b2 -a2*b1);
        /* determinant 0 means the three anchors are collinear */
        mException((c==0.0f),EXIT,"invalid achor point");
        float c1,c2;
        c1 = ps[1].x-ps[0].x;
        c2 = ps[2].x-ps[0].x;
        para[0] = (c1*b2 - c2*b1)/c;
        para[1] = (c2*a1 - c1*a2)/c;
        para[2] = ps[0].x - (para[0]*pd[0].x + para[1]*pd[0].y);
        c1 = ps[1].y-ps[0].y;
        c2 = ps[2].y-ps[0].y;
        para[3] = (c1*b2 - c2*b1)/c;
        para[4] = (c2*a1 - c1*a2)/c;
        para[5] = ps[0].y - (para[3]*pd[0].x + para[4]*pd[0].y);
        if(handle->lx==NULL) handle->lx=mTableCreate(height,width,sizeof(short),NULL);
        else mTableRedefine(handle->lx,height,width,sizeof(short),NULL);
        if(handle->ly==NULL) handle->ly=mTableCreate(height,width,sizeof(short),NULL);
        else mTableRedefine(handle->ly,height,width,sizeof(short),NULL);
        if(handle->w ==NULL) handle->w =mTableCreate(height,width,sizeof(unsigned char),NULL);
        else mTableRedefine(handle->w ,height,width,sizeof(unsigned char),NULL);
        TransformGrid(src,Affine_x,Affine_y,para,handle->lx,handle->ly,handle->w);
        memcpy(handle->ps,ps,3*sizeof(MImagePoint));
        memcpy(handle->pd,pd,3*sizeof(MImagePoint));
        handle->height = height;
        handle->width = width;
    }
    GridInterpolation(src,dst,handle->lx,handle->ly,handle->w,mode);
    if(p!=dst)
    {
        /* in-place path: move the result back into the caller's image */
        mImageExchange(src,dst);
        mImageRelease(dst);
    }
    hdl->valid = 1;
}
/* Rotate src by a quarter turn into dst (dst gets swapped dimensions).
 * Mapping: dst[j][i] = src[srcHeight-1-i][j], i.e. 90 degrees clockwise.
 * dst must be a valid image distinct from src. */
void ImageRotate90(MImage *src,MImage *dst)
{
    int i,j,cn;
    mImageRedefine(dst,src->channel,src->width,src->height,dst->data);
    /* height/width are the LAST valid dst indices, not the sizes */
    int height = dst->height-1;
    int width = dst->width-1;
    for(cn=0;cn<dst->channel;cn++)
    {
        unsigned char **sdata = src->data[cn];
        unsigned char **ddata = dst->data[cn];
        for(j=0;j<=height;j++)
            for(i=0;i<=width;i++)
                ddata[j][i] = sdata[width-i][j];
    }
}
/* Rotate src by 180 degrees into dst (same dimensions):
 * dst[j][i] = src[srcHeight-1-j][srcWidth-1-i].
 * dst must be a valid image distinct from src. */
void ImageRotate180(MImage *src,MImage *dst)
{
    int i,j,cn;
    mImageRedefine(dst,src->channel,src->height,src->width,dst->data);
    /* height/width are the LAST valid dst indices, not the sizes */
    int height = dst->height-1;
    int width = dst->width-1;
    for(cn=0;cn<dst->channel;cn++)
    {
        unsigned char **sdata = src->data[cn];
        unsigned char **ddata = dst->data[cn];
        for(j=0;j<=height;j++)
            for(i=0;i<=width;i++)
                ddata[j][i] = sdata[height-j][width-i];
    }
}
/* Rotate src by a quarter turn counter-clockwise (= 270 degrees clockwise)
 * into dst (dst gets swapped dimensions):
 * dst[j][i] = src[i][srcWidth-1-j].
 * dst must be a valid image distinct from src. */
void ImageRotate270(MImage *src,MImage *dst)
{
    int i,j,cn;
    mImageRedefine(dst,src->channel,src->width,src->height,dst->data);
    /* height/width are the LAST valid dst indices, not the sizes */
    int height = dst->height-1;
    int width = dst->width-1;
    for(cn=0;cn<dst->channel;cn++)
    {
        unsigned char **sdata = src->data[cn];
        unsigned char **ddata = dst->data[cn];
        for(j=0;j<=height;j++)
            for(i=0;i<=width;i++)
                ddata[j][i] = sdata[i][height-j];
    }
}
/* Per-image cache for mImageRotate, stored via mHandle so repeated calls
 * with the same centers, angle and size skip the table rebuild. */
struct HandleImageRotate
{
    int height;           /* dst size the tables were built for */
    int width;
    MImagePoint src_hold; /* rotation center in the source image */
    MImagePoint dst_hold; /* rotation center in the destination image */
    float angle;          /* cached rotation angle */
    MTable *lx;           /* coordinate table filled by TransformGrid (short) */
    MTable *ly;           /* coordinate table filled by TransformGrid (short) */
    MTable *w;            /* weight table filled by TransformGrid (unsigned char) */
};
/* mHandle destructor for mImageRotate: release the cached coordinate
 * tables (lx, ly) and the interpolation-weight table (w). */
void endImageRotate(void *handle)
{
    struct HandleImageRotate *info = (struct HandleImageRotate *)handle;
    if(info->lx != NULL) mTableRelease(info->lx);
    /* fixed: original guarded this release with info->lx, not info->ly
     * (copy-paste bug shared by the sibling end* destructors) */
    if(info->ly != NULL) mTableRelease(info->ly);
    if(info->w  != NULL) mTableRelease(info->w );
}
#define HASH_ImageRotate 0x35b8aedf
/* Rotate src by `angle` around src_hold (default: image center) into dst,
 * placing the source center at dst_hold (default: dst center); mode selects
 * the interpolation used by GridInterpolation.  angle appears to be in
 * degrees (cf. the ==90/180/270 fast paths) — mSin/mCos semantics are not
 * visible here, confirm.  Exact 90/180/270-degree center rotations are
 * dispatched to the fast pixel-copy routines above; the general case uses
 * cached backward-mapping tables (rebuilt only when parameters change).
 * dst may be NULL or equal to src for in-place operation; a non-positive
 * dst size requests the bounding-box size of the rotated image. */
void mImageRotate(MImage *src,MImage *dst,MImagePoint *src_hold,MImagePoint *dst_hold,float angle,int mode)
{
    mException(INVALID_IMAGE(src),EXIT,"invalid source image");
    if(INVALID_POINTER(dst)) dst=src;
    float scx,scy,dcx,dcy;
    /* source rotation center (defaults to the image center) */
    if(src_hold == NULL) {scx = ((float)(src->width))/2.0f; scy = ((float)(src->height))/2.0f;}
    else                 {scx = src_hold->x;                scy = src_hold->y;                }
    int height=dst->height;int width=dst->width;
    /* zero angle degenerates to a centered crop */
    if(angle==0.0f)
    {
        mImageCut(src,dst,scx-width/2,scx+width/2,scy-height/2,scy+height/2);
        return;
    }
    float sn = mSin(angle);
    float cs = mCos(angle);
    MImage *p = dst;
    if(dst==src)
    {
        /* in place: size the temporary to the rotated bounding box */
        height = (int)(src->height*ABS(cs)+src->width*ABS(sn));
        width = (int)(src->height*ABS(sn)+src->width*ABS(cs));
        if(dst_hold != NULL)
        {
            float x =((scx+scx-src->width)*cs - (scy+scy-src->height)*sn + height)/2.0f;
            float y =((scx+scx-src->width)*sn + (scy+scy-src->height)*cs + width)/2.0f;
            width = width + (int)(x - dst_hold->x);
            /* NOTE(review): `y + dst_hold->y` is asymmetric with the
             * `x - dst_hold->x` adjustment above — possible sign bug, verify */
            height= height+ (int)(y + dst_hold->y);
        }
        dst = mImageCreate(src->channel,height,width,NULL);
    }
    else if((height<=0)||(width<=0))
    {
        /* unsized dst: same bounding-box computation as the in-place path */
        height = (int)(src->height*ABS(cs)+src->width*ABS(sn));
        width = (int)(src->height*ABS(sn)+src->width*ABS(cs));
        if(dst_hold != NULL)
        {
            float x =((scx+scx-src->width)*cs - (scy+scy-src->height)*sn + height)/2.0f;
            float y =((scx+scx-src->width)*sn + (scy+scy-src->height)*cs + width)/2.0f;
            width = width + (int)(x - dst_hold->x);
            /* NOTE(review): same `y +` vs `x -` asymmetry as above — verify */
            height= height+ (int)(y + dst_hold->y);
        }
        mImageRedefine(dst,src->channel,height,width,dst->data);
    }
    else
        mImageRedefine(dst,src->channel,DFLT,DFLT,dst->data);
    mException(INVALID_IMAGE(dst),EXIT,"invalid error");
    memcpy(&(dst->info),&(src->info),sizeof(MInfo));
    /* destination rotation center (defaults to the dst center) */
    if(dst_hold == NULL) {dcx = ((float)width)/2.0f;dcy = ((float)height)/2.0f;}
    else                 {dcx = dst_hold->x;        dcy = dst_hold->y;        }
    /* fast paths: exact quarter/half turns about matching centers */
    if(angle == 90.0f)
        if((scx == dcy)&&(scy == dcx))
            if((src->width == height)&&(src->height == width))
                {ImageRotate90(src,dst);if(p!=dst){mImageExchange(src,dst);mImageRelease(dst);}return;}
    if(angle == 180.0f)
        if((scx == dcx)&&(scy == dcy))
            if((src->width == width)&&(src->height == height))
                {ImageRotate180(src,dst);if(p!=dst){mImageExchange(src,dst);mImageRelease(dst);}return;}
    if(angle == 270.0f)
        if((scx == dcy)&&(scy == dcx))
            if((src->width == height)&&(src->height == width))
                {ImageRotate270(src,dst);if(p!=dst){mImageExchange(src,dst);mImageRelease(dst);}return;}
    MHandle *hdl=mHandle(src,ImageRotate);
    struct HandleImageRotate *handle = (struct HandleImageRotate *)(hdl->handle);
    /* rebuild the cached lookup tables only when the transform changed */
    if((hdl->valid == 0)
    ||(handle->src_hold.x != scx)||(handle->src_hold.y != scy)
    ||(handle->dst_hold.x != dcx)||(handle->dst_hold.y != dcy)
    ||(handle->angle !=angle)
    ||(handle->height != height)
    ||(handle->width != width))
    {
        /* rotation expressed as the affine transform used by Affine_x/_y */
        float para[6];
        para[0] = cs;
        para[1] = sn;
        para[2] = scx - dcx*cs - dcy*sn;
        para[3] = 0-sn;
        para[4] = cs;
        para[5] = scy + dcx*sn - dcy*cs;
        if(handle->lx==NULL) handle->lx=mTableCreate(height,width,sizeof(short),NULL);
        else mTableRedefine(handle->lx,height,width,sizeof(short),NULL);
        if(handle->ly==NULL) handle->ly=mTableCreate(height,width,sizeof(short),NULL);
        else mTableRedefine(handle->ly,height,width,sizeof(short),NULL);
        if(handle->w ==NULL) handle->w =mTableCreate(height,width,sizeof(unsigned char),NULL);
        else mTableRedefine(handle->w ,height,width,sizeof(unsigned char),NULL);
        TransformGrid(src,Affine_x,Affine_y,para,handle->lx,handle->ly,handle->w);
        handle->src_hold.x = scx; handle->src_hold.y = scy;
        handle->dst_hold.x = dcx; handle->dst_hold.y = dcy;
        handle->angle = angle;
        handle->height = height;
        handle->width = width;
    }
    GridInterpolation(src,dst,handle->lx,handle->ly,handle->w,mode);
    if(p!=dst)
    {
        /* in-place path: move the result back into the caller's image */
        mImageExchange(src,dst);
        mImageRelease(dst);
    }
    hdl->valid = 1;
}
/* TRANSFORM_VALUE(Src,Sx,Sy,Dst,Dx,Dy):
 * write into Dst at integer pixel (Dx,Dy) the bilinear sample of Src at
 * the float coordinate (Sx,Sy); coordinates outside [0,w-1]x[0,h-1]
 * produce 0 in every channel.
 * NOTE(review): the channel loops iterate over lowercase `dst->channel`,
 * not `Dst->channel`, so a variable named `dst` must be in scope at every
 * expansion site — confirm this capture is intentional.
 * Currently unused in this chunk (the only visible caller,
 * mImageCoordinateTransform below, is commented out). */
#define TRANSFORM_VALUE(Src,Sx,Sy,Dst,Dx,Dy) {\
    int Y1,Y2,X1,X2;\
    float Wx,Wy;\
    float W11,W12,W21,W22;\
    int Cn;\
    \
    if((Sx<0)||(Sy<0)||(Sx>Src->width-1)||(Sy>Src->height-1))\
    {\
        for(Cn=0;Cn<dst->channel;Cn++)\
            Dst->data[Cn][Dy][Dx] = 0;\
    }\
    else\
    {\
        Y1 = (int)Sy;\
        Y2 = Y1+1;\
        Wy = Sy-(float)Y1;\
        \
        X1 = (int)Sx;\
        X2 = X1+1;\
        Wx = Sx-(float)X1;\
        \
        W22 = Wx*Wy;\
        W11 = 1.0f-Wx-Wy+W22;\
        W21 = Wx-W22;\
        W12 = Wy-W22;\
        \
        for(Cn=0;Cn<dst->channel;Cn++)\
            Dst->data[Cn][Dy][Dx] = Src->data[Cn][Y1][X1]*W11+Src->data[Cn][Y1][X2]*W21+Src->data[Cn][Y2][X1]*W12+Src->data[Cn][Y2][X2]*W22;\
    }\
}
/*
void mImageCoordinateTransform(MImage *src,MImage *dst,float (*x_func)(int,int),float (*y_func)(int,int))
{
int i,j,cn;
float lx,ly;
int x1,x2,y1,y2;
float wx,wy;
float w11,w12,w21,w22;
MImage *p;
mException(INVALID_IMAGE(src),"invalid input",EXIT);
p=dst;
if(INVALID_POINTER(dst)||(dst==src))
dst = mImageCreate(src->channel,src->height,src->width,NULL);
else
{
mException(INVALID_IMAGE(dst),"invalid input",EXIT);
mException((src->channel != dst->channel),"invalid input",EXIT);
}
for(j=0;j<dst->height;j++)
for(i=0;i<dst->width;i++)
{
lx = x_func(i,j);
ly = y_func(i,j);
y1 = (int)ly;
y2 = y1+1;
wy = ly-(float)y1;
x1 = (int)lx;
x2 = x1+1;
wx = lx-(float)x1;
w22 = wx*wy;
w11 = 1.0f-wx-wy+w22;
w21 = wx-w22;
w12 = wy-w22;
for(cn=0;cn<dst->channel;cn++)
dst->data[cn][j][i] = SRC(cn,x1,y1)*w11+SRC(cn,x2,y1)*w21+SRC(cn,x1,y2)*w12+SRC(cn,x2,y2)*w22;
}
if(p!=dst)
{
mImageExchange(src,dst);
mImageRelease(dst);
}
}
*/
/* A rectangular deformation patch: for each covered destination pixel the
 * matrices x/y give the source coordinate to sample (values <= 0 leave the
 * pixel unchanged); (locate_x, locate_y) anchors the patch's top-left
 * corner in the image and may be negative (patch partly off-image). */
struct DeformationTemplate {
    int locate_x;
    int locate_y;
    MMatrix *x;
    MMatrix *y;
};
/* Warp the region of src covered by the deformation template into dst using
 * backward mapping with bilinear interpolation (see struct
 * DeformationTemplate for the template semantics).  dst may be NULL or
 * equal to src for in-place operation; otherwise it must match src's
 * channel count and size. */
void ImageDeformation(MImage *src,MImage *dst,struct DeformationTemplate *temp)
{
    int i,j,cn,m,n;
    float lx,ly;
    int x1,x2,y1,y2;
    float wx,wy;
    float w11,w12,w21,w22;
    MImage *p;
    MMatrix *tx,*ty;
    int si,ei,sj,ej;
    mException(INVALID_IMAGE(src)||INVALID_POINTER(temp),EXIT,"invalid input");
    mException(((temp->locate_x>=src->width)||(temp->locate_y>=src->height)),EXIT,"invalid temp");
    tx = temp->x;
    ty = temp->y;
    /* Clip the template against the left/top borders: (m0,n0) is the first
     * template column/row used, (si,sj) the matching image column/row. */
    if(temp->locate_x <0) {m=0-temp->locate_x; si=0;}
    else                  {m=0; si=temp->locate_x;}
    if(temp->locate_y <0) {n=0-temp->locate_y; sj=0;}
    else                  {n=0; sj=temp->locate_y;}
    /* fixed: clip against src's size — dst may legitimately still be NULL
     * here (the original read dst->width/dst->height before dst was
     * resolved), and every valid dst is required to match src's size */
    ei = MIN(src->width ,(si+tx->col));
    ej = MIN(src->height,(sj+tx->row));
    int m0 = m, n0 = n;
    p=dst;
    if(INVALID_POINTER(dst)||(dst==src))
    {
        /* in place: keep a pristine copy of the affected region to sample from */
        dst = src;
        src = mImageCreate(dst->channel,dst->height,dst->width,NULL);
        for(cn=0;cn<dst->channel;cn++)
            /* fixed: restart the row counter per channel instead of letting
             * the shared template index n advance across channels and then
             * be reused, stale, by the warp loop below */
            for(j=sj,n=n0;(j<dst->height)&&(n<tx->row);j++,n++)
                memcpy(src->data[cn][j]+si,dst->data[cn][j]+si,(ei-si)*sizeof(unsigned char));
    }
    else
    {
        mException(INVALID_IMAGE(dst),EXIT,"invalid input");
        mException((src->channel != dst->channel)||(src->height!=dst->height)||(src->width!=dst->width),EXIT,"invalid input");
        for(cn=0;cn<dst->channel;cn++)
            for(j=0;j<dst->height;j++)
                memcpy(dst->data[cn][j],src->data[cn][j],src->width*sizeof(unsigned char));
    }
    for(j=sj,n=n0;j<ej;j++,n++)
        /* fixed: m restarts at m0 on every row (the original let it keep
         * incrementing across rows, reading past the template's columns) */
        for(i=si,m=m0;i<ei;i++,m++)
        {
            lx = tx->data[n][m];
            ly = ty->data[n][m];
            if((lx<=0.0f)||(ly<=0.0f))
                continue;   /* template says: keep the copied pixel */
            /* bilinear sample of the (saved) source at (lx,ly) */
            y1 = (int)ly;
            y2 = y1+1;
            wy = ly-(float)y1;
            x1 = (int)lx;
            x2 = x1+1;
            wx = lx-(float)x1;
            w22 = wx*wy;
            w11 = 1.0f-wx-wy+w22;
            w21 = wx-w22;
            w12 = wy-w22;
            for(cn=0;cn<dst->channel;cn++)
                dst->data[cn][j][i] = (unsigned char)(SRC(cn,x1,y1)*w11+SRC(cn,x2,y1)*w21+SRC(cn,x1,y2)*w12+SRC(cn,x2,y2)*w22);
        }
    /* NOTE(review): on the in-place/NULL-dst path the roles of src and dst
     * are swapped above, so this tail differs from the sibling functions —
     * verify ownership/lifetime of the temporary against mImageExchange. */
    if(p!=dst)
    {
        mImageExchange(src,dst);
        mImageRelease(dst);
    }
}
/* Radial warp kernel: for a point (x_in,y_in) inside the circle of radius R
 * around (cx,cy), scale its offset from the center by
 * (k-1)*(1 - d/R)^2 + 1 (d = distance from center) and write the resulting
 * integer coordinate to (*x_out,*y_out).  Points on or outside the circle
 * get DFLT in both outputs. */
void TemplateCacuate(int x_in,int y_in,int R,float k,float cx,float cy,int *x_out,int *y_out)
{
    float dx = x_in - cx;
    float dy = y_in - cy;
    float dist2 = dx*dx + dy*dy;
    if(dist2 >= (float)(R*R))
    {
        *x_out = DFLT;
        *y_out = DFLT;
        return;
    }
    float t = 1.0f - ((float)sqrt(dist2))/((float)R);
    float scale = (k-1.0f)*t*t + 1.0f;
    *x_out = (int)(cx + dx*scale);
    *y_out = (int)(cy + dy*scale);
}
/*
int GrtTemplate(struct DeformationTemplate *tep,MList *curve,int R,float k)
{
int height;
int width;
int i,j;
int x0,y0,x1,y1;
int n;
MImagePoint **point;
height = tep->x->row;
width = tep->x->col;
point = curve->data;
d1 = (point[0]->x)*(point[0]->x)+(point[0]->y)*(point[0]->y);
d2 = (point[1]->x)*(point[1]->x)+(point[1]->y)*(point[1]->y);
min = d1+d2;
min_index=0;
n=1;
while(n+1<curve->num)
{
d1 = d2;
d2 = (point[n+1]->x)*(point[n+1]->x)+(point[n+1]->y)*(point[n+1]->y);
if(d1+d2<min)
{
min = d1+d2;
min_index = n;
}
n=n+1;
}
TemplateCacuate(0,0,R,k,point[min_index]->x,point[min_index]->y,point[min_index+1]->x,point[min_index+1]->y,tep->x->data[0][0],tep->y->data[0][0]);
for(j=0;j<height;j=j+2)
{
for(i=0;i<width;i++)
{
while((n>0)&&(n<curve->num))
{
d1 =
*/
/* Build backward-mapping tables for mImageReshape: lx/ly receive the integer
 * source coordinate for every pixel and w the packed 4+4-bit bilinear
 * weights.  The displacement implied by moving the control points in src
 * toward those in dst is evaluated on a coarse 32x32 lattice with
 * inverse-distance weighting and then bilinearly interpolated per pixel. */
void mImageReshapeTemplate(MList *src,MList *dst,MTable *lx,MTable *ly,MTable *w)
{
    int height = w->row;
    int width = w->col;
    int x_step = (width+30)/31;
    int y_step = (height+30)/31;
    float area = (float)(x_step*y_step);
    int i,j,k,m,n;
    float x_locate[32][32];
    float y_locate[32][32];
    MImagePoint **ps = (MImagePoint **)(src->data);
    MImagePoint **pd = (MImagePoint **)(dst->data);
    #define DISTANCE(x1,y1,x2,y2) (float)(sqrt((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)))
    /* coarse lattice: inverse-distance-weighted shift at each node */
    for(n=0,j=0;n<32;n++,j+=y_step)
        for(m=0,i=0;m<32;m++,i+=x_step)
        {
            float sum = 0.0f;
            float dx = 0.0f;
            float dy = 0.0f;
            for(k=0;k<dst->num;k++)
            {
                float d = DISTANCE(i,j,pd[k]->x,pd[k]->y);
                if(d == 0.0f)
                {
                    /* node coincides with a control point: use its shift exactly */
                    dx = (pd[k]->x - ps[k]->x);
                    dy = (pd[k]->y - ps[k]->y);
                    break;
                }
                sum += 1.0f/d;
                dx += (pd[k]->x - ps[k]->x)/d;
                dy += (pd[k]->y - ps[k]->y)/d;
            }
            if(k == dst->num)
            {
                dx = dx/sum;
                dy = dy/sum;
            }
            x_locate[n][m] = dx + i;
            y_locate[n][m] = dy + j;
        }
    /* fixed: i, m and n are function-scope and were implicitly shared by
     * the OpenMP threads — a data race; they must be private per thread
     * (j is the loop control variable and already private) */
    #pragma omp parallel for private(i,m,n)
    for(j=0;j<height;j++)
    {
        n = j/y_step;
        float wy2 = (float)(j%y_step);
        float wy1 = y_step - wy2;
        for(i=0;i<width;i++)
        {
            m = i/x_step;
            float wx2 = (float)(i%x_step);
            float wx1 = x_step-wx2;
            /* bilinear blend of the four surrounding lattice nodes */
            float x =((x_locate[n  ][m]*wx1 + x_locate[n  ][m+1]*wx2)*wy1
                    + (x_locate[n+1][m]*wx1 + x_locate[n+1][m+1]*wx2)*wy2)/area;
            float y =((y_locate[n  ][m]*wx1 + y_locate[n  ][m+1]*wx2)*wy1
                    + (y_locate[n+1][m]*wx1 + y_locate[n+1][m+1]*wx2)*wy2)/area;
            int wx = (int)x;
            int wy = (int)y;
            lx->dataS16[j][i] = wx;
            ly->dataS16[j][i] = wy;
            /* quantize the fractional parts to 1/16 and pack into one byte */
            wx = 16-(int)((x-(float)wx)/0.0625f+0.5f);
            wy = 16-(int)((y-(float)wy)/0.0625f+0.5f);
            w->dataU8[j][i] = (wx<<4)+wy;
        }
    }
}
/* Per-image cache for mImageReshape (kept via mHandle): the coordinate and
 * weight tables produced by mImageReshapeTemplate. */
struct HandleImageReshape
{
    MTable *lx; /* per-pixel source x (short) */
    MTable *ly; /* per-pixel source y (short) */
    MTable *w;  /* packed bilinear weights (unsigned char) */
};
/* mHandle destructor for mImageReshape: release the cached coordinate
 * tables (lx, ly) and the interpolation-weight table (w). */
void endImageReshape(void *info)
{
    struct HandleImageReshape *handle = (struct HandleImageReshape *)info;
    if(handle->lx != NULL) mTableRelease(handle->lx);
    /* fixed: original guarded this release with handle->lx, not handle->ly
     * (copy-paste bug shared by the sibling end* destructors) */
    if(handle->ly != NULL) mTableRelease(handle->ly);
    if(handle->w  != NULL) mTableRelease(handle->w );
}
#define HASH_ImageReshape 0xe21f102e
/* Reshape src into dst by pulling the control points in src_point toward
 * their targets in dst_point (see mImageReshapeTemplate); mode selects the
 * interpolation used by GridInterpolation.
 * dst may be NULL or equal to src for in-place operation.
 * NOTE(review): unlike the other correction functions the tables are
 * rebuilt unconditionally on every call (no parameter comparison), and the
 * mHandle is attached to dst rather than src — confirm both are intended. */
void mImageReshape(MImage *src,MImage *dst,MList *src_point,MList *dst_point,int mode)
{
    mException(INVALID_IMAGE(src),EXIT,"invalid source image");
    MImage *p = dst;
    /* in-place (or NULL dst): render into a temporary, swapped back at the end */
    if(INVALID_POINTER(dst)||(dst==src))
        dst = mImageCreate(src->channel,src->height,src->width,NULL);
    else if((dst->height<=0)||(dst->width<=0))
        mImageRedefine(dst,src->channel,src->height,src->width,dst->data);
    else
        mImageRedefine(dst,src->channel,DFLT,DFLT,dst->data);
    memcpy(&(dst->info),&(src->info),sizeof(MInfo));
    MHandle *hdl=mHandle(dst,ImageReshape);
    struct HandleImageReshape *handle = (struct HandleImageReshape *)(hdl->handle);
    {
        if(handle->lx == NULL) handle->lx= mTableCreate(dst->height,dst->width,sizeof(short),NULL);
        else mTableRedefine(handle->lx,dst->height,dst->width,sizeof(short),NULL);
        if(handle->ly == NULL) handle->ly= mTableCreate(dst->height,dst->width,sizeof(short),NULL);
        else mTableRedefine(handle->ly,dst->height,dst->width,sizeof(short),NULL);
        if(handle-> w == NULL) handle->w = mTableCreate(dst->height,dst->width,sizeof(unsigned char),NULL);
        else mTableRedefine(handle->w ,dst->height,dst->width,sizeof(unsigned char),NULL);
    }
    mImageReshapeTemplate(src_point,dst_point,handle->lx,handle->ly,handle->w);
    GridInterpolation(src,dst,handle->lx,handle->ly,handle->w,mode);
    if(p!=dst)
    {
        /* in-place path: move the result back into the caller's image */
        mImageExchange(src,dst);
        mImageRelease(dst);
    }
    hdl->valid = 1;
}
|
opencl_krb5pa-sha1_fmt_plug.c | /*
* Kerberos 5 "PA ENC TIMESTAMP" by magnum & Dhiru
*
* Pcap file -> input file:
* 1. tshark -r capture.pcapng -T pdml > ~/capture.pdml
* 2. krbng2john.py ~/capture.pdml > krb5.in
* 3. Run john on krb5.in
*
* http://www.ietf.org/rfc/rfc4757.txt
* http://www.securiteam.com/windowsntfocus/5BP0H0A6KM.html
*
* Input format is 'user:$krb5pa$etype$user$realm$salt$timestamp+checksum'
*
* NOTE: Checksum implies last 12 bytes of PA_ENC_TIMESTAMP value in AS-REQ
* packet.
*
* Default Salt: realm + user
*
* AES-256 encryption & decryption of AS-REQ timestamp in Kerberos v5
* See the following RFC for more details about the crypto & algorithms used:
*
* RFC3961 - Encryption and Checksum Specifications for Kerberos 5
* RFC3962 - Advanced Encryption Standard (AES) Encryption for Kerberos 5
*
* march 09 / kevin devine <wyse101 0x40 gmail.com>
*
* This software is Copyright (c) 2012 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
* This software is Copyright (c) 2012 Dhiru Kholia (dhiru at openwall.com) and
* released under same terms as above
*/
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_krb5pa_sha1;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_krb5pa_sha1);
#else
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#include "arch.h"
#include "misc.h"
#include "formats.h"
#include "options.h"
#include "common.h"
#include "unicode.h"
#include "config.h"
#include "aes.h"
#include "common-opencl.h"
#define OUTLEN 32
#include "opencl_pbkdf2_hmac_sha1.h"
#include "hmac_sha.h"
#include "loader.h"
#define FORMAT_LABEL "krb5pa-sha1-opencl"
#define FORMAT_NAME "Kerberos 5 AS-REQ Pre-Auth etype 17/18" /* aes-cts-hmac-sha1-96 */
#define FORMAT_TAG "$krb5pa$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1001
#define BINARY_SIZE 12
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 1
#define MAX_SALTLEN 52
#define MAX_REALMLEN MAX_SALTLEN
#define MAX_USERLEN MAX_SALTLEN
#define TIMESTAMP_SIZE 44
#define CHECKSUM_SIZE BINARY_SIZE
#define TOTAL_LENGTH (14 + 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) + MAX_REALMLEN + MAX_USERLEN + MAX_SALTLEN)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* This handles all sizes */
#define GETPOS(i, index) (((index) % ocl_v_width) * 4 + ((i) & ~3U) * ocl_v_width + (((i) & 3) ^ 3) + ((index) / ocl_v_width) * 64 * ocl_v_width)
/* This is faster but can't handle size 3 */
//#define GETPOS(i, index) (((index) & (ocl_v_width - 1)) * 4 + ((i) & ~3U) * ocl_v_width + (((i) & 3) ^ 3) + ((index) / ocl_v_width) * 64 * ocl_v_width)
static struct fmt_tests tests[] = {
{"$krb5pa$18$user1$EXAMPLE.COM$$2a0e68168d1eac344da458599c3a2b33ff326a061449fcbc242b212504e484d45903c6a16e2d593912f56c93883bf697b325193d62a8be9c", "openwall"},
{"$krb5pa$18$user1$EXAMPLE.COM$$a3918bd0381107feedec8db0022bdf3ac56e534ed54d13c62a7013a47713cfc31ef4e7e572f912fa4164f76b335e588bf29c2d17b11c5caa", "openwall"},
{"$krb5pa$18$l33t$EXAMPLE.COM$$98f732b309a1d7ef2355a974842a32894d911e97150f5d57f248e1c2632fbd3735c5f156532ccae0341e6a2d779ca83a06021fe57dafa464", "openwall"},
{"$krb5pa$18$aduser$AD.EXAMPLE.COM$$64dfeee04be2b2e0423814e0df4d0f960885aca4efffe6cb5694c4d34690406071c4968abd2c153ee42d258c5e09a41269bbcd7799f478d3", "password@123"},
{"$krb5pa$18$aduser$AD.EXAMPLE.COM$$f94f755a8b4493d925094a4eb1cec630ac40411a14c9733a853516fe426637d9daefdedc0567e2bb5a83d4f89a0ad1a4b178662b6106c0ff", "password@12345678"},
{"$krb5pa$18$aduser$AD.EXAMPLE.COM$AD.EXAMPLE.COMaduser$f94f755a8b4493d925094a4eb1cec630ac40411a14c9733a853516fe426637d9daefdedc0567e2bb5a83d4f89a0ad1a4b178662b6106c0ff", "password@12345678"},
/* etype 17 hash obtained using MiTM etype downgrade attack */
{"$krb5pa$17$user1$EXAMPLE.COM$$c5461873dc13665771b98ba80be53939e906d90ae1ba79cf2e21f0395e50ee56379fbef4d0298cfccfd6cf8f907329120048fd05e8ae5df4", "openwall"},
{NULL},
};
static cl_mem mem_in, mem_out, mem_salt, mem_state, pinned_in, pinned_out;
static cl_kernel pbkdf2_init, pbkdf2_loop, pbkdf2_final;
static struct fmt_main *self;
static struct custom_salt {
int type;
int etype;
unsigned char realm[64];
unsigned char user[64];
unsigned char salt[64]; /* realm + user */
unsigned char ct[TIMESTAMP_SIZE];
} *cur_salt;
static unsigned char constant[16];
static unsigned char ke_input[16];
static unsigned char ki_input[16];
static size_t key_buf_size;
static unsigned int *inbuffer;
static pbkdf2_salt currentsalt;
static pbkdf2_out *output;
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
static int new_keys;
#define ITERATIONS (4096 - 1)
#define HASH_LOOPS 105 // Must be made from factors 3, 3, 5, 7, 13
#define STEP 0
#define SEED 128
static const char * warn[] = {
"P xfer: ", ", init: ", ", loop: ", ", inter: ", ", final: ", ", res xfer: "
};
static int split_events[] = { 2, -1, -1 };
//This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
/* ------- Helper functions ------- */
/* Return the largest work-group size usable by all three PBKDF2 kernels
 * (the minimum of the per-kernel limits reported by the autotuner). */
static size_t get_task_max_work_group_size()
{
	size_t limit = autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_init);
	size_t cand  = autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_loop);

	if (cand < limit)
		limit = cand;
	cand = autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_final);
	if (cand < limit)
		limit = cand;
	return limit;
}
#if 0
struct fmt_main *me;
#endif
/* Allocate all OpenCL buffers for a global work size of gws (scaled by the
 * vector width): pinned+device input key buffer, PBKDF2 state, salt,
 * pinned+device output, plus the host-side crypt_out array; then bind the
 * buffers as kernel arguments.  Paired with release_clobj(). */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	gws *= ocl_v_width;
	key_buf_size = 64 * gws;

	/// Allocate memory
	pinned_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, key_buf_size, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating pinned in");
	mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, key_buf_size, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem in");
	inbuffer = clEnqueueMapBuffer(queue[gpu_id], pinned_in, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, key_buf_size, 0, NULL, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error mapping page-locked memory");

	mem_state = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(pbkdf2_state) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem_state");

	mem_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(pbkdf2_salt), &currentsalt, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem setting");

	pinned_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY | CL_MEM_ALLOC_HOST_PTR, sizeof(pbkdf2_out) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating pinned out");
	mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, sizeof(pbkdf2_out) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem out");
	output = clEnqueueMapBuffer(queue[gpu_id], pinned_out, CL_TRUE, CL_MAP_READ, 0, sizeof(pbkdf2_out) * gws, 0, NULL, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error mapping page-locked memory");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 1, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_loop, 0, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 0, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");

	crypt_out = mem_alloc(sizeof(*crypt_out) * gws);
}
static void release_clobj(void)
{
if (crypt_out) {
HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_in, inbuffer, 0, NULL, NULL), "Error Unmapping mem in");
HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_out, output, 0, NULL, NULL), "Error Unmapping mem in");
HANDLE_CLERROR(clFinish(queue[gpu_id]), "Error releasing memory mappings");
HANDLE_CLERROR(clReleaseMemObject(pinned_in), "Release pinned_in");
HANDLE_CLERROR(clReleaseMemObject(pinned_out), "Release pinned_out");
HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release pinned_in");
HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem_out");
HANDLE_CLERROR(clReleaseMemObject(mem_salt), "Release mem_salt");
HANDLE_CLERROR(clReleaseMemObject(mem_state), "Release mem state");
MEM_FREE(crypt_out);
}
}
/* Format teardown: release buffers, kernels and the program built by
 * reset(); guarded by the autotune counter so it runs only once per tune. */
static void done(void)
{
	if (autotuned) {
		release_clobj();
		HANDLE_CLERROR(clReleaseKernel(pbkdf2_init), "Release Kernel");
		HANDLE_CLERROR(clReleaseKernel(pbkdf2_loop), "Release Kernel");
		HANDLE_CLERROR(clReleaseKernel(pbkdf2_final), "Release Kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
		autotuned--;
	}
}
/* n-fold(k-bits):
* l = lcm(n,k)
* r = l/k
* s = k-bits | k-bits rot 13 | k-bits rot 13*2 | ... | k-bits rot 13*(r-1)
* compute the 1's complement sum:
* n-fold = s[0..n-1]+s[n..2n-1]+s[2n..3n-1]+..+s[(k-1)*n..k*n-1] */
/* representation: msb first, assume n and k are multiples of 8, and
* that k>=16. this is the case of all the cryptosystems which are
* likely to be used. this function can be replaced if that
* assumption ever fails. */
/* input length is in bits */
/* RFC 3961 n-fold: stretch/compress the inbits-bit input to outbits bits.
 * Conceptually s = in | (in rot 13) | (in rot 26) | ... repeated
 * lcm(in,out)/in times, and the outbits-sized chunks of s are summed with
 * 1's-complement (end-around carry) addition.  Representation is msb-first;
 * inbits/outbits are bit counts that must be multiples of 8, and the code
 * assumes outbits >= 16.  Used by init() to derive the "kerberos" constant
 * and the Ke/Ki key-usage vectors. */
static void nfold(unsigned int inbits, const unsigned char *in,
                  unsigned int outbits,unsigned char *out)
{
	int a,b,c,lcm;
	int byte, i, msbit;

	/* the code below is more readable in bytes instead of bits */
	inbits >>= 3;
	outbits >>= 3;

	/* first compute lcm(n,k): Euclid's gcd, then lcm = n*k/gcd */
	a = outbits;
	b = inbits;
	while (b != 0) {
		c = b;
		b = a % b;
		a = c;
	}
	lcm = outbits*inbits/a;

	/* now do the real work */
	memset(out, 0, outbits);
	byte = 0;

	/* this will end up cycling through k lcm(k,n)/k times, which
	 * is correct */
	for (i = lcm - 1; i >= 0; i--) {
		/* compute the msbit in k which gets added into this byte:
		 * start from the msbit of the unrotated input, shift right by
		 * 13 bits per repetition, then select the byte within that
		 * shifted repetition */
		msbit = (((inbits << 3) - 1)
		         + (((inbits << 3) + 13) * (i / inbits))
		         + ((inbits - (i % inbits)) << 3)
		        ) % (inbits << 3);
		/* pull out the byte value itself */
		byte += (((in[((inbits - 1) - (msbit >> 3)) % inbits] << 8)|
		          (in[((inbits) - (msbit >> 3)) % inbits]))
		         >> ((msbit & 7) + 1)) & 0xff;
		/* do the addition */
		byte += out[i % outbits];
		out[i % outbits] = byte & 0xff;
		/* keep around the carry bit, if any */
		byte >>= 8;
	}

	/* if there's a carry bit left over, add it back in (end-around) */
	if (byte) {
		for (i = outbits - 1; i >= 0; i--) {
			byte += out[i];
			out[i] = byte & 0xff;
			byte >>= 8; /* fixed: stray '\' line-continuation removed */
		}
	}
}
/* One-time format init: pick the OpenCL vector width (and patch the
 * reported algorithm name accordingly), then precompute the RFC 3961
 * n-fold constants: the 128-bit "kerberos" constant and the Ke (0xAA) /
 * Ki (0x55) key-usage vectors used for key derivation. */
static void init(struct fmt_main *_self)
{
	unsigned char usage[5];
	static char valgo[sizeof(ALGORITHM_NAME) + 8] = "";

	self = _self;

	opencl_prepare_dev(gpu_id);
	/* VLIW5 does better with just 2x vectors due to GPR pressure */
	if (!options.v_width && amd_vliw5(device_info[gpu_id]))
		ocl_v_width = 2;
	else
		ocl_v_width = opencl_get_vector_width(gpu_id, sizeof(cl_int));

	if (ocl_v_width > 1) {
		/* Run vectorized kernel */
		snprintf(valgo, sizeof(valgo),
		         ALGORITHM_NAME " %ux", ocl_v_width);
		self->params.algorithm_name = valgo;
	}

	// generate 128 bits from 40 bits of "kerberos" string
	nfold(8 * 8, (unsigned char*)"kerberos", 128, constant);

	memset(usage,0,sizeof(usage));
	usage[3] = 0x01;        // key number in big-endian format
	usage[4] = 0xAA;        // used to derive Ke
	nfold(sizeof(usage)*8,usage,sizeof(ke_input)*8,ke_input);

	memset(usage,0,sizeof(usage));
	usage[3] = 0x01;        // key number in big-endian format
	usage[4] = 0x55;        // used to derive Ki
	nfold(sizeof(usage)*8,usage,sizeof(ki_input)*8,ki_input);
}
/* Per-database (re)initialization: on the first call, build the PBKDF2
 * OpenCL program with the format's compile-time constants, create the
 * init/loop/final kernels and run the shared auto-tune machinery to pick
 * work sizes.  Subsequent calls are no-ops (guarded by `autotuned`). */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[128];

		snprintf(build_opts, sizeof(build_opts),
		         "-DHASH_LOOPS=%u -DITERATIONS=%u -DOUTLEN=%u "
		         "-DPLAINTEXT_LENGTH=%u -DV_WIDTH=%u",
		         HASH_LOOPS, ITERATIONS, OUTLEN,
		         PLAINTEXT_LENGTH, ocl_v_width);
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_kernel.cl", gpu_id,
		            build_opts);

		pbkdf2_init = clCreateKernel(program[gpu_id], "pbkdf2_init", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");
		crypt_kernel = pbkdf2_loop = clCreateKernel(program[gpu_id], "pbkdf2_loop", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");
		pbkdf2_final = clCreateKernel(program[gpu_id], "pbkdf2_final", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");

		//Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 2 * HASH_LOOPS, split_events,
		                       warn, 2, self, create_clobj,
		                       release_clobj,
		                       ocl_v_width * sizeof(pbkdf2_state), 0, db);

		//Auto tune execution from shared/included code.
		autotune_run(self, 4 * ITERATIONS + 4, 0,
		             (cpu(device_info[gpu_id]) ?
		              1000000000 : 5000000000ULL));
	}
}
/* Validate a candidate hash of the form
 *   $krb5pa$<etype>$<user>$<realm>$<salt>$<timestamp+checksum hex>
 * Accepts etype 17 or 18 only; enforces the per-field and total salt
 * length limits and requires exactly 2*(TIMESTAMP_SIZE+CHECKSUM_SIZE)
 * hex digits in the final field.  Returns 1 if parseable, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *data = ciphertext;
	int type, saltlen = 0;

	// tag is mandatory
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	data += FORMAT_TAG_LEN;

	// etype field, 17 or 18
	p = strchr(data, '$');
	if (!p || p - data != 2)
		return 0;
	type = atoi(data);
	if (type < 17 || type > 18)
		return 0;
	data = p + 1;

	// user field
	p = strchr(data, '$');
	if (!p || p - data > MAX_USERLEN)
		return 0;
	saltlen += p - data;
	data = p + 1;

	// realm field
	p = strchr(data, '$');
	if (!p || p - data > MAX_REALMLEN)
		return 0;
	saltlen += p - data;
	data = p + 1;

	// salt field
	p = strchr(data, '$');
	if (!p)
		return 0;
	// if salt is empty, realm.user is used instead
	if (p - data)
		saltlen = p - data;
	data = p + 1;

	// We support a max. total salt length of 52.
	// We could opt to emit a warning if rejected here.
	if (saltlen > MAX_SALTLEN) {
		static int warned = 0;

		if (!ldr_in_pot)
		if (!warned++)
			fprintf(stderr, "%s: One or more hashes rejected due to salt length limitation\n", FORMAT_LABEL);

		return 0;
	}

	// 56 bytes (112 hex chars) encrypted timestamp + checksum
	if (strlen(data) != 2 * (TIMESTAMP_SIZE + CHECKSUM_SIZE) ||
	    strspn(data, HEXCHARS_all) != strlen(data))
		return 0;

	return 1;
}
/*
 * Parse a (already valid()-checked) ciphertext into the static custom_salt.
 * Fields: etype, user, realm, salt, then the hex-encoded encrypted
 * timestamp.  An empty salt field means "use realm + user" per RFC 4120
 * convention.  Returns a pointer to static storage (JtR convention).
 */
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
char *p;
int i;
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
ctcopy += FORMAT_TAG_LEN;
p = strtokm(ctcopy, "$");
cs.etype = atoi(p);
p = strtokm(NULL, "$");
/* NOTE(review): p[-1] == '$' appears to detect an empty field, i.e.
   strtokm skipped straight past a "$$" — presumably a property of JtR's
   strtokm; confirm against its implementation. */
if (p[-1] == '$')
cs.user[0] = 0;
else {
strcpy((char*)cs.user, p);
p = strtokm(NULL, "$");
}
if (p[-1] == '$')
cs.realm[0] = 0;
else {
strcpy((char*)cs.realm, p);
p = strtokm(NULL, "$");
}
if (p[-1] == '$') {
/* Empty salt: default to realm concatenated with user. */
strcpy((char*)cs.salt, (char*)cs.realm);
strcat((char*)cs.salt, (char*)cs.user);
} else {
strcpy((char*)cs.salt, p);
p = strtokm(NULL, "$");
}
/* Decode the hex timestamp (+checksum tail) into raw bytes. */
for (i = 0; i < TIMESTAMP_SIZE; i++)
cs.ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
MEM_FREE(keeptr);
return (void *)&cs;
}
/* Wipe the whole host-side key transfer buffer. */
static void clear_keys(void) {
memset(inbuffer, 0, key_buf_size);
}
/*
 * Store one candidate password into the host transfer buffer.  Characters
 * are scattered to the interleaved layout the kernel expects, as computed
 * by GETPOS(char_position, key_index).
 */
static void set_key(char *key, int index)
{
	char *dst = (char*)inbuffer;
	const char *src;
	int pos = 0;

	for (src = key; *src; src++, pos++)
		dst[GETPOS(pos, index)] = *src;

	new_keys = 1;	/* request a fresh host->device upload in crypt_all() */
}
/*
 * Reassemble the plaintext candidate at `index` from the interleaved
 * transfer buffer.  Returns a pointer to static storage (JtR convention);
 * the result is always NUL-terminated.
 */
static char* get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	const char *src = (char*)inbuffer;
	int pos;

	for (pos = 0; pos < PLAINTEXT_LENGTH; pos++) {
		ret[pos] = src[GETPOS(pos, index)];
		if (!ret[pos])
			break;
	}
	ret[pos] = 0;	/* terminate even when the key is at max length */
	return ret;
}
/*
 * Canonicalize a hash line: re-split the fields from the right (user/realm
 * may themselves never contain '$' but salt handling needs the rightmost
 * anchors), substitute the default realm.user salt when the salt field is
 * empty, and lower-case the trailing hex blob so identical hashes unify.
 * Returns a pointer to static storage (JtR convention).
 */
static char *split(char *ciphertext, int index, struct fmt_main *pFmt)
{
static char out[TOTAL_LENGTH + 1];
char in[TOTAL_LENGTH + 1];
char salt[MAX_SALTLEN + 1];
char *data;
char *e, *u, *r, *s, *tc;
strnzcpy(in, ciphertext, sizeof(in));
/* Cut the copy apart at the last four '$' separators, right to left:
   tc = timestamp+checksum, s = salt, r = realm, u = user. */
tc = strrchr(in, '$'); *tc++ = 0;
s = strrchr(in, '$'); *s++ = 0;
r = strrchr(in, '$'); *r++ = 0;
u = strrchr(in, '$'); *u++ = 0;
/* NOTE(review): e skips what remains of the tag prefix; the offset 8
   presumably equals FORMAT_TAG_LEN — confirm against the tag literal. */
e = in + 8;
/* Default salt is user.realm */
if (!*s) {
snprintf(salt, sizeof(salt), "%s%s", r, u);
s = salt;
}
snprintf(out, sizeof(out), "%s%s$%s$%s$%s$%s", FORMAT_TAG, e, u, r, s, tc);
/* Lower-case only the hex tail (timestamp + checksum). */
data = out + strlen(out) - 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) - 1;
strlwr(data);
return out;
}
/*
 * Extract the binary checksum from a canonical hash line: skip past the
 * hex-encoded timestamp in the final field and decode the next
 * BINARY_SIZE bytes from hex.  Returns static, ARCH_WORD-aligned storage.
 */
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
ARCH_WORD dummy;	/* forces alignment for word-wise access */
} buf;
unsigned char *out = buf.c;
char *p;
int i;
p = strrchr(ciphertext, '$') + 1 + TIMESTAMP_SIZE * 2; /* skip to checksum field */
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
/* Partial-hash accessors for the cracker's lookup tables: each masks the
   first 32-bit word of the computed binary down to the table size. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/*
 * Install the salt for subsequent crypt_all() calls and upload it to the
 * device.  The write is non-blocking (CL_FALSE); in-order command queues
 * guarantee it completes before the kernels enqueued later.
 *
 * Fix: the address-of expression "&currentsalt" had been corrupted into
 * HTML-entity garbage ("&curren" -> currency sign), which cannot compile.
 */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	currentsalt.length = strlen((char*)cur_salt->salt);
	currentsalt.iterations = ITERATIONS;
	memcpy(currentsalt.salt, cur_salt->salt, currentsalt.length);
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE, 0,
	               sizeof(pbkdf2_salt), &currentsalt, 0, NULL, NULL),
	               "Copy setting to gpu");
}
/*
 * AES in CBC mode with ciphertext stealing (CTS), as used by Kerberos
 * aes-cts encryption types (RFC 3962).  CTS swaps the last two blocks so
 * messages that are not a multiple of the block size need no padding.
 * `encryptp` selects direction; `ivec` is updated in place.
 * Precondition: len > AES_BLOCK_SIZE (a single-block message is not handled
 * by this helper).
 */
static void AES_cts_encrypt(const unsigned char *in, unsigned char *out,
size_t len, const AES_KEY *key,
unsigned char *ivec, const int encryptp)
{
unsigned char tmp[AES_BLOCK_SIZE];
unsigned int i;
if (encryptp) {
/* Plain CBC over all full blocks except the final (partial) one. */
while(len > AES_BLOCK_SIZE) {
for (i = 0; i < AES_BLOCK_SIZE; i++)
tmp[i] = in[i] ^ ivec[i];
AES_encrypt(tmp, out, key);
memcpy(ivec, out, AES_BLOCK_SIZE);
len -= AES_BLOCK_SIZE;
in += AES_BLOCK_SIZE;
out += AES_BLOCK_SIZE;
}
/* Last block: zero-pad the partial plaintext, encrypt it into the
   slot of the previous block, and emit the stolen prefix of the
   previous ciphertext as the (shortened) final block. */
for (i = 0; i < len; i++)
tmp[i] = in[i] ^ ivec[i];
for (; i < AES_BLOCK_SIZE; i++)
tmp[i] = 0 ^ ivec[i];
AES_encrypt(tmp, out - AES_BLOCK_SIZE, key);
memcpy(out, ivec, len);
memcpy(ivec, out - AES_BLOCK_SIZE, AES_BLOCK_SIZE);
} else {
unsigned char tmp2[AES_BLOCK_SIZE];
unsigned char tmp3[AES_BLOCK_SIZE];
/* Plain CBC until only the two (swapped) final blocks remain. */
while(len > AES_BLOCK_SIZE * 2) {
memcpy(tmp, in, AES_BLOCK_SIZE);
AES_decrypt(in, out, key);
for (i = 0; i < AES_BLOCK_SIZE; i++)
out[i] ^= ivec[i];
memcpy(ivec, tmp, AES_BLOCK_SIZE);
len -= AES_BLOCK_SIZE;
in += AES_BLOCK_SIZE;
out += AES_BLOCK_SIZE;
}
/* Undo the steal: decrypt the next-to-last ciphertext block, borrow
   its tail to reconstruct the full last block, then decrypt that. */
len -= AES_BLOCK_SIZE;
memcpy(tmp, in, AES_BLOCK_SIZE); /* save last iv */
AES_decrypt(in, tmp2, key);
memcpy(tmp3, in + AES_BLOCK_SIZE, len);
memcpy(tmp3 + len, tmp2 + len, AES_BLOCK_SIZE - len); /* xor 0 */
for (i = 0; i < len; i++)
out[i + AES_BLOCK_SIZE] = tmp2[i] ^ tmp3[i];
AES_decrypt(tmp3, out, key);
for (i = 0; i < AES_BLOCK_SIZE; i++)
out[i] ^= ivec[i];
memcpy(ivec, tmp, AES_BLOCK_SIZE);
}
}
// keysize = 32 for 256 bits, 16 for 128 bits
/*
 * DK key-derivation step (RFC 3962): encrypt the n-folded constant with the
 * base key under AES-CBC (zero IV) to produce the derived key.
 * NOTE(review): only the first 16 bytes of ptext are consumed and
 * `ptext_size` is unused — callers pass 32; presumably only one block of
 * the n-folded constant is needed here.  Confirm against RFC 3962 / the
 * CPU implementation.
 */
static void dk(unsigned char key_out[], unsigned char key_in[],
size_t key_size, unsigned char ptext[], size_t ptext_size)
{
unsigned char iv[32];
unsigned char plaintext[32];
AES_KEY ekey;
memset(iv,0,sizeof(iv));
memset(plaintext,0,sizeof(plaintext));
memcpy(plaintext,ptext,16);
AES_set_encrypt_key(key_in,key_size*8,&ekey);
/* key_size doubles as the number of output bytes to produce. */
AES_cbc_encrypt(plaintext,key_out,key_size,&ekey,iv,AES_ENCRYPT);
}
/*
 * Decrypt a Kerberos aes-cts ciphertext with the given key, using a zero
 * IV as the PA-ENC-TIMESTAMP exchange requires.
 */
static void krb_decrypt(const unsigned char ciphertext[], size_t ctext_size,
unsigned char plaintext[], const unsigned char key[], size_t key_size)
{
unsigned char iv[32];
AES_KEY ekey;
memset(iv,0,sizeof(iv));
AES_set_decrypt_key(key,key_size*8,&ekey);
AES_cts_encrypt(ciphertext,plaintext,ctext_size,&ekey,iv,AES_DECRYPT);
}
/*
 * Main work function.  Phase 1 (GPU): PBKDF2-HMAC-SHA1 of every candidate
 * password over the salt — init kernel, ITERATIONS/HASH_LOOPS loop-kernel
 * passes (twice, for the two SHA-1-sized output blocks), final kernel,
 * then read the derived keys back.  Phase 2 (CPU, OpenMP): per candidate,
 * derive Ke, decrypt the AS-REQ timestamp, and on a plausible plaintext
 * derive Ki and compute the HMAC-SHA-1 checksum into crypt_out.
 * Returns the number of candidates processed.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int i;
int key_size;
size_t scalar_gws;
size_t *lws = local_work_size ? &local_work_size : NULL;
global_work_size = GET_MULTIPLE_OR_BIGGER_VW(count, local_work_size);
scalar_gws = global_work_size * ocl_v_width;
/* etype 17 = AES128, etype 18 = AES256. */
if (cur_salt->etype == 17)
key_size = 16;
else
key_size = 32;
/// Copy data to gpu
if (ocl_autotune_running || new_keys) {
BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, key_buf_size, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu");
new_keys = 0;
}
/// Run kernel
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_init, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run initial kernel");
/* First PBKDF2 output block; split into HASH_LOOPS chunks so the GPU
   stays responsive (clFinish + event processing between chunks). */
for (i = 0; i < (ocl_autotune_running ? 1 : ITERATIONS / HASH_LOOPS); i++) {
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_loop, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[2]), "Run loop kernel");
BENCH_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel");
opencl_process_event();
}
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_final, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[3]), "Run intermediate kernel");
/* Second output block (needed for 256-bit derived keys). */
for (i = 0; i < (ocl_autotune_running ? 1 : ITERATIONS / HASH_LOOPS); i++) {
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_loop, 1, NULL, &global_work_size, lws, 0, NULL, NULL), "Run loop kernel (2nd pass)");
BENCH_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel");
opencl_process_event();
}
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_final, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[4]), "Run final kernel (SHA1)");
BENCH_CLERROR(clFinish(queue[gpu_id]), "Failed running final kernel");
/// Read the result back
BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, sizeof(pbkdf2_out) * scalar_gws, output, 0, NULL, multi_profilingEvent[5]), "Copy result back");
if (!ocl_autotune_running) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (i = 0; i < count; i++) {
unsigned char base_key[32];
unsigned char Ke[32];
unsigned char plaintext[TIMESTAMP_SIZE];
//pbkdf2((const unsigned char*)saved_key[i], len, (unsigned char *)cur_salt->salt,strlen((char*)cur_salt->salt), 4096, (unsigned int*)tkey);
// generate 128 bits from 40 bits of "kerberos" string
// This is precomputed in init()
//nfold(8 * 8, (unsigned char*)"kerberos", 128, constant);
dk(base_key, (unsigned char*)output[i].dk, key_size, constant, 32);
/* The "well-known constant" used for the DK function is the key usage number,
 * expressed as four octets in big-endian order, followed by one octet indicated below.
 * Kc = DK(base-key, usage | 0x99);
 * Ke = DK(base-key, usage | 0xAA);
 * Ki = DK(base-key, usage | 0x55); */
// derive Ke for decryption/encryption
// This is precomputed in init()
//memset(usage,0,sizeof(usage));
//usage[3] = 0x01; // key number in big-endian format
//usage[4] = 0xAA; // used to derive Ke
//nfold(sizeof(usage)*8,usage,sizeof(ke_input)*8,ke_input);
dk(Ke, base_key, key_size, ke_input, 32);
// decrypt the AS-REQ timestamp encrypted with 256-bit AES
// here is enough to check the string, further computation below is required
// to fully verify the checksum
krb_decrypt(cur_salt->ct, TIMESTAMP_SIZE, plaintext, Ke, key_size);
// Check a couple bytes from known plain (YYYYMMDDHHMMSSZ) and
// bail out if we are out of luck.
if (plaintext[22] == '2' && plaintext[23] == '0' && plaintext[36] == 'Z') {
unsigned char Ki[32];
unsigned char checksum[20];
// derive Ki used in HMAC-SHA-1 checksum
// This is precomputed in init()
//memset(usage,0,sizeof(usage));
//usage[3] = 0x01; // key number in big-endian format
//usage[4] = 0x55; // used to derive Ki
//nfold(sizeof(usage)*8,usage,sizeof(ki_input)*8,ki_input);
dk(Ki, base_key, key_size, ki_input, 32);
// derive checksum of plaintext (only 96 bits used out of 160)
hmac_sha1(Ki, key_size, plaintext, TIMESTAMP_SIZE, checksum, 20);
memcpy(crypt_out[i], checksum, BINARY_SIZE);
} else {
/* Not a hit: zero the slot so stale data can't false-match. */
memset(crypt_out[i], 0, BINARY_SIZE);
}
}
}
return count;
}
/*
 * Quick scan: does any computed result match the candidate binary?
 * NOTE(review): compares only ARCH_SIZE bytes (one native word) here;
 * cmp_one() below confirms the full BINARY_SIZE.
 */
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
/* Full-width comparison for a single candidate slot. */
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* The stored binary is the full checksum; nothing further to verify. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/*
 * Format descriptor wiring the functions above into JtR's fmt_main
 * interface: a parameters block (limits, flags, self-tests) followed by
 * the methods block.
 */
struct fmt_main fmt_opencl_krb5pa_sha1 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
{ NULL },
{ FORMAT_TAG },
tests
}, {
/* methods */
init,
done,
reset,
fmt_default_prepare,
valid,
split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
coating.h | #ifndef batoid_coating_h
#define batoid_coating_h
namespace batoid {
#if defined(BATOID_GPU)
#pragma omp declare target
#endif
// Abstract base for optical surface coatings: maps (wavelength, cosine of
// incidence angle) to reflection and transmission coefficients.  GPU builds
// additionally manage a device-resident clone of each instance.
class Coating {
public:
Coating();
virtual ~Coating();
// Compute both coefficients in a single call.
virtual void getCoefs(double wavelength, double cosIncidenceAngle, double& reflect, double&transmit) const = 0;
virtual double getReflect(double wavelength, double cosIncidenceAngle) const = 0;
virtual double getTransmit(double wavelength, double cosIncidenceAngle) const = 0;
// Pointer to the device-side copy of this object (GPU builds).
virtual const Coating* getDevPtr() const = 0;
protected:
mutable Coating* _devPtr;  // cached device pointer; mutable so const getDevPtr() can fill it lazily
private:
#if defined(BATOID_GPU)
void freeDevPtr() const;  // release the device-side copy
#endif
};
// Trivial coating with wavelength- and angle-independent coefficients:
// always returns the fixed reflectivity/transmissivity it was built with.
class SimpleCoating : public Coating {
public:
SimpleCoating(double reflectivity, double transmissivity);
~SimpleCoating();
void getCoefs(double wavelength, double cosIncidenceAngle, double& reflect, double& transmit) const override;
double getReflect(double wavelength, double cosIncidenceAngle) const override;
double getTransmit(double wavelength, double cosIncidenceAngle) const override;
virtual const Coating* getDevPtr() const override;
private:
double _reflectivity;
double _transmissivity;
};
#if defined(BATOID_GPU)
#pragma omp end declare target
#endif
}
#endif
|
km_modmat.h | /*
*
* Header file of the KM-config algorithm (C++ version)
*
*
* An algorithm for finding multiple core-periphery pairs in networks
*
*
* Core-periphery structure requires something else in the network
* Sadamori Kojaku and Naoki Masuda
* Preprint arXiv:1710.07076
*
*
* Please do not distribute without contacting the authors.
*
*
* AUTHOR - Sadamori Kojaku
*
*
* DATE - 11 Oct 2017
*/
#ifndef CP_ALGORITHM
#define CP_ALGORITHM
#include "cpalgorithm.h"
#endif
// KM-config algorithm on the modularity matrix: finds multiple
// core-periphery pairs.  c[i] is the pair id of node i, x[i] in {0,1}
// marks periphery (0) vs core (1).  The best of _num_runs random restarts
// is kept.
class KM_modmat: public CPAlgorithm{
public:
// Constructor
KM_modmat();
KM_modmat(int num_runs);  // num_runs = number of random restarts
void detect(const Graph& G);
// Quality Q (and per-pair contributions q) of a given assignment (c, x).
void calc_Q(
const Graph& G,
const vector<int>& c,
const vector<double>& x,
double& Q,
vector<double>& q);
protected: // function needed to be implemented
int _num_runs;
private:
// Louvain-style detection: repeated label switching plus coarsening.
void _km_modmat_louvain(
const vector<vector<double>>& M,
const int num_of_runs,
vector<int>& c,
vector<double>& x,
double& Q,
vector<double>& q,
mt19937_64& mtrnd
);
// One Louvain pass over (possibly coarsened) matrix M.
void _km_modmat_louvain_core(
const vector<vector<double>>& M,
vector<int>& c,
vector<double>& x,
mt19937_64& mtrnd
);
// Plain label-switching detection with num_of_runs restarts.
void _km_modmat_label_switching(
const vector<vector<double>>& M,
const int num_of_runs,
vector<int>& c,
vector<double>& x,
double& Q,
vector<double>& q,
mt19937_64& mtrnd
);
// Single label-switching sweep until no node move improves quality.
void _km_modmat_label_switching_core(
const vector<vector<double>>& M,
vector<int>& c,
vector<double>& x,
mt19937_64& mtrnd
);
// Best (pair, core/periphery) move for one node and its quality gain dQ.
void _propose_new_label_modmat(
const vector<vector<double>>& M,
const vector<int>& c,
const vector<double>& x,
const int node_id,
int& cprime,
double& xprime,
double& dQ,
mt19937_64& mtrnd
);
// Quality of assignment (c, x) under modularity matrix M.
void _calc_Q_modmat(
const vector<vector<double>>& M,
const vector<int>& c,
const vector<double>& x,
double& Q,
vector<double>& q);
// Aggregate nodes sharing (c, x) into a smaller matrix newM.
void _coarsing(
const vector<vector<double>>& M,
const vector<int>& c,
const vector<double>& x,
vector<vector<double>>& newM,
vector<int>& toLayerId
);
// Number of distinct (pair, core/periphery) blocks in use.
int _count_non_empty_block(
vector<int>& c,
vector<double>& x
);
// Compress labels in c to consecutive ids 0..K-1 by first appearance.
void _relabeling(vector<int>& c);
};
/*-----------------------------
Constructor
-----------------------------*/
/*
 * num_runs constructor.
 * Fix: the original body executed "KM_modmat();", which constructs and
 * immediately destroys a *temporary* object — it does not delegate to or
 * re-initialise *this (a classic C++ pitfall).  The no-op call is removed
 * and _num_runs is assigned directly.
 */
KM_modmat::KM_modmat(int num_runs):CPAlgorithm(){
	_num_runs = num_runs;
};
/* Default constructor: 10 random restarts. */
KM_modmat::KM_modmat(): CPAlgorithm(){
	_num_runs = 10;
};
/*-----------------------------
Functions inherited from the super class (CPAlgorithm)
-----------------------------*/
// Run detection on graph G: Louvain procedure on its dense matrix form,
// storing the best assignment and quality in the inherited _c/_x/_Q/_q.
void KM_modmat::detect(const Graph& G){
_km_modmat_louvain(G.to_matrix(), _num_runs, _c, _x, _Q, _q, _mtrnd);
}
// Public quality evaluation: convert G to a dense matrix and delegate.
void KM_modmat::calc_Q(
const Graph& G,
const vector<int>& c,
const vector<double>& x,
double& Q,
vector<double>& q)
{
vector<vector<double>>M = G.to_matrix();
_calc_Q_modmat(M,c,x,Q,q);
}
/*-----------------------------
Private functions (internal use only)
-----------------------------*/
// Run the label-switching heuristic num_of_runs times (in parallel when
// OpenMP is enabled) and keep the best-quality result in (c, x, Q, q).
// NOTE(review): c and x are written element-wise without resizing — the
// caller must pass vectors already sized to M.size().  Also, each thread
// copies its RNG from mtrnd_list by value, so runs on the same thread
// start from the same seed — confirm whether that is intended.
void KM_modmat::_km_modmat_label_switching(
const vector<vector<double>>& M,
const int num_of_runs,
vector<int>& c,
vector<double>& x,
double& Q,
vector<double>& q,
mt19937_64& mtrnd
)
{
/* Generate \hat q^{(s)} and \hat n^{(s)} (1 \leq s \leq S) */
// create random number generator per each thread
int numthread = 1;
#ifdef _OPENMP
# pragma omp parallel
{
numthread = omp_get_num_threads();
}
#endif
vector<mt19937_64> mtrnd_list(numthread);
for(int i = 0; i < numthread; i++){
mt19937_64 mtrnd = _init_random_number_generator();
mtrnd_list[i] = mtrnd;
}
Q = -1;
int N = (int) M.size();
#ifdef _OPENMP
#pragma omp parallel for shared(c, x, Q, q, N, mtrnd_list)
#endif
for (int i = 0; i < num_of_runs; i++) {
vector<int> ci;
vector<double> xi;
vector<double> qi;
double Qi = 0.0;
int tid = 0;
#ifdef _OPENMP
tid = omp_get_thread_num();
#endif
mt19937_64 mtrnd = mtrnd_list[tid];
_km_modmat_label_switching_core(M, ci, xi, mtrnd);
_calc_Q_modmat(M, ci, xi, Qi, qi);
// Best-so-far update must be serialized across threads.
#ifdef _OPENMP
#pragma omp critical
#endif
{
if (Qi > Q) {
for(int i = 0; i < N; i++){
c[i] = ci[i];
x[i] = xi[i];
}
q.clear();
int K = (int) qi.size();
vector<double> tmp(K,0.0);
q = tmp;
for(int k = 0; k < K; k++){
q[k] = qi[k];
}
Q = Qi;
}
}
}
}
void KM_modmat::_calc_Q_modmat(
const vector<vector<double>>& M,
const vector<int>& c,
const vector<double>& x,
double& Q,
vector<double>& q)
{
int N = (int) M.size();
int K = *max_element(c.begin(), c.end()) + 1;
q.assign(K, 0.0);
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
if( c[i] != c[j]) continue;
q[c[i]]+=M[i][j] * (x[i] + x[j] - x[i] * x[j]);
}
}
Q = 0;
for (int k = 0; k < K; k++) {
Q += q[k];
}
}
// For node_id, evaluate moving it into every existing pair k, either as
// core (x=1) or periphery (x=0), and report the best move (cprime, xprime)
// together with its quality gain dQ over the node's current contribution.
// If no move strictly improves, dQ is left at the caller's value (callers
// initialise it to 0 and treat dQ <= 0 as "no move").
void KM_modmat::_propose_new_label_modmat(
const vector<vector<double>>& M,
const vector<int>& c,
const vector<double>& x,
const int node_id,
int& cprime,
double& xprime,
double& dQ,
mt19937_64& mtrnd
)
{
int N = (int) M.size();
int K = *max_element(c.begin(), c.end()) + 1;
// dq_core[k]: gain if the node joins pair k as core (all edges count);
// dq_peri[k]: gain if it joins as periphery (only edges to core count).
vector<double> dq_core(K,0.0);
vector<double> dq_peri(K,0.0);
double dq_old = 0;
for(int i = 0; i < N; i++){
if(i == node_id) continue;
dq_core[c[i]]+= M[node_id][i];
dq_peri[c[i]]+= x[i] * M[node_id][i];
// Current contribution: edges inside the node's present pair only.
dq_old+= ( x[i] + x[node_id] - x[i] * x[node_id]) * !!(c[node_id] == c[i]) * M[node_id][i];
}
double dqmax = 0;
dq_old+=!!(x[node_id])*M[node_id][node_id]/2; // add quality induced by self-edges
for(int k = 0; k < K; k++){
dq_core[k]+=M[node_id][node_id]/2; // add quality induced by self-edges
if(dq_core[k] > dq_peri[k]){
if( dq_core[k]-dq_old >0 && dq_core[k] > dqmax ){
xprime = 1.0;
cprime = k;
dqmax = dq_core[k];
dQ = dq_core[k] - dq_old;
}
}else{
if( dq_peri[k]-dq_old >0 && dq_peri[k] > dqmax ){
xprime = 0;
cprime = k;
dqmax = dq_peri[k];
dQ = dq_peri[k] - dq_old;
}
}
};
}
// Compress arbitrary label values in c to consecutive ids 0,1,2,...
// assigned in order of first appearance (stable remapping).
void KM_modmat::_relabeling(
vector<int>& c
){
    vector<int> seen;  // seen[k] = original label mapped to id k
    for (int& label : c) {
        int mapped = -1;
        const int m = (int) seen.size();
        for (int k = 0; k < m; k++) {
            if (seen[k] == label) {
                mapped = k;
                break;
            }
        }
        if (mapped < 0) {
            seen.push_back(label);
            mapped = m;
        }
        label = mapped;
    }
}
// One full label-switching optimisation: start from the singleton
// assignment (every node its own core), then repeatedly sweep the nodes in
// random order, applying the best improving move for each, until a whole
// sweep makes no change.  Outputs the relabelled assignment in (c, x).
void KM_modmat::_km_modmat_label_switching_core(
const vector<vector<double>>& M,
vector<int>& c,
vector<double>& x,
mt19937_64& mtrnd
)
{
/* Variable declarations */
int N = (int) M.size();
vector<int> order(N);
vector<double> degs(N);
bool isupdated = false;
c.clear();
x.clear();
c.assign(N, 0);
x.assign(N, true);
for (int i = 0; i < N; i++) {
order[i] = i;
c[i] = i;
};
/* Label switching algorithm */
do {
isupdated = false;
shuffle(order.begin(), order.end(), mtrnd);
for (int scan_count = 0; scan_count < N; scan_count++) {
int i = order[scan_count];
int cprime = c[i]; // c'
double xprime = x[i]; // x'
double dQ = 0;
_propose_new_label_modmat(M, c, x, i, cprime, xprime, dQ, mtrnd);
if (dQ <= 0)
continue;
/* Skip no-op moves (note: '&' is a deliberate non-short-circuit and). */
if ( (c[i] == cprime) & (x[i] == xprime) )
continue;
c[i] = cprime;
x[i] = xprime;
isupdated = true;
}
} while (isupdated == true);
/* Remove empty core-periphery pairs */
_relabeling(c);
}
// Coarsen M by merging all nodes that share the same (pair id, core/peri)
// block into a single super-node.  Block key is 2*c[i] + x[i]; toLayerId
// maps that key to the super-node index; newM accumulates edge weights
// between super-nodes.  (MAX is presumably a macro from an included
// header.)
void KM_modmat::_coarsing(
const vector<vector<double>>& M,
const vector<int>& c,
const vector<double>& x,
vector<vector<double>>& newM,
vector<int>& toLayerId
){
int N = (int) c.size();
vector<int> ids(N,0);
int maxid = 0;
for(int i = 0;i<N;i++){
ids[i] = 2 * c[i] + (int)x[i];
maxid = MAX(maxid, ids[i]);
}
_relabeling(ids);
toLayerId.clear();
toLayerId.assign(maxid+1,0);
for(int i = 0;i<N;i++){
toLayerId[2 * c[i] + (int)x[i]] = ids[i];
}
int K = *max_element(ids.begin(), ids.end()) + 1;
vector<vector<double>> tmp(K, vector<double>(K,0));
newM.clear();
newM = tmp;
// Sum edge weights between blocks into the coarse matrix.
for(int i = 0;i<N;i++){
int mi = 2 * c[i] + (int)x[i];
for(int j = 0;j<N;j++){
int mj = 2 * c[j] + (int)x[j];
newM[ toLayerId[mi] ][ toLayerId[mj] ]+=M[i][j];
}
}
}
// Number of distinct (pair id, core/periphery) blocks currently in use:
// encode each node as 2*c[i] + x[i], then count distinct codes.
int KM_modmat::_count_non_empty_block(
vector<int>& c,
vector<double>& x
){
    const int n = (int) c.size();
    vector<int> block_ids(n, 0);
    for (int i = 0; i < n; i++) {
        block_ids[i] = 2 * c[i] + (int) x[i];
    }
    sort(block_ids.begin(), block_ids.end());
    int distinct = 0;
    for (int i = 0; i < n; i++) {
        if (i == 0 || block_ids[i] != block_ids[i - 1]) {
            distinct++;
        }
    }
    return distinct;
}
// One Louvain run: alternate label switching on the current (coarse)
// matrix with coarsening of the resulting blocks, projecting labels back
// to the original nodes each round, until coarsening no longer shrinks
// the matrix.  Keeps the best-quality projection seen.
void KM_modmat::_km_modmat_louvain_core(
const vector<vector<double>>& M,
vector<int>& c,
vector<double>& x,
mt19937_64& mtrnd
){
// Initialise variables
int N = (int) M.size();
c.clear();
x.clear();
c.assign(N, 0);
x.assign(N, 1.0);
for (int i = 0; i < N; i++) c[i] = i;
vector<int>ct = c; // label of each node at tth iteration
vector<double>xt = x; // label of each node at tth iteration.
vector<vector<double>> cnet_M; // coarse network
vector<int> toLayerId; //toLayerId[i] maps 2*c[i] + x[i] to the id of node in the coarse network
_coarsing(M, ct, xt, cnet_M, toLayerId); // Initialise toLayerId
double Qbest = 0; // quality of the current partition
int cnet_N;
do{
cnet_N = (int) cnet_M.size();
// Core-periphery detection
vector<int> cnet_c; // label of node in the coarse network, Mt
vector<double> cnet_x; // label of node in the coarse network, Mt
_km_modmat_label_switching_core(cnet_M, cnet_c, cnet_x, mtrnd);
// Update the label of node in the original network, ct and xt.
for(int i = 0; i< N; i++){
int cnet_id = toLayerId[2 * ct[i] + (int)xt[i]];
ct[i] = cnet_c[ cnet_id ];
xt[i] = cnet_x[ cnet_id ];
}
// Compute the quality
double Qt = 0; vector<double> qt;
_calc_Q_modmat(cnet_M, cnet_c, cnet_x, Qt, qt);
if(Qt>=Qbest){ // if the quality is the highest among those detected so far
c = ct;
x = xt;
Qbest = Qt;
}
// Coarsing
vector<vector<double>> new_cnet_M;
_coarsing(cnet_M, cnet_c, cnet_x, new_cnet_M, toLayerId);
cnet_M = new_cnet_M;
// Stop once coarsening reaches a fixed point (no further shrinkage).
int sz = (int) cnet_M.size();
if(sz == cnet_N) break;
}while( true );
_relabeling(c);
}
// Run the Louvain procedure num_of_runs times (in parallel when OpenMP is
// enabled) and keep the best result in (c, x, Q, q).
// NOTE(review): the inner copy loop reuses `i`, shadowing the parallel
// loop index — legal but easy to misread.  The `#pragma omp critical` here
// is not wrapped in #ifdef _OPENMP like the others; harmless, since
// unknown pragmas are ignored when OpenMP is off.
void KM_modmat::_km_modmat_louvain(
const vector<vector<double>>& M,
const int num_of_runs,
vector<int>& c,
vector<double>& x,
double& Q,
vector<double>& q,
mt19937_64& mtrnd
)
{
int N = (int) M.size();
c.clear();
x.clear();
c.assign(N, 0);
x.assign(N, 1.0);
/* Generate \hat q^{(s)} and \hat n^{(s)} (1 \leq s \leq S) */
// create random number generator per each thread
int numthread = 1;
#ifdef _OPENMP
# pragma omp parallel
{
numthread = omp_get_num_threads();
}
#endif
vector<mt19937_64> mtrnd_list(numthread);
for(int i = 0; i < numthread; i++){
mt19937_64 mtrnd = _init_random_number_generator();
mtrnd_list[i] = mtrnd;
}
Q = -1;
#ifdef _OPENMP
#pragma omp parallel for shared(c, x, Q, q, N, M, mtrnd_list)
#endif
for (int i = 0; i < num_of_runs; i++) {
vector<int> ci;
vector<double> xi;
vector<double> qi;
double Qi = 0.0;
int tid = 0;
#ifdef _OPENMP
tid = omp_get_thread_num();
#endif
mt19937_64 mtrnd = mtrnd_list[tid];
_km_modmat_louvain_core(M, ci, xi, mtrnd);
_calc_Q_modmat(M, ci, xi, Qi, qi);
// Best-so-far update must be serialized across threads.
#pragma omp critical
{
if (Qi > Q) {
for(int i = 0; i < N; i++){
c[i] = ci[i];
x[i] = xi[i];
}
q.clear();
int K = (int) qi.size();
vector<double> tmp(K,0.0);
q = tmp;
for(int k = 0; k < K; k++){
q[k] = qi[k];
}
Q = Qi;
}
}
}
}
|
pi_estimator.c | // Paul Valdez & Benjamin Hellwig
// November 13th 2019
// CPTS 411 Introduction to Parallel Computing
//
#ifndef _GNU_SOURCE
#define _GNU_SOURCE	/* expose srand48_r()/drand48_r() prototypes under strict -std modes */
#endif
#include <assert.h>
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>	/* getpid() — was missing; code relied on an implicit declaration */
/*
 * Monte-Carlo estimation of pi.
 *
 * Usage: pi_estimator {number of iterations} [number of threads]
 *
 * Draws `loops` uniform points in the unit square and counts those inside
 * the inscribed circle of radius 0.5; pi ~= 4 * hits / loops.  Appends the
 * estimate to pi_results.txt and the wall time to time_results.txt.
 *
 * Fixes: getpid() previously relied on an implicit declaration (see the
 * added <unistd.h> include), and the fopen() results were used unchecked.
 */
int main(int argc, char *argv[])
{
	long long int i, loops;

	// loop {number of iterations} [number of threads]
	if(argc<2) {
		exit(1);
	}
	loops = atoll(argv[1]);
	int p=1;
	if(argc==3) {
		p = atoi(argv[2]);
		assert(p>=1);
	}
	/* Pin the team size: disable dynamic adjustment, then request p threads. */
	omp_set_dynamic(0);
	omp_set_num_threads(p);
#pragma omp parallel
	{
		assert(p==omp_get_num_threads());
		int rank = omp_get_thread_num();
		(void)rank;	/* only used by debug prints; silence -Wunused */
	} // end of my omp parallel region

	long long int hits = 0;
	struct drand48_data buf;	/* reentrant per-thread RNG state */
	long long int seed;
	double x, y;

	double time_s = omp_get_wtime();
	/* Each thread gets private RNG state; hits is combined by the reduction.
	   NOTE: reseeding every iteration is statistically weak but kept for
	   result compatibility with the original implementation. */
#pragma omp parallel for schedule(static) private(buf, x, y, i, seed) reduction(+:hits)
	for(i = 0; i < loops; i++)
	{
		seed = (omp_get_thread_num() + 1) * omp_get_wtime();
		seed = seed ^ i ^ getpid() ^ time(NULL);
		srand48_r(seed, &buf);
		drand48_r(&buf, &x);
		drand48_r(&buf, &y);
		if (sqrt((x - 0.5)*(x - 0.5) + (y - 0.5)*(y - 0.5)) <= 0.5){
			hits++;
		}
	}
	time_s = omp_get_wtime() - time_s;

	/* Append results; both files accumulate a comma-separated series. */
	FILE *pi_output, *time_output;
	pi_output = fopen("pi_results.txt", "a");
	time_output = fopen("time_results.txt", "a");
	if (!pi_output || !time_output) {
		perror("fopen");
		if (pi_output)
			fclose(pi_output);
		if (time_output)
			fclose(time_output);
		return 1;
	}
	fprintf(pi_output, "%.20f,", (hits/(double) loops) * 4);
	fprintf(time_output, "%f,", time_s);
	fclose(time_output);
	fclose(pi_output);
	return 0;
}
|
quantize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods takes a 24 bit image and reduces
% the number of colors so it can be displayed on raster device with less
% bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes a standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinate of two opposite vertices (vertex
% nearest the origin in RGB space and the vertex farthest from the origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image.  Reduction collapses the tree until the number of
% colors it represents is, at most, the number of colors desired in the
% output image.
% Assignment defines the output image's color map and sets each pixel's
% color by restorage_class in the reduced tree. Our goal is to minimize
% the numerical discrepancies between the original colors and quantized
% colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the storage_class phase for realistic values of Cmax. If
% colors components in the input image are quantized to k-bit precision,
% so that Cmax= 2k-1, the tree would need k levels below the root node to
% allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is 1 +
% sum(i=1, k, 8k).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, storage_class scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of pixels
% represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except a the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/histogram.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
Typdef declarations.
*/
/*
  One node of the color description tree.  Each node represents a cube in
  RGB(A) space; children subdivide that cube by one bit of each channel
  (8 children without associated alpha, 16 with).
*/
typedef struct _NodeInfo
{
  struct _NodeInfo
    *parent,         /* node one level up; traversed by ClosestColor() */
    *child[16];      /* sub-cubes; only 8 slots used without alpha */
  MagickSizeType
    number_unique;   /* pixels classified no deeper than this node ("n2") */
  DoublePixelPacket
    total_color;     /* per-channel sums (Sr,Sg,Sb,...) for the mean color */
  MagickRealType
    quantize_error;  /* accumulated distance of pixels from cube center */
  size_t
    color_number,    /* colormap index assigned by DefineImageColormap() */
    id,              /* child-slot index of this node within its parent */
    level;           /* tree depth at which this node lives */
} NodeInfo;
/*
  One slab in the node allocation list: a contiguous array of NodeInfo
  structures (presumably NodesInAList entries -- the allocator is not in
  this chunk) plus a link to the previously allocated slab, so that
  DestroyCubeInfo() can release all nodes in bulk.
*/
typedef struct _Nodes
{
  NodeInfo
    *nodes;  /* slab of NodeInfo entries */
  struct _Nodes
    *next;   /* previously allocated slab (singly linked list) */
} Nodes;
/*
  State for one quantization run: the color description tree plus scratch
  data used while classifying, pruning, assigning, and dithering.
*/
typedef struct _CubeInfo
{
  NodeInfo
    *root;               /* top of the color description tree */
  size_t
    colors,              /* leaf colors currently represented by the tree */
    maximum_colors;      /* requested colormap size; triggers pruning */
  ssize_t
    transparent_index;   /* colormap slot of most-transparent entry, or -1 */
  MagickSizeType
    transparent_pixels;  /* population of that most-transparent node */
  DoublePixelPacket
    target;              /* color currently being matched (ClosestColor) */
  MagickRealType
    distance,            /* best squared distance found by ClosestColor() */
    pruning_threshold,   /* used by the prune methods (not in this chunk) */
    next_threshold;
  size_t
    nodes,               /* total nodes allocated; compared to MaxNodes */
    free_nodes,          /* slab bookkeeping (allocator not in this chunk) */
    color_number;        /* colormap index of the closest color found */
  NodeInfo
    *next_node;          /* next free slab entry -- TODO confirm; allocator
                            is outside this chunk */
  Nodes
    *node_queue;         /* list of node slabs, freed by DestroyCubeInfo() */
  MemoryInfo
    *memory_info;        /* backing store for the cache below */
  ssize_t
    *cache;              /* memoized color -> colormap index, indexed by
                            CacheOffset() (used while dithering) */
  DoublePixelPacket
    error[ErrorQueueLength];    /* recent quantization errors (dithering) */
  MagickRealType
    weights[ErrorQueueLength];  /* diffusion weight per queued error */
  QuantizeInfo
    *quantize_info;      /* owned copy; released by DestroyCubeInfo() */
  MagickBooleanType
    associate_alpha;     /* MagickTrue: 16-way tree including opacity */
  ssize_t
    x,
    y;                   /* current position -- used by dither code below */
  size_t
    depth;               /* current maximum tree depth; decremented when
                            PruneLevel() collapses a level */
  MagickOffsetType
    offset;              /* progress bookkeeping -- users not in this chunk */
  MagickSizeType
    span;                /* total work units for progress reporting */
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *),
SetGrayscaleImage(Image *);
static size_t
DefineImageColormap(Image *,CubeInfo *,NodeInfo *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  const char
    *dither_option;

  QuantizeInfo
    *info;

  /*
    Allocate a QuantizeInfo structure and seed it with defaults; allocation
    failure is fatal.
  */
  info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*info));
  if (info == (QuantizeInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetQuantizeInfo(info);
  if (image_info == (ImageInfo *) NULL)
    return(info);
  /*
    Inherit dithering and verbosity settings from the caller's image info.
  */
  info->dither=image_info->dither;
  info->measure_error=image_info->verbose;
  dither_option=GetImageOption(image_info,"dither");
  if (dither_option != (const char *) NULL)
    info->dither_method=(DitherMethod) ParseCommandOption(MagickDitherOptions,
      MagickFalse,dither_option);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static inline void AssociateAlphaPixel(const CubeInfo *cube_info,
  const PixelPacket *pixel,DoublePixelPacket *alpha_pixel)
{
  /*
    Load a pixel into a DoublePixelPacket, premultiplying the color
    channels by normalized alpha when the cube associates alpha.  Fully
    opaque pixels (and cubes that ignore alpha) are copied through
    unscaled.
  */
  alpha_pixel->index=0;
  if ((cube_info->associate_alpha != MagickFalse) &&
      (pixel->opacity != OpaqueOpacity))
    {
      MagickRealType
        alpha;

      alpha=(MagickRealType) (QuantumScale*(QuantumRange-
        GetPixelOpacity(pixel)));
      alpha_pixel->red=alpha*GetPixelRed(pixel);
      alpha_pixel->green=alpha*GetPixelGreen(pixel);
      alpha_pixel->blue=alpha*GetPixelBlue(pixel);
      alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
      return;
    }
  alpha_pixel->red=(MagickRealType) GetPixelRed(pixel);
  alpha_pixel->green=(MagickRealType) GetPixelGreen(pixel);
  alpha_pixel->blue=(MagickRealType) GetPixelBlue(pixel);
  alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
}
/*
  ColorToNodeId() extracts bit `index' of each color channel and packs the
  bits into a child-slot index: red -> bit 0, green -> bit 1, blue -> bit 2,
  and, when alpha is associated, opacity -> bit 3.  The result selects which
  of a node's (8 or 16) children contains the pixel's color at this level.
*/
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const DoublePixelPacket *pixel,size_t index)
{
  size_t
    id;

  id=(size_t) (((ScaleQuantumToChar(ClampPixel(GetPixelRed(pixel))) >> index) &
    0x01) | ((ScaleQuantumToChar(ClampPixel(GetPixelGreen(pixel))) >> index) &
    0x01) << 1 | ((ScaleQuantumToChar(ClampPixel(GetPixelBlue(pixel))) >>
    index) & 0x01) << 2);
  if (cube_info->associate_alpha != MagickFalse)
    id|=((ScaleQuantumToChar(ClampPixel(GetPixelOpacity(pixel))) >> index) &
      0x1) << 3;
  return(id);
}
static inline MagickBooleanType IsSameColor(const Image *image,
  const PixelPacket *p,const PixelPacket *q)
{
  /*
    Return MagickTrue when two pixels are identical: equal RGB components
    and, for matte images, an equal opacity channel as well.
  */
  if ((GetPixelRed(p) == GetPixelRed(q)) &&
      (GetPixelGreen(p) == GetPixelGreen(q)) &&
      (GetPixelBlue(p) == GetPixelBlue(q)))
    {
      if (image->matte == MagickFalse)
        return(MagickTrue);
      if (GetPixelOpacity(p) == GetPixelOpacity(q))
        return(MagickTrue);
    }
  return(MagickFalse);
}
/*
  AssignImageColors() maps every pixel of the image onto the colormap built
  from the pruned color tree: it defines the colormap, then either dithers
  the image or replaces each run of identical pixels with its closest
  colormap entry.  Returns MagickTrue.
*/
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
{
#define AssignImageTag "Assign/Image"

  ColorspaceType
    colorspace;  /* original colorspace, restored before returning */

  ssize_t
    y;

  /*
    Allocate image colormap.
  */
  colorspace=image->colorspace;
  if (cube_info->quantize_info->colorspace != UndefinedColorspace)
    (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace);
  if (AcquireImageColormap(image,cube_info->colors) == MagickFalse)
    ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* DefineImageColormap() recounts image->colors as it fills the map. */
  image->colors=0;
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  (void) DefineImageColormap(image,cube_info,cube_info->root);
  /*
    Create a reduced color image.
  */
  if ((cube_info->quantize_info->dither != MagickFalse) &&
      (cube_info->quantize_info->dither_method != NoDitherMethod))
    (void) DitherImage(image,cube_info);
  else
    {
      CacheView
        *image_view;

      ExceptionInfo
        *exception;

      MagickBooleanType
        status;

      status=MagickTrue;
      exception=(&image->exception);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CubeInfo
          cube;  /* private per-row copy: rows may run on separate threads */

        register IndexPacket
          *magick_restrict indexes;

        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        ssize_t
          count;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        cube=(*cube_info);
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          DoublePixelPacket
            pixel;

          register const NodeInfo
            *node_info;

          register ssize_t
            i;

          size_t
            id,
            index;

          /*
            Identify the deepest node containing the pixel's color.  Count
            the run of identical adjacent pixels first so the tree search
            and closest-color scan run once per run, not once per pixel.
          */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
            if (IsSameColor(image,q,q+count) == MagickFalse)
              break;
          AssociateAlphaPixel(&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*
            (QuantumRange+1.0)+1.0);  /* exceeds any attainable distance */
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          /* Write the winning colormap index (and color) to every pixel
             of the run. */
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(indexes+x+i,index);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRgb(q,image->colormap+index);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelOpacity(q,image->colormap[index].opacity);
              }
            q++;
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image);
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    {
      double
        intensity;

      /*
        Monochrome image: force the two colormap entries to pure black and
        white, keeping the darker entry dark.
      */
      intensity=GetPixelLuma(image,image->colormap+0) < QuantumRange/2.0 ? 0.0 :
        QuantumRange;
      if ((image->colors > 1) &&
          (GetPixelLuma(image,image->colormap+0) >
           GetPixelLuma(image,image->colormap+1)))
        intensity=(double) QuantumRange;
      image->colormap[0].red=intensity;
      image->colormap[0].green=intensity;
      image->colormap[0].blue=intensity;
      if (image->colors > 1)
        {
          image->colormap[1].red=(double) QuantumRange-intensity;
          image->colormap[1].green=(double) QuantumRange-intensity;
          image->colormap[1].blue=(double) QuantumRange-intensity;
        }
    }
  (void) SyncImage(image);
  /* Restore the caller's colorspace if we transformed it above. */
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (IssRGBCompatibleColorspace(colorspace) == MagickFalse))
    (void) TransformImageColorspace(image,colorspace);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the storage_class phase for realistic values of
% Cmax. If colors components in the input image are quantized to k-bit
% precision, so that Cmax= 2k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1,k,8k).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, storage_class scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color.  It updates the following data for each
% such node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  /*
    Decide whether the color cube should carry an associated alpha channel:
    follow the image's matte flag, except for two-color grayscale requests,
    which are always treated as opaque.
  */
  cube_info->associate_alpha=image->matte;
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    cube_info->associate_alpha=MagickFalse;
}
/*
  ClassifyImageColors() builds the color description tree: the first pass
  classifies pixels to the full MaxTreeDepth until the tree exceeds the
  color budget, after which a second pass continues at the (possibly
  pruned) cube depth.  Returns MagickTrue unless a row read or progress
  callback aborted the scan.
*/
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"

  CacheView
    *image_view;

  DoublePixelPacket
    error,     /* pixel's offset from the current cube's center */
    mid,       /* center of the cube at the current tree level */
    midpoint,  /* center of the whole color cube */
    pixel;

  MagickBooleanType
    proceed;

  MagickRealType
    bisect;    /* half the edge length of the current cube */

  NodeInfo
    *node_info;

  size_t
    count,     /* identical adjacent pixels, processed as one run */
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
  */
  SetAssociatedAlpha(image,cube_info);
  if (cube_info->quantize_info->colorspace != image->colorspace)
    {
      if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
          (cube_info->quantize_info->colorspace != CMYKColorspace))
        (void) TransformImageColorspace((Image *) image,
          cube_info->quantize_info->colorspace);
      else
        if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
          (void) TransformImageColorspace((Image *) image,sRGBColorspace);
    }
  midpoint.red=(MagickRealType) QuantumRange/2.0;
  midpoint.green=(MagickRealType) QuantumRange/2.0;
  midpoint.blue=(MagickRealType) QuantumRange/2.0;
  midpoint.opacity=(MagickRealType) QuantumRange/2.0;
  midpoint.index=(MagickRealType) QuantumRange/2.0;
  error.opacity=0.0;  /* only updated below when alpha is associated */
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.  Identical
        adjacent pixels are batched into a single weighted update.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
        if (IsSameColor(image,p,p+count) == MagickFalse)
          break;
      AssociateAlphaPixel(cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((MagickRealType) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        double
          distance;

        /* Track the center of the ever-smaller cube we descend into. */
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.opacity*error.opacity);
        if (IsNaN(distance) != MagickFalse)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.opacity+=count*QuantumScale*
          ClampPixel(pixel.opacity);
      else
        node_info->total_color.opacity+=count*QuantumScale*
          ClampPixel(OpaqueOpacity);
      p+=count;
    }
    if (cube_info->colors > cube_info->maximum_colors)
      {
        /* Budget exceeded: collapse to the cube depth and switch to the
           shallower classification pass below. */
        PruneToCubeDepth(cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Classify any remaining rows, now only to cube_info->depth (which may
    shrink further as pruning continues).
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
        if (IsSameColor(image,p,p+count) == MagickFalse)
          break;
      AssociateAlphaPixel(cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((MagickRealType) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","%s",
                  image->filename);
                continue;
              }
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.opacity*error.opacity);
        if (IsNaN(distance))
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.opacity+=count*QuantumScale*ClampPixel(
          pixel.opacity);
      else
        node_info->total_color.opacity+=count*QuantumScale*
          ClampPixel(OpaqueOpacity);
      p+=count;
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  /* Undo the temporary colorspace transform applied at the top. */
  if (cube_info->quantize_info->colorspace != image->colorspace)
    if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
        (cube_info->quantize_info->colorspace != CMYKColorspace))
      (void) TransformImageColorspace((Image *) image,sRGBColorspace);
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if image info is NULL a new one.
%
% o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone;

  /*
    Allocate a fresh QuantizeInfo seeded with defaults, then copy the
    caller's settings over the defaults when a source structure was given.
  */
  clone=(QuantizeInfo *) AcquireMagickMemory(sizeof(*clone));
  if (clone == (QuantizeInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetQuantizeInfo(clone);
  if (quantize_info != (QuantizeInfo *) NULL)
    {
      clone->number_colors=quantize_info->number_colors;
      clone->tree_depth=quantize_info->tree_depth;
      clone->dither=quantize_info->dither;
      clone->dither_method=quantize_info->dither_method;
      clone->colorspace=quantize_info->colorspace;
      clone->measure_error=quantize_info->measure_error;
    }
  return(clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
/*
  ClosestColor() recursively visits the subtree rooted at node_info and, for
  every node holding a colormap entry (number_unique != 0), compares that
  entry against cube_info->target, recording the best match in
  cube_info->distance and cube_info->color_number.
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      MagickRealType
        pixel;  /* per-channel difference, reused for each component */

      register DoublePixelPacket
        *magick_restrict q;

      register MagickRealType
        alpha,
        beta,
        distance;

      register PixelPacket
        *magick_restrict p;

      /*
        Determine if this color is "closest".  The squared distance is
        accumulated channel by channel, bailing out as soon as it exceeds
        the best distance found so far.
      */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      alpha=1.0;
      beta=1.0;
      if (cube_info->associate_alpha != MagickFalse)
        {
          /* Compare alpha-weighted color components. */
          alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
          beta=(MagickRealType) (QuantumScale*GetPixelAlpha(q));
        }
      pixel=alpha*GetPixelRed(p)-beta*GetPixelRed(q);
      distance=pixel*pixel;
      if (distance <= cube_info->distance)
        {
          pixel=alpha*GetPixelGreen(p)-beta*GetPixelGreen(q);
          distance+=pixel*pixel;
          if (distance <= cube_info->distance)
            {
              pixel=alpha*GetPixelBlue(p)-beta*GetPixelBlue(q);
              distance+=pixel*pixel;
              if (distance <= cube_info->distance)
                {
                  if (cube_info->associate_alpha != MagickFalse)
                    {
                      pixel=GetPixelAlpha(p)-GetPixelAlpha(q);
                      distance+=pixel*pixel;
                    }
                  if (distance <= cube_info->distance)
                    {
                      /* New best match: remember it in the cube. */
                      cube_info->distance=distance;
                      cube_info->color_number=node_info->color_number;
                    }
                }
            }
        }
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image)
{
  QuantizeInfo
    compress_info;

  /*
    Re-quantize a palette image against its own color count, which folds
    duplicate and unused colormap entries together.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsPaletteImage(image,&image->exception) == MagickFalse)
    return(MagickFalse);  /* not a palette image: nothing to compress */
  GetQuantizeInfo(&compress_info);
  compress_info.number_colors=image->colors;
  compress_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&compress_info,image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry.  A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero.  DefineImageColormap() returns the
% number of colors in the image colormap.
%
% The format of the DefineImageColormap method is:
%
% size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
/*
  DefineImageColormap() walks the pruned tree depth-first and appends one
  colormap entry (the mean color of the node's pixels) for every node with
  number_unique != 0, assigning each node its colormap index.  Returns the
  resulting number of colormap entries.
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      (void) DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register MagickRealType
        alpha;  /* reciprocal of the pixel count: converts sums to means */

      register PixelPacket
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube.
      */
      q=image->colormap+image->colors;
      alpha=(MagickRealType) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.red)));
          SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.green)));
          SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.blue)));
          SetPixelOpacity(q,OpaqueOpacity);
        }
      else
        {
          MagickRealType
            opacity;

          opacity=(MagickRealType) (alpha*QuantumRange*
            node_info->total_color.opacity);
          SetPixelOpacity(q,ClampToQuantum(opacity));
          if (q->opacity == OpaqueOpacity)
            {
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.blue)));
            }
          else
            {
              double
                gamma;  /* undoes the alpha premultiplication of the sums */

              gamma=(double) (QuantumScale*(QuantumRange-(double) q->opacity));
              gamma=PerceptibleReciprocal(gamma);
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.blue)));
              /* Track the most-populated translucent entry as the image's
                 transparent color index. */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
  return(image->colors);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with an image.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  register Nodes
    *next_queue;

  /*
    Release color cube tree storage: walk the slab list freeing each slab
    of NodeInfo structures and its list link, then the dither cache, the
    owned quantize info, and finally the cube itself.
  */
  do
  {
    next_queue=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=next_queue;
  } while (cube_info->node_queue != (Nodes *) NULL);
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Invalidate the signature before releasing the structure so stale
    pointers fail the MagickCoreSignature assertions elsewhere; returns
    NULL for the caller to store back.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  quantize_info->signature=(~MagickCoreSignature);
  return((QuantizeInfo *) RelinquishMagickMemory(quantize_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
% the corresponding color reduced algorithm to neighboring pixels using
% serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
% MagickTrue if the image is dithered otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
  register ssize_t
    i;

  ssize_t
    number_threads;

  /*
    Release each per-thread error buffer, then the pointer table itself;
    always returns NULL so callers can clear their handle in one step.
  */
  assert(pixels != (DoublePixelPacket **) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (pixels[i] != (DoublePixelPacket *) NULL)
      pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
  return((DoublePixelPacket **) RelinquishMagickMemory(pixels));
}
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
  DoublePixelPacket
    **pixels;

  size_t
    number_threads,
    thread;

  /*
    Allocate one zero-initialized table slot per worker thread; each slot
    holds two scanlines (current and previous) of `count' error pixels.
    On any allocation failure the whole set is torn down and NULL returned.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (DoublePixelPacket **) NULL)
    return((DoublePixelPacket **) NULL);
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  for (thread=0; thread < number_threads; thread++)
  {
    pixels[thread]=(DoublePixelPacket *) AcquireQuantumMemory(count,
      2*sizeof(**pixels));
    if (pixels[thread] == (DoublePixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
  /*
    Map a pixel to its color-cache slot: keep the top (8-CacheShift) bits of
    each 8-bit channel and pack the channels into disjoint bit fields of the
    returned index (red in the lowest field, then green, blue, and alpha).
    Alpha participates only when the cube associates alpha.
  */
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    offset;

  offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->opacity)));
  return(offset);
}
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  const char
    *artifact;

  double
    amount;

  DoublePixelPacket
    **pixels;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Distribute quantization error using serpentine-scan Floyd-Steinberg.
    Returns MagickFalse if the error buffers cannot be allocated or a
    scanline cannot be read/synced (previously the function always
    returned MagickTrue, discarding `status').
  */
  pixels=AcquirePixelThreadSet(image->columns);
  if (pixels == (DoublePixelPacket **) NULL)
    return(MagickFalse);
  exception=(&image->exception);
  status=MagickTrue;
  amount=1.0;
  artifact=GetImageArtifact(image,"dither:diffusion-amount");
  if (artifact != (const char *) NULL)
    amount=StringToDoubleInterval(artifact,1.0);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* indexes per-thread buffers; with no
        enclosing parallel region this is 0 -- TODO confirm intended */

    CubeInfo
      cube;

    DoublePixelPacket
      *current,
      *previous;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    size_t
      index;

    ssize_t
      v;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    cube=(*cube_info);
    /*
      Alternate the two scanline error buffers and the scan direction on
      odd/even rows (serpentine traversal); v is the step direction.
    */
    current=pixels[id]+(y & 0x01)*image->columns;
    previous=pixels[id]+((y+1) & 0x01)*image->columns;
    v=(ssize_t) ((y & 0x01) ? -1 : 1);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        color,
        pixel;

      register ssize_t
        i;

      ssize_t
        u;

      u=(y & 0x01) ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(&cube,q+u,&pixel);
      /*
        Fold in the neighbors' diffused error with the classic 7/5/3/1
        sixteenths Floyd-Steinberg weights, scaled by `amount'.
      */
      if (x > 0)
        {
          pixel.red+=7.0*amount*current[u-v].red/16;
          pixel.green+=7.0*amount*current[u-v].green/16;
          pixel.blue+=7.0*amount*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.opacity+=7.0*amount*current[u-v].opacity/16;
        }
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              pixel.red+=previous[u+v].red/16;
              pixel.green+=previous[u+v].green/16;
              pixel.blue+=previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.opacity+=previous[u+v].opacity/16;
            }
          pixel.red+=5.0*amount*previous[u].red/16;
          pixel.green+=5.0*amount*previous[u].green/16;
          pixel.blue+=5.0*amount*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.opacity+=5.0*amount*previous[u].opacity/16;
          if (x > 0)
            {
              pixel.red+=3.0*amount*previous[u-v].red/16;
              pixel.green+=3.0*amount*previous[u-v].green/16;
              pixel.blue+=3.0*amount*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.opacity+=3.0*amount*previous[u-v].opacity/16;
            }
        }
      pixel.red=(MagickRealType) ClampPixel(pixel.red);
      pixel.green=(MagickRealType) ClampPixel(pixel.green);
      pixel.blue=(MagickRealType) ClampPixel(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity);
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            node_id;  /* renamed: no longer shadows the thread `id' above */

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            node_id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[node_id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[node_id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*(QuantumRange+
            1.0)+1.0);
          ClosestColor(image,&cube,node_info->parent);
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(indexes+u,index);
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRgb(q+u,image->colormap+index);
          if (cube.associate_alpha != MagickFalse)
            SetPixelOpacity(q+u,image->colormap[index].opacity);
        }
      /*
        NOTE(review): sync and progress run once per pixel here, not per
        row -- correct but costly; confirm before hoisting out of the loop.
      */
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      /*
        Store the error.
      */
      AssociateAlphaPixel(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].opacity=pixel.opacity-color.opacity;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
            image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelThreadSet(pixels);
  return(status);
}
static MagickBooleanType
RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int);
static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info,
  const size_t level,const unsigned int direction)
{
  /*
    Recursively trace a Hilbert curve of the given level over the image,
    dithering one pixel per step via RiemersmaDither().  `direction' selects
    the curve orientation; each recursion stitches four sub-curves of
    level-1 together with three connecting steps.  The exact sequence of
    gravities per case defines the curve -- do not reorder.
  */
  if (level == 1)
    switch (direction)
    {
      /*
        Base case: emit the three steps of a first-order curve cell.
      */
      case WestGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        break;
      }
      case EastGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        break;
      }
      case NorthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        break;
      }
      case SouthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        break;
      }
      default:
        break;
    }
  else
    switch (direction)
    {
      /*
        Recursive case: four rotated/reflected sub-curves joined by steps.
      */
      case WestGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        break;
      }
      case EastGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        break;
      }
      case NorthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        break;
      }
      case SouthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,EastGravity);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
        Riemersma(image,image_view,cube_info,level-1,WestGravity);
        break;
      }
      default:
        break;
    }
}
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction)
{
#define DitherImageTag "Dither/Image"

  DoublePixelPacket
    color,
    pixel;

  MagickBooleanType
    proceed;

  register CubeInfo
    *p;

  size_t
    index;

  /*
    Dither the pixel at the cube's current (x,y) position -- when it lies
    inside the image -- then advance the position one step along the
    Hilbert curve in `direction'.  Returns MagickFalse on pixel-cache
    failure or when the progress monitor cancels.
  */
  p=cube_info;
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      ExceptionInfo
        *exception;

      register IndexPacket
        *magick_restrict indexes;

      register PixelPacket
        *magick_restrict q;

      register ssize_t
        i;

      /*
        Distribute error: fold the weighted error queue into this pixel.
      */
      exception=(&image->exception);
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (PixelPacket *) NULL)
        return(MagickFalse);
      indexes=GetCacheViewAuthenticIndexQueue(image_view);
      AssociateAlphaPixel(cube_info,q,&pixel);
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=p->weights[i]*p->error[i].red;
        pixel.green+=p->weights[i]*p->error[i].green;
        pixel.blue+=p->weights[i]*p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.opacity+=p->weights[i]*p->error[i].opacity;
      }
      pixel.red=(MagickRealType) ClampPixel(pixel.red);
      pixel.green=(MagickRealType) ClampPixel(pixel.green);
      pixel.blue=(MagickRealType) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity);
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          p->distance=(MagickRealType) (4.0*(QuantumRange+1.0)*((MagickRealType)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry (dropped a pointless 1*).
      */
      index=(size_t) p->cache[i];
      if (image->storage_class == PseudoClass)
        *indexes=(IndexPacket) index;
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRgb(q,image->colormap+index);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelOpacity(q,image->colormap[index].opacity);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue.
      */
      (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixel(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].opacity=pixel.opacity-color.opacity;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
    default: break;  /* e.g. ForgetGravity: dither in place, no step */
  }
  return(MagickTrue);
}
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  size_t
    depth;

  ssize_t
    extent;

  /*
    Dispatch to Floyd-Steinberg unless a Riemersma dither was requested.
  */
  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info));
  /*
    Distribute quantization error along a Hilbert curve: derive the curve
    depth from the longer image dimension.
  */
  (void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  extent=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
  depth=1;
  while (extent != 0)
  {
    extent>>=1;
    depth++;
  }
  if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t) image->rows))
    depth++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,&image->exception);
  if (depth > 1)
    Riemersma(image,image_view,cube_info,depth-1,NorthGravity);
  status=RiemersmaDither(image,image_view,cube_info,ForgetGravity);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initializes the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
% CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose an optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a small number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
  const size_t depth,const size_t maximum_colors)
{
  CubeInfo
    *cube_info;

  MagickRealType
    sum,
    weight;

  register ssize_t
    i;

  size_t
    length;

  /*
    Initialize tree to describe color cube_info.
  */
  cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
  if (cube_info == (CubeInfo *) NULL)
    return((CubeInfo *) NULL);
  (void) memset(cube_info,0,sizeof(*cube_info));
  /*
    Clamp the requested tree depth to [2, MaxTreeDepth].
  */
  cube_info->depth=depth;
  if (cube_info->depth > MaxTreeDepth)
    cube_info->depth=MaxTreeDepth;
  if (cube_info->depth < 2)
    cube_info->depth=2;
  cube_info->maximum_colors=maximum_colors;
  /*
    Initialize root node.
  */
  cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
  if (cube_info->root == (NodeInfo *) NULL)
    /* NOTE(review): cube_info leaks on this path -- a proper fix needs
       DestroyCubeInfo (defined elsewhere); confirm callers treat NULL as
       fatal before relying on process exit to reclaim it. */
    return((CubeInfo *) NULL);
  cube_info->root->parent=cube_info->root;
  cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
  if (cube_info->quantize_info->dither == MagickFalse)
    return(cube_info);
  /*
    Initialize dither resources: one cache slot per packed channel index
    (see CacheOffset); 4 channels x (8-CacheShift) bits each.
  */
  length=(size_t) (1UL << (4*(8-CacheShift)));
  cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
  if (cube_info->memory_info == (MemoryInfo *) NULL)
    /* NOTE(review): cube_info and its clone/node queue also leak here. */
    return((CubeInfo *) NULL);
  cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
  /*
    Initialize color cache: all-bits-set bytes make every slot negative,
    i.e. "not yet computed".
  */
  (void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
  /*
    Distribute weights along a curve of exponential decay.
  */
  weight=1.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
    weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
  }
  /*
    Normalize the weighting factors so they sum to exactly 1.0; any
    floating-point remainder is folded into the first weight.
  */
  weight=0.0;
  for (i=0; i < ErrorQueueLength; i++)
    weight+=cube_info->weights[i];
  sum=0.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[i]/=weight;
    sum+=cube_info->weights[i];
  }
  cube_info->weights[0]+=1.0-sum;
  return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level in the storage_class the node resides.
%
% o parent: the parent node in the color cube tree, or NULL for the root.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  /*
    Pop the next node from the preallocated queue, replenishing the queue
    with a fresh bank of NodesInAList nodes when it runs dry.  Returns
    NULL on allocation failure.
  */
  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        {
          /*
            Free the queue header as well; returning here used to leak it.
          */
          nodes=(Nodes *) RelinquishMagickMemory(nodes);
          return((NodeInfo *) NULL);
        }
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) memset(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
% o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image)
%
% A description of each parameter follows.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  IndexPacket
    *indexes;

  MagickRealType
    alpha,
    area,
    beta,
    distance,
    gamma,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    index,
    y;

  /*
    Measure the per-channel distance between each pixel and its colormap
    entry, accumulating mean, mean-square, and maximum error into
    image->error.  Only meaningful for PseudoClass images; DirectClass
    images report zero error.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,&image->exception);
  (void) memset(&image->error,0,sizeof(image->error));
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  /* Three channel samples per pixel are accumulated below. */
  area=3.0*image->columns*image->rows;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  exception=(&image->exception);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=(ssize_t) GetPixelIndex(indexes+x);
      /*
        When the image has a matte channel, weight each side of the
        comparison by its respective alpha before differencing.
      */
      if (image->matte != MagickFalse)
        {
          alpha=(MagickRealType) (QuantumScale*(GetPixelAlpha(p)));
          beta=(MagickRealType) (QuantumScale*(QuantumRange-
            image->colormap[index].opacity));
        }
      distance=fabs((double) (alpha*GetPixelRed(p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Normalize the accumulated sums into the documented [0,1] metrics.
  */
  gamma=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=gamma*mean_error_per_pixel;
  image->error.normalized_mean_error=gamma*QuantumScale*QuantumScale*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Reset a caller-supplied QuantizeInfo to its documented defaults:
    256 colors, Riemersma dithering enabled, undefined colorspace, and
    no error measurement.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  (void) memset(quantize_info,0,sizeof(*quantize_info));
  quantize_info->signature=MagickCoreSignature;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->number_colors=256;
  quantize_info->dither=MagickTrue;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->measure_error=MagickFalse;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const MagickBooleanType dither)
% MagickBooleanType PosterizeImageChannel(Image *image,
% const ChannelType channel,const size_t levels,
% const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither: Set this integer value to something other than zero to dither
% the mapped image.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const MagickBooleanType dither)
{
  /*
    Convenience wrapper: posterize all default channels.
  */
  return(PosterizeImageChannel(image,DefaultChannels,levels,dither));
}
MagickExport MagickBooleanType PosterizeImageChannel(Image *image,
  const ChannelType channel,const size_t levels,const MagickBooleanType dither)
{
#define PosterizeImageTag "Posterize/Image"
/*
  Snap a quantum to the nearest of `levels' evenly spaced values.
  MagickMax guards against division by zero when levels <= 1.
*/
#define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \
MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    For PseudoClass images, posterize the colormap entries first.
    (The following for-loop is the body of this if; the OpenMP pragma
    sits between them by design.)
  */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->colors,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=PosterizePixel(image->colormap[i].red);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=PosterizePixel(image->colormap[i].green);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=PosterizePixel(image->colormap[i].blue);
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=PosterizePixel(image->colormap[i].opacity);
    }
  /*
    Posterize image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,PosterizePixel(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,PosterizePixel(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,PosterizePixel(GetPixelBlue(q)));
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        SetPixelOpacity(q,PosterizePixel(GetPixelOpacity(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,PosterizePixel(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          NOTE(review): the increment is atomic but the subsequent read of
          `progress' is not synchronized -- the reported count may be
          slightly stale; confirm this is acceptable (it matches the
          prevailing pattern in this file).
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Reduce to at most levels^3 colors via QuantizeImage.
    NOTE(review): `(ssize_t) levels*levels*levels' can overflow for very
    large `levels', and AcquireQuantizeInfo's result is not NULL-checked --
    confirm both are guarded upstream.
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither=dither;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
% The format of the PruneChild method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    child;

  /*
    Depth-first: prune all children before this node, then, while the tree
    is still over its color budget, fold this node's statistics into its
    parent and unlink it.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (child=0; child < (ssize_t) number_children; child++)
    if (node_info->child[child] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[child]);
  if (cube_info->nodes > cube_info->maximum_colors)
    {
      NodeInfo
        *parent;

      /*
        Merge color statistics into parent.
      */
      parent=node_info->parent;
      parent->number_unique+=node_info->number_unique;
      parent->total_color.red+=node_info->total_color.red;
      parent->total_color.green+=node_info->total_color.green;
      parent->total_color.blue+=node_info->total_color.blue;
      parent->total_color.opacity+=node_info->total_color.opacity;
      parent->child[node_info->id]=(NodeInfo *) NULL;
      cube_info->nodes--;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    child;

  /*
    Recurse to the leaves, then prune every node sitting at the cube's
    maximum depth, merging its statistics into its parent.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (child=0; child < (ssize_t) number_children; child++)
    if (node_info->child[child] != (NodeInfo *) NULL)
      PruneLevel(cube_info,node_info->child[child]);
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  /*
    Remove any node deeper than cube_info->depth, folding its color
    statistics into its parent.  Children are visited first so pruning
    proceeds from the leaves upward.
  */
  size_t
    fanout;

  ssize_t
    slot;

  fanout=(cube_info->associate_alpha == MagickFalse) ? 8UL : 16UL;
  for (slot=0; slot < (ssize_t) fanout; slot++)
    {
      const NodeInfo
        *child;

      child=node_info->child[slot];
      if (child != (NodeInfo *) NULL)
        PruneToCubeDepth(cube_info,child);
    }
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image)
{
  CubeInfo
    *cube_info;
  MagickBooleanType
    status;
  size_t
    depth,
    maximum_colors;
  /*
    Validate arguments: both structures must be live (correct signatures).
  */
  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Clamp the requested color count to (0, MaxColormapSize]; a request of 0
    means "use as many colors as the colormap allows".
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  /*
    An opaque grayscale image is handled by the specialized
    SetGrayscaleImage() path (defined later in this file).
  */
  if (image->matte == MagickFalse)
    {
      if (SetImageGray(image,&image->exception) != MagickFalse)
        (void) SetGrayscaleImage(image);
    }
  /*
    A tree_depth of 0 asks us to derive a depth from the colormap size.
  */
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;
      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      /* a shallower tree is used with dithering (guarded to stay > 1) */
      if ((quantize_info->dither != MagickFalse) && (depth > 2))
        depth--;
      /* ...and with an alpha channel, which doubles children per node */
      if ((image->matte != MagickFalse) && (depth > 5))
        depth--;
      if (SetImageGray(image,&image->exception) != MagickFalse)
        depth=MaxTreeDepth;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Classify this image's colors into the cube, reduce the cube to at most
    maximum_colors, then assign the surviving colors back to the pixels.
  */
  status=ClassifyImageColors(cube_info,image,&image->exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image.
      */
      if (cube_info->colors > cube_info->maximum_colors)
        ReduceImageColors(image,cube_info);
      status=AssignImageColors(image,cube_info);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images)
{
  CubeInfo
    *cube_info;
  Image
    *image;
  MagickBooleanType
    proceed,
    status;
  MagickProgressMonitor
    progress_monitor;
  register ssize_t
    i;
  size_t
    depth,
    maximum_colors,
    number_images;
  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
      */
      status=QuantizeImage(quantize_info,images);
      return(status);
    }
  status=MagickFalse;
  /*
    Clamp requested colors to (0, MaxColormapSize]; 0 means "maximum".
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;
      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      /* NOTE(review): unlike QuantizeImage this decrement is not guarded
         by (depth > 2); presumably intentional here — confirm upstream */
      if (quantize_info->dither != MagickFalse)
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(&images->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  /*
    Pass 1: classify the colors of every image in the list into one shared
    cube so the sequence gets a single global colormap.  The per-image
    progress monitor is suspended during classification and restored after.
  */
  number_images=GetImageListLength(images);
  image=images;
  for (i=0; image != (Image *) NULL; i++)
  {
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
      image->client_data);
    status=ClassifyImageColors(cube_info,image,&image->exception);
    if (status == MagickFalse)
      break;
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in an image sequence.
      */
      ReduceImageColors(images,cube_info);
      /*
        Pass 2: assign the reduced, shared colormap to each image.
      */
      image=images;
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info);
        if (status == MagickFalse)
          break;
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeErrorFlatten() traverses the color cube and flattens the quantization
% error into a sorted 1D array. This accelerates the color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% MagickRealType *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is current pointer.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
  const NodeInfo *node_info,const ssize_t offset,
  MagickRealType *quantize_error)
{
  /*
    Flatten the quantization error of this subtree into quantize_error[]
    starting at index offset (pre-order traversal).  Returns the number of
    entries written; writes nothing once offset reaches cube_info->nodes.
  */
  size_t
    fanout,
    written;

  ssize_t
    slot;

  if (offset >= (ssize_t) cube_info->nodes)
    return(0);
  quantize_error[offset]=node_info->quantize_error;
  written=1;
  fanout=(cube_info->associate_alpha == MagickFalse) ? 8UL : 16UL;
  for (slot=0; slot < (ssize_t) fanout; slot++)
    {
      const NodeInfo
        *child;

      child=node_info->child[slot];
      if (child == (NodeInfo *) NULL)
        continue;
      written+=QuantizeErrorFlatten(cube_info,child,offset+(ssize_t) written,
        quantize_error);
    }
  return(written);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
  /*
    One pruning pass: remove every node whose quantization error is at or
    below cube_info->pruning_threshold, counting the survivors that hold
    unique colors and recording the smallest surviving error as the next
    pass's threshold.  Children are reduced before their parent.
  */
  size_t
    fanout;

  ssize_t
    slot;

  fanout=(cube_info->associate_alpha == MagickFalse) ? 8UL : 16UL;
  for (slot=0; slot < (ssize_t) fanout; slot++)
    {
      if (node_info->child[slot] != (NodeInfo *) NULL)
        Reduce(cube_info,node_info->child[slot]);
    }
  if (node_info->quantize_error > cube_info->pruning_threshold)
    {
      /*
        Node survives this pass: update the color count and the minimum
        pruning threshold for the next pass.
      */
      if (node_info->number_unique > 0)
        cube_info->colors++;
      if (node_info->quantize_error < cube_info->next_threshold)
        cube_info->next_threshold=node_info->quantize_error;
      return;
    }
  PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except a the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static int MagickRealTypeCompare(const void *error_p,const void *error_q)
{
  /*
    qsort() comparator: order quantization errors ascending.

    The comparison is intentionally exact.  The previous implementation
    tested (*p > *q) before the epsilon "equality" test, so for a positive
    difference no larger than MagickEpsilon compare(a,b) returned 1 while
    compare(b,a) returned 0 — an asymmetric comparator.  A fuzzy epsilon
    equality is also non-transitive (a~b and b~c do not imply a~c).  Either
    inconsistency violates the qsort() comparator contract and yields
    undefined behavior; a strict three-way compare is always consistent.
  */
  const MagickRealType
    *p,
    *q;

  p=(const MagickRealType *) error_p;
  q=(const MagickRealType *) error_q;
  if (*p < *q)
    return(-1);
  if (*p > *q)
    return(1);
  return(0);
}
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag  "Reduce/Image"
  MagickBooleanType
    proceed;
  MagickOffsetType
    offset;
  size_t
    span;
  cube_info->next_threshold=0.0;
  /*
    Fast path: flatten all node errors into a sorted 1-D array and jump the
    starting threshold ahead so the first Reduce() passes prune in bulk,
    instead of raising the threshold one minimum at a time.  Allocation
    failure is benign here — we simply fall through to the slow loop.
  */
  if (cube_info->colors > cube_info->maximum_colors)
    {
      MagickRealType
        *quantize_error;
      /*
        Enable rapid reduction of the number of unique colors.
      */
      quantize_error=(MagickRealType *) AcquireQuantumMemory(cube_info->nodes,
        sizeof(*quantize_error));
      if (quantize_error != (MagickRealType *) NULL)
        {
          (void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
            quantize_error);
          qsort(quantize_error,cube_info->nodes,sizeof(MagickRealType),
            MagickRealTypeCompare);
          /* pick the error value that leaves ~110% of the target colors */
          if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
            cube_info->next_threshold=quantize_error[cube_info->nodes-110*
              (cube_info->maximum_colors+1)/100];
          quantize_error=(MagickRealType *) RelinquishMagickMemory(
            quantize_error);
        }
    }
  /*
    Repeatedly prune: each Reduce() pass removes all nodes with error <=
    pruning_threshold, recounts cube_info->colors, and records the minimum
    surviving error as the next threshold.  `span' (the starting color
    count) only scales the progress report.
  */
  for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
  {
    cube_info->pruning_threshold=cube_info->next_threshold;
    /* seed next_threshold below the root's (maximal) error */
    cube_info->next_threshold=cube_info->root->quantize_error-1;
    cube_info->colors=0;
    Reduce(cube_info,cube_info->root);
    offset=(MagickOffsetType) span-cube_info->colors;
    proceed=SetImageProgress(image,ReduceImageTag,offset,span-
      cube_info->maximum_colors+1);
    if (proceed == MagickFalse)
      break;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest color from
% a reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image)
{
  CubeInfo
    *cube_info;
  MagickBooleanType
    status;
  /*
    Initialize color cube.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Classify the REFERENCE image's colors into the cube, then assign those
    colors (cube_info->colors of them) to the target image's pixels.
  */
  status=ClassifyImageColors(cube_info,remap_image,&image->exception);
  if (status != MagickFalse)
    {
      /*
        Classify image colors from the reference image.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      status=AssignImageColors(image,cube_info);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,Image *remap_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image)
{
  CubeInfo
    *cube_info;
  Image
    *image;
  MagickBooleanType
    status;
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  image=images;  /* used by the allocation-failure path (image->filename) */
  if (remap_image == (Image *) NULL)
    {
      /*
        Create a global colormap for an image sequence.
      */
      status=QuantizeImages(quantize_info,images);
      return(status);
    }
  /*
    Classify image colors from the reference image.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,&image->exception);
  if (status != MagickFalse)
    {
      /*
        Classify image colors from the reference image.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      /*
        Assign the reference colormap to every image in the sequence;
        stop at the first failure and report that status.
      */
      image=images;
      for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
      {
        status=AssignImageColors(image,cube_info);
        if (status == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image)
%
% A description of each parameter follows:
%
% o image: The image.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
  /*
    qsort() comparator: order PixelPacket colormap entries by intensity,
    ascending.

    The previous implementation returned the clamped intensity difference
    truncated to int.  Truncation maps any fractional difference to 0, so
    "equal" was non-transitive (a~b, b~c, yet a and c could compare
    unequal), and saturating at INT_MIN/INT_MAX discarded ordering
    information.  An inconsistent comparator is undefined behavior for
    qsort(); compare the two intensities directly instead.
  */
  double
    intensity_1,
    intensity_2;

  PixelPacket
    *color_1,
    *color_2;

  color_1=(PixelPacket *) x;
  color_2=(PixelPacket *) y;
  intensity_1=PixelPacketIntensity(color_1);
  intensity_2=PixelPacketIntensity(color_2);
  if (intensity_1 < intensity_2)
    return(-1);
  if (intensity_1 > intensity_2)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static MagickBooleanType SetGrayscaleImage(Image *image)
{
  /*
    Convert an image to a PseudoClass grayscale image: collect the unique
    gray levels into a colormap, sort the colormap by intensity, collapse
    duplicates, then rewrite every pixel index against the compact map.
    Returns MagickTrue on success, MagickFalse if pixel access fails.
  */
  CacheView
    *image_view;
  ExceptionInfo
    *exception;
  MagickBooleanType
    status;
  PixelPacket
    *colormap;
  register ssize_t
    i;
  size_t
    extent;
  ssize_t
    *colormap_index,
    j,
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  exception=(&image->exception);
  if (image->type != GrayscaleType)
    (void) TransformImageColorspace(image,GRAYColorspace);
  /*
    colormap_index is indexed by ScaleQuantumToMap() values and, later, by
    original colormap positions — size it for the larger of the two uses.
  */
  extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
  colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
    sizeof(*colormap_index));
  if (colormap_index == (ssize_t *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  if (image->storage_class != PseudoClass)
    {
      /*
        DirectClass image: build a colormap from the pixels.  memset with
        -1 sets every byte to 0xFF, i.e. every ssize_t entry to -1
        ("intensity not seen yet").
      */
      (void) memset(colormap_index,(-1),extent*sizeof(*colormap_index));
      if (AcquireImageColormap(image,MaxColormapSize) == MagickFalse)
        {
          colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      image->colors=0;
      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register IndexPacket
          *magick_restrict indexes;
        register PixelPacket
          *magick_restrict q;
        register ssize_t
          x;
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register size_t
            intensity;
          /* the red channel carries the gray value after the colorspace
             transform above */
          intensity=ScaleQuantumToMap(GetPixelRed(q));
          if (colormap_index[intensity] < 0)
            {
              /* double-checked under the critical section so each gray
                 level is appended to the colormap exactly once */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
              if (colormap_index[intensity] < 0)
                {
                  colormap_index[intensity]=(ssize_t) image->colors;
                  image->colormap[image->colors].red=GetPixelRed(q);
                  image->colormap[image->colors].green=GetPixelGreen(q);
                  image->colormap[image->colors].blue=GetPixelBlue(q);
                  image->colors++;
               }
            }
          SetPixelIndex(indexes+x,colormap_index[intensity]);
          q++;
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
    }
  /*
    Sort the colormap by intensity and collapse duplicate colors.  The
    opacity field temporarily stores each entry's ORIGINAL index so the
    old-index -> new-index mapping survives the qsort.
  */
  (void) memset(colormap_index,0,extent*sizeof(*colormap_index));
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].opacity=(Quantum) i;
  qsort((void *) image->colormap,image->colors,sizeof(PixelPacket),
    IntensityCompare);
  colormap=(PixelPacket *) AcquireQuantumMemory(image->colors,
    sizeof(*colormap));
  if (colormap == (PixelPacket *) NULL)
    {
      colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Copy unique colors (adjacent after the sort) into the compact colormap;
    colormap_index[old index] records each color's new position j.
  */
  j=0;
  colormap[j]=image->colormap[0];
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    if (IsSameColor(image,&colormap[j],&image->colormap[i]) == MagickFalse)
      {
        j++;
        colormap[j]=image->colormap[i];
      }
    colormap_index[(ssize_t) image->colormap[i].opacity]=j;
  }
  image->colors=(size_t) (j+1);
  image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
  image->colormap=colormap;
  /*
    Rewrite every pixel's index through the old -> new mapping.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;
    register const PixelPacket
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
      SetPixelIndex(indexes+x,colormap_index[ScaleQuantumToMap(GetPixelIndex(
        indexes+x))]);
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
  image->type=GrayscaleType;
  if (SetImageMonochrome(image,&image->exception) != MagickFalse)
    image->type=BilevelType;
  return(status);
}
|
convolutiondepthwise_5x5.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
static void convdw5x5s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g=0; g<group; g++)
{
Mat out = top_blob.channel(g);
const float bias0 = bias ? bias[g] : 0.f;
const float* kernel0 = kernel + g*25;
float* outptr = out;
float* outptr2 = outptr + outw;
const float* img0 = bottom_blob.channel(g);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* r4 = img0 + w*4;
const float* r5 = img0 + w*5;
const float* k0 = kernel0;
const float* k1 = kernel0 + 5;
const float* k2 = kernel0 + 10;
const float* k3 = kernel0 + 15;
const float* k4 = kernel0 + 20;
#if __ARM_NEON
float32x4_t _k0123 = vld1q_f32(kernel0);
float32x4_t _k4567 = vld1q_f32(kernel0+4);
float32x4_t _k891011 = vld1q_f32(kernel0+8);
float32x4_t _k12131415 = vld1q_f32(kernel0+12);
float32x4_t _k16171819 = vld1q_f32(kernel0+16);
float32x4_t _k20212223 = vld1q_f32(kernel0+20);
float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
float32x4_t _bias0 = vdupq_n_f32(bias0);
#endif // __ARM_NEON
int i = 0;
for (; i+1 < outh; i+=2)
{
#if __ARM_NEON
#if __aarch64__
int nn = outw >> 3;
int remain = outw & 7;
#else
int nn = outw >> 2;
int remain = outw & 3;
#endif // __aarch64__
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
// r1
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n"// v16 v17 v18 = r10 r14 r18
"mov v8.16b, %25.16b \n"// v8 = _bias0
"mov v9.16b, %25.16b \n"// v9 = _bias0
"0: \n"
"mov v10.16b, %25.16b \n"// v10 = _bias0
"mov v11.16b, %25.16b \n"// v11 = _bias0
"fmla v8.4s, v16.4s, %19.s[1] \n"
"fmla v10.4s, v16.4s, %18.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n"// r11
"fmla v9.4s, v17.4s, %19.s[1] \n"
"fmla v11.4s, v17.4s, %18.s[0] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n"// r15
"fmla v8.4s, v17.4s, %20.s[1] \n"
"fmla v10.4s, v17.4s, %19.s[0] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n"// r12
"fmla v9.4s, v18.4s, %20.s[1] \n"
"fmla v11.4s, v18.4s, %19.s[0] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n"// r16
"fmla v8.4s, v19.4s, %19.s[2] \n"
"fmla v10.4s, v19.4s, %18.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n"// r13
"fmla v9.4s, v20.4s, %19.s[2] \n"
"fmla v11.4s, v20.4s, %18.s[1] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n"// r17
"fmla v8.4s, v21.4s, %19.s[3] \n"
"fmla v10.4s, v21.4s, %18.s[2] \n"
"add %4, %4, #32 \n"
"fmla v9.4s, v22.4s, %19.s[3] \n"
"fmla v11.4s, v22.4s, %18.s[2] \n"
// r2
"prfm pldl1keep, [%5, #384] \n"
"ld1 {v12.4s, v13.4s, v14.4s}, [%5] \n"// v12 v13 v14 = r20 r24 r28
"fmla v8.4s, v19.4s, %20.s[0] \n"
"fmla v10.4s, v19.4s, %18.s[3] \n"
"fmla v9.4s, v20.4s, %20.s[0] \n"
"fmla v11.4s, v20.4s, %18.s[3] \n"
"add %5, %5, #32 \n"
"fmla v8.4s, v12.4s, %20.s[2] \n"
"fmla v10.4s, v12.4s, %19.s[1] \n"
"ext v21.16b, v12.16b, v13.16b, #4 \n"// r21
"fmla v9.4s, v13.4s, %20.s[2] \n"
"fmla v11.4s, v13.4s, %19.s[1] \n"
"ext v22.16b, v13.16b, v14.16b, #4 \n"// r25
"fmla v8.4s, v13.4s, %21.s[2] \n"
"fmla v10.4s, v13.4s, %20.s[1] \n"
"ext v19.16b, v12.16b, v13.16b, #8 \n"// r22
"fmla v9.4s, v14.4s, %21.s[2] \n"
"fmla v11.4s, v14.4s, %20.s[1] \n"
"ext v20.16b, v13.16b, v14.16b, #8 \n"// r26
"fmla v8.4s, v21.4s, %20.s[3] \n"
"fmla v10.4s, v21.4s, %19.s[2] \n"
"ext v21.16b, v12.16b, v13.16b, #12 \n"// r23
"fmla v9.4s, v22.4s, %20.s[3] \n"
"fmla v11.4s, v22.4s, %19.s[2] \n"
"ext v22.16b, v13.16b, v14.16b, #12 \n"// r27
"fmla v8.4s, v19.4s, %21.s[0] \n"
"fmla v10.4s, v19.4s, %19.s[3] \n"
"fmla v9.4s, v20.4s, %21.s[0] \n"
"fmla v11.4s, v20.4s, %19.s[3] \n"
// r3
"prfm pldl1keep, [%6, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%6] \n"// v16 v17 v18 = r30 r34 r38
"fmla v8.4s, v21.4s, %21.s[1] \n"
"fmla v10.4s, v21.4s, %20.s[0] \n"
"fmla v9.4s, v22.4s, %21.s[1] \n"
"fmla v11.4s, v22.4s, %20.s[0] \n"
"add %6, %6, #32 \n"
"fmla v8.4s, v16.4s, %21.s[3] \n"
"fmla v10.4s, v16.4s, %20.s[2] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n"// r31
"fmla v9.4s, v17.4s, %21.s[3] \n"
"fmla v11.4s, v17.4s, %20.s[2] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n"// r35
"fmla v8.4s, v17.4s, %22.s[3] \n"
"fmla v10.4s, v17.4s, %21.s[2] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n"// r32
"fmla v9.4s, v18.4s, %22.s[3] \n"
"fmla v11.4s, v18.4s, %21.s[2] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n"// r36
"fmla v8.4s, v19.4s, %22.s[0] \n"
"fmla v10.4s, v19.4s, %20.s[3] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n"// r33
"fmla v9.4s, v20.4s, %22.s[0] \n"
"fmla v11.4s, v20.4s, %20.s[3] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n"// r37
"fmla v8.4s, v21.4s, %22.s[1] \n"
"fmla v10.4s, v21.4s, %21.s[0] \n"
"fmla v9.4s, v22.4s, %22.s[1] \n"
"fmla v11.4s, v22.4s, %21.s[0] \n"
// r4
"prfm pldl1keep, [%7, #384] \n"
"ld1 {v12.4s, v13.4s, v14.4s}, [%7] \n"// v12 v13 v14 = r40 r44 r48
"fmla v8.4s, v19.4s, %22.s[2] \n"
"fmla v10.4s, v19.4s, %21.s[1] \n"
"add %7, %7, #32 \n"
"fmla v9.4s, v20.4s, %22.s[2] \n"
"fmla v11.4s, v20.4s, %21.s[1] \n"
"ext v21.16b, v12.16b, v13.16b, #4 \n"// r41
"fmla v8.4s, v12.4s, %23.s[0] \n"
"fmla v10.4s, v12.4s, %21.s[3] \n"
"ext v22.16b, v13.16b, v14.16b, #4 \n"// r45
"fmla v9.4s, v13.4s, %23.s[0] \n"
"fmla v11.4s, v13.4s, %21.s[3] \n"
"ext v19.16b, v12.16b, v13.16b, #8 \n"// r42
"fmla v8.4s, v13.4s, %24.s[0] \n"
"fmla v10.4s, v13.4s, %22.s[3] \n"
"ext v20.16b, v13.16b, v14.16b, #8 \n"// r46
"fmla v9.4s, v14.4s, %24.s[0] \n"
"fmla v11.4s, v14.4s, %22.s[3] \n"
// r0 and r5
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%3] \n"// v16 v17 v18 = r00 r04 r08
"fmla v8.4s, v21.4s, %23.s[1] \n"
"fmla v10.4s, v21.4s, %22.s[0] \n"
"ext v21.16b, v12.16b, v13.16b, #12 \n"// r43
"fmla v9.4s, v22.4s, %23.s[1] \n"
"fmla v11.4s, v22.4s, %22.s[0] \n"
"ext v22.16b, v13.16b, v14.16b, #12 \n"// r47
"fmla v8.4s, v19.4s, %23.s[2] \n"
"fmla v10.4s, v19.4s, %22.s[1] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v12.4s, v13.4s, v14.4s}, [%8] \n"// v12 v13 v14 = r50 r54 r58
"fmla v9.4s, v20.4s, %23.s[2] \n"
"fmla v11.4s, v20.4s, %22.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n"// r01
"fmla v8.4s, v21.4s, %23.s[3] \n"
"fmla v10.4s, v21.4s, %22.s[2] \n"
"ext v23.16b, v12.16b, v13.16b, #4 \n"// r51
"fmla v9.4s, v22.4s, %23.s[3] \n"
"fmla v11.4s, v22.4s, %22.s[2] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n"// r05
"fmla v8.4s, v16.4s, %18.s[0] \n"
"fmla v10.4s, v12.4s, %23.s[0] \n"
"ext v24.16b, v13.16b, v14.16b, #4 \n"// r55
"fmla v9.4s, v17.4s, %18.s[0] \n"
"fmla v11.4s, v13.4s, %23.s[0] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n"// r02
"fmla v8.4s, v17.4s, %19.s[0] \n"
"fmla v10.4s, v13.4s, %24.s[0] \n"
"ext v25.16b, v12.16b, v13.16b, #8 \n"// r52
"fmla v9.4s, v18.4s, %19.s[0] \n"
"fmla v11.4s, v14.4s, %24.s[0] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n"// r06
"fmla v8.4s, v19.4s, %18.s[1] \n"
"fmla v10.4s, v23.4s, %23.s[1] \n"
"ext v26.16b, v13.16b, v14.16b, #8 \n"// r56
"fmla v9.4s, v20.4s, %18.s[1] \n"
"fmla v11.4s, v24.4s, %23.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n"// r03
"fmla v8.4s, v21.4s, %18.s[2] \n"
"fmla v10.4s, v25.4s, %23.s[2] \n"
"ext v23.16b, v12.16b, v13.16b, #12 \n"// r53
"fmla v9.4s, v22.4s, %18.s[2] \n"
"fmla v11.4s, v26.4s, %23.s[2] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n"// r07
"fmla v8.4s, v19.4s, %18.s[3] \n"
"fmla v10.4s, v23.4s, %23.s[3] \n"
"ext v24.16b, v13.16b, v14.16b, #12 \n"// r57
"fmla v9.4s, v20.4s, %18.s[3] \n"
"add %3, %3, #32 \n"
"fmla v11.4s, v24.4s, %23.s[3] \n"
"add %8, %8, #32 \n"
// r1
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n"// v16 v17 v18 = r10 r14 r18
"subs %w0, %w0, #1 \n"
"st1 {v8.4s, v9.4s}, [%1], #32 \n"
"mov v8.16b, %25.16b \n"// v8 = _bias0
"mov v9.16b, %25.16b \n"// v9 = _bias0
"st1 {v10.4s, v11.4s}, [%2], #32 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(outptr2), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3), // %6
"=r"(r4), // %7
"=r"(r5) // %8
: "0"(nn),
"1"(outptr),
"2"(outptr2),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"7"(r4),
"8"(r5),
"w"(_k0123), // %18
"w"(_k4567), // %19
"w"(_k891011), // %20
"w"(_k12131415), // %21
"w"(_k16171819), // %22
"w"(_k20212223), // %23
"w"(_k24242424), // %24
"w"(_bias0) // %25
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26"
);
}
if (remain >= 4)
{
remain -= 4;
asm volatile(
// r1
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v12.4s, v13.4s}, [%3] \n"// v12 v13 = r10 r14
"mov v8.16b, %23.16b \n"// v8 = _bias0
"mov v9.16b, %23.16b \n"// v9 = _bias0
"fmul v10.4s, v12.4s, %17.s[1] \n"
"fmul v11.4s, v12.4s, %16.s[0] \n"
"ext v21.16b, v12.16b, v13.16b, #4 \n"// r11
"fmla v8.4s, v13.4s, %18.s[1] \n"
"fmla v9.4s, v13.4s, %17.s[0] \n"
"ext v22.16b, v12.16b, v13.16b, #8 \n"// r12
"fmla v10.4s, v21.4s, %17.s[2] \n"
"fmla v11.4s, v21.4s, %16.s[1] \n"
"ext v23.16b, v12.16b, v13.16b, #12 \n"// r13
"fmla v8.4s, v22.4s, %17.s[3] \n"
"fmla v9.4s, v22.4s, %16.s[2] \n"
// r2
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v16.4s, v17.4s}, [%4] \n"// v16 v17 = r20 r24
"fmla v10.4s, v23.4s, %18.s[0] \n"
"fmla v11.4s, v23.4s, %16.s[3] \n"
"add %4, %4, #16 \n"
"fmla v8.4s, v16.4s, %18.s[2] \n"
"fmla v9.4s, v16.4s, %17.s[1] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n"// r21
"fmla v10.4s, v17.4s, %19.s[2] \n"
"fmla v11.4s, v17.4s, %18.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n"// r22
"fmla v8.4s, v18.4s, %18.s[3] \n"
"fmla v9.4s, v18.4s, %17.s[2] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n"// r23
"fmla v10.4s, v19.4s, %19.s[0] \n"
"fmla v11.4s, v19.4s, %17.s[3] \n"
// r3
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v12.4s, v13.4s}, [%5] \n"// v12 v13 = r30 r34
"fmla v8.4s, v20.4s, %19.s[1] \n"
"fmla v9.4s, v20.4s, %18.s[0] \n"
"add %5, %5, #16 \n"
"fmla v10.4s, v12.4s, %19.s[3] \n"
"fmla v11.4s, v12.4s, %18.s[2] \n"
"ext v21.16b, v12.16b, v13.16b, #4 \n"// r31
"fmla v8.4s, v13.4s, %20.s[3] \n"
"fmla v9.4s, v13.4s, %19.s[2] \n"
"ext v22.16b, v12.16b, v13.16b, #8 \n"// r32
"fmla v10.4s, v21.4s, %20.s[0] \n"
"fmla v11.4s, v21.4s, %18.s[3] \n"
"ext v23.16b, v12.16b, v13.16b, #12 \n"// r33
"fmla v8.4s, v22.4s, %20.s[1] \n"
"fmla v9.4s, v22.4s, %19.s[0] \n"
// r4
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4s, v17.4s}, [%6] \n"// v16 v17 = r40 r44
"fmla v10.4s, v23.4s, %20.s[2] \n"
"fmla v11.4s, v23.4s, %19.s[1] \n"
"add %6, %6, #16 \n"
"fmla v8.4s, v16.4s, %21.s[0] \n"
"fmla v9.4s, v16.4s, %19.s[3] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n"// r41
"fmla v10.4s, v17.4s, %22.s[0] \n"
"fmla v11.4s, v17.4s, %20.s[3] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n"// r42
"fmla v8.4s, v18.4s, %21.s[1] \n"
"fmla v9.4s, v18.4s, %20.s[0] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n"// r43
"fmla v10.4s, v19.4s, %21.s[2] \n"
"fmla v11.4s, v19.4s, %20.s[1] \n"
// r0
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v16.4s, v17.4s}, [%2] \n"// v16 v17 = r00 r04
"fmla v8.4s, v20.4s, %21.s[3] \n"
"fmla v9.4s, v20.4s, %20.s[2] \n"
// r5
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v12.4s, v13.4s}, [%7] \n"// v12 v13 = r50 r54
"fmla v10.4s, v16.4s, %16.s[0] \n"
"fmla v11.4s, v12.4s, %21.s[0] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n"// r01
"fmla v8.4s, v17.4s, %17.s[0] \n"
"ext v21.16b, v12.16b, v13.16b, #4 \n"// r51
"fmla v9.4s, v13.4s, %22.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n"// r02
"fmla v10.4s, v18.4s, %16.s[1] \n"
"ext v22.16b, v12.16b, v13.16b, #8 \n"// r52
"fmla v11.4s, v21.4s, %21.s[1] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n"// r03
"fmla v8.4s, v19.4s, %16.s[2] \n"
"ext v23.16b, v12.16b, v13.16b, #12 \n"// r53
"fmla v9.4s, v22.4s, %21.s[2] \n"
"add %3, %3, #16 \n"
"fmla v10.4s, v20.4s, %16.s[3] \n"
"fmla v11.4s, v23.4s, %21.s[3] \n"
"add %2, %2, #16 \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"fadd v9.4s, v9.4s, v11.4s \n"
"add %7, %7, #16 \n"
"st1 {v8.4s}, [%0], #16 \n"
"st1 {v9.4s}, [%1], #16 \n"
: "=r"(outptr), // %0
"=r"(outptr2), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5) // %7
: "0"(outptr),
"1"(outptr2),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"w"(_k0123), // %16
"w"(_k4567), // %17
"w"(_k891011), // %18
"w"(_k12131415), // %19
"w"(_k16171819), // %20
"w"(_k20212223), // %21
"w"(_k24242424), // %22
"w"(_bias0) // %23
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
}
#else
if (nn > 0)
{
asm volatile(
// r1
"pld [%4, #256] \n"
"vld1.f32 {d28-d31}, [%4] \n"// q14 q15 = r10 r14
"vmov q8, %q25 \n"// q8 = _bias0
"0: \n"
"vmov q9, %q25 \n"// q9 = _bias0
"vmla.f32 q8, q14, %e19[1] \n"
"vmla.f32 q9, q14, %e18[0] \n"
"vext.32 q12, q14, q15, #1 \n"// r11
"vmla.f32 q8, q15, %e20[1] \n"
"vmla.f32 q9, q15, %e19[0] \n"
"vext.32 q13, q14, q15, #2 \n"// r12
"vmla.f32 q8, q12, %f19[0] \n"
"vmla.f32 q9, q12, %e18[1] \n"
"vext.32 q12, q14, q15, #3 \n"// r13
"vmla.f32 q8, q13, %f19[1] \n"
"vmla.f32 q9, q13, %f18[0] \n"
// r2
"pld [%5, #256] \n"
"vld1.f32 {d20-d23}, [%5] \n"// q10 q11 = r20 r24
"vmla.f32 q8, q12, %e20[0] \n"
"vmla.f32 q9, q12, %f18[1] \n"
"add %5, #16 \n"
"vmla.f32 q8, q10, %f20[0] \n"
"vmla.f32 q9, q10, %e19[1] \n"
"vext.32 q12, q10, q11, #1 \n"// r21
"vmla.f32 q8, q11, %f21[0] \n"
"vmla.f32 q9, q11, %e20[1] \n"
"vext.32 q13, q10, q11, #2 \n"// r22
"vmla.f32 q8, q12, %f20[1] \n"
"vmla.f32 q9, q12, %f19[0] \n"
"vext.32 q12, q10, q11, #3 \n"// r23
"vmla.f32 q8, q13, %e21[0] \n"
"vmla.f32 q9, q13, %f19[1] \n"
// r3
"pld [%6, #256] \n"
"vld1.f32 {d28-d31}, [%6] \n"// q14 q15 = r30 r34
"vmla.f32 q8, q12, %e21[1] \n"
"vmla.f32 q9, q12, %e20[0] \n"
"add %6, #16 \n"
"vmla.f32 q8, q14, %f21[1] \n"
"vmla.f32 q9, q14, %f20[0] \n"
"vext.32 q12, q14, q15, #1 \n"// r31
"vmla.f32 q8, q15, %f22[1] \n"
"vmla.f32 q9, q15, %f21[0] \n"
"vext.32 q13, q14, q15, #2 \n"// r32
"vmla.f32 q8, q12, %e22[0] \n"
"vmla.f32 q9, q12, %f20[1] \n"
"vext.32 q12, q14, q15, #3 \n"// r33
"vmla.f32 q8, q13, %e22[1] \n"
"vmla.f32 q9, q13, %e21[0] \n"
// r4
"pld [%7, #256] \n"
"vld1.f32 {d20-d23}, [%7] \n"// q10 q11 = r40 r44
"vmla.f32 q8, q12, %f22[0] \n"
"vmla.f32 q9, q12, %e21[1] \n"
"add %7, #16 \n"
"vmla.f32 q8, q10, %e23[0] \n"
"vmla.f32 q9, q10, %f21[1] \n"
"vext.32 q12, q10, q11, #1 \n"// r41
"vmla.f32 q8, q11, %e24[0] \n"
"vmla.f32 q9, q11, %f22[1] \n"
"vext.32 q13, q10, q11, #2 \n"// r42
"vmla.f32 q8, q12, %e23[1] \n"
"vmla.f32 q9, q12, %e22[0] \n"
"vext.32 q12, q10, q11, #3 \n"// r43
"vmla.f32 q8, q13, %f23[0] \n"
"vmla.f32 q9, q13, %e22[1] \n"
// r0 and r5
"pld [%3, #256] \n"
"vld1.f32 {d20-d23}, [%3] \n"// q10 q11 = r00 r04
"vmla.f32 q8, q12, %f23[1] \n"
"vmla.f32 q9, q12, %f22[0] \n"
// r5
"pld [%8, #256] \n"
"vld1.f32 {d28-d31}, [%8] \n"// q14 q15 = r50 r54
"vmla.f32 q8, q10, %e18[0] \n"
"vmla.f32 q9, q14, %e23[0] \n"
"vext.32 q12, q10, q11, #1 \n"// r01
"vmla.f32 q8, q11, %e19[0] \n"
"vmla.f32 q9, q15, %e24[0] \n"
"vext.32 q13, q14, q15, #1 \n"// r51
"vmla.f32 q8, q12, %e18[1] \n"
"vext.32 q12, q10, q11, #2 \n"// r02
"vmla.f32 q9, q13, %e23[1] \n"
"vext.32 q13, q14, q15, #2 \n"// r52
"vmla.f32 q8, q12, %f18[0] \n"
"vext.32 q12, q10, q11, #3 \n"// r03
"vmla.f32 q9, q13, %f23[0] \n"
"vext.32 q13, q14, q15, #3 \n"// r53
"vmla.f32 q8, q12, %f18[1] \n"
"add %3, #16 \n"
"vmla.f32 q9, q13, %f23[1] \n"
"add %4, #16 \n"
// r1
"pld [%4, #256] \n"
"vld1.f32 {d28-d31}, [%4] \n"// q14 q15 = r10 r14
"add %8, #16 \n"
"vst1.f32 {d16-d17}, [%1]! \n"
"vmov q8, %q25 \n"// q8 = _bias0
"subs %0, #1 \n"
"vst1.f32 {d18-d19}, [%2]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(outptr2), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3), // %6
"=r"(r4), // %7
"=r"(r5) // %8
: "0"(nn),
"1"(outptr),
"2"(outptr2),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"7"(r4),
"8"(r5),
"w"(_k0123), // %18
"w"(_k4567), // %19
"w"(_k891011), // %20
"w"(_k12131415), // %21
"w"(_k16171819), // %22
"w"(_k20212223), // %23
"w"(_k24242424), // %24
"w"(_bias0) // %25
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float sum = bias0;
float sum2 = bias0;
#if __ARM_NEON
// TODO neon assembly optimize
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _k1 = vld1q_f32(k1);
float32x4_t _sum = vmulq_f32(_r1, _k1);
float32x4_t _sum2 = vmulq_f32(_r1, _k0123);
float32x4_t _r2 = vld1q_f32(r2);
float32x4_t _k2 = vld1q_f32(k2);
_sum = vmlaq_f32(_sum, _r2, _k2);
_sum2 = vmlaq_f32(_sum2, _r2, _k1);
float32x4_t _r3 = vld1q_f32(r3);
float32x4_t _k3 = vld1q_f32(k3);
_sum = vmlaq_f32(_sum, _r3, _k3);
_sum2 = vmlaq_f32(_sum2, _r3, _k2);
float32x4_t _r4 = vld1q_f32(r4);
_sum = vmlaq_f32(_sum, _r4, _k20212223);
_sum2 = vmlaq_f32(_sum2, _r4, _k3);
float32x4_t _r0 = vld1q_f32(r0);
_sum = vmlaq_f32(_sum, _r0, _k0123);
float32x4_t _r5 = vld1q_f32(r5);
_sum2 = vmlaq_f32(_sum2, _r5, _k20212223);
float32x4_t _k_t4;
_k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
_k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
_k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
_k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);
float32x4_t _r_t4;
_r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
_r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
_r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
_r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
_sum = vmlaq_f32(_sum, _r_t4, _k_t4);
sum += r4[4] * k4[4];
_r_t4 = vextq_f32(_r_t4, _r_t4, 1);
_r_t4 = vsetq_lane_f32(r4[4], _r_t4, 3);
_sum2 = vmlaq_f32(_sum2, _r_t4, _k_t4);
sum2 += r5[4] * k4[4];
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));
float32x2_t _ss_ss2 = vpadd_f32(_ss, _ss2);
sum += vget_lane_f32(_ss_ss2, 0);
sum2 += vget_lane_f32(_ss_ss2, 1);
#else
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r1[3] * k0[3];
sum2 += r1[4] * k0[4];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r2[3] * k1[3];
sum2 += r2[4] * k1[4];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
sum2 += r3[3] * k2[3];
sum2 += r3[4] * k2[4];
sum2 += r4[0] * k3[0];
sum2 += r4[1] * k3[1];
sum2 += r4[2] * k3[2];
sum2 += r4[3] * k3[3];
sum2 += r4[4] * k3[4];
sum2 += r5[0] * k4[0];
sum2 += r5[1] * k4[1];
sum2 += r5[2] * k4[2];
sum2 += r5[3] * k4[3];
sum2 += r5[4] * k4[4];
#endif // __ARM_NEON
*outptr = sum;
*outptr2 = sum2;
r0++;
r1++;
r2++;
r3++;
r4++;
r5++;
outptr++;
outptr2++;
}
r0 += 4 + w;
r1 += 4 + w;
r2 += 4 + w;
r3 += 4 + w;
r4 += 4 + w;
r5 += 4 + w;
outptr += outw;
outptr2 += outw;
}
for (; i < outh; i++)
{
#if __ARM_NEON
#if __aarch64__
int nn = outw >> 3;
int remain = outw & 7;
#else
int nn = outw >> 2;
int remain = outw & 3;
#endif // __aarch64__
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
// v10 v11
// r0
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%2] \n"// v16 v17 v18 = r00 r04 r08
"mov v8.16b, %21.16b \n"// v8 = _bias0
"mov v9.16b, %21.16b \n"// v9 = _bias0
"0: \n"
"fmul v10.4s, v16.4s, %14.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n"// r01
"fmul v11.4s, v17.4s, %14.s[0] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n"// r05
"fmla v8.4s, v17.4s, %15.s[0] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n"// r02
"fmla v9.4s, v18.4s, %15.s[0] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n"// r06
"fmla v10.4s, v19.4s, %14.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n"// r03
"fmla v11.4s, v20.4s, %14.s[1] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n"// r07
"fmla v8.4s, v21.4s, %14.s[2] \n"
"fmla v9.4s, v22.4s, %14.s[2] \n"
// r1
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v12.4s, v13.4s, v14.4s}, [%3] \n"// v12 v13 v14 = r10 r14 r18
"fmla v10.4s, v19.4s, %14.s[3] \n"
"fmla v11.4s, v20.4s, %14.s[3] \n"
"fmla v8.4s, v12.4s, %15.s[1] \n"
"ext v19.16b, v12.16b, v13.16b, #4 \n"// r11
"fmla v9.4s, v13.4s, %15.s[1] \n"
"ext v20.16b, v13.16b, v14.16b, #4 \n"// r15
"fmla v10.4s, v13.4s, %16.s[1] \n"
"ext v21.16b, v12.16b, v13.16b, #8 \n"// r12
"fmla v11.4s, v14.4s, %16.s[1] \n"
"ext v22.16b, v13.16b, v14.16b, #8 \n"// r16
"fmla v8.4s, v19.4s, %15.s[2] \n"
"ext v19.16b, v12.16b, v13.16b, #12 \n"// r13
"fmla v9.4s, v20.4s, %15.s[2] \n"
"ext v20.16b, v13.16b, v14.16b, #12 \n"// r17
"fmla v10.4s, v21.4s, %15.s[3] \n"
"fmla v11.4s, v22.4s, %15.s[3] \n"
// r2
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n"// v16 v17 v18 = r20 r24 r28
"fmla v8.4s, v19.4s, %16.s[0] \n"
"fmla v9.4s, v20.4s, %16.s[0] \n"
"fmla v10.4s, v16.4s, %16.s[2] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n"// r21
"fmla v11.4s, v17.4s, %16.s[2] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n"// r25
"fmla v8.4s, v17.4s, %17.s[2] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n"// r22
"fmla v9.4s, v18.4s, %17.s[2] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n"// r26
"fmla v10.4s, v19.4s, %16.s[3] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n"// r23
"fmla v11.4s, v20.4s, %16.s[3] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n"// r27
"fmla v8.4s, v21.4s, %17.s[0] \n"
"fmla v9.4s, v22.4s, %17.s[0] \n"
// r3
"prfm pldl1keep, [%5, #384] \n"
"ld1 {v12.4s, v13.4s, v14.4s}, [%5] \n"// v12 v13 v14 = r30 r34 r38
"fmla v10.4s, v19.4s, %17.s[1] \n"
"fmla v11.4s, v20.4s, %17.s[1] \n"
"fmla v8.4s, v12.4s, %17.s[3] \n"
"ext v19.16b, v12.16b, v13.16b, #4 \n"// r31
"fmla v9.4s, v13.4s, %17.s[3] \n"
"ext v20.16b, v13.16b, v14.16b, #4 \n"// r35
"fmla v10.4s, v13.4s, %18.s[3] \n"
"ext v21.16b, v12.16b, v13.16b, #8 \n"// r32
"fmla v11.4s, v14.4s, %18.s[3] \n"
"ext v22.16b, v13.16b, v14.16b, #8 \n"// r36
"fmla v8.4s, v19.4s, %18.s[0] \n"
"ext v19.16b, v12.16b, v13.16b, #12 \n"// r33
"fmla v9.4s, v20.4s, %18.s[0] \n"
"ext v20.16b, v13.16b, v14.16b, #12 \n"// r37
"fmla v10.4s, v21.4s, %18.s[1] \n"
"fmla v11.4s, v22.4s, %18.s[1] \n"
// r4
"prfm pldl1keep, [%6, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%6] \n"// v16 v17 v18 = r40 r44 r48
"fmla v8.4s, v19.4s, %18.s[2] \n"
"fmla v9.4s, v20.4s, %18.s[2] \n"
"fmla v10.4s, v16.4s, %19.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n"// r41
"fmla v11.4s, v17.4s, %19.s[0] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n"// r45
"fmla v8.4s, v17.4s, %20.s[0] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n"// r42
"fmla v9.4s, v18.4s, %20.s[0] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n"// r46
"fmla v10.4s, v19.4s, %19.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n"// r43
"fmla v11.4s, v20.4s, %19.s[1] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n"// r47
"fmla v8.4s, v21.4s, %19.s[2] \n"
"add %2, %2, #32 \n"
"fmla v9.4s, v22.4s, %19.s[2] \n"
"add %3, %3, #32 \n"
"fmla v10.4s, v19.4s, %19.s[3] \n"
"add %4, %4, #32 \n"
"fmla v11.4s, v20.4s, %19.s[3] \n"
// r0
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%2] \n"// v16 v17 v18 = r00 r04 r08
"add %5, %5, #32 \n"
"fadd v10.4s, v8.4s, v10.4s \n"
"add %6, %6, #32 \n"
"fadd v11.4s, v9.4s, v11.4s \n"
"mov v8.16b, %21.16b \n"// v8 = _bias0
"mov v9.16b, %21.16b \n"// v9 = _bias0
"subs %w0, %w0, #1 \n"
"st1 {v10.4s, v11.4s}, [%1], #32 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424), // %20
"w"(_bias0) // %21
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22"
);
}
if (remain >= 4)
{
remain -= 4;
asm volatile(
// r0
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v16.4s, v17.4s}, [%1] \n"// v16 v17 = r00 r04
"mov v8.16b, %19.16b \n"// v8 = _bias0
"add %1, %1, #16 \n"
"fmul v9.4s, v16.4s, %12.s[0] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n"// r01
"fmla v8.4s, v17.4s, %13.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n"// r02
"fmla v9.4s, v18.4s, %12.s[1] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n"// r03
"fmla v8.4s, v19.4s, %12.s[2] \n"
// r1
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v10.4s, v11.4s}, [%2] \n"// v10 v11 = r10 r14
"fmla v9.4s, v20.4s, %12.s[3] \n"
"add %2, %2, #16 \n"
"fmla v8.4s, v10.4s, %13.s[1] \n"
"ext v12.16b, v10.16b, v11.16b, #4 \n"// r11
"fmla v9.4s, v11.4s, %14.s[1] \n"
"ext v13.16b, v10.16b, v11.16b, #8 \n"// r12
"fmla v8.4s, v12.4s, %13.s[2] \n"
"ext v14.16b, v10.16b, v11.16b, #12 \n"// r13
"fmla v9.4s, v13.4s, %13.s[3] \n"
// r2
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v16.4s, v17.4s}, [%3] \n"// v16 v17 = r20 r24
"fmla v8.4s, v14.4s, %14.s[0] \n"
"add %3, %3, #16 \n"
"fmla v9.4s, v16.4s, %14.s[2] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n"// r21
"fmla v8.4s, v17.4s, %15.s[2] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n"// r22
"fmla v9.4s, v18.4s, %14.s[3] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n"// r23
"fmla v8.4s, v19.4s, %15.s[0] \n"
// r3
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v10.4s, v11.4s}, [%4] \n"// v10 v11 = r30 r34
"fmla v9.4s, v20.4s, %15.s[1] \n"
"add %4, %4, #16 \n"
"fmla v8.4s, v10.4s, %15.s[3] \n"
"ext v12.16b, v10.16b, v11.16b, #4 \n"// r31
"fmla v9.4s, v11.4s, %16.s[3] \n"
"ext v13.16b, v10.16b, v11.16b, #8 \n"// r32
"fmla v8.4s, v12.4s, %16.s[0] \n"
"ext v14.16b, v10.16b, v11.16b, #12 \n"// r33
"fmla v9.4s, v13.4s, %16.s[1] \n"
// r4
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v16.4s, v17.4s}, [%5] \n"// v16 v17 = r40 r44
"fmla v8.4s, v14.4s, %16.s[2] \n"
"add %5, %5, #16 \n"
"fmla v9.4s, v16.4s, %17.s[0] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n"// r41
"fmla v8.4s, v17.4s, %18.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n"// r42
"fmla v9.4s, v18.4s, %17.s[1] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n"// r43
"fmla v8.4s, v19.4s, %17.s[2] \n"
"fmla v9.4s, v20.4s, %17.s[3] \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"st1 {v8.4s}, [%0], #16 \n"
: "=r"(outptr), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4) // %5
: "0"(outptr),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"w"(_k0123), // %12
"w"(_k4567), // %13
"w"(_k891011), // %14
"w"(_k12131415), // %15
"w"(_k16171819), // %16
"w"(_k20212223), // %17
"w"(_k24242424), // %18
"w"(_bias0) // %19
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20"
);
}
#else
if (nn > 0)
{
asm volatile(
// r0
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2] \n"// q10 q11 = r00 r04
"vmov q8, %q21 \n"// q8 = _bias0
"0: \n"
"vmul.f32 q9, q10, %e14[0] \n"
"vext.32 q12, q10, q11, #1 \n"// r01
"vmla.f32 q8, q11, %e15[0] \n"
"vext.32 q13, q10, q11, #2 \n"// r02
"vmla.f32 q9, q12, %e14[1] \n"
"vext.32 q12, q10, q11, #3 \n"// r03
"vmla.f32 q8, q13, %f14[0] \n"
// r1
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3] \n"// q14 q15 = r10 r14
"vmla.f32 q9, q12, %f14[1] \n"
"add %3, #16 \n"
"vmla.f32 q8, q14, %e15[1] \n"
"vext.32 q12, q14, q15, #1 \n"// r11
"vmla.f32 q9, q15, %e16[1] \n"
"vext.32 q13, q14, q15, #2 \n"// r12
"vmla.f32 q8, q12, %f15[0] \n"
"vext.32 q12, q14, q15, #3 \n"// r13
"vmla.f32 q9, q13, %f15[1] \n"
// r2
"pld [%4, #256] \n"
"vld1.f32 {d20-d23}, [%4] \n"// q10 q11 = r20 r24
"vmla.f32 q8, q12, %e16[0] \n"
"add %4, #16 \n"
"vmla.f32 q9, q10, %f16[0] \n"
"vext.32 q12, q10, q11, #1 \n"// r21
"vmla.f32 q8, q11, %f17[0] \n"
"vext.32 q13, q10, q11, #2 \n"// r22
"vmla.f32 q9, q12, %f16[1] \n"
"vext.32 q12, q10, q11, #3 \n"// r23
"vmla.f32 q8, q13, %e17[0] \n"
// r3
"pld [%5, #256] \n"
"vld1.f32 {d28-d31}, [%5] \n"// q14 q15 = r30 r34
"vmla.f32 q9, q12, %e17[1] \n"
"add %5, #16 \n"
"vmla.f32 q8, q14, %f17[1] \n"
"vext.32 q12, q14, q15, #1 \n"// r31
"vmla.f32 q9, q15, %f18[1] \n"
"vext.32 q13, q14, q15, #2 \n"// r32
"vmla.f32 q8, q12, %e18[0] \n"
"vext.32 q12, q14, q15, #3 \n"// r33
"vmla.f32 q9, q13, %e18[1] \n"
// r4
"pld [%6, #256] \n"
"vld1.f32 {d20-d23}, [%6] \n"// q10 q11 = r40 r44
"vmla.f32 q8, q12, %f18[0] \n"
"add %6, #16 \n"
"vmla.f32 q9, q10, %e19[0] \n"
"vext.32 q12, q10, q11, #1 \n"// r41
"vmla.f32 q8, q11, %e20[0] \n"
"vext.32 q13, q10, q11, #2 \n"// r42
"vmla.f32 q9, q12, %e19[1] \n"
"vext.32 q12, q10, q11, #3 \n"// r43
"vmla.f32 q8, q13, %f19[0] \n"
"add %2, #16 \n"
"vmla.f32 q9, q12, %f19[1] \n"
// r0
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2] \n"// q10 q11 = r00 r04
"vadd.f32 q9, q9, q8 \n"
"vmov q8, %q21 \n"// q8 = _bias0
"subs %0, #1 \n"
"vst1.f32 {d18-d19}, [%1]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424), // %20
"w"(_bias0) // %21
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
#if __aarch64__
// TODO neon assembly optimize
float sum = bias0;
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _sum = vmulq_f32(_r0, _k0123);
float32x4_t _r1 = vld1q_f32(r1);
_sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));
float32x4_t _r2 = vld1q_f32(r2);
_sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));
float32x4_t _r3 = vld1q_f32(r3);
_sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));
float32x4_t _r4 = vld1q_f32(r4);
_sum = vmlaq_f32(_sum, _r4, _k20212223);
float32x4_t _k_t4;
_k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
_k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
_k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
_k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);
float32x4_t _r_t4;
_r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
_r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
_r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
_r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
_sum = vmlaq_f32(_sum, _r_t4, _k_t4);
sum += r4[4] * k4[4];
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
sum += vget_lane_f32(_ss, 0);
*outptr = sum;
r0++;
r1++;
r2++;
r3++;
r4++;
outptr++;
#else
// TODO neon assembly optimize
asm volatile(
"veor q14, q14 \n"
"vext.32 q14, %q19, q14, #3 \n"// q14 = bias0 0 0 0
"vld1.f32 {d16-d17}, [%1] \n"// q8 = r00 r01 r02 r03
"vld1.f32 {d18-d19}, [%2] \n"// q9 = r10 r11 r12 r13(X)
"add r4, %1, #16 \n"
"vld1.f32 {d19[1]}, [r4] \n"
"vext.32 q9, q9, q9, #3 \n"// q9 = r04 r10 r11 r12
"vmla.f32 q14, q8, %q12 \n"
"add r4, %2, #12 \n"
"vld1.f32 {d20}, [r4] \n"// d20 = r13 r14
"vld1.f32 {d21}, [%3] \n"// d21 = r20 r21
"vmla.f32 q14, q9, %q13 \n"
"add r4, %3, #8 \n"
"vld1.f32 {d22-d23}, [r4] \n"// q11 = r22 r23 r24 X
"vld1.f32 {d23[1]}, [%4] \n"// q11 = r22 r23 r24 r30
"vmla.f32 q14, q10, %q14 \n"
"add r4, %4, #4 \n"
"vld1.f32 {d24-d25}, [r4] \n"// q12 = r31 r32 r33 r34
"vmla.f32 q14, q11, %q15 \n"
"vld1.f32 {d26-d27}, [%5] \n"// q13 = r40 r41 r42 r43
"vmla.f32 q14, q12, %q16 \n"
"veor d30, d30 \n"
"add r4, %5, #16 \n"
"vld1.f32 {d30[0]}, [r4] \n"// d30 = r44 0
"vmla.f32 q14, q13, %q17 \n"
"vmla.f32 d28, d30, %e18 \n"
"add %1, #4 \n"
// h-sum
"vadd.f32 d28, d28, d29 \n"
"add %2, #4 \n"
"add %3, #4 \n"
"vpadd.f32 d28, d28, d28 \n"
"add %4, #4 \n"
"add %5, #4 \n"
"vst1.f32 {d28[0]}, [%0]! \n"
: "=r"(outptr), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4) // %5
: "0"(outptr),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"w"(_k0123), // %12
"w"(_k4567), // %13
"w"(_k891011), // %14
"w"(_k12131415), // %15
"w"(_k16171819), // %16
"w"(_k20212223), // %17
"w"(_k24242424), // %18
"w"(_bias0) // %19
: "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
*outptr = sum;
r0++;
r1++;
r2++;
r3++;
r4++;
outptr++;
#endif
}
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
}
}
}
static void convdw5x5s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
//int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
//int outch = top_blob.c;
const int tailstep = w - 2*outw + w;
const int group = bottom_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g=0; g<group; g++)
{
Mat out = top_blob.channel(g);
const float bias0 = bias ? bias[g] : 0.f;
const float* kernel0 = kernel + g*25;
float* outptr = out;
const float* img0 = bottom_blob.channel(g);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* r4 = img0 + w*4;
const float* k0 = kernel0;
const float* k1 = kernel0 + 5;
const float* k2 = kernel0 + 10;
const float* k3 = kernel0 + 15;
const float* k4 = kernel0 + 20;
#if __ARM_NEON
float32x4_t _k0123 = vld1q_f32(kernel0);
float32x4_t _k4567 = vld1q_f32(kernel0+4);
float32x4_t _k891011 = vld1q_f32(kernel0+8);
float32x4_t _k12131415 = vld1q_f32(kernel0+12);
float32x4_t _k16171819 = vld1q_f32(kernel0+16);
float32x4_t _k20212223 = vld1q_f32(kernel0+20);
float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
float32x4_t _bias0 = vdupq_n_f32(bias0);
#endif // __ARM_NEON
int i = 0;
// NOTE unroll outh 2 results somewhat speed drop :| (about -4%)
// so we do not implement it here
for (; i < outh; i++)
{
#if __ARM_NEON
#if __aarch64__
int nn = outw >> 3;
int remain = outw & 7;
#else
int nn = outw >> 2;
int remain = outw & 3;
#endif // __aarch64__
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
// r0
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v16.4s, v17.4s}, [%2], #32 \n"// v16 v17 = r00 r01
"mov v8.16b, %21.16b \n"// v8 = _bias0
"mov v9.16b, %21.16b \n"// v9 = _bias0
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v18.4s, v19.4s}, [%2], #32 \n"// v18 v19 = r08 r09
"0: \n"
"fmul v10.4s, v16.4s, %14.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v20.4s, v21.4s}, [%2] \n"// v20 v21 = r016 r017
"fmul v11.4s, v18.4s, %14.s[0] \n"
"ext v22.16b, v16.16b, v18.16b, #4 \n"// v22 = r02
"fmla v8.4s, v17.4s, %14.s[1] \n"
"ext v25.16b, v18.16b, v20.16b, #4 \n"// v25 = r010
"fmla v9.4s, v19.4s, %14.s[1] \n"
"ext v23.16b, v17.16b, v19.16b, #4 \n"// v23 = r03
"fmla v10.4s, v22.4s, %14.s[2] \n"
"ext v26.16b, v19.16b, v21.16b, #4 \n"// v26 = r011
"fmla v11.4s, v25.4s, %14.s[2] \n"
"ext v24.16b, v16.16b, v18.16b, #8 \n"// v24 = r04
"fmla v8.4s, v23.4s, %14.s[3] \n"
"ext v27.16b, v18.16b, v20.16b, #8 \n"// v27 = r012
"fmla v9.4s, v26.4s, %14.s[3] \n"
// r1
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v12.4s, v13.4s}, [%3], #32 \n"// v12 v13 = r10 r11
"fmla v10.4s, v24.4s, %15.s[0] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v14.4s, v15.4s}, [%3], #32 \n"// v14 v15 = r18 r19
"fmla v11.4s, v27.4s, %15.s[0] \n"
"fmla v8.4s, v12.4s, %15.s[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v20.4s, v21.4s}, [%3] \n"// v20 v21 = r116 r117
"fmla v9.4s, v14.4s, %15.s[1] \n"
"ext v22.16b, v12.16b, v14.16b, #4 \n"// v22 = r12
"fmla v10.4s, v13.4s, %15.s[2] \n"
"ext v25.16b, v14.16b, v20.16b, #4 \n"// v25 = r110
"fmla v11.4s, v15.4s, %15.s[2] \n"
"ext v23.16b, v13.16b, v15.16b, #4 \n"// v23 = r13
"fmla v8.4s, v22.4s, %15.s[3] \n"
"ext v26.16b, v15.16b, v21.16b, #4 \n"// v26 = r111
"fmla v9.4s, v25.4s, %15.s[3] \n"
"ext v24.16b, v12.16b, v14.16b, #8 \n"// v24 = r14
"fmla v10.4s, v23.4s, %16.s[0] \n"
"ext v27.16b, v14.16b, v20.16b, #8 \n"// v27 = r112
"fmla v11.4s, v26.4s, %16.s[0] \n"
// r2
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v16.4s, v17.4s}, [%4], #32 \n"// v16 v17 = r20 r21
"fmla v8.4s, v24.4s, %16.s[1] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v18.4s, v19.4s}, [%4], #32 \n"// v18 v19 = r28 r29
"fmla v9.4s, v27.4s, %16.s[1] \n"
"fmla v10.4s, v16.4s, %16.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v20.4s, v21.4s}, [%4] \n"// v20 v21 = r216 r217
"fmla v11.4s, v18.4s, %16.s[2] \n"
"ext v22.16b, v16.16b, v18.16b, #4 \n"// v22 = r22
"fmla v8.4s, v17.4s, %16.s[3] \n"
"ext v25.16b, v18.16b, v20.16b, #4 \n"// v25 = r210
"fmla v9.4s, v19.4s, %16.s[3] \n"
"ext v23.16b, v17.16b, v19.16b, #4 \n"// v23 = r23
"fmla v10.4s, v22.4s, %17.s[0] \n"
"ext v26.16b, v19.16b, v21.16b, #4 \n"// v26 = r211
"fmla v11.4s, v25.4s, %17.s[0] \n"
"ext v24.16b, v16.16b, v18.16b, #8 \n"// v24 = r24
"fmla v8.4s, v23.4s, %17.s[1] \n"
"ext v27.16b, v18.16b, v20.16b, #8 \n"// v27 = r212
"fmla v9.4s, v26.4s, %17.s[1] \n"
// r3
"prfm pldl1keep, [%5, #256] \n"
"ld2 {v12.4s, v13.4s}, [%5], #32 \n"// v12 v13 = r30 r31
"fmla v10.4s, v24.4s, %17.s[2] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld2 {v14.4s, v15.4s}, [%5], #32 \n"// v14 v15 = r38 r39
"fmla v11.4s, v27.4s, %17.s[2] \n"
"fmla v8.4s, v12.4s, %17.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld2 {v20.4s, v21.4s}, [%5] \n"// v20 v21 = r316 r317
"fmla v9.4s, v14.4s, %17.s[3] \n"
"ext v22.16b, v12.16b, v14.16b, #4 \n"// v22 = r32
"fmla v10.4s, v13.4s, %18.s[0] \n"
"ext v25.16b, v14.16b, v20.16b, #4 \n"// v25 = r310
"fmla v11.4s, v15.4s, %18.s[0] \n"
"ext v23.16b, v13.16b, v15.16b, #4 \n"// v23 = r33
"fmla v8.4s, v22.4s, %18.s[1] \n"
"ext v26.16b, v15.16b, v21.16b, #4 \n"// v26 = r311
"fmla v9.4s, v25.4s, %18.s[1] \n"
"ext v24.16b, v12.16b, v14.16b, #8 \n"// v24 = r34
"fmla v10.4s, v23.4s, %18.s[2] \n"
"ext v27.16b, v14.16b, v20.16b, #8 \n"// v27 = r312
"fmla v11.4s, v26.4s, %18.s[2] \n"
// r4
"prfm pldl1keep, [%6, #256] \n"
"ld2 {v16.4s, v17.4s}, [%6], #32 \n"// v16 v17 = r40 r41
"fmla v8.4s, v24.4s, %18.s[3] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld2 {v18.4s, v19.4s}, [%6], #32 \n"// v18 v19 = r48 r49
"fmla v9.4s, v27.4s, %18.s[3] \n"
"fmla v10.4s, v16.4s, %19.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld2 {v20.4s, v21.4s}, [%6] \n"// v20 v21 = r416 r417
"fmla v11.4s, v18.4s, %19.s[0] \n"
"ext v22.16b, v16.16b, v18.16b, #4 \n"// v22 = r42
"fmla v8.4s, v17.4s, %19.s[1] \n"
"ext v25.16b, v18.16b, v20.16b, #4 \n"// v25 = r410
"fmla v9.4s, v19.4s, %19.s[1] \n"
"ext v23.16b, v17.16b, v19.16b, #4 \n"// v23 = r43
"fmla v10.4s, v22.4s, %19.s[2] \n"
"ext v26.16b, v19.16b, v21.16b, #4 \n"// v26 = r411
"fmla v11.4s, v25.4s, %19.s[2] \n"
"ext v24.16b, v16.16b, v18.16b, #8 \n"// v24 = r44
"fmla v8.4s, v23.4s, %19.s[3] \n"
"ext v27.16b, v18.16b, v20.16b, #8 \n"// v27 = r412
"fmla v9.4s, v26.4s, %19.s[3] \n"
"fmla v10.4s, v24.4s, %20.s[0] \n"
// r0
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v16.4s, v17.4s}, [%2], #32 \n"// v16 v17 = r00 r01
"fmla v11.4s, v27.4s, %20.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v18.4s, v19.4s}, [%2], #32 \n"// v18 v19 = r08 r09
"fadd v10.4s, v8.4s, v10.4s \n"
"fadd v11.4s, v9.4s, v11.4s \n"
"subs %w0, %w0, #1 \n"
"mov v8.16b, %21.16b \n"// v8 = _bias0
"mov v9.16b, %21.16b \n"// v9 = _bias0
"st1 {v10.4s, v11.4s}, [%1], #32 \n"
"bne 0b \n"
"sub %2, %2, #64 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424), // %20
"w"(_bias0) // %21
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
);
}
#else
if (nn > 0)
{
asm volatile(
// r0
"pld [%2, #256] \n"
"vld2.f32 {d20-d23}, [%2]! \n"// q10 q11 = r00 r01
"vmov q8, %q21 \n"
"pld [%2, #128] \n"
"vld2.f32 {d24-d25}, [%2] \n"// q12 = r08 x x
"0: \n"
"vmul.f32 q9, q10, %e14[0] \n"
"vmov d26, d25 \n"// q13 = r09 x x
"vext.32 q14, q10, q12, #1 \n"// q14 = r02
"vmla.f32 q8, q11, %e14[1] \n"
"vext.32 q15, q11, q13, #1 \n"// q15 = r03
"vmla.f32 q9, q14, %f14[0] \n"
"vext.32 q14, q10, q12, #2 \n"// q14 = r04
"vmla.f32 q8, q15, %f14[1] \n"
// r1
"pld [%3, #256] \n"
"vld2.f32 {d20-d23}, [%3]! \n"// q10 q11 = r10 r11
"vmla.f32 q9, q14, %e15[0] \n"
"pld [%3, #128] \n"
"vld2.f32 {d24-d25}, [%3] \n"// q12 = r18 x x
"vmla.f32 q8, q10, %e15[1] \n"
"vmov d26, d25 \n"// q13 = r19 x x
"vext.32 q14, q10, q12, #1 \n"// q14 = r12
"vmla.f32 q9, q11, %f15[0] \n"
"vext.32 q15, q11, q13, #1 \n"// q15 = r13
"vmla.f32 q8, q14, %f15[1] \n"
"vext.32 q14, q10, q12, #2 \n"// q14 = r14
"vmla.f32 q9, q15, %e16[0] \n"
// r2
"pld [%4, #256] \n"
"vld2.f32 {d20-d23}, [%4]! \n"// q10 q11 = r20 r21
"vmla.f32 q8, q14, %e16[1] \n"
"pld [%4, #128] \n"
"vld2.f32 {d24-d25}, [%4] \n"// q12 = r28 x x
"vmla.f32 q9, q10, %f16[0] \n"
"vmov d26, d25 \n"// q13 = r29 x x
"vext.32 q14, q10, q12, #1 \n"// q14 = r22
"vmla.f32 q8, q11, %f16[1] \n"
"vext.32 q15, q11, q13, #1 \n"// q15 = r23
"vmla.f32 q9, q14, %e17[0] \n"
"vext.32 q14, q10, q12, #2 \n"// q14 = r24
"vmla.f32 q8, q15, %e17[1] \n"
// r3
"pld [%5, #256] \n"
"vld2.f32 {d20-d23}, [%5]! \n"// q10 q11 = r30 r31
"vmla.f32 q9, q14, %f17[0] \n"
"pld [%5, #128] \n"
"vld2.f32 {d24-d25}, [%5] \n"// q12 = r38 x x
"vmla.f32 q8, q10, %f17[1] \n"
"vmov d26, d25 \n"// q13 = r39 x x
"vext.32 q14, q10, q12, #1 \n"// q14 = r32
"vmla.f32 q9, q11, %e18[0] \n"
"vext.32 q15, q11, q13, #1 \n"// q15 = r33
"vmla.f32 q8, q14, %e18[1] \n"
"vext.32 q14, q10, q12, #2 \n"// q14 = r34
"vmla.f32 q9, q15, %f18[0] \n"
// r4
"pld [%6, #256] \n"
"vld2.f32 {d20-d23}, [%6]! \n"// q10 q11 = r40 r41
"vmla.f32 q8, q14, %f18[1] \n"
"pld [%6, #128] \n"
"vld2.f32 {d24-d25}, [%6] \n"// q12 = r48 x x
"vmla.f32 q9, q10, %e19[0] \n"
"vmov d26, d25 \n"// q13 = r49 x x
"vext.32 q14, q10, q12, #1 \n"// q14 = r42
"vmla.f32 q8, q11, %e19[1] \n"
"vext.32 q15, q11, q13, #1 \n"// q15 = r43
"vmla.f32 q9, q14, %f19[0] \n"
"vext.32 q14, q10, q12, #2 \n"// q14 = r44
"vmla.f32 q8, q15, %f19[1] \n"
// r0
"pld [%2, #256] \n"
"vld2.f32 {d20-d23}, [%2]! \n"// q10 q11 = r00 r01
"vmla.f32 q9, q14, %e20[0] \n"
"pld [%2, #128] \n"
"vld2.f32 {d24-d25}, [%2] \n"// q12 = r08 x x
"vadd.f32 q9, q8, q9 \n"
"vmov q8, %q21 \n"
"subs %0, #1 \n"
"vst1.f32 {d18-d19}, [%1]! \n"
"bne 0b \n"
"sub %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424), // %20
"w"(_bias0) // %21
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float sum = bias0;
#if __ARM_NEON
// TODO neon assembly optimize
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _sum = vmulq_f32(_r0, _k0123);
float32x4_t _r1 = vld1q_f32(r1);
_sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));
float32x4_t _r2 = vld1q_f32(r2);
_sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));
float32x4_t _r3 = vld1q_f32(r3);
_sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));
float32x4_t _r4 = vld1q_f32(r4);
_sum = vmlaq_f32(_sum, _r4, _k20212223);
sum += r0[4] * k0[4];
sum += r1[4] * k1[4];
sum += r2[4] * k2[4];
sum += r3[4] * k3[4];
sum += r4[4] * k4[4];
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
sum += vget_lane_f32(_ss, 0);
#else
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
#endif
*outptr = sum;
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
r4 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
|
CSRTile.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * ******************************************************************************/
/* Michael Anderson (Intel Corp.)
* * ******************************************************************************/
#ifndef SRC_CSRTILE_H_
#define SRC_CSRTILE_H_
#include <string>
#include <algorithm>
#include <vector>
#include "binary_search.h"
// Strict weak ordering for edges in non-transposed (row-major) layout:
// order primarily by source vertex, then by destination vertex.
template <typename T>
bool compare_notrans(const edge_t<T>& a, const edge_t<T>& b) {
  if (a.src != b.src)
    return a.src < b.src;
  return a.dst < b.dst;
}
template <typename T>
class CSRTile {
public:
std::string name;
int m;
int n;
int nnz;
T* a;
int* ja;
int* ia;
CSRTile() : name("TEMP"), m(0), n(0), nnz(0) {}
CSRTile(int _m, int _n) : name("TEMP"), m(_m), n(_n), nnz(0) {}
CSRTile(edge_t<T>* edges, int _m, int _n, int _nnz, int row_start,
int col_start)
: name("TEMP"), m(_m), n(_n), nnz(_nnz) {
double stt = MPI_Wtime();
if (nnz > 0) {
__gnu_parallel::sort(edges, edges + nnz, compare_notrans<T>);
a = reinterpret_cast<T*>(
_mm_malloc((uint64_t)nnz * (uint64_t)sizeof(T), 64));
ja = reinterpret_cast<int*>(
_mm_malloc((uint64_t)nnz * (uint64_t)sizeof(int), 64));
int * jia = reinterpret_cast<int*>(
_mm_malloc((uint64_t)nnz * (uint64_t)sizeof(int), 64));
ia = reinterpret_cast<int*>(_mm_malloc((m + 1) * sizeof(int), 64));
// convert to CSR
#pragma omp parallel for
for (uint64_t i = 0; i < (uint64_t)nnz; i++) {
a[i] = edges[i].val;
ja[i] = edges[i].dst - col_start; // one-based
jia[i] = edges[i].src - row_start; // one-based
}
// Assign ia in parallel
int num_partitions = omp_get_max_threads() * 4;
int rows_per_partition = (m + num_partitions-1) / num_partitions;
//#pragma omp parallel for
for(int p = 0 ; p < num_partitions ; p++)
{
int start_row = rows_per_partition * p;
int end_row = rows_per_partition * (p+1);
if(end_row > m) end_row = m;
// Find first
int nz_start = -1;
int row = start_row ;
while(row < m && nz_start == -1)
{
nz_start = binary_search_left_border(jia, row+1, 0, nnz, nnz);
row++;
}
if(nz_start == -1)
{
nz_start = nnz;
}
int current_row = start_row;
ia[current_row] = nz_start;
for (int i = nz_start; i < nnz; i++) {
if(current_row >= end_row) break;
while ((jia[i] > current_row) && (current_row < end_row)) {
ia[current_row] = i + 1;
current_row++;
}
}
while (current_row < end_row) {
ia[current_row] = nnz + 1;
current_row++;
}
}
ia[m] = nnz+1;
/*
int cnt = 0;
for(int row = 0 ; row < m ; row++)
{
for(int nz = ia[row] ; nz < ia[row+1] ; nz++)
{
assert(ja[nz] == edges[nz].dst - col_start);
cnt++;
}
}
*/
_mm_free(jia);
}
}
bool isEmpty() const { return nnz <= 0; }
void get_edges(edge_t<T>* edges, int row_start, int col_start) {
int nnzcnt = 0;
#pragma omp parallel for
for (int i = 0; i < this->m; i++) {
for (int nz_id = ia[i]; nz_id < ia[i + 1]; nz_id++) {
edges[nz_id-1].src = i + row_start + 1;
edges[nz_id-1].dst = ja[nz_id - 1] + col_start;
edges[nz_id-1].val = a[nz_id - 1];
}
}
//assert(nnzcnt == this->nnz);
}
CSRTile& operator=(CSRTile other) {
this->name = other.name;
this->m = other.m;
this->n = other.n;
this->nnz = other.nnz;
this->a = other.a;
this->ia = other.ia;
this->ja = other.ja;
}
void clear() {
if (!isEmpty()) {
_mm_free(a);
_mm_free(ja);
_mm_free(ia);
}
nnz = 0;
}
~CSRTile(void) {}
void send_tile_metadata(int myrank, int dst_rank, int output_rank) {
if (myrank == output_rank)
std::cout << "Rank: " << myrank << " sending " << name << " to rank "
<< dst_rank << std::endl;
MPI_Send(&(nnz), 1, MPI_INT, dst_rank, 0, MPI_COMM_WORLD);
MPI_Send(&(m), 1, MPI_INT, dst_rank, 0, MPI_COMM_WORLD);
MPI_Send(&(n), 1, MPI_INT, dst_rank, 0, MPI_COMM_WORLD);
if (myrank == output_rank)
std::cout << "Metadata sent, nnz: " << nnz << std::endl;
}
void recv_tile_metadata(int myrank, int src_rank, int output_rank) {
if (!isEmpty()) {
clear();
}
MPI_Recv(&(nnz), 1, MPI_INT, src_rank, 0, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
MPI_Recv(&(m), 1, MPI_INT, src_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(&(n), 1, MPI_INT, src_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
void send_tile(int myrank, int dst_rank, int output_rank, bool block, std::vector<MPI_Request>* reqs) {
if (!isEmpty()) {
if (block) {
MPI_Send(this->a, (uint64_t)(this->nnz * sizeof(T)), MPI_BYTE, dst_rank,
0, MPI_COMM_WORLD);
MPI_Send(this->ja, (uint64_t)(this->nnz), MPI_INT, dst_rank, 0,
MPI_COMM_WORLD);
MPI_Send(this->ia, ((this->m) + 1), MPI_INT, dst_rank, 0,
MPI_COMM_WORLD);
} else {
MPI_Request r1, r2, r3;
MPI_Isend(this->a, (uint64_t)(this->nnz * sizeof(T)), MPI_BYTE,
dst_rank, 0, MPI_COMM_WORLD, &r1);
MPI_Isend(this->ja, (uint64_t)(this->nnz), MPI_INT, dst_rank, 0,
MPI_COMM_WORLD, &r2);
MPI_Isend(this->ia, ((this->m) + 1), MPI_INT, dst_rank, 0,
MPI_COMM_WORLD, &r3);
(*reqs).push_back(r1);
(*reqs).push_back(r2);
(*reqs).push_back(r3);
}
}
}
void recv_tile(int myrank, int src_rank, int output_rank, bool block,
std::vector<MPI_Request>* reqs) {
if (!(isEmpty())) {
a = reinterpret_cast<T*>(
_mm_malloc((uint64_t)(nnz) * (uint64_t)sizeof(T), 64));
ja = reinterpret_cast<int*>(
_mm_malloc((uint64_t)(nnz) * (uint64_t)sizeof(int), 64));
ia = reinterpret_cast<int*>(_mm_malloc(((m) + 1) * sizeof(int), 64));
if (block) {
MPI_Recv(a, (uint64_t)(nnz * sizeof(T)), MPI_BYTE, src_rank, 0,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(ja, (uint64_t)(nnz), MPI_INT, src_rank, 0, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
MPI_Recv(ia, ((m) + 1), MPI_INT, src_rank, 0, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
} else {
MPI_Request r1, r2, r3;
MPI_Irecv(a, (uint64_t)(nnz * sizeof(T)), MPI_BYTE, src_rank, 0,
MPI_COMM_WORLD, &r1);
MPI_Irecv(ja, (uint64_t)(nnz), MPI_INT, src_rank, 0, MPI_COMM_WORLD,
&r2);
MPI_Irecv(ia, ((m) + 1), MPI_INT, src_rank, 0, MPI_COMM_WORLD, &r3);
(*reqs).push_back(r1);
(*reqs).push_back(r2);
(*reqs).push_back(r3);
}
}
}
};
#endif // SRC_CSRTILE_H_
|
reduction.c | // // From: https://github.com/OpenMP/Examples/blob/master/sources/Example_reduction.3.c
#include <stdio.h>
#define N 1000
#define SUM (N * (N-1)/2)
/* Computes sum(0..N-1) with an OpenMP target-parallel reduction and
 * verifies it against the closed form SUM = N*(N-1)/2.
 * Returns 0 on success, 1 on mismatch. */
int main (void)
{
  int a, i;
  int error = 0;
/* Offload a parallel region; `a` is shared across the team and `error`
 * is mapped back to the host. */
#pragma omp target parallel shared(a) private(i) map(tofrom: error)
  {
/* Exactly one thread zeroes the accumulator... */
#pragma omp master
    a = 0;
/* ...and the barrier ensures every thread sees a == 0 before the
 * reduction loop starts (master has no implied barrier). */
#pragma omp barrier
#pragma omp for reduction(+:a)
    for (i = 0; i < N; i++) {
      a += i;
    }
    /* The Sum shall be sum:[0:N] */
/* One thread checks the result; the implicit barrier of the preceding
 * worksharing loop guarantees the reduction is complete here. */
#pragma omp single
    {
      if (a != SUM){
        printf ("Incorrect result = %d, expected = %d!\n", a, SUM);
        error = 1;
      }
      else{
        printf ("The result is correct = %d!\n", a);
        error = 0;
      }
    }
  }
  return error;
}
|
convolution_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Direct 3x3 stride-1 convolution (scalar x86 path), accumulating all input
// channels into top_blob. The main loop produces TWO output rows at once
// (outptr / outptr2) so each loaded input row is reused by both sums; a
// single-row tail loop handles an odd final output row.
// Assumes top_blob is pre-sized to the valid-convolution output — TODO confirm
// against the calling layer.
static void conv3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    const float* kernel = _kernel;
    const float* bias = _bias;
    // Output channels are independent: one channel per thread.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);
        const float bias0 = bias ? bias[p] : 0.f;
        // Seed the whole channel with the bias; per-input-channel partial
        // sums are accumulated on top with `+=` below.
        out.fill(bias0);
        for (int q = 0; q < inch; q++)
        {
            float* outptr = out;
            float* outptr2 = outptr + outw; // second row of the output pair
            const float* img0 = bottom_blob.channel(q);
            // 3x3 kernel for output channel p, input channel q.
            const float* kernel0 = kernel + p * inch * 9 + q * 9;
            // Four consecutive input rows feed two output rows.
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w * 2;
            const float* r3 = img0 + w * 3;
            const float* k0 = kernel0;     // kernel row 0
            const float* k1 = kernel0 + 3; // kernel row 1
            const float* k2 = kernel0 + 6; // kernel row 2
            int i = 0;
            // Main loop: two output rows per iteration.
            for (; i + 1 < outh; i += 2)
            {
                int remain = outw;
                for (; remain > 0; remain--)
                {
                    float sum = 0;
                    float sum2 = 0;
                    // Output row i: rows r0..r2 against kernel rows k0..k2.
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    // Output row i+1: rows r1..r3 against the same kernel.
                    sum2 += r1[0] * k0[0];
                    sum2 += r1[1] * k0[1];
                    sum2 += r1[2] * k0[2];
                    sum2 += r2[0] * k1[0];
                    sum2 += r2[1] * k1[1];
                    sum2 += r2[2] * k1[2];
                    sum2 += r3[0] * k2[0];
                    sum2 += r3[1] * k2[1];
                    sum2 += r3[2] * k2[2];
                    *outptr += sum;
                    *outptr2 += sum2;
                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    outptr++;
                    outptr2++;
                }
                // Skip the 2-pixel border plus one full row (we consumed two
                // output rows, so the input window advances by two rows).
                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;
                // Jump over the second output row already written above.
                outptr += outw;
                outptr2 += outw;
            }
            // Tail: one remaining output row (odd outh); r3 is unused here.
            for (; i < outh; i++)
            {
                int remain = outw;
                for (; remain > 0; remain--)
                {
                    float sum = 0;
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    *outptr += sum;
                    r0++;
                    r1++;
                    r2++;
                    outptr++;
                }
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
        }
    }
}
// Precompute the Winograd F(2,3) kernel transform U = G * g * G^T for every
// (output channel, input channel) filter pair; each 3x3 kernel g becomes a
// 4x4 tile stored row-major in kernel_tm.
static void conv3x3s1_winograd23_transform_kernel_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(4 * 4, inch, outch);
    // G matrix for F(2,3): maps one 3-tap kernel row to 4 transform taps.
    const float G[4][3] = {
        {1.0f, 0.0f, 0.0f},
        {1.0f / 2, 1.0f / 2, 1.0f / 2},
        {1.0f / 2, -1.0f / 2, 1.0f / 2},
        {0.0f, 0.0f, 1.0f}
    };
    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* g = (const float*)kernel + p * inch * 9 + q * 9;
            float* u = kernel_tm.channel(p).row(q);
            // Gg = G * g (4x3): each kernel row dotted with each G row.
            float Gg[4][3];
            for (int i = 0; i < 4; i++)
            {
                for (int r = 0; r < 3; r++)
                {
                    Gg[i][r] = g[3 * r + 0] * G[i][0] + g[3 * r + 1] * G[i][1] + g[3 * r + 2] * G[i][2];
                }
            }
            // U = Gg * G^T (4x4), written row-major.
            for (int j = 0; j < 4; j++)
            {
                const float* row = Gg[j];
                for (int i = 0; i < 4; i++)
                {
                    u[j * 4 + i] = row[0] * G[i][0] + row[1] * G[i][1] + row[2] * G[i][2];
                }
            }
        }
    }
}
// Winograd F(2,3) 3x3 stride-1 convolution: pads the input to a multiple of
// the 2x2 output tile, transforms each 4x4 input tile (d' = B^T d B),
// multiplies element-wise with the pre-transformed kernels (see
// conv3x3s1_winograd23_transform_kernel_sse), then applies the inverse
// transform (Y = A^T w A) plus bias and crops back to the requested size.
static void conv3x3s1_winograd23_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    // pad to 2n+2, winograd F(2,3)
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 1) / 2 * 2;
    outh = (outh + 1) / 2 * 2;
    w = outw + 2;
    h = outh + 2;
    Option opt_b = opt;
    opt_b.blob_allocator = opt.workspace_allocator;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
    const float* bias = _bias;
    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;
        int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 4;
        const int tiles = nColBlocks * nRowBlocks;
        bottom_blob_tm.create(4 * 4, tiles, inch, 4u, opt.workspace_allocator);
        // BT
        // const float itm[4][4] = {
        //     {1.0f, 0.0f, -1.0f, 0.0f},
        //     {0.0f, 1.0f, 1.00f, 0.0f},
        //     {0.0f, -1.0f, 1.00f, 0.0f},
        //     {0.0f, -1.0f, 0.00f, 1.0f}
        // };
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const float* img = bottom_blob_bordered.channel(q);
            float* out_tm0 = bottom_blob_tm.channel(q);
            for (int j = 0; j < nColBlocks; j++)
            {
                // Four input rows per tile row; tiles overlap by 2 pixels.
                const float* r0 = img + w * j * 2;
                const float* r1 = r0 + w;
                const float* r2 = r1 + w;
                const float* r3 = r2 + w;
                for (int i = 0; i < nRowBlocks; i++)
                {
#if __AVX__
                    __m128 _d0, _d1, _d2, _d3;
                    __m128 _w0, _w1, _w2, _w3;
                    // load
                    _d0 = _mm_loadu_ps(r0);
                    _d1 = _mm_loadu_ps(r1);
                    _d2 = _mm_loadu_ps(r2);
                    _d3 = _mm_loadu_ps(r3);
                    // w = B_t * d
                    _w0 = _mm_sub_ps(_d0, _d2);
                    _w1 = _mm_add_ps(_d1, _d2);
                    _w2 = _mm_sub_ps(_d2, _d1);
                    _w3 = _mm_sub_ps(_d3, _d1);
                    // transpose d to d_t
                    _MM_TRANSPOSE4_PS(_w0, _w1, _w2, _w3);
                    // d = B_t * d_t
                    _d0 = _mm_sub_ps(_w0, _w2);
                    _d1 = _mm_add_ps(_w1, _w2);
                    _d2 = _mm_sub_ps(_w2, _w1);
                    _d3 = _mm_sub_ps(_w3, _w1);
                    // save to out_tm
                    _mm_storeu_ps(out_tm0, _d0);
                    _mm_storeu_ps(out_tm0 + 4, _d1);
                    _mm_storeu_ps(out_tm0 + 8, _d2);
                    _mm_storeu_ps(out_tm0 + 12, _d3);
#else
                    // Scalar fallback: same B^T d B transform, unrolled.
                    float d0[4], d1[4], d2[4], d3[4];
                    float w0[4], w1[4], w2[4], w3[4];
                    float t0[4], t1[4], t2[4], t3[4];
                    // load
                    for (int n = 0; n < 4; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                    }
                    // w = B_t * d
                    for (int n = 0; n < 4; n++)
                    {
                        w0[n] = d0[n] - d2[n];
                        w1[n] = d1[n] + d2[n];
                        w2[n] = d2[n] - d1[n];
                        w3[n] = d3[n] - d1[n];
                    }
                    // transpose d to d_t
                    {
                        t0[0] = w0[0];
                        t1[0] = w0[1];
                        t2[0] = w0[2];
                        t3[0] = w0[3];
                        t0[1] = w1[0];
                        t1[1] = w1[1];
                        t2[1] = w1[2];
                        t3[1] = w1[3];
                        t0[2] = w2[0];
                        t1[2] = w2[1];
                        t2[2] = w2[2];
                        t3[2] = w2[3];
                        t0[3] = w3[0];
                        t1[3] = w3[1];
                        t2[3] = w3[2];
                        t3[3] = w3[3];
                    }
                    // d = B_t * d_t
                    for (int n = 0; n < 4; n++)
                    {
                        d0[n] = t0[n] - t2[n];
                        d1[n] = t1[n] + t2[n];
                        d2[n] = t2[n] - t1[n];
                        d3[n] = t3[n] - t1[n];
                    }
                    // save to out_tm
                    for (int n = 0; n < 4; n++)
                    {
                        out_tm0[n] = d0[n];
                        out_tm0[n + 4] = d1[n];
                        out_tm0[n + 8] = d2[n];
                        out_tm0[n + 12] = d3[n];
                    }
#endif
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;
                    out_tm0 += 16;
                }
            }
        }
    }
    bottom_blob_bordered = Mat();
    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;
        int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 4;
        const int tiles = nColBlocks * nRowBlocks;
        top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);
        // Unroll output channels by 4; the remainder is handled below.
        int nn_outch = outch >> 2;
        int remain_outch_start = nn_outch << 2;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int p = pp * 4;
            Mat out0_tm = top_blob_tm.channel(p);
            Mat out1_tm = top_blob_tm.channel(p + 1);
            Mat out2_tm = top_blob_tm.channel(p + 2);
            Mat out3_tm = top_blob_tm.channel(p + 3);
            const Mat kernel0_tm = kernel_tm.channel(p);
            const Mat kernel1_tm = kernel_tm.channel(p + 1);
            const Mat kernel2_tm = kernel_tm.channel(p + 2);
            const Mat kernel3_tm = kernel_tm.channel(p + 3);
            for (int i = 0; i < tiles; i++)
            {
                float* output0_tm = out0_tm.row(i);
                float* output1_tm = out1_tm.row(i);
                float* output2_tm = out2_tm.row(i);
                float* output3_tm = out3_tm.row(i);
#if __AVX__
                // 16-element tile held as two 8-lane accumulators per output.
                float zero_val = 0.f;
                __m256 _sum0 = _mm256_broadcast_ss(&zero_val);
                __m256 _sum0n = _mm256_broadcast_ss(&zero_val);
                __m256 _sum1 = _mm256_broadcast_ss(&zero_val);
                __m256 _sum1n = _mm256_broadcast_ss(&zero_val);
                __m256 _sum2 = _mm256_broadcast_ss(&zero_val);
                __m256 _sum2n = _mm256_broadcast_ss(&zero_val);
                __m256 _sum3 = _mm256_broadcast_ss(&zero_val);
                __m256 _sum3n = _mm256_broadcast_ss(&zero_val);
                int q = 0;
                // Input channels unrolled by 4.
                for (; q + 3 < inch; q += 4)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);
                    const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
                    const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
                    const float* r3 = bottom_blob_tm.channel(q + 3).row(i);
                    const float* k0 = kernel0_tm.row(q);
                    const float* k1 = kernel1_tm.row(q);
                    const float* k2 = kernel2_tm.row(q);
                    const float* k3 = kernel3_tm.row(q);
                    __m256 _r0 = _mm256_loadu_ps(r0);
                    __m256 _r0n = _mm256_loadu_ps(r0 + 8);
                    // k0
                    __m256 _k0 = _mm256_loadu_ps(k0);
                    __m256 _k0n = _mm256_loadu_ps(k0 + 8);
                    __m256 _k1 = _mm256_loadu_ps(k1);
                    __m256 _k1n = _mm256_loadu_ps(k1 + 8);
                    __m256 _k2 = _mm256_loadu_ps(k2);
                    __m256 _k2n = _mm256_loadu_ps(k2 + 8);
                    __m256 _k3 = _mm256_loadu_ps(k3);
                    __m256 _k3n = _mm256_loadu_ps(k3 + 8);
                    _sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
                    _sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
                    _sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
                    _sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
                    _sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
                    _sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
                    _sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
                    _sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
                    // k1
                    _r0 = _mm256_loadu_ps(r1);
                    _r0n = _mm256_loadu_ps(r1 + 8);
                    _k0 = _mm256_loadu_ps(k0 + 16);
                    _k0n = _mm256_loadu_ps(k0 + 24);
                    _k1 = _mm256_loadu_ps(k1 + 16);
                    _k1n = _mm256_loadu_ps(k1 + 24);
                    _k2 = _mm256_loadu_ps(k2 + 16);
                    _k2n = _mm256_loadu_ps(k2 + 24);
                    _k3 = _mm256_loadu_ps(k3 + 16);
                    _k3n = _mm256_loadu_ps(k3 + 24);
                    _sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
                    _sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
                    _sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
                    _sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
                    _sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
                    _sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
                    _sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
                    _sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
                    // k2
                    _r0 = _mm256_loadu_ps(r2);
                    _r0n = _mm256_loadu_ps(r2 + 8);
                    _k0 = _mm256_loadu_ps(k0 + 32);
                    _k0n = _mm256_loadu_ps(k0 + 40);
                    _k1 = _mm256_loadu_ps(k1 + 32);
                    _k1n = _mm256_loadu_ps(k1 + 40);
                    _k2 = _mm256_loadu_ps(k2 + 32);
                    _k2n = _mm256_loadu_ps(k2 + 40);
                    _k3 = _mm256_loadu_ps(k3 + 32);
                    _k3n = _mm256_loadu_ps(k3 + 40);
                    _sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
                    _sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
                    _sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
                    _sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
                    _sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
                    _sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
                    _sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
                    _sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
                    // k3
                    _r0 = _mm256_loadu_ps(r3);
                    _r0n = _mm256_loadu_ps(r3 + 8);
                    _k0 = _mm256_loadu_ps(k0 + 48);
                    _k0n = _mm256_loadu_ps(k0 + 56);
                    _k1 = _mm256_loadu_ps(k1 + 48);
                    _k1n = _mm256_loadu_ps(k1 + 56);
                    _k2 = _mm256_loadu_ps(k2 + 48);
                    _k2n = _mm256_loadu_ps(k2 + 56);
                    _k3 = _mm256_loadu_ps(k3 + 48);
                    _k3n = _mm256_loadu_ps(k3 + 56);
                    _sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
                    _sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
                    _sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
                    _sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
                    _sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
                    _sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
                    _sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
                    _sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
                }
                // Remaining input channels, one at a time.
                for (; q < inch; q++)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);
                    const float* k0 = kernel0_tm.row(q);
                    const float* k1 = kernel1_tm.row(q);
                    const float* k2 = kernel2_tm.row(q);
                    const float* k3 = kernel3_tm.row(q);
                    __m256 _r0 = _mm256_loadu_ps(r0);
                    __m256 _r0n = _mm256_loadu_ps(r0 + 8);
                    __m256 _k0 = _mm256_loadu_ps(k0);
                    __m256 _k0n = _mm256_loadu_ps(k0 + 8);
                    __m256 _k1 = _mm256_loadu_ps(k1);
                    __m256 _k1n = _mm256_loadu_ps(k1 + 8);
                    __m256 _k2 = _mm256_loadu_ps(k2);
                    __m256 _k2n = _mm256_loadu_ps(k2 + 8);
                    __m256 _k3 = _mm256_loadu_ps(k3);
                    __m256 _k3n = _mm256_loadu_ps(k3 + 8);
                    _sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
                    _sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
                    _sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
                    _sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
                    _sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
                    _sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
                    _sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
                    _sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
                }
                _mm256_storeu_ps(output0_tm, _sum0);
                _mm256_storeu_ps(output0_tm + 8, _sum0n);
                _mm256_storeu_ps(output1_tm, _sum1);
                _mm256_storeu_ps(output1_tm + 8, _sum1n);
                _mm256_storeu_ps(output2_tm, _sum2);
                _mm256_storeu_ps(output2_tm + 8, _sum2n);
                _mm256_storeu_ps(output3_tm, _sum3);
                _mm256_storeu_ps(output3_tm + 8, _sum3n);
#else
                // Scalar fallback: same 4-output x 4-input unrolled dot.
                float sum0[16] = {0.0f};
                float sum1[16] = {0.0f};
                float sum2[16] = {0.0f};
                float sum3[16] = {0.0f};
                int q = 0;
                for (; q + 3 < inch; q += 4)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);
                    const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
                    const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
                    const float* r3 = bottom_blob_tm.channel(q + 3).row(i);
                    const float* k0 = kernel0_tm.row(q);
                    const float* k1 = kernel1_tm.row(q);
                    const float* k2 = kernel2_tm.row(q);
                    const float* k3 = kernel3_tm.row(q);
                    // Each k pointer walks 16 floats per unrolled input
                    // channel, then is rewound for the next n.
                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += r0[n] * k0[n];
                        k0 += 16;
                        sum0[n] += r1[n] * k0[n];
                        k0 += 16;
                        sum0[n] += r2[n] * k0[n];
                        k0 += 16;
                        sum0[n] += r3[n] * k0[n];
                        k0 -= 16 * 3;
                        sum1[n] += r0[n] * k1[n];
                        k1 += 16;
                        sum1[n] += r1[n] * k1[n];
                        k1 += 16;
                        sum1[n] += r2[n] * k1[n];
                        k1 += 16;
                        sum1[n] += r3[n] * k1[n];
                        k1 -= 16 * 3;
                        sum2[n] += r0[n] * k2[n];
                        k2 += 16;
                        sum2[n] += r1[n] * k2[n];
                        k2 += 16;
                        sum2[n] += r2[n] * k2[n];
                        k2 += 16;
                        sum2[n] += r3[n] * k2[n];
                        k2 -= 16 * 3;
                        sum3[n] += r0[n] * k3[n];
                        k3 += 16;
                        sum3[n] += r1[n] * k3[n];
                        k3 += 16;
                        sum3[n] += r2[n] * k3[n];
                        k3 += 16;
                        sum3[n] += r3[n] * k3[n];
                        k3 -= 16 * 3;
                    }
                }
                for (; q < inch; q++)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);
                    const float* k0 = kernel0_tm.row(q);
                    const float* k1 = kernel1_tm.row(q);
                    const float* k2 = kernel2_tm.row(q);
                    const float* k3 = kernel3_tm.row(q);
                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += r0[n] * k0[n];
                        sum1[n] += r0[n] * k1[n];
                        sum2[n] += r0[n] * k2[n];
                        sum3[n] += r0[n] * k3[n];
                    }
                }
                for (int n = 0; n < 16; n++)
                {
                    output0_tm[n] = sum0[n];
                    output1_tm[n] = sum1[n];
                    output2_tm[n] = sum2[n];
                    output3_tm[n] = sum3[n];
                }
#endif
            }
        }
        // Remainder output channels (outch not a multiple of 4), scalar path.
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = remain_outch_start; p < outch; p++)
        {
            Mat out0_tm = top_blob_tm.channel(p);
            const Mat kernel0_tm = kernel_tm.channel(p);
            for (int i = 0; i < tiles; i++)
            {
                float* output0_tm = out0_tm.row(i);
                float sum0[16] = {0.0f};
                int q = 0;
                for (; q + 3 < inch; q += 4)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);
                    const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
                    const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
                    const float* r3 = bottom_blob_tm.channel(q + 3).row(i);
                    const float* k0 = kernel0_tm.row(q);
                    const float* k1 = kernel0_tm.row(q + 1);
                    const float* k2 = kernel0_tm.row(q + 2);
                    const float* k3 = kernel0_tm.row(q + 3);
                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += r0[n] * k0[n];
                        sum0[n] += r1[n] * k1[n];
                        sum0[n] += r2[n] * k2[n];
                        sum0[n] += r3[n] * k3[n];
                    }
                }
                for (; q < inch; q++)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);
                    const float* k0 = kernel0_tm.row(q);
                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += r0[n] * k0[n];
                    }
                }
                for (int n = 0; n < 16; n++)
                {
                    output0_tm[n] = sum0[n];
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot
    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
    }
    {
        // AT
        // const float itm[2][4] = {
        //     {1.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 1.0f}
        // };
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;
        int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 4;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            Mat out_tm = top_blob_tm.channel(p);
            Mat out = top_blob_bordered.channel(p);
            const float bias0 = bias ? bias[p] : 0.f;
            for (int j = 0; j < nColBlocks; j++)
            {
                // Each tile yields a 2x2 output patch across two rows.
                float* outRow0 = out.row(j * 2);
                float* outRow1 = out.row(j * 2 + 1);
                for (int i = 0; i < nRowBlocks; i++)
                {
                    float* out_tile = out_tm.row(j * nRowBlocks + i);
                    float s0[4], s1[4], s2[4], s3[4];
                    float w0[4], w1[4];
                    float d0[2], d1[2], d2[2], d3[2];
                    float o0[2], o1[2];
                    // load
                    for (int n = 0; n < 4; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n + 4];
                        s2[n] = out_tile[n + 8];
                        s3[n] = out_tile[n + 12];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 4; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n];
                        w1[n] = s1[n] - s2[n] + s3[n];
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0];
                        d0[1] = w1[0];
                        d1[0] = w0[1];
                        d1[1] = w1[1];
                        d2[0] = w0[2];
                        d2[1] = w1[2];
                        d3[0] = w0[3];
                        d3[1] = w1[3];
                    }
                    // Y = A_T * w_t (bias added once per output pixel here)
                    for (int n = 0; n < 2; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n] + bias0;
                        o1[n] = d1[n] - d2[n] + d3[n] + bias0;
                    }
                    // save to top blob tm
                    outRow0[0] = o0[0];
                    outRow0[1] = o0[1];
                    outRow1[0] = o1[0];
                    outRow1[1] = o1[1];
                    outRow0 += 2;
                    outRow1 += 2;
                }
            }
        }
    }
    // END transform output
    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Winograd F(4,3) kernel pre-transform: first computes U = G * g * G^T
// (6x6 tile per 3x3 kernel), then repacks the 36 coefficients into nine
// 4-float groups (r = 0..8), interleaving output channels in runs of
// 8 / 4 / 1 for the matching unrolled compute kernels. One repacked Mat
// per r is appended to kernel_tm2.
static void conv3x3s1_winograd43_transform_kernel_sse(const Mat& kernel, std::vector<Mat>& kernel_tm2, int inch, int outch)
{
    Mat kernel_tm(6 * 6, inch, outch);
    // G
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };
    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);
            // transform kernel
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;
            // h = G * g (6x3): each kernel row dotted with each G row
            float tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }
            // U = h * G^T (6x6), row-major
            for (int j = 0; j < 6; j++)
            {
                float* tmpp = &tmp[j][0];
                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }
    // Repack: for each 4-float group r, interleave output channels 8-wide,
    // then 4-wide, then singly. Channel count below reserves one repack
    // channel per 8-run, per 4-run, and per leftover output channel.
    for (int r = 0; r < 9; r++)
    {
        Mat kernel_tm_test(4 * 8, inch, outch / 8 + (outch % 8) / 4 + outch % 4);
        int p = 0;
        // 8 output channels interleaved: 32 floats per input channel.
        for (; p + 7 < outch; p += 8)
        {
            const float* kernel0 = (const float*)kernel_tm.channel(p);
            const float* kernel1 = (const float*)kernel_tm.channel(p + 1);
            const float* kernel2 = (const float*)kernel_tm.channel(p + 2);
            const float* kernel3 = (const float*)kernel_tm.channel(p + 3);
            const float* kernel4 = (const float*)kernel_tm.channel(p + 4);
            const float* kernel5 = (const float*)kernel_tm.channel(p + 5);
            const float* kernel6 = (const float*)kernel_tm.channel(p + 6);
            const float* kernel7 = (const float*)kernel_tm.channel(p + 7);
            float* ktmp = kernel_tm_test.channel(p / 8);
            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];
                ktmp[4] = kernel1[r * 4 + 0];
                ktmp[5] = kernel1[r * 4 + 1];
                ktmp[6] = kernel1[r * 4 + 2];
                ktmp[7] = kernel1[r * 4 + 3];
                ktmp[8] = kernel2[r * 4 + 0];
                ktmp[9] = kernel2[r * 4 + 1];
                ktmp[10] = kernel2[r * 4 + 2];
                ktmp[11] = kernel2[r * 4 + 3];
                ktmp[12] = kernel3[r * 4 + 0];
                ktmp[13] = kernel3[r * 4 + 1];
                ktmp[14] = kernel3[r * 4 + 2];
                ktmp[15] = kernel3[r * 4 + 3];
                ktmp[16] = kernel4[r * 4 + 0];
                ktmp[17] = kernel4[r * 4 + 1];
                ktmp[18] = kernel4[r * 4 + 2];
                ktmp[19] = kernel4[r * 4 + 3];
                ktmp[20] = kernel5[r * 4 + 0];
                ktmp[21] = kernel5[r * 4 + 1];
                ktmp[22] = kernel5[r * 4 + 2];
                ktmp[23] = kernel5[r * 4 + 3];
                ktmp[24] = kernel6[r * 4 + 0];
                ktmp[25] = kernel6[r * 4 + 1];
                ktmp[26] = kernel6[r * 4 + 2];
                ktmp[27] = kernel6[r * 4 + 3];
                ktmp[28] = kernel7[r * 4 + 0];
                ktmp[29] = kernel7[r * 4 + 1];
                ktmp[30] = kernel7[r * 4 + 2];
                ktmp[31] = kernel7[r * 4 + 3];
                ktmp += 32;
                // advance each source to its next input channel (6*6 floats)
                kernel0 += 36;
                kernel1 += 36;
                kernel2 += 36;
                kernel3 += 36;
                kernel4 += 36;
                kernel5 += 36;
                kernel6 += 36;
                kernel7 += 36;
            }
        }
        // 4 output channels interleaved: 16 floats per input channel.
        for (; p + 3 < outch; p += 4)
        {
            const float* kernel0 = (const float*)kernel_tm.channel(p);
            const float* kernel1 = (const float*)kernel_tm.channel(p + 1);
            const float* kernel2 = (const float*)kernel_tm.channel(p + 2);
            const float* kernel3 = (const float*)kernel_tm.channel(p + 3);
            float* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4);
            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];
                ktmp[4] = kernel1[r * 4 + 0];
                ktmp[5] = kernel1[r * 4 + 1];
                ktmp[6] = kernel1[r * 4 + 2];
                ktmp[7] = kernel1[r * 4 + 3];
                ktmp[8] = kernel2[r * 4 + 0];
                ktmp[9] = kernel2[r * 4 + 1];
                ktmp[10] = kernel2[r * 4 + 2];
                ktmp[11] = kernel2[r * 4 + 3];
                ktmp[12] = kernel3[r * 4 + 0];
                ktmp[13] = kernel3[r * 4 + 1];
                ktmp[14] = kernel3[r * 4 + 2];
                ktmp[15] = kernel3[r * 4 + 3];
                ktmp += 16;
                kernel0 += 36;
                kernel1 += 36;
                kernel2 += 36;
                kernel3 += 36;
            }
        }
        // leftover output channels, one at a time: 4 floats per input channel.
        for (; p < outch; p++)
        {
            const float* kernel0 = (const float*)kernel_tm.channel(p);
            float* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4 + p % 4);
            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];
                ktmp += 4;
                kernel0 += 36;
            }
        }
        kernel_tm2.push_back(kernel_tm_test);
    }
}
// 3x3 stride-1 convolution using the Winograd F(4x4, 3x3) algorithm (x86 SSE/AVX path).
//
// Pipeline:
//   1. pad input so the output is a multiple of 4 in each dimension (+2 border for the 6x6 tiles)
//   2. transform each 6x6 input tile with B^T * d * B (input transform)
//   3. per-tile element-wise dot with the pre-packed transformed kernels (kernel_tm_test,
//      packed in groups of 8/4/1 output channels, 4 floats per input channel per group slot)
//   4. transform each 6x6 product tile back with A^T * w * A to a 4x4 output tile
//   5. cut the padding off to the real output size
//
// kernel_tm_test: 9 Mats (one per group of 4 transformed-kernel coefficients, r = 0..8),
// produced by the packing routine above. Interface unchanged.
static void conv3x3s1_winograd43_sse(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat>& kernel_tm_test, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    const float* bias = _bias;
    // pad to 4n+2, winograd F(4,3)
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;
    w = outw + 2;
    h = outh + 2;
    Option opt_b = opt;
    opt_b.blob_allocator = opt.workspace_allocator;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;
        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;
        const int tiles = nColBlocks * nRowBlocks;
        // layout: channel = (coeff-group * tiles + tile), row = input channel, 4 floats per row
        bottom_blob_tm.create(4, inch, tiles * 9, elemsize, opt.workspace_allocator);
        // BT
        // const float itm[4][4] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
        // };
        // 0 = 4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r03 + r04
        // 2 = 4 * (r01 - r02) - r03 + r04
        // 3 = -2 * r01 - r02 + 2 * r03 + r04
        // 4 = 2 * r01 - r02 - 2 * r03 + r04
        // 5 = 4 * r01 - 5 * r03 + r05
#if __AVX__
        __m256 _1_n = _mm256_set1_ps(-1);
        __m256 _2_p = _mm256_set1_ps(2);
        __m256 _2_n = _mm256_set1_ps(-2);
        __m256 _4_p = _mm256_set1_ps(4);
        __m256 _4_n = _mm256_set1_ps(-4);
        __m256 _5_n = _mm256_set1_ps(-5);
#endif
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const float* img = bottom_blob_bordered.channel(q);
            for (int j = 0; j < nColBlocks; j++)
            {
                // six consecutive input rows feeding one row of 6x6 tiles
                const float* r0 = img + w * j * 4;
                const float* r1 = r0 + w;
                const float* r2 = r1 + w;
                const float* r3 = r2 + w;
                const float* r4 = r3 + w;
                const float* r5 = r4 + w;
                for (int i = 0; i < nRowBlocks; i++)
                {
                    float* out_tm0 = bottom_blob_tm.channel(tiles * 0 + j * nRowBlocks + i).row(q);
                    float* out_tm1 = bottom_blob_tm.channel(tiles * 1 + j * nRowBlocks + i).row(q);
                    float* out_tm2 = bottom_blob_tm.channel(tiles * 2 + j * nRowBlocks + i).row(q);
                    float* out_tm3 = bottom_blob_tm.channel(tiles * 3 + j * nRowBlocks + i).row(q);
                    float* out_tm4 = bottom_blob_tm.channel(tiles * 4 + j * nRowBlocks + i).row(q);
                    float* out_tm5 = bottom_blob_tm.channel(tiles * 5 + j * nRowBlocks + i).row(q);
                    float* out_tm6 = bottom_blob_tm.channel(tiles * 6 + j * nRowBlocks + i).row(q);
                    float* out_tm7 = bottom_blob_tm.channel(tiles * 7 + j * nRowBlocks + i).row(q);
                    float* out_tm8 = bottom_blob_tm.channel(tiles * 8 + j * nRowBlocks + i).row(q);
#if __AVX__
                    __m256 _d0, _d1, _d2, _d3, _d4, _d5;
                    __m256 _w0, _w1, _w2, _w3, _w4, _w5;
                    __m256 _t0, _t1, _t2, _t3, _t4, _t5;
                    __m256 _n0, _n1, _n2, _n3, _n4, _n5;
                    // load (8-float loads; only the first 6 lanes are meaningful)
                    _d0 = _mm256_loadu_ps(r0);
                    _d1 = _mm256_loadu_ps(r1);
                    _d2 = _mm256_loadu_ps(r2);
                    _d3 = _mm256_loadu_ps(r3);
                    _d4 = _mm256_loadu_ps(r4);
                    _d5 = _mm256_loadu_ps(r5);
                    // w = B_t * d
                    _w0 = _mm256_mul_ps(_d0, _4_p);
                    _w0 = _mm256_fmadd_ps(_d2, _5_n, _w0);
                    _w0 = _mm256_add_ps(_w0, _d4);
                    _w1 = _mm256_mul_ps(_d1, _4_n);
                    _w1 = _mm256_fmadd_ps(_d2, _4_n, _w1);
                    _w1 = _mm256_add_ps(_w1, _d3);
                    _w1 = _mm256_add_ps(_w1, _d4);
                    _w2 = _mm256_mul_ps(_d1, _4_p);
                    _w2 = _mm256_fmadd_ps(_d2, _4_n, _w2);
                    _w2 = _mm256_fmadd_ps(_d3, _1_n, _w2);
                    _w2 = _mm256_add_ps(_w2, _d4);
                    _w3 = _mm256_mul_ps(_d1, _2_n);
                    _w3 = _mm256_fmadd_ps(_d2, _1_n, _w3);
                    _w3 = _mm256_fmadd_ps(_d3, _2_p, _w3);
                    _w3 = _mm256_add_ps(_w3, _d4);
                    _w4 = _mm256_mul_ps(_d1, _2_p);
                    _w4 = _mm256_fmadd_ps(_d2, _1_n, _w4);
                    _w4 = _mm256_fmadd_ps(_d3, _2_n, _w4);
                    _w4 = _mm256_add_ps(_w4, _d4);
                    _w5 = _mm256_mul_ps(_d1, _4_p);
                    _w5 = _mm256_fmadd_ps(_d3, _5_n, _w5);
                    _w5 = _mm256_add_ps(_w5, _d5);
                    // transpose d to d_t (MSVC exposes lanes via m256_f32; elsewhere use operator[])
#if (defined _WIN32 && !(defined __MINGW32__) && !__clang__)
                    {
                        _t0.m256_f32[0] = _w0.m256_f32[0];
                        _t1.m256_f32[0] = _w0.m256_f32[1];
                        _t2.m256_f32[0] = _w0.m256_f32[2];
                        _t3.m256_f32[0] = _w0.m256_f32[3];
                        _t4.m256_f32[0] = _w0.m256_f32[4];
                        _t5.m256_f32[0] = _w0.m256_f32[5];
                        _t0.m256_f32[1] = _w1.m256_f32[0];
                        _t1.m256_f32[1] = _w1.m256_f32[1];
                        _t2.m256_f32[1] = _w1.m256_f32[2];
                        _t3.m256_f32[1] = _w1.m256_f32[3];
                        _t4.m256_f32[1] = _w1.m256_f32[4];
                        _t5.m256_f32[1] = _w1.m256_f32[5];
                        _t0.m256_f32[2] = _w2.m256_f32[0];
                        _t1.m256_f32[2] = _w2.m256_f32[1];
                        _t2.m256_f32[2] = _w2.m256_f32[2];
                        _t3.m256_f32[2] = _w2.m256_f32[3];
                        _t4.m256_f32[2] = _w2.m256_f32[4];
                        _t5.m256_f32[2] = _w2.m256_f32[5];
                        _t0.m256_f32[3] = _w3.m256_f32[0];
                        _t1.m256_f32[3] = _w3.m256_f32[1];
                        _t2.m256_f32[3] = _w3.m256_f32[2];
                        _t3.m256_f32[3] = _w3.m256_f32[3];
                        _t4.m256_f32[3] = _w3.m256_f32[4];
                        _t5.m256_f32[3] = _w3.m256_f32[5];
                        _t0.m256_f32[4] = _w4.m256_f32[0];
                        _t1.m256_f32[4] = _w4.m256_f32[1];
                        _t2.m256_f32[4] = _w4.m256_f32[2];
                        _t3.m256_f32[4] = _w4.m256_f32[3];
                        _t4.m256_f32[4] = _w4.m256_f32[4];
                        _t5.m256_f32[4] = _w4.m256_f32[5];
                        _t0.m256_f32[5] = _w5.m256_f32[0];
                        _t1.m256_f32[5] = _w5.m256_f32[1];
                        _t2.m256_f32[5] = _w5.m256_f32[2];
                        _t3.m256_f32[5] = _w5.m256_f32[3];
                        _t4.m256_f32[5] = _w5.m256_f32[4];
                        _t5.m256_f32[5] = _w5.m256_f32[5];
                    }
#else
                    {
                        _t0[0] = _w0[0];
                        _t1[0] = _w0[1];
                        _t2[0] = _w0[2];
                        _t3[0] = _w0[3];
                        _t4[0] = _w0[4];
                        _t5[0] = _w0[5];
                        _t0[1] = _w1[0];
                        _t1[1] = _w1[1];
                        _t2[1] = _w1[2];
                        _t3[1] = _w1[3];
                        _t4[1] = _w1[4];
                        _t5[1] = _w1[5];
                        _t0[2] = _w2[0];
                        _t1[2] = _w2[1];
                        _t2[2] = _w2[2];
                        _t3[2] = _w2[3];
                        _t4[2] = _w2[4];
                        _t5[2] = _w2[5];
                        _t0[3] = _w3[0];
                        _t1[3] = _w3[1];
                        _t2[3] = _w3[2];
                        _t3[3] = _w3[3];
                        _t4[3] = _w3[4];
                        _t5[3] = _w3[5];
                        _t0[4] = _w4[0];
                        _t1[4] = _w4[1];
                        _t2[4] = _w4[2];
                        _t3[4] = _w4[3];
                        _t4[4] = _w4[4];
                        _t5[4] = _w4[5];
                        _t0[5] = _w5[0];
                        _t1[5] = _w5[1];
                        _t2[5] = _w5[2];
                        _t3[5] = _w5[3];
                        _t4[5] = _w5[4];
                        _t5[5] = _w5[5];
                    }
#endif
                    // d = B_t * d_t
                    _n0 = _mm256_mul_ps(_t0, _4_p);
                    _n0 = _mm256_fmadd_ps(_t2, _5_n, _n0);
                    _n0 = _mm256_add_ps(_n0, _t4);
                    _n1 = _mm256_mul_ps(_t1, _4_n);
                    _n1 = _mm256_fmadd_ps(_t2, _4_n, _n1);
                    _n1 = _mm256_add_ps(_n1, _t3);
                    _n1 = _mm256_add_ps(_n1, _t4);
                    _n2 = _mm256_mul_ps(_t1, _4_p);
                    _n2 = _mm256_fmadd_ps(_t2, _4_n, _n2);
                    _n2 = _mm256_fmadd_ps(_t3, _1_n, _n2);
                    _n2 = _mm256_add_ps(_n2, _t4);
                    _n3 = _mm256_mul_ps(_t1, _2_n);
                    _n3 = _mm256_fmadd_ps(_t2, _1_n, _n3);
                    _n3 = _mm256_fmadd_ps(_t3, _2_p, _n3);
                    _n3 = _mm256_add_ps(_n3, _t4);
                    _n4 = _mm256_mul_ps(_t1, _2_p);
                    _n4 = _mm256_fmadd_ps(_t2, _1_n, _n4);
                    _n4 = _mm256_fmadd_ps(_t3, _2_n, _n4);
                    _n4 = _mm256_add_ps(_n4, _t4);
                    _n5 = _mm256_mul_ps(_t1, _4_p);
                    _n5 = _mm256_fmadd_ps(_t3, _5_n, _n5);
                    _n5 = _mm256_add_ps(_n5, _t5);
                    // save to out_tm: the 36 coefficients are scattered over 9 channels, 4 each
                    float output_n0[8] = {0.f};
                    _mm256_storeu_ps(output_n0, _n0);
                    float output_n1[8] = {0.f};
                    _mm256_storeu_ps(output_n1, _n1);
                    float output_n2[8] = {0.f};
                    _mm256_storeu_ps(output_n2, _n2);
                    float output_n3[8] = {0.f};
                    _mm256_storeu_ps(output_n3, _n3);
                    float output_n4[8] = {0.f};
                    _mm256_storeu_ps(output_n4, _n4);
                    float output_n5[8] = {0.f};
                    _mm256_storeu_ps(output_n5, _n5);
                    out_tm0[0] = output_n0[0];
                    out_tm0[1] = output_n0[1];
                    out_tm0[2] = output_n0[2];
                    out_tm0[3] = output_n0[3];
                    out_tm1[0] = output_n0[4];
                    out_tm1[1] = output_n0[5];
                    out_tm1[2] = output_n1[0];
                    out_tm1[3] = output_n1[1];
                    out_tm2[0] = output_n1[2];
                    out_tm2[1] = output_n1[3];
                    out_tm2[2] = output_n1[4];
                    out_tm2[3] = output_n1[5];
                    out_tm3[0] = output_n2[0];
                    out_tm3[1] = output_n2[1];
                    out_tm3[2] = output_n2[2];
                    out_tm3[3] = output_n2[3];
                    out_tm4[0] = output_n2[4];
                    out_tm4[1] = output_n2[5];
                    out_tm4[2] = output_n3[0];
                    out_tm4[3] = output_n3[1];
                    out_tm5[0] = output_n3[2];
                    out_tm5[1] = output_n3[3];
                    out_tm5[2] = output_n3[4];
                    out_tm5[3] = output_n3[5];
                    out_tm6[0] = output_n4[0];
                    out_tm6[1] = output_n4[1];
                    out_tm6[2] = output_n4[2];
                    out_tm6[3] = output_n4[3];
                    out_tm7[0] = output_n4[4];
                    out_tm7[1] = output_n4[5];
                    out_tm7[2] = output_n5[0];
                    out_tm7[3] = output_n5[1];
                    out_tm8[0] = output_n5[2];
                    out_tm8[1] = output_n5[3];
                    out_tm8[2] = output_n5[4];
                    out_tm8[3] = output_n5[5];
#else
                    float d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
                    float w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
                    float t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];
                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                        d4[n] = r4[n];
                        d5[n] = r5[n];
                    }
                    // w = B_t * d
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
                        w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
                        w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
                        w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
                        w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
                        w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
                    }
                    // transpose d to d_t
                    {
                        t0[0] = w0[0];
                        t1[0] = w0[1];
                        t2[0] = w0[2];
                        t3[0] = w0[3];
                        t4[0] = w0[4];
                        t5[0] = w0[5];
                        t0[1] = w1[0];
                        t1[1] = w1[1];
                        t2[1] = w1[2];
                        t3[1] = w1[3];
                        t4[1] = w1[4];
                        t5[1] = w1[5];
                        t0[2] = w2[0];
                        t1[2] = w2[1];
                        t2[2] = w2[2];
                        t3[2] = w2[3];
                        t4[2] = w2[4];
                        t5[2] = w2[5];
                        t0[3] = w3[0];
                        t1[3] = w3[1];
                        t2[3] = w3[2];
                        t3[3] = w3[3];
                        t4[3] = w3[4];
                        t5[3] = w3[5];
                        t0[4] = w4[0];
                        t1[4] = w4[1];
                        t2[4] = w4[2];
                        t3[4] = w4[3];
                        t4[4] = w4[4];
                        t5[4] = w4[5];
                        t0[5] = w5[0];
                        t1[5] = w5[1];
                        t2[5] = w5[2];
                        t3[5] = w5[3];
                        t4[5] = w5[4];
                        t5[5] = w5[5];
                    }
                    // d = B_t * d_t
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
                        d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
                        d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
                        d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
                        d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
                        d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
                    }
                    // save to out_tm
                    {
                        out_tm0[0] = d0[0];
                        out_tm0[1] = d0[1];
                        out_tm0[2] = d0[2];
                        out_tm0[3] = d0[3];
                        out_tm1[0] = d0[4];
                        out_tm1[1] = d0[5];
                        out_tm1[2] = d1[0];
                        out_tm1[3] = d1[1];
                        out_tm2[0] = d1[2];
                        out_tm2[1] = d1[3];
                        out_tm2[2] = d1[4];
                        out_tm2[3] = d1[5];
                        out_tm3[0] = d2[0];
                        out_tm3[1] = d2[1];
                        out_tm3[2] = d2[2];
                        out_tm3[3] = d2[3];
                        out_tm4[0] = d2[4];
                        out_tm4[1] = d2[5];
                        out_tm4[2] = d3[0];
                        out_tm4[3] = d3[1];
                        out_tm5[0] = d3[2];
                        out_tm5[1] = d3[3];
                        out_tm5[2] = d3[4];
                        out_tm5[3] = d3[5];
                        out_tm6[0] = d4[0];
                        out_tm6[1] = d4[1];
                        out_tm6[2] = d4[2];
                        out_tm6[3] = d4[3];
                        out_tm7[0] = d4[4];
                        out_tm7[1] = d4[5];
                        out_tm7[2] = d5[0];
                        out_tm7[3] = d5[1];
                        out_tm8[0] = d5[2];
                        out_tm8[1] = d5[3];
                        out_tm8[2] = d5[4];
                        out_tm8[3] = d5[5];
                    }
#endif // __AVX__
                    // tiles overlap by 2: advance 4 input columns per tile
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    r5 += 4;
                }
            }
        }
    }
    bottom_blob_bordered = Mat();
    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;
        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;
        const int tiles = nColBlocks * nRowBlocks;
        top_blob_tm.create(36, tiles, outch, elemsize, opt.workspace_allocator);
        // parallel over the 9 groups of 4 transformed coefficients
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 9; r++)
        {
            int nn_outch = 0;
            int remain_outch_start = 0;
            // process output channels 8 at a time
            nn_outch = outch >> 3;
            remain_outch_start = nn_outch << 3;
            for (int pp = 0; pp < nn_outch; pp++)
            {
                int p = pp * 8;
                float* output0_tm = top_blob_tm.channel(p);
                float* output1_tm = top_blob_tm.channel(p + 1);
                float* output2_tm = top_blob_tm.channel(p + 2);
                float* output3_tm = top_blob_tm.channel(p + 3);
                float* output4_tm = top_blob_tm.channel(p + 4);
                float* output5_tm = top_blob_tm.channel(p + 5);
                float* output6_tm = top_blob_tm.channel(p + 6);
                float* output7_tm = top_blob_tm.channel(p + 7);
                output0_tm = output0_tm + r * 4;
                output1_tm = output1_tm + r * 4;
                output2_tm = output2_tm + r * 4;
                output3_tm = output3_tm + r * 4;
                output4_tm = output4_tm + r * 4;
                output5_tm = output5_tm + r * 4;
                output6_tm = output6_tm + r * 4;
                output7_tm = output7_tm + r * 4;
                for (int i = 0; i < tiles; i++)
                {
                    // packed stride: 32 floats (8 outch x 4) per input channel
                    const float* kptr = kernel_tm_test[r].channel(p / 8);
                    const float* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
                    float zero_val = 0.f;
                    __m128 _sum0 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum1 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum2 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum3 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum4 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum5 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum6 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum7 = _mm_broadcast_ss(&zero_val);
#else
                    __m128 _sum0 = _mm_set1_ps(0.f);
                    __m128 _sum1 = _mm_set1_ps(0.f);
                    __m128 _sum2 = _mm_set1_ps(0.f);
                    __m128 _sum3 = _mm_set1_ps(0.f);
                    __m128 _sum4 = _mm_set1_ps(0.f);
                    __m128 _sum5 = _mm_set1_ps(0.f);
                    __m128 _sum6 = _mm_set1_ps(0.f);
                    __m128 _sum7 = _mm_set1_ps(0.f);
#endif
                    int q = 0;
                    // unrolled: 4 input channels per iteration
                    for (; q + 3 < inch; q = q + 4)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);
                        __m128 _r1 = _mm_loadu_ps(r0 + 4);
                        __m128 _r2 = _mm_loadu_ps(r0 + 8);
                        __m128 _r3 = _mm_loadu_ps(r0 + 12);
                        __m128 _k0 = _mm_loadu_ps(kptr);
                        __m128 _k1 = _mm_loadu_ps(kptr + 4);
                        __m128 _k2 = _mm_loadu_ps(kptr + 8);
                        __m128 _k3 = _mm_loadu_ps(kptr + 12);
                        __m128 _k4 = _mm_loadu_ps(kptr + 16);
                        __m128 _k5 = _mm_loadu_ps(kptr + 20);
                        __m128 _k6 = _mm_loadu_ps(kptr + 24);
                        __m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
                        kptr += 32;
                        _k0 = _mm_loadu_ps(kptr);
                        _k1 = _mm_loadu_ps(kptr + 4);
                        _k2 = _mm_loadu_ps(kptr + 8);
                        _k3 = _mm_loadu_ps(kptr + 12);
                        _k4 = _mm_loadu_ps(kptr + 16);
                        _k5 = _mm_loadu_ps(kptr + 20);
                        _k6 = _mm_loadu_ps(kptr + 24);
                        _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r1, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r1, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r1, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r1, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r1, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r1, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r1, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r1, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r1, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r1, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r1, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r1, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r1, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r1, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r1, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r1, _k7));
#endif
                        kptr += 32;
                        _k0 = _mm_loadu_ps(kptr);
                        _k1 = _mm_loadu_ps(kptr + 4);
                        _k2 = _mm_loadu_ps(kptr + 8);
                        _k3 = _mm_loadu_ps(kptr + 12);
                        _k4 = _mm_loadu_ps(kptr + 16);
                        _k5 = _mm_loadu_ps(kptr + 20);
                        _k6 = _mm_loadu_ps(kptr + 24);
                        _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r2, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r2, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r2, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r2, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r2, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r2, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r2, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r2, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r2, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r2, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r2, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r2, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r2, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r2, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r2, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r2, _k7));
#endif
                        kptr += 32;
                        _k0 = _mm_loadu_ps(kptr);
                        _k1 = _mm_loadu_ps(kptr + 4);
                        _k2 = _mm_loadu_ps(kptr + 8);
                        _k3 = _mm_loadu_ps(kptr + 12);
                        _k4 = _mm_loadu_ps(kptr + 16);
                        _k5 = _mm_loadu_ps(kptr + 20);
                        _k6 = _mm_loadu_ps(kptr + 24);
                        _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r3, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r3, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r3, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r3, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r3, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r3, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r3, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r3, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r3, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r3, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r3, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r3, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r3, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r3, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r3, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r3, _k7));
#endif
                        kptr += 32;
                        r0 += 16;
                    }
                    for (; q < inch; q++)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);
                        __m128 _k0 = _mm_loadu_ps(kptr);
                        __m128 _k1 = _mm_loadu_ps(kptr + 4);
                        __m128 _k2 = _mm_loadu_ps(kptr + 8);
                        __m128 _k3 = _mm_loadu_ps(kptr + 12);
                        __m128 _k4 = _mm_loadu_ps(kptr + 16);
                        __m128 _k5 = _mm_loadu_ps(kptr + 20);
                        __m128 _k6 = _mm_loadu_ps(kptr + 24);
                        __m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
                        kptr += 32;
                        r0 += 4;
                    }
                    _mm_storeu_ps(output0_tm, _sum0);
                    _mm_storeu_ps(output1_tm, _sum1);
                    _mm_storeu_ps(output2_tm, _sum2);
                    _mm_storeu_ps(output3_tm, _sum3);
                    _mm_storeu_ps(output4_tm, _sum4);
                    _mm_storeu_ps(output5_tm, _sum5);
                    _mm_storeu_ps(output6_tm, _sum6);
                    _mm_storeu_ps(output7_tm, _sum7);
#else
                    float sum0[4] = {0};
                    float sum1[4] = {0};
                    float sum2[4] = {0};
                    float sum3[4] = {0};
                    float sum4[4] = {0};
                    float sum5[4] = {0};
                    float sum6[4] = {0};
                    float sum7[4] = {0};
                    for (int q = 0; q < inch; q++)
                    {
                        for (int n = 0; n < 4; n++)
                        {
                            sum0[n] += r0[n] * kptr[n];
                            sum1[n] += r0[n] * kptr[n + 4];
                            sum2[n] += r0[n] * kptr[n + 8];
                            sum3[n] += r0[n] * kptr[n + 12];
                            sum4[n] += r0[n] * kptr[n + 16];
                            sum5[n] += r0[n] * kptr[n + 20];
                            sum6[n] += r0[n] * kptr[n + 24];
                            sum7[n] += r0[n] * kptr[n + 28];
                        }
                        kptr += 32;
                        r0 += 4;
                    }
                    for (int n = 0; n < 4; n++)
                    {
                        output0_tm[n] = sum0[n];
                        output1_tm[n] = sum1[n];
                        output2_tm[n] = sum2[n];
                        output3_tm[n] = sum3[n];
                        output4_tm[n] = sum4[n];
                        output5_tm[n] = sum5[n];
                        output6_tm[n] = sum6[n];
                        output7_tm[n] = sum7[n];
                    }
#endif // __AVX__ || __SSE__
                    output0_tm += 36;
                    output1_tm += 36;
                    output2_tm += 36;
                    output3_tm += 36;
                    output4_tm += 36;
                    output5_tm += 36;
                    output6_tm += 36;
                    output7_tm += 36;
                }
            }
            // process remaining output channels 4 at a time
            nn_outch = (outch - remain_outch_start) >> 2;
            for (int pp = 0; pp < nn_outch; pp++)
            {
                int p = remain_outch_start + pp * 4;
                float* output0_tm = top_blob_tm.channel(p);
                float* output1_tm = top_blob_tm.channel(p + 1);
                float* output2_tm = top_blob_tm.channel(p + 2);
                float* output3_tm = top_blob_tm.channel(p + 3);
                output0_tm = output0_tm + r * 4;
                output1_tm = output1_tm + r * 4;
                output2_tm = output2_tm + r * 4;
                output3_tm = output3_tm + r * 4;
                for (int i = 0; i < tiles; i++)
                {
                    // packed stride: 16 floats (4 outch x 4) per input channel
                    const float* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4);
                    const float* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
                    float zero_val = 0.f;
                    __m128 _sum0 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum1 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum2 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum3 = _mm_broadcast_ss(&zero_val);
#else
                    __m128 _sum0 = _mm_set1_ps(0.f);
                    __m128 _sum1 = _mm_set1_ps(0.f);
                    __m128 _sum2 = _mm_set1_ps(0.f);
                    __m128 _sum3 = _mm_set1_ps(0.f);
#endif
                    for (int q = 0; q < inch; q++)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);
                        __m128 _k0 = _mm_loadu_ps(kptr);
                        __m128 _k1 = _mm_loadu_ps(kptr + 4);
                        __m128 _k2 = _mm_loadu_ps(kptr + 8);
                        __m128 _k3 = _mm_loadu_ps(kptr + 12);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
#endif
                        kptr += 16;
                        r0 += 4;
                    }
                    _mm_storeu_ps(output0_tm, _sum0);
                    _mm_storeu_ps(output1_tm, _sum1);
                    _mm_storeu_ps(output2_tm, _sum2);
                    _mm_storeu_ps(output3_tm, _sum3);
#else
                    float sum0[4] = {0};
                    float sum1[4] = {0};
                    float sum2[4] = {0};
                    float sum3[4] = {0};
                    for (int q = 0; q < inch; q++)
                    {
                        for (int n = 0; n < 4; n++)
                        {
                            sum0[n] += r0[n] * kptr[n];
                            sum1[n] += r0[n] * kptr[n + 4];
                            sum2[n] += r0[n] * kptr[n + 8];
                            sum3[n] += r0[n] * kptr[n + 12];
                        }
                        kptr += 16;
                        r0 += 4;
                    }
                    for (int n = 0; n < 4; n++)
                    {
                        output0_tm[n] = sum0[n];
                        output1_tm[n] = sum1[n];
                        output2_tm[n] = sum2[n];
                        output3_tm[n] = sum3[n];
                    }
#endif // __AVX__ || __SSE__
                    output0_tm += 36;
                    output1_tm += 36;
                    output2_tm += 36;
                    output3_tm += 36;
                }
            }
            // remaining single output channels
            remain_outch_start += nn_outch << 2;
            for (int p = remain_outch_start; p < outch; p++)
            {
                float* output0_tm = top_blob_tm.channel(p);
                output0_tm = output0_tm + r * 4;
                for (int i = 0; i < tiles; i++)
                {
                    const float* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4 + p % 4);
                    const float* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
                    float zero_val = 0.f;
                    __m128 _sum0 = _mm_broadcast_ss(&zero_val);
#else
                    __m128 _sum0 = _mm_set1_ps(0.f);
#endif
                    for (int q = 0; q < inch; q++)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);
                        __m128 _k0 = _mm_loadu_ps(kptr);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
#endif
                        // BUG FIX: was `kptr += 16`, but the packing loop for the
                        // single-channel remainder stores only 4 floats per input
                        // channel (ktmp += 4), as the scalar fallback below assumes.
                        // Advancing by 16 read past the packed kernel for inch > 1.
                        kptr += 4;
                        r0 += 4;
                    }
                    _mm_storeu_ps(output0_tm, _sum0);
#else
                    float sum0[4] = {0};
                    for (int q = 0; q < inch; q++)
                    {
                        for (int n = 0; n < 4; n++)
                        {
                            // BUG FIX: was `(int)r0[n] * kptr[n]` — an int cast left
                            // over from the int8 reference code, truncating the
                            // float Winograd coefficients.
                            sum0[n] += r0[n] * kptr[n];
                        }
                        kptr += 4;
                        r0 += 4;
                    }
                    for (int n = 0; n < 4; n++)
                    {
                        output0_tm[n] = sum0[n];
                    }
#endif // __AVX__ || __SSE__
                    output0_tm += 36;
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot
    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, elemsize, opt.workspace_allocator);
    }
    {
        // AT
        // const float itm[4][6] = {
        //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
        //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
        // };
        // 0 = r00 + r01 + r02 + r03 + r04
        // 1 = r01 - r02 + 2 * (r03 - r04)
        // 2 = r01 + r02 + 4 * (r03 + r04)
        // 3 = r01 - r02 + 8 * (r03 - r04) + r05
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;
        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            float* out_tile = top_blob_tm.channel(p);
            float* outRow0 = top_blob_bordered.channel(p);
            float* outRow1 = outRow0 + outw;
            float* outRow2 = outRow0 + outw * 2;
            float* outRow3 = outRow0 + outw * 3;
            const float bias0 = bias ? bias[p] : 0.f;
            for (int j = 0; j < nColBlocks; j++)
            {
                for (int i = 0; i < nRowBlocks; i++)
                {
                    // TODO AVX2
                    float s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
                    float w0[6], w1[6], w2[6], w3[6];
                    float d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
                    float o0[4], o1[4], o2[4], o3[4];
                    // load the 6x6 product tile
                    for (int n = 0; n < 6; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n + 6];
                        s2[n] = out_tile[n + 12];
                        s3[n] = out_tile[n + 18];
                        s4[n] = out_tile[n + 24];
                        s5[n] = out_tile[n + 30];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
                        w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
                        w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
                        w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n];
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0];
                        d0[1] = w1[0];
                        d0[2] = w2[0];
                        d0[3] = w3[0];
                        d1[0] = w0[1];
                        d1[1] = w1[1];
                        d1[2] = w2[1];
                        d1[3] = w3[1];
                        d2[0] = w0[2];
                        d2[1] = w1[2];
                        d2[2] = w2[2];
                        d2[3] = w3[2];
                        d3[0] = w0[3];
                        d3[1] = w1[3];
                        d3[2] = w2[3];
                        d3[3] = w3[3];
                        d4[0] = w0[4];
                        d4[1] = w1[4];
                        d4[2] = w2[4];
                        d4[3] = w3[4];
                        d5[0] = w0[5];
                        d5[1] = w1[5];
                        d5[2] = w2[5];
                        d5[3] = w3[5];
                    }
                    // Y = A_T * w_t
                    for (int n = 0; n < 4; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
                        o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
                        o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
                        o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
                    }
                    // save the 4x4 output tile (bias added here)
                    for (int n = 0; n < 4; n++)
                    {
                        outRow0[n] = o0[n] + bias0;
                        outRow1[n] = o1[n] + bias0;
                        outRow2[n] = o2[n] + bias0;
                        outRow3[n] = o3[n] + bias0;
                    }
                    out_tile += 36;
                    outRow0 += 4;
                    outRow1 += 4;
                    outRow2 += 4;
                    outRow3 += 4;
                }
                outRow0 += outw * 3;
                outRow1 += outw * 3;
                outRow2 += outw * 3;
                outRow3 += outw * 3;
            }
        }
    }
    // END transform output
    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Direct 3x3 stride-2 convolution, scalar reference implementation.
// Accumulates over input channels into top_blob, which is pre-filled with
// the per-channel bias (or 0 when _bias is empty).
static void conv3x3s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    const int w = bottom_blob.w;
    const int inch = bottom_blob.c;
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;
    // after consuming a row we sit 2*outw columns in; skip to the row two lines down
    const int tailstep = w - 2 * outw + w;
    const float* kernel = _kernel;
    const float* bias = _bias;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);
        for (int q = 0; q < inch; q++)
        {
            float* outptr = out;
            const float* img = bottom_blob.channel(q);
            // 3x3 kernel for (output p, input q), stored as 3 rows of 3
            const float* k0 = kernel + p * inch * 9 + q * 9;
            const float* k1 = k0 + 3;
            const float* k2 = k0 + 6;
            const float* r0 = img;
            const float* r1 = img + w;
            const float* r2 = img + w * 2;
            for (int i = 0; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    float acc = 0;
                    acc += r0[0] * k0[0];
                    acc += r0[1] * k0[1];
                    acc += r0[2] * k0[2];
                    acc += r1[0] * k1[0];
                    acc += r1[1] * k1[1];
                    acc += r1[2] * k1[2];
                    acc += r2[0] * k2[0];
                    acc += r2[1] * k2[1];
                    acc += r2[2] * k2[2];
                    *outptr += acc;
                    outptr++;
                    // stride 2 horizontally
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                }
                // stride 2 vertically
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }
        }
    }
}
|
gemm.c | #include "gemm.h"
#include "utils.h"
#include "im2col.h"
#include "dark_cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <string.h>
#include <stdint.h>
#ifdef _WIN32
#include <intrin.h>
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif
#define TILE_M 4 // 4 ops
#define TILE_N 16 // AVX2 = 2 ops * 8 floats
#define TILE_K 16 // loop
#ifdef __cplusplus
#define PUT_IN_REGISTER
#else
#define PUT_IN_REGISTER register
#endif
/* Binary-weight GEMM: for each weight bit A[i][k], add the k-th row of B to
 * the i-th row of C when the bit is set, otherwise subtract it.
 * M: rows of C, N: cols of C, K: inner dimension. ALPHA is accepted for
 * signature compatibility but unused, as in the original. */
void gemm_bin(int M, int N, int K, float ALPHA,
        char *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i, j, k;
    for (i = 0; i < M; ++i) {
        float *c_row = C + i * ldc;
        for (k = 0; k < K; ++k) {
            const float *b_row = B + k * ldb;
            if (A[i * lda + k]) {
                for (j = 0; j < N; ++j) {
                    c_row[j] += b_row[j];
                }
            } else {
                for (j = 0; j < N; ++j) {
                    c_row[j] -= b_row[j];
                }
            }
        }
    }
}
/* Allocate a rows x cols matrix and fill it with uniform random floats in
 * [0, 1]. Caller owns the returned buffer and must free() it.
 * Allocation goes through the project's xcalloc (aborts on OOM). */
float *random_matrix(int rows, int cols)
{
    const int total = rows * cols;
    float *m = (float*)xcalloc(total, sizeof(float));
    int i;
    for (i = 0; i < total; ++i) {
        m[i] = (float)rand() / RAND_MAX;
    }
    return m;
}
/* Benchmark helper: run gemm_cpu 10 times on freshly generated random
 * matrices (transposed per TA/TB) and print the total elapsed CPU time.
 * Note: clock() measures CPU time, not wall time. */
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;
    float *c = random_matrix(m,n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i<10; ++i){
        gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    /* BUG FIX: the elapsed value was (end-start)/CLOCKS_PER_SEC — seconds —
     * but was labeled "ms". Scale by 1000 so the printed number matches the
     * unit in the message. */
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n",
           m, k, k, n, TA, TB, (double)(end - start) * 1000.0 / CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}
/* Public GEMM entry point: C = ALPHA * op(A) * op(B) + BETA * C, where
 * op() transposes when TA/TB is nonzero. Delegates to the CPU kernel. */
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    gemm_cpu(TA, TB, M, N, K, ALPHA, A, lda, B, ldb, BETA, C, ldc);
}
//--------------------------------------------
// XNOR bitwise GEMM for binary neural network
//--------------------------------------------
/* Boolean XNOR: 1 when the two byte values are equal, 0 otherwise
 * (equivalent to !(a ^ b)). */
static inline unsigned char xnor(unsigned char a, unsigned char b) {
    return (unsigned char)(a == b);
}
// INT-32
/* Return bit `index` (0 or 1) from a bit array packed into 32-bit words,
 * little-endian within each word: bit 0 is the LSB of src[0]. */
static inline uint32_t get_bit_int32(uint32_t const*const src, size_t index) {
    const uint32_t word = src[index / 32];
    const unsigned shift = (unsigned)(index % 32);
    return (word >> shift) & 1u;
}
/* Bitwise XNOR of two 32-bit words: ~(a ^ b), via the identity ~a ^ b. */
static inline uint32_t xnor_int32(uint32_t a, uint32_t b) {
    return ~a ^ b;
}
/* Bitwise XNOR of two 64-bit words: ~(a ^ b), via the identity ~a ^ b. */
static inline uint64_t xnor_int64(uint64_t a, uint64_t b) {
    return ~a ^ b;
}
/* Broadcast a single bit flag to a full 32-bit mask:
 * 0 -> 0x00000000, nonzero -> 0xFFFFFFFF. */
static inline uint32_t fill_bit_int32(char src) {
    return src ? 0xFFFFFFFFu : 0x00000000u;
}
/* Broadcast a single bit flag to a full 64-bit mask:
 * 0 -> all zeros, nonzero -> all ones. */
static inline uint64_t fill_bit_int64(char src) {
    return src ? 0xFFFFFFFFFFFFFFFFull : 0x0000000000000000ull;
}
/* Debug helper: print the 32 bits of src to stdout, least-significant bit
 * first, followed by a newline. */
void binary_int32_printf(uint32_t src) {
    int bit;
    for (bit = 0; bit < 32; ++bit) {
        putchar((src & 1u) ? '1' : '0');
        src >>= 1;
    }
    putchar('\n');
}
/* Debug helper: print the 64 bits of src to stdout, least-significant bit
 * first, followed by a newline. */
void binary_int64_printf(uint64_t src) {
    int bit;
    for (bit = 0; bit < 64; ++bit) {
        putchar((src & 1u) ? '1' : '0');
        src >>= 1;
    }
    putchar('\n');
}
/*
void gemm_nn_custom_bin_mean(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int *count_arr = xcalloc(M*N, sizeof(int));
int i, j, k;
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216]
char a_bit = get_bit(A, i*lda + k);
for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
char b_bit = get_bit(B, k*ldb + j);
count_arr[i*ldc + j] += xnor(a_bit, b_bit);
}
}
}
for (i = 0; i < M; ++i) {
float mean_val = mean_arr[i];
for (j = 0; j < N; ++j) {
C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val;
}
}
free(count_arr);
}
*/
/*
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int *count_arr = xcalloc(M*N, sizeof(int));
int i, j, k;
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216]
char a_bit = get_bit(A, i*lda + k);
char b_bit = get_bit(B, j*ldb + k);
count_arr[i*ldc + j] += xnor(a_bit, b_bit);
}
}
}
for (i = 0; i < M; ++i) {
float mean_val = mean_arr[i];
for (j = 0; j < N; ++j) {
C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val;
}
}
free(count_arr);
}
*/
/*
void gemm_nn_custom_bin_mean(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int *count_arr = xcalloc(M*N, sizeof(int));
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
int j, k, h;
for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216]
const char a_bit = get_bit(A, i*lda + k);
uint64_t a_bit64 = fill_bit_int64(a_bit);
int k_ldb = k*ldb;
for (j = 0; j < N; j += 64) { // out_h*out_w - one channel output size [169 - 173056]
if ((N - j > 64) && (k_ldb % 8 == 0)) {
uint64_t b_bit64 = *((uint64_t *)(B + (k_ldb + j) / 8));
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
//printf("\n %d \n",__builtin_popcountll(c_bit64)); // gcc
printf("\n %d \n", __popcnt64(c_bit64)); // msvs
int h;
for (h = 0; h < 64; ++h)
if ((c_bit64 >> h) & 1) count_arr[i*ldc + j + h] += 1;
//binary_int64_printf(a_bit64);
//binary_int64_printf(b_bit64);
//binary_int64_printf(c_bit64);
}
else {
for (; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
char b_bit = get_bit(B, k_ldb + j);
if (xnor(a_bit, b_bit)) count_arr[i*ldc + j] += 1;
}
}
}
}
}
if (mean_arr) {
//int K_2 = K / 2;
for (i = 0; i < M; ++i) {
float mean_val = mean_arr[i];
//float mean_val2 = 2 * mean_val;
for (j = 0; j < N; ++j) {
C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val;
//C[i*ldc + j] = (count_arr[i*ldc + j] - K_2) *mean_val2;
}
}
}
else {
for (i = 0; i < M; ++i) {
for (j = 0; j < N; ++j) {
C[i*ldc + j] = count_arr[i*ldc + j] - K / 2;
}
}
}
free(count_arr);
//getchar();
}
*/
/*
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
int j, k, h;
float mean_val = mean_arr[i];
for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
int count = 0;
for (k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8));
uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8));
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
#ifdef WIN32
int tmp_count = __popcnt64(c_bit64);
#else
int tmp_count = __builtin_popcountll(c_bit64);
#endif
if (K - k < 64) tmp_count = tmp_count - (64 - (K - k)); // remove extra bits
count += tmp_count;
//binary_int64_printf(c_bit64);
//printf(", count = %d \n\n", tmp_count);
}
C[i*ldc + j] = (2 * count - K) * mean_val;
}
}
}
*/
//----------------------------
// is not used
/*
void transpose_32x32_bits_my(uint32_t *A, uint32_t *B, int lda, int ldb)
{
unsigned int x, y;
for (y = 0; y < 32; ++y) {
for (x = 0; x < 32; ++x) {
if (A[y * lda] & ((uint32_t)1 << x)) B[x * ldb] |= (uint32_t)1 << y;
}
}
}
*/
#ifndef GPU
uint8_t reverse_8_bit(uint8_t a) {
    // Reverse the bit order of a byte: swap nibbles, then bit pairs,
    // then adjacent bits. Equivalent to the multiply-and-mask trick.
    a = (uint8_t)(((a & 0xF0u) >> 4) | ((a & 0x0Fu) << 4));
    a = (uint8_t)(((a & 0xCCu) >> 2) | ((a & 0x33u) << 2));
    a = (uint8_t)(((a & 0xAAu) >> 1) | ((a & 0x55u) << 1));
    return a;
}
uint32_t reverse_32_bit(uint32_t a)
{
    // unsigned int __rbit(unsigned int val) // for ARM //__asm__("rbit %0, %1\n" : "=r"(output) : "r"(input));
    // Reverse all 32 bits: reverse each byte and swap the byte order.
    // The uint32_t casts matter: reverse_8_bit returns uint8_t, which is
    // promoted to signed int, and left-shifting a value with bit 7 set by
    // 24 would overflow int — undefined behavior. Widen before shifting.
    return ((uint32_t)reverse_8_bit(a >> 24) << 0) |
        ((uint32_t)reverse_8_bit(a >> 16) << 8) |
        ((uint32_t)reverse_8_bit(a >> 8) << 16) |
        ((uint32_t)reverse_8_bit(a >> 0) << 24);
}
// One masked bit-exchange step of the transpose network: swaps the bit
// groups selected by mask m between a0 and (a1 >> j).
#define swap(a0, a1, j, m) t = (a0 ^ (a1 >>j)) & m; a0 = a0 ^ t; a1 = a1 ^ (t << j);
// In-place bit-transpose of a 32x32 bit matrix stored as 32 uint32_t rows.
// Five halving passes (16, 8, 4, 2, 1) exchange progressively smaller bit
// blocks (Hacker's Delight-style transpose32, here with the loop unrolled),
// then a final pass reverses row order and bit order within each row so the
// result comes out in the "reversed diagonale" layout the callers expect.
void transpose32_optimized(uint32_t A[32]) {
    int j, k;
    unsigned m, t;
    //m = 0x0000FFFF;
    //for (j = 16; j != 0; j = j >> 1, m = m ^ (m << j)) {
    //    for (k = 0; k < 32; k = (k + j + 1) & ~j) {
    //        t = (A[k] ^ (A[k + j] >> j)) & m;
    //        A[k] = A[k] ^ t;
    //        A[k + j] = A[k + j] ^ (t << j);
    //    }
    //}
    // Pass 1: exchange 16-bit halves between rows k and k+16.
    j = 16;
    m = 0x0000FFFF;
    for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
    // Pass 2: 8-bit blocks.
    j = 8;
    m = 0x00ff00ff;
    for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
    // Pass 3: 4-bit blocks.
    j = 4;
    m = 0x0f0f0f0f;
    for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
    // Pass 4: 2-bit blocks.
    j = 2;
    m = 0x33333333;
    for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
    // Pass 5: single bits.
    j = 1;
    m = 0x55555555;
    for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
    // reverse Y
    // Swap row j with row 31-j while bit-reversing both.
    for (j = 0; j < 16; ++j) {
        uint32_t tmp = A[j];
        A[j] = reverse_32_bit(A[31 - j]);
        A[31 - j] = reverse_32_bit(tmp);
    }
}
void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, int n)
{
    // Gather one 32x32 bit tile from A (row stride m words), transpose it
    // in place, and scatter the result into B (row stride n words).
    unsigned tile[32];
    int row;
    #pragma unroll
    for (row = 0; row < 32; ++row) tile[row] = A[row * m];
    transpose32_optimized(tile);
    #pragma unroll
    for (row = 0; row < 32; ++row) B[row * n] = tile[row];
}
void transpose_8x8_bits_my(unsigned char *A, unsigned char *B, int lda, int ldb)
{
    // Bit-transpose an 8x8 bit tile: bit x of row y in A is ORed into
    // bit y of row x in B. Caller must zero-initialize B's bytes.
    unsigned row, col;
    for (row = 0; row < 8; ++row) {
        const unsigned char bits = A[row * lda];
        for (col = 0; col < 8; ++col) {
            if (bits & (1u << col)) B[col * ldb] |= (unsigned char)(1u << row);
        }
    }
}
unsigned char reverse_byte_1(char a)
{
    // Reverse the bit order of the low byte of a, one bit at a time.
    // (Slower than the table/arithmetic variants; kept for reference.)
    const unsigned char in = (unsigned char)a;
    unsigned char out = 0;
    int bit;
    for (bit = 0; bit < 8; ++bit) {
        if (in & (1u << bit)) out |= (unsigned char)(1u << (7 - bit));
    }
    return out;
}
unsigned char reverse_byte(unsigned char a)
{
    // Reverse the bit order of a byte by swapping nibbles, then bit
    // pairs, then adjacent bits (three shift/mask passes).
    unsigned char r = (unsigned char)(((a & 0xF0u) >> 4) | ((a & 0x0Fu) << 4));
    r = (unsigned char)(((r & 0xCCu) >> 2) | ((r & 0x33u) << 2));
    r = (unsigned char)(((r & 0xAAu) >> 1) | ((r & 0x55u) << 1));
    return r;
}
// Nibble-reversal table: lookup[i] is the 4-bit value i with its bits reversed.
static unsigned char lookup[16] = {
    0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe,
    0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf, };
// Reverse the bit order of a byte via the nibble table.
unsigned char reverse_byte_3(unsigned char n) {
    // Reverse the top and bottom nibble then swap them.
    // 0x0F replaces the former 0b1111: binary literals are a GCC
    // extension and not standard C until C23.
    return (unsigned char)((lookup[n & 0x0F] << 4) | lookup[n >> 4]);
}
void transpose8rS32_reversed_diagonale(unsigned char* A, unsigned char* B, int m, int n)
{
    // Bit-transpose an 8x8 bit tile (row stride m bytes in A, n bytes in B)
    // with the Hacker's Delight transpose8 network, writing the output rows
    // in reversed order with reversed bit order ("reversed diagonale").
    unsigned x, y, t;
    // Load the array and pack it into x and y.
    // These loads were previously commented out (with x = y = 0 in their
    // place), which made the function ignore A entirely and emit zeros.
    // The unsigned casts avoid UB from shifting a promoted int by 24.
    x = ((unsigned)A[0] << 24) | ((unsigned)A[m] << 16) | ((unsigned)A[2 * m] << 8) | A[3 * m];
    y = ((unsigned)A[4 * m] << 24) | ((unsigned)A[5 * m] << 16) | ((unsigned)A[6 * m] << 8) | A[7 * m];
    // Transpose network: exchange 1-bit, then 2-bit, then 4-bit groups.
    t = (x ^ (x >> 7)) & 0x00AA00AA; x = x ^ t ^ (t << 7);
    t = (y ^ (y >> 7)) & 0x00AA00AA; y = y ^ t ^ (t << 7);
    t = (x ^ (x >> 14)) & 0x0000CCCC; x = x ^ t ^ (t << 14);
    t = (y ^ (y >> 14)) & 0x0000CCCC; y = y ^ t ^ (t << 14);
    t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F);
    y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F);
    x = t;
    // Unpack: rows come out in reversed order, each byte bit-reversed.
    B[7 * n] = reverse_byte(x >> 24); B[6 * n] = reverse_byte(x >> 16); B[5 * n] = reverse_byte(x >> 8); B[4 * n] = reverse_byte(x);
    B[3 * n] = reverse_byte(y >> 24); B[2 * n] = reverse_byte(y >> 16); B[1 * n] = reverse_byte(y >> 8); B[0 * n] = reverse_byte(y);
}
/*
// transpose by 8-bit
void transpose_bin(char *A, char *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
//printf("\n n = %d, ldb = %d \t\t m = %d, lda = %d \n", n, ldb, m, lda);
int i;
#pragma omp parallel for
for (i = 0; i < n; i += 8) {
int j;
for (j = 0; j < m; j += 8) {
int a_index = i*lda + j;
int b_index = j*ldb + i;
//transpose_8x8_bits_my(&A[a_index/8], &B[b_index/8], lda/8, ldb/8);
transpose8rS32_reversed_diagonale(&A[a_index / 8], &B[b_index / 8], lda / 8, ldb / 8);
}
for (; j < m; ++j) {
if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i);
}
}
}
*/
#endif
// transpose by 32-bit
// Bit-transpose an n x m bit matrix A (row stride lda bits) into B (row
// stride ldb bits), one 32x32 tile at a time, parallelized over row tiles.
// block_size is accepted but unused in this variant.
// NOTE(review): the tile loop advances while j < m in steps of 32, so a
// partial final column tile is still processed as a full 32 columns —
// presumably lda/ldb are padded to a multiple of 32 bits by the caller;
// verify against the allocation sites. The scalar tail loop below only
// runs when the tile loop exits exactly at j == m's tile boundary.
void transpose_bin(uint32_t *A, uint32_t *B, const int n, const int m,
    const int lda, const int ldb, const int block_size)
{
    //printf("\n n = %d (n mod 32 = %d), m = %d (m mod 32 = %d) \n", n, n % 32, m, m % 32);
    //printf("\n lda = %d (lda mod 32 = %d), ldb = %d (ldb mod 32 = %d) \n", lda, lda % 32, ldb, ldb % 32);
    int i;
    #pragma omp parallel for
    for (i = 0; i < n; i += 32) {
        int j;
        for (j = 0; j < m; j += 32) {
            // Tile (i, j): bit offsets converted to uint32_t word offsets.
            int a_index = i*lda + j;
            int b_index = j*ldb + i;
            transpose_32x32_bits_reversed_diagonale(&A[a_index / 32], &B[b_index / 32], lda / 32, ldb / 32);
            //transpose_32x32_bits_my(&A[a_index/32], &B[b_index/32], lda/32, ldb/32);
        }
        // Scalar fallback for any columns the tile loop did not cover.
        for (; j < m; ++j) {
            if (get_bit((const unsigned char* const)A, i * lda + j)) set_bit((unsigned char* const)B, j * ldb + i);
        }
    }
}
static inline int popcnt_32(uint32_t val32) {
    // Count set bits in a 32-bit word using the toolchain's intrinsic.
#ifdef WIN32 // Windows MSVS
    return __popcnt(val32);
#else // Linux GCC
    return __builtin_popcount(val32);
#endif
}
//----------------------------
#if (defined(__AVX__) && defined(__x86_64__)) || (defined(_WIN64) && !defined(__MINGW32__))
#if (defined(_WIN64) && !defined(__MINGW64__))
#include <intrin.h>
#include <ammintrin.h>
#include <immintrin.h>
#include <smmintrin.h>
#if defined(_MSC_VER) && _MSC_VER <= 1900
// Shims for old MSVC (<= VS2015) that lacks these extract intrinsics.
// The epi64 variant must return __int64: the previous __int32 return
// type silently truncated the upper 32 bits of the extracted lane.
static inline __int64 _mm256_extract_epi64(__m256i a, const int index) {
    return a.m256i_i64[index];
}
static inline __int32 _mm256_extract_epi32(__m256i a, const int index) {
    return a.m256i_i32[index];
}
#endif
// Bit-cast a 32-bit pattern to float. Uses a union instead of the former
// *(float*)&a pointer pun, which violates strict aliasing (UB).
static inline float _dn_castu32_f32(uint32_t a) {
    union { uint32_t u; float f; } cvt;
    cvt.u = a;
    return cvt.f;
}
// Extract one float lane from a __m256 via MSVC's union member access
// (m256_f32 exists only in the MSVC vector types).
static inline float _mm256_extract_float32(__m256 a, const int index) {
    return a.m256_f32[index];
}
#else // Linux GCC/Clang
#include <x86intrin.h>
#include <ammintrin.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <cpuid.h>
// Bit-cast a 32-bit pattern to float. Uses a union instead of the former
// *(float*)&a pointer pun, which violates strict aliasing (UB).
static inline float _dn_castu32_f32(uint32_t a) {
    union { uint32_t u; float f; } cvt;
    cvt.u = a;
    return cvt.f;
}
// Extract one float lane from a __m256 on GCC/Clang. The switch is
// required because _mm256_extract_epi32 demands a compile-time-constant
// index; a runtime index cannot be passed through directly.
static inline float _mm256_extract_float32(__m256 a, const int index) {
    switch(index) {
    case 0:
        return _dn_castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 0));
    case 1:
        return _dn_castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 1));
    case 2:
        return _dn_castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 2));
    case 3:
        return _dn_castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 3));
    case 4:
        return _dn_castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 4));
    case 5:
        return _dn_castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 5));
    case 6:
        return _dn_castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 6));
    case 7:
        return _dn_castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 7));
    default:
        // Out-of-range index falls back to lane 0.
        return _dn_castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 0));
    }
}
// Execute CPUID for leaf `eax` and store EAX..EDX into abcd[0..3].
// EBX goes through EDI because EBX can be reserved (PIC register) on
// 32-bit builds, so the asm must not clobber it directly.
// NOTE(review): ECX is not zeroed before CPUID here despite the "+c"
// constraint initializing it from ecx = 0 — the subleaf is 0 as written;
// confirm if a non-zero subleaf is ever needed.
void asm_cpuid(uint32_t* abcd, uint32_t eax)
{
    uint32_t ebx = 0, edx = 0, ecx = 0;
    // EBX is saved to EDI and later restored
    __asm__("movl %%ebx, %%edi;"
        "cpuid;"
        "xchgl %%ebx, %%edi;"
        : "=D"(ebx),
        "+a"(eax), "+c"(ecx), "=d"(edx));
    abcd[0] = eax;
    abcd[1] = ebx;
    abcd[2] = ecx;
    abcd[3] = edx;
}
#endif
#ifdef _WIN32
// Windows
#define cpuid(info, x) __cpuidex(info, x, 0)
#else
// GCC Intrinsics
// Query CPUID leaf InfoType (subleaf 0) into info[0..3] = EAX..EDX,
// mirroring MSVC's __cpuidex used in the Windows branch above.
void cpuid(int info[4], int InfoType) {
    __cpuid_count(InfoType, 0, info[0], info[1], info[2], info[3]);
}
#endif
// CPU feature flags, populated once by check_cpu_features() below.
// Misc.
static int HW_MMX, HW_x64, HW_RDRAND, HW_BMI1, HW_BMI2, HW_ADX, HW_PREFETCHWT1;
static int HW_ABM; // Advanced Bit Manipulation
// SIMD: 128-bit
static int HW_SSE, HW_SSE2, HW_SSE3, HW_SSSE3, HW_SSE41, HW_SSE42, HW_SSE4a, HW_AES, HW_SHA;
// SIMD: 256-bit
static int HW_AVX, HW_XOP, HW_FMA3, HW_FMA4, HW_AVX2;
// SIMD: 512-bit
static int HW_AVX512F;    // AVX512 Foundation
static int HW_AVX512CD;   // AVX512 Conflict Detection
static int HW_AVX512PF;   // AVX512 Prefetch
static int HW_AVX512ER;   // AVX512 Exponential + Reciprocal
static int HW_AVX512VL;   // AVX512 Vector Length Extensions
static int HW_AVX512BW;   // AVX512 Byte + Word
static int HW_AVX512DQ;   // AVX512 Doubleword + Quadword
static int HW_AVX512IFMA; // AVX512 Integer 52-bit Fused Multiply-Add
static int HW_AVX512VBMI; // AVX512 Vector Byte Manipulation Instructions
// https://stackoverflow.com/questions/6121792/how-to-check-if-a-cpu-supports-the-sse3-instruction-set
// Populate the HW_* feature flags by probing CPUID leaves 1, 7 and
// 0x80000001. Writes file-scope statics only; safe to call repeatedly.
void check_cpu_features(void) {
    int info[4];
    cpuid(info, 0);
    int nIds = info[0];              // highest supported standard leaf
    cpuid(info, 0x80000000);
    unsigned nExIds = info[0];       // highest supported extended leaf
    // Detect Features
    // Leaf 1: legacy feature bits (EDX = info[3], ECX = info[2]).
    if (nIds >= 0x00000001) {
        cpuid(info, 0x00000001);
        HW_MMX = (info[3] & ((uint32_t)1 << 23)) != 0;
        HW_SSE = (info[3] & ((uint32_t)1 << 25)) != 0;
        HW_SSE2 = (info[3] & ((uint32_t)1 << 26)) != 0;
        HW_SSE3 = (info[2] & ((uint32_t)1 << 0)) != 0;
        HW_SSSE3 = (info[2] & ((uint32_t)1 << 9)) != 0;
        HW_SSE41 = (info[2] & ((uint32_t)1 << 19)) != 0;
        HW_SSE42 = (info[2] & ((uint32_t)1 << 20)) != 0;
        HW_AES = (info[2] & ((uint32_t)1 << 25)) != 0;
        HW_AVX = (info[2] & ((uint32_t)1 << 28)) != 0;
        HW_FMA3 = (info[2] & ((uint32_t)1 << 12)) != 0;
        HW_RDRAND = (info[2] & ((uint32_t)1 << 30)) != 0;
    }
    // Leaf 7: extended features (EBX = info[1], ECX = info[2]).
    if (nIds >= 0x00000007) {
        cpuid(info, 0x00000007);
        HW_AVX2 = (info[1] & ((uint32_t)1 << 5)) != 0;
        HW_BMI1 = (info[1] & ((uint32_t)1 << 3)) != 0;
        HW_BMI2 = (info[1] & ((uint32_t)1 << 8)) != 0;
        HW_ADX = (info[1] & ((uint32_t)1 << 19)) != 0;
        HW_SHA = (info[1] & ((uint32_t)1 << 29)) != 0;
        HW_PREFETCHWT1 = (info[2] & ((uint32_t)1 << 0)) != 0;
        HW_AVX512F = (info[1] & ((uint32_t)1 << 16)) != 0;
        HW_AVX512CD = (info[1] & ((uint32_t)1 << 28)) != 0;
        HW_AVX512PF = (info[1] & ((uint32_t)1 << 26)) != 0;
        HW_AVX512ER = (info[1] & ((uint32_t)1 << 27)) != 0;
        HW_AVX512VL = (info[1] & ((uint32_t)1 << 31)) != 0;
        HW_AVX512BW = (info[1] & ((uint32_t)1 << 30)) != 0;
        HW_AVX512DQ = (info[1] & ((uint32_t)1 << 17)) != 0;
        HW_AVX512IFMA = (info[1] & ((uint32_t)1 << 21)) != 0;
        HW_AVX512VBMI = (info[2] & ((uint32_t)1 << 1)) != 0;
    }
    // Extended leaf 0x80000001: AMD-originated bits (EDX/ECX).
    if (nExIds >= 0x80000001) {
        cpuid(info, 0x80000001);
        HW_x64 = (info[3] & ((uint32_t)1 << 29)) != 0;
        HW_ABM = (info[2] & ((uint32_t)1 << 5)) != 0;
        HW_SSE4a = (info[2] & ((uint32_t)1 << 6)) != 0;
        HW_FMA4 = (info[2] & ((uint32_t)1 << 16)) != 0;
        HW_XOP = (info[2] & ((uint32_t)1 << 11)) != 0;
    }
}
int is_avx() {
    // Lazily detect AVX support; the answer is cached after the first call.
    // NOTE(review): the cached static is written without synchronization —
    // presumably the first call happens before any worker threads start.
    static int result = -1;
    if (result < 0) {
        check_cpu_features();
        result = HW_AVX;
        printf(result == 1 ? " Used AVX \n" : " Not used AVX \n");
    }
    return result;
}
int is_fma_avx2() {
    // Lazily detect FMA3 + AVX2 support; cached after the first call.
    static int result = -1;
    if (result < 0) {
        check_cpu_features();
        result = HW_FMA3 && HW_AVX2;
        printf(result == 1 ? " Used FMA & AVX2 \n" : " Not used FMA & AVX2 \n");
    }
    return result;
}
// https://software.intel.com/sites/landingpage/IntrinsicsGuide
// C += ALPHA * A * B for row-major MxK A, KxN B, MxN C (leading
// dimensions lda/ldb/ldc). Uses an AVX inner loop over 8 columns when
// the CPU supports it, with a scalar tail; otherwise a plain triple loop.
void gemm_nn(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int i, j, k;
    if (is_avx() == 1) {    // AVX
        for (i = 0; i < M; ++i) {
            for (k = 0; k < K; ++k) {
                float A_PART = ALPHA*A[i*lda + k];
                __m256 a256, b256, c256, result256;    // AVX
                a256 = _mm256_set1_ps(A_PART);
                // Vector loop stops before the final (possibly partial)
                // group of 8 columns; the tail below finishes the row.
                for (j = 0; j < N - 8; j += 8) {
                    b256 = _mm256_loadu_ps(&B[k*ldb + j]);
                    c256 = _mm256_loadu_ps(&C[i*ldc + j]);
                    // FMA - Intel Haswell (2013), AMD Piledriver (2012)
                    //result256 = _mm256_fmadd_ps(a256, b256, c256);
                    result256 = _mm256_mul_ps(a256, b256);
                    result256 = _mm256_add_ps(result256, c256);
                    _mm256_storeu_ps(&C[i*ldc + j], result256);
                }
                // Scalar tail: first column not covered by the vector loop.
                int prev_end = (N % 8 == 0) ? (N - 8) : (N / 8) * 8;
                for (j = prev_end; j < N; ++j)
                    C[i*ldc + j] += A_PART*B[k*ldb + j];
            }
        }
    }
    else {
        // Scalar fallback path.
        for (i = 0; i < M; ++i) {
            for (k = 0; k < K; ++k) {
                PUT_IN_REGISTER float A_PART = ALPHA * A[i * lda + k];
                for (j = 0; j < N; ++j) {
                    C[i*ldc + j] += A_PART*B[k*ldb + j];
                }
                /* // SSE
                __m128 a128, b128, c128, result128;    // SSE
                a128 = _mm_set1_ps(A_PART);
                for (j = 0; j < N - 4; j += 4) {
                b128 = _mm_loadu_ps(&B[k*ldb + j]);
                c128 = _mm_loadu_ps(&C[i*ldc + j]);
                //result128 = _mm_fmadd_ps(a128, b128, c128);
                result128 = _mm_mul_ps(a128, b128);
                result128 = _mm_add_ps(result128, c128);
                _mm_storeu_ps(&C[i*ldc + j], result128);
                }
                int prev_end = (N % 4 == 0) ? (N - 4) : (N / 4) * 4;
                for (j = prev_end; j < N; ++j){
                C[i*ldc + j] += A_PART*B[k*ldb + j];
                }
                */
            }
        }
    }
}
// Tiled AVX SGEMM: C += ALPHA * A * B, computing TILE_M x 16 output tiles
// (4 rows by two 8-float vectors) with TILE_K-deep accumulation, then
// scalar tails for leftover columns, depth, and rows. Row tiles are
// distributed across threads with OpenMP.
// NOTE(review): relies on unaligned AVX loads/stores into B and C;
// TILE_M/TILE_N/TILE_K are macros defined elsewhere in this file —
// presumably TILE_M == 4 and TILE_N == 16 to match the hard-coded
// row/column offsets below; verify against their definitions.
void gemm_nn_fast(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < (M / TILE_M)*TILE_M; i += TILE_M)
    {
        int j, k;
        int i_d, k_d;
        for (k = 0; k < (K / TILE_K)*TILE_K; k += TILE_K)
        {
            for (j = 0; j < (N / TILE_N)*TILE_N; j += TILE_N)
            {
                // L1 - 6 bits tag [11:6] - cache size 32 KB, conflict for each 4 KB
                // L2 - 9 bits tag [14:6] - cache size 256 KB, conflict for each 32 KB
                // L3 - 13 bits tag [18:6] - cache size 8 MB, conflict for each 512 KB
                __m256 result256;
                __m256 a256_0, b256_0;    // AVX
                __m256 a256_1, b256_1;    // AVX
                __m256 a256_2;// , b256_2;    // AVX
                __m256 a256_3;// , b256_3;    // AVX
                __m256 c256_0, c256_1, c256_2, c256_3;
                __m256 c256_4, c256_5, c256_6, c256_7;
                // Load the 4x16 output tile: rows i..i+3, columns j..j+15.
                c256_0 = _mm256_loadu_ps(&C[(0 + i)*ldc + (0 + j)]);
                c256_1 = _mm256_loadu_ps(&C[(1 + i)*ldc + (0 + j)]);
                c256_2 = _mm256_loadu_ps(&C[(0 + i)*ldc + (8 + j)]);
                c256_3 = _mm256_loadu_ps(&C[(1 + i)*ldc + (8 + j)]);
                c256_4 = _mm256_loadu_ps(&C[(2 + i)*ldc + (0 + j)]);
                c256_5 = _mm256_loadu_ps(&C[(3 + i)*ldc + (0 + j)]);
                c256_6 = _mm256_loadu_ps(&C[(2 + i)*ldc + (8 + j)]);
                c256_7 = _mm256_loadu_ps(&C[(3 + i)*ldc + (8 + j)]);
                // Accumulate TILE_K rank-1 updates into the tile.
                for (k_d = 0; k_d < (TILE_K); ++k_d)
                {
                    a256_0 = _mm256_set1_ps(ALPHA*A[(0 + i)*lda + (k_d + k)]);
                    a256_1 = _mm256_set1_ps(ALPHA*A[(1 + i)*lda + (k_d + k)]);
                    a256_2 = _mm256_set1_ps(ALPHA*A[(2 + i)*lda + (k_d + k)]);
                    a256_3 = _mm256_set1_ps(ALPHA*A[(3 + i)*lda + (k_d + k)]);
                    b256_0 = _mm256_loadu_ps(&B[(k_d + k)*ldb + (0 + j)]);
                    b256_1 = _mm256_loadu_ps(&B[(k_d + k)*ldb + (8 + j)]);
                    // FMA - Intel Haswell (2013), AMD Piledriver (2012)
                    //c256_0 = _mm256_fmadd_ps(a256_0, b256_0, c256_0);
                    //c256_1 = _mm256_fmadd_ps(a256_1, b256_0, c256_1);
                    //c256_2 = _mm256_fmadd_ps(a256_0, b256_1, c256_2);
                    //c256_3 = _mm256_fmadd_ps(a256_1, b256_1, c256_3);
                    //c256_4 = _mm256_fmadd_ps(a256_2, b256_0, c256_4);
                    //c256_5 = _mm256_fmadd_ps(a256_3, b256_0, c256_5);
                    //c256_6 = _mm256_fmadd_ps(a256_2, b256_1, c256_6);
                    //c256_7 = _mm256_fmadd_ps(a256_3, b256_1, c256_7);
                    result256 = _mm256_mul_ps(a256_0, b256_0);
                    c256_0 = _mm256_add_ps(result256, c256_0);
                    result256 = _mm256_mul_ps(a256_1, b256_0);
                    c256_1 = _mm256_add_ps(result256, c256_1);
                    result256 = _mm256_mul_ps(a256_0, b256_1);
                    c256_2 = _mm256_add_ps(result256, c256_2);
                    result256 = _mm256_mul_ps(a256_1, b256_1);
                    c256_3 = _mm256_add_ps(result256, c256_3);
                    result256 = _mm256_mul_ps(a256_2, b256_0);
                    c256_4 = _mm256_add_ps(result256, c256_4);
                    result256 = _mm256_mul_ps(a256_3, b256_0);
                    c256_5 = _mm256_add_ps(result256, c256_5);
                    result256 = _mm256_mul_ps(a256_2, b256_1);
                    c256_6 = _mm256_add_ps(result256, c256_6);
                    result256 = _mm256_mul_ps(a256_3, b256_1);
                    c256_7 = _mm256_add_ps(result256, c256_7);
                }
                // Store the updated tile back.
                _mm256_storeu_ps(&C[(0 + i)*ldc + (0 + j)], c256_0);
                _mm256_storeu_ps(&C[(1 + i)*ldc + (0 + j)], c256_1);
                _mm256_storeu_ps(&C[(0 + i)*ldc + (8 + j)], c256_2);
                _mm256_storeu_ps(&C[(1 + i)*ldc + (8 + j)], c256_3);
                _mm256_storeu_ps(&C[(2 + i)*ldc + (0 + j)], c256_4);
                _mm256_storeu_ps(&C[(3 + i)*ldc + (0 + j)], c256_5);
                _mm256_storeu_ps(&C[(2 + i)*ldc + (8 + j)], c256_6);
                _mm256_storeu_ps(&C[(3 + i)*ldc + (8 + j)], c256_7);
            }
            // Scalar tail: columns beyond the last full TILE_N block.
            for (j = (N / TILE_N)*TILE_N; j < N; ++j) {
                for (i_d = i; i_d < (i + TILE_M); ++i_d)
                {
                    for (k_d = k; k_d < (k + TILE_K); ++k_d)
                    {
                        PUT_IN_REGISTER float A_PART = ALPHA*A[i_d*lda + k_d];
                        C[i_d*ldc + j] += A_PART*B[k_d*ldb + j];
                    }
                }
            }
        }
        // Scalar tail: depth beyond the last full TILE_K block.
        for (k = (K / TILE_K)*TILE_K; k < K; ++k)
        {
            for (i_d = i; i_d < (i + TILE_M); ++i_d)
            {
                PUT_IN_REGISTER float A_PART = ALPHA*A[i_d*lda + k];
                for (j = 0; j < N; ++j) {
                    C[i_d*ldc + j] += A_PART*B[k*ldb + j];
                }
            }
        }
    }
    // Scalar tail: rows beyond the last full TILE_M block.
    for (i = (M / TILE_M)*TILE_M; i < M; ++i) {
        int j, k;
        for (k = 0; k < K; ++k) {
            PUT_IN_REGISTER float A_PART = ALPHA*A[i*lda + k];
            for (j = 0; j < N; ++j) {
                C[i*ldc + j] += A_PART*B[k*ldb + j];
            }
        }
    }
}
// Binary GEMM on 32-bit-packed operands. Each output element accumulates
// popcount(XNOR(A_word, B_word)), mapped to a +/-1 dot product over 32
// bits via (2*count - 32), and scaled by the per-filter mean mean_arr[i].
// AVX2 path handles 8 output columns per iteration; scalar tail covers
// the remainder.
// NOTE(review): the direct __m256i/__m256 pointer dereferences on B and C
// assume 32-byte-aligned buffers — verify the allocators used by callers.
void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA,
    uint32_t *A, int lda,
    uint32_t *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) {   // l.n
        int j, s;
        float mean_val = mean_arr[i];
        //printf(" l.mean_arr[i] = %d \n ", l.mean_arr[i]);
        for (s = 0; s < K; ++s) // l.size*l.size*l.c/32  or (l.size*l.size*l.c)
        {
            PUT_IN_REGISTER uint32_t A_PART = A[i*lda + s];
            __m256i a256 = _mm256_set1_epi32(A_PART);
            for (j = 0; j < N - 8; j += 8)
            {
                __m256i b256 = *((__m256i*)&B[s*ldb + j]);
                __m256i xor256 = _mm256_xor_si256(a256, b256);  // xnor = xor(a,b)
                __m256i all_1 = _mm256_set1_epi8((char)255);
                __m256i xnor256 = _mm256_andnot_si256(xor256, all_1); // xnor = not(xor(a,b))
                // waiting for - CPUID Flags: AVX512VPOPCNTDQ: __m512i _mm512_popcnt_epi32(__m512i a)
                // No vector popcount available: extract each 32-bit lane
                // and popcount it in scalar code.
                __m256 count = _mm256_setr_ps(
                    popcnt_32(_mm256_extract_epi32(xnor256, 0)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 1)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 2)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 3)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 4)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 5)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 6)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 7)));
                __m256 val2 = _mm256_set1_ps(2);
                count = _mm256_mul_ps(count, val2);     // count * 2
                __m256 val32 = _mm256_set1_ps(32);
                count = _mm256_sub_ps(count, val32);    // count - 32
                __m256 mean256 = _mm256_set1_ps(mean_val);
                count = _mm256_mul_ps(count, mean256);  // count * mean_val
                __m256 c256 = *((__m256*)&C[i*ldc + j]);
                count = _mm256_add_ps(count, c256);     // c = c + count
                *((__m256*)&C[i*ldc + j]) = count;
            }
            // Scalar tail for the remaining (< 8) columns.
            for (; j < N; ++j) // out_h*out_w;
            {
                PUT_IN_REGISTER uint32_t B_PART = B[s*ldb + j];
                uint32_t xnor_result = ~(A_PART ^ B_PART);
                int32_t count = popcnt_32(xnor_result);  // must be Signed int
                C[i*ldc + j] += (2 * count - 32) * mean_val;
            }
        }
    }
}
void convolution_2d_old(int w, int h, int ksize, int n, int c, int pad, int stride,
    float *weights, float *input, float *output)
{
    // Reference scalar 2-D convolution: for every filter, accumulate the
    // ksize x ksize window dot product over all input channels into
    // output[fil][y][x]. Output spatial size equals input size (assumes
    // stride == 1, pad = ksize/2-style padding); the stride parameter is
    // accepted for interface compatibility but not used.
    int fil;
    // filter index
    #pragma omp parallel for // "omp parallel for" - automatic parallelization of loop by using OpenMP
    for (fil = 0; fil < n; ++fil) {
        int chan, y, x, ky, kx;
        for (chan = 0; chan < c; ++chan) {
            for (y = 0; y < h; ++y) {
                for (x = 0; x < w; ++x) {
                    const int out_idx = fil*w*h + y*w + x;
                    const int wgt_base = fil*c*ksize*ksize + chan*ksize*ksize;
                    const int in_base = chan*w*h;
                    float acc = 0;
                    // Walk the filter window, skipping taps that fall
                    // outside the (zero-padded) input.
                    for (ky = 0; ky < ksize; ++ky) {
                        const int in_y = y + ky - pad;
                        for (kx = 0; kx < ksize; ++kx) {
                            const int in_x = x + kx - pad;
                            if (in_y < 0 || in_x < 0 || in_y >= h || in_x >= w) continue;
                            acc += input[in_base + in_y*w + in_x] * weights[wgt_base + ky*ksize + kx];
                        }
                    }
                    // l.output[filters][width][height] +=
                    //        state.input[channels][width][height] *
                    //        l.weights[filters][channels][filter_width][filter_height];
                    output[out_idx] += acc;
                }
            }
        }
    }
}
// XNOR-style AVX convolution experiment. First strips weights down to
// their sign bits (AND with 0x80000000 masks), then, per filter, sums
// sign-XORs of weight and input across the window and scales by
// |mean[fil]|.
// NOTE(review): this routine MUTATES the weights array in place (sign
// masking) and OVERWRITES output rather than accumulating; the x loop
// stops at w-8 so the rightmost columns are never written — this looks
// experimental/incomplete; confirm before relying on it.
void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride,
    float *weights, float *input, float *output, float *mean)
{
    //const int out_h = (h + 2 * pad - ksize) / stride + 1;    // output_height=input_height for stride=1 and pad=1
    //const int out_w = (w + 2 * pad - ksize) / stride + 1;    // output_width=input_width for stride=1 and pad=1
    int i;
#if defined(_OPENMP)
    static int max_num_threads = 0;
    if (max_num_threads == 0) {
        max_num_threads = omp_get_max_threads();
        //omp_set_num_threads( max_num_threads / 2);
    }
#endif
    //convolution_2d_old(w, h, ksize, n, c, pad, stride, weights, input, output);
    // Keep only the sign bit of every weight (in place).
    __m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
    for (i = 0; i < ksize*ksize*n*c; i+=8) {
        *((__m256*)&weights[i]) = _mm256_and_ps(*((__m256*)&weights[i]), _mm256_castsi256_ps(all256_sing1));
    }
    //for (i = 0; i < w*h*c; i += 8) {
    //(*(__m256*)&input[i]) = _mm256_and_ps(*((__m256*)&input[i]), _mm256_castsi256_ps(all256_sing1));
    //}
    //__m256i all256_last_zero = _mm256_set1_epi32(0xFFFFFFFF);
    //all256_last_zero.m256i_i32[7] = 0;
    __m256i all256_last_zero =
        _mm256_set_epi32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0);
    __m256i idx256 = _mm256_set_epi32(0, 7, 6, 5, 4, 3, 2, 1);
    //__m256 all256_sing1 = _mm256_set1_ps(0x80000000);
    __m256 all256_one = _mm256_set1_ps(1);
    __m256i all256i_one = _mm256_set1_epi32(1);
    ///__m256i src256 = _mm256_loadu_si256((__m256i *)(&src[i]));
    ///__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
    int fil;
    // filter index
    #pragma omp parallel for      // "omp parallel for" - automatic parallelization of loop by using OpenMP
    for (fil = 0; fil < n; ++fil) {
        int chan, y, x, f_y, f_x;
        // NOTE(review): fabs() here operates on a float — fabsf() would
        // avoid the double round-trip, behavior is equivalent.
        float cur_mean = fabs(mean[fil]);
        __m256 mean256 = _mm256_set1_ps(cur_mean);
        // channel index
        //for (chan = 0; chan < c; ++chan)
            // input - y
            for (y = 0; y < h; ++y)
                // input - x
                for (x = 0; x < w-8; x+=8)
                {
                    int const output_index = fil*w*h + y*w + x;
                    float sum = 0;
                    __m256 sum256 = _mm256_set1_ps(0);
                    for (chan = 0; chan < c; ++chan) {
                        int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize;
                        int const input_pre_index = chan*w*h;
                        // filter - y
                        for (f_y = 0; f_y < ksize; ++f_y)
                        {
                            int input_y = y + f_y - pad;
                            //__m256 in = *((__m256*)&input[input_pre_index + input_y*w]);
                            if (input_y < 0 || input_y >= h) continue;
                            //__m256 in = _mm256_loadu_ps(&input[input_pre_index + input_y*w + x - pad]);
                            // filter - x
                            for (f_x = 0; f_x < ksize; ++f_x)
                            {
                                int input_x = x + f_x - pad;
                                //if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;
                                int input_index = input_pre_index + input_y*w + input_x;
                                int weights_index = weights_pre_index + f_y*ksize + f_x;
                                //if (input_y < 0 || input_y >= h) continue;
                                //sum += input[input_index] * weights[weights_index];
                                // XOR flips the input's sign wherever the
                                // (sign-only) weight is negative.
                                __m256 in = *((__m256*)&input[input_index]);
                                __m256 w = _mm256_set1_ps(weights[weights_index]);
                                //__m256 w_sign = _mm256_and_ps(w, _mm256_castsi256_ps(all256_sing1)); // check sign in 8 x 32-bit floats
                                __m256 xor256 = _mm256_xor_ps(w, in);
                                //printf("\n xor256_1 = %f, xor256_2 = %f \n", xor256.m256_f32[0], xor256.m256_f32[1]);
                                //printf("\n in = %f, w = %f, xor256 = %f \n", in.m256_f32[0], w_sign.m256_f32[0], xor256.m256_f32[0]);
                                //__m256 pn1 = _mm256_and_ps(_mm256_castsi256_ps(all256i_one), xor256);
                                //sum256 = xor256;
                                sum256 = _mm256_add_ps(xor256, sum256);
                                //printf("\n --- \n");
                                //printf("\n 0 = %f, 1 = %f, 2 = %f, 3 = %f, 4 = %f, 5 = %f, 6 = %f, 7 = %f \n", in.m256_f32[0], in.m256_f32[1], in.m256_f32[2], in.m256_f32[3], in.m256_f32[4], in.m256_f32[5], in.m256_f32[6], in.m256_f32[7]);
                                if (f_x < ksize-1) {
                                    //in = _mm256_permutevar8x32_ps(in, idx256);
                                    //in = _mm256_and_ps(in, _mm256_castsi256_ps(all256_last_zero));
                                }
                            }
                        }
                    }
                    // l.output[filters][width][height] +=
                    //        state.input[channels][width][height] *
                    //        l.weights[filters][channels][filter_width][filter_height];
                    //output[output_index] += sum;
                    sum256 = _mm256_mul_ps(sum256, mean256);
                    //printf("\n cur_mean = %f, sum256 = %f, sum256 = %f, in = %f \n",
                    //    cur_mean, sum256.m256_f32[0], sum256.m256_f32[1], input[input_pre_index]);
                    //__m256 out = *((__m256*)&output[output_index]);
                    //out = _mm256_add_ps(out, sum256);
                    //(*(__m256*)&output[output_index]) = out;
                    *((__m256*)&output[output_index]) = sum256;
                    //_mm256_storeu_ps(&C[i*ldc + j], result256);
                }
    }
}
// http://graphics.stanford.edu/~seander/bithacks.html
// https://stackoverflow.com/questions/17354971/fast-counting-the-number-of-set-bits-in-m128i-register
// https://arxiv.org/pdf/1611.07612.pdf
// Population count of a 128-bit vector: hardware popcount of the low and
// high 64-bit halves, selected per toolchain (MSVC / Apple Clang / GCC).
static inline int popcnt128(__m128i n) {
    const __m128i n_hi = _mm_unpackhi_epi64(n, n);
#if defined(_MSC_VER)
    return __popcnt64(_mm_cvtsi128_si64(n)) + __popcnt64(_mm_cvtsi128_si64(n_hi));
#elif defined(__APPLE__) && defined(__clang__)
    return _mm_popcnt_u64(_mm_cvtsi128_si64(n)) + _mm_popcnt_u64(_mm_cvtsi128_si64(n_hi));
#else
    return __popcntq(_mm_cvtsi128_si64(n)) + __popcntq(_mm_cvtsi128_si64(n_hi));
#endif
}
static inline int popcnt256(__m256i n) {
    // Total set bits in a 256-bit vector: popcount each 128-bit half.
    const int low_bits = popcnt128(_mm256_extractf128_si256(n, 0));
    const int high_bits = popcnt128(_mm256_extractf128_si256(n, 1));
    return low_bits + high_bits;
}
// Per-byte popcount via Mula's nibble-lookup algorithm: shuffle a 16-entry
// popcount table by the low and high nibbles of each byte, add, then
// _mm256_sad_epu8 horizontally sums bytes into four 64-bit lane totals.
static inline __m256i count256(__m256i v) {
    // lookup[i] = popcount(i) for i in 0..15, repeated for both 128-bit lanes.
    __m256i lookup =
        _mm256_setr_epi8(0, 1, 1, 2, 1, 2, 2, 3, 1, 2,
            2, 3, 2, 3, 3, 4, 0, 1, 1, 2, 1, 2, 2, 3,
            1, 2, 2, 3, 2, 3, 3, 4);
    __m256i low_mask = _mm256_set1_epi8(0x0f);
    __m256i lo = _mm256_and_si256(v, low_mask);
    __m256i hi = _mm256_and_si256(_mm256_srli_epi32(v, 4), low_mask);
    __m256i popcnt1 = _mm256_shuffle_epi8(lookup, lo);
    __m256i popcnt2 = _mm256_shuffle_epi8(lookup, hi);
    __m256i total = _mm256_add_epi8(popcnt1, popcnt2);
    return _mm256_sad_epu8(total, _mm256_setzero_si256());
}
static inline int popcnt256_custom(__m256i n) {
    // Mula's popcount: count256() leaves four 64-bit partial sums, which
    // are reduced to a single scalar here.
    __m256i lane_sums = count256(n);
    int total = 0;
    total += (int)_mm256_extract_epi64(lane_sums, 0);
    total += (int)_mm256_extract_epi64(lane_sums, 1);
    total += (int)_mm256_extract_epi64(lane_sums, 2);
    total += (int)_mm256_extract_epi64(lane_sums, 3);
    return total;
}
static inline void xnor_avx2_popcnt(__m256i a_bit256, __m256i b_bit256, __m256i *count_sum) {
    // XNOR the two 256-bit operands and accumulate per-64-bit-lane
    // popcounts into *count_sum (1st part of Mula's popcount algorithm).
    // The NOT could be hoisted: applying it to the weights once up front
    // would remove the andnot from this inner step.
    const __m256i all_ones = _mm256_set1_epi8((char)255);
    const __m256i xnor256 = _mm256_andnot_si256(_mm256_xor_si256(a_bit256, b_bit256), all_ones);
    *count_sum = _mm256_add_epi64(count256(xnor256), *count_sum);
}
// 2nd part - popcnt Mula's algorithm
static inline int get_count_mula(__m256i count_sum) {
    // Reduce the four 64-bit partial popcounts to one scalar total.
    int total = 0;
    total += (int)_mm256_extract_epi64(count_sum, 0);
    total += (int)_mm256_extract_epi64(count_sum, 1);
    total += (int)_mm256_extract_epi64(count_sum, 2);
    total += (int)_mm256_extract_epi64(count_sum, 3);
    return total;
}
// 5x times faster than gemm()-float32
// further optimizations: do mean-mult only for the last layer
// Binary GEMM with transposed, bit-packed B: C[i][j] = (2*popcount(
// XNOR(A_row_i, B_row_j)) - K) * mean_arr[i]. Processes 2x2 output
// blocks with 256-bit XNOR+popcount steps, then single-row/column tails.
// K is rounded up to the 256-bit step; the f1 correction removes the
// popcount contribution of the padding bits (padding XNORs to 1s).
// NOTE(review): the 256-bit loads read up to bit_step-1 bits past K —
// presumably lda/ldb are padded accordingly by the caller; verify.
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;
#if defined(_OPENMP)
    static int max_num_threads = 0;
    if (max_num_threads == 0) {
        max_num_threads = omp_get_max_threads();
        //omp_set_num_threads(max_num_threads / 2);
    }
#endif
    //#pragma omp parallel for
    //for (i = 0; i < M; ++i)
    #pragma omp parallel for
    for (i = 0; i < (M/2)*2; i += 2)
    {   // l.n - filters [16 - 55 - 1024]
        float mean_val_0 = mean_arr[i + 0];
        float mean_val_1 = mean_arr[i + 1];
        int j, k;
        //__m256i all_1 = _mm256_set1_epi8(255);
        //for (j = 0; j < N; ++j)
        // 2x2 output block: rows i, i+1 against columns j, j+1.
        for (j = 0; j < (N/2)*2; j += 2)
        {   // out_h*out_w - one channel output size [169 - 173056]
            //int count = 0;
            const int bit_step = 256;
            __m256i count_sum_0 = _mm256_set1_epi8(0);
            __m256i count_sum_1 = _mm256_set1_epi8(0);
            __m256i count_sum_2 = _mm256_set1_epi8(0);
            __m256i count_sum_3 = _mm256_set1_epi8(0);
            // Each A row is reused for two B rows (and vice versa),
            // halving the loads per XNOR.
            for (k = 0; k < K; k += bit_step) {   // l.size*l.size*l.c - one filter size [27 - 9216]
                __m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + 0)*lda + k) / 8));
                __m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8));
                __m256i a_bit256_1 = _mm256_loadu_si256((__m256i *)(A + ((i + 1)*lda + k) / 8));
                __m256i b_bit256_1 = _mm256_loadu_si256((__m256i *)(B + ((j + 1)*ldb + k) / 8));
                xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum_0);
                xnor_avx2_popcnt(a_bit256_0, b_bit256_1, &count_sum_1);
                xnor_avx2_popcnt(a_bit256_1, b_bit256_0, &count_sum_2);
                xnor_avx2_popcnt(a_bit256_1, b_bit256_1, &count_sum_3);
                //count += popcnt256(c_bit256);
                //binary_int64_printf(c_bit64);
                //printf(", count = %d \n\n", tmp_count);
            }
            int count_0 = get_count_mula(count_sum_0);
            int count_1 = get_count_mula(count_sum_1);
            int count_2 = get_count_mula(count_sum_2);
            int count_3 = get_count_mula(count_sum_3);
            const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
            count_0 = count_0 - f1;    // remove extra bits (from empty space for align only)
            count_1 = count_1 - f1;
            count_2 = count_2 - f1;
            count_3 = count_3 - f1;
            C[i*ldc + (j + 0)] = (2 * count_0 - K) * mean_val_0;
            C[i*ldc + (j + 1)] = (2 * count_1 - K) * mean_val_0;
            C[(i + 1)*ldc + (j + 0)] = (2 * count_2 - K) * mean_val_1;
            C[(i + 1)*ldc + (j + 1)] = (2 * count_3 - K) * mean_val_1;
        }
        // Tail: odd final column for both rows of this row pair.
        int i_d;
        for (i_d = 0; i_d < 2; ++i_d)
        {
            float mean_val = mean_arr[i + i_d];
            for (j = (N / 2) * 2; j < N; j += 1)
            {   // out_h*out_w - one channel output size [169 - 173056]
                const int bit_step = 256;
                __m256i count_sum = _mm256_set1_epi8(0);
                for (k = 0; k < K; k += bit_step) {   // l.size*l.size*l.c - one filter size [27 - 9216]
                    __m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + i_d + 0)*lda + k) / 8));
                    __m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8));
                    xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum);
                }
                int count = get_count_mula(count_sum);
                const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
                count = count - f1;    // remove extra bits (from empty space for align only)
                C[(i + i_d)*ldc + j] = (2 * count - K) * mean_val;
            }
        }
    }
    // Tail: odd final row against every column.
    for (i = (M / 2) * 2; i < M; i += 1)
    {
        float mean_val = mean_arr[i];
        int j, k;
        for (j = 0; j < N; j += 1)
        {   // out_h*out_w - one channel output size [169 - 173056]
            const int bit_step = 256;
            __m256i count_sum = _mm256_set1_epi8(0);
            for (k = 0; k < K; k += bit_step) {   // l.size*l.size*l.c - one filter size [27 - 9216]
                __m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + 0)*lda + k) / 8));
                __m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8));
                xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum);
            }
            int count = get_count_mula(count_sum);
            const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
            count = count - f1;    // remove extra bits (from empty space for align only)
            C[i*ldc + j] = (2 * count - K) * mean_val;
        }
    }
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// Transposed, aligned im2col: expands ksize*ksize*channels patches of `data_im`
// into `data_col`, laid out transposed so that each im2col *row* c lands at
// data_col[(h * width_col + w) * ldb_align + c].
// The fast path applies only when the output plane equals the input plane
// (stride == 1, pad == 1): interior pixels are copied 8-wide with AVX, the
// one-pixel border goes through im2col_get_pixel() (which handles padding).
// Any other geometry uses the generic scalar loop at the bottom.
void im2col_cpu_custom_transpose(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int ldb_align)
{
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;
    int c;
    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1)
    {
        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            // decompose im2col row index into kernel offset + source channel
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            // interior pixels (no padding involved)
            for (h = pad; h < height_col - pad; ++h) {
                // BUGFIX: upper bound was "width_col - pad - 4" while the loop
                // advances by 8, so the final iteration could read data_im and
                // scatter data_col entries up to 4 elements past the interior
                // region (spilling into the next row's indices). Use -8 so the
                // scalar remainder loop below covers the tail safely.
                for (w = pad; w < width_col - pad - 8; w += 8) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
                    // load 8 consecutive pixels, scatter them down the column
                    __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    data_col[col_index + ldb_align * 0] = _mm256_extract_float32(src256, 0);
                    data_col[col_index + ldb_align * 1] = _mm256_extract_float32(src256, 1);
                    data_col[col_index + ldb_align * 2] = _mm256_extract_float32(src256, 2);
                    data_col[col_index + ldb_align * 3] = _mm256_extract_float32(src256, 3);
                    data_col[col_index + ldb_align * 4] = _mm256_extract_float32(src256, 4);
                    data_col[col_index + ldb_align * 5] = _mm256_extract_float32(src256, 5);
                    data_col[col_index + ldb_align * 6] = _mm256_extract_float32(src256, 6);
                    data_col[col_index + ldb_align * 7] = _mm256_extract_float32(src256, 7);
                }
                // scalar tail of the interior row
                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
                    data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                }
            }
            // border cells: im2col_get_pixel() returns the padding value for
            // reads outside the image
            {
                w = 0; // left column
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
            {
                w = width_col - 1; // right column
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
            {
                h = 0; // top row
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
            {
                h = height_col - 1; // bottom row
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
        }
    }
    else {
        // generic geometry: scalar gather through im2col_get_pixel()
        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            for (h = 0; h < height_col; ++h) {
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h * stride;
                    int im_col = w_offset + w * stride;
                    int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
        }
    }
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// AVX im2col: expands ksize x ksize patches of `data_im` into rows of
// `data_col` at col_index = (c * height_col + h) * width_col + w.
// The fast path applies only when the output plane equals the input plane
// (stride == 1, pad == 1) and the CPU supports FMA/AVX2; any other geometry
// defers to the generic im2col_cpu().
void im2col_cpu_custom(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col)
{
    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;
    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
    {
        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            // decompose im2col row index into kernel offset + source channel
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            // interior pixels (no padding involved): 8-wide vector copies
            for (h = pad; h < height_col-pad; ++h) {
                for (w = pad; w < width_col-pad-8; w += 8) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (c * height_col + h) * width_col + w;
                    //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                    __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    _mm256_storeu_ps(&data_col[col_index], src256);
                }
                // scalar tail of the interior row
                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                }
            }
            // border cells: im2col_get_pixel() handles reads that fall into
            // the padding area
            {
                w = 0; // left column
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
            {
                w = width_col-1; // right column
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
            {
                h = 0; // top row
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
            {
                h = height_col-1; // bottom row
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
        }
    }
    else {
        //printf("\n Error: is no non-optimized version \n");
        im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
    }
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// AVX im2col with per-row alignment: like im2col_cpu_custom(), but each
// im2col row c starts at data_col[c * bit_align] instead of being packed
// tightly, so downstream binary/transpose kernels get aligned rows.
// Fast path only (stride == 1, pad == 1, same-size output, FMA/AVX2);
// there is intentionally no generic fallback here — see the else branch.
void im2col_cpu_custom_align(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int bit_align)
{
    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;
    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
    {
        int new_ldb = bit_align; // row stride (in floats) of the aligned output
        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            // decompose im2col row index into kernel offset + source channel
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            // interior pixels (no padding involved): 8-wide vector copies
            for (h = pad; h < height_col - pad; ++h) {
                for (w = pad; w < width_col - pad - 8; w += 8) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                    __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    _mm256_storeu_ps(&data_col[col_index], src256);
                }
                // scalar tail of the interior row
                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                }
            }
            // border cells: im2col_get_pixel() handles the padding reads
            {
                w = 0; // left column
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }
            {
                w = width_col - 1; // right column
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }
            {
                h = 0; // top row
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }
            {
                h = height_col - 1; // bottom row
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }
        }
    }
    else {
        // no generic path: callers are expected to do im2col + float_to_bit +
        // transpose_bin themselves for unsupported geometries
        printf("\n Error: is no non-optimized version \n");
        //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin
        // float_to_bit(b, t_input, src_size);
        // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
    }
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// AVX im2col straight into a bit-packed buffer: each patch element is stored
// as a single bit (1 iff the pixel is strictly > 0) at bit index
// c * bit_align + h * width_col + w inside data_col (reinterpreted as bytes).
// The bitmap feeds the XNOR/popcount GEMM path. Fast path only (stride == 1,
// pad == 1, same-size output, FMA/AVX2) — no generic fallback.
// NOTE(review): data_col is assumed pre-zeroed by the caller (bits are only
// ever OR-ed in) — confirm at call sites.
void im2col_cpu_custom_bin(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int bit_align)
{
    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;
    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
    {
        // sign-bit mask kept from an earlier sign-based variant (unused by the
        // live code below; see the commented-out block in the inner loop)
        __m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
        __m256 float_zero256 = _mm256_set1_ps(0.00);
        int new_ldb = bit_align; // bits per im2col row
        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            // decompose im2col row index into kernel offset + source channel
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            // interior pixels: binarize 8 pixels per iteration
            for (h = pad; h < height_col - pad; ++h) {
                for (w = pad; w < width_col - pad - 8; w += 8) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //__m256i src256 = _mm256_loadu_si256((__m256i *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    //__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
                    //uint16_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1
                    //mask = ~mask; // inverse mask, (val >= 0) ? 1 : 0
                    // compare 8 pixels against 0; movemask packs the results
                    // into 8 bits (LSB = lowest-addressed pixel)
                    __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    __m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS);
                    uint16_t mask = _mm256_movemask_ps(result256); // (val > 0) ? 0 : 1
                    // NOTE(review): 16-bit read-modify-write at a byte offset —
                    // relies on unaligned access being allowed and on bit_align
                    // keeping different channels' bit ranges in separate bytes
                    // so parallel threads never touch the same byte; verify
                    // bit_align % 8 == 0 at call sites.
                    uint16_t* dst_ptr = (uint16_t*)&((uint8_t*)data_col)[col_index / 8];
                    *dst_ptr |= (mask << (col_index % 8));
                }
                // scalar tail of the interior row
                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                    float val = data_im[im_col + width*(im_row + height*c_im)];
                    if (val > 0) set_bit((unsigned char* const)data_col, col_index);
                }
            }
            // border cells: im2col_get_pixel() handles the padding reads
            {
                w = 0; // left column
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char* const)data_col, col_index);
                }
            }
            {
                w = width_col - 1; // right column
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char* const)data_col, col_index);
                }
            }
            {
                h = 0; // top row
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char* const)data_col, col_index);
                }
            }
            {
                h = height_col - 1; // bottom row
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char* const)data_col, col_index);
                }
            }
        }
    }
    else {
        // no generic path for unsupported geometries
        printf("\n Error: is no non-optimized version \n");
        //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin
        // float_to_bit(b, t_input, src_size);
        // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
    }
}
// Applies activation `a` in place over x[0..n-1].
// LINEAR is the identity (no work). LEAKY (negative slope 0.1) has an AVX2
// vector fast path; every other activation goes through the scalar activate()
// dispatcher.
void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a)
{
    int i = 0;
    if (a == LINEAR)
    {}
    else if (a == LEAKY)
    {
        if (is_fma_avx2()) {
            // vector path: select 0.1*x where the sign bit is set, x otherwise
            __m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
            __m256 all256_01 = _mm256_set1_ps(0.1F);
            for (i = 0; i < n - 8; i += 8) {
                //x[i] = (x[i]>0) ? x[i] : .1*x[i];
                __m256 src256 = _mm256_loadu_ps(&x[i]);
                __m256 mult256 = _mm256_mul_ps((src256), all256_01); // mult * 0.1
                __m256i sign256 = _mm256_and_si256(_mm256_castps_si256(src256), all256_sing1); // check sign in 8 x 32-bit floats
                __m256 result256 = _mm256_blendv_ps(src256, mult256, _mm256_castsi256_ps(sign256)); // (sign>0) ? src : mult;
                _mm256_storeu_ps(&x[i], result256);
            }
        }
        // scalar tail — also the full path when AVX2/FMA is unavailable
        // (i stays 0 in that case)
        for (; i < n; ++i) {
            x[i] = (x[i]>0) ? x[i] : .1*x[i];
        }
    }
    else {
        for (i = 0; i < n; ++i) {
            x[i] = activate(x[i], a);
        }
    }
}
// Packs the signs of `size` floats into a bitmap: bit i of dst is 1 iff
// src[i] > 0 (strictly), 8 bits per byte, LSB-first (the layout produced by
// _mm256_movemask_ps).
// NOTE(review): the loop advances 8 floats per iteration with no tail
// handling, so when size % 8 != 0 it reads past the end of src — confirm that
// callers always pass a multiple of 8.
void float_to_bit(float *src, unsigned char *dst, size_t size)
{
    size_t dst_size = size / 8 + 1;
    memset(dst, 0, dst_size); // start from an all-zero bitmap
    size_t i;
    //__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
    __m256 float_zero256 = _mm256_set1_ps(0.0);
    for (i = 0; i < size; i+=8)
    {
        //__m256i src256 = _mm256_loadu_si256((__m256i *)(&src[i]));
        //__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
        //uint32_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1
        ////mask = ~mask; // inverse mask, (val >= 0) ? 1 : 0
        // compare 8 floats against 0 and pack the 8 results into one byte
        __m256 src256 = _mm256_loadu_ps((float *)(&src[i]));
        __m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS);
        uint32_t mask = _mm256_movemask_ps(result256); // (val > 0) ? 0 : 1
        dst[i / 8] = mask;
    }
}
// Transpose one 4x4 float tile: reads four rows of A (row stride lda),
// transposes them in SSE registers, and writes four rows of B (row stride ldb).
// Loads/stores are unaligned-safe.
static inline void transpose4x4_SSE(float *A, float *B, const int lda, const int ldb)
{
    __m128 t0 = _mm_loadu_ps(A);
    __m128 t1 = _mm_loadu_ps(A + lda);
    __m128 t2 = _mm_loadu_ps(A + 2 * lda);
    __m128 t3 = _mm_loadu_ps(A + 3 * lda);
    _MM_TRANSPOSE4_PS(t0, t1, t2, t3);
    _mm_storeu_ps(B, t0);
    _mm_storeu_ps(B + ldb, t1);
    _mm_storeu_ps(B + 2 * ldb, t2);
    _mm_storeu_ps(B + 3 * ldb, t3);
}
// Blocked transpose of an n x m matrix: B[j][i] = A[i][j], with row strides
// lda (source) and ldb (destination). Full block_size x block_size blocks are
// transposed 4x4 tiles at a time with the SSE kernel; partial blocks at the
// right/bottom edges fall back to scalar copies.
// NOTE(review): the SSE path steps i2/j2 by 4, so block_size is assumed to be
// a multiple of 4 — confirm with callers.
void transpose_block_SSE4x4(float *A, float *B, const int n, const int m,
    const int lda, const int ldb, const int block_size)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < n; i += block_size) {
        int j, i2, j2;
        //int max_i2 = (i + block_size < n) ? (i + block_size) : n;
        if (i + block_size < n) {
            int max_i2 = i + block_size;
            for (j = 0; j < m; j += block_size) {
                //int max_j2 = (j + block_size < m) ? (j + block_size) : m;
                if (j + block_size < m) {
                    // full block: SSE 4x4 tiles
                    int max_j2 = j + block_size;
                    for (i2 = i; i2 < max_i2; i2 += 4) {
                        for (j2 = j; j2 < max_j2; j2 += 4) {
                            transpose4x4_SSE(&A[i2*lda + j2], &B[j2*ldb + i2], lda, ldb);
                        }
                    }
                }
                else {
                    // partial block at the right edge: scalar
                    for (i2 = i; i2 < max_i2; ++i2) {
                        for (j2 = j; j2 < m; ++j2) {
                            B[j2*ldb + i2] = A[i2*lda + j2];
                        }
                    }
                }
            }
        }
        else {
            // partial block rows at the bottom edge: scalar
            for (i2 = i; i2 < n; ++i2) {
                for (j2 = 0; j2 < m; ++j2) {
                    B[j2*ldb + i2] = A[i2*lda + j2];
                }
            }
        }
    }
}
// Max-pooling forward pass over an NCHW tensor. Three paths per output row:
//  * stride == 1: 8 output pixels per iteration (window maxima accumulated in
//    a __m256);
//  * size == 2 && stride == 2: 4 output pixels per iteration via shuffles;
//  * scalar remainder for everything else / the row tail.
// NOTE(review): only the scalar path records the argmax into `indexes` (used
// by the backward pass); the vector paths leave those entries untouched —
// presumably they are only hit at inference where indexes == NULL. Confirm.
void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c,
    int pad, int stride, int batch)
{
    const int w_offset = -pad / 2;
    const int h_offset = -pad / 2;
    int b, k;
    for (b = 0; b < batch; ++b) {
        #pragma omp parallel for
        for (k = 0; k < c; ++k) {
            int i, j, m, n;
            for (i = 0; i < out_h; ++i) {
                //for (j = 0; j < out_w; ++j) {
                j = 0;
                if(stride == 1 && is_avx() == 1) {
                    // stride-1 path: the 8 adjacent windows share columns, so a
                    // single unaligned load covers one (n,m) offset of all 8
                    for (j = 0; j < out_w - 8 - (size - 1); j += 8) {
                        int out_index = j + out_w*(i + out_h*(k + c*b));
                        __m256 max256 = _mm256_set1_ps(-FLT_MAX);
                        for (n = 0; n < size; ++n) {
                            for (m = 0; m < size; ++m) {
                                int cur_h = h_offset + i*stride + n;
                                int cur_w = w_offset + j*stride + m;
                                int index = cur_w + w*(cur_h + h*(k + b*c));
                                int valid = (cur_h >= 0 && cur_h < h &&
                                    cur_w >= 0 && cur_w < w);
                                if (!valid) continue;
                                __m256 src256 = _mm256_loadu_ps(&src[index]);
                                max256 = _mm256_max_ps(src256, max256);
                            }
                        }
                        _mm256_storeu_ps(&dst[out_index], max256);
                    }
                }
                else if (size == 2 && stride == 2 && is_avx() == 1) {
                    // 2x2/stride-2 path: load 8 inputs, fold horizontal pairs,
                    // then compress to 4 outputs
                    for (j = 0; j < out_w - 4; j += 4) {
                        int out_index = j + out_w*(i + out_h*(k + c*b));
                        //float max = -FLT_MAX;
                        //int max_i = -1;
                        __m128 max128 = _mm_set1_ps(-FLT_MAX);
                        for (n = 0; n < size; ++n) {
                            //for (m = 0; m < size; ++m)
                            m = 0;
                            {
                                int cur_h = h_offset + i*stride + n;
                                int cur_w = w_offset + j*stride + m;
                                int index = cur_w + w*(cur_h + h*(k + b*c));
                                int valid = (cur_h >= 0 && cur_h < h &&
                                    cur_w >= 0 && cur_w < w);
                                if (!valid) continue;
                                __m256 src256 = _mm256_loadu_ps(&src[index]);
                                __m256 src256_2 = _mm256_permute_ps(src256, (1 << 0) | (3 << 4));
                                __m256 max256 = _mm256_max_ps(src256, src256_2);
                                __m128 src128_0 = _mm256_extractf128_ps(max256, 0);
                                __m128 src128_1 = _mm256_extractf128_ps(max256, 1);
                                __m128 src128 = _mm_shuffle_ps(src128_0, src128_1, (2 << 2) | (2 << 6));
                                max128 = _mm_max_ps(src128, max128);
                            }
                        }
                        _mm_storeu_ps(&dst[out_index], max128);
                    }
                }
                // scalar remainder (and the general case): tracks argmax too
                for (; j < out_w; ++j) {
                    int out_index = j + out_w*(i + out_h*(k + c*b));
                    float max = -FLT_MAX;
                    int max_i = -1;
                    for (n = 0; n < size; ++n) {
                        for (m = 0; m < size; ++m) {
                            int cur_h = h_offset + i*stride + n;
                            int cur_w = w_offset + j*stride + m;
                            int index = cur_w + w*(cur_h + h*(k + b*c));
                            int valid = (cur_h >= 0 && cur_h < h &&
                                cur_w >= 0 && cur_w < w);
                            float val = (valid != 0) ? src[index] : -FLT_MAX;
                            max_i = (val > max) ? index : max_i;
                            max = (val > max) ? val : max;
                        }
                    }
                    dst[out_index] = max;
                    if (indexes) indexes[out_index] = max_i;
                }
            }
        }
    }
}
#else // AVX
// Non-AVX build: report that AVX is unavailable on this target.
int is_avx() {
    return 0;
}
// Non-AVX build: report that FMA/AVX2 is unavailable on this target.
int is_fma_avx2() {
    return 0;
}
// C += ALPHA * A * B for row-major, non-transposed operands.
// A is M x K (leading dimension lda), B is K x N (ldb), C is M x N (ldc).
// The i-k-j loop order streams each row of B while ALPHA*A[i][k] sits in a
// register, which is cache-friendly for row-major data.
void gemm_nn(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int row, kk, col;
    for (row = 0; row < M; ++row) {
        float *c_row = C + row * ldc; // output row being accumulated
        for (kk = 0; kk < K; ++kk) {
            PUT_IN_REGISTER float A_PART = ALPHA * A[row * lda + kk];
            float *b_row = B + kk * ldb;
            for (col = 0; col < N; ++col) {
                c_row[col] += A_PART * b_row[col];
            }
        }
    }
}
// OpenMP-parallel variant of gemm_nn: C += ALPHA * A * B, identical math,
// with rows of C distributed across threads (each thread owns whole rows, so
// no accumulation races).
void gemm_nn_fast(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int row;
    #pragma omp parallel for
    for (row = 0; row < M; ++row) {
        int kk, col;
        float *c_row = C + row * ldc; // output row owned by this thread
        for (kk = 0; kk < K; ++kk) {
            PUT_IN_REGISTER float A_PART = ALPHA * A[row * lda + kk];
            float *b_row = B + kk * ldb;
            for (col = 0; col < N; ++col) {
                c_row[col] += A_PART * b_row[col];
            }
        }
    }
}
void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA,
uint32_t *A, int lda,
uint32_t *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n
int j, s;
float mean_val = mean_arr[i];
//printf(" l.mean_arr[i] = %d \n ", l.mean_arr[i]);
for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c)
{
//PUT_IN_REGISTER float A_PART = 1*a[i*k + s];
PUT_IN_REGISTER uint32_t A_PART = A[i * lda + s];
for (j = 0; j < N; ++j) // out_h*out_w;
{
//c[i*n + j] += A_PART*b[s*n + j];
PUT_IN_REGISTER uint32_t B_PART = B[s * ldb + j];
uint32_t xnor_result = ~(A_PART ^ B_PART);
//printf(" xnor_result = %d, ", xnor_result);
int32_t count = popcnt_32(xnor_result); // must be Signed int
C[i*ldc + j] += (2 * count - 32) * mean_val;
//c[i*n + j] += count*mean;
}
}
}
}
// Naive direct 2-D convolution: accumulates (+=) into `output`, one w*h plane
// per filter, summing over all input channels with zero padding `pad`.
// NOTE(review): the spatial loops walk the input grid and never apply
// `stride`, so this routine is only correct for the stride == 1 geometry it
// was written for; `mean` is accepted but unused — confirm with callers.
void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride,
    float *weights, float *input, float *output, float *mean)
{
    const int out_h = (h + 2 * pad - ksize) / stride + 1; // output_height=input_height for stride=1 and pad=1
    const int out_w = (w + 2 * pad - ksize) / stride + 1; // output_width=input_width for stride=1 and pad=1
    int fil;
    // one output plane per filter, parallelized across filters
    #pragma omp parallel for
    for (fil = 0; fil < n; ++fil) {
        int chan, y, x, f_y, f_x;
        for (chan = 0; chan < c; ++chan) {
            const int input_pre_index = chan * w * h;            // start of this input channel
            const int weights_pre_index = fil * c * ksize * ksize + chan * ksize * ksize; // this filter/channel slice
            for (y = 0; y < h; ++y) {
                for (x = 0; x < w; ++x) {
                    const int output_index = fil * w * h + y * w + x;
                    float sum = 0;
                    for (f_y = 0; f_y < ksize; ++f_y) {
                        const int input_y = y + f_y - pad;
                        if (input_y < 0 || input_y >= h) continue; // row falls in padding
                        for (f_x = 0; f_x < ksize; ++f_x) {
                            const int input_x = x + f_x - pad;
                            if (input_x < 0 || input_x >= w) continue; // column falls in padding
                            sum += input[input_pre_index + input_y * w + input_x]
                                 * weights[weights_pre_index + f_y * ksize + f_x];
                        }
                    }
                    // l.output[filters][height][width] +=
                    //     state.input[channels][height][width] * l.weights[...]
                    output[output_index] += sum;
                }
            }
        }
    }
}
// Portable 64-bit population count. Dispatches to the native builtin or MSVC
// intrinsic; on 32-bit targets the intrinsic/builtin only counts the low 32
// bits (the argument is truncated), so the high word is counted separately
// via val64 >> 32 and added.
static inline int popcnt_64(uint64_t val64) {
#ifdef WIN32 // Windows
#ifdef _WIN64 // Windows 64-bit
    int tmp_count = __popcnt64(val64);
#else // Windows 32-bit
    int tmp_count = __popcnt(val64);
    tmp_count += __popcnt(val64 >> 32);
#endif
#else // Linux
#if defined(__x86_64__) || defined(__aarch64__) // Linux 64-bit
    int tmp_count = __builtin_popcountll(val64);
#else // Linux 32-bit
    int tmp_count = __builtin_popcount(val64);
    tmp_count += __builtin_popcount(val64 >> 32);
#endif
#endif
    return tmp_count;
}
// Scalar binary GEMM over bit-packed, transposed operands:
// A holds M bit-rows of K bits (row stride lda bits), B holds N bit-rows of K
// bits (row stride ldb bits, already transposed so row j is output column j).
// Each output cell is a K-bit XNOR dot product accumulated 64 bits at a time;
// (2*count - K) converts matches-minus-mismatches, then the per-filter mean
// factor is applied. ALPHA_UNUSED exists only for signature parity.
// NOTE(review): rows are read through uint64_t* casts, so A/B rows must be
// suitably aligned and lda/ldb multiples of 8 bits — confirm with callers.
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
        int j, k;
        float mean_val = mean_arr[i];
        for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
            int count = 0;
            for (k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
                uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8));
                uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8));
                uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
                int tmp_count = popcnt_64(c_bit64);
                // last partial word: discard the popcount of padding bits past K
                if (K - k < 64) tmp_count = tmp_count - (64 - (K - k)); // remove extra bits
                count += tmp_count;
                //binary_int64_printf(c_bit64);
                //printf(", count = %d \n\n", tmp_count);
            }
            C[i*ldc + j] = (2 * count - K) * mean_val;
        }
    }
}
// Non-AVX build: the transposed im2col has no scalar implementation; this stub
// only warns and leaves data_col untouched. Callers must avoid this path when
// built without AVX.
void im2col_cpu_custom_transpose(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int ldb_align)
{
    printf("\n im2col_cpu_custom_transpose() isn't implemented without AVX \n");
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// Non-AVX build of im2col_cpu_custom: delegates directly to the generic
// reference implementation.
// The hand-written "optimized" loop that used to follow the call was made
// unreachable by this unconditional delegate-and-return (it even contained a
// duplicated interior loop), so the dead code has been removed; behavior is
// unchanged.
void im2col_cpu_custom(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col)
{
    im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// Scalar (non-AVX) im2col straight into a bit-packed buffer: each patch
// element becomes one bit (1 iff the pixel is strictly > 0) at bit index
// c * bit_align + h * width_col + w of data_col viewed as bytes. Feeds the
// XNOR/popcount GEMM. Fast path only (stride == 1, pad == 1, same-size
// output) — no generic fallback.
// NOTE(review): data_col is assumed pre-zeroed by the caller (bits are only
// ever set) — confirm at call sites.
void im2col_cpu_custom_bin(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int bit_align)
{
    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;
    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1)
    {
        int new_ldb = bit_align; // bits per im2col row
        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            // decompose im2col row index into kernel offset + source channel
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            // interior pixels; the "- 8" split is a leftover of the vector
            // version — both loops step by 1 and together cover the interior
            for (h = pad; h < height_col - pad; ++h) {
                for (w = pad; w < width_col - pad - 8; w += 1) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    float val = data_im[im_col + width*(im_row + height*c_im)];
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                    float val = data_im[im_col + width*(im_row + height*c_im)];
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
            }
            // border cells: im2col_get_pixel() handles the padding reads
            {
                w = 0; // left column
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
            }
            {
                w = width_col - 1; // right column
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
            }
            {
                h = 0; // top row
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
            }
            {
                h = height_col - 1; // bottom row
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
            }
        }
    }
    else {
        // no generic path for unsupported geometries
        printf("\n Error: is no non-optimized version \n");
        //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin
        // float_to_bit(b, t_input, src_size);
        // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
    }
}
// Scalar build: apply activation `a` in place over x[0..n-1]. LINEAR is the
// identity (no work); LEAKY uses the 0.1 negative slope inline; every other
// activation goes through the generic activate() dispatcher.
void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a)
{
    int idx;
    if (a == LINEAR)
    {
        // identity: nothing to do
    }
    else if (a == LEAKY)
    {
        for (idx = 0; idx < n; ++idx) {
            const float v = x[idx];
            x[idx] = (v > 0) ? v : .1 * v;
        }
    }
    else
    {
        for (idx = 0; idx < n; ++idx) {
            x[idx] = activate(x[idx], a);
        }
    }
}
// Packs the signs of `size` floats into a bitmap: bit i of dst is 1 iff
// src[i] > 0 (strictly positive), 8 bits per byte, LSB-first — the same
// layout the AVX build produces via _mm256_movemask_ps.
// Fixes two defects of the previous version:
//  * the packing loop consumed the temporary byte array in groups of 8 even
//    when size % 8 != 0, reading past the end of the allocation;
//  * it heap-allocated (and freed) a size-byte scratch buffer it never needed.
// Output is identical for the cases the old code handled correctly.
void float_to_bit(float *src, unsigned char *dst, size_t size)
{
    size_t dst_size = size / 8 + 1;
    memset(dst, 0, dst_size); // start from an all-zero bitmap
    size_t i;
    for (i = 0; i < size; ++i) {
        if (src[i] > 0) dst[i / 8] |= (unsigned char)(1u << (i % 8));
    }
}
// Scalar block_size x block_size transpose: B[col][row] = A[row][col], with
// row strides lda (source) and ldb (destination).
static inline void transpose_scalar_block(float *A, float *B, const int lda, const int ldb, const int block_size)
{
    int r;
    for (r = 0; r < block_size; r++) {
        int q;
        for (q = 0; q < block_size; q++) {
            B[q * ldb + r] = A[r * lda + q];
        }
    }
}
// Scalar build of the blocked transpose (the SSE name is kept for API parity
// with the AVX build): B[j][i] = A[i][j] for an n x m source, row strides lda
// (source) and ldb (destination), processed in block_size x block_size tiles
// clamped at the matrix edges for cache locality.
void transpose_block_SSE4x4(float *A, float *B, const int n, const int m,
    const int lda, const int ldb, const int block_size)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < n; i += block_size) {
        int j;
        for (j = 0; j < m; j += block_size) {
            // clamp the tile to the matrix bounds
            const int i_end = (i + block_size < n) ? (i + block_size) : n;
            const int j_end = (j + block_size < m) ? (j + block_size) : m;
            int r, q;
            for (r = i; r < i_end; ++r) {
                for (q = j; q < j_end; ++q) {
                    B[q * ldb + r] = A[r * lda + q];
                }
            }
        }
    }
}
// Scalar max-pool forward pass (name kept for API parity with the AVX build).
// For every output cell, scans its size x size window over the NCHW input,
// stores the maximum in dst and — when `indexes` is non-NULL — the linear
// index of the argmax for the backward pass. Window cells that fall outside
// the image contribute nothing; a fully-out-of-bounds window yields -FLT_MAX
// with argmax -1, matching the original.
void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c,
    int pad, int stride, int batch)
{
    const int w_offset = -pad / 2;
    const int h_offset = -pad / 2;
    int b, k;
    for (b = 0; b < batch; ++b) {
        #pragma omp parallel for
        for (k = 0; k < c; ++k) {
            int i, j, n, m;
            for (i = 0; i < out_h; ++i) {
                for (j = 0; j < out_w; ++j) {
                    const int out_index = j + out_w*(i + out_h*(k + c*b));
                    float best = -FLT_MAX;
                    int best_index = -1;
                    for (n = 0; n < size; ++n) {
                        for (m = 0; m < size; ++m) {
                            const int cur_h = h_offset + i*stride + n;
                            const int cur_w = w_offset + j*stride + m;
                            if (cur_h < 0 || cur_h >= h || cur_w < 0 || cur_w >= w) continue;
                            const int index = cur_w + w*(cur_h + h*(k + b*c));
                            if (src[index] > best) {
                                best = src[index];
                                best_index = index;
                            }
                        }
                    }
                    dst[out_index] = best;
                    if (indexes) indexes[out_index] = best_index;
                }
            }
        }
    }
}
#endif // AVX
// 32 channels -> 1 channel (with 32 floats)
// 256 channels -> 8 channels (with 32 floats)
// Re-pack a CHW float tensor so that groups of 32 consecutive channels
// become interleaved per pixel: output layout is [c/32][h*w][32].
// Assumes c is a multiple of 32 (a partial trailing group would read
// past the end of input).
void repack_input(float *input, float *re_packed_input, int w, int h, int c)
{
    const int spatial = w * h;
    int group;
    for (group = 0; group < c; group += 32)
    {
        float *dst_group = re_packed_input + group * spatial;
        int pix;
        for (pix = 0; pix < spatial; ++pix)
        {
            int lane;
            for (lane = 0; lane < 32; ++lane) {
                dst_group[pix * 32 + lane] = input[(group + lane) * spatial + pix];
            }
        }
    }
}
// Transpose a src_h x src_w matrix of 32-bit words.
// Rows of src are strided by src_align words; dst_align counts bits, so
// rows of the result are strided by dst_align/32 words. The expression
// col * dst_align / 32 deliberately keeps the original evaluation order.
void transpose_uint32(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align)
{
    int row;
    for (row = 0; row < src_h; ++row)
    {
        int col;
        for (col = 0; col < src_w; ++col)
        {
            dst[col * dst_align / 32 + row] = src[row * src_align + col];
        }
    }
}
// Binary (XNOR-net) GEMM on bit-packed operands:
//   C[i][j] += sum_s (2*popcount(~(A[i][s] ^ B[j][s])) - 32) * mean_arr[i]
// A is M x K 32-bit words (row i scaled by mean_arr[i]); B is stored
// transposed, N x K words. Each word pair contributes +mean per matching
// bit and -mean per differing bit.
// NOTE(review): ALPHA is accepted for signature parity with the float
// GEMMs but is unused here.
void gemm_nn_bin_transposed_32bit_packed(int M, int N, int K, float ALPHA,
    uint32_t *A, int lda,
    uint32_t *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) { // output rows (l.n filters)
        int j, s;
        float mean_val = mean_arr[i];
        for (j = 0; j < N; ++j) // output columns (out_h*out_w positions)
        {
            float val = 0;
            for (s = 0; s < K; ++s) // packed reduction dim: l.size*l.size*l.c/32
            {
                PUT_IN_REGISTER uint32_t A_PART = ((uint32_t*)A)[i*lda + s];
                PUT_IN_REGISTER uint32_t B_PART = ((uint32_t*)B)[j * ldb + s];
                uint32_t xnor_result = ~(A_PART ^ B_PART);
                // must be Signed int: 2*count - 32 can be negative
                int32_t count = popcnt_32(xnor_result);
                val += (2 * count - 32) * mean_val;
            }
            C[i*ldc + j] += val;
        }
    }
}
// Direct binary convolution on channel-bit-packed data.
// packed_input:   [c/32][h][w] words, each word holding 32 channel bits
//                 for one spatial position.
// packed_weights: per-filter rows of new_lda bits; within a row the
//                 layout is [c/32][size][size] words.
// output:         [n][h][w] floats, accumulated (+=), one value per
//                 filter and position; each word XNOR+popcount adds
//                 (2*count - 32) * mean_arr[fil].
void convolution_repacked(uint32_t *packed_input, uint32_t *packed_weights, float *output,
    int w, int h, int c, int n, int size, int pad, int new_lda, float *mean_arr)
{
    int fil;
    // filter index — filters are independent, parallelize over them
    #pragma omp parallel for
    for (fil = 0; fil < n; ++fil) {
        float mean_val = mean_arr[fil];
        int chan, y, x, f_y, f_x; // c_pack
        // channel index (32 channels per packed word)
        for (chan = 0; chan < c / 32; ++chan)
            //for (chan = 0; chan < l.c; chan += 32)
            //for (c_pack = 0; c_pack < 32; ++c_pack)
            // input - y
            for (y = 0; y < h; ++y)
                // input - x
                for (x = 0; x < w; ++x)
                {
                    int const output_index = fil*w*h + y*w + x;
                    float sum = 0;
                    // filter - y
                    for (f_y = 0; f_y < size; ++f_y)
                    {
                        int input_y = y + f_y - pad;
                        // filter - x
                        for (f_x = 0; f_x < size; ++f_x)
                        {
                            int input_x = x + f_x - pad;
                            // zero padding: taps outside the image add nothing
                            if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;
                            // normal
                            //float input = state.input[(chan + c_pack)*l.w*l.h + input_y*l.w + input_x];
                            //float weight = l.weights[fil*l.c*l.size*l.size + (chan + c_pack)*l.size*l.size + f_y*l.size + f_x];
                            // packed
                            //float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack];
                            //float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack];
                            //sum += input * weight;
                            //float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack];
                            //float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack];
                            //uint32_t bit1 = input > 0;
                            //uint32_t bit2 = weight > 0;
                            //uint32_t count = (~(bit1 ^ bit2)) & 1;
                            //float result = (2 * (float)count - 1) * mean_val;
                            //printf("\n mul = %f, bit1 = %d, bit2 = %d, count = %d, mean = %f, result = %f ", input*weight, bit1, bit2, count, mean_val, result);
                            //sum += result;
                            uint32_t input = ((uint32_t *)packed_input)[chan*w*h + input_y*w + input_x];
                            //uint32_t weight = ((uint32_t *)l.align_bit_weights)[fil*l.c*l.size*l.size/32 + chan*l.size*l.size + f_y*l.size + f_x];
                            uint32_t weight = ((uint32_t *)packed_weights)[fil*new_lda / 32 + chan*size*size + f_y*size + f_x];
                            uint32_t xnor_result = ~(input ^ weight);
                            // mandatory Signed int: 2*count - 32 may be negative
                            int32_t count = popcnt_32(xnor_result);
                            sum += (2 * count - 32) * mean_val;
                        }
                    }
                    // l.output[filters][width][height] +=
                    //        state.input[channels][width][height] *
                    //        l.weights[filters][channels][filter_width][filter_height];
                    output[output_index] += sum;
                }
    }
}
// C += ALPHA * A * B^T
// A: M x K with row stride lda; B: N x K with row stride ldb (used
// transposed); C: M x N with row stride ldc.
void gemm_nt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int row, col, kk;
    for (row = 0; row < M; ++row) {
        for (col = 0; col < N; ++col) {
            float acc = 0;
            for (kk = 0; kk < K; ++kk) {
                acc += ALPHA * A[row*lda + kk] * B[col*ldb + kk];
            }
            C[row*ldc + col] += acc;
        }
    }
}
// C += ALPHA * A^T * B
// A is stored K x M with row stride lda (accessed transposed); B: K x N
// with row stride ldb; C: M x N with row stride ldc. The scaled A element
// is hoisted out of the innermost loop.
void gemm_tn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int row, col, kk;
    for (row = 0; row < M; ++row) {
        for (kk = 0; kk < K; ++kk) {
            const float a_val = ALPHA * A[kk*lda + row];
            for (col = 0; col < N; ++col) {
                C[row*ldc + col] += a_val * B[kk*ldb + col];
            }
        }
    }
}
// C += ALPHA * A^T * B^T
// A is stored K x M with row stride lda, B is stored N x K with column
// stride ldb; both are accessed transposed. C: M x N with row stride ldc.
void gemm_tt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int row, col, kk;
    for (row = 0; row < M; ++row) {
        for (col = 0; col < N; ++col) {
            float acc = 0;
            for (kk = 0; kk < K; ++kk) {
                acc += ALPHA * A[row + kk*lda] * B[kk + col*ldb];
            }
            C[row*ldc + col] += acc;
        }
    }
}
// Single-precision GEMM dispatcher: C = ALPHA * op(A) * op(B) + BETA * C,
// where op() transposes its argument when TA / TB are nonzero.
// C is scaled by BETA first; the AVX2/FMA fast kernel is used for the
// NN case when available, otherwise the generic kernels are run one
// output row per OpenMP task.
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float BETA,
    float *C, int ldc)
{
    //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
    // pre-scale C by BETA (skipped when BETA == 1, the common case)
    if (BETA != 1){
        int i, j;
        for(i = 0; i < M; ++i){
            for(j = 0; j < N; ++j){
                C[i*ldc + j] *= BETA;
            }
        }
    }
    is_avx();   // initialize static variable
    if (is_fma_avx2() && !TA && !TB) {
        gemm_nn_fast(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    }
    else {
        int t;
        // each task computes one row t of C; for transposed A the row
        // slice is A + t (column t of the stored matrix), otherwise
        // A + t*lda (row t)
        #pragma omp parallel for
        for (t = 0; t < M; ++t) {
            if (!TA && !TB)
                gemm_nn(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc);
            else if (TA && !TB)
                gemm_tn(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc);
            else if (!TA && TB)
                gemm_nt(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc);
            else
                gemm_tt(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc);
        }
    }
}
#ifdef GPU
#include <math.h>
// GPU GEMM on device pointers via cuBLAS: C = ALPHA * op(A)*op(B) + BETA*C.
// cuBLAS is column-major, so the call swaps A/B and M/N to compute the
// row-major product; runs on the library's shared CUDA stream.
void gemm_ongpu(int TA, int TB, int M, int N, int K, float ALPHA,
    float *A_gpu, int lda,
    float *B_gpu, int ldb,
    float BETA,
    float *C_gpu, int ldc)
{
    cublasHandle_t handle = blas_handle();
    cudaError_t stream_status = (cudaError_t)cublasSetStream(handle, get_cuda_stream());
    CHECK_CUDA(stream_status);
    cudaError_t status = (cudaError_t)cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
        (TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc);
    CHECK_CUDA(status);
}
// Convenience GPU GEMM on host pointers: copies A, B, C to the device,
// runs gemm_ongpu, copies C back, and frees all device buffers.
// Intended for testing/benchmarking, not hot paths (3 uploads + 1
// download per call).
void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float BETA,
    float *C, int ldc)
{
    float *A_gpu = cuda_make_array(A, (TA ? lda*K:lda*M));
    float *B_gpu = cuda_make_array(B, (TB ? ldb*N : ldb*K));
    float *C_gpu = cuda_make_array(C, ldc*M);
    gemm_ongpu(TA, TB, M, N, K, ALPHA, A_gpu, lda, B_gpu, ldb, BETA, C_gpu, ldc);
    cuda_pull_array(C_gpu, C, ldc*M);
    cuda_free(A_gpu);
    cuda_free(B_gpu);
    cuda_free(C_gpu);
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
// Benchmark: time 32 GPU GEMMs of (m x k) * (k x n) with random inputs
// and print the elapsed time.
// NOTE(review): clock() measures host CPU time, and each gemm_gpu call
// includes host<->device transfers — this times the whole round trip,
// not just the kernels.
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;
    float *c = random_matrix(m,n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i<32; ++i){
        gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}
// Benchmark: time `iter` device-resident GEMMs (data uploaded once) and
// report seconds and GFLOPS. cudaDeviceSynchronize after each call makes
// the host timing meaningful despite the asynchronous launches.
// NOTE(review): a and b are always allocated m*k / k*n regardless of
// TA/TB, while lda/ldb are derived from them — consistent only because
// the dimensions used here keep the buffers large enough; verify before
// reusing with transposes.
void time_ongpu(int TA, int TB, int m, int k, int n)
{
    int iter = 10;
    float *a = random_matrix(m,k);
    float *b = random_matrix(k,n);
    int lda = (!TA)?k:m;
    int ldb = (!TB)?n:k;
    float *c = random_matrix(m,n);
    float *a_cl = cuda_make_array(a, m*k);
    float *b_cl = cuda_make_array(b, k*n);
    float *c_cl = cuda_make_array(c, m*n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i<iter; ++i){
        gemm_ongpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
        cudaDeviceSynchronize();
    }
    // 2 flops per multiply-accumulate, plus the BETA*C update
    double flop = ((double)m)*n*(2.*k + 2.)*iter;
    double gflop = flop/pow(10., 9);
    end = clock();
    double seconds = sec(end-start);
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
    cuda_free(a_cl);
    cuda_free(b_cl);
    cuda_free(c_cl);
    free(a);
    free(b);
    free(c);
}
// Accuracy check: run the same GEMM on GPU and CPU with identical random
// inputs (fixed seed) and print the mean squared difference of the
// results. Both C buffers are zeroed so BETA=1 has no effect on the
// comparison.
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
    srand(0);
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;
    float *c = random_matrix(m,n);
    float *c_gpu = random_matrix(m,n);
    memset(c, 0, m*n*sizeof(float));
    memset(c_gpu, 0, m*n*sizeof(float));
    int i;
    //pm(m,k,b);
    gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
    //printf("GPU\n");
    //pm(m, n, c_gpu);
    gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    //printf("\n\nCPU\n");
    //pm(m, n, c);
    // accumulate squared element-wise differences
    double sse = 0;
    for(i = 0; i < m*n; ++i) {
        //printf("%f %f\n", c[i], c_gpu[i]);
        sse += pow(c[i]-c_gpu[i], 2);
    }
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
    free(a);
    free(b);
    free(c);
    free(c_gpu);
}
// Benchmark driver: times a fixed set of GEMM shapes typical of conv
// layers. The commented-out calls are alternative accuracy/size suites
// kept for manual experimentation. Always returns 0.
int test_gpu_blas()
{
    /*
    test_gpu_accuracy(0,0,10,576,75);
    test_gpu_accuracy(0,0,17,10,10);
    test_gpu_accuracy(1,0,17,10,10);
    test_gpu_accuracy(0,1,17,10,10);
    test_gpu_accuracy(1,1,17,10,10);
    test_gpu_accuracy(0,0,1000,10,100);
    test_gpu_accuracy(1,0,1000,10,100);
    test_gpu_accuracy(0,1,1000,10,100);
    test_gpu_accuracy(1,1,1000,10,100);
    test_gpu_accuracy(0,0,10,10,10);
    time_ongpu(0,0,64,2916,363);
    time_ongpu(0,0,64,2916,363);
    time_ongpu(0,0,64,2916,363);
    time_ongpu(0,0,192,729,1600);
    time_ongpu(0,0,384,196,1728);
    time_ongpu(0,0,256,196,3456);
    time_ongpu(0,0,256,196,2304);
    time_ongpu(0,0,128,4096,12544);
    time_ongpu(0,0,128,4096,4096);
    */
    time_ongpu(0,0,64,75,12544);
    time_ongpu(0,0,64,75,12544);
    time_ongpu(0,0,64,75,12544);
    time_ongpu(0,0,64,576,12544);
    time_ongpu(0,0,256,2304,784);
    time_ongpu(1,1,2304,256,784);
    time_ongpu(0,0,512,4608,196);
    time_ongpu(1,1,4608,512,196);
    return 0;
}
#endif
// One-time CPU init: probe AVX / FMA+AVX2 support so the feature flags
// cached inside is_avx()/is_fma_avx2() are populated before any
// multi-threaded GEMM dispatch uses them.
void init_cpu() {
    is_avx();
    is_fma_avx2();
}
|
depthwise_convolution_3x3_int4.c | /*
* Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* CSI-NN2 version 1.12.x */
#include "csi_thead_rvv.h"
// Requantize a vector of int32 accumulators to int8 (RVV, LMUL=4 source):
// fixed-point multiply-high by `multiplier`, rounding arithmetic right
// shift by (-shift - 1) (shift is presumably stored negative, per the
// CSI-NN quantization convention — verify against qinfo producer), add
// the output zero point, then saturating-narrow 32 -> 16 -> 8 bits.
static vint8m1_t requantize_m4(vint32m4_t _src, int32_t multiplier, int32_t shift, int32_t out_zp,
                               int vl)
{
    vint32m4_t _mulh = vmulh_vx_i32m4(_src, multiplier, vl);
    _mulh = vssra_vx_i32m4(_mulh, -shift - 1, vl);   // rounding shift right
    _mulh = vadd_vx_i32m4(_mulh, out_zp, vl);        // apply zero point
    vint16m2_t _tmp1 = vnclip_wx_i16m2(_mulh, 0, vl); // saturate to int16
    vint8m1_t _tmp2 = vnclip_wx_i8m1(_tmp1, 0, vl);   // saturate to int8
    return _tmp2;
}
// 3x3 stride-1 depthwise convolution for int4 tensors (NHWC), computed in
// int8: input is padded and widened int4 -> int8, the conv runs per
// channel with RVV strided loads (stride = in_c bytes, so one vector
// holds consecutive pixels of a single channel), producing two output
// rows per pass, and the int8 result is re-packed to int4 at the end.
// Assumes batch == 1 and (for depthwise) in_c == out_c.
int csi_nn_rvv_dwconv3x3s1_int4(struct csi_tensor *input, struct csi_tensor *output,
                                struct csi_tensor *kernel, struct csi_tensor *bias,
                                struct conv2d_params *params)
{
    int8_t *input_data = (int8_t *)input->data;
    int8_t *output_data = (int8_t *)output->data;
    int8_t *kernel_data = (int8_t *)kernel->data;
    int32_t *bias_data = (int32_t *)bias->data;
    int32_t batch = input->dim[0];
    int32_t in_h = input->dim[1];
    int32_t in_w = input->dim[2];
    int32_t in_c = input->dim[3];  // group = in_channel
    int32_t out_h = output->dim[1];
    int32_t out_w = output->dim[2];
    int32_t out_c = output->dim[3];
    // padded int8 copy of the int4 input
    int8_t *input_padd_buf = (int8_t *)csi_mem_alloc((in_h + params->pad_top + params->pad_down) *
                                                     (in_w + params->pad_left + params->pad_right) *
                                                     in_c * sizeof(int8_t));
    int8_t pad_value = input->qinfo->zero_point;
    csi_nn_rvv_pad_input_int4_trans_int8(
        input_data, input_padd_buf, in_c, in_h, in_w, in_h + params->pad_top + params->pad_down,
        in_w + params->pad_left + params->pad_right, params->pad_top, params->pad_left,
        input->qinfo->zero_point);
    // int8 working copies of the 3x3 kernel and of the output
    int8_t *kernel_tran_buf = (int8_t *)csi_mem_alloc(9 * in_c * sizeof(int8_t));
    int8_t *output_tran_buf = (int8_t *)csi_mem_alloc(out_h * out_w * out_c * sizeof(int8_t));
    csi_nn_rvv_int4_trans_int8(kernel_data, kernel_tran_buf, 9 * in_c);
    // from here on in_h/in_w are the padded dimensions
    in_h = in_h + params->pad_top + params->pad_down;
    in_w = in_w + params->pad_left + params->pad_right;
// kept for structure; forced serial (num_threads(1))
#pragma omp parallel for num_threads(1)
    for (int c = 0; c < in_c; c++) {
        // two output row pointers for the 2-rows-per-iteration path
        int8_t *outptr0 = output_tran_buf + c;
        int8_t *outptr1 = outptr0 + out_w * out_c;
        // please use fuse_zp2bias option in hhb, thus bias_data wont be NULL
        int32_t bias0 = bias_data[c];
        int8_t *img0 = input_padd_buf + c;
        // four input rows feed two output rows of a 3x3 window
        int8_t *r0 = img0;
        int8_t *r1 = r0 + in_w * in_c;
        int8_t *r2 = r1 + in_w * in_c;
        int8_t *r3 = r2 + in_w * in_c;
        const int8_t *kernel0 = kernel_tran_buf + c;
        // scalar 3x3 kernel taps for this channel
        int8_t k00 = kernel0[0];
        int8_t k01 = kernel0[1 * in_c];
        int8_t k02 = kernel0[2 * in_c];
        int8_t k10 = kernel0[3 * in_c];
        int8_t k11 = kernel0[4 * in_c];
        int8_t k12 = kernel0[5 * in_c];
        int8_t k20 = kernel0[6 * in_c];
        int8_t k21 = kernel0[7 * in_c];
        int8_t k22 = kernel0[8 * in_c];
        int vl;
        int h = 0;
        // h2 loop: two output rows per iteration
        for (; h + 1 < out_h; h += 2) {
            int w = out_w;
            // h2w8 loop
            while (w > 0) {
                vl = vsetvl_e32m4(w);
                vint32m4_t _acc0 = vmv_v_x_i32m4(bias0, vl);
                vint32m4_t _acc1 = vmv_v_x_i32m4(bias0, vl);
                // strided loads: vl consecutive pixels of channel c,
                // at window offsets 0 / 1 / 2
                vint8m1_t _r0_0_7 = vlse8_v_i8m1(r0, in_c * sizeof(int8_t), vl);
                vint8m1_t _r0_1_8 = vlse8_v_i8m1(r0 + 1 * in_c, in_c * sizeof(int8_t), vl);
                vint8m1_t _r0_2_9 = vlse8_v_i8m1(r0 + 2 * in_c, in_c * sizeof(int8_t), vl);
                vint8m1_t _r1_0_7 = vlse8_v_i8m1(r1, in_c * sizeof(int8_t), vl);
                vint8m1_t _r1_1_8 = vlse8_v_i8m1(r1 + 1 * in_c, in_c * sizeof(int8_t), vl);
                vint8m1_t _r1_2_9 = vlse8_v_i8m1(r1 + 2 * in_c, in_c * sizeof(int8_t), vl);
                vint8m1_t _r2_0_7 = vlse8_v_i8m1(r2, in_c * sizeof(int8_t), vl);
                vint8m1_t _r2_1_8 = vlse8_v_i8m1(r2 + 1 * in_c, in_c * sizeof(int8_t), vl);
                vint8m1_t _r2_2_9 = vlse8_v_i8m1(r2 + 2 * in_c, in_c * sizeof(int8_t), vl);
                vint8m1_t _r3_0_7 = vlse8_v_i8m1(r3, in_c * sizeof(int8_t), vl);
                vint8m1_t _r3_1_8 = vlse8_v_i8m1(r3 + 1 * in_c, in_c * sizeof(int8_t), vl);
                vint8m1_t _r3_2_9 = vlse8_v_i8m1(r3 + 2 * in_c, in_c * sizeof(int8_t), vl);
                vint16m2_t _r0_0_7_w = vwadd_vx_i16m2(_r0_0_7, 0, vl);  // widden 8->16
                vint16m2_t _r0_1_8_w = vwadd_vx_i16m2(_r0_1_8, 0, vl);
                vint16m2_t _r0_2_9_w = vwadd_vx_i16m2(_r0_2_9, 0, vl);
                vint16m2_t _r1_0_7_w = vwadd_vx_i16m2(_r1_0_7, 0, vl);
                vint16m2_t _r1_1_8_w = vwadd_vx_i16m2(_r1_1_8, 0, vl);
                vint16m2_t _r1_2_9_w = vwadd_vx_i16m2(_r1_2_9, 0, vl);
                vint16m2_t _r2_0_7_w = vwadd_vx_i16m2(_r2_0_7, 0, vl);
                vint16m2_t _r2_1_8_w = vwadd_vx_i16m2(_r2_1_8, 0, vl);
                vint16m2_t _r2_2_9_w = vwadd_vx_i16m2(_r2_2_9, 0, vl);
                vint16m2_t _r3_0_7_w = vwadd_vx_i16m2(_r3_0_7, 0, vl);
                vint16m2_t _r3_1_8_w = vwadd_vx_i16m2(_r3_1_8, 0, vl);
                vint16m2_t _r3_2_9_w = vwadd_vx_i16m2(_r3_2_9, 0, vl);
                // acc0 = rows r0..r2 against kernel rows 0..2 (output row h)
                // acc1 = rows r1..r3 against kernel rows 0..2 (output row h+1)
                _acc0 = vwmacc_vx_i32m4(_acc0, k00, _r0_0_7_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k01, _r0_1_8_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k02, _r0_2_9_w, vl);
                _acc1 = vwmacc_vx_i32m4(_acc1, k00, _r1_0_7_w, vl);
                _acc1 = vwmacc_vx_i32m4(_acc1, k01, _r1_1_8_w, vl);
                _acc1 = vwmacc_vx_i32m4(_acc1, k02, _r1_2_9_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k10, _r1_0_7_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k11, _r1_1_8_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k12, _r1_2_9_w, vl);
                _acc1 = vwmacc_vx_i32m4(_acc1, k10, _r2_0_7_w, vl);
                _acc1 = vwmacc_vx_i32m4(_acc1, k11, _r2_1_8_w, vl);
                _acc1 = vwmacc_vx_i32m4(_acc1, k12, _r2_2_9_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k20, _r2_0_7_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k21, _r2_1_8_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k22, _r2_2_9_w, vl);
                _acc1 = vwmacc_vx_i32m4(_acc1, k20, _r3_0_7_w, vl);
                _acc1 = vwmacc_vx_i32m4(_acc1, k21, _r3_1_8_w, vl);
                _acc1 = vwmacc_vx_i32m4(_acc1, k22, _r3_2_9_w, vl);
                // requantize with per-channel or per-tensor parameters
                vint8m1_t _res0, _res1;
                if (kernel->quant_channel > 1) {
                    _res0 = requantize_m4(_acc0, kernel->qinfo[c].multiplier,
                                          kernel->qinfo[c].shift, output->qinfo->zero_point, vl);
                    _res1 = requantize_m4(_acc1, kernel->qinfo[c].multiplier,
                                          kernel->qinfo[c].shift, output->qinfo->zero_point, vl);
                } else if (kernel->quant_channel == 1) {
                    _res0 = requantize_m4(_acc0, kernel->qinfo[0].multiplier,
                                          kernel->qinfo[0].shift, output->qinfo->zero_point, vl);
                    _res1 = requantize_m4(_acc1, kernel->qinfo[0].multiplier,
                                          kernel->qinfo[0].shift, output->qinfo->zero_point, vl);
                }
                // strided stores back into NHWC output
                vsse8_v_i8m1(outptr0, in_c * sizeof(int8_t), _res0, vl);
                vsse8_v_i8m1(outptr1, in_c * sizeof(int8_t), _res1, vl);
                r0 += vl * in_c;
                r1 += vl * in_c;
                r2 += vl * in_c;
                r3 += vl * in_c;
                outptr0 += vl * in_c;
                outptr1 += vl * in_c;
                w -= vl;
            }
            // advance input by 2 padding columns + one extra row pair,
            // output by one extra row (two rows were written)
            r0 += (2 + in_w) * in_c;
            r1 += (2 + in_w) * in_c;
            r2 += (2 + in_w) * in_c;
            r3 += (2 + in_w) * in_c;
            outptr0 += out_w * in_c;
            outptr1 += out_w * in_c;
        }
        // h1 tail: one remaining output row
        for (; h < out_h; h++) {
            int w = out_w;
            // h2w8 loop
            while (w > 0) {
                vl = vsetvl_e32m4(w);
                vint32m4_t _acc0 = vmv_v_x_i32m4(bias0, vl);
                vint8m1_t _r0_0_7 = vlse8_v_i8m1(r0, in_c * sizeof(int8_t), vl);
                vint8m1_t _r0_1_8 = vlse8_v_i8m1(r0 + 1 * in_c, in_c * sizeof(int8_t), vl);
                vint8m1_t _r0_2_9 = vlse8_v_i8m1(r0 + 2 * in_c, in_c * sizeof(int8_t), vl);
                vint8m1_t _r1_0_7 = vlse8_v_i8m1(r1, in_c * sizeof(int8_t), vl);
                vint8m1_t _r1_1_8 = vlse8_v_i8m1(r1 + 1 * in_c, in_c * sizeof(int8_t), vl);
                vint8m1_t _r1_2_9 = vlse8_v_i8m1(r1 + 2 * in_c, in_c * sizeof(int8_t), vl);
                vint8m1_t _r2_0_7 = vlse8_v_i8m1(r2, in_c * sizeof(int8_t), vl);
                vint8m1_t _r2_1_8 = vlse8_v_i8m1(r2 + 1 * in_c, in_c * sizeof(int8_t), vl);
                vint8m1_t _r2_2_9 = vlse8_v_i8m1(r2 + 2 * in_c, in_c * sizeof(int8_t), vl);
                vint16m2_t _r0_0_7_w = vwadd_vx_i16m2(_r0_0_7, 0, vl);  // widden 8->16
                vint16m2_t _r0_1_8_w = vwadd_vx_i16m2(_r0_1_8, 0, vl);
                vint16m2_t _r0_2_9_w = vwadd_vx_i16m2(_r0_2_9, 0, vl);
                vint16m2_t _r1_0_7_w = vwadd_vx_i16m2(_r1_0_7, 0, vl);
                vint16m2_t _r1_1_8_w = vwadd_vx_i16m2(_r1_1_8, 0, vl);
                vint16m2_t _r1_2_9_w = vwadd_vx_i16m2(_r1_2_9, 0, vl);
                vint16m2_t _r2_0_7_w = vwadd_vx_i16m2(_r2_0_7, 0, vl);
                vint16m2_t _r2_1_8_w = vwadd_vx_i16m2(_r2_1_8, 0, vl);
                vint16m2_t _r2_2_9_w = vwadd_vx_i16m2(_r2_2_9, 0, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k00, _r0_0_7_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k01, _r0_1_8_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k02, _r0_2_9_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k10, _r1_0_7_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k11, _r1_1_8_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k12, _r1_2_9_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k20, _r2_0_7_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k21, _r2_1_8_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k22, _r2_2_9_w, vl);
                vint8m1_t _res0;
                if (kernel->quant_channel > 1) {
                    _res0 = requantize_m4(_acc0, kernel->qinfo[c].multiplier,
                                          kernel->qinfo[c].shift, output->qinfo->zero_point, vl);
                } else if (kernel->quant_channel == 1) {
                    _res0 = requantize_m4(_acc0, kernel->qinfo[0].multiplier,
                                          kernel->qinfo[0].shift, output->qinfo->zero_point, vl);
                }
                vsse8_v_i8m1(outptr0, in_c * sizeof(int8_t), _res0, vl);
                r0 += vl * in_c;
                r1 += vl * in_c;
                r2 += vl * in_c;
                outptr0 += vl * in_c;
                w -= vl;
            }
        }
    }
    // pack int8 results back to int4.
    // NOTE(review): the element count uses in_c where buffers were sized
    // with out_c — equal for depthwise conv, but confirm if reused.
    csi_nn_rvv_int8_to_int4(output_tran_buf, output_data, out_h * out_w * in_c);
    csi_mem_free(input_padd_buf);
    csi_mem_free(kernel_tran_buf);
    csi_mem_free(output_tran_buf);
    return CSINN_TRUE;
}
// 3x3 stride-2 depthwise convolution for int4 tensors (NHWC), computed in
// int8: input padded and widened int4 -> int8, conv runs per channel with
// RVV strided loads (stride = 2*in_c bytes to realize the stride-2
// subsampling), result re-packed to int4 at the end.
// Assumes batch == 1 and (for depthwise) in_c == out_c.
int csi_nn_rvv_dwconv3x3s2_int4(struct csi_tensor *input, struct csi_tensor *output,
                                struct csi_tensor *kernel, struct csi_tensor *bias,
                                struct conv2d_params *params)
{
    int8_t *input_data = (int8_t *)input->data;
    int8_t *output_data = (int8_t *)output->data;
    int8_t *kernel_data = (int8_t *)kernel->data;
    int32_t *bias_data = (int32_t *)bias->data;
    int32_t batch = input->dim[0];
    int32_t in_h = input->dim[1];
    int32_t in_w = input->dim[2];
    int32_t in_c = input->dim[3];
    int32_t out_h = output->dim[1];
    int32_t out_w = output->dim[2];
    int32_t out_c = output->dim[3];
    // padded int8 copy of the int4 input
    int8_t *input_padd_buf = (int8_t *)csi_mem_alloc((in_h + params->pad_top + params->pad_down) *
                                                     (in_w + params->pad_left + params->pad_right) *
                                                     in_c * sizeof(int8_t));
    csi_nn_rvv_pad_input_int4_trans_int8(
        input_data, input_padd_buf, in_c, in_h, in_w, in_h + params->pad_top + params->pad_down,
        in_w + params->pad_left + params->pad_right, params->pad_top, params->pad_left,
        input->qinfo->zero_point);
    int8_t *kernel_tran_buf = (int8_t *)csi_mem_alloc(9 * in_c * sizeof(int8_t));
    int8_t *output_tran_buf = (int8_t *)csi_mem_alloc(out_h * out_w * out_c * sizeof(int8_t));
    csi_nn_rvv_int4_trans_int8(kernel_data, kernel_tran_buf, 9 * in_c);
    // from here on in_h/in_w are the padded dimensions
    in_h = in_h + params->pad_top + params->pad_down;
    in_w = in_w + params->pad_left + params->pad_right;
    // elements to skip at the end of each output row: the unread tail of
    // the current input row plus one whole skipped row (stride 2)
    int tailstep = (in_w - 2 * out_w + in_w) * in_c;
// kept for structure; forced serial (num_threads(1))
#pragma omp parallel for num_threads(1)
    for (int c = 0; c < in_c; c++) {
        int8_t *outptr0 = output_tran_buf + c;
        // please use fuse_zp2bias option in hhb, thus bias_data wont be NULL
        int32_t bias0 = bias_data[c];
        int8_t *img0 = input_padd_buf + c;
        // three input rows feed one output row of a 3x3 window
        int8_t *r0 = img0;
        int8_t *r1 = r0 + in_w * in_c;
        int8_t *r2 = r1 + in_w * in_c;
        const int8_t *kernel0 = kernel_tran_buf + c;
        // scalar 3x3 kernel taps for this channel
        int8_t k00 = kernel0[0];
        int8_t k01 = kernel0[1 * in_c];
        int8_t k02 = kernel0[2 * in_c];
        int8_t k10 = kernel0[3 * in_c];
        int8_t k11 = kernel0[4 * in_c];
        int8_t k12 = kernel0[5 * in_c];
        int8_t k20 = kernel0[6 * in_c];
        int8_t k21 = kernel0[7 * in_c];
        int8_t k22 = kernel0[8 * in_c];
        int vl;
        for (int h = 0; h < out_h; h++) {
            int w = out_w;
            while (w > 0) {
                vl = vsetvl_e32m4(w);
                vint32m4_t _acc0 = vmv_v_x_i32m4(bias0, vl);
                // stride-2 loads: every second pixel of channel c, at
                // window offsets 0 / 1 / 2; pointers advance as we go
                vint8m1_t _r0_0_7 = vlse8_v_i8m1(r0, 2 * in_c * sizeof(int8_t), vl);
                r0 += in_c;
                vint8m1_t _r0_1_8 = vlse8_v_i8m1(r0, 2 * in_c * sizeof(int8_t), vl);
                r0 += in_c;
                vint8m1_t _r0_2_9 = vlse8_v_i8m1(r0, 2 * in_c * sizeof(int8_t), vl);
                r0 += (vl - 1) * 2 * in_c;
                vint8m1_t _r1_0_7 = vlse8_v_i8m1(r1, 2 * in_c * sizeof(int8_t), vl);
                r1 += in_c;
                vint8m1_t _r1_1_8 = vlse8_v_i8m1(r1, 2 * in_c * sizeof(int8_t), vl);
                r1 += in_c;
                vint8m1_t _r1_2_9 = vlse8_v_i8m1(r1, 2 * in_c * sizeof(int8_t), vl);
                r1 += (vl - 1) * 2 * in_c;
                vint8m1_t _r2_0_7 = vlse8_v_i8m1(r2, 2 * in_c * sizeof(int8_t), vl);
                r2 += in_c;
                vint8m1_t _r2_1_8 = vlse8_v_i8m1(r2, 2 * in_c * sizeof(int8_t), vl);
                r2 += in_c;
                vint8m1_t _r2_2_9 = vlse8_v_i8m1(r2, 2 * in_c * sizeof(int8_t), vl);
                r2 += (vl - 1) * 2 * in_c;
                vint16m2_t _r0_0_7_w = vwadd_vx_i16m2(_r0_0_7, 0, vl);  // widden 8->16
                vint16m2_t _r0_1_8_w = vwadd_vx_i16m2(_r0_1_8, 0, vl);
                vint16m2_t _r0_2_9_w = vwadd_vx_i16m2(_r0_2_9, 0, vl);
                vint16m2_t _r1_0_7_w = vwadd_vx_i16m2(_r1_0_7, 0, vl);
                vint16m2_t _r1_1_8_w = vwadd_vx_i16m2(_r1_1_8, 0, vl);
                vint16m2_t _r1_2_9_w = vwadd_vx_i16m2(_r1_2_9, 0, vl);
                vint16m2_t _r2_0_7_w = vwadd_vx_i16m2(_r2_0_7, 0, vl);
                vint16m2_t _r2_1_8_w = vwadd_vx_i16m2(_r2_1_8, 0, vl);
                vint16m2_t _r2_2_9_w = vwadd_vx_i16m2(_r2_2_9, 0, vl);
                // 3x3 multiply-accumulate into the int32 accumulator
                _acc0 = vwmacc_vx_i32m4(_acc0, k00, _r0_0_7_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k01, _r0_1_8_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k02, _r0_2_9_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k10, _r1_0_7_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k11, _r1_1_8_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k12, _r1_2_9_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k20, _r2_0_7_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k21, _r2_1_8_w, vl);
                _acc0 = vwmacc_vx_i32m4(_acc0, k22, _r2_2_9_w, vl);
                // requantize with per-channel or per-tensor parameters
                vint8m1_t _res0;
                if (kernel->quant_channel > 1) {
                    _res0 = requantize_m4(_acc0, kernel->qinfo[c].multiplier,
                                          kernel->qinfo[c].shift, output->qinfo->zero_point, vl);
                } else if (kernel->quant_channel == 1) {
                    _res0 = requantize_m4(_acc0, kernel->qinfo[0].multiplier,
                                          kernel->qinfo[0].shift, output->qinfo->zero_point, vl);
                }
                vsse8_v_i8m1(outptr0, in_c * sizeof(int8_t), _res0, vl);
                outptr0 += vl * in_c;
                w -= vl;
            }
            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
    // pack int8 results back to int4.
    // NOTE(review): element count uses in_c where buffers were sized with
    // out_c — equal for depthwise conv, but confirm if reused.
    csi_nn_rvv_int8_to_int4(output_tran_buf, output_data, out_h * out_w * in_c);
    csi_mem_free(input_padd_buf);
    csi_mem_free(kernel_tran_buf);
    csi_mem_free(output_tran_buf);
    return CSINN_TRUE;
}
tcbjac2.c | #include "cbjac2.h"
#include "wnrme.h"
#include "rnd.h"
#include "timer.h"
// Benchmark / verification driver for the batched complex 2x2 Jacobi
// eigenvalue kernel cbjac2_.
// Usage: prog filename 2^{batch_size} #batches
// For each batch: reads the 2x2 Hermitian inputs (f,g real diagonals;
// r,j real/imag off-diagonal) from <filename>.{f,g,r,j} in parallel via
// pread, times cbjac2_ with the TSC, checks rotation orthogonality and
// residuals, then reads reference eigenvalues from <filename>.{k,l} and
// reports eigenvalue errors. CSV goes to stdout, worst cases to stderr.
int main(int argc, char *argv[])
{
    if (4 != argc) {
        (void)fprintf(stderr, "%s filename 2^{batch_size} #batches\n", *argv);
        return EXIT_FAILURE;
    }
    // n = batch size; must divide evenly into SIMD lanes and threads
    const size_t n = ((size_t)1u << atoz(argv[2u]));
    if (n % VSL) {
        (void)fprintf(stderr, "batch_size has to be a multiple of %u.\n", VSL);
        return EXIT_FAILURE;
    }
    int th = 0;
#ifdef _OPENMP
    th = omp_get_max_threads();
    if (n % th) {
        (void)fprintf(stderr, "batch_size has to be a multiple of %d.\n", th);
        return EXIT_FAILURE;
    }
#endif /* _OPENMP */
    const size_t b = atoz(argv[3u]);
    if (!b)
        return EXIT_SUCCESS;
    // build "<filename>.X" with a mutable extension character at fn[nl1]
    const size_t
        nl = strlen(argv[1u]),
        nl1 = (nl + 1u);
    char *const fn = calloc((nl + 3u), sizeof(char));
    assert(fn);
    // strcpy returns its destination, so this appends '.' after the copy
    strcpy(fn, argv[1u])[nl] = '.';
    int fm = O_RDONLY;
#ifdef _LARGEFILE64_SOURCE
    fm |= O_LARGEFILE;
#endif /* _LARGEFILE64_SOURCE */
    // open the six input streams: k,l = reference eigenvalues;
    // f,g = diagonal entries; r,j = off-diagonal real/imag parts
    fn[nl1] = 'k';
    const int fk = open(fn, fm);
    if (-1 >= fk) {
        (void)fprintf(stderr, "Cannot open %s for reading!\n", fn);
        return EXIT_FAILURE;
    }
    fn[nl1] = 'l';
    const int fl = open(fn, fm);
    if (-1 >= fl) {
        (void)fprintf(stderr, "Cannot open %s for reading!\n", fn);
        return EXIT_FAILURE;
    }
    fn[nl1] = 'f';
    const int ff = open(fn, fm);
    if (-1 >= ff) {
        (void)fprintf(stderr, "Cannot open %s for reading!\n", fn);
        return EXIT_FAILURE;
    }
    fn[nl1] = 'g';
    const int fg = open(fn, fm);
    if (-1 >= fg) {
        (void)fprintf(stderr, "Cannot open %s for reading!\n", fn);
        return EXIT_FAILURE;
    }
    fn[nl1] = 'r';
    const int fr = open(fn, fm);
    if (-1 >= fr) {
        (void)fprintf(stderr, "Cannot open %s for reading!\n", fn);
        return EXIT_FAILURE;
    }
    fn[nl1] = 'j';
    const int fj = open(fn, fm);
    if (-1 >= fj) {
        (void)fprintf(stderr, "Cannot open %s for reading!\n", fn);
        return EXIT_FAILURE;
    }
    // aligned working arrays, one float per batch element
    const size_t nt = n * sizeof(float);
    float
        *const a11 = (float*)aligned_alloc(VA, nt),
        *const a22 = (float*)aligned_alloc(VA, nt),
        *const a21r = (float*)aligned_alloc(VA, nt),
        *const a21i = (float*)aligned_alloc(VA, nt),
        *const c = (float*)aligned_alloc(VA, nt),
        *const cat = (float*)aligned_alloc(VA, nt),
        *const sat = (float*)aligned_alloc(VA, nt),
        *const l1 = (float*)aligned_alloc(VA, nt),
        *const l2 = (float*)aligned_alloc(VA, nt);
    assert(a11);
    assert(a22);
    assert(a21r);
    assert(a21i);
    assert(c);
    assert(cat);
    assert(sat);
    assert(l1);
    assert(l2);
    // extended-precision scratch for the error analysis
    wide *const w = (wide*)aligned_alloc(sizeof(wide), (n * sizeof(wide)));
    assert(w);
    unsigned *const p = (unsigned*)malloc((n >> VSLlg) * sizeof(unsigned));
    assert(p);
    unsigned rd[2u] = { 0u, 0u };
    uint64_t hz = tsc_get_freq_hz_(rd), be[2u] = { UINT64_C(0), UINT64_C(0) };
    (void)fprintf(stderr, "TSC frequency: %llu+(%u/%u) Hz.\n", (unsigned long long)hz, rd[0u], rd[1u]);
    (void)fflush(stderr);
    (void)fprintf(stdout, "\"B\",\"Ts\",\"ORT\",\"REN\",\"RLN\",\"RLX\",\"RLM\"\n");
    (void)fflush(stdout);
    // fixed-width batch-number format for column alignment
    const char *bf = (const char*)NULL;
    if (b <= 10u)
        bf = "%1zu";
    else if (b <= 100u)
        bf = "%2zu";
    else if (b <= 1000u)
        bf = "%3zu";
    else // b > 1000
        bf = "%zu";
    const size_t n_t = n / imax(th, 1);     // elements per thread
    const size_t cnt = n_t * sizeof(float); // bytes per thread
    char a[31u] = { '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0' };
    for (size_t j = 0u; j < b; ++j) {
        (void)fprintf(stdout, bf, j);
        (void)fflush(stdout);
        const size_t jn = j * n;
        // parallel read of this batch's inputs, one contiguous slice
        // per thread via pread at a thread-specific offset
#ifdef _OPENMP
#pragma omp parallel default(none) shared(ff,fg,fr,fj,a11,a22,a21r,a21i,n,n_t,cnt,jn)
#endif /* _OPENMP */
        {
            const int mt =
#ifdef _OPENMP
                omp_get_thread_num()
#else /* !_OPENMP */
                0
#endif /* ?_OPENMP */
                ;
            const size_t tnt = mt * n_t;
            const off_t off = (jn + tnt) * sizeof(float);
            if ((ssize_t)cnt != pread(ff, (a11 + tnt), cnt, off))
                exit(EXIT_FAILURE);
            if ((ssize_t)cnt != pread(fg, (a22 + tnt), cnt, off))
                exit(EXIT_FAILURE);
            if ((ssize_t)cnt != pread(fr, (a21r + tnt), cnt, off))
                exit(EXIT_FAILURE);
            if ((ssize_t)cnt != pread(fj, (a21i + tnt), cnt, off))
                exit(EXIT_FAILURE);
        }
        (void)fprintf(stdout, ",");
        (void)fflush(stdout);
        // time the batched Jacobi kernel with raw TSC reads;
        // negative n selects the batched interface
        be[0u] = rdtsc_beg(rd);
        const fint _n = -(fint)n;
        (void)cbjac2_(&_n, a11, a22, a21r, a21i, c, cat, sat, l1, l2, p);
        be[1u] = rdtsc_end(rd);
        (void)fprintf(stdout, "%15.9Lf,", tsc_lap(hz, be[0u], be[1u]));
        (void)fflush(stdout);
        // o = worst rotation orthogonality error, r = worst residual
        wide o = W_ZERO, r = W_ZERO;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,a11,a22,a21r,a21i,c,cat,sat,l1,l2,w) reduction(max:o,r)
#endif /* _OPENMP */
        for (size_t i = 0u; i < n; ++i) {
            const wide CS = (wide)(c[i]);
            const wide SNR = (wide)(cat[i]);
            const wide SNI = (wide)(sat[i]);
            wide AE = W_ZERO, AN = W_ZERO;
            o = fmaxw(o, (w[i] = worc(CS, SNR, SNI)));
            r = fmaxw(r, wrec(a11[i], a22[i], a21r[i], a21i[i], CS, SNR, SNI, l1[i], l2[i], &AE, &AN));
        }
        (void)fprintf(stdout, "%s,", xtoa(a, (long double)o));
        (void)fprintf(stdout, "%s", xtoa(a, (long double)r));
        (void)fflush(stdout);
        // locate (the lowest index of) the worst orthogonality error
        size_t ix = n;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,o,w) reduction(min:ix)
#endif /* _OPENMP */
        for (size_t i = 0u; i < n; ++i)
            if (w[i] == o)
                ix = i;
        // dump the offending 2x2 problem and its computed rotation
        (void)fprintf(stderr, "%zu,%zu,%s;", j, ix, xtoa(a, (long double)o));
        (void)fprintf(stderr, "%s,", xtoa(a, a11[ix]));
        (void)fprintf(stderr, "%s,", xtoa(a, a22[ix]));
        (void)fprintf(stderr, "(%s,", xtoa(a, a21r[ix]));
        (void)fprintf(stderr, "%s);", xtoa(a, a21i[ix]));
        (void)fprintf(stderr, "%s,", xtoa(a, c[ix]));
        (void)fprintf(stderr, "(%s,", xtoa(a, cat[ix]));
        (void)fprintf(stderr, "%s);", xtoa(a, sat[ix]));
        (void)fprintf(stderr, "%s,", xtoa(a, l1[ix]));
        (void)fprintf(stderr, "%s\n", xtoa(a, l2[ix]));
        (void)fflush(stderr);
        // reuse cat/sat to hold the reference eigenvalues read from
        // the .k/.l files (parallel pread as above)
#ifdef _OPENMP
#pragma omp parallel default(none) shared(fk,fl,cat,sat,n,n_t,cnt,jn)
#endif /* _OPENMP */
        {
            const int mt =
#ifdef _OPENMP
                omp_get_thread_num()
#else /* !_OPENMP */
                0
#endif /* ?_OPENMP */
                ;
            const size_t tnt = mt * n_t;
            const off_t off = (jn + tnt) * sizeof(float);
            if ((ssize_t)cnt != pread(fk, (cat + tnt), cnt, off))
                exit(EXIT_FAILURE);
            if ((ssize_t)cnt != pread(fl, (sat + tnt), cnt, off))
                exit(EXIT_FAILURE);
        }
        (void)fprintf(stdout, ",");
        (void)fflush(stdout);
        // compare computed eigenvalues (l1,l2) to the references
        wide x = W_ZERO, m = W_ZERO;
        r = W_ZERO;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,l1,l2,cat,sat) reduction(max:r,x,m)
#endif /* _OPENMP */
        for (size_t i = 0u; i < n; ++i) {
            wide AE = W_ZERO, AN = W_ZERO;
            const wide RE = wlam(l1[i], l2[i], cat[i], sat[i], &AE, &AN);
            r = fmaxw(r, RE);
            x = fmaxw(x, AE);
            m = fmaxw(m, AN);
        }
        (void)fprintf(stdout, "%s,", xtoa(a, (long double)r));
        (void)fprintf(stdout, "%s,", xtoa(a, (long double)x));
        (void)fprintf(stdout, "%s\n", xtoa(a, (long double)m));
        (void)fflush(stdout);
    }
    // release in reverse order of acquisition
    (void)close(fj);
    (void)close(fr);
    (void)close(fg);
    (void)close(ff);
    (void)close(fl);
    (void)close(fk);
    free(p);
    free(w);
    free(l2);
    free(l1);
    free(sat);
    free(cat);
    free(c);
    free(a21i);
    free(a21r);
    free(a22);
    free(a11);
    free(fn);
    return EXIT_SUCCESS;
}
|
GB_unaryop__ainv_int32_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int32_int32
// op(A') function: GB_tran__ainv_int32_int32
// C type: int32_t
// A type: int32_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// C = -A, entrywise additive inverse (AINV), int32_t input and output:
// Cx [p] = (int32_t) (-Ax [p]) for all p in [0, anz).
//
// Cx        [out] result array, anz entries
// Ax        [in]  input array, anz entries
// anz       number of entries in A (and C)
// nthreads  number of OpenMP threads for the statically scheduled loop
//
// Returns GrB_SUCCESS, or GrB_NO_VALUE when this operator was disabled at
// compile time via GB_DISABLE (GxB_NO_AINV || GxB_NO_INT32, see GB_control.h).
GrB_Info GB_unop__ainv_int32_int32
(
    int32_t *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // expands (via GB_GETA / GB_CASTING / GB_OP) to: Cx [p] = -Ax [p]
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A and apply the AINV (negate) operator,
// int32 in / int32 out.  The real work is the shared template
// GB_unaryop_transpose.c, included here with this operator's GB_* macros
// bound, in phase 2 of the two-phase transpose.
//
// Rowcounts, Iter, A_slice, and naslice describe how A has been sliced
// for parallel transposition (naslice slices).
//
// Returns GrB_SUCCESS, or GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB_tran__ainv_int32_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
kernel_2mm.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
 * kernel_2mm: two chained matrix multiplications,
 *     tmp = alpha * A * B
 *     D   = beta * D + tmp * C
 * i.e. D := beta*D + alpha*A*B*C.
 *
 * tmp  ni x nj  scratch, fully overwritten
 * A    ni x nk, B  nk x nj, C  nj x nl  (inputs)
 * D    ni x nl  updated in place
 *
 * Auto-generated (CLooG/Pluto) parallel form.  The branch ladder first
 * specializes the fused initialization (scale D by beta, zero tmp) for every
 * ordering of nj vs nl, then runs the two products when all extents are
 * positive.  Changes from the generated original: stray empty statements
 * (";;") removed and unused generated locals dropped (t4/t5 are kept because
 * the OpenMP private() clauses name them).
 */
void kernel_2mm(int ni, int nj, int nk, int nl,
    double alpha,
    double beta,
    double tmp[ ni + 0][nj + 0],
    double A[ ni + 0][nk + 0],
    double B[ nk + 0][nj + 0],
    double C[ nj + 0][nl + 0],
    double D[ ni + 0][nl + 0])
{
  int t2, t3, t4, t5;
  int lbp, ubp;
  register int lbv, ubv;
/* Start of CLooG code */
  if (ni >= 1) {
    /* Init, nj > nl >= 1: shared prefix gets both statements, the extra
       tmp columns [nl, nj) only get zeroed. */
    if ((nj >= nl+1) && (nl >= 1)) {
      lbp=0;
      ubp=ni-1;
#pragma omp parallel for private(lbv,ubv,t3,t4,t5)
      for (t2=lbp;t2<=ubp;t2++) {
        lbv=0;
        ubv=nl-1;
#pragma ivdep
#pragma vector always
        for (t3=lbv;t3<=ubv;t3++) {
          D[t2][t3] *= beta;
          tmp[t2][t3] = 0.0;
        }
        lbv=nl;
        ubv=nj-1;
#pragma ivdep
#pragma vector always
        for (t3=lbv;t3<=ubv;t3++) {
          tmp[t2][t3] = 0.0;
        }
      }
    }
    /* Init, 1 <= nj < nl: extra D columns [nj, nl) only get scaled. */
    if ((nj >= 1) && (nj <= nl-1)) {
      lbp=0;
      ubp=ni-1;
#pragma omp parallel for private(lbv,ubv,t3,t4,t5)
      for (t2=lbp;t2<=ubp;t2++) {
        lbv=0;
        ubv=nj-1;
#pragma ivdep
#pragma vector always
        for (t3=lbv;t3<=ubv;t3++) {
          D[t2][t3] *= beta;
          tmp[t2][t3] = 0.0;
        }
        lbv=nj;
        ubv=nl-1;
#pragma ivdep
#pragma vector always
        for (t3=lbv;t3<=ubv;t3++) {
          D[t2][t3] *= beta;
        }
      }
    }
    /* Init, nj == nl >= 1: one fused loop covers both arrays. */
    if ((nj >= 1) && (nj == nl)) {
      lbp=0;
      ubp=ni-1;
#pragma omp parallel for private(lbv,ubv,t3,t4,t5)
      for (t2=lbp;t2<=ubp;t2++) {
        lbv=0;
        ubv=nj-1;
#pragma ivdep
#pragma vector always
        for (t3=lbv;t3<=ubv;t3++) {
          D[t2][t3] *= beta;
          tmp[t2][t3] = 0.0;
        }
      }
    }
    /* Init, nl empty: only tmp needs zeroing. */
    if ((nj >= 1) && (nl <= 0)) {
      lbp=0;
      ubp=ni-1;
#pragma omp parallel for private(lbv,ubv,t3,t4,t5)
      for (t2=lbp;t2<=ubp;t2++) {
        lbv=0;
        ubv=nj-1;
#pragma ivdep
#pragma vector always
        for (t3=lbv;t3<=ubv;t3++) {
          tmp[t2][t3] = 0.0;
        }
      }
    }
    /* Init, nj empty: only D needs scaling. */
    if ((nj <= 0) && (nl >= 1)) {
      lbp=0;
      ubp=ni-1;
#pragma omp parallel for private(lbv,ubv,t3,t4,t5)
      for (t2=lbp;t2<=ubp;t2++) {
        lbv=0;
        ubv=nl-1;
#pragma ivdep
#pragma vector always
        for (t3=lbv;t3<=ubv;t3++) {
          D[t2][t3] *= beta;
        }
      }
    }
    /* Main compute: build tmp row entry, then immediately stream it into D
       (the two products are fused over t3). */
    if ((nj >= 1) && (nk >= 1) && (nl >= 1)) {
      lbp=0;
      ubp=ni-1;
#pragma omp parallel for private(lbv,ubv,t3,t4,t5)
      for (t2=lbp;t2<=ubp;t2++) {
        for (t3=0;t3<=nj-1;t3++) {
          for (t5=0;t5<=nk-1;t5++) {
            tmp[t2][t3] += alpha * A[t2][t5] * B[t5][t3];
          }
          lbv=0;
          ubv=nl-1;
#pragma ivdep
#pragma vector always
          for (t5=lbv;t5<=ubv;t5++) {
            D[t2][t5] += tmp[t2][t3] * C[t3][t5];
          }
        }
      }
    }
    /* nl empty: only the first product is computed. */
    if ((nj >= 1) && (nk >= 1) && (nl <= 0)) {
      lbp=0;
      ubp=ni-1;
#pragma omp parallel for private(lbv,ubv,t3,t4,t5)
      for (t2=lbp;t2<=ubp;t2++) {
        for (t3=0;t3<=nj-1;t3++) {
          for (t5=0;t5<=nk-1;t5++) {
            tmp[t2][t3] += alpha * A[t2][t5] * B[t5][t3];
          }
        }
      }
    }
    /* nk empty: tmp stays all-zero, only the second product runs. */
    if ((nj >= 1) && (nk <= 0) && (nl >= 1)) {
      lbp=0;
      ubp=ni-1;
#pragma omp parallel for private(lbv,ubv,t3,t4,t5)
      for (t2=lbp;t2<=ubp;t2++) {
        for (t3=0;t3<=nj-1;t3++) {
          lbv=0;
          ubv=nl-1;
#pragma ivdep
#pragma vector always
          for (t5=lbv;t5<=ubv;t5++) {
            D[t2][t5] += tmp[t2][t3] * C[t3][t5];
          }
        }
      }
    }
  }
/* End of CLooG code */
}
|
detector.c | #include "darknet.h"
#include <stdbool.h>
#include "image.h"
static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90};
//enum chi_province {wan = 35, shan, jin1, jin2, hu, yu1, meng, xin, zang, qing, lu, yu2, su, zhe, ning, gui1, hei, ji1, liao, jin3, ji3, ming, gan, xiang, ee, yue, qiong, gan2, gui2, yun, chuan};
const char *str_province[] = {"wan", "shan", "jin1", "jin2", "hu", "yu1", "meng", "xin", "zang", "qing", "lu", "yu2", "su", "zhe", "ning", "gui1", "hei", "ji1", "liao", "jin3", "ji3", "ming", "gan", "xiang", "ee", "yue", "qiong", "gan2", "gui2", "yun", "chuan"};
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
list *options = read_data_cfg(datacfg);
char *train_images = option_find_str(options, "train", "data/train.list");
char *backup_directory = option_find_str(options, "backup", "/backup/");
srand(time(0));
char *base = basecfg(cfgfile);
printf("%s\n", base);
float avg_loss = -1;
network **nets = calloc(ngpus, sizeof(network));
srand(time(0));
int seed = rand();
int i;
for(i = 0; i < ngpus; ++i){
srand(seed);
#ifdef GPU
cuda_set_device(gpus[i]);
#endif
nets[i] = load_network(cfgfile, weightfile, clear);
nets[i]->learning_rate *= ngpus;
}
srand(time(0));
network *net = nets[0];
int imgs = net->batch * net->subdivisions * ngpus;//huigb 64,ngpus==1
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
data train, buffer;
layer l = net->layers[net->n - 1];
int classes = l.classes;
float jitter = l.jitter;
list *plist = get_paths(train_images);
//int N = plist->size;
char **paths = (char **)list_to_array(plist);
load_args args = get_base_args(net);
args.coords = l.coords;
args.paths = paths;
args.n = imgs;//huigb 64
args.m = plist->size;
args.classes = classes;
args.jitter = jitter;
args.num_boxes = l.max_boxes;
args.d = &buffer;
args.type = DETECTION_DATA;
//args.type = INSTANCE_DATA;
args.threads = 64;
pthread_t load_thread = load_data(args);
double time;
int count = 0;
//while(i*imgs < N*120){
while(get_current_batch(net) < net->max_batches){
if(l.random && count++%10 == 0){
printf("Resizing\n");
int dim = (rand() % 10 + 10) * 32;
if (get_current_batch(net)+200 > net->max_batches) dim = 608;
//int dim = (rand() % 4 + 16) * 32;
printf("%d\n", dim);
args.w = dim;
args.h = dim;
pthread_join(load_thread, 0);
train = buffer;
free_data(train);
load_thread = load_data(args);
#pragma omp parallel for
for(i = 0; i < ngpus; ++i){
resize_network(nets[i], dim, dim);
}
net = nets[0];
}
time=what_time_is_it_now();
pthread_join(load_thread, 0);
train = buffer;
load_thread = load_data(args);
/*
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[10] + 1 + k*5);
if(!b.x) break;
printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
}
*/
/*
int zz;
for(zz = 0; zz < train.X.cols; ++zz){
image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]);
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[zz] + k*5, 1);
printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
draw_bbox(im, b, 1, 1,0,0);
}
show_image(im, "truth11");
cvWaitKey(0);
save_image(im, "truth11");
}
*/
printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);
time=what_time_is_it_now();
float loss = 0;
#ifdef GPU
if(ngpus == 1){
loss = train_network(net, train);
} else {
loss = train_networks(nets, ngpus, train, 4);
}
#else
loss = train_network(net, train);
#endif
if (avg_loss < 0) avg_loss = loss;
avg_loss = avg_loss*.9 + loss*.1;
i = get_current_batch(net);
printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
if(i%100==0){
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s.backup", backup_directory, base);
save_weights(net, buff);
}
if(i%10000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
save_weights(net, buff);
}
free_data(train);
}
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_final.weights", backup_directory, base);
save_weights(net, buff);
}
// Extract the numeric COCO image id from a path such as
// ".../COCO_val2014_000000001234.jpg": parse the digits after the last '_'
// (or, if there is no '_', after the last '/').  atoi stops at the first
// non-digit, so a trailing extension is ignored.
static int get_coco_image_id(char *filename)
{
    char *p = strrchr(filename, '/');
    char *c = strrchr(filename, '_');
    if(c) p = c;
    // Guard: a bare name with neither separator used to dereference
    // NULL + 1; parse from the start of the string instead.
    if(!p) return atoi(filename);
    return atoi(p+1);
}
// Write one COCO-results JSON record per (detection, class) pair with a
// nonzero score.  Boxes arrive center-based (x, y, w, h); they are converted
// to corner form, clamped to the w x h image, and emitted as
// [x, y, width, height] as the COCO evaluator expects.
static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h)
{
    int image_id = get_coco_image_id(image_path);
    for (int d = 0; d < num_boxes; ++d) {
        float half_w = dets[d].bbox.w/2.;
        float half_h = dets[d].bbox.h/2.;
        float xmin = dets[d].bbox.x - half_w;
        float xmax = dets[d].bbox.x + half_w;
        float ymin = dets[d].bbox.y - half_h;
        float ymax = dets[d].bbox.y + half_h;

        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        float bx = xmin;
        float by = ymin;
        float bw = xmax - xmin;
        float bh = ymax - ymin;

        for (int c = 0; c < classes; ++c) {
            if (!dets[d].prob[c]) continue;
            fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n", image_id, coco_ids[c], bx, by, bw, bh, dets[d].prob[c]);
        }
    }
}
// Write VOC-style results: one line "<id> <score> <xmin> <ymin> <xmax> <ymax>"
// per nonzero-scored class, into that class's own output file fps[class].
// VOC coordinates are 1-based, hence the +1 shift and the clamp to
// [1, w] x [1, h].
void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h)
{
    for (int d = 0; d < total; ++d) {
        float half_w = dets[d].bbox.w/2.;
        float half_h = dets[d].bbox.h/2.;
        float xmin = dets[d].bbox.x - half_w + 1;
        float xmax = dets[d].bbox.x + half_w + 1;
        float ymin = dets[d].bbox.y - half_h + 1;
        float ymax = dets[d].bbox.y + half_h + 1;

        if (xmin < 1) xmin = 1;
        if (ymin < 1) ymin = 1;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        for (int c = 0; c < classes; ++c) {
            if (!dets[d].prob[c]) continue;
            fprintf(fps[c], "%s %f %f %f %f %f\n", id, dets[d].prob[c],
                    xmin, ymin, xmax, ymax);
        }
    }
}
// Write ImageNet-detection results: one line
// "<image id> <1-based class> <score> <xmin> <ymin> <xmax> <ymax>" per
// nonzero-scored class, with the corner-form box clamped to the image.
void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h)
{
    for (int d = 0; d < total; ++d) {
        float half_w = dets[d].bbox.w/2.;
        float half_h = dets[d].bbox.h/2.;
        float xmin = dets[d].bbox.x - half_w;
        float xmax = dets[d].bbox.x + half_w;
        float ymin = dets[d].bbox.y - half_h;
        float ymax = dets[d].bbox.y + half_h;

        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        for (int c = 0; c < classes; ++c) {
            if (!dets[c < 0 ? 0 : d].prob[c]) continue;
            fprintf(fp, "%d %d %f %f %f %f %f\n", id, c+1, dets[d].prob[c],
                    xmin, ymin, xmax, ymax);
        }
    }
}
// Evaluate a detector on the "valid" image list with horizontal-flip
// test-time augmentation: each image is fed twice in one batch of 2
// (original + mirrored), and detections are written in the configured
// output format ("coco" JSON, "imagenet" text, or per-class VOC files).
// Image loading is pipelined: nthreads loader threads prefetch the next
// group while the current group is being predicted.
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);
    network *net = load_network(cfgfile, weightfile, 0);
    // batch of 2: slot 0 holds the image, slot 1 its horizontal flip
    set_batch_network(net, 2);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));
    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);
    layer l = net->layers[net->n-1];
    int classes = l.classes;
    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    // Select the output format and open the result file(s).
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;
    } else {
        // default VOC output: one file per class
        if(!outfile) outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }
    int m = plist->size;
    int i=0;
    int t;
    float thresh = .005;
    float nms = .45;
    int nthreads = 4;
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
    // input holds both copies of one image: original then flipped
    image input = make_image(net->w, net->h, net->c*2);
    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    //args.type = IMAGE_DATA;
    args.type = LETTERBOX_DATA;
    // Prime the pipeline with the first nthreads images.
    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        // Collect the group loaded in the previous iteration...
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        // ...and immediately start loading the next group.
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        // Predict on the collected group.
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1);
            flip_image(val_resized[t]);
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1);
            network_predict(net, input.data);
            int w = val[t].w;
            int h = val[t].h;
            int num = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num);
            if (nms) do_nms_sort(dets, num, classes, nms);
            if (coco){
                print_cocos(fp, path, dets, num, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, num, classes, w, h);
            }
            free_detections(dets, num);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        // back up over the trailing ",\n" before closing the JSON array
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
// Evaluate a detector on the "valid" image list (single forward pass per
// image, batch 1 -- the no-flip counterpart of validate_detector_flip) and
// write detections in the configured output format ("coco" JSON,
// "imagenet" text, or per-class VOC files).  Loading is pipelined across
// nthreads background loader threads.
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));
    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);
    layer l = net->layers[net->n-1];
    int classes = l.classes;
    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    // Select output format and open result file(s).
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;
    } else {
        // default VOC output: one file per class
        if(!outfile) outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }
    int m = plist->size;
    int i=0;
    int t;
    float thresh = .005;
    float nms = .45;
    int nthreads = 4;
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    //args.type = IMAGE_DATA;
    args.type = LETTERBOX_DATA;
    // Prime the pipeline with the first nthreads images.
    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        // Collect the previously loaded group...
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        // ...and start loading the next one.
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        // Predict on the collected group.
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            float *X = val_resized[t].data;
            network_predict(net, X);
            int w = val[t].w;
            int h = val[t].h;
            int nboxes = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes);
            if (nms) do_nms_sort(dets, nboxes, classes, nms);
            if (coco){
                print_cocos(fp, path, dets, nboxes, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, nboxes, classes, w, h);
            }
            free_detections(dets, nboxes);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        // back up over the trailing ",\n" before closing the JSON array
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
/*
 * Measure raw proposal recall on data/coco_val_5k.list: for every
 * ground-truth box, find the best-IoU detection whose objectness exceeds
 * thresh, and count it correct when that IoU > iou_thresh.  Prints running
 * proposals/image, mean best-IoU, and recall after every image.
 *
 * Fixes vs. the original: the best-IoU scan indexed dets[] up to
 * l.w*l.h*l.n even though get_network_boxes returned only nboxes entries
 * (out-of-bounds read); dets and truth were also leaked every iteration.
 */
void validate_detector_recall(char *cfgfile, char *weightfile)
{
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));
    list *plist = get_paths("data/coco_val_5k.list");
    char **paths = (char **)list_to_array(plist);
    layer l = net->layers[net->n-1];
    int j, k;
    int m = plist->size;
    int i=0;
    float thresh = .001;
    float iou_thresh = .5;
    float nms = .4;
    int total = 0;     /* ground-truth boxes seen so far */
    int correct = 0;   /* ground-truth boxes recalled */
    int proposals = 0; /* detections above thresh so far */
    float avg_iou = 0;
    for(i = 0; i < m; ++i){
        char *path = paths[i];
        image orig = load_image_color(path, 0, 0);
        image sized = resize_image(orig, net->w, net->h);
        char *id = basecfg(path);
        network_predict(net, sized.data);
        int nboxes = 0;
        detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes);
        /* NOTE(review): nms is run class-agnostically (1 class) here --
           presumably intentional for recall measurement; confirm. */
        if (nms) do_nms_obj(dets, nboxes, 1, nms);
        /* Derive the label-file path from the image path. */
        char labelpath[4096];
        find_replace(path, "images", "labels", labelpath);
        find_replace(labelpath, "JPEGImages", "labels", labelpath);
        find_replace(labelpath, ".jpg", ".txt", labelpath);
        find_replace(labelpath, ".JPEG", ".txt", labelpath);
        int num_labels = 0;
        box_label *truth = read_boxes(labelpath, &num_labels);
        for(k = 0; k < nboxes; ++k){
            if(dets[k].objectness > thresh){
                ++proposals;
            }
        }
        for (j = 0; j < num_labels; ++j) {
            ++total;
            box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
            float best_iou = 0;
            /* Fix: scan only the nboxes detections actually returned. */
            for(k = 0; k < nboxes; ++k){
                float iou = box_iou(dets[k].bbox, t);
                if(dets[k].objectness > thresh && iou > best_iou){
                    best_iou = iou;
                }
            }
            avg_iou += best_iou;
            if(best_iou > iou_thresh){
                ++correct;
            }
        }
        fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total);
        free(truth);
        free_detections(dets, nboxes);
        free(id);
        free_image(orig);
        free_image(sized);
    }
}
// Map an OCR province label (e.g. "wan") to its Chinese licence-plate glyph
// (e.g. "皖").  chname is looked up in str_province[] (which is parallel to
// the CHINESE_PROVINCE enum, wan..chuan); on a match the corresponding glyph
// is copied into province_name, otherwise a diagnostic is printed and
// province_name is left untouched.  pro_num is currently unused.
void get_province(int pro_num, char* chname, char *province_name)
{
    // Glyph table parallel to str_province: entry k belongs to enum value
    // wan + k.  Each glyph is a 3-byte UTF-8 sequence plus NUL.
    static const char *glyphs[] = {
        "皖", "陕", "京", "津", "沪", "渝", "蒙", "新", "藏", "青",
        "鲁", "豫", "苏", "浙", "宁", "桂", "黑", "吉", "辽", "晋",
        "冀", "闽", "赣", "湘", "鄂", "粤", "琼", "甘", "贵", "云",
        "川"
    };
    CHINESE_PROVINCE pro_enum;
    bool province_find = false;
    for (pro_enum = wan; pro_enum <= chuan; pro_enum++) {
        if (strcmp(chname, str_province[pro_enum - 35]) == 0) {
            province_find = true;
            break;
        }
    }
    if (province_find) {
        strcpy(province_name, glyphs[pro_enum - wan]);
    } else {
        printf("unknown province name!\n");
    }
}
//huiguobao definition get_chinese_char func:
// Build the licence-plate string: for every detection, append the name of
// each class whose score exceeds thresh to get_name, separating multiple
// classes of one detection with ", ".  Class ids >= 35 are province codes
// and are translated to their Chinese glyph via get_province().
//
// Fix vs. the original: namestr was char[3], but get_province strcpy's a
// 3-byte UTF-8 glyph plus the NUL terminator (4 bytes) -- a stack buffer
// overflow.  The buffer is now char[4].
// NOTE(review): get_name is assumed large enough for all appended names
// (callers pass char[20]) -- confirm against worst-case plate length.
void get_chinese_lp(detection *dets, int num, float thresh, char **names, int classes, char* get_name)
{
    int i,j;
    for(i = 0; i < num; ++i){
        int class = -1;
        for(j = 0; j < classes; ++j){
            if (dets[i].prob[j] > thresh){
                if (class >= 0) strcat(get_name, ", ");
                if (j >= 35){
                    char namestr[4] = {0}; // 3-byte UTF-8 glyph + NUL
                    get_province(j, names[j], namestr);
                    strcat(get_name, namestr);
                }else{
                    strcat(get_name, names[j]);
                }
                class = j;
                printf("%s: %.0f%%\n", names[j], dets[i].prob[j]*100);
            }
        }
    }
}
// Interactive licence-plate recognition on single images: run the hard-coded
// plate-detection network on each input image, crop every detected plate,
// run the OCR network on the crop, assemble the plate string (Chinese glyph
// + characters) via get_chinese_lp, and draw a box on the original image.
// Prompts for image paths on stdin unless filename is given (then runs once).
// NOTE(review): datacfg/cfgfile/weightfile/outfile/fullscreen are currently
// ignored -- network paths are hard-coded below.
void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen)
{
    //list *options = read_data_cfg(datacfg);
    //char *name_list = option_find_str(options, "names", "data/names.list");
    //char **names = get_labels(name_list);
    //metadata meta = get_metadata("./lp_net/lpdetect/lp.data");
    network *net = load_network("./lp_net/lpdetect/yolov3-lp.cfg", "./lp_net/lpdetect/yolov3-lp_final.weights", 0);
    metadata meta_ocr = get_metadata("./lp_net/lpscr/lpscr.data");
    network *net_ocr = load_network("./lp_net/lpscr/lpscr-net.cfg", "./lp_net/lpscr/lpscr-net_final.weights", 0);
    //image **alphabet = load_alphabet();//input data/labels
    //set_batch_network(net, 1);
    //set_batch_network(net_ocr, 1);
    //srand(2222222);
    //double time;
    char buff[256];
    char *input = buff;
    float nms=.45;
    //char ch_name[20];
    while(1){
        // Get the image path: fixed argument, or read one from stdin.
        if(filename){
            strncpy(input, filename, 256);
        } else {
            printf("Enter Image Path: ");
            fflush(stdout);
            input = fgets(input, 256, stdin);
            if(!input) return;
            strtok(input, "\n"); // strip the trailing newline
        }
        image im = load_image_color(input,0,0);
        //image sized = letterbox_image(im, net->w, net->h);
        //image sized = resize_image(im, net->w, net->h);
        //image sized2 = resize_max(im, net->w);
        //image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h);
        //resize_network(net, sized.w, sized.h);
        layer l = net->layers[net->n-1];
        layer l_ocr = net_ocr->layers[net_ocr->n-1];
        //float *X = sized.data;
        //time=what_time_is_it_now();
        //network_predict(net, X);
        //printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time);
        // Stage 1: find plates in the full image.
        network_predict_image(net,im);
        int nboxes = 0;
        detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
        printf("%d\n", nboxes);
        //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
        if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
        int i;
        //huiguobao crop image:
        for(i = 0; i < nboxes; ++i){
            char ch_name[20] = {0};
            //char save_name[20] = {0};
            int sign = -1; // index of an above-threshold class, -1 if none
            int j;
            //image im_crop = get_crop_detect(im, dets, i, thresh, meta.names, l.classes);
            for(j = 0; j < l.classes; ++j){
                if (dets[i].prob[j] > thresh) sign = j;
            }
            if (sign >= 0){
                // Stage 2: crop the plate and run OCR on it.
                image im_crop = get_crop_detect(im, dets[i].bbox);
                if (im_crop.w == 0) break; // empty crop: stop processing boxes
                network_predict_image(net_ocr, im_crop);
                int nboxes_ocr = 0;
                detection *dets_ocr = get_network_boxes(net_ocr, im_crop.w, im_crop.h, thresh, hier_thresh, 0, 1, &nboxes_ocr);
                do_nms_sort(dets_ocr, nboxes_ocr, l_ocr.classes, nms);
                do_dets_sort(dets_ocr, nboxes_ocr); // order characters for reading
                get_chinese_lp(dets_ocr, nboxes_ocr, thresh, meta_ocr.names, l_ocr.classes, ch_name);
                if (strlen(ch_name)){
                    //sprintf(ch_name, "%s%04d", outfile, i);//"%s%04d":reserve four 0
                    //save_image(im_crop, ch_name);
                    printf("chinese LP name:%s\n", ch_name);
                }
                //draw_detections(im_crop, dets_ocr, nboxes_ocr, thresh, meta_ocr.names, alphabet, l_ocr.classes);
                free_detections(dets_ocr, nboxes_ocr);
                /*
                if (outfile){
                sprintf(save_name, "%s%02d", outfile, i);//"%s%04d":reserve four 0
                save_image(im_crop, save_name);
                }else{
                #ifdef OPENCV
                make_window("predictions_ocr", 512, 512, 0);
                show_image(im_crop, "predictions_ocr", 0);
                #endif
                }
                */
                free_image(im_crop);
            }
            draw_bbox(im, dets[i].bbox, 5, 1,0,0);
        }
        //draw_detections(im, dets, nboxes, thresh, meta.names, alphabet, l.classes);
        free_detections(dets, nboxes);
        /*
        if(outfile){
        save_image(im, outfile);
        }
        else{
        save_image(im, "predictions");
        #ifdef OPENCV
        make_window("predictions", 512, 512, 0);
        show_image(im, "predictions", 0);
        #endif
        }
        */
        free_image(im);
        //free_image(sized);
        if (filename) break;
    }
}
// Live licence-plate recognition on a video stream (file or webcam): for
// each frame, detect plates, crop each one, run the OCR network on the crop,
// print the assembled plate string, draw boxes, and display the frame until
// ESC is pressed (or the video file's single pass completes).
// NOTE(review): prefix only suppresses window creation; frames is the
// capture FPS request passed to open_video_stream.
void test_demo(int cam_index, char *filename, char *prefix, int w, int h, int frames, int fullscreen)
{
    //list *options = read_data_cfg(datacfg);
    //char *name_list = option_find_str(options, "names", "data/names.list");
    //char **names = get_labels(name_list);
    //metadata meta = get_metadata("./lp_net/lpdetect/lp.data");
    network *net = load_network("./lp_net/lpdetect/yolov3-lp.cfg", "./lp_net/lpdetect/yolov3-lp_final.weights", 0);
    metadata meta_ocr = get_metadata("./lp_net/lpscr/lpscr.data");
    network *net_ocr = load_network("./lp_net/lpscr/lpscr-net.cfg", "./lp_net/lpscr/lpscr-net_final.weights", 0);
    //image **alphabet = load_alphabet();//input data/labels
    //set_batch_network(net, 1);
    //set_batch_network(net_ocr, 1);
    //srand(2222222);
    double time;
    //char buff[256];
    //char *input = buff;
    float nms=.45;
    void * cap;
    float thresh = 0.5;
    float hier_thresh = 0.5;
    //char ch_name[20];
    // Open the source: video file when given, otherwise the webcam.
    if(filename){
        //printf("video file: %s\n", filename);
        cap = open_video_stream(filename, 0, 0, 0, 0);
    }else{
        cap = open_video_stream(0, cam_index, w, h, frames);
    }
    if(!cap) error("Couldn't connect to webcam.\n");
    if(!prefix){
        make_window("Demo", 1352, 1013, fullscreen);
    }
    while(1){
        image im = get_image_from_stream(cap);
        //image sized = letterbox_image(im, net->w, net->h);
        //image sized = resize_image(im, net->w, net->h);
        //image sized2 = resize_max(im, net->w);
        //image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h);
        //resize_network(net, sized.w, sized.h);
        layer l = net->layers[net->n-1];
        layer l_ocr = net_ocr->layers[net_ocr->n-1];
        //float *X = sized.data;
        time=what_time_is_it_now();
        //network_predict(net, X);
        // Stage 1: find plates in the frame.
        network_predict_image(net,im);
        int nboxes = 0;
        detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
        printf("%d\n", nboxes);
        //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
        if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
        int i;
        //huiguobao crop image:
        for(i = 0; i < nboxes; ++i){
            char ch_name[20] = {0};
            //char save_name[20] = {0};
            int sign = -1; // index of an above-threshold class, -1 if none
            int j;
            //image im_crop = get_crop_detect(im, dets, i, thresh, meta.names, l.classes);
            for(j = 0; j < l.classes; ++j){
                if (dets[i].prob[j] > thresh) sign = j;
            }
            if (sign >= 0){
                // Stage 2: crop the plate and run OCR on it.
                image im_crop = get_crop_detect(im, dets[i].bbox);
                if (im_crop.w == 0) break; // empty crop: stop processing boxes
                network_predict_image(net_ocr, im_crop);
                int nboxes_ocr = 0;
                detection *dets_ocr = get_network_boxes(net_ocr, im_crop.w, im_crop.h, thresh, hier_thresh, 0, 1, &nboxes_ocr);
                do_nms_sort(dets_ocr, nboxes_ocr, l_ocr.classes, nms);
                do_dets_sort(dets_ocr, nboxes_ocr); // order characters for reading
                get_chinese_lp(dets_ocr, nboxes_ocr, thresh, meta_ocr.names, l_ocr.classes, ch_name);
                if (strlen(ch_name)){
                    //sprintf(ch_name, "%s%04d", outfile, i);//"%s%04d":reserve four 0
                    //save_image(im_crop, ch_name);
                    printf("chinese LP name:%s\n", ch_name);
                }
                //draw_detections(im_crop, dets_ocr, nboxes_ocr, thresh, meta_ocr.names, alphabet, l_ocr.classes);
                free_detections(dets_ocr, nboxes_ocr);
                /*
                if (outfile){
                sprintf(save_name, "%s%02d", outfile, i);//"%s%04d":reserve four 0
                save_image(im_crop, save_name);
                }else{
                #ifdef OPENCV
                make_window("predictions_ocr", 512, 512, 0);
                show_image(im_crop, "predictions_ocr", 0);
                #endif
                }
                */
                free_image(im_crop);
            }
            draw_bbox(im, dets[i].bbox, 5, 1,0,0);
        }
        //draw_detections(im, dets, nboxes, thresh, meta.names, alphabet, l.classes);
        free_detections(dets, nboxes);
        /*
        if(outfile){
        save_image(im, outfile);
        }
        else{
        save_image(im, "predictions");
        #ifdef OPENCV
        make_window("predictions", 512, 512, 0);
        show_image(im, "predictions", 0);
        #endif
        }
        */
        int c = show_image(im, "Demo", 1);
        if (c != -1) c = c%256;
        free_image(im);
        //free_image(sized);
        printf("one thread take in %f seconds.\n", what_time_is_it_now()-time);
        if (filename || c == 27) break; // 27 = ESC
    }
}
/*
void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
float nms = .45;
while(1){
image in = get_image_from_stream(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
float *X = in_s.data;
network_predict(net, X);
int nboxes = 0;
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int left = b.x-b.w/2.;
int top = b.y-b.h/2.;
censor_image(in, left, top, b.w, b.h);
}
}
show_image(in, base);
cvWaitKey(10);
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream(cap);
free_image(in);
}
}
#endif
}
void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
int count = 0;
float nms = .45;
while(1){
image in = get_image_from_stream(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
show_image(in, base);
int nboxes = 0;
float *X = in_s.data;
network_predict(net, X);
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int size = b.w*in.w > b.h*in.h ? b.w*in.w : b.h*in.h;
int dx = b.x*in.w-size/2.;
int dy = b.y*in.h-size/2.;
image bim = crop_image(in, dx, dy, size, size);
char buff[2048];
sprintf(buff, "results/extract/%07d", count);
++count;
save_image(bim, buff);
free_image(bim);
}
}
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream(cap);
free_image(in);
}
}
#endif
}
*/
/*
void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets)
{
network_predict_image(net, im);
layer l = net->layers[net->n-1];
int nboxes = num_boxes(net);
fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
}
*/
/*
 * Entry point for the "detector" subcommand.
 *
 * Parses command-line options (camera index, averaging window, GPU list,
 * window geometry, input file) and launches the multi-threaded demo loop
 * implemented in demo.c.
 *
 * Fixes vs. previous version:
 *  - the GPU id array allocated with calloc() was never freed (leak);
 *  - calloc() result is now checked before use;
 *  - strchr() could return NULL on the last comma-separated token, making
 *    "strchr(...) + 1" undefined pointer arithmetic; we now stop cleanly.
 */
void run_detector(int argc, char **argv)
{
    char *prefix = find_char_arg(argc, argv, "-prefix", 0);
    int cam_index = find_int_arg(argc, argv, "-c", 0);
    int avg = find_int_arg(argc, argv, "-avg", 3);
    char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
    int *gpus = 0;
    int gpu = 0;
    int ngpus = 0;
    int gpus_allocated = 0;   /* ownership flag: only free what we calloc'd */
    if (gpu_list) {
        printf("%s\n", gpu_list);
        int len = strlen(gpu_list);
        ngpus = 1;
        int i;
        for (i = 0; i < len; ++i) {
            if (gpu_list[i] == ',') ++ngpus;
        }
        gpus = calloc(ngpus, sizeof(int));
        if (!gpus) {
            fprintf(stderr, "run_detector: calloc failed\n");
            return;
        }
        gpus_allocated = 1;
        for (i = 0; i < ngpus; ++i) {
            gpus[i] = atoi(gpu_list);
            char *comma = strchr(gpu_list, ',');
            if (!comma) break;          /* last token: avoid NULL + 1 */
            gpu_list = comma + 1;
        }
    } else {
        gpu = gpu_index;    /* #huigb 0 */
        gpus = &gpu;
        ngpus = 1;
    }
    int fullscreen = find_arg(argc, argv, "-fullscreen");
    int width = find_int_arg(argc, argv, "-w", 0);
    int height = find_int_arg(argc, argv, "-h", 0);
    int fps = find_int_arg(argc, argv, "-fps", 0);
    char *filename = find_char_arg(argc, argv, "-filename", 0);
    demo(cam_index, filename, prefix, avg, width, height, fps, fullscreen); /* huigb rewrite demo, in demo.c, multi threads */
    if (gpus_allocated)
        free(gpus);
}
|
cofold.c | /*
* minimum free energy
* RNA secondary structure prediction
*
* c Ivo Hofacker, Chrisoph Flamm
* original implementation by
* Walter Fontana
*
* Vienna RNA package
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctype.h>
#include <string.h>
#include <limits.h>
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/utils/strings.h"
#include "ViennaRNA/utils/structures.h"
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/params/basic.h"
#include "ViennaRNA/subopt.h"
#include "ViennaRNA/fold.h"
#include "ViennaRNA/loops/all.h"
#include "ViennaRNA/gquad.h"
#include "ViennaRNA/alphabet.h"
#include "ViennaRNA/cofold.h"
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
#ifdef _OPENMP
#include <omp.h>
#endif
#endif
#define MAXSECTORS 500 /* dimension for a backtrack array */
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
/* some backward compatibility stuff */
PRIVATE int backward_compat = 0;
PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL;
PRIVATE float mfe1, mfe2; /* minimum free energies of the monomers */
#ifdef _OPENMP
#pragma omp threadprivate(mfe1, mfe2, backward_compat_compound, backward_compat)
#endif
#endif
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
PRIVATE int
backtrack(sect bt_stack[],
vrna_bp_stack_t *bp_list,
vrna_fold_compound_t *vc);
PRIVATE int
fill_arrays(vrna_fold_compound_t *vc,
int zuker);
PRIVATE void
free_end(int *array,
int i,
int start,
vrna_fold_compound_t *vc);
PRIVATE void
doubleseq(vrna_fold_compound_t *vc); /* do magic */
PRIVATE void
halfseq(vrna_fold_compound_t *vc); /* undo magic */
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
/* wrappers for old API compatibility */
PRIVATE void
wrap_array_export(int **f5_p,
int **c_p,
int **fML_p,
int **fM1_p,
int **fc_p,
int **indx_p,
char **ptype_p);
PRIVATE float
wrap_cofold(const char *string,
char *structure,
vrna_param_t *parameters,
int is_constrained);
PRIVATE SOLUTION *
wrap_zukersubopt(const char *string,
vrna_param_t *parameters);
#endif
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
/*
 * Compute the MFE of a dimer (two connected strands) stored in vc.
 * Fills the DP matrices via fill_arrays(), optionally backtracks a
 * dot-bracket structure into 'structure' (caller-supplied buffer of at
 * least length+1 chars), and returns the energy in kcal/mol.
 * On preparation failure a warning is emitted and INF/100 is returned.
 */
PUBLIC float
vrna_mfe_dimer(vrna_fold_compound_t *vc,
char *structure)
{
int length, energy;
char *s;
sect bt_stack[MAXSECTORS]; /* stack of partial structures for backtracking */
vrna_bp_stack_t *bp;
length = (int)vc->length;
vc->sequence_encoding[0] = vc->sequence_encoding2[0]; /* store length at pos. 0 in S1 too */
if (!vrna_fold_compound_prepare(vc, VRNA_OPTION_MFE | VRNA_OPTION_HYBRID)) {
vrna_message_warning("vrna_mfe_dimer@cofold.c: Failed to prepare vrna_fold_compound");
return (float)(INF / 100.);
}
/* call user-defined recursion status callback function */
if (vc->stat_cb)
vc->stat_cb(VRNA_STATUS_MFE_PRE, vc->auxdata);
energy = fill_arrays(vc, 0);
/* call user-defined recursion status callback function */
if (vc->stat_cb)
vc->stat_cb(VRNA_STATUS_MFE_POST, vc->auxdata);
if (structure && vc->params->model_details.backtrack) {
bp = (vrna_bp_stack_t *)vrna_alloc(sizeof(vrna_bp_stack_t) * (4 * (1 + length / 2))); /* add a guess of how many G's may be involved in a G quadruplex */
backtrack(bt_stack, bp, vc);
s = vrna_db_from_bp_stack(bp, length);
strncpy(structure, s, length + 1);
free(s);
free(bp);
}
/* backtrack_type 'C'/'M' report the constrained c/fML entries instead of f5 */
if (vc->params->model_details.backtrack_type == 'C')
return (float)vc->matrices->c[vc->jindx[length] + 1] / 100.;
else if (vc->params->model_details.backtrack_type == 'M')
return (float)vc->matrices->fML[vc->jindx[length] + 1] / 100.;
else
return (float)energy / 100.; /* energies are stored in 1/100 kcal/mol */
}
/*
 * Forward DP recursion for cofold: fills the "c" (pair), "fML" (multiloop),
 * "fc" (cut-point fragment) and "f5" (external loop) arrays and returns
 * the optimal energy (in 1/100 kcal/mol).
 * zuker != 0 restricts j to i + strand length (used by vrna_subopt_zuker(),
 * which doubles the sequence beforehand).
 * Side effects: updates file-scope monomer energies mfe1/mfe2.
 */
PRIVATE int
fill_arrays(vrna_fold_compound_t *vc,
int zuker)
{
/* fill "c", "fML" and "f5" arrays and return optimal energy */
unsigned int strands, *sn, *ss, *se, *so;
int i, j, length, energy;
int uniq_ML;
int no_close, type, maxj, *indx;
int *my_f5, *my_c, *my_fML, *my_fM1, *my_fc;
int *cc, *cc1; /* auxilary arrays for canonical structures */
int *Fmi; /* holds row i of fML (avoids jumps in memory) */
int *DMLi; /* DMLi[j] holds MIN(fML[i,k]+fML[k+1,j]) */
int *DMLi1; /* MIN(fML[i+1,k]+fML[k+1,j]) */
int *DMLi2; /* MIN(fML[i+2,k]+fML[k+1,j]) */
int dangle_model, noGUclosure, noLP, hc_decompose, turn;
char *ptype;
unsigned char *hard_constraints;
vrna_param_t *P;
vrna_mx_mfe_t *matrices;
vrna_hc_t *hc;
length = (int)vc->length;
ptype = vc->ptype;
indx = vc->jindx;
P = vc->params;
dangle_model = P->model_details.dangles;
noGUclosure = P->model_details.noGUclosure;
noLP = P->model_details.noLP;
uniq_ML = P->model_details.uniq_ML;
strands = vc->strands;
sn = vc->strand_number;
ss = vc->strand_start;
se = vc->strand_end;
so = vc->strand_order;
hc = vc->hc;
hard_constraints = hc->mx;
matrices = vc->matrices;
my_f5 = matrices->f5;
my_c = matrices->c;
my_fML = matrices->fML;
my_fM1 = matrices->fM1;
my_fc = matrices->fc;
turn = P->model_details.min_loop_size;
/* allocate memory for all helper arrays */
cc = (int *)vrna_alloc(sizeof(int) * (length + 2));
cc1 = (int *)vrna_alloc(sizeof(int) * (length + 2));
Fmi = (int *)vrna_alloc(sizeof(int) * (length + 1));
DMLi = (int *)vrna_alloc(sizeof(int) * (length + 1));
DMLi1 = (int *)vrna_alloc(sizeof(int) * (length + 1));
DMLi2 = (int *)vrna_alloc(sizeof(int) * (length + 1));
/* hard code min_loop_size to 0, since we can not be sure yet that this is already the case */
/* NOTE(review): 'turn' read from model_details above is deliberately discarded here */
turn = 0;
for (j = 1; j <= length; j++) {
Fmi[j] = DMLi[j] = DMLi1[j] = DMLi2[j] = INF;
my_fc[j] = 0;
}
for (j = 1; j <= length; j++)
for (i = 1; i <= j; i++) {
my_c[indx[j] + i] = my_fML[indx[j] + i] = INF;
if (uniq_ML)
my_fM1[indx[j] + i] = INF;
}
/* outer loop runs i from right to left; inner loop extends intervals [i,j] */
for (i = length - turn - 1; i >= 1; i--) {
/* i,j in [1..length] */
maxj = (zuker) ? (MIN2(i + se[so[0]], length)) : length;
for (j = i + turn + 1; j <= maxj; j++) {
int ij;
ij = indx[j] + i;
type = vrna_get_ptype(ij, ptype);
hc_decompose = hard_constraints[length * i + j];
energy = INF;
no_close = (((type == 3) || (type == 4)) && noGUclosure);
if (hc_decompose) {
/* we have a pair */
int new_c = INF;
if (!no_close) {
/* check for hairpin loop */
energy = vrna_E_hp_loop(vc, i, j);
new_c = MIN2(new_c, energy);
/* check for multibranch loops */
energy = vrna_E_mb_loop_fast(vc, i, j, DMLi1, DMLi2);
new_c = MIN2(new_c, energy);
}
if (dangle_model == 3) {
/* coaxial stacking */
energy = vrna_E_mb_loop_stack(vc, i, j);
new_c = MIN2(new_c, energy);
}
/* check for interior loops */
energy = vrna_E_int_loop(vc, i, j);
new_c = MIN2(new_c, energy);
/* remember stack energy for --noLP option */
if (noLP) {
if ((sn[i] == sn[i + 1]) && (sn[j - 1] == sn[j])) {
int stackEnergy = vrna_E_stack(vc, i, j);
new_c = MIN2(new_c, cc1[j - 1] + stackEnergy);
my_c[ij] = cc1[j - 1] + stackEnergy;
} else {
/* currently we don't allow stacking over the cut point */
my_c[ij] = FORBIDDEN;
}
cc[j] = new_c;
} else {
my_c[ij] = new_c;
}
} /* end >> if (pair) << */
else {
my_c[ij] = INF;
}
/*
* done with c[i,j], now compute fML[i,j]
* free ends ? -----------------------------------------
*/
my_fML[ij] = vrna_E_ml_stems_fast(vc, i, j, Fmi, DMLi);
if (uniq_ML) /* compute fM1 for unique decomposition */
my_fM1[ij] = E_ml_rightmost_stem(i, j, vc);
}
/* update the cut-point fragment array fc once i crosses the first strand */
if (i == se[so[0]] + 1)
for (j = i; j <= maxj; j++)
free_end(my_fc, j, ss[so[1]], vc);
if (i <= se[so[0]])
free_end(my_fc, i, se[so[0]], vc);
{
int *FF; /* rotate the auxilliary arrays */
FF = DMLi2;
DMLi2 = DMLi1;
DMLi1 = DMLi;
DMLi = FF;
FF = cc1;
cc1 = cc;
cc = FF;
for (j = 1; j <= maxj; j++)
cc[j] = Fmi[j] = DMLi[j] = INF;
}
}
/* calculate energies of 5' and 3' fragments */
for (i = 1; i <= length; i++)
free_end(my_f5, i, 1, vc);
if (strands > 1) {
/* record monomer MFEs (exported later via get_monomere_mfes()) */
mfe1 = my_f5[se[so[0]]];
mfe2 = my_fc[length];
/* add DuplexInit, check whether duplex*/
for (i = ss[so[1]]; i <= length; i++) {
if (my_f5[i] != INF)
my_f5[i] += P->DuplexInit;
if ((my_fc[i] != INF) &&
(my_fc[1] != INF)) {
energy = my_fc[i] +
my_fc[1];
if (energy < my_f5[i])
my_f5[i] = energy;
}
}
}
energy = my_f5[length];
if (strands == 1)
mfe1 = mfe2 = energy;
/* clean up memory */
free(cc);
free(cc1);
free(Fmi);
free(DMLi);
free(DMLi1);
free(DMLi2);
return energy;
}
/*
 * Backtrack through the filled DP matrices and append the resulting base
 * pairs to bp_list. 's' is the initial backtrack-stack depth (0 starts a
 * fresh full-sequence trace), 'b' the number of pairs already in bp_list.
 * Returns 1 on success, 0 on failure (a warning is printed and the partial
 * pair count is still stored in bp_list[0].i).
 */
PRIVATE int
backtrack_co(sect bt_stack[],
vrna_bp_stack_t *bp_list,
int s,
int b, /* b=0: start new structure, b \ne 0: add to existing structure */
vrna_fold_compound_t *vc)
{
/*------------------------------------------------------------------
* trace back through the "c", "fc", "f5" and "fML" arrays to get the
* base pairing list. No search for equivalent structures is done.
* This is fast, since only few structure elements are recalculated.
* ------------------------------------------------------------------*/
unsigned int *se, *so;
int i, j, ij, k, length, no_close, type, ret;
char *string = vc->sequence;
vrna_param_t *P = vc->params;
int *indx = vc->jindx;
char *ptype = vc->ptype;
int noLP = P->model_details.noLP;
int noGUclosure = P->model_details.noGUclosure;
char backtrack_type = P->model_details.backtrack_type;
/* the folding matrices */
int *my_c;
ret = 1;
length = vc->length;
my_c = vc->matrices->c;
se = vc->strand_end;
so = vc->strand_order;
/* int b=0;*/
/* NOTE(review): length is immediately recomputed from the sequence string,
* overwriting vc->length read above -- presumably equal; verify */
length = strlen(string);
if (s == 0) {
/* empty stack: seed with the whole sequence; ml encodes the matrix to
* trace ('M' -> fML, 'C' -> c, otherwise f5) */
bt_stack[++s].i = 1;
bt_stack[s].j = length;
bt_stack[s].ml = (backtrack_type == 'M') ? 1 : ((backtrack_type == 'C') ? 2 : 0);
}
while (s > 0) {
int ml, cij;
int canonical = 1; /* (i,j) closes a canonical structure */
/* pop one element from stack */
i = bt_stack[s].i;
j = bt_stack[s].j;
ml = bt_stack[s--].ml;
switch (ml) {
/* backtrack in f5 */
case 0:
{
int p, q;
if (vrna_BT_ext_loop_f5(vc, &j, &p, &q, bp_list, &b)) {
if (j > 0) {
bt_stack[++s].i = 1;
bt_stack[s].j = j;
bt_stack[s].ml = 0;
}
if (p > 0) {
i = p;
j = q;
goto repeat1;
}
continue;
} else {
vrna_message_warning("backtracking failed in f5, segment [%d,%d]\n", i, j);
ret = 0;
goto backtrack_exit;
}
}
break;
/* true multi-loop backtrack in fML */
case 1:
{
int p, q, comp1, comp2;
if (vrna_BT_mb_loop_split(vc, &i, &j, &p, &q, &comp1, &comp2, bp_list, &b)) {
if (i > 0) {
bt_stack[++s].i = i;
bt_stack[s].j = j;
bt_stack[s].ml = comp1;
}
if (p > 0) {
bt_stack[++s].i = p;
bt_stack[s].j = q;
bt_stack[s].ml = comp2;
}
continue;
} else {
vrna_message_warning("backtrack failed in fML\n%s", string);
ret = 0;
goto backtrack_exit;
}
}
break;
case 2:
/* (i,j) is known to pair: record it and trace its interior */
bp_list[++b].i = i;
bp_list[b].j = j;
goto repeat1;
/* backtrack fake-multi loop parts */
case 3:
case 4:
{
int lower, k, p, q;
p = i;
q = j;
lower = (i <= se[so[0]]) ? 1 : 0;
if (vrna_BT_mb_loop_fake(vc, &k, &i, &j, bp_list, &b)) {
if (k > 0) {
bt_stack[++s].i = (lower) ? k : p;
bt_stack[s].j = (lower) ? q : k;
bt_stack[s].ml = ml;
}
if (i > 0)
goto repeat1;
continue;
} else {
vrna_message_warning("backtrack failed in fc\n%s", string);
ret = 0;
goto backtrack_exit;
}
}
break;
} /* end of switch(ml) */
repeat1:
/*----- begin of "repeat:" -----*/
ij = indx[j] + i;
if (canonical)
cij = my_c[ij];
type = vrna_get_ptype(ij, ptype);
if (noLP) {
/* --noLP: peel stacked pairs first; canonical=0 skips re-reading c[ij] */
if (vrna_BT_stack(vc, &i, &j, &cij, bp_list, &b)) {
canonical = 0;
goto repeat1;
}
}
canonical = 1;
no_close = (((type == 3) || (type == 4)) && noGUclosure);
if (no_close) {
if (cij == FORBIDDEN)
continue;
} else {
if (vrna_BT_hp_loop(vc, i, j, cij, bp_list, &b))
continue;
}
if (vrna_BT_int_loop(vc, &i, &j, cij, bp_list, &b)) {
if (i < 0)
continue;
else
goto repeat1;
}
/* (i.j) must close a fake or true multi-loop */
int comp1, comp2;
if (vrna_BT_mb_loop(vc, &i, &j, &k, cij, &comp1, &comp2)) {
bt_stack[++s].i = i;
bt_stack[s].j = k;
bt_stack[s].ml = comp1;
bt_stack[++s].i = k + 1;
bt_stack[s].j = j;
bt_stack[s].ml = comp2;
} else {
vrna_message_warning("backtracking failed in repeat, segment [%d,%d]\n", i, j);
ret = 0;
goto backtrack_exit;
}
/* end of repeat: --------------------------------------------------*/
} /* end >> while (s>0) << */
backtrack_exit:
bp_list[0].i = b; /* save the total number of base pairs */
return ret;
}
/*
 * Extend the external-loop array ('array' is f5 or fc) at position i,
 * growing away from 'start' (inc = +1 when i > start, -1 otherwise).
 * Considers unpaired extension, a terminal stem with all dangle models,
 * g-quadruplex contributions, and soft/hard constraints.
 * Only reads positions between start and i, so it must be called in
 * monotonic order from start outwards.
 */
PRIVATE void
free_end(int *array,
int i,
int start,
vrna_fold_compound_t *vc)
{
unsigned int *sn;
int inc, type, energy, en, length, j, left, right, dangle_model, with_gquad, *indx,
*c, *ggg, turn;
vrna_param_t *P;
short *S1;
char *ptype;
unsigned char *hard_constraints;
vrna_mx_mfe_t *matrices;
vrna_hc_t *hc;
vrna_sc_t *sc;
P = vc->params;
dangle_model = P->model_details.dangles;
with_gquad = P->model_details.gquad;
turn = P->model_details.min_loop_size;
inc = (i > start) ? 1 : -1;
length = (int)vc->length;
S1 = vc->sequence_encoding;
ptype = vc->ptype;
indx = vc->jindx;
sn = vc->strand_number;
matrices = vc->matrices;
c = matrices->c;
ggg = matrices->ggg;
hc = vc->hc;
sc = vc->sc;
hard_constraints = hc->mx;
/* case 1: position i stays unpaired (if hard constraints allow) */
if (hc->up_ext[i]) {
if (i == start)
array[i] = 0;
else
array[i] = array[i - inc];
if (sc) {
if (sc->energy_up)
array[i] += sc->energy_up[i][1];
if (sc->f)
array[i] += sc->f(start, i, start, i - 1, VRNA_DECOMP_EXT_EXT, sc->data);
}
} else {
array[i] = INF;
}
if (inc > 0) {
left = start;
right = i;
} else {
left = i;
right = start;
}
/* hard code min_loop_size to 0, since we can not be sure yet that this is already the case */
turn = 0;
/* case 2: a stem (ii,jj) ends the interval; j scans the other stem end */
for (j = start; inc * (i - j) > turn; j += inc) {
int ii, jj;
short si, sj;
if (i > j) {
ii = j;
jj = i;
} /* inc>0 */
else {
ii = i;
jj = j;
} /* inc<0 */
if (hard_constraints[length * ii + jj] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) {
type = vrna_get_ptype(indx[jj] + ii, ptype);
si = ((ii > 1) && (sn[ii - 1] == sn[ii])) ? S1[ii - 1] : -1;
sj = ((jj < length) && (sn[jj] == sn[jj + 1])) ? S1[jj + 1] : -1;
energy = c[indx[jj] + ii];
if ((sc) && (sc->f))
energy += sc->f(start, jj, ii - 1, ii, VRNA_DECOMP_EXT_EXT_STEM, sc->data);
if (energy != INF) {
switch (dangle_model) {
case 0:
if (array[j - inc] != INF) {
en = array[j - inc] + energy + vrna_E_ext_stem(type, -1, -1, P);
array[i] = MIN2(array[i], en);
}
break;
case 2:
if (array[j - inc] != INF) {
en = array[j - inc] + energy + vrna_E_ext_stem(type, si, sj, P);
array[i] = MIN2(array[i], en);
}
break;
default:
/* dangles==1/3: stem without dangle, plus single 5'/3' dangle variants */
if (array[j - inc] != INF) {
en = array[j - inc] + energy + vrna_E_ext_stem(type, -1, -1, P);
array[i] = MIN2(array[i], en);
}
/* NOTE(review): the 'else if' below pairs with 'if (inc > 0)', so the
* (j < right) branch only runs when growing leftwards -- confirm intended */
if (inc > 0) {
if (j > left) {
if (hc->up_ext[ii - 1]) {
if (array[j - 2] != INF) {
en = array[j - 2] + energy + vrna_E_ext_stem(type, si, -1, P);
if (sc)
if (sc->energy_up)
en += sc->energy_up[ii - 1][1];
array[i] = MIN2(array[i], en);
}
}
}
} else if (j < right) {
if (hc->up_ext[jj + 1]) {
if (array[j + 2] != INF) {
en = array[j + 2] + energy + vrna_E_ext_stem(type, -1, sj, P);
if (sc)
if (sc->energy_up)
en += sc->energy_up[jj + 1][1];
array[i] = MIN2(array[i], en);
}
}
}
break;
}
}
}
/* g-quadruplex ending at the interval boundary (same-strand only) */
if (with_gquad) {
if (sn[ii] == sn[jj])
if (array[j - inc] != INF)
array[i] = MIN2(array[i], array[j - inc] + ggg[indx[jj] + ii]);
}
if (dangle_model % 2 == 1) {
/* interval ends in a dangle (i.e. i-inc is paired) */
if (i > j) {
ii = j;
jj = i - 1;
} /* inc>0 */
else {
ii = i + 1;
jj = j;
} /* inc<0 */
if (!(hard_constraints[length * ii + jj] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP))
continue;
type = vrna_get_ptype(indx[jj] + ii, ptype);
si = (ii > left) && (sn[ii - 1] == sn[ii]) ? S1[ii - 1] : -1;
sj = (jj < right) && (sn[jj] == sn[jj + 1]) ? S1[jj + 1] : -1;
energy = c[indx[jj] + ii];
if (energy != INF) {
if (inc > 0) {
/* NOTE(review): guard tests up_ext[jj-1] but the soft-constraint term
* adds energy_up[jj+1] -- looks inconsistent; verify against upstream */
if (hc->up_ext[jj - 1]) {
if (array[j - inc] != INF) {
en = array[j - inc] + energy + vrna_E_ext_stem(type, -1, sj, P);
if (sc)
if (sc->energy_up)
en += sc->energy_up[jj + 1][1];
array[i] = MIN2(array[i], en);
}
}
} else {
if (hc->up_ext[ii - 1]) {
if (array[j - inc] != INF) {
en = array[j - inc] + energy + vrna_E_ext_stem(type, si, -1, P);
if (sc)
if (sc->energy_up)
en += sc->energy_up[ii - 1][1];
array[i] = MIN2(array[i], en);
}
}
}
if (j != start) {
/* dangle_model on both sides */
if (hc->up_ext[jj - 1] && hc->up_ext[ii - 1]) {
if (array[j - 2 * inc] != INF) {
en = array[j - 2 * inc] + energy + vrna_E_ext_stem(type, si, sj, P);
if (sc)
if (sc->energy_up)
en += sc->energy_up[ii - 1][1] + sc->energy_up[jj + 1][1];
array[i] = MIN2(array[i], en);
}
}
}
}
}
}
}
/*
 * Convenience wrapper around backtrack_co(): trace the complete sequence
 * starting from an empty backtrack stack (s = 0) and an empty base-pair
 * list (b = 0).
 */
PRIVATE int
backtrack(sect bt_stack[],
vrna_bp_stack_t *bp_list,
vrna_fold_compound_t *vc)
{
  return backtrack_co(bt_stack, bp_list, 0, 0, vc);
}
/*
 * "Do magic": duplicate the single sequence in vc so the cofold machinery
 * can treat it as a two-strand dimer (strand 2 is a copy of strand 1,
 * cut point right after the original sequence). Rebuilds strand bookkeeping,
 * encodings, ptype, index tables, hard constraints and DP matrices.
 * Inverse operation: halfseq().
 */
PRIVATE void
doubleseq(vrna_fold_compound_t *vc)
{
unsigned int length, i, s;
length = vc->length;
/* do some magic to re-use cofold code */
vc->sequence = vrna_realloc(vc->sequence, sizeof(char) * (2 * length + 2));
memcpy(vc->sequence + length, vc->sequence, sizeof(char) * length);
vc->sequence[2 * length] = '\0';
vc->length = (unsigned int)strlen(vc->sequence);
vc->cutpoint = length + 1;
vc->strands = 2;
/* strand_number[i] = 0 for the first copy, 1 for the second */
free(vc->strand_number);
vc->strand_number = (unsigned int *)vrna_alloc(sizeof(unsigned int) * (vc->length + 1));
for (s = i = 0; i <= vc->length; i++) {
if (i == length + 1)
s++;
vc->strand_number[i] = s;
}
free(vc->strand_order);
free(vc->strand_start);
free(vc->strand_end);
vc->strand_order = (unsigned int *)vrna_alloc(sizeof(unsigned int) * (vc->strands + 1));
vc->strand_start = (unsigned int *)vrna_alloc(sizeof(unsigned int) * (vc->strands + 1));
vc->strand_end = (unsigned int *)vrna_alloc(sizeof(unsigned int) * (vc->strands + 1));
vc->strand_order[0] = 0;
vc->strand_order[1] = 1;
vc->strand_start[0] = 1;
vc->strand_end[0] = vc->strand_start[0] + length - 1;
vc->strand_start[1] = vc->strand_end[0] + 1;
vc->strand_end[1] = vc->strand_start[1] + length - 1;
/* duplicate both encodings; positions 0 and length+1 hold wrap-around values */
vc->sequence_encoding = vrna_realloc(vc->sequence_encoding, sizeof(short) * (vc->length + 2));
memcpy(vc->sequence_encoding + length + 1, vc->sequence_encoding + 1, sizeof(short) * length);
vc->sequence_encoding[0] = vc->sequence_encoding[vc->length];
vc->sequence_encoding[vc->length + 1] = vc->sequence_encoding[1];
vc->sequence_encoding2 = vrna_realloc(vc->sequence_encoding2, sizeof(short) * (vc->length + 2));
memcpy(vc->sequence_encoding2 + length + 1, vc->sequence_encoding2 + 1, sizeof(short) * length);
vc->sequence_encoding2[0] = vc->length;
vc->sequence_encoding2[vc->length + 1] = 0;
/* derived tables must be rebuilt for the doubled length */
free(vc->ptype);
vc->ptype = vrna_ptypes(vc->sequence_encoding2, &(vc->params->model_details));
free(vc->iindx);
vc->iindx = vrna_idx_row_wise(vc->length);
free(vc->jindx);
vc->jindx = vrna_idx_col_wise(vc->length);
vrna_hc_init(vc);
/* add DP matrices */
vrna_mx_mfe_add(vc, VRNA_MX_DEFAULT, 0);
}
/*
 * "Undo magic": restore a fold compound previously doubled by doubleseq()
 * back to its original single-strand state (truncate sequence to half
 * length, shrink bookkeeping arrays, rebuild ptype/index tables,
 * constraints, and DP matrices).
 */
PRIVATE void
halfseq(vrna_fold_compound_t *vc)
{
unsigned int halflength;
halflength = vc->length / 2;
vc->sequence = vrna_realloc(vc->sequence, sizeof(char) * (halflength + 1));
vc->sequence[halflength] = '\0';
vc->length = (unsigned int)strlen(vc->sequence);
vc->cutpoint = -1;
vc->strands = 1;
vc->strand_number = (unsigned int *)vrna_realloc(vc->strand_number,
sizeof(unsigned int) * (vc->length + 1));
vc->strand_order = (unsigned int *)vrna_realloc(vc->strand_order,
sizeof(unsigned int) * (vc->strands + 1));
vc->strand_start = (unsigned int *)vrna_realloc(vc->strand_start,
sizeof(unsigned int) * (vc->strands + 1));
vc->strand_end = (unsigned int *)vrna_realloc(vc->strand_end,
sizeof(unsigned int) * (vc->strands + 1));
/* positions 0 and length+1 hold the circular wrap-around values */
vc->sequence_encoding =
vrna_realloc(vc->sequence_encoding, sizeof(short) * (vc->length + 2));
vc->sequence_encoding[0] = vc->sequence_encoding[vc->length];
vc->sequence_encoding[vc->length + 1] = vc->sequence_encoding[1];
vc->sequence_encoding2 =
vrna_realloc(vc->sequence_encoding2, sizeof(short) * (vc->length + 2));
vc->sequence_encoding2[0] = vc->length;
vc->sequence_encoding2[vc->length + 1] = 0;
/* derived tables must be rebuilt for the halved length */
free(vc->ptype);
vc->ptype = vrna_ptypes(vc->sequence_encoding2, &(vc->params->model_details));
free(vc->iindx);
vc->iindx = vrna_idx_row_wise(vc->length);
free(vc->jindx);
vc->jindx = vrna_idx_col_wise(vc->length);
vrna_hc_init(vc);
/* add DP matrices */
vrna_mx_mfe_add(vc, VRNA_MX_DEFAULT, 0);
}
/* Pair record used while enumerating Zuker suboptimal structures. */
typedef struct {
  int i;    /* 5' position of the base pair */
  int j;    /* 3' position of the base pair */
  int e;    /* energy of the best structure containing pair (i,j) */
  int idxj; /* cached jindx[j], used for the position tie-break below */
} zuker_pair;
/*
 * qsort() comparator for zuker_pair: ascending energy, ties broken by
 * linearized matrix position (idxj + i).
 *
 * BUGFIX: the tie-break used to be "x->idxj + x->i - y->idxj + y->i",
 * which is not antisymmetric (comp(a,b) and comp(b,a) could both be
 * positive), i.e. an inconsistent ordering -- undefined behavior for
 * qsort(). The y-side terms must be subtracted as a group.
 */
PRIVATE int
comp_pair(const void *A,
          const void *B)
{
  zuker_pair *x, *y;
  int ex, ey;

  x = (zuker_pair *)A;
  y = (zuker_pair *)B;
  ex = x->e;
  ey = y->e;
  if (ex > ey)
    return 1;

  if (ex < ey)
    return -1;

  /* equal energies: compare positions consistently */
  return (x->idxj + x->i) - (y->idxj + y->i);
}
/*
 * Compute Zuker-style suboptimal structures: for every possible base pair
 * (i,j), the best structure containing that pair. Implemented by doubling
 * the sequence (doubleseq), running the cofold recursion, backtracking the
 * "inside" [i,j] and "outside" [j,i+length] parts of each pair, and
 * restoring the compound afterwards (halfseq).
 * Returns a vrna_alloc'd SOLUTION array; caller owns it and each
 * .structure string. Returns NULL on preparation failure.
 */
PUBLIC SOLUTION *
vrna_subopt_zuker(vrna_fold_compound_t *vc)
{
/* Compute zuker suboptimal. Here, we're abusing the cofold() code
* "double" sequence, compute dimerarray entries, track back every base pair.
* This is slightly wasteful compared to the normal solution */
char *structure, *mfestructure, **todo, *ptype;
int i, j, counter, num_pairs, psize, p, *indx, *c, turn;
unsigned int length, doublelength;
float energy;
SOLUTION *zukresults;
vrna_bp_stack_t *bp_list;
zuker_pair *pairlist;
sect bt_stack[MAXSECTORS]; /* stack of partial structures for backtracking */
vrna_mx_mfe_t *matrices;
vrna_md_t *md;
md = &(vc->params->model_details);
turn = md->min_loop_size;
/* do some magic to re-use cofold code although vc is single sequence */
md->min_loop_size = 0;
doubleseq(vc);
if (!vrna_fold_compound_prepare(vc, VRNA_OPTION_MFE | VRNA_OPTION_HYBRID)) {
vrna_message_warning("vrna_subopt_zuker@cofold.c: Failed to prepare vrna_fold_compound");
return NULL;
}
doublelength = vc->length;
length = doublelength / 2;
indx = vc->jindx;
ptype = vc->ptype;
matrices = vc->matrices;
c = matrices->c;
num_pairs = counter = 0;
mfestructure = (char *)vrna_alloc((unsigned)doublelength + 1);
structure = (char *)vrna_alloc((unsigned)doublelength + 1);
zukresults = (SOLUTION *)vrna_alloc(((length * (length - 1)) / 2) * sizeof(SOLUTION));
mfestructure[0] = '\0';
/* store length at pos. 0 */
vc->sequence_encoding[0] = vc->sequence_encoding2[0];
/* get mfe and do forward recursion */
(void)fill_arrays(vc, 1);
psize = length;
pairlist = (zuker_pair *)vrna_alloc(sizeof(zuker_pair) * (psize + 1));
bp_list = (vrna_bp_stack_t *)vrna_alloc(sizeof(vrna_bp_stack_t) * (1 + length / 2));
todo = (char **)vrna_alloc(sizeof(char *) * (length + 1));
for (i = 1; i < length; i++)
todo[i] = (char *)vrna_alloc(sizeof(char) * (length + 1));
/* Make a list of all base pairs */
for (i = 1; i < length; i++) {
for (j = i + turn + 1 /*??*/; j <= length; j++) {
if (ptype[indx[j] + i] == 0)
continue;
if (num_pairs >= psize) {
/* grow geometrically to amortize reallocation cost */
psize = 1.2 * psize + 32;
pairlist = vrna_realloc(pairlist, sizeof(zuker_pair) * (psize + 1));
}
pairlist[num_pairs].i = i;
pairlist[num_pairs].j = j;
/* energy of best structure containing (i,j): inside + outside part */
pairlist[num_pairs].e = c[indx[j] + i] + c[indx[i + length] + j];
pairlist[num_pairs++].idxj = indx[j];
todo[i][j] = 1;
}
}
qsort(pairlist, num_pairs, sizeof(zuker_pair), comp_pair);
for (p = 0; p < num_pairs; p++) {
i = pairlist[p].i;
j = pairlist[p].j;
if (todo[i][j]) {
int k;
char *sz;
/* trace the inside part [i,j] ... */
bt_stack[1].i = i;
bt_stack[1].j = j;
bt_stack[1].ml = 2;
backtrack_co(bt_stack, bp_list, 1, 0, vc);
/* ... then the outside part [j, i+length], appending to the pair list */
bt_stack[1].i = j;
bt_stack[1].j = i + length;
bt_stack[1].ml = 2;
backtrack_co(bt_stack, bp_list, 1, bp_list[0].i, vc);
energy = pairlist[p].e;
sz = vrna_db_from_bp_stack(bp_list, length);
zukresults[counter].energy = energy / 100.;
zukresults[counter++].structure = sz;
for (k = 1; k <= bp_list[0].i; k++) {
/* mark all pairs in structure as done */
int x, y;
x = bp_list[k].i;
y = bp_list[k].j;
if (x > length)
x -= length;
if (y > length)
y -= length;
if (x > y) {
int temp;
temp = x;
x = y;
y = temp;
}
todo[x][y] = 0;
}
}
}
/* clean up */
free(pairlist);
for (i = 1; i < length; i++)
free(todo[i]);
free(todo);
free(structure);
free(mfestructure);
free(bp_list);
/* undo magic */
halfseq(vc);
md->min_loop_size = turn;
return zukresults;
}
/*
*###########################################
*# deprecated functions below #
*###########################################
*/
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
PRIVATE void
wrap_array_export(int **f5_p,
int **c_p,
int **fML_p,
int **fM1_p,
int **fc_p,
int **indx_p,
char **ptype_p)
{
/* make the DP arrays available to routines such as subopt() */
if (backward_compat_compound) {
*f5_p = backward_compat_compound->matrices->f5;
*c_p = backward_compat_compound->matrices->c;
*fML_p = backward_compat_compound->matrices->fML;
*fM1_p = backward_compat_compound->matrices->fM1;
*fc_p = backward_compat_compound->matrices->fc;
*indx_p = backward_compat_compound->jindx;
*ptype_p = backward_compat_compound->ptype;
}
}
/*--------------------------------------------------------------------------*/
/*
 * Old-API shim for cofold()/cofold_par(): builds a fold compound from the
 * (cut_point-annotated) sequence, optionally applies dot-bracket
 * constraints, computes the dimer MFE, backtracks into 'structure', and
 * caches the compound in the file-scope backward-compatibility globals.
 * Ownership: the compound is kept in backward_compat_compound; the pair
 * list replaces the global 'base_pair'.
 */
PRIVATE float
wrap_cofold(const char *string,
char *structure,
vrna_param_t *parameters,
int is_constrained)
{
unsigned int length;
char *seq;
vrna_fold_compound_t *vc;
vrna_param_t *P;
float mfe;
vc = NULL;
length = strlen(string);
#ifdef _OPENMP
/* Explicitly turn off dynamic threads */
omp_set_dynamic(0);
#endif
/* we need the parameter structure for hard constraints */
if (parameters) {
P = vrna_params_copy(parameters);
} else {
vrna_md_t md;
set_model_details(&md);
md.temperature = temperature;
P = vrna_params(&md);
}
P->model_details.min_loop_size = 0; /* set min loop length to 0 */
/* dirty hack to reinsert the '&' according to the global variable 'cut_point' */
seq = vrna_cut_point_insert(string, cut_point);
/* get compound structure */
vc = vrna_fold_compound(seq, &(P->model_details), 0);
if (parameters) {
/* replace params if necessary */
free(vc->params);
vc->params = P;
} else {
/* vc made its own copy of the model details; drop ours */
free(P);
}
/* handle hard constraints in pseudo dot-bracket format if passed via simple interface */
if (is_constrained && structure) {
unsigned int constraint_options = 0;
constraint_options |= VRNA_CONSTRAINT_DB
| VRNA_CONSTRAINT_DB_PIPE
| VRNA_CONSTRAINT_DB_DOT
| VRNA_CONSTRAINT_DB_X
| VRNA_CONSTRAINT_DB_ANG_BRACK
| VRNA_CONSTRAINT_DB_RND_BRACK
| VRNA_CONSTRAINT_DB_INTRAMOL
| VRNA_CONSTRAINT_DB_INTERMOL;
vrna_constraints_add(vc, (const char *)structure, constraint_options);
}
/* replace any previously cached compound */
if (backward_compat_compound)
vrna_fold_compound_free(backward_compat_compound);
backward_compat_compound = vc;
backward_compat = 1;
/* cleanup */
free(seq);
/* call mfe_dimer without backtracing */
mfe = vrna_mfe_dimer(vc, NULL);
/* now we backtrace in a backward compatible way */
if (structure && vc->params->model_details.backtrack) {
char *s;
sect bt_stack[MAXSECTORS];
vrna_bp_stack_t *bp;
bp = (vrna_bp_stack_t *)vrna_alloc(sizeof(vrna_bp_stack_t) * (4 * (1 + length / 2))); /* add a guess of how many G's may be involved in a G quadruplex */
backtrack(bt_stack, bp, vc);
s = vrna_db_from_bp_stack(bp, length);
strncpy(structure, s, length + 1);
free(s);
/* hand the pair list to the legacy global 'base_pair' */
if (base_pair)
free(base_pair);
base_pair = bp;
}
return mfe;
}
/*
 * Old-API shim for zukersubopt()/zukersubopt_par(): builds a fold compound
 * (with caller-supplied or default parameters), caches it in the
 * backward-compatibility globals, and delegates to vrna_subopt_zuker().
 */
PRIVATE SOLUTION *
wrap_zukersubopt(const char *string,
vrna_param_t *parameters)
{
vrna_fold_compound_t *vc;
vrna_param_t *P;
vc = NULL;
#ifdef _OPENMP
/* Explicitly turn off dynamic threads */
omp_set_dynamic(0);
#endif
/* we need the parameter structure for hard constraints */
if (parameters) {
P = vrna_params_copy(parameters);
} else {
vrna_md_t md;
set_model_details(&md);
md.temperature = temperature;
P = vrna_params(&md);
}
/* get compound structure */
vc = vrna_fold_compound(string, &(P->model_details), VRNA_OPTION_DEFAULT);
if (parameters) {
/* replace params if necessary */
free(vc->params);
vc->params = P;
} else {
/* vc made its own copy of the model details; drop ours */
free(P);
}
/* replace any previously cached compound */
if (backward_compat_compound)
vrna_fold_compound_free(backward_compat_compound);
backward_compat_compound = vc;
backward_compat = 1;
return vrna_subopt_zuker(vc);
}
/*
 * Deprecated no-op kept for API compatibility; DP arrays are managed by
 * vrna_fold_compound_t nowadays, so there is nothing to initialize.
 */
PUBLIC void
initialize_cofold(int length)
{
}
/* Release the cached backward-compatibility fold compound, if we own one. */
PUBLIC void
free_co_arrays(void)
{
  if ((backward_compat_compound == NULL) || (!backward_compat))
    return;

  vrna_fold_compound_free(backward_compat_compound);
  backward_compat_compound = NULL;
  backward_compat = 0;
}
/*--------------------------------------------------------------------------*/
PUBLIC void
export_cofold_arrays_gq(int **f5_p,
int **c_p,
int **fML_p,
int **fM1_p,
int **fc_p,
int **ggg_p,
int **indx_p,
char **ptype_p)
{
/* make the DP arrays available to routines such as subopt() */
wrap_array_export(f5_p, c_p, fML_p, fM1_p, fc_p, indx_p, ptype_p);
if (backward_compat_compound)
*ggg_p = backward_compat_compound->matrices->ggg;
}
/* Old API: export the DP arrays (without the G-quadruplex matrix). */
PUBLIC void
export_cofold_arrays(int **f5_p,
int **c_p,
int **fML_p,
int **fM1_p,
int **fc_p,
int **indx_p,
char **ptype_p)
{
  wrap_array_export(f5_p, c_p, fML_p, fM1_p, fc_p, indx_p, ptype_p);
}
/*
 * Old API: cofold with default parameters; honors the global
 * 'fold_constrained' flag for dot-bracket constraints in 'structure'.
 */
PUBLIC float
cofold(const char *string,
char *structure)
{
  return wrap_cofold(string, structure, NULL, fold_constrained);
}
/* Old API: cofold with explicit energy parameters and constraint flag. */
PUBLIC float
cofold_par(const char *string,
char *structure,
vrna_param_t *parameters,
int is_constrained)
{
  return wrap_cofold(string, structure, parameters, is_constrained);
}
/* Old API: Zuker suboptimals with default parameters. */
PUBLIC SOLUTION *
zukersubopt(const char *string)
{
  return wrap_zukersubopt(string, NULL);
}
/* Old API: Zuker suboptimals with explicit energy parameters. */
PUBLIC SOLUTION *
zukersubopt_par(const char *string,
vrna_param_t *parameters)
{
  return wrap_zukersubopt(string, parameters);
}
/*
 * Old API: refresh the cached compound's energy parameters from the
 * current global model settings. No-op if no compound is cached.
 */
PUBLIC void
update_cofold_params(void)
{
  vrna_fold_compound_t *v;

  if (backward_compat_compound && backward_compat) {
    vrna_md_t md;
    v = backward_compat_compound;

    /* free(NULL) is a no-op, so the old "if (v->params)" guard was redundant */
    free(v->params);

    set_model_details(&md);
    v->params = vrna_params(&md);
  }
}
/*
 * Old API: replace the cached compound's energy parameters with a copy of
 * 'parameters', or regenerate them from the global model settings when
 * 'parameters' is NULL. No-op if no compound is cached.
 */
PUBLIC void
update_cofold_params_par(vrna_param_t *parameters)
{
  vrna_fold_compound_t *v;

  if (backward_compat_compound && backward_compat) {
    v = backward_compat_compound;

    /* free(NULL) is a no-op, so the old "if (v->params)" guard was redundant */
    free(v->params);

    if (parameters) {
      v->params = vrna_params_copy(parameters);
    } else {
      vrna_md_t md;
      set_model_details(&md);
      md.temperature = temperature;
      v->params = vrna_params(&md);
    }
  }
}
/*
 * Old API: report the monomer minimum free energies recorded by the most
 * recent (co)fold run (see fill_arrays()).
 */
PUBLIC void
get_monomere_mfes(float *e1,
float *e2)
{
  *e1 = mfe1; /* 5' fragment */
  *e2 = mfe2; /* 3' fragment */
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
#define BezierQuantum 200
#define PrimitiveExtentPad 128
#define MaxBezierCoordinates 4194304
#define ThrowPointExpectedException(token,exception) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
"NonconformingDrawingPrimitiveDefinition","`%s'",token); \
status=MagickFalse; \
break; \
}
/*
Typedef declarations.
*/
/*
  A monotone polygon edge: a run of points that proceeds strictly in one
  vertical direction, produced by ConvertPathToPolygon().
*/
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;           /* bounding box of the edge's points */

  double
    scanline;         /* rasterizer bookkeeping; initialized to -1.0 when
                         the edge is built — TODO confirm exact use in the
                         scan-conversion code */

  PointInfo
    *points;          /* edge vertices, sorted top-to-bottom */

  size_t
    number_points;

  ssize_t
    direction;        /* 1 if the original path ran downward (increasing y),
                         0 if it ran upward (points are reversed then) */

  MagickBooleanType
    ghostline;        /* MagickTrue for the implicit closing segment of an
                         open subpath (not part of the drawn outline) */

  size_t
    highwater;        /* resume index for scanline processing — presumably;
                         verify against the polygon renderer */
} EdgeInfo;

/*
  An ellipse element: center, axes and rotation — assumed from the field
  names; confirm with callers.
*/
typedef struct _ElementInfo
{
  double
    cx,
    cy,
    major,
    minor,
    angle;
} ElementInfo;

/*
  Parsing state for MVG primitives: a growable PrimitiveInfo array
  (primitive_info/extent), the current write offset, the current point,
  and the exception sink.
*/
typedef struct _MVGInfo
{
  PrimitiveInfo
    **primitive_info;

  size_t
    *extent;

  ssize_t
    offset;

  PointInfo
    point;

  ExceptionInfo
    *exception;
} MVGInfo;

/*
  A polygon in sorted-edge rendering form (see ConvertPathToPolygon()).
*/
typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;

  size_t
    number_edges;
} PolygonInfo;

/*
  Vector-path opcodes (see LogPathInfo() for the textual mapping):
  MoveToCode/OpenCode start a closed/open subpath, GhostlineCode starts
  the invisible closing segment of an open subpath, LineToCode continues
  the current subpath, EndCode terminates the path.
*/
typedef enum
{
  MoveToCode,
  OpenCode,
  GhostlineCode,
  LineToCode,
  EndCode
} PathInfoCode;

/*
  One vector-path element: a point tagged with its opcode.
*/
typedef struct _PathInfo
{
  PointInfo
    point;

  PathInfoCode
    code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);
static size_t
TracePath(MVGInfo *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *draw_info;

  /*
    Allocate a DrawInfo (AcquireCriticalMemory aborts on OOM) and
    initialize it to default values; the caller owns the result and must
    release it with DestroyDrawInfo().
  */
  draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
  GetDrawInfo((ImageInfo *) NULL,draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  ExceptionInfo
    *exception;

  /*
    Start from a default-initialized DrawInfo; a NULL draw_info therefore
    simply yields a fresh structure with default values.
  */
  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  exception=AcquireExceptionInfo();
  /*
    Deep-copy string and image members; scalar/struct members are copied
    by value.
  */
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t
        x;

      /*
        The dash pattern is a sub-MagickEpsilon-terminated array: count
        its entries, then copy with 4 elements of slack for the
        terminator.
      */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (x+4),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+4)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      /* number_stops was copied above as part of the gradient struct. */
      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        /* NOTE(review): exception message reused from the dash-pattern
           branch above (copy/paste) — consider a dedicated message. */
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,exception);
  clone_info->render=draw_info->render;
  /* The debug flag tracks the current logging state, not the source's. */
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o draw_info: Specifies a pointer to an DrawInfo structure.
%
% o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
  register const PointInfo
    *p,
    *q;

  double
    diff;

  /*
    qsort() comparator for EdgeInfo: edge sorting for a right-handed
    coordinate system.  Order by the first point (y, then x), then by the
    initial slope (via a cross product), then by the second point.
  */
  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  diff=p[0].y-q[0].y;
  if (diff < 0.0)
    return(-1);
  if (diff > 0.0)
    return(1);
  diff=p[0].x-q[0].x;
  if (diff < 0.0)
    return(-1);
  if (diff > 0.0)
    return(1);
  diff=(p[1].x-p[0].x)*(q[1].y-q[0].y)-(p[1].y-p[0].y)*(q[1].x-q[0].x);
  if (diff < 0.0)
    return(-1);
  if (diff > 0.0)
    return(1);
  diff=p[1].y-q[1].y;
  if (diff < 0.0)
    return(-1);
  if (diff > 0.0)
    return(1);
  diff=p[1].x-q[1].x;
  if (diff < 0.0)
    return(-1);
  if (diff > 0.0)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
register EdgeInfo
*p;
register ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  register ssize_t
    head,
    tail;

  /*
    Reverse the point list in place by swapping from both ends toward the
    middle.  A zero- or one-element list is left untouched.
  */
  head=0;
  tail=(ssize_t) number_points-1;
  while (head < tail)
  {
    PointInfo
      swap;

    swap=points[head];
    points[head]=points[tail];
    points[tail]=swap;
    head++;
    tail--;
  }
}
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
  long
    direction,
    next_direction;

  PointInfo
    point,
    *points;

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;

  register ssize_t
    i,
    n;

  MagickBooleanType
    ghostline;

  size_t
    edge,
    number_edges,
    number_points;

  /*
    Convert a path to the more efficient sorted rendering form.
    The path is split into vertically monotone edges: a new edge starts
    at every move-to and whenever the vertical direction of the path
    flips.  Upward-running edges are reversed so every edge's points are
    sorted top-to-bottom, then the edge list is sorted with
    DrawCompareEdges().  Returns NULL on allocation failure.
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    return((PolygonInfo *) NULL);
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    /* NOTE(review): leaks polygon_info on this failure path. */
    return((PolygonInfo *) NULL);
  (void) memset(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) memset(&point,0,sizeof(point));
  (void) memset(&bounds,0,sizeof(bounds));
  polygon_info->edges[edge].number_points=(size_t) n;
  polygon_info->edges[edge].scanline=0.0;
  polygon_info->edges[edge].highwater=0;
  polygon_info->edges[edge].ghostline=ghostline;
  polygon_info->edges[edge].direction=(ssize_t) direction;
  polygon_info->edges[edge].points=points;
  polygon_info->edges[edge].bounds=bounds;
  polygon_info->number_edges=0;
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to: flush the edge in progress (if any) and start a new
          point list at this point.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            if (edge == number_edges)
              {
                /* Grow the edge array geometrically. */
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  /* NOTE(review): leaks polygon_info and points here. */
                  return((PolygonInfo *) NULL);
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            if (direction < 0)
              /* Normalize upward edges so points run top-to-bottom. */
              ReversePoints(points,(size_t) n);
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;
            ghostline=MagickFalse;
            edge++;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to: determine the vertical direction of this segment (ties in
      y broken by x) and start a new edge if the direction flips.
    */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge: flush the current point list; the new edge starts at
          the last point of the previous one so the edges share a vertex.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        /* Grow the point list geometrically. */
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
      }
    point=path_info[i].point;
    points[n]=point;
    /* Track the horizontal bounds as points accumulate. */
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      /* Flush the final edge; a degenerate (<2 point) tail is dropped. */
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                return((PolygonInfo *) NULL);
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          ghostline=MagickFalse;
          edge++;
        }
    }
  polygon_info->number_edges=edge;
  /* Sort edges for scanline rendering (see DrawCompareEdges). */
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
  register const PathInfo
    *p;

  /*
    Dump each path element (point and opcode) to the drawing event log.
  */
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
  {
    const char
      *annotation;

    switch (p->code)
    {
      case GhostlineCode:
      {
        annotation="moveto ghostline";
        break;
      }
      case OpenCode:
      {
        annotation="moveto open";
        break;
      }
      case MoveToCode:
      {
        annotation="moveto";
        break;
      }
      case LineToCode:
      {
        annotation="lineto";
        break;
      }
      default:
      {
        annotation="?";
        break;
      }
    }
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,annotation);
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,
    q;

  register ssize_t
    i,
    n;

  ssize_t
    coordinates,
    start;

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
    Each subpath becomes a MoveTo followed by LineTo elements; open
    subpaths are closed with an invisible ghostline back to the subpath
    start.  Returns NULL for primitives with no path form.
  */
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /* Count the primitive elements; worst case each yields 3 path entries
     (point + ghostline + closing lineto), plus the EndCode terminator. */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath: remember its first point (p) and where it begins
          in the output (start), in case it must be marked open later.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed, and append
      a ghostline segment back to the subpath's first point.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  /* Trim the over-allocated buffer down to the n+1 entries in use. */
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  /*
    Release every owned member of the DrawInfo (strings, pattern/mask
    images, dash pattern, gradient stops), invalidate its signature, and
    free the structure itself.  Always returns NULL so callers can write
    draw_info=DestroyDrawInfo(draw_info).
  */
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  /* Poison the signature to catch use-after-destroy. */
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
% ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
  const size_t edge)
{
  size_t
    remaining;

  /*
    Release the edge's point list, close the gap in the edge array with
    memmove, and return the new number of edges.
  */
  assert(edge < polygon_info->number_edges);
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  remaining=--polygon_info->number_edges;
  if (edge < remaining)
    (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
      (size_t) (remaining-edge)*sizeof(*polygon_info->edges));
  return(remaining);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  register ssize_t
    edge;

  /*
    Free every edge's point list, then the edge array, then the
    container itself; returns NULL for caller reassignment.
  */
  for (edge=0; edge < (ssize_t) polygon_info->number_edges; edge++)
    polygon_info->edges[edge].points=(PointInfo *)
      RelinquishMagickMemory(polygon_info->edges[edge].points);
  polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges);
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  register double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Clip the horizontal span [edge->x1,edge->x2] at destination row y so
    that the affine-mapped source coordinates stay inside the source
    image.  `affine` here is the inverse transform; each clause below
    intersects the span with one pair of source-image boundaries.
  */
  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /* Negative x-scale: the two column intercepts swap roles. */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          /* sx ~ 0 and the fixed column is outside the source: return a
             degenerate span (x2 < x1) so the caller skips this row. */
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          /* NOTE(review): this degenerate branch sets x2=edge->x2 while
             the analogous column branch above uses edge->x1; verify the
             asymmetry is intentional (x2=edge->x2 may not force
             x2 < x1). */
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    reciprocal_determinant;

  /*
    Invert the 2x3 affine transform: invert the 2x2 linear part with the
    reciprocal of its determinant (PerceptibleReciprocal guards against a
    singular matrix), then map the translation through the inverse.
  */
  reciprocal_determinant=PerceptibleReciprocal(affine->sx*affine->sy-
    affine->rx*affine->ry);
  inverse_affine.sx=reciprocal_determinant*affine->sy;
  inverse_affine.rx=reciprocal_determinant*(-affine->rx);
  inverse_affine.ry=reciprocal_determinant*(-affine->ry);
  inverse_affine.sy=reciprocal_determinant*affine->sx;
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],
    min,
    max;

  register ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Composite `source` over `image` under the affine transform: map the
    source's corners forward to find the destination bounding box, then
    scan that box row by row, sampling the source through the inverse
    transform and compositing each interpolated pixel over the
    destination.
  */
  /*
    Determine bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;

    /* Forward-map each source corner into destination space. */
    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  /* Clamp the destination bounding box to the image. */
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    ssize_t
      x_offset;

    /* Clip this row's span so inverse-mapped samples stay in-source. */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    x_offset=0;  /* NOTE(review): incremented but never read. */
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
    {
      /* Inverse-map the destination pixel into source coordinates. */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      x_offset++;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double SaneStrokeWidth(const Image *image,
  const DrawInfo *draw_info)
{
  double
    limit;

  /*
    Clamp the requested stroke width so it never exceeds a small multiple
    (2*sqrt(2) plus epsilon) of the image's larger dimension.
  */
  limit=(2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,image->rows);
  return(MagickMin((double) draw_info->stroke_width,limit));
}
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info,
  ExceptionInfo *exception)
{
  double
    mid;

  DrawInfo
    *clone_info;

  MagickBooleanType
    status;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  /*
    Debug aid: stroke each edge's bounding rectangle (red for one polygon
    direction, green for the other) and the overall bounds in blue.
  */
  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(status);
    }
  /*
    Default to 96 DPI unless the draw info carries an explicit density.
  */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  /*
    mid is half the effective (scaled, sanity-clamped) stroke width; the
    rectangles are inflated by this amount on every side.
  */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    SaneStrokeWidth(image,clone_info)/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /*
        Union of all edge bounds, then clamped into the image extent.
      */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      /*
        Draw one rectangle per edge; stroke color encodes edge direction.
      */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
            exception);
        else
          status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
            exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info,exception);
        if (status == MagickFalse)
          break;
      }
      /*
        An early break above means an edge failed to draw; bail out.
      */
      if (i < (ssize_t) polygon_info->number_edges)
        {
          clone_info=DestroyDrawInfo(clone_info);
          return(status);
        }
    }
  /*
    Finally stroke the overall bounding rectangle in blue.
  */
  status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(status);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  const char
    *path;

  Image
    *mask;

  MagickBooleanType
    status;

  /*
    Look up the MVG clip path registered under the given artifact id,
    render it to a mask image, and install it as the image's write mask.
  */
  path=GetImageArtifact(image,id);
  if (path == (const char *) NULL)
    return(MagickFalse);
  mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,path,exception);
  if (mask == (Image *) NULL)
    return(MagickFalse);
  status=SetImageMask(image,WritePixelMask,mask,exception);
  mask=DestroyImage(mask);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  Image
    *clip_mask,
    *separate_mask;

  MagickStatusType
    status;

  /*
    Draw a clip path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  /*
    Start from a fully transparent canvas the same size as the image.
  */
  clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  status=SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  /*
    Render the clip path MVG with white fill and no stroke so alpha alone
    encodes coverage; clip_path=MagickTrue marks this as a clip rendering.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,1,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /*
    Convert the rendered alpha channel into a grayscale mask, negated so
    covered areas become writable.  On NegateImage failure the mask is
    destroyed and NULL is returned.
  */
  separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      clip_mask=DestroyImage(clip_mask);
      clip_mask=separate_mask;
      status=NegateImage(clip_mask,MagickFalse,exception);
      if (status == MagickFalse)
        clip_mask=DestroyImage(clip_mask);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask,
    *separate_mask;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  /*
    Draw a mask path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  /*
    Start from a fully transparent canvas the same size as the image.
  */
  composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
    exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  composite_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(composite_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  /*
    Render the mask path MVG with white fill and no stroke so alpha alone
    encodes coverage.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  status=RenderMVGContent(composite_mask,clone_info,1,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /*
    Convert the rendered alpha channel into a grayscale mask, negated so
    covered areas pass the composite.  On NegateImage failure the mask is
    destroyed and NULL is returned.
  */
  separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      composite_mask=DestroyImage(composite_mask);
      composite_mask=separate_mask;
      status=NegateImage(composite_mask,MagickFalse,exception);
      if (status == MagickFalse)
        composite_mask=DestroyImage(composite_mask);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  double
    length,
    maximum_length,
    offset,
    scale,
    total_length;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;

  register double
    dx,
    dy;

  register ssize_t
    i;

  size_t
    number_vertices;

  ssize_t
    j,
    n;

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin draw-dash");
  /*
    Count vertices up to the UndefinedPrimitive sentinel, then allocate a
    working polygon large enough for any dash segment (2*n+32 is the
    upstream safety margin).
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  /*
    Consume the dash offset: walk the dash pattern (n indexes the pattern,
    odd n = gap, even n = dash) until the offset is exhausted, leaving
    `length` as the remainder of the current pattern entry.
  */
  j=1;
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /*
    Walk each polygon segment, emitting a stroked sub-polygon for every
    completed dash and skipping gaps.
  */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    if (maximum_length > MaxBezierCoordinates)
      break;
    if (fabs(length) < MagickEpsilon)
      {
        /*
          Current pattern entry exhausted exactly at a vertex: advance to
          the next entry (wrapping at the 0.0 terminator).
        */
        n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /*
            End of a gap: restart the dash polygon at the interpolated
            point along this segment.
          */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /*
            End of a dash: close the dash polygon at the interpolated
            point and stroke it.
          */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
        }
      n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /*
      Carry the unfinished portion of the current entry into the next
      segment; if we are mid-dash, the vertex itself joins the polygon.
    */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  /*
    Flush a trailing partial dash, nudged by epsilon so zero-length tails
    still render.
  */
  if ((total_length < maximum_length) && ((n & 0x01) == 0) && (j > 1))
    {
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  /*
    Map the image coordinate (x,y) onto the gradient's stop-offset axis.
    Linear gradients project the point onto the gradient vector; radial
    gradients measure (possibly anisotropically scaled) distance from the
    gradient center.  Unknown gradient types yield 0.0.
  */
  if ((gradient->type == UndefinedGradient) ||
      (gradient->type == LinearGradient))
    {
      const SegmentInfo
        *vector;

      double
        gamma,
        length,
        offset,
        scale;

      PointInfo
        p,
        q;

      vector=(&gradient->gradient_vector);
      p.x=vector->x2-vector->x1;
      p.y=vector->y2-vector->y1;
      q.x=(double) x-vector->x1;
      q.y=(double) y-vector->y1;
      length=sqrt(q.x*q.x+q.y*q.y);
      gamma=sqrt(p.x*p.x+p.y*p.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=p.x*q.x+p.y*q.y;
      offset=gamma*scale*length;
      return(offset);
    }
  if (gradient->type == RadialGradient)
    {
      PointInfo
        v;

      if (gradient->spread == RepeatSpread)
        {
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  return(0.0);
}
static int StopInfoCompare(const void *x,const void *y)
{
  const StopInfo
    *a,
    *b;

  /*
    qsort() comparator: order gradient stops by ascending offset.  Offsets
    within MagickEpsilon compare equal (checked after the strict greater-
    than test, matching the original ordering semantics).
  */
  a=(const StopInfo *) x;
  b=(const StopInfo *) y;
  if (a->offset > b->offset)
    return(1);
  if (fabs(a->offset-b->offset) <= MagickEpsilon)
    return(0);
  return(-1);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  /*
    Sort the color stops by offset so per-pixel stop lookup can scan in
    order; note this sorts the caller's stop array in place.
  */
  gradient=(&draw_info->gradient);
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    PixelInfo
      composite,
      pixel;

    double
      alpha,
      offset;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /*
      Seed the offset for x == gradient origin; linear gradients normalize
      the projection by the gradient vector length.
    */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      /*
        Each spread method maps the raw offset into [0,1], then blends the
        two stops that bracket it into `composite`.
      */
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /*
            Pad: offsets outside [0,1] clamp to the first/last stop color.
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /*
            Reflect: mirror the offset back and forth across [0,1].
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          MagickBooleanType
            antialias;

          double
            repeat;

          /*
            Repeat: tile the gradient; `antialias` marks pixels within one
            unit of a tile seam, which are blended between the last and
            first stops to soften the transition.
          */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  double
    extent;

  size_t
    quantum;

  /*
    Check if there is enough storage for drawing primitives.  Grows the
    primitive-info buffer when needed; extent is computed in double so the
    overflow checks against SSIZE_MAX and the memory-request limit are not
    themselves subject to integer wraparound.
  */
  extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
  quantum=sizeof(**mvg_info->primitive_info);
  if (((extent*quantum) < (double) SSIZE_MAX) &&
      ((extent*quantum) < (double) GetMaxMemoryRequest()))
    {
      if (extent <= (double) *mvg_info->extent)
        return(MagickTrue);
      *mvg_info->primitive_info=ResizeQuantumMemory(*mvg_info->primitive_info,
        (size_t) extent,quantum);
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          *mvg_info->extent=(size_t) extent;
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
      *mvg_info->primitive_info);
  /*
    NOTE(review): AcquireCriticalMemory aborts rather than returning NULL,
    so the minimal buffer below is always valid — confirm against the
    project's memory API if this ever changes.
  */
  *mvg_info->primitive_info=AcquireCriticalMemory(PrimitiveExtentPad*quantum);
  (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
  *mvg_info->extent=1;
  return(MagickFalse);
}
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *token;

  const char
    *q;

  size_t
    extent;

  SplayTreeInfo
    *macros;

  /*
    Scan graphic primitives for definitions and classes.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    GetNextToken(q,&q,extent,token);
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        register const char
          *end,
          *start;

        GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];

            const char
              *p;

            ssize_t
              n;

            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            /*
              Scan forward tracking push/pop nesting (n); when the matching
              pop brings n to zero, the text between start and end is the
              macro body.
            */
            n=1;
            for (p=q; *p != '\0'; )
            {
              GetNextToken(p,&p,extent,token);
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  char
                    *macro;

                  /*
                    Extract macro.
                  */
                  GetNextToken(p,&p,extent,token);
                  macro=AcquireString(start);
                  macro[end-start]='\0';
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  macro=DestroyString(macro);
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  return(macros);
}
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *end;

  double
    value;

  /*
    A string qualifies as a point unless numeric parsing consumed no
    characters and produced a (near-)zero value.
  */
  value=StringToDouble(point,&end);
  if ((end == point) && (fabs(value) < MagickEpsilon))
    return(MagickFalse);
  return(MagickTrue);
}
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  /*
    Record a single-coordinate point primitive; a lone point can never
    form a closed subpath.
  */
  primitive_info->point=point;
  primitive_info->closed_subpath=MagickFalse;
  primitive_info->coordinates=1;
  return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points,
number_stops;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
StopInfo
*stops;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
if (status == MagickFalse)
return(status);
}
primitive=(char *) NULL;
if (*draw_info->primitive != '@')
primitive=AcquireString(draw_info->primitive);
else
if ((strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-'))
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"MVG",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.offset=0;
mvg_info.exception=exception;
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
defsDepth=0;
symbolDepth=0;
cursor=0.0;
macros=GetMVGMacros(primitive);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
GetNextToken(q,&q,MagickPathExtent,keyword);
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.rx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ry=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if (mvg_class != (const char *) NULL)
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,exception);
if (draw_info->compliance != SVGCompliance)
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(¤t);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->fill_alpha*=opacity;
if (graphic_context[n]->fill.alpha != TransparentAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
else
graphic_context[n]->fill.alpha=ClampToQuantum(QuantumRange*
opacity);
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
graphic_context[n]->kerning=metrics.width*
StringToDouble(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,exception);
if (draw_info->compliance != SVGCompliance)
status=SetImageMask(image,CompositePixelMask,
graphic_context[n]->composite_mask,exception);
}
break;
}
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->fill_alpha*=opacity;
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
graphic_context[n]->stroke_alpha*=opacity;
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(draw_info->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageMask(image,WritePixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
char
name[MaxTextExtent];
const char
*clip_path;
GetNextToken(q,&q,extent,token);
(void) FormatLocaleString(name,MaxTextExtent,"%s",token);
clip_path=(const char *) GetValueFromSplayTree(macros,name);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,name,clip_path);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
segment.x1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.y1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.x2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.y2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (LocaleCompare(type,"radial") == 0)
{
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("mask",token) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
bounds;
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else
if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
stops[number_stops-1].offset=factor*StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*r;
r=q;
GetNextToken(r,&r,extent,token);
if (*token == ',')
GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
GetNextToken(r,&r,extent,token);
if (*token == ',')
GetNextToken(r,&r,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+4),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+4)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->stroke_alpha*=opacity;
if (graphic_context[n]->stroke.alpha != TransparentAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
else
graphic_context[n]->stroke.alpha=ClampToQuantum(QuantumRange*
opacity);
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
cursor=0.0;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
cursor=0.0;
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1,exception);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
GetNextToken(q,&q,extent,token);
point.x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
point.y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
primitive_info[j].text=(char *) NULL;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(double) (BezierQuantum*primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=StringToDouble(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0;
if (coordinates > (MaxBezierCoordinates/4))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
}
break;
}
default:
break;
}
if (coordinates > MaxBezierCoordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",token);
status=MagickFalse;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
primitive_type=UndefinedPrimitive;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(&mvg_info,token,exception);
if (coordinates == 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
if (status == MagickFalse)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (draw_info->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
if (stops != (StopInfo *) NULL)
stops=(StopInfo *) RelinquishMagickMemory(stops);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
  ExceptionInfo *exception)
{
  /*
    Public entry point: render the MVG primitives in draw_info onto image,
    starting at recursion depth 1.
  */
  MagickBooleanType
    status;

  status=RenderMVGContent(image,draw_info,1,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
% o pattern: the rendered pattern image is returned in this pointer; any
%      existing image is destroyed and replaced.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    property[MagickPathExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  /*
    The pattern's MVG path and its geometry are stored as image artifacts
    keyed by "<name>" and "<name>-geometry"; both must be present.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%s",name);
  path=GetImageArtifact(image,property);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,property);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  if (*pattern == (Image *) NULL)
    return(MagickFalse);  /* allocation failed; details are in exception */
  /*
    Start from a fully transparent canvas so only the rendered path shows.
  */
  (void) QueryColorCompliance("#000000ff",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill_pattern=NewImageList();
  clone_info->stroke_pattern=NewImageList();
  /*
    An optional "<name>-type" artifact selects the gradient type.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
  type=GetImageArtifact(image,property);
  if (type != (const char *) NULL)
    clone_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&clone_info->primitive,path);
  status=RenderMVGContent(*pattern,clone_info,1,exception);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  /*
    Release every per-thread PolygonInfo slot, then the set itself.
    Always returns NULL so callers can reassign in one statement.
  */
  register ssize_t
    i;

  ssize_t
    number_threads;

  assert(polygon_info != (PolygonInfo **) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
  {
    if (polygon_info[i] != (PolygonInfo *) NULL)
      polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
  }
  return((PolygonInfo **) RelinquishMagickMemory(polygon_info));
}
static PolygonInfo **AcquirePolygonThreadSet(
const PrimitiveInfo *primitive_info)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
return((PolygonInfo **) NULL);
(void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
polygon_info[i]=ConvertPathToPolygon(path_info);
if (polygon_info[i] == (PolygonInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
/*
  GetFillAlpha() computes the fill opacity (returned) and stroke opacity
  (*stroke_alpha) contributed by the polygon's edges at pixel (x,y).  `mid`
  is half the effective stroke width; `fill`/`fill_rule` select whether and
  how interior coverage is computed.  NOTE(review): this routine mutates
  polygon_info (edge scanline/highwater caches, DestroyEdge) and therefore
  requires a per-thread PolygonInfo when called in parallel.
*/
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_alpha)
{
  double
    alpha,
    beta,
    distance,
    subpath_alpha;

  PointInfo
    delta;

  register const PointInfo
    *q;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_alpha=0.0;
  subpath_alpha=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /* edges are y-sorted: once an edge starts below y, the rest do too */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /* edge is entirely above the current scanline; retire it */
        (void) DestroyEdge(polygon_info,(size_t) j);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    /* resume from the cached highwater mark for this edge */
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          /* first visit on this scanline: remember where we resumed */
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      /* beta = projection of (x,y)-q onto the segment direction */
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          /* nearest point is the segment start: squared distance to q */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              /* nearest point is the segment end: squared distance to q+1 */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /* interior projection: squared perpendicular distance */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x);
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          /* ghostlines (implicit closing edges) contribute no stroke */
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;
              else
                {
                  /* antialiased falloff at the stroke boundary */
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;
          continue;
        }
      /* NOTE(review): distance > 1.0 was already excluded above */
      if (distance > 1.0)
        continue;
      if (fabs(beta) < MagickEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        /* edge lies entirely to the left of x: count its crossing */
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* cross-product sign test: is (x,y) left of the directed segment? */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* even-odd rule: inside when the crossing count is odd */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_alpha);
}
/*
  DrawPolygonPrimitive() rasterizes a traced primitive (polygon, polyline,
  line, or point) onto image using per-scanline parallel rendering.  Fill
  and stroke coverage per pixel come from GetFillAlpha().
*/
static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    fill,
    status;

  double
    mid;

  PolygonInfo
    **magick_restrict polygon_info;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start_y,
    stop_y,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  /* nothing to rasterize with fewer than two coordinates */
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);
  /*
    Compute bounding box.
  */
  polygon_info=AcquirePolygonThreadSet(primitive_info);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  /* debug scaffold: compiled but never executed (if (0)) */
  DisableMSCWarning(4127)
  if (0)
    {
      status=DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
      if (status == MagickFalse)
        {
          polygon_info=DestroyPolygonThreadSet(polygon_info);
          return(status);
        }
    }
  RestoreMSCWarning
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin draw-polygon");
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
  /* mid = half the stroke width in device space */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  /* union of all edge bounding boxes, padded by the stroke half-width */
  bounds=polygon_info[0]->edges[0].bounds;
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  /* clamp the bounding box to the image extent */
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  /*
    NOTE(review): coordinates == 1 is unreachable here (the <= 1 early
    return above), so only number_edges == 0 selects this branch.
  */
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
      start_y=(ssize_t) ceil(bounds.y1-0.5);
      stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        ssize_t
          start_x,
          stop_x;

        /* another thread failed: skip remaining rows cheaply */
        if (status == MagickFalse)
          continue;
        start_x=(ssize_t) ceil(bounds.x1-0.5);
        stop_x=(ssize_t) floor(bounds.x2+0.5);
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        GetPixelInfo(image,&pixel);
        for ( ; x <= stop_x; x++)
        {
          /* only the single pixel nearest the primitive's point is set */
          if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
              (y == (ssize_t) ceil(primitive_info->point.y-0.5)))
            {
              GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
            }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          "    end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  start_y=(ssize_t) ceil(bounds.y1-0.5);
  stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    /* per-thread id picks a private PolygonInfo: GetFillAlpha() mutates it */
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      start_x,
      stop_x;

    if (status == MagickFalse)
      continue;
    start_x=(ssize_t) ceil(bounds.x1-0.5);
    stop_x=(ssize_t) floor(bounds.x2+0.5);
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      double
        fill_alpha,
        stroke_alpha;

      PixelInfo
        fill_color,
        stroke_color;

      /*
        Fill and/or stroke.
      */
      fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
        x,y,&stroke_alpha);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* hard threshold coverage when antialiasing is disabled */
          fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0;
          stroke_alpha=stroke_alpha > 0.25 ? 1.0 : 0.0;
        }
      /* composite fill first, then stroke over it */
      GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
      CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end draw-polygon");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  LogPrimitiveInfo() emits a DrawEvent trace of a primitive: a one-line
  summary for point-like primitives, or a per-coordinate dump (with
  open/closed subpath markers) for path-like primitives.
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  /* indexed by PaintMethod; "?" guards against unexpected values */
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    p,
    q,
    point;

  register ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  /*
    Path-like primitive: walk every coordinate until the terminator.
  */
  coordinates=0;
  p=primitive_info[0].point;
  /* q tracks the previous point so duplicates can be flagged */
  q.x=(-1.0);
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        /* start of a new subpath; p remembers its first point */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          "    begin open (%.20g)",(double) coordinates);
        p=point;
      }
    point=primitive_info[i].point;
    if ((fabs(q.x-point.x) >= MagickEpsilon) ||
        (fabs(q.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "      %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "      %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    /* subpath ended: "last" if it returned to its first point, else "open" */
    if ((fabs(p.x-point.x) >= MagickEpsilon) ||
        (fabs(p.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end open (%.20g)",
        (double) coordinates);
  }
}
/*
  DrawPrimitive() renders one graphic primitive on the image.  Alpha, color,
  image, point and text primitives are handled directly; every other
  primitive type (lines, polygons, paths, ...) is delegated to the polygon
  renderer, optionally with dash-pattern and stroke handling.

    o image: the image (modified in place).

    o draw_info: the drawing attributes (fill, stroke, paint method, affine
      transform, compliance mode, ...).

    o primitive_info: the primitive's type, paint method and coordinates.

    o exception: return any errors or warnings in this structure.

  Returns MagickTrue unless one of the sub-operations failed.
*/
MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickStatusType
    status;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  status=MagickTrue;
  /*
    A non-gray fill or stroke cannot be rendered on a grayscale image;
    promote the image to sRGB first.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
      (IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
    status=SetImageColorspace(image,sRGBColorspace,exception);
  if (draw_info->compliance == SVGCompliance)
    {
      /* SVG rendering honors the clipping and composite masks */
      status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
        exception);
      status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
        exception);
    }
  /* integer pixel location of the primitive's first point */
  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      /*
        Modify only the alpha channel, per the requested paint method.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          register Quantum
            *q;

          /* set the alpha of the single pixel at (x,y) */
          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel,
            target;

          /*
            Update the alpha of every pixel that fuzzily matches the color
            found at (x,y).  Note: x and y are reused as scan indices here.
          */
          (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          ChannelType
            channel_mask;

          PixelInfo
            target;

          /*
            Flood-fill alpha starting at (x,y).  FillToBorder fills until
            the border color is met rather than while the target matches.
          */
          (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          /* restrict painting to the alpha channel only */
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          (void) SetImageChannelMask(image,channel_mask);
          break;
        }
        case ResetMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel;

          /* set the alpha of every pixel in the image */
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ColorPrimitive:
    {
      /*
        Paint full pixel colors, per the requested paint method.
      */
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          register Quantum
            *q;

          /* paint the single pixel at (x,y) with the fill color */
          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetPixelInfo(image,&pixel);
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelViaPixelInfo(image,&pixel,q);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel,
            target;

          /* repaint every pixel that fuzzily matches the color at (x,y) */
          (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          PixelInfo
            target;

          /* flood-fill color starting at (x,y); see AlphaPrimitive above */
          (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          break;
        }
        case ResetMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel;

          /* repaint every pixel of the image with the fill color */
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ImagePrimitive:
    {
      AffineMatrix
        affine;

      char
        composite_geometry[MagickPathExtent];

      Image
        *composite_image,
        *composite_images;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      /*
        Composite an inline ("data:" URI) or on-disk image at (x,y),
        resized to the requested extent and positioned via the current
        gravity and affine transform.
      */
      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=AcquireImageInfo();
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_images=ReadInlineImage(clone_info,primitive_info->text,
          exception);
      else
        {
          (void) CopyMagickString(clone_info->filename,primitive_info->text,
            MagickPathExtent);
          composite_images=ReadImage(clone_info,exception);
        }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_images == (Image *) NULL)
        {
          status=0;
          break;
        }
      /* only the first image of a sequence is composited */
      composite_image=RemoveFirstImageFromList(&composite_images);
      composite_images=DestroyImageList(composite_images);
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
      y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          /*
            Resize image.
          */
          (void) FormatLocaleString(composite_geometry,MagickPathExtent,
            "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          (void) TransformImage(&composite_image,(char *) NULL,
            composite_geometry,exception);
        }
      if (composite_image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
          exception);
      if (draw_info->alpha != OpaqueAlpha)
        (void) SetImageAlpha(composite_image,draw_info->alpha,exception);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MagickPathExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      status&=DrawAffineImage(image,composite_image,&affine,exception);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      PixelInfo
        fill_color;

      register Quantum
        *q;

      /* composite the fill color over the single pixel at (x,y) */
      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (Quantum *) NULL)
        break;
      GetFillColor(draw_info,x,y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      char
        geometry[MagickPathExtent];

      DrawInfo
        *clone_info;

      /* delegate text rendering to the annotation engine */
      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info,exception);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      /*
        All remaining primitives are polygons/paths.
      */
      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
          (draw_info->stroke.alpha != (Quantum) TransparentAlpha))
        {
          /*
            Draw dash polygon.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          /* fill first with a stroke-less clone, then dash the outline */
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          status=DrawDashPolygon(draw_info,primitive_info,image,exception);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
          (draw_info->stroke_pattern != (Image *) NULL)))
        {
          double
            x,
            y;

          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          closed_path=primitive_info[0].closed_subpath;
          i=(ssize_t) primitive_info[0].coordinates;
          /* coincident endpoints also count as a closed path */
          x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
          y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
          if ((x < MagickEpsilon) && (y < MagickEpsilon))
            closed_path=MagickTrue;
          if ((((draw_info->linecap == RoundCap) ||
              (closed_path != MagickFalse)) &&
              (draw_info->linejoin == RoundJoin)) ||
              (primitive_info[i].primitive != UndefinedPrimitive))
            {
              /* polygon renderer handles these cases directly */
              status=DrawPolygonPrimitive(image,draw_info,primitive_info,
                exception);
              break;
            }
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          /* fill the interior, then stroke the outline separately */
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (draw_info->compliance == SVGCompliance)
    {
      /* remove the masks installed above */
      status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
      status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
%      MagickBooleanType DrawStrokePolygon(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
%
*/
/*
  DrawRoundLinecap() renders a tiny degenerate polygon at the given
  endpoint so the polygon renderer produces a rounded line cap there.
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  PrimitiveInfo
    cap[5];

  register ssize_t
    j;

  /*
    Seed four vertices from the endpoint, then nudge them apart by a few
    epsilons to form a minimal quad around the point.
  */
  j=0;
  while (j < 4)
    cap[j++]=(*primitive_info);
  cap[0].coordinates=4;
  cap[1].point.x+=2.0*MagickEpsilon;
  cap[2].point.x+=2.0*MagickEpsilon;
  cap[2].point.y+=2.0*MagickEpsilon;
  cap[3].point.y+=2.0*MagickEpsilon;
  cap[4].primitive=UndefinedPrimitive;  /* sentinel terminates the list */
  return(DrawPolygonPrimitive(image,draw_info,cap,exception));
}
/*
  DrawStrokePolygon() draws the stroke of a polygon: each subpath's stroke
  outline is traced (TraceStrokePolygon) and filled with the stroke color;
  open subpaths additionally get round line caps when requested.
*/
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  /*
    The stroke is rendered by FILLING the traced outline, so the clone
    uses the stroke color as fill and disables its own stroke.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;  /* a lone point has no stroke outline */
    stroke_polygon=TraceStrokePolygon(image,draw_info,p);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    /* q is the subpath's last vertex */
    q=p+p->coordinates-1;
    closed_path=p->closed_subpath;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        /* round caps at both ends of an open subpath */
        status&=DrawRoundLinecap(image,draw_info,p,exception);
        status&=DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
/*
  GetAffineMatrix() resets the given affine matrix to the identity
  transform: unit scale on both axes, all other coefficients zero.
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  /* zero everything, then restore the unit diagonal */
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sy=1.0;
  affine_matrix->sx=1.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
/*
  GetDrawInfo() initializes draw_info: built-in defaults first, then
  settings inherited from image_info (font, density, pointsize, ...), and
  finally any per-option overrides from image_info's option table.
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* defaults: opaque black fill, fully transparent white stroke */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /* inherit settings from the (cloned) image info */
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /* per-option overrides from the image info's option table */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* accept a symbolic weight name, else a bare number */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the number of combinations of (n,k), i.e. the
%  binomial coefficient n!/(k!(n-k)!).
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n:
%
% o k:
%
%
*/
/*
  Permutate() computes n!/(k!*(n-k)!) -- the binomial coefficient -- as a
  double, by multiplying the factors (k+1..n) and dividing by (1..n-k).
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    result;

  register ssize_t
    j;

  result=1.0;
  /* numerator: n!/k! */
  j=k+1;
  while (j <= n)
    result*=(double) j++;
  /* denominator: (n-k)! */
  j=1;
  while (j <= (n-k))
    result/=(double) j++;
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
/*
  TraceArc() traces an arc as a segment of the ellipse centered midway
  between the two given corner points, with radii spanning half the
  bounding box in each axis.
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    mid,
    span;

  mid.x=0.5*(end.x+start.x);
  mid.y=0.5*(end.y+start.y);
  span.x=fabs(mid.x-start.x);
  span.y=fabs(mid.y-start.y);
  return(TraceEllipse(mvg_info,mid,span,degrees));
}
/*
  TraceArcPath() converts an SVG-style elliptical arc segment (endpoints,
  radii, x-axis rotation angle, large-arc and sweep flags) into a series of
  cubic Bezier segments appended at mvg_info's current offset.
  NOTE(review): this appears to follow the SVG endpoint-to-center arc
  parameterization (SVG 1.1 appendix F.6) -- confirm against the spec.
*/
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickBooleanType
    status;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* degenerate cases: coincident endpoints or a zero radius */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  /* rotate into the ellipse's own coordinate frame */
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  if (delta > 1.0)
    {
      /* radii too small to span the endpoints; scale them up uniformly */
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  /* start angle and signed sweep extent */
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /* one Bezier segment per (at most) quarter turn */
  arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
    MagickEpsilon))));
  status=MagickTrue;
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /* each segment starts where the previous one ended */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    /* map control points back from the ellipse frame to user space */
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;  /* pin the last segment exactly on the endpoint */
    status&=TraceBezier(mvg_info,4);
    /* CheckPrimitiveExtent inside TraceBezier may relocate the buffer */
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* back-fill the primitive type over every emitted vertex */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(status);
}
/*
  TraceBezier() renders a Bezier curve defined by number_coordinates
  control points (already stored at mvg_info's current offset) as a
  polyline of sampled points using the Bernstein/binomial expansion.
  Returns MagickFalse if the primitive extent cannot be grown or a point
  cannot be emitted.

  Fix: the scratch buffers (coefficients, points) were leaked on the
  early MagickFalse returns from TracePoint; they are now released on
  every exit path.
*/
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  /* scale the sampling density by the extent of the control polygon */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  quantum=(size_t) MagickMin((double) quantum/number_coordinates,
    (double) BezierQuantum);
  control_points=quantum*number_coordinates;
  if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
    return(MagickFalse);
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  coefficients=(double *) AcquireQuantumMemory((size_t)
    number_coordinates,sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory((size_t) control_points,
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Compute bezier points.
  */
  end=primitive_info[number_coordinates-1].point;
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    /* Bernstein basis: alpha walks through C(n-1,j)*w^j*(1-w)^(n-1-j) */
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    if (TracePoint(p,points[i]) == MagickFalse)
      {
        /* release scratch buffers on failure (previously leaked) */
        points=(PointInfo *) RelinquishMagickMemory(points);
        coefficients=(double *) RelinquishMagickMemory(coefficients);
        return(MagickFalse);
      }
    p+=p->coordinates;
  }
  if (TracePoint(p,end) == MagickFalse)
    {
      /* release scratch buffers on failure (previously leaked) */
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* back-fill the primitive type over every emitted vertex */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
  return(MagickTrue);
}
/*
  TraceCircle() traces a circle as a full 360-degree ellipse centered on
  the start point, with both radii equal to the distance from the start
  point to the perimeter point.
*/
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  double
    distance,
    dx,
    dy;

  PointInfo
    arc,
    radii;

  dx=end.x-start.x;
  dy=end.y-start.y;
  distance=hypot(dx,dy);
  radii.x=distance;
  radii.y=distance;
  arc.x=0.0;
  arc.y=360.0;
  return(TraceEllipse(mvg_info,start,radii,arc));
}
/*
  TraceEllipse() appends a polygonal approximation of an elliptical arc
  (center, radii, start/end angles in degrees) at mvg_info's current
  offset.  Returns MagickFalse if the required primitive extent cannot be
  allocated.
*/
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  double
    coordinates,
    delta,
    step,
    x,
    y;

  PointInfo
    angle,
    point;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Ellipses are just short segmented polys.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(MagickTrue);  /* degenerate ellipse: nothing to trace */
  /* pick an angular step fine enough for the larger radius */
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
  angle.x=DegreesToRadians(arc.x);
  /* normalize the end angle so the sweep is non-negative */
  y=arc.y;
  while (y < arc.x)
    y+=360.0;
  angle.y=DegreesToRadians(y);
  coordinates=ceil((angle.y-angle.x)/step+1.0);
  if ((coordinates > (double) SSIZE_MAX) ||
      (coordinates > (double) GetMaxMemoryRequest()))
    {
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
    return(MagickFalse);
  /* CheckPrimitiveExtent may relocate the primitive buffer */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    if (TracePoint(p,point) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  /* always land exactly on the end angle */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* a full sweep ends where it began: mark the subpath closed */
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  /* back-fill the primitive type over every emitted vertex */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
/*
  TraceLine() emits a two-vertex line primitive from start to end; if the
  two endpoints coincide it collapses to a single point primitive.
*/
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  MagickBooleanType
    degenerate;

  /* emit the starting vertex */
  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  degenerate=((fabs(start.x-end.x) < MagickEpsilon) &&
    (fabs(start.y-end.y) < MagickEpsilon)) ? MagickTrue : MagickFalse;
  if (degenerate != MagickFalse)
    {
      /* zero-length line: degrade to a point */
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return(MagickTrue);
    }
  if (TracePoint(primitive_info+1,end) == MagickFalse)
    return(MagickFalse);
  primitive_info[1].primitive=primitive_info[0].primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}
static size_t TracePath(MVGInfo *mvg_info,const char *path,
  ExceptionInfo *exception)
{
  /*
    Parse an SVG-style path string (moveto, lineto, cubic/quadratic Bezier,
    elliptical arc, close-path commands) and append the traced point
    primitives at mvg_info->offset.  Uppercase command letters use absolute
    coordinates, lowercase use coordinates relative to the current point.
    Returns the total number of coordinates traced, or 0 on failure.
  */
  char
    *next_token,
    token[MagickPathExtent];
  const char
    *p;
  double
    x,
    y;
  int
    attribute,
    last_attribute;
  MagickBooleanType
    status;
  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};
  PrimitiveInfo
    *primitive_info;
  PrimitiveType
    primitive_type;
  register PrimitiveInfo
    *q;
  register ssize_t
    i;
  size_t
    number_coordinates,
    z_count;
  ssize_t
    subpath_offset;
  /*
    subpath_offset marks the head of the current subpath so its coordinate
    count can be patched once the subpath ends; q tracks the next free slot.
  */
  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;
        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;
        PointInfo
          arc = {0.0, 0.0};
        /*
          Elliptical arc.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          arc.x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          arc.y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          angle=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /*
          Horizontal line to.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to.  A moveto that is not the first command ends the current
          subpath: patch the subpath head with its coordinate count before
          starting a new one.
        */
        if (mvg_info->offset != subpath_offset)
          {
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          /* first pair is the subpath start ('Z' returns here); extra pairs
             are implicit lineto commands per the SVG path grammar */
          if (i == 0)
            start=point;
          i++;
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Cubic Bézier curve (smooth: first control point is the reflection
          of the previous curve's second control point).
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          /* per SVG: if the previous command was not a cubic, the reflected
             control point degenerates to the current point */
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Quadratic Bézier curve (smooth: control point is the reflection of
          the previous curve's control point).
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Line to (vertical).
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path: line back to the subpath start, patch the subpath head
          and mark it closed.
        */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
          return(0);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        if (TracePoint(q,point) == MagickFalse)
          return(0);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        /* unknown command letter: report and continue scanning */
        ThrowPointExpectedException(token,exception);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(0);
  /*
    Flush the final (possibly unclosed) subpath, then walk backwards tagging
    every traced vertex with the path's primitive type.  Multiple 'Z'
    subpaths switch the fill method so holes render correctly.
  */
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;
  return(number_coordinates);
}
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  PointInfo
    vertex[5];

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  /*
    Trace the closed outline of an axis-aligned rectangle whose opposite
    corners are start and end.  A degenerate rectangle (zero width or zero
    height) yields no coordinates.
  */
  if ((fabs(start.x-end.x) < MagickEpsilon) ||
      (fabs(start.y-end.y) < MagickEpsilon))
    {
      primitive_info->coordinates=0;
      return(MagickTrue);
    }
  /*
    Visit the four corners in order and return to the start to close.
  */
  vertex[0]=start;
  vertex[1].x=start.x;
  vertex[1].y=end.y;
  vertex[2]=end;
  vertex[3].x=end.x;
  vertex[3].y=start.y;
  vertex[4]=start;
  q=primitive_info;
  for (i=0; i < 5; i++)
  {
    if (TracePoint(q,vertex[i]) == MagickFalse)
      return(MagickFalse);
    q+=q->coordinates;
  }
  primitive_info->coordinates=(size_t) (q-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  /*
    Propagate the primitive type from the subpath head to each vertex.
  */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    q->primitive=primitive_info->primitive;
    q--;
  }
  return(MagickTrue);
}
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  PointInfo
    corner[4],
    degrees[4],
    segment;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  ssize_t
    offset;

  /*
    Trace a rectangle with elliptically rounded corners: four quarter
    ellipses traced in order, then a final point back to the first vertex
    to close the outline.  A degenerate rectangle yields no coordinates.
  */
  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return(MagickTrue);
    }
  /*
    Clamp the corner radii so opposing arcs cannot overlap.
  */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /*
    Corner-ellipse centers and angular ranges, in drawing order: top-right,
    bottom-right, bottom-left, top-left.
  */
  corner[0].x=start.x+segment.x-arc.x;
  corner[0].y=start.y+arc.y;
  degrees[0].x=270.0;
  degrees[0].y=360.0;
  corner[1].x=start.x+segment.x-arc.x;
  corner[1].y=start.y+segment.y-arc.y;
  degrees[1].x=0.0;
  degrees[1].y=90.0;
  corner[2].x=start.x+arc.x;
  corner[2].y=start.y+segment.y-arc.y;
  degrees[2].x=90.0;
  degrees[2].y=180.0;
  corner[3].x=start.x+arc.x;
  corner[3].y=start.y+arc.y;
  degrees[3].x=180.0;
  degrees[3].y=270.0;
  for (i=0; i < 4; i++)
  {
    if (TraceEllipse(mvg_info,corner[i],arc,degrees[i]) == MagickFalse)
      return(MagickFalse);
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
  }
  /*
    Close the outline by returning to the first traced vertex, then rewind
    the offset and patch the subpath head.
  */
  if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    distance;

  register double
    dx,
    dy;

  register ssize_t
    i;

  ssize_t
    j;

  /*
    Extend both endpoints of an open stroke outward by `offset` along the
    stroke direction, producing square line caps.  Each scan loop searches
    for the nearest vertex that is measurably distinct from the endpoint so
    the extension direction is well defined.
  */
  dx=0.0;
  dy=0.0;
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  distance=hypot((double) dx,(double) dy);
  /*
    Guard: if every vertex coincides with the endpoint, dx==dy==0 and the
    original code divided by zero (NaN coordinates).  Leave degenerate
    endpoints unextended instead.
  */
  if (distance >= MagickEpsilon)
    {
      primitive_info[0].point.x=(double) (primitive_info[i].point.x+
        dx*(distance+offset)/distance);
      primitive_info[0].point.y=(double) (primitive_info[i].point.y+
        dy*(distance+offset)/distance);
    }
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  distance=hypot((double) dx,(double) dy);
  /*
    Guard also prevents the out-of-bounds read primitive_info[-1] that
    occurred when the backward scan exhausted (j == -1) on a degenerate
    polygon: in that case distance is 0 and the block is skipped.
  */
  if (distance >= MagickEpsilon)
    {
      primitive_info[number_vertices-1].point.x=(double)
        (primitive_info[j].point.x+dx*(distance+offset)/distance);
      primitive_info[number_vertices-1].point.y=(double)
        (primitive_info[j].point.y+dy*(distance+offset)/distance);
    }
  return(MagickTrue);
}
static PrimitiveInfo *TraceStrokePolygon(const Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
/*
  Grow the two offset-path buffers by `pad` slots; on overflow or allocation
  failure, release everything and return NULL from the enclosing function.
*/
#define CheckPathExtent(pad) \
  if ((q+(pad)) >= (ssize_t) max_strokes) \
    { \
      if (~max_strokes < (pad)) \
        { \
          path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
          path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
        } \
      else \
        { \
          max_strokes+=(pad); \
          path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes, \
            sizeof(*path_p)); \
          path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes, \
            sizeof(*path_q)); \
        } \
      if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) \
        { \
          if (path_p != (PointInfo *) NULL) \
            path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
          if (path_q != (PointInfo *) NULL) \
            path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
          polygon_primitive=(PrimitiveInfo *) \
            RelinquishMagickMemory(polygon_primitive); \
          return((PrimitiveInfo *) NULL); \
        } \
    }

  typedef struct _LineSegment
  {
    double
      p,
      q;
  } LineSegment;

  double
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  LineSegment
    dx = {0,0},
    dy = {0,0},
    inverse_slope = {0,0},
    slope = {0,0},
    theta = {0,0};

  MagickBooleanType
    closed_path;

  PointInfo
    box_p[5],
    box_q[5],
    center,
    offset,
    *path_p,
    *path_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  register ssize_t
    i;

  size_t
    arc_segments,
    max_strokes,
    number_vertices;

  ssize_t
    j,
    n,
    p,
    q;

  /*
    Allocate paths.  path_p and path_q hold the two offset outlines (one on
    each side of the centerline) that together form the stroked polygon.
  */
  number_vertices=primitive_info->coordinates;
  max_strokes=2*number_vertices+6*BezierQuantum+360;
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  if (polygon_primitive == (PrimitiveInfo *) NULL)
    return((PrimitiveInfo *) NULL);
  (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
    sizeof(*polygon_primitive));
  closed_path=primitive_info[0].closed_subpath;
  if (((draw_info->linejoin == RoundJoin) ||
       (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
    {
      /* repeat the second vertex so the closing join is also generated */
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p.
  */
  dx.p=0.0;
  dy.p=0.0;
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    {
      if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
        {
          /*
            Zero length subpath.
          */
          stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
            sizeof(*stroke_polygon));
          stroke_polygon[0]=polygon_primitive[0];
          stroke_polygon[0].coordinates=0;
          polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
            polygon_primitive);
          return(stroke_polygon);
        }
      n=(ssize_t) number_vertices-1L;
    }
  path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_p));
  if (path_p == (PointInfo *) NULL)
    {
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_q));
  if (path_q == (PointInfo *) NULL)
    {
      path_p=(PointInfo *) RelinquishMagickMemory(path_p);
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  /*
    Near-vertical and near-horizontal segments get clamped slopes
    (+-1/MagickEpsilon) instead of dividing by a vanishing delta.
  */
  slope.p=0.0;
  inverse_slope.p=0.0;
  if (fabs(dx.p) < MagickEpsilon)
    {
      if (dx.p >= 0.0)
        slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
      else
        slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
    }
  else
    if (fabs(dy.p) < MagickEpsilon)
      {
        if (dy.p >= 0.0)
          inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      {
        slope.p=dy.p/dx.p;
        inverse_slope.p=(-1.0/slope.p);
      }
  /* mid is half the stroke width in device space; miterlimit is compared
     against squared distances below */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
  if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
    (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
  offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
  offset.y=(double) (offset.x*inverse_slope.p);
  if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
    {
      box_p[0].x=polygon_primitive[0].point.x-offset.x;
      box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
      box_p[1].x=polygon_primitive[n].point.x-offset.x;
      box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
      box_q[0].x=polygon_primitive[0].point.x+offset.x;
      box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
      box_q[1].x=polygon_primitive[n].point.x+offset.x;
      box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
    }
  else
    {
      box_p[0].x=polygon_primitive[0].point.x+offset.x;
      box_p[0].y=polygon_primitive[0].point.y+offset.y;
      box_p[1].x=polygon_primitive[n].point.x+offset.x;
      box_p[1].y=polygon_primitive[n].point.y+offset.y;
      box_q[0].x=polygon_primitive[0].point.x-offset.x;
      box_q[0].y=polygon_primitive[0].point.y-offset.y;
      box_q[1].x=polygon_primitive[n].point.x-offset.x;
      box_q[1].y=polygon_primitive[n].point.y-offset.y;
    }
  /*
    Create strokes for the line join attribute: bevel, miter, round.
  */
  p=0;
  q=0;
  path_q[p++]=box_q[0];
  path_p[q++]=box_p[0];
  for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
  {
    /*
      Compute the slope for this line segment, q.
    */
    dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
    dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
    dot_product=dx.q*dx.q+dy.q*dy.q;
    /* skip segments shorter than half a pixel (squared length < 0.25) */
    if (dot_product < 0.25)
      continue;
    slope.q=0.0;
    inverse_slope.q=0.0;
    if (fabs(dx.q) < MagickEpsilon)
      {
        if (dx.q >= 0.0)
          slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      if (fabs(dy.q) < MagickEpsilon)
        {
          if (dy.q >= 0.0)
            inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
          else
            inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
        }
      else
        {
          slope.q=dy.q/dx.q;
          inverse_slope.q=(-1.0/slope.q);
        }
    offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
    offset.y=(double) (offset.x*inverse_slope.q);
    dot_product=dy.q*offset.x-dx.q*offset.y;
    if (dot_product > 0.0)
      {
        box_p[2].x=polygon_primitive[n].point.x-offset.x;
        box_p[2].y=polygon_primitive[n].point.y-offset.y;
        box_p[3].x=polygon_primitive[i].point.x-offset.x;
        box_p[3].y=polygon_primitive[i].point.y-offset.y;
        box_q[2].x=polygon_primitive[n].point.x+offset.x;
        box_q[2].y=polygon_primitive[n].point.y+offset.y;
        box_q[3].x=polygon_primitive[i].point.x+offset.x;
        box_q[3].y=polygon_primitive[i].point.y+offset.y;
      }
    else
      {
        box_p[2].x=polygon_primitive[n].point.x+offset.x;
        box_p[2].y=polygon_primitive[n].point.y+offset.y;
        box_p[3].x=polygon_primitive[i].point.x+offset.x;
        box_p[3].y=polygon_primitive[i].point.y+offset.y;
        box_q[2].x=polygon_primitive[n].point.x-offset.x;
        box_q[2].y=polygon_primitive[n].point.y-offset.y;
        box_q[3].x=polygon_primitive[i].point.x-offset.x;
        box_q[3].y=polygon_primitive[i].point.y-offset.y;
      }
    /* box_[pq][4] is the intersection of the two offset edges; parallel
       edges (equal slopes) reuse the previous edge endpoint */
    if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
      {
        box_p[4]=box_p[1];
        box_q[4]=box_q[1];
      }
    else
      {
        box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
          box_p[3].y)/(slope.p-slope.q));
        box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
        box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
          box_q[3].y)/(slope.p-slope.q));
        box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
      }
    CheckPathExtent(6*BezierQuantum+360);
    /* sign of the cross product selects which side is the outer join */
    dot_product=dx.q*dy.p-dx.p*dy.q;
    if (dot_product <= 0.0)
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_q[q++]=box_q[1];
          path_q[q++]=box_q[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          /* approximate the round join with short chords around the vertex */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
          theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
          if (theta.q < theta.p)
            theta.q+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_q[q].x=box_q[1].x;
          path_q[q].y=box_q[1].y;
          q++;
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_q[q].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_q[q].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            q++;
          }
          path_q[q++]=box_q[2];
          break;
        }
        default:
          break;
      }
    else
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_p[p++]=box_p[1];
          path_p[p++]=box_p[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          center=polygon_primitive[n].point;
          theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
          theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
          if (theta.p < theta.q)
            theta.p+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_p[p++]=box_p[1];
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_p[p].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_p[p].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            p++;
          }
          path_p[p++]=box_p[2];
          break;
        }
        default:
          break;
      }
    /* advance: current segment becomes the previous one */
    slope.p=slope.q;
    inverse_slope.p=inverse_slope.q;
    box_p[0]=box_p[2];
    box_p[1]=box_p[3];
    box_q[0]=box_q[2];
    box_q[1]=box_q[3];
    dx.p=dx.q;
    dy.p=dy.q;
    n=i;
  }
  path_p[p++]=box_p[1];
  path_q[q++]=box_q[1];
  /*
    Trace stroked polygon: emit path_p forward, then path_q reversed, with
    closing vertices when the source subpath was closed.
  */
  stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
  if (stroke_polygon != (PrimitiveInfo *) NULL)
    {
      for (i=0; i < (ssize_t) p; i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_p[i];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[0].point;
          i++;
        }
      for ( ; i < (ssize_t) (p+q+closed_path); i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
          i++;
        }
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[0].point;
      i++;
      stroke_polygon[i].primitive=UndefinedPrimitive;
      stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
    }
  path_p=(PointInfo *) RelinquishMagickMemory(path_p);
  path_q=(PointInfo *) RelinquishMagickMemory(path_q);
  polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
  return(stroke_polygon);
}
|
fixed_size_vector.h | // -----------------------------------------------------------------------------
//
// Copyright (C) 2021 CERN & University of Surrey for the benefit of the
// BioDynaMo collaboration. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef CORE_CONTAINER_FIXED_SIZE_VECTOR_H_
#define CORE_CONTAINER_FIXED_SIZE_VECTOR_H_
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <initializer_list>
namespace bdm {
/// Vector with fixed number of elements == Array with push_back function that
/// keeps track of its size
/// NB: No bounds checking. Do not push_back more often than the number of
/// maximum elements given by the template parameter N
template <typename T, std::size_t N>
class FixedSizeVector {
 public:
  FixedSizeVector() {}
  /// Constructor which accepts an std::initiliazer_list to set
  /// the array's content.
  /// \param l an initializer list (must contain at most N elements)
  constexpr FixedSizeVector(const std::initializer_list<T>& l) {
    assert(l.size() <= N);
    auto it = l.begin();
    // Copy only the elements actually present in the list.  Iterating up to
    // N (as before) dereferenced the iterator past l.end() whenever
    // l.size() < N, which is undefined behavior.
    for (size_t i = 0; i < l.size(); i++) {
      data_[i] = *(it++);
    }
    size_ = l.size();
  }
  /// Number of elements currently stored (<= N).
  size_t size() const { return size_; }  // NOLINT
  const T& operator[](size_t idx) const { return data_[idx]; }
  T& operator[](size_t idx) { return data_[idx]; }
  /// Element-wise equality; vectors of different size are never equal.
  bool operator==(const FixedSizeVector& other) const {
    if (size_ != other.size_) {
      return false;
    }
    for (size_t i = 0; i < std::min(size_, other.size_); i++) {
      if (data_[i] != other.data_[i]) {
        return false;
      }
    }
    return true;
  }
  /// Pre-increments every one of the N slots (not just the first size_).
  FixedSizeVector& operator++() {
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      ++data_[i];
    }
    return *this;
  }
  void clear() { size_ = 0; }       // NOLINT
  /// Appends value; no bounds checking beyond the assert (see class docs).
  void push_back(const T& value) {  // NOLINT
    assert(size_ < N);
    data_[size_++] = value;
  }
  const T* data() const { return data_; }
  const T* begin() const { return &(data_[0]); }   // NOLINT
  const T* end() const { return &(data_[size_]); }  // NOLINT
  T* begin() { return &(data_[0]); }   // NOLINT
  T* end() { return &(data_[size_]); }  // NOLINT
 private:
  T data_[N];
  std::size_t size_ = 0;
};
} // namespace bdm
#endif // CORE_CONTAINER_FIXED_SIZE_VECTOR_H_
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.