/*
  NOTE(review): the following lines are dataset-extraction metadata, not part
  of the original source file (ImageMagick coders/dds.c); wrapped in a comment
  so they no longer sit as bare text ahead of the C source:
    source stringlengths 3 92
    c stringlengths 26 2.25M
    dds.c
*/
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD DDDD SSSSS % % D D D D SS % % D D D D SSS % % D D D D SS % % DDDD DDDD SSSSS % % % % % % Read/Write Microsoft Direct Draw Surface Image Format % % % % Software Design % % Bianca van Schaik % % March 2008 % % Dirk Lemstra % % September 2013 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/module.h" #include "MagickCore/transform.h" /* Definitions */ #define DDSD_CAPS 0x00000001 #define DDSD_HEIGHT 0x00000002 #define DDSD_WIDTH 0x00000004 #define DDSD_PITCH 0x00000008 #define DDSD_PIXELFORMAT 0x00001000 #define DDSD_MIPMAPCOUNT 0x00020000 #define DDSD_LINEARSIZE 0x00080000 #define DDSD_DEPTH 0x00800000 #define DDPF_ALPHAPIXELS 0x00000001 #define DDPF_FOURCC 0x00000004 #define DDPF_RGB 0x00000040 #define DDPF_LUMINANCE 0x00020000 #define FOURCC_DXT1 0x31545844 #define FOURCC_DXT3 0x33545844 #define FOURCC_DXT5 0x35545844 #define DDSCAPS_COMPLEX 0x00000008 #define DDSCAPS_TEXTURE 0x00001000 #define DDSCAPS_MIPMAP 0x00400000 #define DDSCAPS2_CUBEMAP 0x00000200 #define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400 #define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800 #define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000 #define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000 #define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000 #define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000 #define DDSCAPS2_VOLUME 0x00200000 #ifndef SIZE_MAX #define SIZE_MAX ((size_t) -1) #endif /* Structure declarations. 
*/ typedef struct _DDSPixelFormat { size_t flags, fourcc, rgb_bitcount, r_bitmask, g_bitmask, b_bitmask, alpha_bitmask; } DDSPixelFormat; typedef struct _DDSInfo { size_t flags, height, width, pitchOrLinearSize, depth, mipmapcount, ddscaps1, ddscaps2; DDSPixelFormat pixelformat; } DDSInfo; typedef struct _DDSColors { unsigned char r[4], g[4], b[4], a[4]; } DDSColors; typedef struct _DDSVector4 { float x, y, z, w; } DDSVector4; typedef struct _DDSVector3 { float x, y, z; } DDSVector3; typedef struct _DDSSourceBlock { unsigned char start, end, error; } DDSSourceBlock; typedef struct _DDSSingleColorLookup { DDSSourceBlock sources[2]; } DDSSingleColorLookup; typedef MagickBooleanType DDSDecoder(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType, ExceptionInfo *); typedef MagickBooleanType DDSPixelDecoder(Image *,DDSInfo *,ExceptionInfo *); static const DDSSingleColorLookup DDSLookup_5_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 1 } } }, { { { 0, 0, 2 }, { 0, 1, 0 } } }, { { { 0, 0, 3 }, { 0, 1, 1 } } }, { { { 0, 0, 4 }, { 0, 2, 1 } } }, { { { 1, 0, 3 }, { 0, 2, 0 } } }, { { { 1, 0, 2 }, { 0, 2, 1 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 1, 2, 1 } } }, { { { 1, 0, 2 }, { 1, 2, 0 } } }, { { { 1, 0, 3 }, { 0, 4, 0 } } }, { { { 1, 0, 4 }, { 0, 5, 1 } } }, { { { 2, 0, 3 }, { 0, 5, 0 } } }, { { { 2, 0, 2 }, { 0, 5, 1 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 2, 3, 1 } } }, { { { 2, 0, 2 }, { 2, 3, 0 } } }, { { { 2, 0, 3 }, { 0, 7, 0 } } }, { { { 2, 0, 4 }, { 1, 6, 1 } } }, { { { 3, 0, 3 }, { 1, 6, 0 } } }, { { { 3, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 2 }, { 0, 10, 1 } } }, { { { 3, 0, 3 }, { 0, 10, 0 } } }, { { { 3, 0, 4 }, { 2, 7, 1 } } }, { { { 4, 0, 4 }, { 2, 7, 0 } } }, { { { 4, 0, 3 }, { 0, 11, 0 } } }, { { { 4, 0, 2 }, { 1, 10, 1 } } 
}, { { { 4, 0, 1 }, { 1, 10, 0 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 1 } } }, { { { 4, 0, 2 }, { 0, 13, 0 } } }, { { { 4, 0, 3 }, { 0, 13, 1 } } }, { { { 4, 0, 4 }, { 0, 14, 1 } } }, { { { 5, 0, 3 }, { 0, 14, 0 } } }, { { { 5, 0, 2 }, { 2, 11, 1 } } }, { { { 5, 0, 1 }, { 2, 11, 0 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 1, 14, 1 } } }, { { { 5, 0, 2 }, { 1, 14, 0 } } }, { { { 5, 0, 3 }, { 0, 16, 0 } } }, { { { 5, 0, 4 }, { 0, 17, 1 } } }, { { { 6, 0, 3 }, { 0, 17, 0 } } }, { { { 6, 0, 2 }, { 0, 17, 1 } } }, { { { 6, 0, 1 }, { 0, 18, 1 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 2, 15, 1 } } }, { { { 6, 0, 2 }, { 2, 15, 0 } } }, { { { 6, 0, 3 }, { 0, 19, 0 } } }, { { { 6, 0, 4 }, { 1, 18, 1 } } }, { { { 7, 0, 3 }, { 1, 18, 0 } } }, { { { 7, 0, 2 }, { 0, 20, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 2 }, { 0, 22, 1 } } }, { { { 7, 0, 3 }, { 0, 22, 0 } } }, { { { 7, 0, 4 }, { 2, 19, 1 } } }, { { { 8, 0, 4 }, { 2, 19, 0 } } }, { { { 8, 0, 3 }, { 0, 23, 0 } } }, { { { 8, 0, 2 }, { 1, 22, 1 } } }, { { { 8, 0, 1 }, { 1, 22, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 1 } } }, { { { 8, 0, 2 }, { 0, 25, 0 } } }, { { { 8, 0, 3 }, { 0, 25, 1 } } }, { { { 8, 0, 4 }, { 0, 26, 1 } } }, { { { 9, 0, 3 }, { 0, 26, 0 } } }, { { { 9, 0, 2 }, { 2, 23, 1 } } }, { { { 9, 0, 1 }, { 2, 23, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 1, 26, 1 } } }, { { { 9, 0, 2 }, { 1, 26, 0 } } }, { { { 9, 0, 3 }, { 0, 28, 0 } } }, { { { 9, 0, 4 }, { 0, 29, 1 } } }, { { { 10, 0, 3 }, { 0, 29, 0 } } }, { { { 10, 0, 2 }, { 0, 29, 1 } } }, { { { 10, 0, 1 }, { 0, 30, 1 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 2, 27, 1 } } }, { { { 10, 0, 2 }, { 2, 27, 0 } } }, { { { 10, 0, 3 }, { 0, 31, 0 } } }, { { { 10, 0, 4 }, { 1, 30, 1 } } }, { { { 11, 0, 3 }, { 1, 30, 0 } } }, { { { 11, 0, 2 }, { 4, 24, 
0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 0 }, { 1, 31, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 2 }, { 2, 30, 1 } } }, { { { 11, 0, 3 }, { 2, 30, 0 } } }, { { { 11, 0, 4 }, { 2, 31, 1 } } }, { { { 12, 0, 4 }, { 2, 31, 0 } } }, { { { 12, 0, 3 }, { 4, 27, 0 } } }, { { { 12, 0, 2 }, { 3, 30, 1 } } }, { { { 12, 0, 1 }, { 3, 30, 0 } } }, { { { 12, 0, 0 }, { 4, 28, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 1 } } }, { { { 12, 0, 2 }, { 3, 31, 0 } } }, { { { 12, 0, 3 }, { 3, 31, 1 } } }, { { { 12, 0, 4 }, { 4, 30, 1 } } }, { { { 13, 0, 3 }, { 4, 30, 0 } } }, { { { 13, 0, 2 }, { 6, 27, 1 } } }, { { { 13, 0, 1 }, { 6, 27, 0 } } }, { { { 13, 0, 0 }, { 4, 31, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 1 } } }, { { { 13, 0, 2 }, { 5, 30, 0 } } }, { { { 13, 0, 3 }, { 8, 24, 0 } } }, { { { 13, 0, 4 }, { 5, 31, 1 } } }, { { { 14, 0, 3 }, { 5, 31, 0 } } }, { { { 14, 0, 2 }, { 5, 31, 1 } } }, { { { 14, 0, 1 }, { 6, 30, 1 } } }, { { { 14, 0, 0 }, { 6, 30, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 1 } } }, { { { 14, 0, 2 }, { 6, 31, 0 } } }, { { { 14, 0, 3 }, { 8, 27, 0 } } }, { { { 14, 0, 4 }, { 7, 30, 1 } } }, { { { 15, 0, 3 }, { 7, 30, 0 } } }, { { { 15, 0, 2 }, { 8, 28, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 0 }, { 7, 31, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 2 }, { 8, 30, 1 } } }, { { { 15, 0, 3 }, { 8, 30, 0 } } }, { { { 15, 0, 4 }, { 10, 27, 1 } } }, { { { 16, 0, 4 }, { 10, 27, 0 } } }, { { { 16, 0, 3 }, { 8, 31, 0 } } }, { { { 16, 0, 2 }, { 9, 30, 1 } } }, { { { 16, 0, 1 }, { 9, 30, 0 } } }, { { { 16, 0, 0 }, { 12, 24, 0 } } }, { { { 16, 0, 1 }, { 9, 31, 1 } } }, { { { 16, 0, 2 }, { 9, 31, 0 } } }, { { { 16, 0, 3 }, { 9, 31, 1 } } }, { { { 16, 0, 4 }, { 10, 30, 1 } } }, { { { 17, 0, 3 }, { 10, 30, 0 } } }, { { { 17, 0, 2 }, { 10, 31, 1 } } }, { { { 17, 0, 1 }, { 10, 31, 0 } } }, { { { 17, 0, 0 }, { 12, 27, 0 } } }, { { { 17, 0, 1 }, { 11, 30, 1 } } }, { { { 17, 0, 2 }, { 11, 30, 0 } } }, { { { 17, 0, 3 }, { 12, 28, 0 } } }, 
{ { { 17, 0, 4 }, { 11, 31, 1 } } }, { { { 18, 0, 3 }, { 11, 31, 0 } } }, { { { 18, 0, 2 }, { 11, 31, 1 } } }, { { { 18, 0, 1 }, { 12, 30, 1 } } }, { { { 18, 0, 0 }, { 12, 30, 0 } } }, { { { 18, 0, 1 }, { 14, 27, 1 } } }, { { { 18, 0, 2 }, { 14, 27, 0 } } }, { { { 18, 0, 3 }, { 12, 31, 0 } } }, { { { 18, 0, 4 }, { 13, 30, 1 } } }, { { { 19, 0, 3 }, { 13, 30, 0 } } }, { { { 19, 0, 2 }, { 16, 24, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 0 }, { 13, 31, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 2 }, { 14, 30, 1 } } }, { { { 19, 0, 3 }, { 14, 30, 0 } } }, { { { 19, 0, 4 }, { 14, 31, 1 } } }, { { { 20, 0, 4 }, { 14, 31, 0 } } }, { { { 20, 0, 3 }, { 16, 27, 0 } } }, { { { 20, 0, 2 }, { 15, 30, 1 } } }, { { { 20, 0, 1 }, { 15, 30, 0 } } }, { { { 20, 0, 0 }, { 16, 28, 0 } } }, { { { 20, 0, 1 }, { 15, 31, 1 } } }, { { { 20, 0, 2 }, { 15, 31, 0 } } }, { { { 20, 0, 3 }, { 15, 31, 1 } } }, { { { 20, 0, 4 }, { 16, 30, 1 } } }, { { { 21, 0, 3 }, { 16, 30, 0 } } }, { { { 21, 0, 2 }, { 18, 27, 1 } } }, { { { 21, 0, 1 }, { 18, 27, 0 } } }, { { { 21, 0, 0 }, { 16, 31, 0 } } }, { { { 21, 0, 1 }, { 17, 30, 1 } } }, { { { 21, 0, 2 }, { 17, 30, 0 } } }, { { { 21, 0, 3 }, { 20, 24, 0 } } }, { { { 21, 0, 4 }, { 17, 31, 1 } } }, { { { 22, 0, 3 }, { 17, 31, 0 } } }, { { { 22, 0, 2 }, { 17, 31, 1 } } }, { { { 22, 0, 1 }, { 18, 30, 1 } } }, { { { 22, 0, 0 }, { 18, 30, 0 } } }, { { { 22, 0, 1 }, { 18, 31, 1 } } }, { { { 22, 0, 2 }, { 18, 31, 0 } } }, { { { 22, 0, 3 }, { 20, 27, 0 } } }, { { { 22, 0, 4 }, { 19, 30, 1 } } }, { { { 23, 0, 3 }, { 19, 30, 0 } } }, { { { 23, 0, 2 }, { 20, 28, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 0 }, { 19, 31, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 2 }, { 20, 30, 1 } } }, { { { 23, 0, 3 }, { 20, 30, 0 } } }, { { { 23, 0, 4 }, { 22, 27, 1 } } }, { { { 24, 0, 4 }, { 22, 27, 0 } } }, { { { 24, 0, 3 }, { 20, 31, 0 } } }, { { { 24, 0, 2 }, { 21, 30, 1 } } }, { { { 24, 0, 1 }, { 21, 30, 0 } } }, { 
{ { 24, 0, 0 }, { 24, 24, 0 } } }, { { { 24, 0, 1 }, { 21, 31, 1 } } }, { { { 24, 0, 2 }, { 21, 31, 0 } } }, { { { 24, 0, 3 }, { 21, 31, 1 } } }, { { { 24, 0, 4 }, { 22, 30, 1 } } }, { { { 25, 0, 3 }, { 22, 30, 0 } } }, { { { 25, 0, 2 }, { 22, 31, 1 } } }, { { { 25, 0, 1 }, { 22, 31, 0 } } }, { { { 25, 0, 0 }, { 24, 27, 0 } } }, { { { 25, 0, 1 }, { 23, 30, 1 } } }, { { { 25, 0, 2 }, { 23, 30, 0 } } }, { { { 25, 0, 3 }, { 24, 28, 0 } } }, { { { 25, 0, 4 }, { 23, 31, 1 } } }, { { { 26, 0, 3 }, { 23, 31, 0 } } }, { { { 26, 0, 2 }, { 23, 31, 1 } } }, { { { 26, 0, 1 }, { 24, 30, 1 } } }, { { { 26, 0, 0 }, { 24, 30, 0 } } }, { { { 26, 0, 1 }, { 26, 27, 1 } } }, { { { 26, 0, 2 }, { 26, 27, 0 } } }, { { { 26, 0, 3 }, { 24, 31, 0 } } }, { { { 26, 0, 4 }, { 25, 30, 1 } } }, { { { 27, 0, 3 }, { 25, 30, 0 } } }, { { { 27, 0, 2 }, { 28, 24, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 0 }, { 25, 31, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 2 }, { 26, 30, 1 } } }, { { { 27, 0, 3 }, { 26, 30, 0 } } }, { { { 27, 0, 4 }, { 26, 31, 1 } } }, { { { 28, 0, 4 }, { 26, 31, 0 } } }, { { { 28, 0, 3 }, { 28, 27, 0 } } }, { { { 28, 0, 2 }, { 27, 30, 1 } } }, { { { 28, 0, 1 }, { 27, 30, 0 } } }, { { { 28, 0, 0 }, { 28, 28, 0 } } }, { { { 28, 0, 1 }, { 27, 31, 1 } } }, { { { 28, 0, 2 }, { 27, 31, 0 } } }, { { { 28, 0, 3 }, { 27, 31, 1 } } }, { { { 28, 0, 4 }, { 28, 30, 1 } } }, { { { 29, 0, 3 }, { 28, 30, 0 } } }, { { { 29, 0, 2 }, { 30, 27, 1 } } }, { { { 29, 0, 1 }, { 30, 27, 0 } } }, { { { 29, 0, 0 }, { 28, 31, 0 } } }, { { { 29, 0, 1 }, { 29, 30, 1 } } }, { { { 29, 0, 2 }, { 29, 30, 0 } } }, { { { 29, 0, 3 }, { 29, 30, 1 } } }, { { { 29, 0, 4 }, { 29, 31, 1 } } }, { { { 30, 0, 3 }, { 29, 31, 0 } } }, { { { 30, 0, 2 }, { 29, 31, 1 } } }, { { { 30, 0, 1 }, { 30, 30, 1 } } }, { { { 30, 0, 0 }, { 30, 30, 0 } } }, { { { 30, 0, 1 }, { 30, 31, 1 } } }, { { { 30, 0, 2 }, { 30, 31, 0 } } }, { { { 30, 0, 3 }, { 30, 31, 1 } } }, { { { 30, 0, 4 }, { 31, 30, 1 } } }, { { 
{ 31, 0, 3 }, { 31, 30, 0 } } }, { { { 31, 0, 2 }, { 31, 30, 1 } } }, { { { 31, 0, 1 }, { 31, 31, 1 } } }, { { { 31, 0, 0 }, { 31, 31, 0 } } } }; static const DDSSingleColorLookup DDSLookup_6_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 0 } } }, { { { 0, 0, 2 }, { 0, 2, 0 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 0, 4, 0 } } }, { { { 1, 0, 2 }, { 0, 5, 0 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 0, 7, 0 } } }, { { { 2, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 10, 0 } } }, { { { 3, 0, 2 }, { 0, 11, 0 } } }, { { { 4, 0, 1 }, { 0, 12, 1 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 0 } } }, { { { 4, 0, 2 }, { 0, 14, 0 } } }, { { { 5, 0, 1 }, { 0, 15, 1 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 0, 16, 0 } } }, { { { 5, 0, 2 }, { 1, 15, 0 } } }, { { { 6, 0, 1 }, { 0, 17, 0 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 0, 19, 0 } } }, { { { 6, 0, 2 }, { 3, 14, 0 } } }, { { { 7, 0, 1 }, { 0, 20, 0 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 22, 0 } } }, { { { 7, 0, 2 }, { 4, 15, 0 } } }, { { { 8, 0, 1 }, { 0, 23, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 0 } } }, { { { 8, 0, 2 }, { 6, 14, 0 } } }, { { { 9, 0, 1 }, { 0, 26, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 0, 28, 0 } } }, { { { 9, 0, 2 }, { 7, 15, 0 } } }, { { { 10, 0, 1 }, { 0, 29, 0 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 0, 31, 0 } } }, { { { 10, 0, 2 }, { 9, 14, 0 } } }, { { { 11, 0, 1 }, { 0, 32, 0 } } }, { { { 11, 0, 0 }, { 0, 33, 0 } } }, { { { 11, 0, 1 }, { 2, 30, 0 } } }, { { { 11, 0, 2 }, { 0, 34, 0 } } }, { { { 12, 0, 1 }, { 0, 35, 0 } } }, { { { 12, 0, 0 }, { 0, 36, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 0 } } }, { { { 12, 0, 2 }, { 0, 37, 0 } } }, { { { 13, 0, 1 
}, { 0, 38, 0 } } }, { { { 13, 0, 0 }, { 0, 39, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 0 } } }, { { { 13, 0, 2 }, { 0, 40, 0 } } }, { { { 14, 0, 1 }, { 0, 41, 0 } } }, { { { 14, 0, 0 }, { 0, 42, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 0 } } }, { { { 14, 0, 2 }, { 0, 43, 0 } } }, { { { 15, 0, 1 }, { 0, 44, 0 } } }, { { { 15, 0, 0 }, { 0, 45, 0 } } }, { { { 15, 0, 1 }, { 8, 30, 0 } } }, { { { 15, 0, 2 }, { 0, 46, 0 } } }, { { { 16, 0, 2 }, { 0, 47, 0 } } }, { { { 16, 0, 1 }, { 1, 46, 0 } } }, { { { 16, 0, 0 }, { 0, 48, 0 } } }, { { { 16, 0, 1 }, { 0, 49, 0 } } }, { { { 16, 0, 2 }, { 0, 50, 0 } } }, { { { 17, 0, 1 }, { 2, 47, 0 } } }, { { { 17, 0, 0 }, { 0, 51, 0 } } }, { { { 17, 0, 1 }, { 0, 52, 0 } } }, { { { 17, 0, 2 }, { 0, 53, 0 } } }, { { { 18, 0, 1 }, { 4, 46, 0 } } }, { { { 18, 0, 0 }, { 0, 54, 0 } } }, { { { 18, 0, 1 }, { 0, 55, 0 } } }, { { { 18, 0, 2 }, { 0, 56, 0 } } }, { { { 19, 0, 1 }, { 5, 47, 0 } } }, { { { 19, 0, 0 }, { 0, 57, 0 } } }, { { { 19, 0, 1 }, { 0, 58, 0 } } }, { { { 19, 0, 2 }, { 0, 59, 0 } } }, { { { 20, 0, 1 }, { 7, 46, 0 } } }, { { { 20, 0, 0 }, { 0, 60, 0 } } }, { { { 20, 0, 1 }, { 0, 61, 0 } } }, { { { 20, 0, 2 }, { 0, 62, 0 } } }, { { { 21, 0, 1 }, { 8, 47, 0 } } }, { { { 21, 0, 0 }, { 0, 63, 0 } } }, { { { 21, 0, 1 }, { 1, 62, 0 } } }, { { { 21, 0, 2 }, { 1, 63, 0 } } }, { { { 22, 0, 1 }, { 10, 46, 0 } } }, { { { 22, 0, 0 }, { 2, 62, 0 } } }, { { { 22, 0, 1 }, { 2, 63, 0 } } }, { { { 22, 0, 2 }, { 3, 62, 0 } } }, { { { 23, 0, 1 }, { 11, 47, 0 } } }, { { { 23, 0, 0 }, { 3, 63, 0 } } }, { { { 23, 0, 1 }, { 4, 62, 0 } } }, { { { 23, 0, 2 }, { 4, 63, 0 } } }, { { { 24, 0, 1 }, { 13, 46, 0 } } }, { { { 24, 0, 0 }, { 5, 62, 0 } } }, { { { 24, 0, 1 }, { 5, 63, 0 } } }, { { { 24, 0, 2 }, { 6, 62, 0 } } }, { { { 25, 0, 1 }, { 14, 47, 0 } } }, { { { 25, 0, 0 }, { 6, 63, 0 } } }, { { { 25, 0, 1 }, { 7, 62, 0 } } }, { { { 25, 0, 2 }, { 7, 63, 0 } } }, { { { 26, 0, 1 }, { 16, 45, 0 } } }, { { { 26, 0, 0 }, { 8, 62, 0 } } }, { { { 26, 0, 1 }, { 8, 63, 0 
} } }, { { { 26, 0, 2 }, { 9, 62, 0 } } }, { { { 27, 0, 1 }, { 16, 48, 0 } } }, { { { 27, 0, 0 }, { 9, 63, 0 } } }, { { { 27, 0, 1 }, { 10, 62, 0 } } }, { { { 27, 0, 2 }, { 10, 63, 0 } } }, { { { 28, 0, 1 }, { 16, 51, 0 } } }, { { { 28, 0, 0 }, { 11, 62, 0 } } }, { { { 28, 0, 1 }, { 11, 63, 0 } } }, { { { 28, 0, 2 }, { 12, 62, 0 } } }, { { { 29, 0, 1 }, { 16, 54, 0 } } }, { { { 29, 0, 0 }, { 12, 63, 0 } } }, { { { 29, 0, 1 }, { 13, 62, 0 } } }, { { { 29, 0, 2 }, { 13, 63, 0 } } }, { { { 30, 0, 1 }, { 16, 57, 0 } } }, { { { 30, 0, 0 }, { 14, 62, 0 } } }, { { { 30, 0, 1 }, { 14, 63, 0 } } }, { { { 30, 0, 2 }, { 15, 62, 0 } } }, { { { 31, 0, 1 }, { 16, 60, 0 } } }, { { { 31, 0, 0 }, { 15, 63, 0 } } }, { { { 31, 0, 1 }, { 24, 46, 0 } } }, { { { 31, 0, 2 }, { 16, 62, 0 } } }, { { { 32, 0, 2 }, { 16, 63, 0 } } }, { { { 32, 0, 1 }, { 17, 62, 0 } } }, { { { 32, 0, 0 }, { 25, 47, 0 } } }, { { { 32, 0, 1 }, { 17, 63, 0 } } }, { { { 32, 0, 2 }, { 18, 62, 0 } } }, { { { 33, 0, 1 }, { 18, 63, 0 } } }, { { { 33, 0, 0 }, { 27, 46, 0 } } }, { { { 33, 0, 1 }, { 19, 62, 0 } } }, { { { 33, 0, 2 }, { 19, 63, 0 } } }, { { { 34, 0, 1 }, { 20, 62, 0 } } }, { { { 34, 0, 0 }, { 28, 47, 0 } } }, { { { 34, 0, 1 }, { 20, 63, 0 } } }, { { { 34, 0, 2 }, { 21, 62, 0 } } }, { { { 35, 0, 1 }, { 21, 63, 0 } } }, { { { 35, 0, 0 }, { 30, 46, 0 } } }, { { { 35, 0, 1 }, { 22, 62, 0 } } }, { { { 35, 0, 2 }, { 22, 63, 0 } } }, { { { 36, 0, 1 }, { 23, 62, 0 } } }, { { { 36, 0, 0 }, { 31, 47, 0 } } }, { { { 36, 0, 1 }, { 23, 63, 0 } } }, { { { 36, 0, 2 }, { 24, 62, 0 } } }, { { { 37, 0, 1 }, { 24, 63, 0 } } }, { { { 37, 0, 0 }, { 32, 47, 0 } } }, { { { 37, 0, 1 }, { 25, 62, 0 } } }, { { { 37, 0, 2 }, { 25, 63, 0 } } }, { { { 38, 0, 1 }, { 26, 62, 0 } } }, { { { 38, 0, 0 }, { 32, 50, 0 } } }, { { { 38, 0, 1 }, { 26, 63, 0 } } }, { { { 38, 0, 2 }, { 27, 62, 0 } } }, { { { 39, 0, 1 }, { 27, 63, 0 } } }, { { { 39, 0, 0 }, { 32, 53, 0 } } }, { { { 39, 0, 1 }, { 28, 62, 0 } } }, { { { 39, 0, 2 }, { 28, 63, 0 } } 
}, { { { 40, 0, 1 }, { 29, 62, 0 } } }, { { { 40, 0, 0 }, { 32, 56, 0 } } }, { { { 40, 0, 1 }, { 29, 63, 0 } } }, { { { 40, 0, 2 }, { 30, 62, 0 } } }, { { { 41, 0, 1 }, { 30, 63, 0 } } }, { { { 41, 0, 0 }, { 32, 59, 0 } } }, { { { 41, 0, 1 }, { 31, 62, 0 } } }, { { { 41, 0, 2 }, { 31, 63, 0 } } }, { { { 42, 0, 1 }, { 32, 61, 0 } } }, { { { 42, 0, 0 }, { 32, 62, 0 } } }, { { { 42, 0, 1 }, { 32, 63, 0 } } }, { { { 42, 0, 2 }, { 41, 46, 0 } } }, { { { 43, 0, 1 }, { 33, 62, 0 } } }, { { { 43, 0, 0 }, { 33, 63, 0 } } }, { { { 43, 0, 1 }, { 34, 62, 0 } } }, { { { 43, 0, 2 }, { 42, 47, 0 } } }, { { { 44, 0, 1 }, { 34, 63, 0 } } }, { { { 44, 0, 0 }, { 35, 62, 0 } } }, { { { 44, 0, 1 }, { 35, 63, 0 } } }, { { { 44, 0, 2 }, { 44, 46, 0 } } }, { { { 45, 0, 1 }, { 36, 62, 0 } } }, { { { 45, 0, 0 }, { 36, 63, 0 } } }, { { { 45, 0, 1 }, { 37, 62, 0 } } }, { { { 45, 0, 2 }, { 45, 47, 0 } } }, { { { 46, 0, 1 }, { 37, 63, 0 } } }, { { { 46, 0, 0 }, { 38, 62, 0 } } }, { { { 46, 0, 1 }, { 38, 63, 0 } } }, { { { 46, 0, 2 }, { 47, 46, 0 } } }, { { { 47, 0, 1 }, { 39, 62, 0 } } }, { { { 47, 0, 0 }, { 39, 63, 0 } } }, { { { 47, 0, 1 }, { 40, 62, 0 } } }, { { { 47, 0, 2 }, { 48, 46, 0 } } }, { { { 48, 0, 2 }, { 40, 63, 0 } } }, { { { 48, 0, 1 }, { 41, 62, 0 } } }, { { { 48, 0, 0 }, { 41, 63, 0 } } }, { { { 48, 0, 1 }, { 48, 49, 0 } } }, { { { 48, 0, 2 }, { 42, 62, 0 } } }, { { { 49, 0, 1 }, { 42, 63, 0 } } }, { { { 49, 0, 0 }, { 43, 62, 0 } } }, { { { 49, 0, 1 }, { 48, 52, 0 } } }, { { { 49, 0, 2 }, { 43, 63, 0 } } }, { { { 50, 0, 1 }, { 44, 62, 0 } } }, { { { 50, 0, 0 }, { 44, 63, 0 } } }, { { { 50, 0, 1 }, { 48, 55, 0 } } }, { { { 50, 0, 2 }, { 45, 62, 0 } } }, { { { 51, 0, 1 }, { 45, 63, 0 } } }, { { { 51, 0, 0 }, { 46, 62, 0 } } }, { { { 51, 0, 1 }, { 48, 58, 0 } } }, { { { 51, 0, 2 }, { 46, 63, 0 } } }, { { { 52, 0, 1 }, { 47, 62, 0 } } }, { { { 52, 0, 0 }, { 47, 63, 0 } } }, { { { 52, 0, 1 }, { 48, 61, 0 } } }, { { { 52, 0, 2 }, { 48, 62, 0 } } }, { { { 53, 0, 1 }, { 56, 47, 0 } } 
}, { { { 53, 0, 0 }, { 48, 63, 0 } } }, { { { 53, 0, 1 }, { 49, 62, 0 } } }, { { { 53, 0, 2 }, { 49, 63, 0 } } }, { { { 54, 0, 1 }, { 58, 46, 0 } } }, { { { 54, 0, 0 }, { 50, 62, 0 } } }, { { { 54, 0, 1 }, { 50, 63, 0 } } }, { { { 54, 0, 2 }, { 51, 62, 0 } } }, { { { 55, 0, 1 }, { 59, 47, 0 } } }, { { { 55, 0, 0 }, { 51, 63, 0 } } }, { { { 55, 0, 1 }, { 52, 62, 0 } } }, { { { 55, 0, 2 }, { 52, 63, 0 } } }, { { { 56, 0, 1 }, { 61, 46, 0 } } }, { { { 56, 0, 0 }, { 53, 62, 0 } } }, { { { 56, 0, 1 }, { 53, 63, 0 } } }, { { { 56, 0, 2 }, { 54, 62, 0 } } }, { { { 57, 0, 1 }, { 62, 47, 0 } } }, { { { 57, 0, 0 }, { 54, 63, 0 } } }, { { { 57, 0, 1 }, { 55, 62, 0 } } }, { { { 57, 0, 2 }, { 55, 63, 0 } } }, { { { 58, 0, 1 }, { 56, 62, 1 } } }, { { { 58, 0, 0 }, { 56, 62, 0 } } }, { { { 58, 0, 1 }, { 56, 63, 0 } } }, { { { 58, 0, 2 }, { 57, 62, 0 } } }, { { { 59, 0, 1 }, { 57, 63, 1 } } }, { { { 59, 0, 0 }, { 57, 63, 0 } } }, { { { 59, 0, 1 }, { 58, 62, 0 } } }, { { { 59, 0, 2 }, { 58, 63, 0 } } }, { { { 60, 0, 1 }, { 59, 62, 1 } } }, { { { 60, 0, 0 }, { 59, 62, 0 } } }, { { { 60, 0, 1 }, { 59, 63, 0 } } }, { { { 60, 0, 2 }, { 60, 62, 0 } } }, { { { 61, 0, 1 }, { 60, 63, 1 } } }, { { { 61, 0, 0 }, { 60, 63, 0 } } }, { { { 61, 0, 1 }, { 61, 62, 0 } } }, { { { 61, 0, 2 }, { 61, 63, 0 } } }, { { { 62, 0, 1 }, { 62, 62, 1 } } }, { { { 62, 0, 0 }, { 62, 62, 0 } } }, { { { 62, 0, 1 }, { 62, 63, 0 } } }, { { { 62, 0, 2 }, { 63, 62, 0 } } }, { { { 63, 0, 1 }, { 63, 63, 1 } } }, { { { 63, 0, 0 }, { 63, 63, 0 } } } }; static const DDSSingleColorLookup* DDS_LOOKUP[] = { DDSLookup_5_4, DDSLookup_6_4, DDSLookup_5_4 }; /* Macros */ #define C565_r(x) (((x) & 0xF800) >> 11) #define C565_g(x) (((x) & 0x07E0) >> 5) #define C565_b(x) ((x) & 0x001F) #define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2)) #define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4)) #define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2)) #define DIV2(x) ((x) > 1 ? 
((x) >> 1) : 1) #define FixRange(min, max, steps) \ if (min > max) \ min = max; \ if ((ssize_t) max - min < steps) \ max = MagickMin(min + steps, 255); \ if ((ssize_t) max - min < steps) \ min = MagickMax(0, (ssize_t) max - steps) #define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z) #define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \ = value #define VectorInit3(vector, value) vector.x = vector.y = vector.z = value #define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \ g && mask.b_bitmask == b && mask.alpha_bitmask == a) /* Forward declarations */ static MagickBooleanType WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *); static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x + right.x; destination->y = left.y + right.y; destination->z = left.z + right.z; destination->w = left.w + right.w; } static inline void VectorClamp(DDSVector4 *value) { value->x = MagickMin(1.0f,MagickMax(0.0f,value->x)); value->y = MagickMin(1.0f,MagickMax(0.0f,value->y)); value->z = MagickMin(1.0f,MagickMax(0.0f,value->z)); value->w = MagickMin(1.0f,MagickMax(0.0f,value->w)); } static inline void VectorClamp3(DDSVector3 *value) { value->x = MagickMin(1.0f,MagickMax(0.0f,value->x)); value->y = MagickMin(1.0f,MagickMax(0.0f,value->y)); value->z = MagickMin(1.0f,MagickMax(0.0f,value->z)); } static inline void VectorCopy43(const DDSVector4 source, DDSVector3 *destination) { destination->x = source.x; destination->y = source.y; destination->z = source.z; } static inline void VectorCopy44(const DDSVector4 source, DDSVector4 *destination) { destination->x = source.x; destination->y = source.y; destination->z = source.z; destination->w = source.w; } static inline void VectorNegativeMultiplySubtract(const DDSVector4 a, const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination) { destination->x = c.x - (a.x * b.x); destination->y = c.y - 
(a.y * b.y); destination->z = c.z - (a.z * b.z); destination->w = c.w - (a.w * b.w); } static inline void VectorMultiply(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x * right.x; destination->y = left.y * right.y; destination->z = left.z * right.z; destination->w = left.w * right.w; } static inline void VectorMultiply3(const DDSVector3 left, const DDSVector3 right, DDSVector3 *destination) { destination->x = left.x * right.x; destination->y = left.y * right.y; destination->z = left.z * right.z; } static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination) { destination->x = (a.x * b.x) + c.x; destination->y = (a.y * b.y) + c.y; destination->z = (a.z * b.z) + c.z; destination->w = (a.w * b.w) + c.w; } static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b, const DDSVector3 c, DDSVector3 *destination) { destination->x = (a.x * b.x) + c.x; destination->y = (a.y * b.y) + c.y; destination->z = (a.z * b.z) + c.z; } static inline void VectorReciprocal(const DDSVector4 value, DDSVector4 *destination) { destination->x = 1.0f / value.x; destination->y = 1.0f / value.y; destination->z = 1.0f / value.z; destination->w = 1.0f / value.w; } static inline void VectorSubtract(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x - right.x; destination->y = left.y - right.y; destination->z = left.z - right.z; destination->w = left.w - right.w; } static inline void VectorSubtract3(const DDSVector3 left, const DDSVector3 right, DDSVector3 *destination) { destination->x = left.x - right.x; destination->y = left.y - right.y; destination->z = left.z - right.z; } static inline void VectorTruncate(DDSVector4 *value) { value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x); value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y); value->z = value->z > 0.0f ? 
floor(value->z) : ceil(value->z); value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w); } static inline void VectorTruncate3(DDSVector3 *value) { value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x); value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y); value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z); } static inline size_t ClampToLimit(const float value, const size_t limit) { size_t result = (int) (value + 0.5f); if (result < 0.0f) return(0); if (result > limit) return(limit); return result; } static inline size_t ColorTo565(const DDSVector3 point) { size_t r = ClampToLimit(31.0f*point.x,31); size_t g = ClampToLimit(63.0f*point.y,63); size_t b = ClampToLimit(31.0f*point.z,31); return (r << 11) | (g << 5) | b; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s D D S % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsDDS() returns MagickTrue if the image format type, identified by the % magick string, is DDS. % % The format of the IsDDS method is: % % MagickBooleanType IsDDS(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((char *) magick,"DDS ", 4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadDDSImage() reads a DirectDraw Surface image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. 
% % The format of the ReadDDSImage method is: % % Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: The image info. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info) { size_t hdr_size, required; /* Seek to start of header */ (void) SeekBlob(image, 4, SEEK_SET); /* Check header field */ hdr_size = ReadBlobLSBLong(image); if (hdr_size != 124) return MagickFalse; /* Fill in DDS info struct */ dds_info->flags = ReadBlobLSBLong(image); /* Check required flags */ required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); if ((dds_info->flags & required) != required) return MagickFalse; dds_info->height = ReadBlobLSBLong(image); dds_info->width = ReadBlobLSBLong(image); dds_info->pitchOrLinearSize = ReadBlobLSBLong(image); dds_info->depth = ReadBlobLSBLong(image); dds_info->mipmapcount = ReadBlobLSBLong(image); (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */ /* Read pixel format structure */ hdr_size = ReadBlobLSBLong(image); if (hdr_size != 32) return MagickFalse; dds_info->pixelformat.flags = ReadBlobLSBLong(image); dds_info->pixelformat.fourcc = ReadBlobLSBLong(image); dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image); dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image); dds_info->ddscaps1 = ReadBlobLSBLong(image); dds_info->ddscaps2 = ReadBlobLSBLong(image); (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */ return MagickTrue; } static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y, DDSColors colors,size_t bits,Quantum *q) { register ssize_t i; ssize_t j; unsigned char code; for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < 
(ssize_t) image->columns && (y + j) < (ssize_t) image->rows) { code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3); SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q); SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q); SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q); SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q); if ((colors.a[code] != 0) && (image->alpha_trait == UndefinedPixelTrait)) return(MagickFalse); q+=GetPixelChannels(image); } } } return(MagickTrue); } static MagickBooleanType ReadMipmaps(const ImageInfo *image_info,Image *image, DDSInfo *dds_info,DDSPixelDecoder decoder,ExceptionInfo *exception) { MagickBooleanType status; /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } status=MagickTrue; if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { register ssize_t i; size_t h, w; w=DIV2(dds_info->width); h=DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { AcquireNextImage(image_info,image,exception); if (image->next == (Image *) NULL) return(MagickFalse); image->next->alpha_trait=image->alpha_trait; image=SyncNextImageInList(image); status=SetImageExtent(image,w,h,exception); if (status == MagickFalse) break; status=decoder(image,dds_info,exception); if (status == MagickFalse) break; if ((w == 1) && (h == 1)) break; w=DIV2(w); h=DIV2(h); } } return(status); } static void CalculateColors(unsigned short c0, unsigned short c1, DDSColors *c, MagickBooleanType ignoreAlpha) { c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0; c->r[0] = (unsigned char) C565_red(c0); c->g[0] = (unsigned char) C565_green(c0); c->b[0] = (unsigned char) C565_blue(c0); c->r[1] = (unsigned char) C565_red(c1); c->g[1] = (unsigned char) 
C565_green(c1); c->b[1] = (unsigned char) C565_blue(c1); if (ignoreAlpha != MagickFalse || c0 > c1) { c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3); c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3); c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3); c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3); c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3); c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3); } else { c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2); c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2); c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2); c->r[3] = c->g[3] = c->b[3] = 0; c->a[3] = 255; } } static MagickBooleanType ReadDXT1Pixels(Image *image, DDSInfo *magick_unused(dds_info),ExceptionInfo *exception) { DDSColors colors; register Quantum *q; register ssize_t x; size_t bits; ssize_t y; unsigned short c0, c1; magick_unreferenced(dds_info); for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { /* Get 4x4 patch of pixels to write on */ q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x), MagickMin(4,image->rows-y),exception); if (q == (Quantum *) NULL) return(MagickFalse); /* Read 8 bytes of data from the image */ c0=ReadBlobLSBShort(image); c1=ReadBlobLSBShort(image); bits=ReadBlobLSBLong(image); CalculateColors(c0,c1,&colors,MagickFalse); if (EOFBlob(image) != MagickFalse) return(MagickFalse); /* Write the pixels */ if (SetDXT1Pixels(image,x,y,colors,bits,q) == MagickFalse) { /* Correct alpha */ SetImageAlpha(image,QuantumRange,exception); q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x), MagickMin(4,image->rows-y),exception); if (q != (Quantum *) NULL) SetDXT1Pixels(image,x,y,colors,bits,q); } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); } if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } /* Skip the mipmap images for compressed (DXTn) dds files */ static 
MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info, int texel_size,ExceptionInfo *exception) { /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { MagickOffsetType offset; register ssize_t i; size_t h, w; w=DIV2(dds_info->width); h=DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset=(MagickOffsetType)((w+3)/4)*((h+3)/4)*texel_size; if (SeekBlob(image,offset,SEEK_CUR) < 0) break; w=DIV2(w); h=DIV2(h); if ((w == 1) && (h == 1)) break; } } return(MagickTrue); } static MagickBooleanType ReadDXT1(const ImageInfo *image_info,Image *image, DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadDXT1Pixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadDXT1Pixels,exception)); else return(SkipDXTMipmaps(image,dds_info,8,exception)); } static MagickBooleanType ReadDXT3Pixels(Image *image, DDSInfo *magick_unused(dds_info),ExceptionInfo *exception) { DDSColors colors; register Quantum *q; register ssize_t i, x; unsigned char alpha; size_t a0, a1, bits, code; ssize_t j, y; unsigned short c0, c1; magick_unreferenced(dds_info); for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x), MagickMin(4, image->rows - y),exception); if (q == (Quantum *) NULL) return(MagickFalse); /* Read alpha values (8 bytes) */ a0 = ReadBlobLSBLong(image); a1 = ReadBlobLSBLong(image); /* Read 8 bytes of data from the image */ 
c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); if (EOFBlob(image) != MagickFalse) return(MagickFalse); /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q); SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q); SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q); /* Extract alpha value: multiply 0..15 by 17 to get range 0..255 */ if (j < 2) alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf); else alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf); SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q); q+=GetPixelChannels(image); } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); } if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } static MagickBooleanType ReadDXT3(const ImageInfo *image_info,Image *image, DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadDXT3Pixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadDXT3Pixels,exception)); else return(SkipDXTMipmaps(image,dds_info,16,exception)); } static MagickBooleanType ReadDXT5Pixels(Image *image, DDSInfo *magick_unused(dds_info),ExceptionInfo *exception) { DDSColors colors; MagickSizeType alpha_bits; register Quantum *q; register ssize_t i, x; unsigned char a0, a1; size_t alpha, bits, code, alpha_code; ssize_t j, y; unsigned short c0, c1; magick_unreferenced(dds_info); for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x), MagickMin(4, 
image->rows - y),exception); if (q == (Quantum *) NULL) return(MagickFalse); /* Read alpha values (8 bytes) */ a0 = (unsigned char) ReadBlobByte(image); a1 = (unsigned char) ReadBlobByte(image); alpha_bits = (MagickSizeType)ReadBlobLSBLong(image); alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32); /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); if (EOFBlob(image) != MagickFalse) return(MagickFalse); /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q); SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q); SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q); /* Extract alpha value */ alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7; if (alpha_code == 0) alpha = a0; else if (alpha_code == 1) alpha = a1; else if (a0 > a1) alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7; else if (alpha_code == 6) alpha = 0; else if (alpha_code == 7) alpha = 255; else alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5); SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q); q+=GetPixelChannels(image); } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); } if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } static MagickBooleanType ReadDXT5(const ImageInfo *image_info,Image *image, DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadDXT5Pixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadDXT5Pixels,exception)); else return(SkipDXTMipmaps(image,dds_info,16,exception)); } static MagickBooleanType 
ReadUncompressedRGBPixels(Image *image, DDSInfo *dds_info,ExceptionInfo *exception) { register Quantum *q; ssize_t x, y; unsigned short color; for (y = 0; y < (ssize_t) image->rows; y++) { q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception); if (q == (Quantum *) NULL) return(MagickFalse); for (x = 0; x < (ssize_t) image->columns; x++) { if (dds_info->pixelformat.rgb_bitcount == 8) SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q); else if (dds_info->pixelformat.rgb_bitcount == 16) { color=ReadBlobShort(image); SetPixelRed(image,ScaleCharToQuantum((unsigned char) (((color >> 11)/31.0)*255)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 5) >> 10)/63.0)*255)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q); } else { SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); if (dds_info->pixelformat.rgb_bitcount == 32) (void) ReadBlobByte(image); } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } /* Skip the mipmap images for uncompressed (RGB or RGBA) dds files */ static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info, int pixel_size,ExceptionInfo *exception) { /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { MagickOffsetType offset; register ssize_t i; size_t h, w; w=DIV2(dds_info->width); h=DIV2(dds_info->height); /* Mipmapcount 
includes the main image, so start from one */ for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset=(MagickOffsetType)w*h*pixel_size; if (SeekBlob(image,offset,SEEK_CUR) < 0) break; w=DIV2(w); h=DIV2(h); if ((w == 1) && (h == 1)) break; } } return(MagickTrue); } static MagickBooleanType ReadUncompressedRGB(const ImageInfo *image_info, Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (dds_info->pixelformat.rgb_bitcount == 8) (void) SetImageType(image,GrayscaleType,exception); else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask( dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000)) ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); if (ReadUncompressedRGBPixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBPixels, exception)); else return(SkipRGBMipmaps(image,dds_info,3,exception)); } static MagickBooleanType ReadUncompressedRGBAPixels(Image *image, DDSInfo *dds_info,ExceptionInfo *exception) { register Quantum *q; ssize_t alphaBits, x, y; unsigned short color; alphaBits=0; if (dds_info->pixelformat.rgb_bitcount == 16) { if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000)) alphaBits=1; else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00)) { alphaBits=2; (void) SetImageType(image,GrayscaleAlphaType,exception); } else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000)) alphaBits=4; else ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); } for (y = 0; y < (ssize_t) image->rows; y++) { q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception); if (q == (Quantum *) NULL) return(MagickFalse); for (x = 0; x < (ssize_t) image->columns; x++) { if (dds_info->pixelformat.rgb_bitcount == 16) { color=ReadBlobShort(image); if (alphaBits == 1) { SetPixelAlpha(image,(color & (1 
<< 15)) ? QuantumRange : 0,q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 1) >> 11)/31.0)*255)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 6) >> 11)/31.0)*255)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q); } else if (alphaBits == 2) { SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) (color >> 8)),q); SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q); } else { SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) (((color >> 12)/15.0)*255)),q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 4) >> 12)/15.0)*255)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 8) >> 12)/15.0)*255)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 12) >> 12)/15.0)*255)),q); } } else { SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } static MagickBooleanType ReadUncompressedRGBA(const ImageInfo *image_info, Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadUncompressedRGBAPixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBAPixels, exception)); else return(SkipRGBMipmaps(image,dds_info,4,exception)); } static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) { const 
char *option; CompressionType compression; DDSInfo dds_info; DDSDecoder *decoder; Image *image; MagickBooleanType status, cubemap, volume, read_mipmaps; PixelTrait alpha_trait; size_t n, num_images; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cubemap=MagickFalse, volume=MagickFalse, read_mipmaps=MagickFalse; image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Initialize image structure. */ if (ReadDDSInfo(image, &dds_info) != MagickTrue) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP) cubemap = MagickTrue; if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0) volume = MagickTrue; (void) SeekBlob(image, 128, SEEK_SET); /* Determine pixel format */ if (dds_info.pixelformat.flags & DDPF_RGB) { compression = NoCompression; if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS) { alpha_trait = BlendPixelTrait; decoder = ReadUncompressedRGBA; } else { alpha_trait = UndefinedPixelTrait; decoder = ReadUncompressedRGB; } } else if (dds_info.pixelformat.flags & DDPF_LUMINANCE) { compression = NoCompression; if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS) { /* Not sure how to handle this */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } else { alpha_trait = UndefinedPixelTrait; decoder = ReadUncompressedRGB; } } else if (dds_info.pixelformat.flags & DDPF_FOURCC) { switch (dds_info.pixelformat.fourcc) { case FOURCC_DXT1: { alpha_trait = UndefinedPixelTrait; compression = DXT1Compression; decoder = ReadDXT1; break; } case FOURCC_DXT3: { alpha_trait = 
BlendPixelTrait; compression = DXT3Compression; decoder = ReadDXT3; break; } case FOURCC_DXT5: { alpha_trait = BlendPixelTrait; compression = DXT5Compression; decoder = ReadDXT5; break; } default: { /* Unknown FOURCC */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } } } else { /* Neither compressed nor uncompressed... thus unsupported */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } num_images = 1; if (cubemap) { /* Determine number of faces defined in the cubemap */ num_images = 0; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++; } if (volume) num_images = dds_info.depth; if ((num_images == 0) || (num_images > GetBlobSize(image))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse) ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit"); option=GetImageOption(image_info,"dds:skip-mipmaps"); if (IsStringFalse(option) != MagickFalse) read_mipmaps=MagickTrue; for (n = 0; n < num_images; n++) { if (n != 0) { /* Start a new image */ if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } image->alpha_trait=alpha_trait; image->compression=compression; image->columns=dds_info.width; image->rows=dds_info.height; image->storage_class=DirectClass; image->endian=LSBEndian; image->depth=8; if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } 
status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); (void) SetImageBackgroundColor(image,exception); status=(decoder)(image_info,image,&dds_info,read_mipmaps,exception); if (status == MagickFalse) { (void) CloseBlob(image); if (n == 0) return(DestroyImageList(image)); return(GetFirstImageInList(image)); } } (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterDDSImage() adds attributes for the DDS image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. 
%
%  The format of the RegisterDDSImage method is:
%
%      RegisterDDSImage(void)
%
*/
ModuleExport size_t RegisterDDSImage(void)
{
  MagickInfo
    *entry;

  /* Primary "DDS" entry plus "DXT1"/"DXT5" aliases; all share the same
     reader, writer and magic-detection handlers, and all advertise a
     seekable input stream requirement to the decoder framework. */
  entry = AcquireMagickInfo("DDS","DDS","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  entry = AcquireMagickInfo("DDS","DXT1","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  entry = AcquireMagickInfo("DDS","DXT5","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
  RemapIndices() copies 16 texel selector values from source into target in
  the order given by map; a map entry of -1 (a pixel outside the image in a
  partial edge block) emits selector 3.
*/
static void RemapIndices(const ssize_t *map, const unsigned char *source,
  unsigned char *target)
{
  register ssize_t
    i;

  for (i = 0; i < 16; i++)
  {
    if (map[i] == -1)
      target[i] = 3;
    else
      target[i] = source[map[i]];
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r D D S I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterDDSImage() removes format registrations made by the
%  DDS module from the list of supported formats.
% % The format of the UnregisterDDSImage method is: % % UnregisterDDSImage(void) % */ ModuleExport void UnregisterDDSImage(void) { (void) UnregisterMagickInfo("DDS"); (void) UnregisterMagickInfo("DXT1"); (void) UnregisterMagickInfo("DXT5"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format. % % The format of the WriteBMPImage method is: % % MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % */ static size_t CompressAlpha(const size_t min, const size_t max, const size_t steps, const ssize_t *alphas, unsigned char* indices) { unsigned char codes[8]; register ssize_t i; size_t error, index, j, least, value; codes[0] = (unsigned char) min; codes[1] = (unsigned char) max; codes[6] = 0; codes[7] = 255; for (i=1; i < (ssize_t) steps; i++) codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps); error = 0; for (i=0; i<16; i++) { if (alphas[i] == -1) { indices[i] = 0; continue; } value = alphas[i]; least = SIZE_MAX; index = 0; for (j=0; j<8; j++) { size_t dist; dist = value - (size_t)codes[j]; dist *= dist; if (dist < least) { least = dist; index = j; } } indices[i] = (unsigned char)index; error += least; } return error; } static MagickBooleanType ConstructOrdering(const size_t count, const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights, DDSVector4 *xSumwSum, unsigned char *order, size_t iteration) { float dps[16], f; register ssize_t i; size_t j; unsigned char c, *o, *p; o = order + (16*iteration); for (i=0; i < (ssize_t) count; i++) { dps[i] = Dot(points[i],axis); o[i] = (unsigned char)i; } for (i=0; i < (ssize_t) count; i++) { for (j=i; j > 0 && dps[j] < dps[j - 1]; j--) { f 
= dps[j]; dps[j] = dps[j - 1]; dps[j - 1] = f; c = o[j]; o[j] = o[j - 1]; o[j - 1] = c; } } for (i=0; i < (ssize_t) iteration; i++) { MagickBooleanType same; p = order + (16*i); same = MagickTrue; for (j=0; j < count; j++) { if (o[j] != p[j]) { same = MagickFalse; break; } } if (same != MagickFalse) return MagickFalse; } xSumwSum->x = 0; xSumwSum->y = 0; xSumwSum->z = 0; xSumwSum->w = 0; for (i=0; i < (ssize_t) count; i++) { DDSVector4 v; j = (size_t) o[i]; v.x = points[j].w * points[j].x; v.y = points[j].w * points[j].y; v.z = points[j].w * points[j].z; v.w = points[j].w * 1.0f; VectorCopy44(v,&pointsWeights[i]); VectorAdd(*xSumwSum,v,xSumwSum); } return MagickTrue; } static void CompressClusterFit(const size_t count, const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric, DDSVector3 *start, DDSVector3* end, unsigned char *indices) { DDSVector3 axis; DDSVector4 grid, gridrcp, half, onethird_onethird2, pointsWeights[16], two, twonineths, twothirds_twothirds2, xSumwSum; float bestError = 1e+37f; size_t bestIteration = 0, besti = 0, bestj = 0, bestk = 0, iterationIndex; ssize_t i; unsigned char *o, order[128], unordered[16]; VectorInit(half,0.5f); VectorInit(two,2.0f); VectorInit(onethird_onethird2,1.0f/3.0f); onethird_onethird2.w = 1.0f/9.0f; VectorInit(twothirds_twothirds2,2.0f/3.0f); twothirds_twothirds2.w = 4.0f/9.0f; VectorInit(twonineths,2.0f/9.0f); grid.x = 31.0f; grid.y = 63.0f; grid.z = 31.0f; grid.w = 0.0f; gridrcp.x = 1.0f/31.0f; gridrcp.y = 1.0f/63.0f; gridrcp.z = 1.0f/31.0f; gridrcp.w = 0.0f; xSumwSum.x = 0.0f; xSumwSum.y = 0.0f; xSumwSum.z = 0.0f; xSumwSum.w = 0.0f; ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0); for (iterationIndex = 0;;) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,1) \ num_threads(GetMagickResourceLimit(ThreadResource)) #endif for (i=0; i < (ssize_t) count; i++) { DDSVector4 part0, part1, part2; size_t ii, j, k, kmin; 
VectorInit(part0,0.0f); for(ii=0; ii < (size_t) i; ii++) VectorAdd(pointsWeights[ii],part0,&part0); VectorInit(part1,0.0f); for (j=(size_t) i;;) { if (j == 0) { VectorCopy44(pointsWeights[0],&part2); kmin = 1; } else { VectorInit(part2,0.0f); kmin = j; } for (k=kmin;;) { DDSVector4 a, alpha2_sum, alphax_sum, alphabeta_sum, b, beta2_sum, betax_sum, e1, e2, factor, part3; float error; VectorSubtract(xSumwSum,part2,&part3); VectorSubtract(part3,part1,&part3); VectorSubtract(part3,part0,&part3); VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum); VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum); VectorInit(alpha2_sum,alphax_sum.w); VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum); VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum); VectorInit(beta2_sum,betax_sum.w); VectorAdd(part1,part2,&alphabeta_sum); VectorInit(alphabeta_sum,alphabeta_sum.w); VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum); VectorMultiply(alpha2_sum,beta2_sum,&factor); VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor, &factor); VectorReciprocal(factor,&factor); VectorMultiply(alphax_sum,beta2_sum,&a); VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a); VectorMultiply(a,factor,&a); VectorMultiply(betax_sum,alpha2_sum,&b); VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b); VectorMultiply(b,factor,&b); VectorClamp(&a); VectorMultiplyAdd(grid,a,half,&a); VectorTruncate(&a); VectorMultiply(a,gridrcp,&a); VectorClamp(&b); VectorMultiplyAdd(grid,b,half,&b); VectorTruncate(&b); VectorMultiply(b,gridrcp,&b); VectorMultiply(b,b,&e1); VectorMultiply(e1,beta2_sum,&e1); VectorMultiply(a,a,&e2); VectorMultiplyAdd(e2,alpha2_sum,e1,&e1); VectorMultiply(a,b,&e2); VectorMultiply(e2,alphabeta_sum,&e2); VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2); VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2); VectorMultiplyAdd(two,e2,e1,&e2); VectorMultiply(e2,metric,&e2); error = e2.x + e2.y + e2.z; if (error < 
bestError) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (DDS_CompressClusterFit) #endif { if (error < bestError) { VectorCopy43(a,start); VectorCopy43(b,end); bestError = error; besti = i; bestj = j; bestk = k; bestIteration = iterationIndex; } } } if (k == count) break; VectorAdd(pointsWeights[k],part2,&part2); k++; } if (j == count) break; VectorAdd(pointsWeights[j],part1,&part1); j++; } } if (bestIteration != iterationIndex) break; iterationIndex++; if (iterationIndex == 8) break; VectorSubtract3(*end,*start,&axis); if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order, iterationIndex) == MagickFalse) break; } o = order + (16*bestIteration); for (i=0; i < (ssize_t) besti; i++) unordered[o[i]] = 0; for (i=besti; i < (ssize_t) bestj; i++) unordered[o[i]] = 2; for (i=bestj; i < (ssize_t) bestk; i++) unordered[o[i]] = 3; for (i=bestk; i < (ssize_t) count; i++) unordered[o[i]] = 1; RemapIndices(map,unordered,indices); } static void CompressRangeFit(const size_t count, const DDSVector4* points, const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end, unsigned char *indices) { float d, bestDist, max, min, val; DDSVector3 codes[4], grid, gridrcp, half, dist; register ssize_t i; size_t bestj, j; unsigned char closest[16]; VectorInit3(half,0.5f); grid.x = 31.0f; grid.y = 63.0f; grid.z = 31.0f; gridrcp.x = 1.0f/31.0f; gridrcp.y = 1.0f/63.0f; gridrcp.z = 1.0f/31.0f; if (count > 0) { VectorCopy43(points[0],start); VectorCopy43(points[0],end); min = max = Dot(points[0],principle); for (i=1; i < (ssize_t) count; i++) { val = Dot(points[i],principle); if (val < min) { VectorCopy43(points[i],start); min = val; } else if (val > max) { VectorCopy43(points[i],end); max = val; } } } VectorClamp3(start); VectorMultiplyAdd3(grid,*start,half,start); VectorTruncate3(start); VectorMultiply3(*start,gridrcp,start); VectorClamp3(end); VectorMultiplyAdd3(grid,*end,half,end); VectorTruncate3(end); 
VectorMultiply3(*end,gridrcp,end); codes[0] = *start; codes[1] = *end; codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f)); codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f)); codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f)); codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f)); codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f)); codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f)); for (i=0; i < (ssize_t) count; i++) { bestDist = 1e+37f; bestj = 0; for (j=0; j < 4; j++) { dist.x = (points[i].x - codes[j].x) * metric.x; dist.y = (points[i].y - codes[j].y) * metric.y; dist.z = (points[i].z - codes[j].z) * metric.z; d = Dot(dist,dist); if (d < bestDist) { bestDist = d; bestj = j; } } closest[i] = (unsigned char) bestj; } RemapIndices(map, closest, indices); } static void ComputeEndPoints(const DDSSingleColorLookup *lookup[], const unsigned char *color, DDSVector3 *start, DDSVector3 *end, unsigned char *index) { register ssize_t i; size_t c, maxError = SIZE_MAX; for (i=0; i < 2; i++) { const DDSSourceBlock* sources[3]; size_t error = 0; for (c=0; c < 3; c++) { sources[c] = &lookup[c][color[c]].sources[i]; error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error); } if (error > maxError) continue; start->x = (float) sources[0]->start / 31.0f; start->y = (float) sources[1]->start / 63.0f; start->z = (float) sources[2]->start / 31.0f; end->x = (float) sources[0]->end / 31.0f; end->y = (float) sources[1]->end / 63.0f; end->z = (float) sources[2]->end / 31.0f; *index = (unsigned char) (2*i); maxError = error; } } static void ComputePrincipleComponent(const float *covariance, DDSVector3 *principle) { DDSVector4 row0, row1, row2, v; register ssize_t i; row0.x = covariance[0]; row0.y = covariance[1]; row0.z = covariance[2]; row0.w = 0.0f; row1.x = covariance[1]; row1.y = covariance[3]; row1.z = covariance[4]; row1.w = 0.0f; row2.x = covariance[2]; row2.y = covariance[4]; row2.z = covariance[5]; 
row2.w = 0.0f; VectorInit(v,1.0f); for (i=0; i < 8; i++) { DDSVector4 w; float a; w.x = row0.x * v.x; w.y = row0.y * v.x; w.z = row0.z * v.x; w.w = row0.w * v.x; w.x = (row1.x * v.y) + w.x; w.y = (row1.y * v.y) + w.y; w.z = (row1.z * v.y) + w.z; w.w = (row1.w * v.y) + w.w; w.x = (row2.x * v.z) + w.x; w.y = (row2.y * v.z) + w.y; w.z = (row2.z * v.z) + w.z; w.w = (row2.w * v.z) + w.w; a = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z))); v.x = w.x * a; v.y = w.y * a; v.z = w.z * a; v.w = w.w * a; } VectorCopy43(v,principle); } static void ComputeWeightedCovariance(const size_t count, const DDSVector4 *points, float *covariance) { DDSVector3 centroid; float total; size_t i; total = 0.0f; VectorInit3(centroid,0.0f); for (i=0; i < count; i++) { total += points[i].w; centroid.x += (points[i].x * points[i].w); centroid.y += (points[i].y * points[i].w); centroid.z += (points[i].z * points[i].w); } if( total > 1.192092896e-07F) { centroid.x /= total; centroid.y /= total; centroid.z /= total; } for (i=0; i < 6; i++) covariance[i] = 0.0f; for (i = 0; i < count; i++) { DDSVector3 a, b; a.x = points[i].x - centroid.x; a.y = points[i].y - centroid.y; a.z = points[i].z - centroid.z; b.x = points[i].w * a.x; b.y = points[i].w * a.y; b.z = points[i].w * a.z; covariance[0] += a.x*b.x; covariance[1] += a.x*b.y; covariance[2] += a.x*b.z; covariance[3] += a.y*b.y; covariance[4] += a.y*b.z; covariance[5] += a.z*b.z; } } static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5, size_t max5, size_t min7, size_t max7) { register ssize_t i; size_t err5, err7, j; unsigned char indices5[16], indices7[16]; FixRange(min5,max5,5); err5 = CompressAlpha(min5,max5,5,alphas,indices5); FixRange(min7,max7,7); err7 = CompressAlpha(min7,max7,7,alphas,indices7); if (err7 < err5) { for (i=0; i < 16; i++) { unsigned char index; index = indices7[i]; if( index == 0 ) indices5[i] = 1; else if (index == 1) indices5[i] = 0; else indices5[i] = 9 - index; } min5 = max7; max5 = 
min7; } (void) WriteBlobByte(image,(unsigned char) min5); (void) WriteBlobByte(image,(unsigned char) max5); for(i=0; i < 2; i++) { size_t value = 0; for (j=0; j < 8; j++) { size_t index = (size_t) indices5[j + i*8]; value |= ( index << 3*j ); } for (j=0; j < 3; j++) { size_t byte = (value >> 8*j) & 0xff; (void) WriteBlobByte(image,(unsigned char) byte); } } } static void WriteIndices(Image *image, const DDSVector3 start, const DDSVector3 end, unsigned char *indices) { register ssize_t i; size_t a, b; unsigned char remapped[16]; const unsigned char *ind; a = ColorTo565(start); b = ColorTo565(end); for (i=0; i<16; i++) { if( a < b ) remapped[i] = (indices[i] ^ 0x1) & 0x3; else if( a == b ) remapped[i] = 0; else remapped[i] = indices[i]; } if( a < b ) Swap(a,b); (void) WriteBlobByte(image,(unsigned char) (a & 0xff)); (void) WriteBlobByte(image,(unsigned char) (a >> 8)); (void) WriteBlobByte(image,(unsigned char) (b & 0xff)); (void) WriteBlobByte(image,(unsigned char) (b >> 8)); for (i=0; i<4; i++) { ind = remapped + 4*i; (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) | (ind[3] << 6)); } } static void WriteCompressed(Image *image, const size_t count, DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit) { float covariance[16]; DDSVector3 end, principle, start; DDSVector4 metric; unsigned char indices[16]; VectorInit(metric,1.0f); VectorInit3(start,0.0f); VectorInit3(end,0.0f); ComputeWeightedCovariance(count,points,covariance); ComputePrincipleComponent(covariance,&principle); if ((clusterFit == MagickFalse) || (count == 0)) CompressRangeFit(count,points,map,principle,metric,&start,&end,indices); else CompressClusterFit(count,points,map,principle,metric,&start,&end,indices); WriteIndices(image,start,end,indices); } static void WriteSingleColorFit(Image *image, const DDSVector4 *points, const ssize_t *map) { DDSVector3 start, end; register ssize_t i; unsigned char color[3], index, indexes[16], indices[16]; color[0] = (unsigned 
char) ClampToLimit(255.0f*points->x,255); color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255); color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255); index=0; ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index); for (i=0; i< 16; i++) indexes[i]=index; RemapIndices(map,indexes,indices); WriteIndices(image,start,end,indices); } static void WriteFourCC(Image *image, const size_t compression, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { register ssize_t x; ssize_t i, y, bx, by; register const Quantum *p; for (y=0; y < (ssize_t) image->rows; y+=4) { for (x=0; x < (ssize_t) image->columns; x+=4) { MagickBooleanType match; DDSVector4 point, points[16]; size_t count = 0, max5 = 0, max7 = 0, min5 = 255, min7 = 255, columns = 4, rows = 4; ssize_t alphas[16], map[16]; unsigned char alpha; if (x + columns >= image->columns) columns = image->columns - x; if (y + rows >= image->rows) rows = image->rows - y; p=GetVirtualPixels(image,x,y,columns,rows,exception); if (p == (const Quantum *) NULL) break; for (i=0; i<16; i++) { map[i] = -1; alphas[i] = -1; } for (by=0; by < (ssize_t) rows; by++) { for (bx=0; bx < (ssize_t) columns; bx++) { if (compression == FOURCC_DXT5) alpha = ScaleQuantumToChar(GetPixelAlpha(image,p)); else alpha = 255; if (compression == FOURCC_DXT5) { if (alpha < min7) min7 = alpha; if (alpha > max7) max7 = alpha; if (alpha != 0 && alpha < min5) min5 = alpha; if (alpha != 255 && alpha > max5) max5 = alpha; } alphas[4*by + bx] = (size_t)alpha; point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f; point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f; point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f; point.w = weightByAlpha ? 
(float)(alpha + 1) / 256.0f : 1.0f; p+=GetPixelChannels(image); match = MagickFalse; for (i=0; i < (ssize_t) count; i++) { if ((points[i].x == point.x) && (points[i].y == point.y) && (points[i].z == point.z) && (alpha >= 128 || compression == FOURCC_DXT5)) { points[i].w += point.w; map[4*by + bx] = i; match = MagickTrue; break; } } if (match != MagickFalse) continue; points[count].x = point.x; points[count].y = point.y; points[count].z = point.z; points[count].w = point.w; map[4*by + bx] = count; count++; } } for (i=0; i < (ssize_t) count; i++) points[i].w = sqrt(points[i].w); if (compression == FOURCC_DXT5) WriteAlphas(image,alphas,min5,max5,min7,max7); if (count == 1) WriteSingleColorFit(image,points,map); else WriteCompressed(image,count,points,map,clusterFit); } } } static void WriteUncompressed(Image *image, ExceptionInfo *exception) { register const Quantum *p; register ssize_t x; ssize_t y; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,p))); if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,p))); p+=GetPixelChannels(image); } } } static void WriteImageData(Image *image, const size_t pixelFormat, const size_t compression,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { if (pixelFormat == DDPF_FOURCC) WriteFourCC(image,compression,clusterFit,weightByAlpha,exception); else WriteUncompressed(image,exception); } static MagickBooleanType WriteMipmaps(Image *image,const ImageInfo *image_info, const size_t pixelFormat,const size_t compression,const size_t mipmaps, const MagickBooleanType fromlist,const 
MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,ExceptionInfo *exception) { const char *option; Image *mipmap_image, *resize_image; MagickBooleanType fast_mipmaps, status; register ssize_t i; size_t columns, rows; columns=DIV2(image->columns); rows=DIV2(image->rows); option=GetImageOption(image_info,"dds:fast-mipmaps"); fast_mipmaps=IsStringTrue(option); mipmap_image=image; resize_image=image; status=MagickTrue; for (i=0; i < (ssize_t) mipmaps; i++) { if (fromlist == MagickFalse) { mipmap_image=ResizeImage(resize_image,columns,rows,TriangleFilter, exception); if (mipmap_image == (Image *) NULL) { status=MagickFalse; break; } } else { mipmap_image=mipmap_image->next; if ((mipmap_image->columns != columns) || (mipmap_image->rows != rows)) ThrowBinaryException(CoderError,"ImageColumnOrRowSizeIsNotSupported", image->filename); } DestroyBlob(mipmap_image); mipmap_image->blob=ReferenceBlob(image->blob); WriteImageData(mipmap_image,pixelFormat,compression,weightByAlpha, clusterFit,exception); if (fromlist == MagickFalse) { if (fast_mipmaps == MagickFalse) mipmap_image=DestroyImage(mipmap_image); else { if (resize_image != image) resize_image=DestroyImage(resize_image); resize_image=mipmap_image; } } columns=DIV2(columns); rows=DIV2(rows); } if (resize_image != image) resize_image=DestroyImage(resize_image); return(status); } static void WriteDDSInfo(Image *image, const size_t pixelFormat, const size_t compression, const size_t mipmaps) { char software[MagickPathExtent]; register ssize_t i; unsigned int format, caps, flags; flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); caps=(unsigned int) DDSCAPS_TEXTURE; format=(unsigned int) pixelFormat; if (format == DDPF_FOURCC) flags=flags | DDSD_LINEARSIZE; else flags=flags | DDSD_PITCH; if (mipmaps > 0) { flags=flags | (unsigned int) DDSD_MIPMAPCOUNT; caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX); } if (format != DDPF_FOURCC && image->alpha_trait != 
UndefinedPixelTrait) format=format | DDPF_ALPHAPIXELS; (void) WriteBlob(image,4,(unsigned char *) "DDS "); (void) WriteBlobLSBLong(image,124); (void) WriteBlobLSBLong(image,flags); (void) WriteBlobLSBLong(image,(unsigned int) image->rows); (void) WriteBlobLSBLong(image,(unsigned int) image->columns); if (pixelFormat == DDPF_FOURCC) { /* Compressed DDS requires linear compressed size of first image */ if (compression == FOURCC_DXT1) (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8)); else /* DXT5 */ (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16)); } else { /* Uncompressed DDS requires byte pitch of first image */ if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4)); else (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3)); } (void) WriteBlobLSBLong(image,0x00); (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1); (void) memset(software,0,sizeof(software)); (void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent); (void) WriteBlob(image,44,(unsigned char *) software); (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,format); if (pixelFormat == DDPF_FOURCC) { (void) WriteBlobLSBLong(image,(unsigned int) compression); for(i=0;i < 5;i++) /* bitcount / masks */ (void) WriteBlobLSBLong(image,0x00); } else { (void) WriteBlobLSBLong(image,0x00); if (image->alpha_trait != UndefinedPixelTrait) { (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0xff000000); } else { (void) WriteBlobLSBLong(image,24); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0x00); } } (void) WriteBlobLSBLong(image,caps); for(i=0;i < 4;i++) /* 
ddscaps2 + reserved region */ (void) WriteBlobLSBLong(image,0x00); } static MagickBooleanType WriteDDSImage(const ImageInfo *image_info, Image *image, ExceptionInfo *exception) { const char *option; size_t compression, columns, maxMipmaps, mipmaps, pixelFormat, rows; MagickBooleanType clusterFit, fromlist, status, weightByAlpha; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); (void) TransformImageColorspace(image,sRGBColorspace,exception); pixelFormat=DDPF_FOURCC; compression=FOURCC_DXT5; if (image->alpha_trait == UndefinedPixelTrait) compression=FOURCC_DXT1; if (LocaleCompare(image_info->magick,"dxt1") == 0) compression=FOURCC_DXT1; if (image_info->compression == DXT1Compression) compression=FOURCC_DXT1; else if (image_info->compression == NoCompression) pixelFormat=DDPF_RGB; option=GetImageOption(image_info,"dds:compression"); if (option != (char *) NULL) { if (LocaleCompare(option,"dxt1") == 0) compression=FOURCC_DXT1; if (LocaleCompare(option,"none") == 0) pixelFormat=DDPF_RGB; } clusterFit=MagickFalse; weightByAlpha=MagickFalse; if (pixelFormat == DDPF_FOURCC) { option=GetImageOption(image_info,"dds:cluster-fit"); if (IsStringTrue(option) != MagickFalse) { clusterFit=MagickTrue; if (compression != FOURCC_DXT1) { option=GetImageOption(image_info,"dds:weight-by-alpha"); if (IsStringTrue(option) != MagickFalse) weightByAlpha=MagickTrue; } } } mipmaps=0; fromlist=MagickFalse; option=GetImageOption(image_info,"dds:mipmaps"); if (option != (char *) NULL) { if (LocaleNCompare(option,"fromlist",8) == 0) { Image *next; fromlist=MagickTrue; next=image->next; while(next != (Image *) NULL) { mipmaps++; next=next->next; } } } if 
((mipmaps == 0) && ((image->columns & (image->columns - 1)) == 0) && ((image->rows & (image->rows - 1)) == 0)) { maxMipmaps=SIZE_MAX; if (option != (char *) NULL) maxMipmaps=StringToUnsignedLong(option); if (maxMipmaps != 0) { columns=image->columns; rows=image->rows; while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps) { columns=DIV2(columns); rows=DIV2(rows); mipmaps++; } } } option=GetImageOption(image_info,"dds:raw"); if (IsStringTrue(option) == MagickFalse) WriteDDSInfo(image,pixelFormat,compression,mipmaps); else mipmaps=0; WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha, exception); if ((mipmaps > 0) && (WriteMipmaps(image,image_info,pixelFormat,compression, mipmaps,fromlist,clusterFit,weightByAlpha,exception) == MagickFalse)) return(MagickFalse); (void) CloseBlob(image); return(MagickTrue); }
/* ===== concatenated source boundary: oyranos_cmm_oyra_image_ppm.c ===== */
/** @file oyranos_cmm_oyra_image.c * * Oyranos is an open source Color Management System * * @par Copyright: * 2008-2015 (C) Kai-Uwe Behrmann * * @brief modules for Oyranos * @internal * @author Kai-Uwe Behrmann <ku.b@gmx.de> * @par License: * new BSD <http://www.opensource.org/licenses/BSD-3-Clause> * @since 2008/10/07 */ #include "oyCMMapi4_s.h" #include "oyCMMapi4_s_.h" #include "oyCMMapi7_s.h" #include "oyCMMapi7_s_.h" #include "oyCMMapiFilters_s.h" #include "oyCMMui_s_.h" #include "oyConnectorImaging_s_.h" #include "oyFilterNode_s_.h" /* for oyFilterNode_TextToInfo_ */ #include "oyRectangle_s_.h" #include "oyranos_config_internal.h" #include "oyranos_cmm.h" #include "oyranos_cmm_oyra.h" #include "oyranos_generic.h" /* oy_connector_imaging_static_object */ #include "oyranos_helper.h" #include "oyranos_icc.h" #include "oyranos_i18n.h" #include "oyranos_io.h" #include "oyranos_definitions.h" #include "oyranos_string.h" #include "oyranos_texts.h" #include <math.h> #include <stdarg.h> #include <stdlib.h> #include <stdio.h> #include <string.h> typedef uint16_t half; int wread ( unsigned char *data, /* read a word */ size_t pos, size_t max, size_t *start, size_t *length ); oyOptions_s* oyraFilter_ImageOutputPPMValidateOptions ( oyFilterCore_s * filter, oyOptions_s * validate, int statical OY_UNUSED, uint32_t * result ) { uint32_t error = !filter; #if 0 oyDATATYPE_e data_type = 0; int planar, channels; oyImage_s * image = 0; if(!error) filter = node->filter; if(!error) error = filter->type_ != oyOBJECT_FILTER_S; if(!error) { if(filter->image_ && filter->image_->layout_) { data_type = oyToDataType_m( filter->image_->layout_[0] ); if(!(data_type == oyUINT8 || data_type == oyUINT16 || data_type == oyFLOAT || data_type == oyDOUBLE )) error = 1; planar = oyToPlanar_m( filter->image_->layout_[0] ); if(!error && planar) error = 1; channels = oyToChannels_m( filter->image_->layout_[0] ); if(!error && channels > 4) error = 1; } } #endif if(!error) error = 
!oyOptions_FindString( validate, "filename", 0 ); *result = error; return 0; } /** @func oyraFilterPlug_ImageOutputPPMWrite * @brief implement oyCMMFilter_GetNext_f() * * @version Oyranos: 0.3.1 * @since 2008/10/07 (Oyranos: 0.1.8) * @date 2011/05/12 */ int oyraFilterPlug_ImageOutputPPMWrite ( oyFilterPlug_s * requestor_plug, oyPixelAccess_s * ticket ) { oyFilterSocket_s * socket; oyFilterNode_s * node = 0; oyOptions_s * node_opts = 0; int result = 0; const char * filename = 0; FILE * fp = 0; socket = oyFilterPlug_GetSocket( requestor_plug ); node = oyFilterSocket_GetNode( socket ); node_opts = oyFilterNode_GetOptions( node, 0 ); /* to reuse the requestor_plug is a exception for the starting request */ if(node) result = oyFilterNode_Run( node, requestor_plug, ticket ); else result = 1; if(result <= 0) filename = oyOptions_FindString( node_opts, "filename", 0 ); if(filename) fp = fopen( filename, "wb" ); if(fp) { oyImage_s *image_output = (oyImage_s*)oyFilterSocket_GetData( socket ); const char * comment = oyOptions_FindString( node_opts, "comment", NULL ); fclose (fp); fp = 0; result = oyImage_WritePPM( image_output, filename, comment ? 
comment : oyFilterNode_GetRelatives( node ) ); } return result; } const char ppm_write_extra_options[] = { "\n\ <" OY_TOP_SHARED ">\n\ <" OY_DOMAIN_INTERNAL ">\n\ <" OY_TYPE_STD ">\n\ <" "file_write" ">\n\ <filename></filename>\n\ <comment></comment>\n\ </" "file_write" ">\n\ </" OY_TYPE_STD ">\n\ </" OY_DOMAIN_INTERNAL ">\n\ </" OY_TOP_SHARED ">\n" }; int oyraPPMwriteUiGet ( oyCMMapiFilter_s * module OY_UNUSED, oyOptions_s * opts OY_UNUSED, int flags OY_UNUSED, char ** xforms_layout, oyAlloc_f allocateFunc ) { char * text = (char*)allocateFunc(5); text[0] = 0; *xforms_layout = text; return 0; } oyDATATYPE_e oyra_image_ppm_data_types[6] = {oyUINT8, oyUINT16, oyHALF, oyFLOAT, oyDOUBLE, 0}; oyConnectorImaging_s_ oyra_imageOutputPPM_connector_out = { oyOBJECT_CONNECTOR_IMAGING_S,0,0, (oyObject_s)&oy_connector_imaging_static_object, oyCMMgetImageConnectorSocketText, /* getText */ oy_image_connector_texts, /* texts */ "//" OY_TYPE_STD "/image.data", /* connector_type */ oyFilterSocket_MatchImagingPlug, /* filterSocket_MatchPlug */ 0, /* is_plug == oyFilterPlug_s */ oyra_image_ppm_data_types, 4, /* data_types_n; elements in data_types array */ -1, /* max_color_offset */ 1, /* min_channels_count; */ 4, /* max_channels_count; */ 1, /* min_color_count; */ 4, /* max_color_count; */ 0, /* can_planar; can read separated channels */ 1, /* can_interwoven; can read continuous channels */ 0, /* can_swap; can swap color channels (BGR)*/ 0, /* can_swap_bytes; non host byte order */ 0, /* can_revert; revert 1 -> 0 and 0 -> 1 */ 1, /* can_premultiplied_alpha; */ 1, /* can_nonpremultiplied_alpha; */ 0, /* can_subpixel; understand subpixel order */ 0, /* oyCHANNELTYPE_e * channel_types; */ 0, /* count in channel_types */ 1, /* id; relative to oyFilter_s, e.g. 
1 */ 0 /* is_mandatory; mandatory flag */ }; oyConnectorImaging_s_ * oyra_imageOutputPPM_connectors_socket[2] = { &oyra_imageOutputPPM_connector_out, 0 }; oyConnectorImaging_s_ oyra_imageOutputPPM_connector_in = { oyOBJECT_CONNECTOR_IMAGING_S,0,0, (oyObject_s)&oy_connector_imaging_static_object, oyCMMgetImageConnectorPlugText, /* getText */ oy_image_connector_texts, /* texts */ "//" OY_TYPE_STD "/image.data", /* connector_type */ oyFilterSocket_MatchImagingPlug, /* filterSocket_MatchPlug */ 1, /* is_plug == oyFilterPlug_s */ oyra_image_ppm_data_types, 4, /* data_types_n; elements in data_types array */ -1, /* max_color_offset */ 1, /* min_channels_count; */ 4, /* max_channels_count; */ 1, /* min_color_count; */ 4, /* max_color_count; */ 0, /* can_planar; can read separated channels */ 1, /* can_interwoven; can read continuous channels */ 0, /* can_swap; can swap color channels (BGR)*/ 0, /* can_swap_bytes; non host byte order */ 0, /* can_revert; revert 1 -> 0 and 0 -> 1 */ 1, /* can_premultiplied_alpha; */ 1, /* can_nonpremultiplied_alpha; */ 0, /* can_subpixel; understand subpixel order */ 0, /* oyCHANNELTYPE_e * channel_types; */ 0, /* count in channel_types */ 2, /* id; relative to oyFilter_s, e.g. 1 */ 0 /* is_mandatory; mandatory flag */ }; oyConnectorImaging_s_ * oyra_imageOutputPPM_connectors_plug[2] = { &oyra_imageOutputPPM_connector_in, 0 }; /** * This function implements oyCMMGetText_f. 
* * @version Oyranos: 0.1.10 * @since 2009/12/22 (Oyranos: 0.1.10) * @date 2009/12/22 */ const char * oyraApi4ImageWriteUiGetText ( const char * select, oyNAME_e type, oyStruct_s * context OY_UNUSED ) { static char * category = 0; if(strcmp(select,"name") == 0) { if(type == oyNAME_NICK) return "write_ppm"; else if(type == oyNAME_NAME) return _("Image[write_ppm]"); else return _("Write PPM Image Filter Object"); } else if(strcmp(select,"category") == 0) { if(!category) { STRING_ADD( category, _("Files") ); STRING_ADD( category, _("/") ); STRING_ADD( category, _("Write PPM") ); } if(type == oyNAME_NICK) return "category"; else if(type == oyNAME_NAME) return category; else return category; } else if(strcmp(select,"help") == 0) { if(type == oyNAME_NICK) return "help"; else if(type == oyNAME_NAME) return _("Option \"filename\", a valid filename"); else return _("The Option \"filename\" should contain a valid filename to write the ppm data into. A existing file will be overwritten without notice."); } return 0; } const char * oyra_api4_image_write_ppm_ui_texts[] = {"name", "category", "help", 0}; /** @instance oyra_api4_image_write_ppm_ui * @brief oyra oyCMMapi4_s::ui implementation * * The UI for filter write ppm. 
* * @version Oyranos: 0.1.10 * @since 2009/09/09 (Oyranos: 0.1.10) * @date 2009/12/22 */ oyCMMui_s_ oyra_api4_image_write_ppm_ui = { oyOBJECT_CMM_DATA_TYPES_S, /**< oyOBJECT_e type; */ 0,0,0, /* unused oyStruct_s fields; keep to zero */ CMM_VERSION, /**< int32_t version[3] */ CMM_API_VERSION, /**< int32_t module_api[3] */ oyraFilter_ImageOutputPPMValidateOptions, /* oyCMMFilter_ValidateOptions_f */ oyraWidgetEvent, /* oyWidgetEvent_f */ "Files/Write PPM", /* category */ ppm_write_extra_options, /* const char * options */ oyraPPMwriteUiGet, /* oyCMMuiGet_f oyCMMuiGet */ oyraApi4ImageWriteUiGetText, /* oyCMMGetText_f getText */ oyra_api4_image_write_ppm_ui_texts, /* const char ** texts */ (oyCMMapiFilter_s*)&oyra_api4_image_write_ppm /* oyCMMapiFilter_s*parent */ }; /** @instance oyra_api4 * @brief oyra oyCMMapi4_s implementation * * A filter writing a PPM image. * * @par Options: * - "filename" - the file name to write to * * @version Oyranos: 0.1.8 * @since 2008/10/07 (Oyranos: 0.1.8) * @date 2008/10/07 */ oyCMMapi4_s_ oyra_api4_image_write_ppm = { oyOBJECT_CMM_API4_S, /* oyStruct_s::type oyOBJECT_CMM_API4_S */ 0,0,0, /* unused oyStruct_s fileds; keep to zero */ (oyCMMapi_s*) & oyra_api7_image_write_ppm, /* oyCMMapi_s * next */ oyraCMMInit, /* oyCMMInit_f */ oyraCMMMessageFuncSet, /* oyCMMMessageFuncSet_f */ /* registration */ OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD "/file_write.write_ppm._CPU._" CMM_NICK, CMM_VERSION, /* int32_t version[3] */ CMM_API_VERSION, /**< int32_t module_api[3] */ 0, /* id_; keep empty */ 0, /* api5_; keep empty */ 0, /* runtime_context */ (oyCMMFilterNode_ContextToMem_f)oyFilterNode_TextToInfo_, /* oyCMMFilterNode_ContextToMem_f */ 0, /* oyCMMFilterNode_GetText_f oyCMMFilterNode_GetText */ {0}, /* char context_type[8] */ (oyCMMui_s_*)&oyra_api4_image_write_ppm_ui /**< oyCMMui_s *ui */ }; char * oyra_api7_image_output_ppm_properties[] = { "file=write", /* file read|write */ "image=pixel", /* image type, 
pixel/vector/font */ "layers=1", /* layer count, one for plain images */ "icc=0", /* image type ICC profile support */ "ext=ppm,pnm,pbm,pgm,pfm", /* supported extensions */ 0 }; /** @instance oyra_api7 * @brief oyra oyCMMapi7_s implementation * * A filter writing a PPM image. * * @par Options: * - "filename" - the file name to write to * * @version Oyranos: 0.1.8 * @since 2008/10/07 (Oyranos: 0.1.8) * @date 2008/10/07 */ oyCMMapi7_s_ oyra_api7_image_write_ppm = { oyOBJECT_CMM_API7_S, /* oyStruct_s::type oyOBJECT_CMM_API7_S */ 0,0,0, /* unused oyStruct_s fileds; keep to zero */ (oyCMMapi_s*) & oyra_api4_image_input_ppm, /* oyCMMapi_s * next */ oyraCMMInit, /* oyCMMInit_f */ oyraCMMMessageFuncSet, /* oyCMMMessageFuncSet_f */ /* registration */ OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD "/file_write.write_ppm._CPU._" CMM_NICK, CMM_VERSION, /* int32_t version[3] */ CMM_API_VERSION, /**< int32_t module_api[3] */ 0, /* id_; keep empty */ 0, /* api5_; keep empty */ 0, /* runtime_context */ oyraFilterPlug_ImageOutputPPMWrite, /* oyCMMFilterPlug_Run_f */ {0}, /* char data_type[8] */ (oyConnector_s**) oyra_imageOutputPPM_connectors_plug, /* plugs */ 1, /* plugs_n */ 0, /* plugs_last_add */ (oyConnector_s**) oyra_imageOutputPPM_connectors_socket, /* sockets */ 1, /* sockets_n */ 0, /* sockets_last_add */ oyra_api7_image_output_ppm_properties /* char * properties */ }; /* ---------------------------------------------------------------------------*/ oyOptions_s* oyraFilter_ImageInputPPMValidateOptions ( oyFilterCore_s * filter, oyOptions_s * validate, int statical OY_UNUSED, uint32_t * result ) { uint32_t error = !filter; if(!error) error = !oyOptions_FindString( validate, "filename", 0 ); *result = error; return 0; } int wread ( unsigned char* data, size_t pos, size_t max, size_t *start, size_t *end ) { int end_found = 0; if( max <= 1 ) return 0; while(pos < max && isspace( data[pos] )) ++pos; *start = pos; while(pos < max && !end_found) { if( isspace( 
data[pos] ) ) { end_found = 1; break; } else ++pos; } *end = pos; return end_found; } /** @func oyraFilterPlug_ImageInputPPMRun * @brief implement oyCMMFilter_GetNext_f() * * @version Oyranos: 0.1.10 * @since 2009/02/18 (Oyranos: 0.1.10) * @date 2009/02/18 */ int oyraFilterPlug_ImageInputPPMRun ( oyFilterPlug_s * requestor_plug, oyPixelAccess_s * ticket ) { oyFilterSocket_s * socket = 0; oyStruct_s * socket_data = 0; oyFilterNode_s * node = 0; oyOptions_s * tags = 0; int error = 0; const char * filename = 0; FILE * fp = 0; oyDATATYPE_e data_type = oyUINT8; oyPROFILE_e profile_type = oyEDITING_RGB; oyProfile_s * prof = 0; oyImage_s * image_in = 0, * output_image = 0; oyPixel_t pixel_type = 0; int fsize = 0; size_t fpos = 0; uint8_t * data = 0, * buf = 0; size_t mem_n = 0; /* needed memory in bytes */ int info_good = 1; int32_t icc_profile_flags = 0; int type = 0; /* PNM type */ int width = 0; int height = 0; int spp = 0; /* samples per pixel */ int byteps = 1; /* byte per sample */ double maxval = 0; size_t start, end; if(requestor_plug->type_ == oyOBJECT_FILTER_PLUG_S) { socket = oyFilterPlug_GetSocket( requestor_plug ); socket_data = oyFilterSocket_GetData( socket ); } /* passing through the data reading */ if(requestor_plug->type_ == oyOBJECT_FILTER_PLUG_S && socket_data) { error = oyraFilterPlug_ImageRootRun( requestor_plug, ticket ); return error; } else if(requestor_plug->type_ == oyOBJECT_FILTER_SOCKET_S) { /* To open the a image here seems not so straight forward. * Still the plug-in should be prepared to initialise the image data before * normal processing occurs. 
*/ socket = oyFilterSocket_Copy( (oyFilterSocket_s*)requestor_plug, 0 ); requestor_plug = 0; } node = oyFilterSocket_GetNode( socket ); if(error <= 0) { oyOptions_s * opts = oyFilterNode_GetOptions( node ,0 ); filename = oyOptions_FindString( opts, "filename", 0 ); oyOptions_FindInt( opts, "icc_profile_flags", 0, &icc_profile_flags ); oyOptions_Release( &opts ); } if(filename) fp = fopen( filename, "rm" ); if(!fp) { oyra_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_ " could not open: %s", OY_DBG_ARGS_, oyNoEmptyString_m_( filename ) ); return 1; } fseek(fp,0L,SEEK_END); fsize = ftell(fp); rewind(fp); oyAllocHelper_m_( data, uint8_t, fsize, 0, fclose(fp); return 1); fpos = fread( data, sizeof(uint8_t), fsize, fp ); if( fpos < (size_t)fsize ) { oyra_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_ " could not read: %s %d %d", OY_DBG_ARGS_, oyNoEmptyString_m_( filename ), fsize, (int)fpos ); oyFree_m_( data ) fclose (fp); return FALSE; } fpos = 0; fclose (fp); fp = NULL; /* parse Infos */ if(data[fpos] == 'P') { if(isdigit(data[++fpos])) { char tmp[2] = {0, 0}; tmp[0] = data[fpos]; type = atoi(tmp); } else if (!isspace(data[fpos])) { if(data[fpos] == 'F') /* PFM rgb */ type = -6; else if (data[fpos] == 'f') /* PFM gray */ type = -5; else if(data[fpos] == 'H') /* PFM Half rgb */ type = -9; else if (data[fpos] == 'h') /* PFM Half gray */ type = -8; else info_good = 0; } else info_good = 0; } fpos++; /* parse variables */ { int in_c = 0; /* within comment */ int v_read = 0; /* number of variables allready read */ int v_need = 3; /* number of needed variable; start with three */ int l_end = 0; /* line end position */ int l_pos = 0; /* line position */ int l_rdg = 1; /* line reading */ char * tupltype = NULL; /* ICC profile internal color space */ int tupl = 0; if(type == 1 || type == 4) v_need = 2; if(type == 7) /* pam */ v_need = 12; while(v_read < v_need && info_good) { l_pos = l_end = fpos; l_rdg = 1; /* read line */ while(fpos < (size_t)fsize && l_rdg) { 
if(data[fpos-1] == '\n' && data[fpos] == '#') { in_c = 1; l_end = fpos-1; } else if(data[fpos] == 10 || data[fpos] == 13) { /* line break */ l_rdg = 0; } else if(data[fpos] != 0) { if(!in_c) ++l_end; } else { l_rdg = 0; } if(!l_rdg) { in_c = 0; } ++fpos; } /* lockup color space */ if(fpos - l_pos > 0) { if(fpos - l_pos >= 14 && memcmp(&data[l_pos],"# COLORSPACE: ", 14) == 0) { char * t = oyAllocateFunc_(fpos - l_pos + 1); if(t) { memcpy( t, &data[l_pos+14], fpos - l_pos - 15 ); t[fpos - l_pos - 15] = 0; prof = oyProfile_FromName(t, icc_profile_flags, NULL); if(prof) { if(oy_debug) oyra_msg( oyMSG_DBG, (oyStruct_s*)node, OY_DBG_FORMAT_ "found ICC: %s", OY_DBG_ARGS_, oyNoEmptyString_m_( t ) ); } else oyra_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_ "could not find ICC: %s", OY_DBG_ARGS_, oyNoEmptyString_m_( t ) ); oyDeAllocateFunc_(t); } } } if(!prof && getenv("COLORSPACE")) { const char * t = getenv("COLORSPACE"); prof = oyProfile_FromName(t, icc_profile_flags, NULL); if(!prof) oyra_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_ "could not find \"COLORSPACE\" from environment variable: %s", OY_DBG_ARGS_, oyNoEmptyString_m_( t ) ); } /* parse line */ while(info_good && v_read < v_need && l_pos < l_end) { if( info_good ) { double var = -2; char var_s[64]; int l = 0; wread ( data, l_pos, l_end, &start, &end ); l = end - start; if ( l < 63 ) { memcpy(var_s, &data[start], l); var_s[l] = 0; oyStringToDouble(var_s, &var); # ifdef DEBUG_ fprintf(stderr, "var = \"%s\" %d\n",var_s, l); # endif } l_pos = end + 1; if(type == 7) { if(height == -1) height = (int)var; if(width == -1) width = (int)var; if(spp == -1) spp = (int)var; if(maxval == -0.5) maxval = var; if(tupl == -1) { tupl = 1; tupltype = oyStringCopy(var_s, oyAllocateFunc_); } if(strcmp(var_s, "HEIGHT") == 0) height = -1; /* expecting the next token is the val */ if(strcmp(var_s, "WIDTH") == 0) width = -1; if(strcmp(var_s, "DEPTH") == 0) spp = -1; if(strcmp(var_s, "MAXVAL") == 0) maxval = -0.5; 
if(strcmp(var_s, "TUPLTYPE") == 0) tupl = -1; if(strcmp(var_s, "ENDHDR") == 0) v_need = v_read; } else { if (!var) info_good = 0; if(v_read == 0) width = (int)var; else if(v_read == 1) height = (int)var; else if(v_read == 2) maxval = var; } ++v_read; } } } if(tupltype && !prof) { const char * colorspace = "rgbi"; if(strcmp(tupltype, "GRAY") == 0 || strcmp(tupltype, "GRAY_ALPHA") == 0) colorspace = "grayi"; if(strcmp(tupltype, "RGB") == 0 || strcmp(tupltype, "RGB_ALPHA") == 0) colorspace = "rgbi"; if(strcmp(tupltype, "CMYK") == 0 || strcmp(tupltype, "CMYK_ALPHA") == 0) colorspace = "cmyki"; prof = oyProfile_FromName( colorspace, icc_profile_flags, NULL ); if(!prof) oyra_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_ "could not find \"COLORSPACE\" from environment variable: %s", OY_DBG_ARGS_, oyNoEmptyString_m_( tupltype ) ); oyFree_m_(tupltype) } } if(strstr(strrchr(filename, '.')+1, "raw")) { const char * t; info_good = 1; t = getenv("RAW_WIDTH"); if(t) width = atoi(t); else info_good = 0; t = getenv("RAW_HEIGHT"); if(t) height = atoi(t); else info_good = 0; t = getenv("RAW_TYPE"); if(t) type = atoi(t); else info_good = 0; fpos = 0; t = getenv("RAW_MAXVAL"); if(t) maxval = atoi(t); else info_good = 0; if(info_good == 0) oyra_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_ "need RAW_WIDTH, RAW_HEIGHT, RAW_TYPE and RAW_MAXVAL environment variables", OY_DBG_ARGS_ ); } if(info_good) switch(type) { case 1: case 4: data_type = oyUINT8; spp = 1; info_good = 0; break; case 2: case 5: if(maxval <= 255) { data_type = oyUINT8; byteps = 1; } else if (maxval <= 65535) { data_type = oyUINT16; byteps = 2; } spp = 1; break; case 3: case 6: if(maxval <= 255) { data_type = oyUINT8; byteps = 1; } else if (maxval <= 65535) { data_type = oyUINT16; byteps = 2; } spp = 3; break; case -5: data_type = oyFLOAT; byteps = 4; spp = 1; break; case -6: byteps = 4; spp = 3; data_type = oyFLOAT; break; case -8: data_type = oyHALF; byteps = 2; spp = 1; break; case -9: byteps = 2; spp = 3; 
data_type = oyHALF; break; case 7: /* pam */ if (maxval == 1.0 || maxval == -1.0) { byteps = 4; data_type = oyFLOAT; } else if(maxval <= 255) { byteps = 1; data_type = oyUINT8; } else if (maxval <= 65535) { byteps = 2; data_type = oyUINT16; } break; default: info_good = 0; } switch(spp) { case 1: profile_type = oyASSUMED_GRAY; break; case 2: profile_type = oyASSUMED_GRAY; break; case 3: profile_type = oyASSUMED_RGB; break; case 4: profile_type = oyASSUMED_RGB; break; } if( !info_good ) { oyra_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_ "failed to get info of %s", OY_DBG_ARGS_, oyNoEmptyString_m_( filename )); oyFree_m_( data ) return FALSE; } /* check if the file can hold the expected data (for raw only) */ mem_n = width*height*byteps*spp; if(type == 5 || type == 6 || type == -5 || type == -6 || type == -8 || type == -9 || type == 7) { if (mem_n > fsize-fpos) { oyra_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_ "\n storage size of %s is too small: %d", OY_DBG_ARGS_, oyNoEmptyString_m_( filename ), (int)mem_n-fsize-fpos ); oyFree_m_( data ) return FALSE; } } else { if (type == 2 || type == 3) { oyra_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_ "\n %s contains ascii data, which are not handled by this pnm reader", OY_DBG_ARGS_, oyNoEmptyString_m_( filename )); } else if (type == 1 || type == 4) { oyra_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_ "\n %s contains bitmap data, which are not handled by this pnm reader", OY_DBG_ARGS_, oyNoEmptyString_m_( filename ) ); } oyFree_m_( data ) return FALSE; } oyAllocHelper_m_( buf, uint8_t, mem_n, 0, oyFree_m_( data ); return 1); DBG_NUM2_S("allocate image data: 0x%x size: %d ", (int)(intptr_t) buf, mem_n ); /* the following code is almost completely taken from ku.b's ppm CP plug-in */ { int h, j_h = 0, p, n_samples = 0, n_bytes = 0; int byte_swap = 0; unsigned char *d_8 = 0; unsigned char *src = &data[fpos]; uint16_t *d_16; half *d_f16; float *d_f; int adapt = 0; if(oyBigEndian()) { if( maxval < 0 && 
(byteps == 2 || byteps == 4) ) byte_swap = 1; } else { if( maxval > 0 && (byteps == 2 || byteps == 4) ) byte_swap = 1; } maxval = fabs(maxval); for(h = 0; h < height; ++h) { n_samples = 1 * width * spp; n_bytes = n_samples * byteps; d_8 = buf; d_16 = (uint16_t*)buf; d_f16= (half*)buf; d_f = (float*)buf; /* TODO 1 bit raw and ascii */ if (type == 1 || type == 4) { /* TODO ascii */ } else if (type == 2 || type == 3) { /* raw and floats */ } else if (type == 5 || type == 6 || type == -5 || type == -6 || type == -8 || type == -9 || type == 7 ) { if(byteps == 1) { d_8 = &src[ h * width * spp * byteps ]; } else if(byteps == 2) { d_f16 = d_16 = (uint16_t*)& src[ h * width * spp * byteps ]; } else if(byteps == 4) { d_f = (float*)&src[ h * width * spp * byteps ]; } memcpy (&buf[ h * width * spp * byteps ], &src[ (j_h + h) * width * spp * byteps ], 1 * width * spp * byteps); } /* normalise and byteswap */ if( byte_swap ) { unsigned char *c_buf = &buf[ h * width * spp * byteps ]; char tmp; adapt |= 1; if (byteps == 2) { /* 16 bit */ #pragma omp parallel for private(tmp) for (p = 0; p < n_bytes; p += 2) { tmp = c_buf[p]; c_buf[p] = c_buf[p+1]; c_buf[p+1] = tmp; } } else if (byteps == 4) { /* float */ #pragma omp parallel for private(tmp) for (p = 0; p < n_bytes; p += 4) { tmp = c_buf[p]; c_buf[p] = c_buf[p+3]; c_buf[p+3] = tmp; tmp = c_buf[p+1]; c_buf[p+1] = c_buf[p+2]; c_buf[p+2] = tmp; } } } if (byteps == 1 && maxval < 255) { /* 8 bit */ adapt |= 2; #pragma omp parallel for for (p = 0; p < n_samples; ++p) d_8[p] = (d_8[p] * 255) / maxval; } else if (byteps == 2 && maxval != 1.0 && (type == -8 || type == -9)) { /* half float */ adapt |= 2; #pragma omp parallel for for (p = 0; p < n_samples; ++p) d_f16[p] = d_f16[p] * maxval; } else if (byteps == 2 && maxval < 65535 && type != -8 && type != -9) {/* 16 bit */ adapt |= 2; #pragma omp parallel for for (p = 0; p < n_samples; ++p) d_16 [p] = (d_16[p] * 65535) / maxval; } else if (byteps == 4 && maxval != 1.0) { /* float */ adapt |= 
2; #pragma omp parallel for for (p = 0; p < n_samples; ++p) d_f[p] = d_f[p] * maxval; } } if((adapt & 1) && oy_debug) oyra_msg( oyMSG_DBG, (oyStruct_s*)node, OY_DBG_FORMAT_ "going to swap bytes %d %d", OY_DBG_ARGS_, byteps, n_bytes ); if((adapt & 2) && oy_debug) oyra_msg( oyMSG_DBG, (oyStruct_s*)node, OY_DBG_FORMAT_ "going to adapt intensity %g %d", OY_DBG_ARGS_, maxval, n_samples ); } pixel_type = oyChannels_m(spp) | oyDataType_m(data_type); if(!prof) prof = oyProfile_FromStd( profile_type, icc_profile_flags, 0 ); image_in = oyImage_Create( width, height, buf, pixel_type, prof, 0 ); if (!image_in) { oyra_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_ "PNM can't create a new image\n%dx%d %d", OY_DBG_ARGS_, width, height, pixel_type ); oyFree_m_ (data) return FALSE; } tags = oyImage_GetTags( image_in ); error = oyOptions_SetFromString( &tags, "//" OY_TYPE_STD "/file_read.input_ppm" "/filename", filename, OY_CREATE_NEW ); oyOptions_Release( &tags ); if(error <= 0) { oyFilterSocket_SetData( socket, (oyStruct_s*)image_in ); } if(ticket) output_image = oyPixelAccess_GetOutputImage( ticket ); if(ticket && output_image && oyImage_GetWidth( output_image ) == 0 && oyImage_GetHeight( output_image ) == 0) { oyImage_SetCritical( output_image, oyImage_GetPixelLayout( image_in, oyLAYOUT ), 0,0, oyImage_GetWidth( image_in ), oyImage_GetHeight( image_in ) ); } oyImage_Release( &image_in ); oyImage_Release( &output_image ); oyFilterNode_Release( &node ); oyFilterSocket_Release( &socket ); oyFree_m_ (data) /* return an error to cause the graph to retry */ return 1; } const char ppm_read_extra_options[] = { "\n\ <" OY_TOP_SHARED ">\n\ <" OY_DOMAIN_INTERNAL ">\n\ <" OY_TYPE_STD ">\n\ <" "file_read" ">\n\ <filename></filename>\n\ </" "file_read" ">\n\ </" OY_TYPE_STD ">\n\ </" OY_DOMAIN_INTERNAL ">\n\ </" OY_TOP_SHARED ">\n" }; int oyraPPMreadUiGet ( oyCMMapiFilter_s * module OY_UNUSED, oyOptions_s * opts OY_UNUSED, int flags OY_UNUSED, char ** xforms_layout, oyAlloc_f allocateFunc 
) {
  /* NOTE(review): tail of oyraPPMreadUiGet() -- its signature opens on the
   * preceding (collapsed) line.  Returns an empty, allocateFunc-provided
   * XFORMS layout string: the PPM reader exposes no UI beyond "filename". */
  char * text = (char*)allocateFunc(5);
  text[0] = 0;

  *xforms_layout = text;

  return 0;
}

/* Static description of the image data socket provided by the PPM reader.
 * Positional initialisation of oyConnectorImaging_s_; each trailing comment
 * names the field being set -- keep order in sync with the struct. */
oyConnectorImaging_s_ oyra_imageInputPPM_connector = {
  oyOBJECT_CONNECTOR_IMAGING_S,0,0, (oyObject_s)&oy_connector_imaging_static_object,
  oyCMMgetImageConnectorSocketText, /* getText */
  oy_image_connector_texts, /* texts */
  "//" OY_TYPE_STD "/image.data", /* connector_type */
  oyFilterSocket_MatchImagingPlug, /* filterSocket_MatchPlug */
  0, /* is_plug == oyFilterPlug_s */
  oyra_image_ppm_data_types,
  4, /* data_types_n; elements in data_types array */
  -1, /* max_color_offset */
  1, /* min_channels_count; */
  4, /* max_channels_count; */
  1, /* min_color_count; */
  4, /* max_color_count; */
  0, /* can_planar; can read separated channels */
  1, /* can_interwoven; can read continuous channels */
  0, /* can_swap; can swap color channels (BGR)*/
  0, /* can_swap_bytes; non host byte order */
  0, /* can_revert; revert 1 -> 0 and 0 -> 1 */
  1, /* can_premultiplied_alpha; */
  1, /* can_nonpremultiplied_alpha; */
  0, /* can_subpixel; understand subpixel order */
  0, /* oyCHANNELTYPE_e * channel_types; */
  0, /* count in channel_types */
  1, /* id; relative to oyFilter_s, e.g. 1 */
  0 /* is_mandatory; mandatory flag */
};

/* NULL terminated socket list; referenced by oyra_api7_image_input_ppm. */
oyConnectorImaging_s_ * oyra_imageInputPPM_connectors[2] =
  { &oyra_imageInputPPM_connector, 0 };

/**
 * This function implements oyCMMGetText_f.
 *
 * Maps the "name", "category" and "help" selectors to the user visible
 * strings of the PPM input filter.
 *
 * @version Oyranos: 0.1.10
 * @since   2009/12/22 (Oyranos: 0.1.10)
 * @date    2009/12/22
 */
const char * oyraApi4ImageInputUiGetText (
                                       const char        * select,
                                       oyNAME_e            type,
                                       oyStruct_s        * context OY_UNUSED )
{
  /* lazily built and cached translated category path ("Files/Read PPM") */
  static char * category = 0;
  if(strcmp(select,"name") == 0)
  {
    if(type == oyNAME_NICK)
      return "input_ppm";
    else if(type == oyNAME_NAME)
      return _("Image[input_ppm]");
    else
      return _("Input PPM Image Filter Object");
  }
  else if(strcmp(select,"category") == 0)
  {
    if(!category)
    {
      STRING_ADD( category, _("Files") );
      STRING_ADD( category, _("/") );
      STRING_ADD( category, _("Read PPM") );
    }
    if(type == oyNAME_NICK)
      return "category";
    else if(type == oyNAME_NAME)
      return category;
    else
      return category;
  }
  else if(strcmp(select,"help") == 0)
  {
    if(type == oyNAME_NICK)
      return "help";
    else if(type == oyNAME_NAME)
      return _("Option \"filename\", a valid filename of a existing PPM image");
    else
      return _("The Option \"filename\" should contain a valid filename to read the ppm data from. If the file does not exist, a error will occure.\nThe oyEDITING_RGB ICC profile is attached.");
  }
  /* unknown selector */
  return 0;
}

/* Selectors understood by oyraApi4ImageInputUiGetText(); NULL terminated. */
const char * oyra_api4_image_input_ppm_ui_texts[] = {"name", "category", "help", 0};

/** @instance oyra_api4_ui_image_input_ppm
 *  @brief    oyra oyCMMapi4_s::ui implementation
 *
 *  The UI for filter input ppm.
 *
 *  @version Oyranos: 0.1.10
 *  @since   2009/09/09 (Oyranos: 0.1.10)
 *  @date    2009/09/09
 */
oyCMMui_s_ oyra_api4_ui_image_input_ppm = {
  /* NOTE(review): the type tag below is oyOBJECT_CMM_DATA_TYPES_S although
   * this is an oyCMMui_s_ instance -- confirm against the Oyranos headers. */
  oyOBJECT_CMM_DATA_TYPES_S,           /**< oyOBJECT_e type; */
  0,0,0,                               /* unused oyStruct_s fields; keep to zero */
  CMM_VERSION,                         /**< int32_t version[3] */
  CMM_API_VERSION,                     /**< int32_t module_api[3] */

  oyraFilter_ImageInputPPMValidateOptions, /* oyCMMFilter_ValidateOptions_f */
  oyraWidgetEvent, /* oyWidgetEvent_f */

  "Files/Read PPM", /* category */
  ppm_read_extra_options, /* const char * options */
  oyraPPMreadUiGet, /* oyCMMuiGet_f oyCMMuiGet */

  oyraApi4ImageInputUiGetText, /* oyCMMGetText_f getText */
  oyra_api4_image_input_ppm_ui_texts, /* const char ** texts */
  (oyCMMapiFilter_s*)&oyra_api4_image_input_ppm /* oyCMMapiFilter_s*parent */
};

/** @instance oyra_api4
 *  @brief    oyra oyCMMapi4_s implementation
 *
 *  A filter for reading a PPM image.
 *
 *  @par Options:
 *  - "filename" - the file name to read from
 *
 *  @version Oyranos: 0.1.10
 *  @since   2009/02/18 (Oyranos: 0.1.10)
 *  @date    2009/02/18
 */
oyCMMapi4_s_ oyra_api4_image_input_ppm = {
  oyOBJECT_CMM_API4_S, /* oyStruct_s::type oyOBJECT_CMM_API4_S */
  0,0,0, /* unused oyStruct_s fields; keep to zero */
  (oyCMMapi_s*) & oyra_api7_image_input_ppm, /* oyCMMapi_s * next */

  oyraCMMInit, /* oyCMMInit_f */
  oyraCMMMessageFuncSet, /* oyCMMMessageFuncSet_f */

  /* registration */
  OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD "/file_read.input_ppm._CPU._" CMM_NICK,

  CMM_VERSION, /* int32_t version[3] */
  CMM_API_VERSION,                     /**< int32_t module_api[3] */
  0,   /* id_; keep empty */
  0,   /* api5_; keep empty */
  0,   /* runtime_context */

  (oyCMMFilterNode_ContextToMem_f)oyFilterNode_TextToInfo_, /* oyCMMFilterNode_ContextToMem_f */
  0, /* oyCMMFilterNode_GetText_f oyCMMFilterNode_GetText */
  {0}, /* char context_type[8] */

  (oyCMMui_s_*)&oyra_api4_ui_image_input_ppm /**< oyCMMui_s *ui */
};

/* Key/value capability list consumed by the module loader; NULL terminated. */
char * oyra_api7_image_input_ppm_properties[] =
{
  "file=read",   /* file read|write */
  "image=pixel", /* image type, pixel/vector/font */
  "layers=1",    /* layer count, one for plain images */
  "icc=1",       /* image type ICC profile support */
  "ext=pam,ppm,pnm,pbm,pgm,pfm,raw", /* supported extensions */
  0
};

/** @instance oyra_api7
 *  @brief    oyra oyCMMapi7_s implementation
 *
 *  A filter reading a PPM image.
 *
 *  @par Options:
 *  - "filename" - the file name to read from
 *
 *  @version Oyranos: 0.1.10
 *  @since   2009/02/18 (Oyranos: 0.1.10)
 *  @date    2009/02/18
 */
oyCMMapi7_s_ oyra_api7_image_input_ppm = {
  oyOBJECT_CMM_API7_S, /* oyStruct_s::type oyOBJECT_CMM_API7_S */
  0,0,0, /* unused oyStruct_s fields; keep to zero */
  (oyCMMapi_s*) & oyra_api4_image_load, /* oyCMMapi_s * next */

  oyraCMMInit, /* oyCMMInit_f */
  oyraCMMMessageFuncSet, /* oyCMMMessageFuncSet_f */

  /* registration */
  OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD "/file_read.input_ppm._CPU._" CMM_NICK,

  CMM_VERSION, /* int32_t version[3] */
  CMM_API_VERSION,                     /**< int32_t module_api[3] */
  0,   /* id_; keep empty */
  0,   /* api5_; keep empty */
  0,   /* runtime_context */

  oyraFilterPlug_ImageInputPPMRun, /* oyCMMFilterPlug_Run_f */
  {0}, /* char data_type[8] */

  0,   /* plugs */
  0,   /* plugs_n */
  0,   /* plugs_last_add */
  (oyConnector_s**) oyra_imageInputPPM_connectors, /* sockets */
  1,   /* sockets_n */
  0,   /* sockets_last_add */

  oyra_api7_image_input_ppm_properties /* char ** properties */
};
layerramdistancetransform.h
/********************************************************************************* * * Inviwo - Interactive Visualization Workshop * * Copyright (c) 2017-2020 Inviwo Foundation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *********************************************************************************/ #ifndef IVW_LAYERRAMDISTANCETRANSFORM_H #define IVW_LAYERRAMDISTANCETRANSFORM_H #include <modules/base/basemoduledefine.h> #include <inviwo/core/common/inviwo.h> #include <inviwo/core/util/indexmapper.h> #include <inviwo/core/datastructures/image/layer.h> #include <inviwo/core/datastructures/image/layerram.h> #include <inviwo/core/datastructures/image/layerramprecision.h> #ifndef __clang__ #include <omp.h> #endif namespace inviwo { namespace util { /** * Implementation of Euclidean Distance Transform according to Saito's algorithm: * T. Saito and J.I. Toriwaki. New algorithms for Euclidean distance transformations * of an n-dimensional digitized picture with applications. Pattern Recognition, 27(11). * pp. 1551-1565, 1994. * http://www.cs.jhu.edu/~misha/ReadingSeminar/Papers/Saito94.pdf * * Calculates the distance in base mat space * * Predicate is a function of type (const T &value) -> bool to deside if a value in the input * is a "feature". * * ValueTransform is a function of type (const U& squaredDist) -> U that is appiled to all * squared distance values at the end of the calculation. * * ProcessCallback is a function of type (double progress) -> void that is called with a value * from 0 to 1 to indicate the progress of the calculation. 
*/
// Forward declarations; implementations follow below, outside namespace util.
template <typename T, typename U, typename Predicate, typename ValueTransform,
          typename ProgressCallback>
void layerRAMDistanceTransform(const LayerRAMPrecision<T> *inLayer,
                               LayerRAMPrecision<U> *outDistanceField, const Matrix<2, U> basis,
                               const size2_t upsample, Predicate predicate,
                               ValueTransform valueTransform, ProgressCallback callback);

// Convenience overload: feature = normalized value > 0.5, output = sqrt of the
// squared distance, no progress reporting.
template <typename T, typename U>
void layerRAMDistanceTransform(const LayerRAMPrecision<T> *inVolume,
                               LayerRAMPrecision<U> *outDistanceField, const Matrix<2, U> basis,
                               const size2_t upsample);

// Dispatching front-ends working on a generic Layer (any scalar data format).
template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback>
void layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField,
                            const size2_t upsample, Predicate predicate,
                            ValueTransform valueTransform, ProgressCallback callback);

template <typename U, typename ProgressCallback>
void layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField,
                            const size2_t upsample, double threshold, bool normalize, bool flip,
                            bool square, double scale, ProgressCallback callback);

template <typename U>
void layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField,
                            const size2_t upsample, double threshold, bool normalize, bool flip,
                            bool square, double scale);

}  // namespace util

template <typename T, typename U, typename Predicate, typename ValueTransform,
          typename ProgressCallback>
void util::layerRAMDistanceTransform(const LayerRAMPrecision<T> *inLayer,
                                     LayerRAMPrecision<U> *outDistanceField,
                                     const Matrix<2, U> basis, const size2_t upsample,
                                     Predicate predicate, ValueTransform valueTransform,
                                     ProgressCallback callback) {
#ifndef __clang__
    // omp.h is only included for non-clang compilers (see top of file);
    // use all hardware threads for the parallel passes below.
    omp_set_num_threads(std::thread::hardware_concurrency());
#endif

    using int64 = glm::int64;
    using i64vec2 = glm::tvec2<int64>;

    auto square = [](auto a) { return a * a; };

    callback(0.0);

    const T *src = inLayer->getDataTyped();
    U *dst = outDistanceField->getDataTyped();

    const i64vec2 srcDim{inLayer->getDimensions()};
    const i64vec2 dstDim{outDistanceField->getDimensions()};
    const i64vec2 sm{upsample};

    // All intermediate work is done in *squared* distances (Saito's algorithm);
    // the basis is folded into per-axis squared texel sizes.
    const auto squareBasis = glm::transpose(basis) * basis;
    const Vector<2, U> squareBasisDiag{squareBasis[0][0], squareBasis[1][1]};
    const Vector<2, U> squareVoxelSize{squareBasisDiag / Vector<2, U>{dstDim * dstDim}};
    const Vector<2, U> invSquareVoxelSize{Vector<2, U>{1.0f} / squareVoxelSize};

    {
        // The separable two-pass scheme assumes an orthogonal basis; warn if
        // off-diagonal terms are significant relative to the largest diagonal.
        // NOTE(review): glm's length() returns an int, so "size_t i < length()"
        // mixes signedness; also the inner "break" only leaves the j loop.
        // Both are harmless here but worth tidying upstream.
        const auto maxdist = glm::compMax(squareBasisDiag);
        bool orthogonal = true;
        for (size_t i = 0; i < squareBasis.length(); i++) {
            for (size_t j = 0; j < squareBasis.length(); j++) {
                if (i != j) {
                    if (std::abs(squareBasis[i][j]) > 10.0e-8 * maxdist) {
                        orthogonal = false;
                        break;
                    }
                }
            }
        }
        if (!orthogonal) {
            LogWarnCustom(
                "layerRAMDistanceTransform",
                "Calculating the distance transform on a non-orthogonal layer will not give "
                "correct values");
        }
    }

    // The output must be exactly the upsampled input.
    if (srcDim * sm != dstDim) {
        throw Exception(
            "DistanceTransformRAM: Dimensions does not match src = " + toString(srcDim) +
                " dst = " + toString(dstDim) + " scaling = " + toString(sm),
            IVW_CONTEXT_CUSTOM("layerRAMDistanceTransform"));
    }

    util::IndexMapper<2, int64> srcInd(srcDim);
    util::IndexMapper<2, int64> dstInd(dstDim);

    // Sample the (smaller) source at the destination position.
    auto is_feature = [&](const int64 x, const int64 y) {
        return predicate(src[srcInd(x / sm.x, y / sm.y)]);
    };

    // first pass, forward and backward scan along x
    // result: min distance in x direction
#pragma omp parallel for
    for (int64 y = 0; y < dstDim.y; ++y) {
        // forward
        U dist = static_cast<U>(dstDim.x);
        for (int64 x = 0; x < dstDim.x; ++x) {
            if (!is_feature(x, y)) {
                ++dist;
            } else {
                dist = U(0);
            }
            dst[dstInd(x, y)] = squareVoxelSize.x * square(dist);
        }
        // backward
        dist = static_cast<U>(dstDim.x);
        for (int64 x = dstDim.x - 1; x >= 0; --x) {
            if (!is_feature(x, y)) {
                ++dist;
            } else {
                dist = U(0);
            }
            dst[dstInd(x, y)] = std::min<U>(dst[dstInd(x, y)], squareVoxelSize.x * square(dist));
        }
    }

    // second pass, scan y direction
    // for each voxel v(x,y,z) find min_i(data(x,i,z) + (y - i)^2), 0 <= i < dimY
    // result: min distance in x and y direction
    callback(0.45);
#pragma omp parallel
    {
        // per-thread column buffer so reads and writes of dst don't interleave
        std::vector<U> buff;
        buff.resize(dstDim.y);
#pragma omp for
        for (int64 x = 0; x < dstDim.x; ++x) {
            // cache column data into temporary buffer
            for (int64 y = 0; y < dstDim.y; ++y) {
                buff[y] = dst[dstInd(x, y)];
            }
            for (int64 y = 0; y < dstDim.y; ++y) {
                auto d = buff[y];
                if (d != U(0)) {
                    // rMax bounds the search window: beyond it the y-term alone
                    // exceeds the current best squared distance.
                    // NOTE(review): rStart = min(rMax, y - 1) means row 0 is
                    // never examined for y > 0 -- verify against the upstream
                    // implementation whether this should be min(rMax, y).
                    const auto rMax = static_cast<int64>(std::sqrt(d * invSquareVoxelSize.y)) + 1;
                    const auto rStart = std::min(rMax, y - 1);
                    const auto rEnd = std::min(rMax, dstDim.y - y);
                    for (int64 n = -rStart; n < rEnd; ++n) {
                        const auto w = buff[y + n] + squareVoxelSize.y * square(n);
                        if (w < d) d = w;
                    }
                }
                dst[dstInd(x, y)] = d;
            }
        }
    }

    // scale data: apply the user supplied transform (e.g. sqrt) per texel
    callback(0.9);
    const int64 layerSize = dstDim.x * dstDim.y;
#pragma omp parallel for
    for (int64 i = 0; i < layerSize; ++i) {
        dst[i] = valueTransform(dst[i]);
    }

    callback(1.0);
}

template <typename T, typename U>
void util::layerRAMDistanceTransform(const LayerRAMPrecision<T> *inLayer,
                                     LayerRAMPrecision<U> *outDistanceField,
                                     const Matrix<2, U> basis, const size2_t upsample) {
    // Defaults: feature = normalized value > 0.5; report Euclidean (sqrt)
    // distances; ignore progress.
    util::layerRAMDistanceTransform(
        inLayer, outDistanceField, basis, upsample,
        [](const T &val) { return util::glm_convert_normalized<double>(val) > 0.5; },
        [](const U &squareDist) {
            return static_cast<U>(std::sqrt(static_cast<double>(squareDist)));
        },
        [](double f) {});
}

template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback>
void util::layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField,
                                  const size2_t upsample, Predicate predicate,
                                  ValueTransform valueTransform, ProgressCallback callback) {
    // Dispatch on the layer's scalar data format and forward to the typed impl.
    const auto inputLayerRep = inLayer->getRepresentation<LayerRAM>();
    inputLayerRep->dispatch<void, dispatching::filter::Scalars>([&](const auto lrprecision) {
        layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(), upsample,
                                  predicate, valueTransform, callback);
    });
}

template <typename U, typename ProgressCallback>
void util::layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField,
                                  const size2_t upsample, double threshold, bool normalize,
                                  bool flip, bool square, double scale,
                                  ProgressCallback progress) {
    // Select predicate (threshold in raw vs. normalized space, optionally
    // flipped) and value transform (squared vs. Euclidean, scaled), then run
    // the transform.  The eight branches enumerate all flag combinations so
    // the lambdas keep concrete types for the typed implementation.
    const auto inputLayerRep = inLayer->getRepresentation<LayerRAM>();
    inputLayerRep->dispatch<void, dispatching::filter::Scalars>([&](const auto lrprecision) {
        using ValueType = util::PrecisionValueType<decltype(lrprecision)>;

        const auto predicateIn = [threshold](const ValueType &val) { return val < threshold; };
        const auto predicateOut = [threshold](const ValueType &val) { return val > threshold; };

        const auto normPredicateIn = [threshold](const ValueType &val) {
            return util::glm_convert_normalized<double>(val) < threshold;
        };
        const auto normPredicateOut = [threshold](const ValueType &val) {
            return util::glm_convert_normalized<double>(val) > threshold;
        };

        const auto valTransIdent = [scale](const float &squareDist) {
            return static_cast<float>(scale * squareDist);
        };
        const auto valTransSqrt = [scale](const float &squareDist) {
            return static_cast<float>(scale * std::sqrt(squareDist));
        };

        if (normalize && square && flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, normPredicateIn, valTransIdent, progress);
        } else if (normalize && square && !flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, normPredicateOut, valTransIdent, progress);
        } else if (normalize && !square && flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, normPredicateIn, valTransSqrt, progress);
        } else if (normalize && !square && !flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, normPredicateOut, valTransSqrt, progress);
        } else if (!normalize && square && flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, predicateIn, valTransIdent, progress);
        } else if (!normalize && square && !flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, predicateOut, valTransIdent, progress);
        } else if (!normalize && !square && flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, predicateIn, valTransSqrt, progress);
        } else if (!normalize && !square && !flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, predicateOut, valTransSqrt, progress);
        }
    });
}

template <typename U>
void util::layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField,
                                  const size2_t upsample, double threshold, bool normalize,
                                  bool flip, bool square, double scale) {
    // Overload without progress reporting.
    util::layerDistanceTransform(inLayer, outDistanceField, upsample, threshold, normalize, flip,
                                 square, scale, [](double) {});
}

}  // namespace inviwo

#endif  // IVW_LAYERRAMDISTANCETRANSFORM_H
mkl_util.h
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_CORE_UTIL_MKL_UTIL_H_ #define TENSORFLOW_CORE_UTIL_MKL_UTIL_H_ #ifdef INTEL_MKL #include <memory> #include <string> #include <unordered_map> #include <utility> #include <vector> #if defined(INTEL_MKL_ML_ONLY) || defined(INTEL_MKL_DNN_ONLY) #ifndef INTEL_MKL #error "INTEL_MKL_{ML,DNN}_ONLY require INTEL_MKL" #endif #endif #if defined(INTEL_MKL_ML_ONLY) && defined(INTEL_MKL_DNN_ONLY) #error "at most one of INTEL_MKL_ML_ONLY and INTEL_MKL_DNN_ONLY may be defined" #endif #ifdef INTEL_MKL_ML_ONLY #error \ "Compiling for INTEL MKL ML only is no longer supported.Please use MKL DNN (the default option for --config=mkl)" #endif #ifdef INTEL_MKL_ML_ONLY #include "mkl_dnn.h" #include "mkl_dnn_types.h" #include "mkl_service.h" #include "mkl_trans.h" #endif #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/graph/mkl_graph_util.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/array_slice.h" #include "tensorflow/core/platform/cpu_info.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/util/env_var.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/tensor_format.h" 
#ifndef INTEL_MKL_ML_ONLY
#include "mkldnn.hpp"
#include "tensorflow/core/lib/core/stringpiece.h"

using mkldnn::engine;
using mkldnn::memory;
using mkldnn::padding_kind;
using mkldnn::primitive;
using mkldnn::reorder;
#endif

#ifdef _WIN32
// MSVC does not provide the POSIX 'uint' typedef.
typedef unsigned int uint;
#endif

namespace tensorflow {

// The file contains a number of utility classes and functions used by MKL
// enabled kernels

// This class encapsulates all the meta data that is associated with an MKL
// tensor. A tensor is an MKL tensor if it was created as the result of an
// MKL operation, and did not go through a conversion to a standard
// Tensorflow tensor.

// For use with MKL ML, has been deprecated
typedef enum { W = 0, H = 1, C = 2, N = 3 } MklDims;

// The dimensions order that MKL DNN internally uses for 2D activations
// [Batch, Channel, Height, Width] and
// for 2D filters [Out_Channel, In_Channel, Height, Width].
// Note: Dim_O/Dim_I deliberately alias Dim_N/Dim_C (both 0/1) so the same
// enum indexes activation (NCHW) and filter (OIHW) layouts.
typedef enum {
  Dim_N = 0,
  Dim_C = 1,
  Dim_H = 2,
  Dim_W = 3,
  Dim_O = 0,
  Dim_I = 1
} MklDnnDims;

// The dimensions order that MKL DNN internally uses for 3D activations
// [Batch, Channel, Depth, Height, Width] and
// for 3D filters [Out_Channel, In_Channel, Depth, Height, Width].
// As above, Dim3d_O/Dim3d_I alias Dim3d_N/Dim3d_C.
typedef enum {
  Dim3d_N = 0,
  Dim3d_C = 1,
  Dim3d_D = 2,
  Dim3d_H = 3,
  Dim3d_W = 4,
  Dim3d_O = 0,
  Dim3d_I = 1
} MklDnnDims3D;

// Enum for the order of dimensions of a TF 2D filter with shape [filter_height,
// filter_width, in_channels, out_channels]
typedef enum {
  TF_2DFILTER_DIM_H = 0,
  TF_2DFILTER_DIM_W = 1,
  TF_2DFILTER_DIM_I = 2,
  TF_2DFILTER_DIM_O = 3
} TFFilterDims2d;

// Enum for the order of dimensions of a TF 3D filter with shape [filter_depth,
// filter_height, filter_width, in_channels, out_channels]
typedef enum {
  TF_3DFILTER_DIM_P = 0,
  TF_3DFILTER_DIM_H = 1,
  TF_3DFILTER_DIM_W = 2,
  TF_3DFILTER_DIM_I = 3,
  TF_3DFILTER_DIM_O = 4
} TFFilterDims3d;

// The dimensions order that MKL DNN requires for the filter in a grouped
// convolution (2D only)
typedef enum {
  MKL_GROUP_FILTER_DIM_G = 0,
  MKL_GROUP_FILTER_DIM_O = 1,
  MKL_GROUP_FILTER_DIM_I = 2,
  MKL_GROUP_FILTER_DIM_H = 3,
  MKL_GROUP_FILTER_DIM_W = 4
} MklDnnFilterGroupDims;

// Enum used to templatize MklOp kernel implementations
// that support both fp32 and int8 versions.
enum class MklQuantization { QUANTIZED_VERSION, FP_VERSION, }; static const int kSmallBatchSize = 32; #ifdef INTEL_MKL_ML_ONLY class MklShape { public: MklShape() {} TF_DISALLOW_COPY_AND_ASSIGN(MklShape); // Cannot copy ~MklShape() { if (sizes_) delete[] sizes_; if (strides_) delete[] strides_; if (mklLayout_) CHECK_EQ(dnnLayoutDelete_F32(mklLayout_), E_SUCCESS); if (tfLayout_) CHECK_EQ(dnnLayoutDelete_F32(tfLayout_), E_SUCCESS); if (tf_to_mkl_dim_map_) delete[] tf_to_mkl_dim_map_; } const bool IsMklTensor() const { return isMklTensor_; } void SetMklTensor(const bool isMklTensor) { isMklTensor_ = isMklTensor; } void SetDimensions(const size_t dimension) { dimension_ = dimension; } void SetMklLayout(dnnLayout_t mklLayout) { mklLayout_ = mklLayout; } void SetMklLayout(const void* primitive, size_t resourceType) { CHECK_EQ( dnnLayoutCreateFromPrimitive_F32(&mklLayout_, (dnnPrimitive_t)primitive, (dnnResourceType_t)resourceType), E_SUCCESS); } void SetTfLayout(const size_t dimension, const size_t* sizes, const size_t* strides) { dimension_ = dimension; if (dimension > 0) { // MKl doesn't support zero dimension tensors sizes_ = new size_t[dimension]; strides_ = new size_t[dimension]; for (int ii = 0; ii < dimension; ii++) { sizes_[ii] = sizes[ii]; strides_[ii] = strides[ii]; } CHECK_EQ(dnnLayoutCreate_F32(&tfLayout_, dimension, sizes, strides), E_SUCCESS); } } // Default case - MKL dim ordering is opposite of TF dim ordering // MKL -> (DIMS-1)...0 where (DIMS-1) is outermost dim and 0 is innermost dim // TF -> 0...(DIMS-1) where 0 is outermost dim and (DIMS-1) is innermost dim // For layers that rely on data_format semantics (conv, pooling etc.) // or operate only on certain dimensions (relu, concat, split etc.), // Mkl APIs might require us to reorder these dimensions. 
In such cases, // kernels should explicitly set this map void SetTfDimOrder(const size_t dimension) { CHECK(dimension == dimension_); if (tf_to_mkl_dim_map_ == nullptr) { tf_to_mkl_dim_map_ = new size_t[dimension]; } for (size_t ii = 0; ii < dimension; ii++) { tf_to_mkl_dim_map_[ii] = dimension - (ii + 1); } } void SetTfDimOrder(const size_t dimension, const size_t* tf_to_mkl_dim_map) { CHECK(dimension == dimension_); if (tf_to_mkl_dim_map_ == nullptr) { tf_to_mkl_dim_map_ = new size_t[dimension]; } for (size_t ii = 0; ii < dimension; ii++) { tf_to_mkl_dim_map_[ii] = tf_to_mkl_dim_map[ii]; } } void SetTfDimOrder(const size_t dimension, TensorFormat data_format) { CHECK_EQ(dimension, 4); CHECK(dimension == dimension_); if (tf_to_mkl_dim_map_ == nullptr) { tf_to_mkl_dim_map_ = new size_t[dimension]; } tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDims::W; tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDims::H; tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDims::C; tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDims::N; } const dnnLayout_t GetMklLayout() const { return mklLayout_; } const dnnLayout_t GetTfLayout() const { return tfLayout_; } const dnnLayout_t GetCurLayout() const { return isMklTensor_ ? mklLayout_ : tfLayout_; } size_t GetDimension() const { return dimension_; } const size_t* GetSizes() const { return sizes_; } int64 dim_size(int index) const { return sizes_[index]; } int64 tf_dim_size(int index) const { return sizes_[tf_to_mkl_dim_map_[index]]; } const size_t* GetStrides() const { return strides_; } const size_t* GetTfToMklDimMap() const { return tf_to_mkl_dim_map_; } size_t tf_dim_idx(int index) const { return tf_to_mkl_dim_map_[index]; } // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' // corresponds to MKL's Channel dimension. 
bool IsMklChannelDim(int d) const { return tf_dim_idx(d) == MklDims::C; } // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' // corresponds to MKL's Batch dimension. bool IsMklBatchDim(int d) const { return tf_dim_idx(d) == MklDims::N; } // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' // corresponds to MKL's Width dimension. bool IsMklWidthDim(int d) const { return tf_dim_idx(d) == MklDims::W; } // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' // corresponds to MKL's Height dimension. bool IsMklHeightDim(int d) const { return tf_dim_idx(d) == MklDims::H; } // Check if the TF-Mkl dimension ordering map specifies if the input // tensor is in NCHW format. bool IsTensorInNCHWFormat() const { TensorFormat data_format = FORMAT_NCHW; return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) && IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) && IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) && IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W'))); } // Check if the TF-Mkl dimension ordering map specifies if the input // tensor is in NHWC format. bool IsTensorInNHWCFormat() const { TensorFormat data_format = FORMAT_NHWC; return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) && IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) && IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) && IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W'))); } void GetConvertedFlatData(dnnLayout_t targetLayout, void* input, void* output) const { dnnLayout_t curLayout; if (isMklTensor_) curLayout = mklLayout_; else curLayout = tfLayout_; dnnPrimitive_t convert; CHECK_EQ(dnnConversionCreate_F32(&convert, curLayout, targetLayout), E_SUCCESS); CHECK_EQ(dnnConversionExecute_F32(convert, input, output), E_SUCCESS); CHECK_EQ(dnnDelete_F32(convert), E_SUCCESS); } // The following methods are used for serializing and de-serializing the // contents of the mklshape object. 
// The data is serialized in this order // isMklTensor_ // dimension_ // sizes_ // strides_ // mklLayout_ // tfLayout_ // tf_to_mkl_dim_map_ #define SIZE_OF_MKL_DNN_BUF \ (dnnLayoutSerializationBufferSize_F32()) // Size of buffer needed to // serialize dnn_layout pointer // Size of buffer to hold the serialized object, the size is computed as // follows sizeof(isMklTensor_) + sizeof(dimension_) + sizeof(sizes_) + // sizeof(strides_) // + sizeof(mklLayout_ buffer) + sizeof(tfLayout_ buffer) // + sizeof(tf_to_mkl_dim_map_) #define SIZE_OF_MKL_SERIAL_DATA(dims) \ (2 * sizeof(size_t) + 3 * dims * sizeof(size_t) + 2 * SIZE_OF_MKL_DNN_BUF) // First we need to define some macro for offsets into the serial buffer where // different elements of Mklshape is written/read from #define IS_MKL_TENSOR_OFFSET 0 // Location from start of buffer where isMklTensor_ is serialized #define DIMS_OFFSET \ (IS_MKL_TENSOR_OFFSET + sizeof(size_t)) // Location of dimension_ // Location of sizes. Note dim is not used here, left here // to make macros consistent. #define SIZES_OFFSET(dims) (DIMS_OFFSET + sizeof(size_t)) #define STRIDES_OFFSET(dims) \ (SIZES_OFFSET(dims) + dims * sizeof(size_t)) // Location of strides #define MKL_LAYOUT_OFFSET(dims) \ (STRIDES_OFFSET(dims) + dims * sizeof(size_t)) // Location of mklLayout_ #define TF_LAYOUT_OFFSET(dims) \ (MKL_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF) // Location of tfLayout_ // Location of tf_to_mkl_dim_map_ #define TF_TO_MKL_DIM_MAP_OFFSET(dims) \ (TF_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF) // TODO(agramesh1) make sure to create a const to share with rewrite pass // for min size of MKL metadata tensor. 
// Deserialize an MklShape from 'buf'.  The buffer layout must match the
// offset macros above / SerializeMklShape() below.  Allocates sizes_,
// strides_ and tf_to_mkl_dim_map_ with new[]; CHECK-fails if the buffer is
// too small or an MKL layout fails to deserialize.
void DeSerializeMklShape(const unsigned char* buf, size_t buf_size) {
  CHECK(buf_size >= sizeof(size_t)) << "Bufsize too small in DeSerialize";
  // Make sure buffer holds at least isMklTensor_
  isMklTensor_ =
      *reinterpret_cast<const size_t*>(buf + IS_MKL_TENSOR_OFFSET) != 0;

  if (isMklTensor_) {  // If it is an MKL Tensor then read the rest
    dimension_ = *(reinterpret_cast<const size_t*>(buf + DIMS_OFFSET));
    CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
        << "Bufsize too small in DeSerialize";
    sizes_ = new size_t[dimension_];
    strides_ = new size_t[dimension_];
    tf_to_mkl_dim_map_ = new size_t[dimension_];
    for (int i = 0; i < dimension_; i++) {
      sizes_[i] =
          reinterpret_cast<const size_t*>(buf + SIZES_OFFSET(dimension_))[i];
      strides_[i] = reinterpret_cast<const size_t*>(
          buf + STRIDES_OFFSET(dimension_))[i];
      tf_to_mkl_dim_map_[i] = reinterpret_cast<const size_t*>(
          buf + TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i];
    }
    CHECK_EQ(dnnLayoutDeserialize_F32(&mklLayout_,
                                      buf + MKL_LAYOUT_OFFSET(dimension_)),
             E_SUCCESS);
    CHECK_EQ(dnnLayoutDeserialize_F32(&tfLayout_,
                                      buf + TF_LAYOUT_OFFSET(dimension_)),
             E_SUCCESS);
  }
}

// Serialize this MklShape into 'buf' using the same offset layout.
// CHECK-fails if 'buf_size' < SIZE_OF_MKL_SERIAL_DATA(dimension_).
void SerializeMklShape(unsigned char* buf, size_t buf_size) const {
  CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
      << "Bufsize too small to Serialize";
  *reinterpret_cast<size_t*>(buf + IS_MKL_TENSOR_OFFSET) =
      isMklTensor_ ? 1 : 0;
  if (isMklTensor_) {
    *(reinterpret_cast<size_t*>(buf + DIMS_OFFSET)) = dimension_;
    for (int i = 0; i < dimension_; i++) {
      reinterpret_cast<size_t*>(buf + SIZES_OFFSET(dimension_))[i] =
          sizes_[i];
      reinterpret_cast<size_t*>(buf + STRIDES_OFFSET(dimension_))[i] =
          strides_[i];
      reinterpret_cast<size_t*>(buf +
                                TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i] =
          tf_to_mkl_dim_map_[i];
    }
    CHECK_EQ(dnnLayoutSerialize_F32(mklLayout_,
                                    buf + MKL_LAYOUT_OFFSET(dimension_)),
             E_SUCCESS);
    CHECK_EQ(
        dnnLayoutSerialize_F32(tfLayout_, buf + TF_LAYOUT_OFFSET(dimension_)),
        E_SUCCESS);
  }
}

 private:
  bool isMklTensor_ =
      false;  // Flag to indicate if the tensor is an MKL tensor or not
  dnnLayout_t mklLayout_ = nullptr;  // Pointer to the MKL layout
  dnnLayout_t tfLayout_ = nullptr;   // Pointer to layout of corresponding
  // Tensorflow tensor, used when conversion from MKL to standard tensor
  size_t dimension_ = 0;
  size_t* sizes_ = nullptr;    // Required by MKL for conversions
  size_t* strides_ = nullptr;  // Required by MKL for conversions
  size_t* tf_to_mkl_dim_map_ =
      nullptr;  // TF dimension corresponding to this MKL dimension
};

#else

// Forward decl
TensorFormat MklDnn3DDataFormatToTFDataFormat(memory::format format);
TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format);
memory::dims CalculateTFStrides(const memory::dims& dims_tf_order);
memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
                                        const memory::dims& strides,
                                        memory::data_type dtype);

class MklDnnShape {
 private:
  // Plain-old-data payload; kept trivially copyable so the whole struct can
  // be memcpy'd by (De)SerializeMklDnnShape below.
  typedef struct {
    /// Flag to indicate if the tensor is an MKL tensor or not
    bool is_mkl_tensor_ = false;
    /// Number of dimensions in Tensorflow format
    size_t dimension_ = 0;
    /// Required by MKLDNN for conversions
    mkldnn_dims_t sizes_;  // Required by MKL for conversions

    memory::format tf_data_format_ = memory::format::format_undef;
    memory::data_type T_ = memory::data_type::data_undef;
    // MKL layout
    mkldnn_memory_desc_t mkl_md_;
    /// TF dimension corresponding to this MKL dimension
    mkldnn_dims_t map_;
  } MklShapeData;
  MklShapeData data_;

  typedef std::remove_extent<mkldnn_dims_t>::type mkldnn_dim_t;

#define INVALID_DIM_SIZE -1

 public:
  MklDnnShape() {
    // Mark every size/map slot unused until SetTfLayout/SetTfDimOrder run.
    for (size_t i = 0; i < sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
         ++i) {
      data_.sizes_[i] = -1;
    }
    for (size_t i = 0; i < sizeof(data_.map_) / sizeof(data_.map_[0]); ++i) {
      data_.map_[i] = -1;
    }
  }

  ~MklDnnShape() {}
  TF_DISALLOW_COPY_AND_ASSIGN(MklDnnShape);  // Cannot copy

  /// Helper function to compare memory::desc objects for MklDnn.
  /// May be this should go into MklDnn directly.
  /// Byte-wise comparison of the underlying C descriptor structs.
  inline bool CompareMklDnnLayouts(const memory::desc& md1,
                                   const memory::desc& md2) const {
    mkldnn_memory_desc_t mdd1 = md1.data;
    mkldnn_memory_desc_t mdd2 = md2.data;
    const char* d1 = reinterpret_cast<const char*>(&mdd1);
    const char* d2 = reinterpret_cast<const char*>(&mdd2);

    size_t md_size = sizeof(mdd1);
    for (size_t i = 0; i < md_size; i++) {
      if (*d1++ != *d2++) {
        return false;
      }
    }
    return true;
  }

  /// Equality function for MklDnnShape objects
  /// @return true if both are equal; false otherwise.
  inline bool operator==(const MklDnnShape& input_shape) const {
    if (this->IsMklTensor() != input_shape.IsMklTensor()) {
      return false;
    }

    // If input tensors are in Mkl layout, then we check for dimensions and
    // sizes.
    if (this->IsMklTensor()) {
      return this->GetTfShape() == input_shape.GetTfShape() &&
             CompareMklDnnLayouts(this->GetMklLayout(),
                                  input_shape.GetMklLayout());
    }

    return true;
  }

  /// Equality operator for MklDnnShape and TFShape.
  /// Returns: true if TF shapes for both are the same, false otherwise
  /// NOTE(review): returns false (not a shape comparison) whenever this
  /// object is not an MKL tensor -- confirm callers expect that.
  inline bool operator==(const TensorShape& input_shape) const {
    if (!this->IsMklTensor()) {
      return false;
    }
    return this->GetTfShape() == input_shape;
  }

  inline const bool IsMklTensor() const { return data_.is_mkl_tensor_; }
  inline void SetMklTensor(bool is_mkl_tensor) {
    data_.is_mkl_tensor_ = is_mkl_tensor;
  }

  inline void SetDimensions(const size_t dimension) {
    data_.dimension_ = dimension;
  }

  // Size of the dimension named 'N'/'C'/'H'/'W' (4-dim tensors).
  inline size_t GetDimension(char dimension) const {
    int index = GetMklDnnTensorDimIndex(dimension);
    CHECK(index >= 0 && index < this->GetDimension())
        << "Invalid index from the dimension: " << index << ", " << dimension;
    return this->DimSize(index);
  }

  // Size of the dimension named 'N'/'C'/'D'/'H'/'W' (5-dim tensors).
  inline size_t GetDimension3D(char dimension) const {
    int index = GetMklDnnTensor3DDimIndex(dimension);
    CHECK(index >= 0 && index < this->GetDimension())
        << "Invalid index from the dimension: " << index << ", " << dimension;
    return this->DimSize(index);
  }

  // Map a dimension letter to its MklDnnDims index; LOG(FATAL) otherwise.
  inline int32 GetMklDnnTensorDimIndex(char dimension) const {
    switch (dimension) {
      case 'N':
        return MklDnnDims::Dim_N;
      case 'C':
        return MklDnnDims::Dim_C;
      case 'H':
        return MklDnnDims::Dim_H;
      case 'W':
        return MklDnnDims::Dim_W;
      default:
        LOG(FATAL) << "Invalid dimension: " << dimension;
        return -1;  // Avoid compiler warning about missing return value
    }
  }

  // 3D (5-dim) variant of the above; also accepts 'D'.
  inline int32 GetMklDnnTensor3DDimIndex(char dimension) const {
    switch (dimension) {
      case 'N':
        return MklDnnDims3D::Dim3d_N;
      case 'C':
        return MklDnnDims3D::Dim3d_C;
      case 'D':
        return MklDnnDims3D::Dim3d_D;
      case 'H':
        return MklDnnDims3D::Dim3d_H;
      case 'W':
        return MklDnnDims3D::Dim3d_W;
      default:
        LOG(FATAL) << "Invalid dimension: " << dimension;
        return -1;  // Avoid compiler warning about missing return value
    }
  }

  inline size_t GetDimension() const { return data_.dimension_; }
  inline const int* GetSizes() const {
    return reinterpret_cast<const int*>(&data_.sizes_[0]);
  }

  // Returns an mkldnn::memory::dims object that contains the sizes of this
  // MklDnnShape object.
  inline memory::dims GetSizesAsMklDnnDims() const {
    memory::dims retVal;
    if (data_.is_mkl_tensor_) {
      size_t dimensions = sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
      for (size_t i = 0; i < dimensions; i++) {
        if (data_.sizes_[i] != INVALID_DIM_SIZE)
          retVal.push_back(data_.sizes_[i]);
      }
    } else {
      // Reaching here means !is_mkl_tensor_, so this CHECK always fails:
      // calling this on a non-MKL tensor is a hard error.
      CHECK_EQ(data_.is_mkl_tensor_, true);
    }
    return retVal;
  }

  inline int64 DimSize(int index) const {
    CHECK_LT(index, sizeof(data_.sizes_) / sizeof(data_.sizes_[0]));
    return data_.sizes_[index];
  }

  /// Return TensorShape that describes the Tensorflow shape of the tensor
  /// represented by this MklShape.
  inline TensorShape GetTfShape() const {
    CHECK_EQ(data_.is_mkl_tensor_, true);

    std::vector<int32> shape(data_.dimension_, -1);
    if (data_.tf_data_format_ != memory::format::blocked) {
      for (size_t idx = 0; idx < data_.dimension_; ++idx) {
        shape[idx] = data_.sizes_[TfDimIdx(idx)];
      }
    } else {
      // If Tensorflow shape is in Blocked format, then we don't have dimension
      // map for it. So we just create Tensorflow shape from sizes in the
      // specified order.
      for (size_t idx = 0; idx < data_.dimension_; ++idx) {
        shape[idx] = data_.sizes_[idx];
      }
    }

    TensorShape ts;
    bool ret = TensorShapeUtils::MakeShape(shape, &ts).ok();
    CHECK_EQ(ret, true);
    return ts;
  }

  inline void SetElemType(memory::data_type dt) { data_.T_ = dt; }
  inline const memory::data_type GetElemType() { return data_.T_; }

  // Record the MKL layout; only the plain C descriptor is stored.
  inline void SetMklLayout(memory::primitive_desc* pd) {
    CHECK_NOTNULL(pd);
    data_.mkl_md_ = pd->desc().data;
  }

  inline void SetMklLayout(memory::desc* md) {
    CHECK_NOTNULL(md);
    data_.mkl_md_ = md->data;
  }

  inline const memory::desc GetMklLayout() const {
    return memory::desc(data_.mkl_md_);
  }

  inline memory::format GetTfDataFormat() const {
    return data_.tf_data_format_;
  }

  /// We don't create primitive_descriptor for TensorFlow layout now.
  /// We use lazy evaluation and create it only when needed. Input format can
  /// also be Blocked format.
  // Record the Tensorflow-side layout: dimension count, per-dimension sizes
  // and data format.  For any non-blocked format the TF->MKL dimension map
  // is also rebuilt via SetTfDimOrder.
  inline void SetTfLayout(size_t dims, const memory::dims& sizes,
                          memory::format format) {
    CHECK_EQ(dims, sizes.size());
    data_.dimension_ = dims;
    for (size_t ii = 0; ii < dims; ii++) {
      data_.sizes_[ii] = sizes[ii];
    }
    data_.tf_data_format_ = format;
    if (format != memory::format::blocked) {
      SetTfDimOrder(dims, format);
    }
  }

  // Build a memory::desc for the Tensorflow-side layout on demand.
  inline const memory::desc GetTfLayout() const {
    memory::dims dims;
    for (size_t ii = 0; ii < data_.dimension_; ii++) {
      dims.push_back(data_.sizes_[ii]);
    }

    // Create Blocked memory desc if input TF format was set like that.
    if (data_.tf_data_format_ == memory::format::blocked) {
      auto strides = CalculateTFStrides(dims);
      return CreateBlockedMemDescHelper(dims, strides, data_.T_);
    } else {
      return memory::desc(dims, data_.T_, data_.tf_data_format_);
    }
  }

  // Layout of the data as it currently sits: MKL layout for MKL tensors,
  // otherwise the Tensorflow layout.
  inline const memory::desc GetCurLayout() const {
    return IsMklTensor() ? GetMklLayout() : GetTfLayout();
  }

  // nhasabni - I've removed SetTfDimOrder that was setting default order in
  // case of MKL-ML. We don't need a case of default dimension order because
  // when an operator that does not get data_format attribute gets all inputs
  // in Tensorflow format, it will produce output in Tensorflow format.
  // Copy a caller-provided TF->MKL dimension map verbatim.
  inline void SetTfDimOrder(const size_t dimension, const mkldnn_dims_t map) {
    CHECK(dimension == data_.dimension_);
    for (size_t ii = 0; ii < dimension; ii++) {
      data_.map_[ii] = map[ii];
    }
  }

  // Build the TF->MKL dimension map from a TensorFormat.  Supports 4-dim
  // (NCHW/NHWC) and 5-dim (3D, with planes '0'/'1'/'2' = D/H/W) tensors;
  // any other rank CHECK-fails.
  inline void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
    if (dimension == 5) {
      CHECK(dimension == data_.dimension_);
      data_.map_[GetTensorDimIndex<3>(data_format, '0')] =
          MklDnnDims3D::Dim3d_D;
      data_.map_[GetTensorDimIndex<3>(data_format, '1')] =
          MklDnnDims3D::Dim3d_H;
      data_.map_[GetTensorDimIndex<3>(data_format, '2')] =
          MklDnnDims3D::Dim3d_W;
      data_.map_[GetTensorDimIndex<3>(data_format, 'C')] =
          MklDnnDims3D::Dim3d_C;
      data_.map_[GetTensorDimIndex<3>(data_format, 'N')] =
          MklDnnDims3D::Dim3d_N;
    } else {
      CHECK_EQ(dimension, 4);
      CHECK(dimension == data_.dimension_);
      data_.map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDnnDims::Dim_W;
      data_.map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDnnDims::Dim_H;
      data_.map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDnnDims::Dim_C;
      data_.map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDnnDims::Dim_N;
    }
  }

  // Convenience overload: derive the TensorFormat from an MKL-DNN format.
  inline void SetTfDimOrder(const size_t dimension, memory::format format) {
    TensorFormat data_format = MklDnnDataFormatToTFDataFormat(format);
    SetTfDimOrder(dimension, data_format);
  }

  inline const mkldnn_dim_t* GetTfToMklDimMap() const {
    return &data_.map_[0];
  }
  inline size_t TfDimIdx(int index) const { return data_.map_[index]; }
  inline int64 TfDimSize(int index) const {
    return data_.sizes_[TfDimIdx(index)];
  }

  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Channel dimension.
  inline bool IsMklChannelDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_C;
  }
  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Batch dimension.
  inline bool IsMklBatchDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_N;
  }
  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Width dimension.
  inline bool IsMklWidthDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_W;
  }
  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Height dimension.
  inline bool IsMklHeightDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_H;
  }

  /// Check if the TF-Mkl dimension ordering map specifies if the input
  /// tensor is in NCHW format.
  inline bool IsTensorInNCHWFormat() const {
    TensorFormat data_format = FORMAT_NCHW;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  /// Check if the TF-Mkl dimension ordering map specifies if the input
  /// tensor is in NHWC format.
  inline bool IsTensorInNHWCFormat() const {
    TensorFormat data_format = FORMAT_NHWC;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  /// The following methods are used for serializing and de-serializing the
  /// contents of the mklshape object.
/// The data is serialized in this order /// is_mkl_tensor_ : dimension_ : sizes_ : map_: format_ : T_ : mkl_pd_; /// Size of buffer to hold the serialized object, the size is computed by /// following above mentioned order inline size_t GetSerializeBufferSize() const { return sizeof(MklShapeData); } void SerializeMklDnnShape(unsigned char* buf, size_t buf_size) const { CHECK(buf_size >= GetSerializeBufferSize()) << "Buffer size is too small to SerializeMklDnnShape"; *reinterpret_cast<MklShapeData*>(buf) = data_; } void DeSerializeMklDnnShape(const unsigned char* buf, size_t buf_size) { // Make sure buffer holds at least is_mkl_tensor_. CHECK(buf_size >= sizeof(data_.is_mkl_tensor_)) << "Buffer size is too small in DeSerializeMklDnnShape"; const bool is_mkl_tensor = *reinterpret_cast<const bool*>(buf); if (is_mkl_tensor) { // If it is an MKL Tensor then read the rest CHECK(buf_size >= GetSerializeBufferSize()) << "Buffer size is too small in DeSerializeMklDnnShape"; data_ = *reinterpret_cast<const MklShapeData*>(buf); } } }; #endif // List of MklShape objects. Used in Concat/Split layers. #ifndef INTEL_MKL_ML_ONLY typedef std::vector<MklDnnShape> MklDnnShapeList; #else typedef std::vector<MklShape> MklShapeList; #endif #ifdef INTEL_MKL_ML_ONLY // Check if all tensors specified by MklShapes are MKL tensors. inline bool AreAllMklTensors(const MklShapeList& shapes) { for (auto& s : shapes) { if (!s.IsMklTensor()) { return false; } } return true; } template <typename T> inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor, const MklShape& mkl_shape) { Tensor output_tensor; TensorShape output_shape; for (size_t j = 0; j < mkl_shape.GetDimension(); j++) { // Outermost to innermost dimension output_shape.AddDim(mkl_shape.GetSizes()[mkl_shape.tf_dim_idx(j)]); } // Allocate output tensor. 
context->allocate_temp(DataTypeToEnum<T>::v(), output_shape, &output_tensor); dnnLayout_t output_layout = static_cast<dnnLayout_t>(mkl_shape.GetTfLayout()); void* input_buffer = const_cast<T*>(mkl_tensor.flat<T>().data()); void* output_buffer = const_cast<T*>(output_tensor.flat<T>().data()); if (mkl_tensor.NumElements() != 0) { mkl_shape.GetConvertedFlatData(output_layout, input_buffer, output_buffer); } return output_tensor; } #else using mkldnn::stream; template <typename T> class MklDnnData; template <typename T> inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor, const MklDnnShape& mkl_shape) { Tensor output_tensor; try { if (!mkl_shape.IsMklTensor()) return mkl_tensor; // return input since it is already TF tensor TensorShape output_shape = mkl_shape.GetTfShape(); ; // Allocate output tensor. context->allocate_temp(DataTypeToEnum<T>::v(), output_shape, &output_tensor); auto cpu_engine = engine(engine::cpu, 0); MklDnnData<T> input(&cpu_engine); // Get Mkl layout of input tensor. auto input_mkl_md = mkl_shape.GetMklLayout(); auto output_tf_md = mkl_shape.GetTfLayout(); auto output_tf_pd = memory::primitive_desc(output_tf_md, cpu_engine); input.SetUsrMem(input_mkl_md, &mkl_tensor); // reorder if (input.IsReorderNeeded(output_tf_pd)) { std::vector<primitive> net; CHECK_EQ(input.CheckReorderToOpMem(output_tf_pd, &output_tensor, &net), true); stream(stream::kind::eager).submit(net).wait(); } else { // If not, just forward input tensor to output tensor. 
CHECK(output_tensor.CopyFrom(mkl_tensor, output_shape)); } } catch (mkldnn::error& e) { string error_msg = "Status: " + std::to_string(e.status) + ", message: " + string(e.message) + ", in file " + string(__FILE__) + ":" + std::to_string(__LINE__); LOG(FATAL) << "Operation received an exception: " << error_msg; } return output_tensor; } #endif // Get the MKL shape from the second string tensor #ifdef INTEL_MKL_ML_ONLY inline void GetMklShape(OpKernelContext* ctext, int n, MklShape* mklshape) { mklshape->DeSerializeMklShape( ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs())) .flat<uint8>() .data(), ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs())) .flat<uint8>() .size() * sizeof(uint8)); } #else inline void GetMklShape(OpKernelContext* ctext, int n, MklDnnShape* mklshape) { mklshape->DeSerializeMklDnnShape( ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs())) .flat<uint8>() .data(), ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs())) .flat<uint8>() .size() * sizeof(uint8)); } #endif // Gets the actual input inline const Tensor& MklGetInput(OpKernelContext* ctext, int n) { return ctext->input(GetTensorDataIndex(n, ctext->num_inputs())); } inline void GetMklInputList(OpKernelContext* ctext, StringPiece name, OpInputList* input_tensors) { CHECK_NOTNULL(input_tensors); ctext->input_list(name, input_tensors); } #ifdef INTEL_MKL_ML_ONLY inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name, MklShapeList* mkl_shapes) { OpInputList input_mkl_tensors; GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors); for (int i = 0; i < input_mkl_tensors.size(); i++) { (*mkl_shapes)[i].DeSerializeMklShape( input_mkl_tensors[i].flat<uint8>().data(), input_mkl_tensors[i].flat<uint8>().size() * sizeof(uint8)); } } #else inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name, MklDnnShapeList* mkl_shapes) { OpInputList input_mkl_tensors; GetMklInputList(ctext, strings::StrCat("mkl_", name), 
&input_mkl_tensors); for (int i = 0; i < input_mkl_tensors.size(); i++) { (*mkl_shapes)[i].DeSerializeMklDnnShape( input_mkl_tensors[i].flat<uint8>().data(), input_mkl_tensors[i].flat<uint8>().size() * sizeof(uint8)); } } #endif #ifndef INTEL_MKL_ML_ONLY /// Get shape of input tensor pointed by 'input_idx' in TensorShape format. /// If the input tensor is in MKL layout, then obtains TensorShape from /// MklShape. inline TensorShape GetTfShape(OpKernelContext* context, size_t input_idx) { // Sanity check. CHECK_NOTNULL(context); CHECK_LT(input_idx, context->num_inputs()); MklDnnShape input_mkl_shape; GetMklShape(context, input_idx, &input_mkl_shape); if (input_mkl_shape.IsMklTensor()) { return input_mkl_shape.GetTfShape(); } else { const Tensor& t = MklGetInput(context, input_idx); return t.shape(); } } #endif #ifdef INTEL_MKL_ML_ONLY // Allocate the second output tensor that will contain // the MKL shape serialized inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n, const MklShape& mkl_shape) { Tensor* second_tensor = nullptr; TensorShape second_shape; second_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension())); OP_REQUIRES_OK(ctext, ctext->allocate_output( GetTensorMetaDataIndex(n, ctext->num_outputs()), second_shape, &second_tensor)); mkl_shape.SerializeMklShape( second_tensor->flat<uint8>().data(), second_tensor->flat<uint8>().size() * sizeof(uint8)); } #else // Allocate the second output tensor that will contain // the MKL shape serialized inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n, const MklDnnShape& mkl_shape) { Tensor* second_tensor = nullptr; TensorShape second_shape; second_shape.AddDim(mkl_shape.GetSerializeBufferSize()); OP_REQUIRES_OK(ctext, ctext->allocate_output( GetTensorMetaDataIndex(n, ctext->num_outputs()), second_shape, &second_tensor)); mkl_shape.SerializeMklDnnShape( second_tensor->flat<uint8>().data(), second_tensor->flat<uint8>().size() * sizeof(uint8)); } #endif #ifdef INTEL_MKL_ML_ONLY // 
Allocate the output tensor, create a second output tensor that will contain // the MKL shape serialized inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n, Tensor** output, const TensorShape& tf_shape, const MklShape& mkl_shape) { Tensor* second_tensor = nullptr; TensorShape second_shape; second_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension())); OP_REQUIRES_OK( ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()), tf_shape, output)); OP_REQUIRES_OK(ctext, ctext->allocate_output( GetTensorMetaDataIndex(n, ctext->num_outputs()), second_shape, &second_tensor)); mkl_shape.SerializeMklShape( second_tensor->flat<uint8>().data(), second_tensor->flat<uint8>().size() * sizeof(uint8)); } #else // Allocate the output tensor, create a second output tensor that will contain // the MKL shape serialized inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n, Tensor** output, const TensorShape& tf_shape, const MklDnnShape& mkl_shape) { Tensor* second_tensor = nullptr; TensorShape second_shape; second_shape.AddDim(mkl_shape.GetSerializeBufferSize()); OP_REQUIRES_OK( ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()), tf_shape, output)); OP_REQUIRES_OK(ctext, ctext->allocate_output( GetTensorMetaDataIndex(n, ctext->num_outputs()), second_shape, &second_tensor)); mkl_shape.SerializeMklDnnShape( second_tensor->flat<uint8>().data(), second_tensor->flat<uint8>().size() * sizeof(uint8)); } #endif // Allocates a temp tensor and returns the data buffer for temporary storage. 
// Currently
#ifndef INTEL_MKL_ML_ONLY
// Allocate a scratch Tensor large enough to back primitive descriptor 'pd'
// and return its raw buffer in *buf_out.
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           const memory::primitive_desc& pd, void** buf_out) {
  TensorShape tf_shape;

  // +1 element guards against the integer division get_size()/sizeof(T)
  // rounding the byte count down.
  tf_shape.AddDim(pd.get_size() / sizeof(T) + 1);
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
                                                 tf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<T>().data());
}
#else
// MKL-ML variant: size the scratch buffer from a dnnLayout_t (float only).
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           dnnLayout_t lt_buff, void** buf_out) {
  TensorShape tf_shape;

  tf_shape.AddDim(
      dnnLayoutGetMemorySize_F32(static_cast<dnnLayout_t>(lt_buff)) /
          sizeof(float) +
      1);
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<float>::v(),
                                                 tf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<float>().data());
}
#endif

template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           TensorShape tf_shape) {
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
                                                 tf_shape, tensor_out));
}

// Fill strides[0..3] for a 4-dim tensor from sizes[0..3].
// NOTE(review): the index convention of 'sizes'/'strides' here follows the
// MKL-ML reversed-dimension order -- confirm against MklShape::GetSizes().
inline void GetStridesFromSizes(TensorFormat data_format, size_t* strides,
                                const size_t* sizes) {
  // MKL requires strides in NCHW
  if (data_format == FORMAT_NHWC) {
    strides[0] = sizes[2];
    strides[1] = sizes[0] * sizes[2];
    strides[2] = 1;
    strides[3] = sizes[0] * sizes[1] * sizes[2];
  } else {
    strides[0] = 1;
    strides[1] = sizes[0];
    strides[2] = sizes[0] * sizes[1];
    strides[3] = sizes[0] * sizes[1] * sizes[2];
  }
}

#ifdef INTEL_MKL_ML_ONLY
// Translate a 4-dim MKL size array into a TensorShape in 'data_format_'
// order; fails the op for any other rank.
inline void MklSizesToTFSizes(OpKernelContext* context,
                              TensorFormat data_format_,
                              const MklShape& mkl_shape,
                              TensorShape* tf_shape) {
  size_t tf_dim = mkl_shape.GetDimension();
  const size_t* tf_sizes = mkl_shape.GetSizes();

  OP_REQUIRES(
      context, tf_dim == 4,
      errors::InvalidArgument("MKLSizesToTFSizes: size must be 4-dim"));
  std::vector<int32> sizes;

  sizes.push_back(tf_sizes[3]);

  if (data_format_ == FORMAT_NHWC) {
    sizes.push_back(tf_sizes[1]);
    sizes.push_back(tf_sizes[0]);
    sizes.push_back(tf_sizes[2]);
  } else {
    sizes.push_back(tf_sizes[2]);
    sizes.push_back(tf_sizes[1]);
    sizes.push_back(tf_sizes[0]);
  }

  OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(sizes, tf_shape));
}
#endif

// Map a dimension letter to its MklDims index; LOG(FATAL) otherwise.
inline int32 GetMklTensorDimIndex(char dimension) {
  switch (dimension) {
    case 'N':
      return MklDims::N;
    case 'C':
      return MklDims::C;
    case 'H':
      return MklDims::H;
    case 'W':
      return MklDims::W;
    default:
      LOG(FATAL) << "Invalid dimension: " << dimension;
      return -1;  // Avoid compiler warning about missing return value
  }
}

#ifdef INTEL_MKL_ML_ONLY
inline int64 GetMklTensorDim(const MklShape& mkl_shape, char dimension) {
  int index = GetMklTensorDimIndex(dimension);
  CHECK(index >= 0 && index < mkl_shape.GetDimension())
      << "Invalid index from the dimension: " << index << ", " << dimension;
  return mkl_shape.dim_size(index);
}
#endif

// Copy both the data tensor and its MKL metadata tensor from input slot
// 'idx_in' to output slot 'idx_out'.
inline void CopyMklTensorInToOut(OpKernelContext* context, int idx_in,
                                 int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
  int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);

  const Tensor& data = context->input(idx_data_in);
  const Tensor& meta = context->input(idx_meta_in);
  Tensor output(data.dtype());
  Tensor meta_output(meta.dtype());

  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, data.shape()));
  CHECK(meta_output.CopyFrom(meta, meta.shape()));
  context->set_output(idx_data_out, output);
  context->set_output(idx_meta_out, meta_output);
}

#ifdef INTEL_MKL_ML_ONLY
// Copy a TF-layout input tensor to the output under a (possibly reshaped)
// 'shape', and emit a dummy (non-MKL) metadata tensor alongside it.
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  const Tensor& data = context->input(idx_data_in);
  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
  Tensor output(data.dtype());
  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, shape));
  context->set_output(idx_data_out, output);
}
#else
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  const Tensor& data = context->input(idx_data_in);
  MklDnnShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
  Tensor output(data.dtype());
  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, shape));
  context->set_output(idx_data_out, output);
}
#endif

#ifdef INTEL_MKL_ML_ONLY
// Forward a TF-layout input to the output without copying, emitting a dummy
// (non-MKL) metadata tensor; ref-typed inputs are forwarded as refs.
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#else
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  MklDnnShape dnn_shape_output;
  dnn_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, dnn_shape_output);
  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#endif

// Forward both the data tensor and its MKL metadata tensor unchanged.
inline void ForwardMklTensorInToOut(OpKernelContext* context, int idx_in,
                                    int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
  int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);

  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
    context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
    context->set_output(idx_meta_out, context->input(idx_meta_in));
  }
}

#ifndef INTEL_MKL_ML_ONLY
// Set a dummy MKLDNN shape (called when the output is in TF format)
inline void SetDummyMklDnnShapeOutput(OpKernelContext* context,
                                      uint32 idx_data_out) {
  MklDnnShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output);
}

// Forward the data tensor, but serialize the caller-supplied MKL shape
// (rather than the input's) into the metadata output.
inline void ForwardMklTensorInToOutWithMklShape(OpKernelContext* context,
                                                int idx_in, int idx_out,
                                                const MklDnnShape& mkl_shape) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  AllocateOutputSetMklShape(context, idx_out, mkl_shape);

  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#endif

// Forward the MKL shape ONLY (used in elementwise and other ops where
// we call the eigen implementation and MKL shape is not used)
inline void ForwardMklMetaDataInToOut(OpKernelContext* context,
                                      uint32 idx_data_in,
                                      uint32_t idx_data_out) {
  uint32 idx_meta_in =
      GetTensorMetaDataIndex(idx_data_in, context->num_inputs());
  uint32 idx_meta_out =
      GetTensorMetaDataIndex(idx_data_out, context->num_outputs());

  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
  } else {
    context->set_output(idx_meta_out, context->input(idx_meta_in));
  }
}

#ifdef INTEL_MKL_ML_ONLY
// Set a dummy MKL shape (called when the output is in TF format)
inline void SetDummyMklShapeOutput(OpKernelContext* context,
                                   uint32 idx_data_out) {
  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output);
}
// We don't need these functions in MKLDNN. We have defined equality operator
// on MklDnnShape class directly.

// Checks if the TF shape for both MKL tensors is the same or not
// Returns: true if both TF shapes are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
                             const MklShape* input_shape_1) {
  // Check for number of dimensions
  if (input_shape_0->GetDimension() != input_shape_1->GetDimension()) {
    return false;
  }

  // Check size of each dimension
  size_t ndims = input_shape_0->GetDimension();
  for (size_t i = 0; i < ndims; i++) {
    if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
      return false;
    }
  }

  return true;
}

// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
                             const TensorShape* input_shape_1) {
  // Check for number of dimensions
  if (input_shape_0->GetDimension() != input_shape_1->dims()) {
    return false;
  }

  // Check size of each dimension
  size_t ndims = input_shape_0->GetDimension();
  for (size_t i = 0; i < ndims; i++) {
    if (input_shape_0->tf_dim_size(i) != input_shape_1->dim_size(i)) {
      return false;
    }
  }

  return true;
}

// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const TensorShape* input_shape_0,
                             const MklShape* input_shape_1) {
  return MklCompareShapes(input_shape_1, input_shape_0);
}

// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const TensorShape* input_shape_0,
                             const TensorShape* input_shape_1) {
  // Check for number of dimensions
  if (input_shape_0->dims() != input_shape_1->dims()) {
    return false;
  }

  // Check size of each dimension
  size_t ndims = input_shape_0->dims();
  for (size_t i = 0; i < ndims; i++) {
    if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
      return false;
    }
  }

  return true;
}

// These functions do not compile with MKL-DNN since mkl.h is missing.
// We may need to remove them later.
// TODO(intel_tf): Remove this routine when faster MKL layout conversion is
// out.

// Per-batch-image NHWC->NCHW transpose of float data via MKL somatcopy.
inline void MklNHWCToNCHW(const Tensor& input, Tensor** output) {
  const float* buf_in = input.flat<float>().data();
  float* buf_out = (*output)->flat<float>().data();

  int64 N = input.dim_size(0);
  int64 H = input.dim_size(1);
  int64 W = input.dim_size(2);
  int64 C = input.dim_size(3);

  int64 stride_n = H * W * C;
#pragma omp parallel for num_threads(16)
  for (int64 n = 0; n < N; ++n) {
    mkl_somatcopy('R', 'T', H * W, C, 1, buf_in + n * stride_n, C,
                  buf_out + n * stride_n, H * W);
  }
}

// Per-batch-image NCHW->NHWC transpose; dimensions are read from *output.
inline void MklNCHWToNHWC(const Tensor& input, Tensor** output) {
  const float* buf_in = input.flat<float>().data();
  float* buf_out = (*output)->flat<float>().data();

  int64 N = (*output)->dim_size(0);
  int64 H = (*output)->dim_size(1);
  int64 W = (*output)->dim_size(2);
  int64 C = (*output)->dim_size(3);

  int64 stride_n = H * W * C;
#pragma omp parallel for num_threads(16)
  for (int64 n = 0; n < N; ++n) {
    mkl_somatcopy('R', 'T', C, H * W, 1, buf_in + n * stride_n, H * W,
                  buf_out + n * stride_n, C);
  }
}

#endif
// -------------------------------------------------------------------

#ifndef INTEL_MKL_ML_ONLY

/// Return MKL-DNN data type (memory::data_type) for input type T
///
/// @input None
/// @return memory::data_type corresponding to type T
template <typename T>
static memory::data_type MklDnnType();

/// Instantiation for float type. Add similar instantiations for other
/// type if needed.
template <> memory::data_type MklDnnType<float>() { return memory::data_type::f32; } template <> memory::data_type MklDnnType<quint8>() { return memory::data_type::u8; } template <> memory::data_type MklDnnType<qint8>() { return memory::data_type::s8; } template <> memory::data_type MklDnnType<qint32>() { return memory::data_type::s32; } /// Map TensorFlow's data format into MKL-DNN 3D data format /// @input: TensorFlow data format /// @return: memory::format corresponding to TensorFlow data format; /// Fails with an error if invalid data format. inline memory::format TFDataFormatToMklDnn3DDataFormat(TensorFormat format) { if (format == FORMAT_NHWC) return memory::format::ndhwc; else if (format == FORMAT_NCHW) return memory::format::ncdhw; TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format")); return memory::format::format_undef; } /// Map TensorFlow's data format into MKL-DNN data format /// /// @input: TensorFlow data format /// @return: memory::format corresponding to TensorFlow data format; /// Fails with an error if invalid data format. inline memory::format TFDataFormatToMklDnnDataFormat(TensorFormat format) { if (format == FORMAT_NHWC) return memory::format::nhwc; else if (format == FORMAT_NCHW) return memory::format::nchw; TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format")); return memory::format::format_undef; } /// Map MKL-DNN data format to TensorFlow's data format /// /// @input: memory::format /// @return: Tensorflow data format corresponding to memory::format /// Fails with an error if invalid data format. 
inline TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format) {
  // Both the 4-D and 5-D MKL-DNN layouts collapse onto TF's two formats.
  if (format == memory::format::nhwc || format == memory::format::ndhwc)
    return FORMAT_NHWC;
  else if (format == memory::format::nchw || format == memory::format::ncdhw)
    return FORMAT_NCHW;
  TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));

  // Return to prevent compiler warnings, otherwise TF_CHECK_OK will ensure
  // that we don't come here.
  return FORMAT_NHWC;
}

/// Map TensorShape object into memory::dims required by MKL-DNN
///
/// This function will simply map input TensorShape into MKL-DNN dims
/// naively. So it will preserve the order of dimensions. E.g., if
/// input tensor is in NHWC format, then dims will be in NHWC format
/// also.
///
/// @input TensorShape object in shape
/// @return memory::dims corresponding to TensorShape
inline memory::dims TFShapeToMklDnnDims(const TensorShape& shape) {
  memory::dims dims(shape.dims());
  for (int d = 0; d < shape.dims(); ++d) {
    dims[d] = shape.dim_size(d);
  }
  return dims;
}

/// Map TensorShape object into memory::dims in NCHW format required by MKL-DNN
///
/// This function is a specific one than above function. It will map input
/// TensorShape into MKL-DNN dims in NCHW format. So it may not preserve the
/// order of dimensions. E.g., if input tensor is in NHWC format, then dims
/// will be in NCHW format, and not in NHWC format.
///
/// @input TensorShape object in shape
/// @return memory::dims in MKL-DNN required NCHW format
inline memory::dims TFShapeToMklDnnDimsInNCHW(const TensorShape& shape,
                                              TensorFormat format) {
  // Check validity of format.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);

  // NOTE(review): dim_size() returns int64 but is stored in int here —
  // dimensions larger than INT_MAX would silently truncate; confirm callers
  // never pass such shapes.
  int n = shape.dim_size(GetTensorDimIndex(format, 'N'));
  int c = shape.dim_size(GetTensorDimIndex(format, 'C'));
  int h = shape.dim_size(GetTensorDimIndex(format, 'H'));
  int w = shape.dim_size(GetTensorDimIndex(format, 'W'));

  // MKL-DNN requires dimensions in NCHW format.
  return memory::dims({n, c, h, w});
}

/// 5-D variant of the above: maps a TensorShape (in the given 3-D spatial
/// TensorFormat) into MKL-DNN dims in NCDHW order. '0'/'1'/'2' index the
/// three spatial dimensions D/H/W via GetTensorDimIndex<3>.
inline memory::dims TFShapeToMklDnnDimsInNCDHW(const TensorShape& shape,
                                               TensorFormat format) {
  // Check validity of format.
  CHECK_NE(TFDataFormatToMklDnn3DDataFormat(format),
           memory::format::format_undef);

  int n = shape.dim_size(GetTensorDimIndex<3>(format, 'N'));
  int c = shape.dim_size(GetTensorDimIndex<3>(format, 'C'));
  int d = shape.dim_size(GetTensorDimIndex<3>(format, '0'));
  int h = shape.dim_size(GetTensorDimIndex<3>(format, '1'));
  int w = shape.dim_size(GetTensorDimIndex<3>(format, '2'));

  // MKL-DNN requires dimensions in NCDHW format.
  return memory::dims({n, c, d, h, w});
}

/// Overloaded version of function above. Input parameters are
/// self-explanatory.
inline memory::dims MklDnnDimsInNCHW(const memory::dims& in_dims,
                                     TensorFormat format) {
  // Check validity of format.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);

  int n = in_dims[GetTensorDimIndex(format, 'N')];
  int c = in_dims[GetTensorDimIndex(format, 'C')];
  int h = in_dims[GetTensorDimIndex(format, 'H')];
  int w = in_dims[GetTensorDimIndex(format, 'W')];

  // MKL-DNN requires dimensions in NCHW format.
  return memory::dims({n, c, h, w});
}

/// Map MklDnn memory::dims object into TensorShape object.
///
/// This function will simply map input shape in MKL-DNN memory::dims format
/// in Tensorflow's TensorShape object by preserving dimension order.
///
/// @input MKL-DNN memory::dims object
/// @output TensorShape corresponding to memory::dims
inline TensorShape MklDnnDimsToTFShape(const memory::dims& dims) {
  std::vector<int32> shape(dims.size(), -1);
  for (int d = 0; d < dims.size(); d++) {
    shape[d] = dims[d];
  }

  TensorShape ret;
  // MakeShape validates the sizes; failure here is a programming error.
  CHECK_EQ(TensorShapeUtils::MakeShape(shape, &ret).ok(), true);
  return ret;
}

/// Function to calculate strides given tensor shape in Tensorflow order
/// E.g., if dims_tf_order is {1, 2, 3, 4}, then as per Tensorflow convention,
/// dimesion with size 1 is outermost dimension; while dimension with size 4 is
/// innermost dimension. So strides for this tensor would be {4 * 3 * 2,
/// 4 * 3, 4, 1}, i.e., {24, 12, 4, 1}.
///
/// @input Tensorflow shape in memory::dims type
/// @return memory::dims containing strides for the tensor.
inline memory::dims CalculateTFStrides(const memory::dims& dims_tf_order) {
  CHECK_GT(dims_tf_order.size(), 0);
  memory::dims strides(dims_tf_order.size());
  int last_dim_idx = dims_tf_order.size() - 1;
  strides[last_dim_idx] = 1;  // innermost dimension is contiguous
  // Each stride is the product of all sizes to its right.
  for (int d = last_dim_idx - 1; d >= 0; d--) {
    strides[d] = strides[d + 1] * dims_tf_order[d + 1];
  }
  return strides;
}

/// Maps a TF padding mode to MKL-DNN's padding kind. The input is ignored
/// because MKL-DNN only supports zero padding.
inline padding_kind TFPaddingToMklDnnPadding(Padding pad) {
  // MKL-DNN only supports zero padding.
  return padding_kind::zero;
}

/// Helper function to create memory descriptor in Blocked format
///
/// @input: Tensor dimensions
/// @input: strides corresponding to dimensions. One can use utility
///         function such as CalculateTFStrides to compute strides
///         for given dimensions.
/// @return: memory::desc object corresponding to blocked memory format
///          for given dimensions and strides.
inline memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
                                               const memory::dims& strides,
                                               memory::data_type dtype) {
  CHECK_EQ(dim.size(), strides.size());

  // We have to construct memory descriptor in a C style. This is not at all
  // ideal but MKLDNN does not offer any API to construct descriptor in
  // blocked format except a copy constructor that accepts
  // mkldnn_memory_desc_t.
  mkldnn_memory_desc_t md;
  md.primitive_kind = mkldnn_memory;
  md.ndims = dim.size();
  md.format = mkldnn_blocked;
  md.data_type = memory::convert_to_c(dtype);

  for (size_t i = 0; i < dim.size(); i++) {
    // Block size 1 in every dimension, i.e. a plain strided layout; the
    // inner-block strides (strides[1]) are therefore all 1 and the outer
    // strides (strides[0]) carry the caller-supplied strides.
    md.layout_desc.blocking.block_dims[i] = 1;
    md.layout_desc.blocking.strides[1][i] = 1;
    md.layout_desc.blocking.strides[0][i] = strides[i];
    md.layout_desc.blocking.padding_dims[i] = dim[i];
    md.layout_desc.blocking.offset_padding_to_data[i] = 0;
    md.dims[i] = dim[i];
  }
  md.layout_desc.blocking.offset_padding = 0;

  return memory::desc(md);
}

// Forward declaration; defined after MklReorderPrimitiveFactory below.
template <typename T>
inline primitive FindOrCreateReorder(const memory* from, const memory* to);

/*
 * Class to represent all the resources corresponding to a tensor in TensorFlow
 * that are required to execute an operation (such as Convolution).
 */
template <typename T>
class MklDnnData {
 private:
  /// MKL-DNN memory primitive for input user memory. Owned (deleted in dtor).
  memory* user_memory_;

  /// MKL-DNN memory primitive in case input or output reorder is needed.
  /// Owned (deleted in dtor).
  memory* reorder_memory_;

  /// Operations memory descriptor. Owned (deleted in dtor).
  memory::desc* op_md_;
  // flag to indicate if data is 3D or not.
  // NOTE(review): not initialized in the constructor — GetIs3D() before
  // SetIs3DData() reads an indeterminate value; confirm callers always set it.
  bool bIs3D;
  /// Operations temp buffer, allocated via cpu_allocator().
  void* allocated_buffer_;
  /// CPU engine on which operation will be executed. Not owned.
  const engine* cpu_engine_;

 public:
  explicit MklDnnData(const engine* e)
      : user_memory_(nullptr),
        reorder_memory_(nullptr),
        op_md_(nullptr),
        allocated_buffer_(nullptr),
        cpu_engine_(e) {}

  ~MklDnnData() {
    if (allocated_buffer_ != nullptr) {
      cpu_allocator()->DeallocateRaw(allocated_buffer_);
    }
    cpu_engine_ = nullptr;  // We don't own this.
    delete (user_memory_);
    delete (reorder_memory_);
    delete (op_md_);
  }

  /// Returns the tensor's flat data buffer as a mutable void*, casting away
  /// constness (MKL-DNN memory handles are non-const).
  inline void* GetTensorBuffer(const Tensor* tensor) const {
    CHECK_NOTNULL(tensor);
    return const_cast<void*>(
        static_cast<const void*>(tensor->flat<T>().data()));
  }

  void SetIs3DData(bool bIs3D_) { bIs3D = bIs3D_; }

  bool GetIs3D() { return bIs3D; }

  /// Set user memory primitive using specified dimensions, memory format and
  /// data_buffer. Function automatically uses element data type by using
  /// input type T used for creating call object.
  ///
  /// In a nutshell, function allows user to describe the input tensor to
  /// an operation. E.g., filter of Conv2D is of shape {1, 2, 3, 4}, and
  /// memory format HWIO, and the buffer that contains actual values is
  /// pointed by data_buffer.
  inline void SetUsrMem(const memory::dims& dim, memory::format fm,
                        void* data_buffer = nullptr) {
    auto md = memory::desc(dim, MklDnnType<T>(), fm);
    SetUsrMem(md, data_buffer);
  }

  inline void SetUsrMem(const memory::dims& dim, memory::format fm,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(dim, fm, GetTensorBuffer(tensor));
  }

  /// Helper function to create memory descriptor in Blocked format
  ///
  /// @input: Tensor dimensions
  /// @input: strides corresponding to dimensions. One can use utility
  ///         function such as CalculateTFStrides to compute strides
  ///         for given dimensions.
  /// @return: memory::desc object corresponding to blocked memory format
  ///          for given dimensions and strides.
  static inline memory::desc CreateBlockedMemDesc(const memory::dims& dim,
                                                  const memory::dims& strides) {
    return CreateBlockedMemDescHelper(dim, strides, MklDnnType<T>());
  }

  /// A version of SetUsrMem call that allows user to create memory in blocked
  /// format. So in addition to accepting dimensions, it also accepts strides.
  /// This allows user to create memory for tensor in a format that is not
  /// supported by MKLDNN. E.g., MKLDNN does not support tensor format for 6
  /// dimensional tensor as a native format. But by using blocked format, a
  /// user can create memory for 6D tensor.
  inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
                        void* data_buffer = nullptr) {
    CHECK_EQ(dim.size(), strides.size());
    auto blocked_md = MklDnnData<T>::CreateBlockedMemDesc(dim, strides);
    SetUsrMem(blocked_md, data_buffer);
  }

  inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(dim, strides, GetTensorBuffer(tensor));
  }

  /// A version of function to set user memory primitive that accepts memory
  /// descriptor directly, instead of accepting dimensions and format. This
  /// function is more generic that the one above, but the function above is
  /// sufficient in most cases.
  inline void SetUsrMem(const memory::desc& md, void* data_buffer = nullptr) {
    auto pd = memory::primitive_desc(md, *cpu_engine_);
    SetUsrMem(pd, data_buffer);
  }

  /// A version of SetUsrMem with memory descriptor and tensor
  inline void SetUsrMem(const memory::desc& md, const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(md, GetTensorBuffer(tensor));
  }

  /// A version of function to set user memory primitive that accepts primitive
  /// descriptor directly, instead of accepting dimensions and format. This
  /// function is more generic that the one above, but the function above is
  /// sufficient in most cases.
  /// NOTE(review): repeated calls overwrite user_memory_ without deleting the
  /// previous allocation — presumably each instance is set exactly once.
  inline void SetUsrMem(const memory::primitive_desc& pd,
                        void* data_buffer = nullptr) {
    CHECK_NOTNULL(cpu_engine_);
    // TODO(nhasabni): can we remove dynamic memory allocation?
    if (data_buffer) {
      user_memory_ = new memory(pd, data_buffer);
    } else {
      user_memory_ = new memory(pd);
    }
  }

  /// A version of SetUsrMem with primitive descriptor and tensor
  inline void SetUsrMem(const memory::primitive_desc& pd,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(pd, GetTensorBuffer(tensor));
  }

  /// Get function for user memory primitive.
  inline const memory* GetUsrMem() const { return user_memory_; }

  /// Get function for primitive descriptor of user memory primitive.
  inline const memory::primitive_desc GetUsrMemPrimDesc() const {
    CHECK_NOTNULL(user_memory_);
    return user_memory_->get_primitive_desc();
  }

  /// Get function for descriptor of user memory.
  inline memory::desc GetUsrMemDesc() {
    // This is ugly. Why MKL-DNN does not provide desc() method of const type??
    const memory::primitive_desc pd = GetUsrMemPrimDesc();
    return const_cast<memory::primitive_desc*>(&pd)->desc();
  }

  /// Get function for data buffer of user memory primitive.
  inline void* GetUsrMemDataHandle() const {
    CHECK_NOTNULL(user_memory_);
    return user_memory_->get_data_handle();
  }

  /// Set function for data buffer of user memory primitive.
  inline void SetUsrMemDataHandle(void* data_buffer) {
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(data_buffer);
    user_memory_->set_data_handle(data_buffer);
  }

  /// Set function for data buffer of user memory primitive.
  inline void SetUsrMemDataHandle(const Tensor* tensor) {
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(tensor);
    user_memory_->set_data_handle(GetTensorBuffer(tensor));
  }

  /// allocate function for data buffer
  inline void AllocateBuffer(size_t size) {
    const int64 kMemoryAlginment = 64;  // For AVX512 memory alignment.
    allocated_buffer_ = cpu_allocator()->AllocateRaw(kMemoryAlginment, size);
  }

  inline void* GetAllocatedBuffer() { return allocated_buffer_; }

  /// Get the memory primitive for input and output of an op. If inputs
  /// to an op require reorders, then this function returns memory primitive
  /// for reorder. Otherwise, it will return memory primitive for user memory.
  ///
  /// E.g., Conv2D(I, F) is a primitive with I and F being inputs. Then to
  /// execute Conv2D, we need memory primitive for I and F. Buf if reorder is
  /// required for I and F (say I_r is reorder primitive for I; F_r is reorder
  /// primitive for F), then we need I_r and F_r to perform Conv2D.
  inline const memory& GetOpMem() const {
    return reorder_memory_ ? *reorder_memory_ : *user_memory_;
  }

  /// Set memory descriptor of an operation in terms of dimensions and memory
  /// format. E.g., For Conv2D, the dimensions would be same as user dimensions
  /// but memory::format would be mkldnn::any because we want MKL-DNN to choose
  /// best layout/format for given input dimensions.
  inline void SetOpMemDesc(const memory::dims& dim, memory::format fm) {
    // TODO(nhasabni): can we remove dynamic memory allocation?
    op_md_ = new memory::desc(dim, MklDnnType<T>(), fm);
  }

  /// Get function for memory descriptor for an operation
  inline const memory::desc& GetOpMemDesc() const { return *op_md_; }

  /// Predicate that checks if we need to reorder user's memory into memory
  /// pointed by op_pd.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///         operation
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool IsReorderNeeded(const memory::primitive_desc& op_pd) const {
    CHECK_NOTNULL(user_memory_);
    return op_pd != user_memory_->get_primitive_desc();
  }

  /// Predicate that checks if we need to reorder user's memory into memory
  /// based on the provided format.
  ///
  /// @input: target_format - memory format of the given input of an
  ///         operation
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool IsReorderNeeded(const memory::format& target_format) const {
    CHECK_NOTNULL(user_memory_);
    return target_format !=
           user_memory_->get_primitive_desc().desc().data.format;
  }

  /// Function to create a reorder from memory pointed by from to memory
  /// pointed by to. Returns created primitive.
  inline primitive CreateReorder(const memory* from, const memory* to) const {
    CHECK_NOTNULL(from);
    CHECK_NOTNULL(to);
    return reorder(*from, *to);
  }

  /// Function to handle input reordering
  ///
  /// Check if we need to reorder this input of an operation.
  /// Return true and allocate reorder memory primitive if reorder is needed.
  /// Otherwise, return false and do not allocate reorder memory primitive.
  ///
  /// To check if reorder is needed, this function compares memory primitive
  /// descriptor of an operation (op_pd) for the given input with the
  /// user-specified memory primitive descriptor.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///         operation
  /// @input: net - net to which to add reorder primitive in case it is needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd);
      net->push_back(CreateReorder(user_memory_, reorder_memory_));
      return true;
    }
    return false;
  }

  /// TODO: this is a faster path with reorder primitive cache compared with
  /// CheckReorderToOpMem(..., std::vector<primitive>* net), will remove
  /// slow path in the future
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd) {
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      // primitive reuse don't allow two same reorder prim in
      // one stream, so submit it immediately
      reorder_memory_ = new memory(op_pd);
      std::vector<primitive> net;
      net.push_back(FindOrCreateReorder<T>(user_memory_, reorder_memory_));
      stream(stream::kind::eager).submit(net).wait();
      return true;
    }
    return false;
  }

  /// Overloaded version of above function that accepts memory buffer
  /// where output of reorder needs to be stored.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///         operation
  /// @reorder_data_handle - memory buffer where output of reorder needs to be
  ///                        stored. Primitive does not check if buffer is
  ///                        enough size to write.
  /// @input: net - net to which to add reorder primitive in case it is needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  void* reorder_data_handle,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(reorder_data_handle);
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd, reorder_data_handle);
      net->push_back(CreateReorder(user_memory_, reorder_memory_));
      return true;
    }
    return false;
  }

  /// TODO: this is a faster path with reorder primitive cache compared with
  /// CheckReorderToOpMem(..., std::vector<primitive>* net), will remove
  /// slow path in the future
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  void* reorder_data_handle) {
    CHECK_NOTNULL(reorder_data_handle);
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      // primitive reuse don't allow two same reorder prim in
      // one stream, so submit it immediately
      std::vector<primitive> net;
      reorder_memory_ = new memory(op_pd, reorder_data_handle);
      net.push_back(FindOrCreateReorder<T>(user_memory_, reorder_memory_));
      stream(stream::kind::eager).submit(net).wait();
      return true;
    }
    return false;
  }

  /// Another overloaded version of CheckReorderToOpMem that accepts Tensor
  /// where output of reorder needs to be stored.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///         operation
  /// @reorder_tensor - Tensor whose buffer is to be used to store output of
  ///                   reorder. Primitive does not check if buffer is
  ///                   enough size to write.
  /// @input: net - net to which to add reorder primitive in case it is needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  Tensor* reorder_tensor,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(reorder_tensor);
    return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor), net);
  }

  /// TODO: this is a faster path with reorder primitive cache compared with
  /// CheckReorderToOpMem(..., std::vector<primitive>* net), will remove
  /// slow path in the future
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  Tensor* reorder_tensor) {
    CHECK_NOTNULL(reorder_tensor);
    return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor));
  }

  /// Function to handle output reorder
  ///
  /// This function performs very similar functionality as input reordering
  /// function above. The only difference is that this function does not add
  /// reorder primitive to the net. The reason for this is: the reorder
  /// primitive for output needs to be added to the list only after operation
  /// has executed. But we need to prepare a temporary buffer in case output
  /// reorder is needed. And this temporary buffer will hold the output of
  /// an operation before it is fed to reorder primitive.
  ///
  /// @input memory primitive descriptor for the given output of an operation
  /// @return: true in case reorder of output is needed; false, otherwise.
  inline bool PrepareReorderToUserMemIfReq(
      const memory::primitive_desc& op_pd) {
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd);
      return true;
    }
    return false;
  }

  /// Function to actually insert reorder primitive in the net
  ///
  /// This function completes remaining part of output reordering. It inserts
  /// a reordering primitive from the temporary buffer that holds the output
  /// to the user-specified output buffer.
  ///
  /// @input: net - net to which to add reorder primitive
  inline void InsertReorderToUserMem(std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(reorder_memory_);
    net->push_back(CreateReorder(reorder_memory_, user_memory_));
  }

  /// TODO: this is a faster path with reorder primitive cache compared with
  /// InsertReorderToUserMem(std::vector<primitive>* net), will remove
  /// slow path in the future
  inline void InsertReorderToUserMem() {
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(reorder_memory_);
    // primitive reuse don't allow two same reorder prim in
    // one stream, so submit it immediately
    std::vector<primitive> net;
    net.push_back(FindOrCreateReorder<T>(reorder_memory_, user_memory_));
    stream(stream::kind::eager).submit(net).wait();
  }
};

/// Base class for operations with reuse of primitives
class MklPrimitive {
 public:
  virtual ~MklPrimitive() {}

  // Dummy data which MKL DNN never operates on
  unsigned char* DummyData = nullptr;
};

const mkldnn::memory::dims NONE_DIMS = {};

/// Keyed cache of MklPrimitive objects. The backing map is thread_local,
/// so each thread caches (and leaks, by design) its own primitives.
template <typename T>
class MklPrimitiveFactory {
 public:
  MklPrimitiveFactory() {}

  ~MklPrimitiveFactory() {}

  /// Look up a cached primitive by key; returns nullptr on a cache miss.
  MklPrimitive* GetOp(const string& key) {
    auto& map = MklPrimitiveFactory<T>::GetHashMap();
    auto stream_iter = map.find(key);
    if (stream_iter == map.end()) {
      return nullptr;
    } else {
      CHECK(stream_iter->second != nullptr) << "nullptr present in map";
      return stream_iter->second;
    }
  }

  /// Insert a primitive under key; CHECK-fails if the key already exists.
  void SetOp(const string& key, MklPrimitive* op) {
    auto& map = MklPrimitiveFactory<T>::GetHashMap();
    auto stream_iter = map.find(key);

    CHECK(stream_iter == map.end());

    map[key] = op;
  }

  /// Function to decide whether HW has AVX512 or AVX2
  /// For those legacy device(w/o AVX512 and AVX2),
  /// MKL-DNN GEMM will be used.
  static inline bool IsLegacyPlatform() {
    return (!port::TestCPUFeature(port::CPUFeature::AVX512F) &&
            !port::TestCPUFeature(port::CPUFeature::AVX2));
  }

  /// Function to check whether primitive memory optimization is enabled
  /// (controlled by env var TF_MKL_OPTIMIZE_PRIMITIVE_MEMUSE; default true).
  static inline bool IsPrimitiveMemOptEnabled() {
    bool is_primitive_mem_opt_enabled = true;
    TF_CHECK_OK(ReadBoolFromEnvVar("TF_MKL_OPTIMIZE_PRIMITIVE_MEMUSE", true,
                                   &is_primitive_mem_opt_enabled));
    return is_primitive_mem_opt_enabled;
  }

 private:
  static inline std::unordered_map<string, MklPrimitive*>& GetHashMap() {
    static thread_local std::unordered_map<string, MklPrimitive*> map_;
    return map_;
  }
};

// utility class for creating keys of MKL primitive pool.
// Keys are built by appending raw bytes of each field followed by an 'x'
// delimiter; the resulting string is a binary blob, not printable text.
class FactoryKeyCreator {
 public:
  FactoryKeyCreator() { key_.reserve(kMaxKeyLength); }

  ~FactoryKeyCreator() {}

  void AddAsKey(const string& str) { Append(str); }

  void AddAsKey(const mkldnn::memory::dims& dims) {
    for (unsigned int i = 0; i < dims.size(); i++) {
      AddAsKey<int>(dims[i]);
    }
  }

  template <typename T>
  void AddAsKey(const T data) {
    auto buffer = reinterpret_cast<const char*>(&data);
    Append(StringPiece(buffer, sizeof(T)));
  }

  string GetKey() { return key_; }

 private:
  string key_;
  const char delimiter = 'x';
  const int kMaxKeyLength = 256;
  void Append(StringPiece s) {
    key_.append(string(s));
    key_.append(1, delimiter);
  }
};

/// Pick the preferred MKL-DNN blocked format for the given channel count
/// based on available SIMD features: 16-channel blocking on AVX512F,
/// 8-channel blocking on AVX2 (2-D only, and only when channel % 8 == 0),
/// plain nchw/ncdhw otherwise.
static inline memory::format get_desired_format(int channel,
                                                bool is_2d = true) {
  memory::format fmt_desired = memory::format::any;

  if (port::TestCPUFeature(port::CPUFeature::AVX512F)) {
    fmt_desired = is_2d ? memory::format::nChw16c : memory::format::nCdhw16c;
  } else if (port::TestCPUFeature(port::CPUFeature::AVX2) &&
             (channel % 8) == 0) {
    fmt_desired = is_2d
                      ? memory::format::nChw8c
                      : memory::format::ncdhw;  // no avx2 support for 3d yet.
  } else {
    fmt_desired = is_2d ? memory::format::nchw : memory::format::ncdhw;
  }
  return fmt_desired;
}

/// Cached reorder primitive. Holds shadow src/dst memory objects created with
/// DummyData handles; SetMemory() repoints them at real buffers before reuse.
class MklReorderPrimitive : public MklPrimitive {
 public:
  explicit MklReorderPrimitive(const memory* from, const memory* to) {
    Setup(from, to);
  }
  ~MklReorderPrimitive() {}

  std::shared_ptr<primitive> GetPrimitive() { return context_.reorder_prim; }

  /// Rebind the cached primitive's src/dst to the given buffers.
  void SetMemory(const memory* from, const memory* to) {
    context_.src_mem->set_data_handle(from->get_data_handle());
    context_.dst_mem->set_data_handle(to->get_data_handle());
  }

 private:
  struct ReorderContext {
    std::shared_ptr<mkldnn::memory> src_mem;
    std::shared_ptr<mkldnn::memory> dst_mem;
    std::shared_ptr<primitive> reorder_prim;
    ReorderContext()
        : src_mem(nullptr), dst_mem(nullptr), reorder_prim(nullptr) {}
  } context_;

  engine cpu_engine_ = engine(engine::cpu, 0);

  void Setup(const memory* from, const memory* to) {
    context_.src_mem.reset(
        new memory({from->get_primitive_desc().desc(), cpu_engine_},
                   DummyData));
    context_.dst_mem.reset(
        new memory({to->get_primitive_desc().desc(), cpu_engine_}, DummyData));
    context_.reorder_prim = std::make_shared<mkldnn::reorder>(
        reorder(*context_.src_mem, *context_.dst_mem));
  }
};

/// Singleton factory caching MklReorderPrimitive objects, keyed on the full
/// (format, data type, dims, strides) description of both endpoints.
template <typename T>
class MklReorderPrimitiveFactory : public MklPrimitiveFactory<T> {
 public:
  static MklReorderPrimitive* Get(const memory* from, const memory* to) {
    auto reorderPrim = static_cast<MklReorderPrimitive*>(
        MklReorderPrimitiveFactory<T>::GetInstance().GetReorder(from, to));
    if (reorderPrim == nullptr) {
      reorderPrim = new MklReorderPrimitive(from, to);
      MklReorderPrimitiveFactory<T>::GetInstance().SetReorder(from, to,
                                                              reorderPrim);
    }
    // Always repoint the cached primitive at the current buffers.
    reorderPrim->SetMemory(from, to);
    return reorderPrim;
  }

  static MklReorderPrimitiveFactory& GetInstance() {
    static MklReorderPrimitiveFactory instance_;
    return instance_;
  }

 private:
  MklReorderPrimitiveFactory() {}
  ~MklReorderPrimitiveFactory() {}

  static string CreateKey(const memory* from, const memory* to) {
    string prefix = "reorder";
    FactoryKeyCreator key_creator;
    auto const& from_desc = from->get_primitive_desc().desc().data;
    auto const& to_desc = to->get_primitive_desc().desc().data;
    const int KIdxFirstStride = 0;
    memory::dims from_dims(from_desc.dims, &from_desc.dims[from_desc.ndims]);
    memory::dims to_dims(to_desc.dims, &to_desc.dims[to_desc.ndims]);
    memory::dims from_strides(
        from_desc.layout_desc.blocking.strides[KIdxFirstStride],
        &from_desc.layout_desc.blocking
             .strides[KIdxFirstStride][from_desc.ndims]);
    memory::dims to_strides(
        to_desc.layout_desc.blocking.strides[KIdxFirstStride],
        &to_desc.layout_desc.blocking.strides[KIdxFirstStride][to_desc.ndims]);
    key_creator.AddAsKey(prefix);
    key_creator.AddAsKey(static_cast<int>(from_desc.format));
    key_creator.AddAsKey(static_cast<int>(from_desc.data_type));
    key_creator.AddAsKey(from_dims);
    key_creator.AddAsKey(from_strides);
    key_creator.AddAsKey(static_cast<int>(to_desc.format));
    key_creator.AddAsKey(static_cast<int>(to_desc.data_type));
    key_creator.AddAsKey(to_dims);
    key_creator.AddAsKey(to_strides);
    return key_creator.GetKey();
  }

  MklPrimitive* GetReorder(const memory* from, const memory* to) {
    string key = CreateKey(from, to);
    return this->GetOp(key);
  }

  void SetReorder(const memory* from, const memory* to, MklPrimitive* op) {
    string key = CreateKey(from, to);
    this->SetOp(key, op);
  }
};

/// Function to find(or create) a reorder from memory pointed by
/// from to memory pointed by to, it will created primitive or
/// get primitive from pool if it is cached.
/// Returns the primitive.
template <typename T> inline primitive FindOrCreateReorder(const memory* from, const memory* to) { CHECK_NOTNULL(from); CHECK_NOTNULL(to); MklReorderPrimitive* reorder_prim = MklReorderPrimitiveFactory<T>::Get(from, to); return *reorder_prim->GetPrimitive(); } // utility function to determine if it is conv 1x1 and stride != 1 // for purpose of temporarily disabling primitive reuse inline bool IsConv1x1StrideNot1(memory::dims filter_dims, memory::dims strides) { if (filter_dims.size() != 4 || strides.size() != 2) return false; return ((filter_dims[2] == 1) && (filter_dims[3] == 1) && ((strides[0] != 1) || (strides[1] != 1))); } #endif // INTEL_MKL_DNN } // namespace tensorflow #endif // INTEL_MKL #endif // TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
// ===== convolution_1x1.h (ncnn) =====
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; out.fill(bias0); int q = 0; for (; q + 3 < inch; q += 4) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q + 1); const float* img2 = bottom_blob.channel(q + 2); const float* img3 = bottom_blob.channel(q + 3); const float* kernel0 = kernel + p * inch + q; const float k0 = kernel0[0]; const float k1 = kernel0[1]; const float k2 = kernel0[2]; const float k3 = kernel0[3]; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; int size = outw * outh; int remain = size; for (; remain > 0; remain--) { float sum = *r0 * k0; float sum1 = *r1 * k1; float sum2 = *r2 * k2; float sum3 = *r3 * k3; *outptr += sum + sum1 + sum2 + sum3; r0++; r1++; r2++; r3++; outptr++; } } for (; q < inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p * inch + q; const float k0 = kernel0[0]; const float* r0 = img0; int size = outw * outh; int remain = size; for (; remain > 0; remain--) { float sum = *r0 * k0; *outptr += sum; r0++; outptr++; } } } } static void conv1x1s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; out.fill(bias0); int q = 0; for (; q + 3 < inch; q += 4) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q + 1); const float* img2 = bottom_blob.channel(q + 2); const float* img3 = bottom_blob.channel(q + 3); const float* kernel0 = kernel + p * inch + q; const float k0 = kernel0[0]; const float k1 = kernel0[1]; const float k2 = kernel0[2]; const float k3 = kernel0[3]; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; for (int i = 0; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { float sum = *r0 * k0; float sum1 = *r1 * k1; float sum2 = *r2 * k2; float sum3 = *r3 * k3; *outptr += sum + sum1 + sum2 + sum3; r0 += 2; r1 += 2; r2 += 2; r3 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; } } for (; q < inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p * inch + q; const float k0 = kernel0[0]; const float* r0 = img0; for (int i = 0; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { float sum = *r0 * k0; *outptr += sum; r0 += 2; outptr++; } r0 += tailstep; } } } }
mclib_3d.c
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <stdbool.h> #include <unistd.h> #include <dirent.h> #include "hdf5.h" #include <math.h> #include <time.h> #include <gsl/gsl_math.h> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #include <gsl/gsl_blas.h> #include <gsl/gsl_vector.h> #include <gsl/gsl_matrix.h> #include <gsl/gsl_sf_bessel.h> #include "mclib_3d.h" #include <omp.h> #define R_DIM 1260 #define THETA_DIM 280 #define PHI_DIM 280 void read_hydro(char hydro_prefix[200], int frame, double r_inj, double **x, double **y, double **z, double **szx, double **szy, double **r, double **theta, double **phi,\ double **velx, double **vely, double **velz, double **dens, double **pres, double **gamma, double **dens_lab, double **temp, int *number, int ph_inj_switch, double min_r, double max_r, double fps, FILE *fPtr) { FILE *hydroPtr=NULL; char hydrofile[200]="", file_num[200]="", full_file[200]="",file_end[200]="" ; char buf[10]=""; int i=0, j=0, k=0, elem=0, elem_factor=0; int phi_min_index=0, phi_max_index=0, r_min_index=0, r_max_index=0, theta_min_index=0, theta_max_index=0; //all_index_buffer contains phi_min, phi_max, theta_min, theta_max, r_min, r_max indexes to get from grid files int r_index=0, theta_index=0, phi_index=0, hydro_index=0, all_index_buffer=0, adjusted_remapping_index=0, dr_index=0; int *remapping_indexes=NULL; float buffer=0; float *dens_unprc=NULL; float *vel_r_unprc=NULL; float *vel_theta_unprc=NULL; float *vel_phi_unprc=NULL; float *pres_unprc=NULL; double ph_rmin=0, ph_rmax=0; double r_in=1e10, r_ref=2e13; double *r_edge=NULL; double *dr=NULL; double *r_unprc=malloc(sizeof(double)*R_DIM); double *theta_unprc=malloc(sizeof(double)*THETA_DIM); double *phi_unprc=malloc(sizeof(double)*PHI_DIM); if (ph_inj_switch==0) { ph_rmin=min_r; ph_rmax=max_r; } snprintf(file_end,sizeof(file_end),"%s","small.data" ); //density snprintf(hydrofile,sizeof(hydrofile),"%s%s%d%s",hydro_prefix,"u0", 1,"-" ); modifyFlashName(file_num, 
hydrofile, frame,1); fprintf(fPtr,">> Opening file %s\n", file_num); fflush(fPtr); snprintf(full_file, sizeof(full_file), "%s%s", file_num, file_end); /* fprintf(fPtr,"Reading Density: %s\n", full_file); fflush(fPtr); */ hydroPtr=fopen(full_file, "rb"); fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(&phi_min_index, sizeof(int)*1, 1,hydroPtr); //min and max indexes for the grid fread(&phi_max_index, sizeof(int)*1, 1,hydroPtr); fread(&theta_min_index, sizeof(int)*1, 1,hydroPtr); fread(&theta_max_index, sizeof(int)*1, 1,hydroPtr); fread(&r_min_index, sizeof(int)*1, 1,hydroPtr); fread(&r_max_index, sizeof(int)*1, 1,hydroPtr); fclose(hydroPtr); //fortran indexing starts @ 1, but C starts @ 0 r_min_index--; r_max_index--; theta_min_index--; theta_max_index--; phi_min_index--; phi_max_index--; //number of elements defined by this now elem=(r_max_index+1-r_min_index)*(theta_max_index+1-theta_min_index)*(phi_max_index+1-phi_min_index); //add 1 b/c max_index is 1 less than max number of elements in file /* fprintf(fPtr,"Elem %d\n", elem); fprintf(fPtr,"Limits %d, %d, %d, %d, %d, %d\n", phi_min_index, phi_max_index, theta_min_index, theta_max_index, r_min_index, r_max_index); fflush(fPtr); */ //now with number of elements allocate data, remember last element is some garbage that only fortran uses dens_unprc=malloc(elem*sizeof(float)); vel_r_unprc=malloc(elem*sizeof(float)); vel_theta_unprc=malloc(elem*sizeof(float)); pres_unprc=malloc(elem*sizeof(float)); vel_phi_unprc=malloc(elem*sizeof(float)); hydroPtr=fopen(full_file, "rb"); fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); //min and max indexes for the grid, dont need anymore so just save to dummy variable fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, 
sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(dens_unprc, sizeof(float),elem, hydroPtr); //data fclose(hydroPtr); /* for (i=0;i<R_DIM*THETA_DIM*PHI_DIM;i++) { if ((i>98784000-5) || (i<5)) { fprintf(fPtr,"Density %d: %0.7e\n", i, *(dens_unprc+i)); fflush(fPtr); } } */ //velocities divided by c //v_r snprintf(hydrofile,sizeof(hydrofile),"%s%s%d%s",hydro_prefix,"u0", 2,"-" ); modifyFlashName(file_num, hydrofile, frame,1); snprintf(full_file, sizeof(full_file), "%s%s", file_num, file_end); /* fprintf(fPtr,"Reading v_r: %s\n", full_file); fflush(fPtr); */ hydroPtr=fopen(full_file, "rb"); fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); //min and max indexes for the grid, dont need anymore so just save to dummy variable fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(vel_r_unprc, sizeof(float),elem, hydroPtr); fclose(hydroPtr); /* for (i=0;i<5;i++) { fprintf(fPtr,"V_r %d: %e\n", i, *(vel_r_unprc+i)); fflush(fPtr); } */ //v_theta snprintf(hydrofile,sizeof(hydrofile),"%s%s%d%s",hydro_prefix,"u0", 3,"-" ); modifyFlashName(file_num, hydrofile, frame,1); snprintf(full_file, sizeof(full_file), "%s%s", file_num, file_end); /* fprintf(fPtr,"Reading v_theta: %s\n", full_file); fflush(fPtr); */ hydroPtr=fopen(full_file, "rb"); fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the 
file from fortran fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); //min and max indexes for the grid, dont need anymore so just save to dummy variable fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(vel_theta_unprc, sizeof(float),elem, hydroPtr); fclose(hydroPtr); /* for (i=0;i<5;i++) { fprintf(fPtr,"V_theta %d: %e\n", i, *(vel_theta_unprc+i)); fflush(fPtr); } */ //v_phi snprintf(hydrofile,sizeof(hydrofile),"%s%s%d%s",hydro_prefix,"u0", 4,"-" ); modifyFlashName(file_num, hydrofile, frame,1); snprintf(full_file, sizeof(full_file), "%s%s", file_num, file_end); /* fprintf(fPtr,"Reading v_phi: %s\n", full_file); fflush(fPtr); */ hydroPtr=fopen(full_file, "rb"); fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); //min and max indexes for the grid, dont need anymore so just save to dummy variable fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(vel_phi_unprc, sizeof(float),elem, hydroPtr); fclose(hydroPtr); /* for (i=0;i<5;i++) { fprintf(fPtr,"V_phi %d: %e\n", i, *(vel_phi_unprc+i)); fflush(fPtr); } */ //pressure (divided by c^2) snprintf(hydrofile,sizeof(hydrofile),"%s%s%d%s",hydro_prefix,"u0", 8,"-" ); 
modifyFlashName(file_num, hydrofile, frame,1); snprintf(full_file, sizeof(full_file), "%s%s", file_num, file_end); /* fprintf(fPtr,"Reading pres: %s\n", full_file); fflush(fPtr); */ hydroPtr=fopen(full_file, "rb"); fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); //min and max indexes for the grid, dont need anymore so just save to dummy variable fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&all_index_buffer, sizeof(int)*1, 1,hydroPtr); fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran fread(pres_unprc, sizeof(float),elem, hydroPtr); fclose(hydroPtr); /* for (i=PHI_DIM-1;i<PHI_DIM;i++) { for (j=THETA_DIM-1;j<THETA_DIM;j++) { for (k=R_DIM-5;k<R_DIM;k++) { fprintf(fPtr,"Pres %d: %e\n", (i*R_DIM*THETA_DIM + j*R_DIM + k ), *(pres_unprc+(i*R_DIM*THETA_DIM + j*R_DIM + k ))); fflush(fPtr); } } } */ // see how many elements there are to test if reading correctly /* hydroPtr=fopen(full_file, "rb"); fread(&buffer, sizeof(float), 1,hydroPtr); //random stuff about the file from fortran while (1 == fread(&buffer,sizeof(float),1,hydroPtr)) { elem++; } //fread(pres_unprc, sizeof(double)*HYDRO_DIM,HYDRO_DIM, hydroPtr); fclose(hydroPtr); fprintf(fPtr,"Elem %d\n", elem); */ //R //remapping_indexes=getIndexesForRadialRemapping(hydro_prefix); //can run this once in debug mode to find out delta index for each remapping and number of total r elements //for given set of remappings on July 12th 2017, grid00-x1.data[420]=grid01-x1.data[0], grid01-x1.data[420]=grid02-x1.data[0], etc. 
total number of r is 3780 if (frame<=1300) { snprintf(hydrofile,sizeof(hydrofile),"%s%s%d%s",hydro_prefix,"grid0", 0,"-x1.data" ); //adjusted_remapping_index=(*(remapping_indexes+0))+r_min_index ; //this if I am not hardcoding the dr index values adjusted_remapping_index=(0*420)+r_min_index; } else if (frame<=2000) { snprintf(hydrofile,sizeof(hydrofile),"%s%s%d%s",hydro_prefix,"grid0", 1,"-x1.data" ); //adjusted_remapping_index=(*(remapping_indexes+1))+r_min_index; adjusted_remapping_index=(1*420)+r_min_index; } else if (frame<=10000) { snprintf(hydrofile,sizeof(hydrofile),"%s%s%d%s",hydro_prefix,"grid0", 2,"-x1.data" ); //adjusted_remapping_index=(*(remapping_indexes+2))+r_min_index; adjusted_remapping_index=(2*420)+r_min_index; } else if (frame<=20000) { snprintf(hydrofile,sizeof(hydrofile),"%s%s%d%s",hydro_prefix,"grid0", 3,"-x1.data" ); //adjusted_remapping_index=(*(remapping_indexes+3))+r_min_index; adjusted_remapping_index=(3*420)+r_min_index; } else if (frame<=35000) { snprintf(hydrofile,sizeof(hydrofile),"%s%s%d%s",hydro_prefix,"grid0", 4,"-x1.data" ); //adjusted_remapping_index=(*(remapping_indexes+4))+r_min_index; adjusted_remapping_index=(4*420)+r_min_index; } else if (frame<=50000) { snprintf(hydrofile,sizeof(hydrofile),"%s%s%d%s",hydro_prefix,"grid0", 5,"-x1.data" ); //adjusted_remapping_index=(*(remapping_indexes+5))+r_min_index; adjusted_remapping_index=(5*420)+r_min_index; } else if (frame<=60000) { snprintf(hydrofile,sizeof(hydrofile),"%s%s%d%s",hydro_prefix,"grid0", 6,"-x1.data" ); //adjusted_remapping_index=(*(remapping_indexes+6))+r_min_index; adjusted_remapping_index=(6*420)+r_min_index; } fprintf(fPtr,"Reading Radius: %s\n", hydrofile); fflush(fPtr); hydroPtr=fopen(hydrofile, "r"); i=0; while (i<R_DIM) { fscanf(hydroPtr, "%lf", (r_unprc+i)); //read value fgets(buf, 3,hydroPtr); //read comma /* if (i<5) { fprintf(fPtr,"R %d: %e\n", i, *(r_unprc+i)); fflush(fPtr); } */ i++; } fclose(hydroPtr); r_edge=malloc(sizeof(double)*(3780+1)); 
dr=malloc(sizeof(double)*(3780)); //calculate radial grid edges *(r_edge+0)=r_in; i=0; for (i=1;i<3780;i++) { *(r_edge+i)=(*(r_edge+i-1))+((*(r_edge+i-1))*(M_PI/560)/(1+((*(r_edge+i-1))/r_ref))); //r_i = r_(i-1) + Dq r_(i-1) [1 + r_(i-1)/r0]-1 *(dr+i-1)=(*(r_edge+i))-(*(r_edge+i-1)); /* if (i<5) { fprintf(fPtr,"R Edge: %d: %e Dr: %e\n", i, *(r_edge+i), *(dr+i-1)); fflush(fPtr); } */ } free(r_edge); //Theta snprintf(hydrofile,sizeof(hydrofile),"%s%s",hydro_prefix,"grid-x2.data" ); fprintf(fPtr,"Reading Theta: %s\n", hydrofile); fflush(fPtr); hydroPtr=fopen(hydrofile, "r"); i=0; while (i<THETA_DIM) { fscanf(hydroPtr, "%lf", (theta_unprc+i)); //read value fgets(buf, 3,hydroPtr); //read comma /* if (i<5) { fprintf(fPtr,"R %d: %e\n", i, *(theta_unprc+i)); fflush(fPtr); } */ i++; } fclose(hydroPtr); //Phi snprintf(hydrofile,sizeof(hydrofile),"%s%s",hydro_prefix,"grid-x3.data" ); /* fprintf(fPtr,"Reading Phi: %s\n", hydrofile); fflush(fPtr); */ hydroPtr=fopen(hydrofile, "r"); i=0; while (i<PHI_DIM) { fscanf(hydroPtr, "%lf", (phi_unprc+i)); //read value fgets(buf, 3,hydroPtr); //read comma /* if (i<5) { fprintf(fPtr,"R %d: %e\n", i, *(phi_unprc+i)); fflush(fPtr); } */ i++; } fclose(hydroPtr); //limit number of array elements PUT WHILE LOOP TO MAKE SURE NUMBER OF ELEMENTS >0 elem_factor=0; elem=0; while (elem==0) { elem=0; elem_factor++; for (i=0;i<(phi_max_index+1-phi_min_index);i++) { for (j=0;j<(theta_max_index+1-theta_min_index);j++) { for (k=0;k<(r_max_index+1-r_min_index);k++) { r_index=r_min_index+k; //if I have photons do selection differently than if injecting photons if (ph_inj_switch==0) { //printf("R's:%d, %e\n", k, *(r_unprc+r_index)); //if calling this function when propagating photons, choose blocks based on where the photons are if (((ph_rmin - elem_factor*C_LIGHT/fps)<(*(r_unprc+r_index))) && (*(r_unprc+r_index) < (ph_rmax + elem_factor*C_LIGHT/fps) )) { // *(pres_unprc+(i*R_DIM*THETA_DIM + j*R_DIM + k ) elem++; } } else { //if calling this function to 
inject photons choose blocks based on injection parameters, r_inj, which is sufficient if (((r_inj - elem_factor*C_LIGHT/fps)<(*(r_unprc+r_index))) && (*(r_unprc+r_index) < (r_inj + elem_factor*C_LIGHT/fps) )) { // *(pres_unprc+(i*R_DIM*THETA_DIM + j*R_DIM + k ) elem++; } } } } } } fprintf(fPtr,"Number of post restricted Elems: %d %e\n", elem, r_inj); //fprintf(fPtr,"Ph_min, Ph_max: %e, %e\n With c: min: %e max: %e \n", ph_rmin, ph_rmax, (ph_rmin - 2*C_LIGHT/fps), (ph_rmax + 2*C_LIGHT/fps)); fflush(fPtr); //allocate space for new set of data (*pres)=malloc (elem * sizeof (double )); (*velx)=malloc (elem * sizeof (double )); (*vely)=malloc (elem * sizeof (double )); (*velz)=malloc (elem * sizeof (double )); (*dens)=malloc (elem * sizeof (double )); (*x)=malloc (elem * sizeof (double )); (*y)=malloc (elem * sizeof (double )); (*z)=malloc (elem * sizeof (double )); (*r)=malloc (elem * sizeof (double )); (*theta)=malloc (elem * sizeof (double )); (*phi)=malloc (elem * sizeof (double )); (*gamma)=malloc (elem * sizeof (double )); (*dens_lab)=malloc (elem * sizeof (double )); (*szx)=malloc (elem * sizeof (double )); //theta and phi resolution (*szy)=malloc (elem * sizeof (double )); //r resolution (*temp)=malloc (elem * sizeof (double )); //limit number of array elements elem=0; for (i=0;i<(phi_max_index+1-phi_min_index);i++) { for (j=0;j<(theta_max_index+1-theta_min_index);j++) { for (k=0;k<(r_max_index+1-r_min_index);k++) { r_index=r_min_index+k; //look at indexes of r that are included in small hydro file theta_index=theta_min_index+j; phi_index=phi_min_index+i; dr_index=adjusted_remapping_index+k; hydro_index=(i*(r_max_index+1-r_min_index)*(theta_max_index+1-theta_min_index) + j*(r_max_index+1-r_min_index) + k ); //if I have photons do selection differently than if injecting photons if (ph_inj_switch==0) { //if calling this function when propagating photons, choose blocks based on where the photons are if (((ph_rmin - elem_factor*C_LIGHT/fps)<(*(r_unprc+r_index))) && 
(*(r_unprc+r_index) < (ph_rmax + elem_factor*C_LIGHT/fps) )) { (*pres)[elem] = *(pres_unprc+hydro_index); (*dens)[elem] = *(dens_unprc+hydro_index); (*temp)[elem] = pow(3*(*(pres_unprc+hydro_index))*pow(C_LIGHT,2.0)/(A_RAD) ,1.0/4.0); (*gamma)[elem] = pow(pow(1.0-(pow(*(vel_r_unprc+hydro_index),2)+ pow(*(vel_theta_unprc+hydro_index),2)+pow(*(vel_phi_unprc+hydro_index),2)),0.5),-1); (*dens_lab)[elem] = (*(dens_unprc+hydro_index))*pow(pow(1.0-(pow(*(vel_r_unprc+hydro_index),2)+ pow(*(vel_theta_unprc+hydro_index),2)+pow(*(vel_phi_unprc+hydro_index),2)),0.5),-1); (*r)[elem] = *(r_unprc+r_index); (*theta)[elem] = *(theta_unprc+theta_index); (*phi)[elem] = *(phi_unprc+phi_index); (*x)[elem] = (*(r_unprc+r_index))*sin(*(theta_unprc+theta_index))*cos(*(phi_unprc+phi_index)); (*y)[elem] = (*(r_unprc+r_index))*sin(*(theta_unprc+theta_index))*sin(*(phi_unprc+phi_index)); (*z)[elem] = (*(r_unprc+r_index))*cos(*(theta_unprc+theta_index)); (*szx)[elem] = *(dr+dr_index); (*szy)[elem] = M_PI/560; (*velx)[elem]=((*(vel_r_unprc+hydro_index))*sin(*(theta_unprc+theta_index))*cos(*(phi_unprc+phi_index))) + ((*(vel_theta_unprc+hydro_index))*cos(*(theta_unprc+theta_index))*cos(*(phi_unprc+phi_index))) - ((*(vel_phi_unprc+hydro_index))*sin(*(phi_unprc+phi_index))); (*vely)[elem]=((*(vel_r_unprc+hydro_index))*sin(*(theta_unprc+theta_index))*sin(*(phi_unprc+phi_index))) + ((*(vel_theta_unprc+hydro_index))*cos(*(theta_unprc+theta_index))*sin(*(phi_unprc+phi_index))) + ((*(vel_phi_unprc+hydro_index))*cos(*(phi_unprc+phi_index))); (*velz)[elem]=((*(vel_r_unprc+hydro_index))*cos(*(theta_unprc+theta_index))) - ((*(vel_theta_unprc+hydro_index))*sin(*(theta_unprc+theta_index))); elem++; } } else { //if calling this function to inject photons choose blocks based on injection parameters, r_inj, which is sufficient if (((r_inj - elem_factor*C_LIGHT/fps)<(*(r_unprc+r_index))) && (*(r_unprc+r_index) < (r_inj + elem_factor*C_LIGHT/fps) )) { (*pres)[elem] = *(pres_unprc+hydro_index); (*dens)[elem] = 
*(dens_unprc+hydro_index); (*temp)[elem] = pow(3*(*(pres_unprc+hydro_index))*pow(C_LIGHT,2.0)/(A_RAD) ,1.0/4.0); (*gamma)[elem] = pow(pow(1.0-(pow(*(vel_r_unprc+hydro_index),2)+ pow(*(vel_theta_unprc+hydro_index),2)+pow(*(vel_phi_unprc+hydro_index),2)),0.5),-1); (*dens_lab)[elem] = (*(dens_unprc+hydro_index))*pow(pow(1.0-(pow(*(vel_r_unprc+hydro_index),2)+ pow(*(vel_theta_unprc+hydro_index),2)+pow(*(vel_phi_unprc+hydro_index),2)),0.5),-1); (*r)[elem] = *(r_unprc+r_index); (*theta)[elem] = *(theta_unprc+theta_index); (*phi)[elem] = *(phi_unprc+phi_index); (*x)[elem] = (*(r_unprc+r_index))*sin(*(theta_unprc+theta_index))*cos(*(phi_unprc+phi_index)); (*y)[elem] = (*(r_unprc+r_index))*sin(*(theta_unprc+theta_index))*sin(*(phi_unprc+phi_index)); (*z)[elem] = (*(r_unprc+r_index))*cos(*(theta_unprc+theta_index)); (*szx)[elem] = *(dr+dr_index); (*szy)[elem] = M_PI/560; (*velx)[elem]=((*(vel_r_unprc+hydro_index))*sin(*(theta_unprc+theta_index))*cos(*(phi_unprc+phi_index))) + ((*(vel_theta_unprc+hydro_index))*cos(*(theta_unprc+theta_index))*cos(*(phi_unprc+phi_index))) - ((*(vel_phi_unprc+hydro_index))*sin(*(phi_unprc+phi_index))); (*vely)[elem]=((*(vel_r_unprc+hydro_index))*sin(*(theta_unprc+theta_index))*sin(*(phi_unprc+phi_index))) + ((*(vel_theta_unprc+hydro_index))*cos(*(theta_unprc+theta_index))*sin(*(phi_unprc+phi_index))) + ((*(vel_phi_unprc+hydro_index))*cos(*(phi_unprc+phi_index))); (*velz)[elem]=((*(vel_r_unprc+hydro_index))*cos(*(theta_unprc+theta_index))) - ((*(vel_theta_unprc+hydro_index))*sin(*(theta_unprc+theta_index))); elem++; } } } } } *number=elem; free(pres_unprc); free(dens_unprc); free(r_unprc); free(theta_unprc); free(phi_unprc);free(dr);free(vel_r_unprc); free(vel_theta_unprc); free(vel_phi_unprc); } void photonInjection3D( struct photon **ph, int *ph_num, double r_inj, double ph_weight, int min_photons, int max_photons, char spect, int array_length, double fps, double theta_min, double theta_max,\ double *x, double *y, double *z, double *szx, double 
*szy, double *r, double *theta, double *phi, double *temps, double *vx, double *vy, double *vz, gsl_rng * rand, FILE *fPtr) { int i=0, block_cnt=0, *ph_dens=NULL, ph_tot=0, j=0,k=0; double ph_dens_calc=0.0, fr_dum=0.0, y_dum=0.0, yfr_dum=0.0, fr_max=0, bb_norm=0, position_phi, ph_weight_adjusted, theta_prime=0; double com_v_phi, com_v_theta, *p_comv=NULL, *boost=NULL; //comoving phi, theta, comoving 4 momentum for a photon, and boost for photon(to go to lab frame) double *l_boost=NULL; //pointer to hold array of lorentz boost, to lab frame, values float num_dens_coeff; if (spect=='w') //from MCRAT paper, w for wien spectrum { num_dens_coeff=8.44; //printf("in wien spectrum\n"); } else { num_dens_coeff=20.29; //this is for black body spectrum //printf("in BB spectrum"); } //find how many blocks are near the injection radius within the angles defined in mc.par, get temperatures and calculate number of photons to allocate memory for //and then rcord which blocks have to have "x" amount of photons injected there printf("%e, %e\n",*(phi+i), theta_max); for(i=0;i<array_length;i++) { //look at all boxes in width delta r=c/fps and within angles we are interested in NEED TO modify for RIKEN data- dont need r anymore, just theta and phi? 
(didnt work), just look at pojection on x-z plane theta_prime=acos(*(y+i)/(*(r+i))); //jet axis here is the y axis if ( (theta_prime< theta_max) && (theta_prime >= theta_min) ) //(*(r+i) > (r_inj - C_LIGHT/fps)) && (*(r+i) < (r_inj + C_LIGHT/fps) ) && { //printf("%e\n", theta_prime ); block_cnt++; } } printf("Blocks: %d\n", block_cnt); ph_dens=malloc(block_cnt * sizeof(int)); //calculate the photon density for each block and save it to the array j=0; ph_tot=0; ph_weight_adjusted=ph_weight; //printf("%d %d\n", max_photons, min_photons); while ((ph_tot>max_photons) || (ph_tot<min_photons) ) { j=0; ph_tot=0; //allocate memory to record density of photons for each block //ph_dens=malloc(block_cnt * sizeof(int)); for (i=0;i<array_length;i++) { //printf("%d\n",i); //printf("%e, %e, %e, %e, %e, %e\n", *(r+i),(r_inj - C_LIGHT/fps), (r_inj + C_LIGHT/fps), *(theta+i) , theta_max, theta_min); //NEED TO modify for RIKEN data - modified theta_prime=acos(*(y+i)/(*(r+i))); if ( (theta_prime< theta_max) && (theta_prime >= theta_min) ) { //NEED TO modify for RIKEN data - modified ph_dens_calc=(num_dens_coeff*pow(*(temps+i),3.0)*pow(*(r+i),2)*sin(*(theta+i))* pow(*(szy+i),2.0)*(*(szx+i)) /(ph_weight_adjusted))*pow(pow(1.0-(pow(*(vx+i),2)+pow(*(vy+i),2)+pow(*(vz+i),2)),0.5),-1) ; //a*T^3/(weight) dV, dV=2*PI*x*dx^2, (*(ph_dens+j))=gsl_ran_poisson(rand,ph_dens_calc) ; //choose from poission distribution with mean of ph_dens_calc //printf("%d, %lf \n",*(ph_dens+j), ph_dens_calc); //sum up all the densities to get total number of photons ph_tot+=(*(ph_dens+j)); j++; } } if (ph_tot>max_photons) { //if the number of photons is too big make ph_weight larger ph_weight_adjusted*=10; //free(ph_dens); } else if (ph_tot<min_photons) { ph_weight_adjusted*=0.5; //free(ph_dens); } //printf("dens: %d, photons: %d\n", *(ph_dens+(j-1)), ph_tot); } printf("%d\n", ph_tot); //allocate memory for that many photons and also allocate memory to hold comoving 4 momentum of each photon and the velocity of the 
fluid (*ph)=malloc (ph_tot * sizeof (struct photon )); p_comv=malloc(4*sizeof(double)); boost=malloc(3*sizeof(double)); l_boost=malloc(4*sizeof(double)); //go through blocks and assign random energies/locations to proper number of photons ph_tot=0; k=0; for (i=0;i<array_length;i++) { theta_prime=acos(*(y+i)/(*(r+i))); if ( (theta_prime< theta_max) && (theta_prime >= theta_min) ) //NEED TO modify for RIKEN data - modified { //*(temps+i)=0.76*(*(temps+i)); for(j=0;j<( *(ph_dens+k) ); j++ ) { //have to get random frequency for the photon comoving frequency y_dum=1; //initalize loop yfr_dum=0; while (y_dum>yfr_dum) { fr_dum=gsl_rng_uniform_pos(rand)*6.3e11*(*(temps+i)); //in Hz //printf("%lf, %lf ",gsl_rng_uniform_pos(rand), (*(temps+i))); y_dum=gsl_rng_uniform_pos(rand); //printf("%lf ",fr_dum); if (spect=='w') { yfr_dum=(1.0/(1.29e31))*pow((fr_dum/(*(temps+i))),3.0)/(exp((PL_CONST*fr_dum)/(K_B*(*(temps+i)) ))-1); //curve is normalized to maximum } else { fr_max=(5.88e10)*(*(temps+i));//(C_LIGHT*(*(temps+i)))/(0.29); //max frequency of bb bb_norm=(PL_CONST*fr_max * pow((fr_max/C_LIGHT),2.0))/(exp(PL_CONST*fr_max/(K_B*(*(temps+i))))-1); //find value of bb at fr_max yfr_dum=((1.0/bb_norm)*PL_CONST*fr_dum * pow((fr_dum/C_LIGHT),2.0))/(exp(PL_CONST*fr_dum/(K_B*(*(temps+i))))-1); //curve is normalized to vaue of bb @ max frequency } //printf("%lf, %lf,%lf,%e \n",(*(temps+i)),fr_dum, y_dum, yfr_dum); } //printf("%lf\n ",fr_dum); //position_phi= gsl_rng_uniform(rand)*2*M_PI; //NEED TO modify for RIKEN data-modified, dont need anymore com_v_phi=gsl_rng_uniform(rand)*2*M_PI; com_v_theta=acos((gsl_rng_uniform(rand)*2)-1); //printf("%lf, %lf, %lf\n", position_phi, com_v_phi, com_v_theta); //populate 4 momentum comoving array *(p_comv+0)=PL_CONST*fr_dum/C_LIGHT; *(p_comv+1)=(PL_CONST*fr_dum/C_LIGHT)*sin(com_v_theta)*cos(com_v_phi); *(p_comv+2)=(PL_CONST*fr_dum/C_LIGHT)*sin(com_v_theta)*sin(com_v_phi); *(p_comv+3)=(PL_CONST*fr_dum/C_LIGHT)*cos(com_v_theta); //populate boost 
matrix, not sure why multiplying by -1, seems to give correct answer in old python code... //NEED TO modify for RIKEN data - modified *(boost+0)=-1*(*(vx+i)); *(boost+1)=-1*(*(vy+i)); *(boost+2)=-1*(*(vz+i)); //printf("%lf, %lf, %lf\n", *(boost+0), *(boost+1), *(boost+2)); //boost to lab frame lorentzBoost(boost, p_comv, l_boost, 'p', fPtr); //printf("Assignemnt: %e, %e, %e, %e\n", *(l_boost+0), *(l_boost+1), *(l_boost+2),*(l_boost+3)); (*ph)[ph_tot].p0=(*(l_boost+0)); (*ph)[ph_tot].p1=(*(l_boost+1)); (*ph)[ph_tot].p2=(*(l_boost+2)); (*ph)[ph_tot].p3=(*(l_boost+3)); //NEED TO modify for RIKEN data-modified (*ph)[ph_tot].r0= (*(x+i)); //put photons @ center of box that they are supposed to be in with random phi (*ph)[ph_tot].r1=(*(y+i)) ; (*ph)[ph_tot].r2=(*(z+i)); //y coordinate in flash becomes z coordinate in MCRaT (*ph)[ph_tot].num_scatt=0; (*ph)[ph_tot].weight=ph_weight_adjusted; //printf("%d\n",ph_tot); ph_tot++; } k++; } } *ph_num=ph_tot; //save number of photons //printf(" %d: %d\n", *(ph_dens+(k-1)), *ph_num); free(ph_dens); free(p_comv);free(boost); free(l_boost); } void phMinMax(struct photon *ph, int ph_num, double *min, double *max, double *min_theta, double *max_theta, FILE *fPtr) { double temp_r_max=0, temp_r_min=DBL_MAX, temp_theta_max=0, temp_theta_min=DBL_MAX; int i=0, num_thread=omp_get_num_threads(); double ph_r=0, ph_theta=0; #pragma omp parallel for num_threads(num_thread) firstprivate(ph_r, ph_theta) reduction(min:temp_r_min) reduction(max:temp_r_max) reduction(min:temp_theta_min) reduction(max:temp_theta_max) for (i=0;i<ph_num;i++) { if ((ph+i)->weight != 0) { ph_r=pow(pow( ((ph+i)->r0), 2.0) + pow(((ph+i)->r1),2.0 ) + pow(((ph+i)->r2) , 2.0),0.5); ph_theta=acos(((ph+i)->r2) /ph_r); //this is the photons theta psition in the FLASH grid, gives in radians if (ph_r > temp_r_max ) { temp_r_max=ph_r; //fprintf(fPtr, "The new max is: %e from photon %d with x: %e y: %e z: %e\n", temp_r_max, i, ((ph+i)->r0), (ph+i)->r1, (ph+i)->r2); } //if ((i==0) || 
(ph_r<temp_r_min)) if (ph_r<temp_r_min) { temp_r_min=ph_r; //fprintf(fPtr, "The new min is: %e from photon %d with x: %e y: %e z: %e\n", temp_r_min, i, ((ph+i)->r0), (ph+i)->r1, (ph+i)->r2); } if (ph_theta > temp_theta_max ) { temp_theta_max=ph_theta; //fprintf(fPtr, "The new max is: %e from photon %d with x: %e y: %e z: %e\n", temp_r_max, i, ((ph+i)->r0), (ph+i)->r1, (ph+i)->r2); } //if ((i==0) || (ph_r<temp_r_min)) if (ph_theta<temp_theta_min) { temp_theta_min=ph_theta; //fprintf(fPtr, "The new min is: %e from photon %d with x: %e y: %e z: %e\n", temp_r_min, i, ((ph+i)->r0), (ph+i)->r1, (ph+i)->r2); } } } *max=temp_r_max; *min=temp_r_min; *max_theta=temp_theta_max; *min_theta=temp_theta_min; } int *getIndexesForRadialRemapping(char hydro_prefix[200]) { FILE *hydroPtr=NULL; char hydrofile[200]=""; char buf[10]=""; int i=0, j=0; int *remapping_start_index=malloc(sizeof(int)*7); //what index out of total range of r does each remapping begin at double r_in=1e10, r_ref=2e13; double *r_unprc_0=malloc(sizeof(double)*R_DIM), *r_unprc_1=malloc(sizeof(double)*R_DIM), *r_unprc_2=malloc(sizeof(double)*R_DIM), *r_unprc_3=malloc(sizeof(double)*R_DIM); double *r_unprc_4=malloc(sizeof(double)*R_DIM), *r_unprc_5=malloc(sizeof(double)*R_DIM), *r_unprc_6=malloc(sizeof(double)*R_DIM); double *r_edge=NULL, *dr=NULL, *rPtr=NULL; for (i=0;i<7;i++) { snprintf(hydrofile,sizeof(hydrofile),"%s%s%d%s",hydro_prefix,"grid0", i,"-x1.data" ); hydroPtr=fopen(hydrofile, "r"); j=0; while (j<R_DIM) { switch (i) { case 0: fscanf(hydroPtr, "%lf", (r_unprc_0+j)); //read value case 1: fscanf(hydroPtr, "%lf", (r_unprc_1+j)); case 2: fscanf(hydroPtr, "%lf", (r_unprc_2+j)); case 3: fscanf(hydroPtr, "%lf", (r_unprc_3+j)); case 4: fscanf(hydroPtr, "%lf", (r_unprc_4+j)); case 5: fscanf(hydroPtr, "%lf", (r_unprc_5+j)); case 6: fscanf(hydroPtr, "%lf", (r_unprc_6+j)); } fgets(buf, 3,hydroPtr); //read comma /* if (i<5) { fprintf(fPtr,"R %d: %e\n", i, *(r_unprc+i)); fflush(fPtr); } */ j++; } fclose(hydroPtr); } 
//calculate the indexes in which each remapping takes over j=0; //keeps track of indexes of all of the possible r values i=0; //keeps track of index within a certain remapping, when get to R_DIM know that were @ end of last remapping b/c remappings overlap with one another rPtr=r_unprc_0; //start off looking at 0th remapping *(remapping_start_index+1)=j; //0th remapping starts at index 0 while (i<R_DIM) { if (*(rPtr+i)== *(r_unprc_1+0)) { //if the element of the 0th remapping is equal to the 1st element of the 1st remapping, start to look at the 1st remapping rPtr=r_unprc_1; i=0; *(remapping_start_index+1)=j; } else if (*(rPtr+i)== *(r_unprc_2+0)) { rPtr=r_unprc_2; i=0; *(remapping_start_index+2)=j; } else if (*(rPtr+i)== *(r_unprc_3+0)) { rPtr=r_unprc_3; i=0; *(remapping_start_index+3)=j; } else if (*(rPtr+i)== *(r_unprc_4+0)) { rPtr=r_unprc_4; i=0; *(remapping_start_index+4)=j; } else if (*(rPtr+i)== *(r_unprc_5+0)) { rPtr=r_unprc_5; i=0; *(remapping_start_index+5)=j; } else if (*(rPtr+i)== *(r_unprc_6+0)) { rPtr=r_unprc_6; i=0; *(remapping_start_index+6)=j; } j++; i++; } printf("Indexes %d, %d, %d, %d, %d, %d, %d\n Elems: %d\n", *(remapping_start_index+0), *(remapping_start_index+1), *(remapping_start_index+2), *(remapping_start_index+3), *(remapping_start_index+4), *(remapping_start_index+5), *(remapping_start_index+6), j); //exit(0); r_edge=malloc(sizeof(double)*(j+1)); dr=malloc(sizeof(double)*j); //calculate radial grid edges *(r_edge+0)=r_in; i=0; for (i=1;i<j;i++) { *(r_edge+i)=(*(r_edge+i-1))+((*(r_edge+i-1))*(M_PI/560)/(1+((*(r_edge+i-1))/r_ref))); //r_i = r_(i-1) + Dq r_(i-1) [1 + r_(i-1)/r0]-1 *(dr+i-1)=(*(r_edge+i))-(*(r_edge+i-1)); /* if (i<5) { fprintf(fPtr,"R Edge: %d: %e Dr: %e\n", i, *(r_edge+i), *(dr+i-1)); fflush(fPtr); } */ } free(r_edge); free(r_unprc_0); free(r_unprc_1); free(r_unprc_2); free(r_unprc_3); free(r_unprc_4); free(r_unprc_5); free(r_unprc_6); return remapping_start_index; }
/* ===== GB_binop__minus_uint32.c ===== */
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): any changes belong in the Generator/ sources; the code below
// is kept byte-for-byte and only the commentary has been touched.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__minus_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_01__minus_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_02__minus_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_03__minus_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__minus_uint32)
// A*D function (colscale):         GB (_AxD__minus_uint32)
// D*A function (rowscale):         GB (_DxB__minus_uint32)
// C+=B function (dense accum):     GB (_Cdense_accumB__minus_uint32)
// C+=b function (dense accum):     GB (_Cdense_accumb__minus_uint32)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__minus_uint32)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__minus_uint32)
// C=scalar+B                       GB (_bind1st__minus_uint32)
// C=scalar+B'                      GB (_bind1st_tran__minus_uint32)
// C=A+scalar                       GB (_bind2nd__minus_uint32)
// C=A'+scalar                      GB (_bind2nd_tran__minus_uint32)

// C type:   uint32_t
// A type:   uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij - bij)

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint32_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint32_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x - y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_UINT32 || GxB_NO_MINUS_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__minus_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return; harmless, kept as generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__minus_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__minus_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__minus_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__minus_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t   x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__minus_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t   y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij - y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x - aij) ;                       \
}

GrB_Info GB (_bind1st_tran__minus_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij - y) ;                       \
}

GrB_Info GB (_bind2nd_tran__minus_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ===== deprecate.c ===== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE % % D D E P P R R E C A A T E % % D D EEE PPPPP RRRR EEE C AAAAA T EEE % % D D E P R R E C A A T E % % DDDD EEEEE P R R EEEEE CCCC A A T EEEEE % % % % % % MagickWand Deprecated Methods % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "wand/studio.h" #include "wand/MagickWand.h" #include "wand/magick-wand-private.h" #include "wand/wand.h" #include "magick/monitor-private.h" #include "magick/thread-private.h" /* Define declarations. */ #define PixelViewId "PixelView" /* Typedef declarations. 
*/ struct _PixelView { size_t id; char name[MaxTextExtent]; ExceptionInfo *exception; MagickWand *wand; CacheView *view; RectangleInfo region; size_t number_threads; PixelWand ***pixel_wands; MagickBooleanType debug; size_t signature; }; #if !defined(MAGICKCORE_EXCLUDE_DEPRECATED) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w A l l o c a t e W a n d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawAllocateWand() allocates an initial drawing wand which is an opaque % handle required by the remaining drawing methods. % % The format of the DrawAllocateWand method is: % % DrawingWand DrawAllocateWand(const DrawInfo *draw_info,Image *image) % % A description of each parameter follows: % % o draw_info: Initial drawing defaults. Set to NULL to use defaults. % % o image: the image to draw on. % */ WandExport DrawingWand *DrawAllocateWand(const DrawInfo *draw_info,Image *image) { return(AcquireDrawingWand(draw_info,image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k A v e r a g e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickAverageImages() average a set of images. % % The format of the MagickAverageImages method is: % % MagickWand *MagickAverageImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. 
% */ static MagickWand *CloneMagickWandFromImages(const MagickWand *wand, Image *images) { MagickWand *clone_wand; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); clone_wand=(MagickWand *) AcquireMagickMemory(sizeof(*clone_wand)); if (clone_wand == (MagickWand *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", images->filename); (void) ResetMagickMemory(clone_wand,0,sizeof(*clone_wand)); clone_wand->id=AcquireWandId(); (void) FormatLocaleString(clone_wand->name,MaxTextExtent,"%s-%.20g", MagickWandId,(double) clone_wand->id); clone_wand->exception=AcquireExceptionInfo(); InheritException(clone_wand->exception,wand->exception); clone_wand->image_info=CloneImageInfo(wand->image_info); clone_wand->quantize_info=CloneQuantizeInfo(wand->quantize_info); clone_wand->images=images; clone_wand->debug=IsEventLogging(); if (clone_wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_wand->name); clone_wand->signature=WandSignature; return(clone_wand); } WandExport MagickWand *MagickAverageImages(MagickWand *wand) { Image *average_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) return((MagickWand *) NULL); average_image=EvaluateImages(wand->images,MeanEvaluateOperator, wand->exception); if (average_image == (Image *) NULL) return((MagickWand *) NULL); return(CloneMagickWandFromImages(wand,average_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e P i x e l V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelView() makes a copy of the specified pixel view. 
% % The format of the ClonePixelView method is: % % PixelView *ClonePixelView(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport PixelView *ClonePixelView(const PixelView *pixel_view) { PixelView *clone_view; register ssize_t i; assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); if (pixel_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name); clone_view=(PixelView *) AcquireMagickMemory(sizeof(*clone_view)); if (clone_view == (PixelView *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", pixel_view->name); (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view)); clone_view->id=AcquireWandId(); (void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g", PixelViewId,(double) clone_view->id); clone_view->exception=AcquireExceptionInfo(); InheritException(clone_view->exception,pixel_view->exception); clone_view->view=CloneCacheView(pixel_view->view); clone_view->region=pixel_view->region; clone_view->number_threads=pixel_view->number_threads; for (i=0; i < (ssize_t) pixel_view->number_threads; i++) clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **) pixel_view->pixel_wands[i],pixel_view->region.width); clone_view->debug=pixel_view->debug; if (clone_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name); clone_view->signature=WandSignature; return(clone_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y P i x e l V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelView() deallocates memory associated with a pixel view. 
% % The format of the DestroyPixelView method is: % % PixelView *DestroyPixelView(PixelView *pixel_view, % const size_t number_wands,const size_t number_threads) % % A description of each parameter follows: % % o pixel_view: the pixel view. % % o number_wand: the number of pixel wands. % % o number_threads: number of threads. % */ static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands, const size_t number_wands,const size_t number_threads) { register ssize_t i; assert(pixel_wands != (PixelWand ***) NULL); for (i=0; i < (ssize_t) number_threads; i++) if (pixel_wands[i] != (PixelWand **) NULL) pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands); pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands); return(pixel_wands); } WandExport PixelView *DestroyPixelView(PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); pixel_view->pixel_wands=DestroyPixelsThreadSet(pixel_view->pixel_wands, pixel_view->region.width,pixel_view->number_threads); pixel_view->view=DestroyCacheView(pixel_view->view); pixel_view->exception=DestroyExceptionInfo(pixel_view->exception); pixel_view->signature=(~WandSignature); RelinquishWandId(pixel_view->id); pixel_view=(PixelView *) RelinquishMagickMemory(pixel_view); return(pixel_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D u p l e x T r a n s f e r P i x e l V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DuplexTransferPixelViewIterator() iterates over three pixel views in % parallel and calls your transfer method for each scanline of the view. The % source and duplex pixel region is not confined to the image canvas-- that is % you can include negative offsets or widths or heights that exceed the image % dimension. 
However, the destination pixel view is confined to the image % canvas-- that is no negative offsets or widths or heights that exceed the % image dimension are permitted. % % Use this pragma: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. % % The format of the DuplexTransferPixelViewIterator method is: % % MagickBooleanType DuplexTransferPixelViewIterator(PixelView *source, % PixelView *duplex,PixelView *destination, % DuplexTransferPixelViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source pixel view. % % o duplex: the duplex pixel view. % % o destination: the destination pixel view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ WandExport MagickBooleanType DuplexTransferPixelViewIterator( PixelView *source,PixelView *duplex,PixelView *destination, DuplexTransferPixelViewMethod transfer,void *context) { #define DuplexTransferPixelViewTag "PixelView/DuplexTransfer" ExceptionInfo *exception; Image *destination_image, *duplex_image, *source_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(source != (PixelView *) NULL); assert(source->signature == WandSignature); if (transfer == (DuplexTransferPixelViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; duplex_image=duplex->wand->images; destination_image=destination->wand->images; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) #endif for (y=source->region.y; y < (ssize_t) source->region.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const IndexPacket *magick_restrict duplex_indexes, *magick_restrict indexes; register const 
PixelPacket *magick_restrict duplex_pixels, *magick_restrict pixels; register IndexPacket *magick_restrict destination_indexes; register ssize_t x; register PixelPacket *magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y, source->region.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (ssize_t) source->region.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (source_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->region.x,y, duplex->region.width,1,duplex->exception); if (duplex_pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view); for (x=0; x < (ssize_t) duplex->region.width; x++) PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x); if (duplex_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) duplex->region.width; x++) PixelSetBlackQuantum(duplex->pixel_wands[id][x], GetPixelIndex(duplex_indexes+x)); if (duplex_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) duplex->region.width; x++) PixelSetIndex(duplex->pixel_wands[id][x], GetPixelIndex(duplex_indexes+x)); destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->region.x,y,destination->region.width,1,exception); if (destination_pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view); for (x=0; x < (ssize_t) 
destination->region.width; x++) PixelSetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetBlackQuantum(destination->pixel_wands[id][x], GetPixelIndex(destination_indexes+x)); if (destination_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetIndex(destination->pixel_wands[id][x], GetPixelIndex(destination_indexes+x)); if (transfer(source,duplex,destination,context) == MagickFalse) status=MagickFalse; for (x=0; x < (ssize_t) destination->region.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->region.width; x++) SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum( destination->pixel_wands[id][x])); sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_DuplexTransferPixelViewIterator) #endif proceed=SetImageProgress(source_image,DuplexTransferPixelViewTag, progress++,source->region.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewException() returns the severity, reason, and description of any % error that occurs when utilizing a pixel view. 
% % The format of the GetPixelViewException method is: % % char *GetPixelViewException(const PixelWand *pixel_view, % ExceptionType *severity) % % A description of each parameter follows: % % o pixel_view: the pixel pixel_view. % % o severity: the severity of the error is returned here. % */ WandExport char *GetPixelViewException(const PixelView *pixel_view, ExceptionType *severity) { char *description; assert(pixel_view != (const PixelView *) NULL); assert(pixel_view->signature == WandSignature); if (pixel_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name); assert(severity != (ExceptionType *) NULL); *severity=pixel_view->exception->severity; description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent, sizeof(*description)); if (description == (char *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", pixel_view->name); *description='\0'; if (pixel_view->exception->reason != (char *) NULL) (void) CopyMagickString(description,GetLocaleExceptionMessage( pixel_view->exception->severity,pixel_view->exception->reason), MaxTextExtent); if (pixel_view->exception->description != (char *) NULL) { (void) ConcatenateMagickString(description," (",MaxTextExtent); (void) ConcatenateMagickString(description,GetLocaleExceptionMessage( pixel_view->exception->severity,pixel_view->exception->description), MaxTextExtent); (void) ConcatenateMagickString(description,")",MaxTextExtent); } return(description); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w H e i g h t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewHeight() returns the pixel view height. % % The format of the GetPixelViewHeight method is: % % size_t GetPixelViewHeight(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. 
% */ WandExport size_t GetPixelViewHeight(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->region.height); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewIterator() iterates over the pixel view in parallel and calls % your get method for each scanline of the view. The pixel region is % not confined to the image canvas-- that is you can include negative offsets % or widths or heights that exceed the image dimension. Any updates to % the pixels in your callback are ignored. % % Use this pragma: % % #pragma omp critical % % to define a section of code in your callback get method that must be % executed by a single thread at a time. % % The format of the GetPixelViewIterator method is: % % MagickBooleanType GetPixelViewIterator(PixelView *source, % GetPixelViewMethod get,void *context) % % A description of each parameter follows: % % o source: the source pixel view. % % o get: the get callback method. % % o context: the user defined context. 
%
*/
WandExport MagickBooleanType GetPixelViewIterator(PixelView *source,
  GetPixelViewMethod get,void *context)
{
#define GetPixelViewTag  "PixelView/Get"

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (get == (GetPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  /*
    NOTE(review): the loop bound compares y against region.height alone; for a
    view with a nonzero region.y this looks like it should be
    region.y+region.height -- confirm against callers before changing.
  */
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's pixel wand row */

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *pixels;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a previous scanline failed; skip remaining iterations */
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    /*
      Copy the scanline into this thread's pixel wands; black (CMYK) and
      colormap index channels are filled from the index queue when present.
    */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (get(source,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_GetPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,GetPixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t P i x e l V i e w P i x e l s                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewPixels() returns the pixel view pixel_wands.
%
%  The format of the GetPixelViewPixels method is:
%
%      PixelWand *GetPixelViewPixels(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport PixelWand **GetPixelViewPixels(const PixelView *pixel_view)
{
  const int
    id = GetOpenMPThreadId();  /* each thread owns a private wand row */

  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->pixel_wands[id]);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t P i x e l V i e w W a n d                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewWand() returns the magick wand associated with the pixel view.
%
%  The format of the GetPixelViewWand method is:
%
%      MagickWand *GetPixelViewWand(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport MagickWand *GetPixelViewWand(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t P i x e l V i e w W i d t h                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewWidth() returns the pixel view width.
%
%  The format of the GetPixelViewWidth method is:
%
%      size_t GetPixelViewWidth(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport size_t GetPixelViewWidth(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.width);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t P i x e l V i e w X                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewX() returns the pixel view x offset.
%
%  The format of the GetPixelViewX method is:
%
%      ssize_t GetPixelViewX(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport ssize_t GetPixelViewX(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.x);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t P i x e l V i e w Y                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewY() returns the pixel view y offset.
%
%  The format of the GetPixelViewY method is:
%
%      ssize_t GetPixelViewY(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport ssize_t GetPixelViewY(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.y);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I s P i x e l V i e w                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsPixelView() returns MagickTrue if the parameter is verified as a pixel
%  view container.
%
%  The format of the IsPixelView method is:
%
%      MagickBooleanType IsPixelView(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
% */ WandExport MagickBooleanType IsPixelView(const PixelView *pixel_view) { size_t length; if (pixel_view == (const PixelView *) NULL) return(MagickFalse); if (pixel_view->signature != WandSignature) return(MagickFalse); length=strlen(PixelViewId); if (LocaleNCompare(pixel_view->name,PixelViewId,length) != 0) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k C l i p P a t h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickClipPathImage() clips along the named paths from the 8BIM profile, if % present. Later operations take effect inside the path. Id may be a number % if preceded with #, to work on a numbered path, e.g., "#1" to use the first % path. % % The format of the MagickClipPathImage method is: % % MagickBooleanType MagickClipPathImage(MagickWand *wand, % const char *pathname,const MagickBooleanType inside) % % A description of each parameter follows: % % o wand: the magick wand. % % o pathname: name of clipping path resource. If name is preceded by #, use % clipping path numbered by name. % % o inside: if non-zero, later operations take effect inside clipping path. % Otherwise later operations take effect outside clipping path. % */ WandExport MagickBooleanType MagickClipPathImage(MagickWand *wand, const char *pathname,const MagickBooleanType inside) { return(MagickClipImagePath(wand,pathname,inside)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w G e t F i l l A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawGetFillAlpha() returns the alpha used when drawing using the fill % color or fill texture. Fully opaque is 1.0. 
%
%  The format of the DrawGetFillAlpha method is:
%
%      double DrawGetFillAlpha(const DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport double DrawGetFillAlpha(const DrawingWand *wand)
{
  /* Deprecated alias for DrawGetFillOpacity(). */
  return(DrawGetFillOpacity(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w G e t S t r o k e A l p h a                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawGetStrokeAlpha() returns the alpha of stroked object outlines.
%
%  The format of the DrawGetStrokeAlpha method is:
%
%      double DrawGetStrokeAlpha(const DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
*/
WandExport double DrawGetStrokeAlpha(const DrawingWand *wand)
{
  /* Deprecated alias for DrawGetStrokeOpacity(). */
  return(DrawGetStrokeOpacity(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w P e e k G r a p h i c W a n d                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPeekGraphicWand() returns the current drawing wand.
%
%  The format of the PeekDrawingWand method is:
%
%      DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
{
  /* Deprecated alias for PeekDrawingWand(). */
  return(PeekDrawingWand(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w P o p G r a p h i c C o n t e x t                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPopGraphicContext() destroys the current drawing wand and returns to
%  the previously pushed drawing wand.  Multiple drawing wands may exist.  It
%  is an error to attempt to pop more drawing wands than have been pushed,
%  and it is proper form to pop all drawing wands which have been pushed.
%
%  The format of the DrawPopGraphicContext method is:
%
%      void DrawPopGraphicContext(DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport void DrawPopGraphicContext(DrawingWand *wand)
{
  /* Deprecated alias for PopDrawingWand(); the status is discarded. */
  (void) PopDrawingWand(wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w P u s h G r a p h i c C o n t e x t                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPushGraphicContext() clones the current drawing wand to create a new
%  drawing wand.  The original drawing wand(s) may be returned to by
%  invoking PopDrawingWand().  The drawing wands are stored on a drawing wand
%  stack.  For every Pop there must have already been an equivalent Push.
%
%  The format of the DrawPushGraphicContext method is:
%
%      void DrawPushGraphicContext(DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport void DrawPushGraphicContext(DrawingWand *wand)
{
  /* Deprecated alias for PushDrawingWand(); the status is discarded. */
  (void) PushDrawingWand(wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w S e t F i l l A l p h a                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawSetFillAlpha() sets the alpha to use when drawing using the fill
%  color or fill texture.  Fully opaque is 1.0.
%
%  The format of the DrawSetFillAlpha method is:
%
%      void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
%    o fill_alpha: fill alpha
%
*/
WandExport void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
{
  /* Deprecated alias for DrawSetFillOpacity(). */
  DrawSetFillOpacity(wand,fill_alpha);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w S e t S t r o k e A l p h a                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawSetStrokeAlpha() specifies the alpha of stroked object outlines.
%
%  The format of the DrawSetStrokeAlpha method is:
%
%      void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
%    o stroke_alpha: stroke alpha.  The value 1.0 is opaque.
%
*/
WandExport void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
{
  /* Deprecated alias for DrawSetStrokeOpacity(). */
  DrawSetStrokeOpacity(wand,stroke_alpha);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k C o l o r F l o o d f i l l I m a g e                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickColorFloodfillImage() changes the color value of any pixel that
%  matches target and is an immediate neighbor.  If the method
%  FillToBorderMethod is specified, the color value is changed for any
%  neighbor pixel that does not match the bordercolor member of image.
%
%  The format of the MagickColorFloodfillImage method is:
%
%      MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
%        const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
%        const ssize_t x,const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o fill: the floodfill color pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.
%      For example, set fuzz to 10 and the
%      color red at intensities of 100 and 102 respectively are now
%      interpreted as the same color for the purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
  const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  DrawInfo
    *draw_info;

  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  draw_info=CloneDrawInfo(wand->image_info,(DrawInfo *) NULL);
  PixelGetQuantumColor(fill,&draw_info->fill);
  /*
    Seed the target color from the pixel at (x,y), wrapped into the canvas.
    NOTE(review): the signed x/y modulo against unsigned columns/rows looks
    suspect for negative coordinates -- confirm callers only pass
    non-negative offsets.
  */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  status=ColorFloodfillImage(wand->images,draw_info,target,x,y,
    bordercolor != (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k D e s c r i b e I m a g e                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickDescribeImage() identifies an image by printing its attributes to
%  the file.  Attributes include the image width, height, size, and others.
%
%  The format of the MagickDescribeImage method is:
%
%      char *MagickDescribeImage(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport char *MagickDescribeImage(MagickWand *wand)
{
  /* Deprecated alias for MagickIdentifyImage(); caller frees the string. */
  return(MagickIdentifyImage(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k F l a t t e n I m a g e s                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickFlattenImages() merges a sequence of images.  This useful for
%  combining Photoshop layers into a single image.
%
%  The format of the MagickFlattenImages method is:
%
%      MagickWand *MagickFlattenImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickWand *MagickFlattenImages(MagickWand *wand)
{
  Image
    *flatten_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  flatten_image=FlattenImages(wand->images,wand->exception);
  if (flatten_image == (Image *) NULL)
    return((MagickWand *) NULL);
  /* The result is returned as a new wand; the source wand is unchanged. */
  return(CloneMagickWandFromImages(wand,flatten_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k G e t I m a g e A t t r i b u t e                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageAttribute() returns a value associated with the specified
%  property.  Use MagickRelinquishMemory() to free the value when you are
%  finished with it.
%
%  The format of the MagickGetImageAttribute method is:
%
%      char *MagickGetImageAttribute(MagickWand *wand,const char *property)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o property: the property.
%
*/
WandExport char *MagickGetImageAttribute(MagickWand *wand,const char *property)
{
  /* Deprecated alias for MagickGetImageProperty(). */
  return(MagickGetImageProperty(wand,property));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   M a g i c k G e t I m a g e I n d e x                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageIndex() returns the index of the current image.
%
%  The format of the MagickGetImageIndex method is:
%
%      ssize_t MagickGetImageIndex(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport ssize_t MagickGetImageIndex(MagickWand *wand)
{
  /* Deprecated alias for MagickGetIteratorIndex(). */
  return(MagickGetIteratorIndex(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   M a g i c k G e t I m a g e C h a n n e l E x t r e m a                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageChannelExtrema() gets the extrema for one or more image
%  channels.
%
%  The format of the MagickGetImageChannelExtrema method is:
%
%      MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
%        const ChannelType channel,size_t *minima,size_t *maxima)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the image channel(s).
%
%    o minima: The minimum pixel value for the specified channel(s).
%
%    o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
  const ChannelType channel,size_t *minima,size_t *maxima)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=GetImageChannelExtrema(wand->images,channel,minima,maxima,
    wand->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   M a g i c k G e t I m a g e E x t r e m a                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageExtrema() gets the extrema for the image.
%
%  The format of the MagickGetImageExtrema method is:
%
%      MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
%        size_t *minima,size_t *maxima)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o minima: The minimum pixel value for the specified channel(s).
%
%    o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
  size_t *minima,size_t *maxima)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=GetImageExtrema(wand->images,minima,maxima,wand->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k G e t I m a g e M a t t e                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageMatte() returns MagickTrue if the image has a matte channel
%  otherwise MagickFalse.
%
%  The format of the MagickGetImageMatte method is:
%
%      MagickBooleanType MagickGetImageMatte(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickBooleanType MagickGetImageMatte(MagickWand *wand)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  return(wand->images->matte);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k G e t I m a g e P i x e l s                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImagePixels() extracts pixel data from an image and returns it to
%  you.  The method returns MagickTrue on success otherwise MagickFalse if an
%  error is encountered.  The data is returned as char, short int, int,
%  ssize_t, float, or double in the order specified by map.
%
%  Suppose you want to extract the first scanline of a 640x480 image as
%  character data in red-green-blue order:
%
%      MagickGetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
%  The format of the MagickGetImagePixels method is:
%
%      MagickBooleanType MagickGetImagePixels(MagickWand *wand,
%        const ssize_t x,const ssize_t y,const size_t columns,
%        const size_t rows,const char *map,const StorageType storage,
%        void *pixels)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x, y, columns, rows:  These values define the perimeter
%      of a region of pixels you want to extract.
%
%    o map:  This string reflects the expected ordering of the pixel array.
%      It can be any combination or order of R = red, G = green, B = blue,
%      A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
%      Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
%      P = pad.
%
%    o storage: Define the data type of the pixels.  Float and double types
%      are expected to be normalized [0..1] otherwise [0..QuantumRange].
%      Choose from these types: CharPixel, DoublePixel, FloatPixel,
%      IntegerPixel, LongPixel, QuantumPixel, or ShortPixel.
%
%    o pixels: This array of values contain the pixel components as defined
%      by map and type.  You must preallocate this array where the expected
%      length varies depending on the values of width, height, map, and type.
%
*/
WandExport MagickBooleanType MagickGetImagePixels(MagickWand *wand,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,const char *map,const StorageType storage,
  void *pixels)
{
  /* Deprecated alias for MagickExportImagePixels(). */
  return(MagickExportImagePixels(wand,x,y,columns,rows,map,storage,pixels));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k G e t I m a g e S i z e                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageSize() returns the image length in bytes.
%
%  The format of the MagickGetImageSize method is:
%
%      MagickSizeType MagickGetImageSize(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickSizeType MagickGetImageSize(MagickWand *wand)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* The blob size of the current image, in bytes. */
  return(GetBlobSize(wand->images));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M a p I m a g e                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMapImage() replaces the colors of an image with the closest color
%  from a reference image.
%
%  The format of the MagickMapImage method is:
%
%      MagickBooleanType MagickMapImage(MagickWand *wand,
%        const MagickWand *map_wand,const MagickBooleanType dither)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o map: the map wand.
%
%    o dither: Set this integer value to something other than zero to dither
%      the mapped image.
%
*/
WandExport MagickBooleanType MagickMapImage(MagickWand *wand,
  const MagickWand *map_wand,const MagickBooleanType dither)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if ((wand->images == (Image *) NULL) ||
      (map_wand->images == (Image *) NULL))
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=MapImage(wand->images,map_wand->images,dither);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M a t t e F l o o d f i l l I m a g e                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMatteFloodfillImage() changes the transparency value of any pixel
%  that matches target and is an immediate neighbor.  If the method
%  FillToBorderMethod is specified, the transparency value is changed for any
%  neighbor pixel that does not match the bordercolor member of image.
%
%  The format of the MagickMatteFloodfillImage method is:
%
%      MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
%        const double alpha,const double fuzz,const PixelWand *bordercolor,
%        const ssize_t x,const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
%      transparent.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
  const double alpha,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /*
    Seed the target color from the pixel at (x,y), wrapped into the canvas;
    an explicit border color overrides it for FillToBorderMethod.
    (A DrawInfo clone/destroy pair was removed here: it was never used.)
  */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  /* Convert opacity in [0.0,1.0] (1.0 opaque) to a quantum opacity value. */
  status=MatteFloodfillImage(wand->images,target,ClampToQuantum(
    (MagickRealType) QuantumRange-QuantumRange*alpha),x,y,bordercolor !=
    (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M e d i a n F i l t e r I m a g e                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMedianFilterImage() applies a digital filter that improves the
%  quality of a noisy image.  Each pixel is replaced by the median in a set
%  of neighboring pixels as defined by radius.
%
%  The format of the MagickMedianFilterImage method is:
%
%      MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
  const double radius)
{
  Image
    *median_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  median_image=MedianFilterImage(wand->images,radius,wand->exception);
  if (median_image == (Image *) NULL)
    return(MagickFalse);
  /* Replace the current image in place with the filtered result. */
  ReplaceImageInList(&wand->images,median_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M i n i m u m I m a g e s                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMinimumImages() returns the minimum intensity of an image sequence.
%
%  The format of the MagickMinimumImages method is:
%
%      MagickWand *MagickMinimumImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMinimumImages(MagickWand *wand)
{
  Image
    *minimum_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  minimum_image=EvaluateImages(wand->images,MinEvaluateOperator,
    wand->exception);
  if (minimum_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,minimum_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M o d e I m a g e                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickModeImage() makes each pixel the 'predominant color' of the
%  neighborhood of the specified radius.
%
%  The format of the MagickModeImage method is:
%
%      MagickBooleanType MagickModeImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickModeImage(MagickWand *wand,
  const double radius)
{
  Image
    *mode_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  mode_image=ModeImage(wand->images,radius,wand->exception);
  if (mode_image == (Image *) NULL)
    return(MagickFalse);
  /* Replace the current image in place with the filtered result. */
  ReplaceImageInList(&wand->images,mode_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M o s a i c I m a g e s                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMosaicImages() inlays an image sequence to form a single coherent
%  picture.  It returns a wand with each image in the sequence composited at
%  the location defined by the page offset of the image.
%
%  The format of the MagickMosaicImages method is:
%
%      MagickWand *MagickMosaicImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMosaicImages(MagickWand *wand)
{
  Image
    *canvas_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  /*
    Composite every image in the sequence at its page offset; wrap the
    resulting canvas in a new wand.
  */
  canvas_image=MosaicImages(wand->images,wand->exception);
  if (canvas_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,canvas_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k O p a q u e I m a g e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickOpaqueImage() changes any pixel that matches color with the color
%  defined by fill.
%
%  The format of the MagickOpaqueImage method is:
%
%      MagickBooleanType MagickOpaqueImage(MagickWand *wand,
%        const PixelWand *target,const PixelWand *fill,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o target: Change this target color to the fill color within the image.
%
%    o fill: the fill pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  MagickBooleanType
    status;

  /*
    Deprecated alias: defer to MagickPaintOpaqueImage() unchanged.
  */
  status=MagickPaintOpaqueImage(wand,target,fill,fuzz);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k P a i n t F l o o d f i l l I m a g e                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintFloodfillImage() changes the color value of any pixel that
%  matches target and is an immediate neighbor.  If the method
%  FillToBorderMethod is specified, the color value is changed for any
%  neighbor pixel that does not match the bordercolor member of image.
%
%  The format of the MagickPaintFloodfillImage method is:
%
%      MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
%        const ChannelType channel,const PixelWand *fill,const double fuzz,
%        const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o fill: the floodfill color pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
  const ChannelType channel,const PixelWand *fill,const double fuzz,
  const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
{
  /*
    Deprecated alias: a plain floodfill is MagickFloodfillPaintImage() with
    invert == MagickFalse.
  */
  return(MagickFloodfillPaintImage(wand,channel,fill,fuzz,bordercolor,x,y,
    MagickFalse));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k P a i n t O p a q u e I m a g e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintOpaqueImage() changes any pixel that matches color with the
%  color defined by fill.
%
%  The format of the MagickPaintOpaqueImage method is:
%
%      MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
%        const PixelWand *target,const PixelWand *fill,const double fuzz)
%      MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
%        const ChannelType channel,const PixelWand *target,
%        const PixelWand *fill,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o target: Change this target color to the fill color within the image.
%
%    o fill: the fill pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  MagickBooleanType
    status;

  /*
    All-channel variant: forward with DefaultChannels.
  */
  status=MagickPaintOpaqueImageChannel(wand,DefaultChannels,target,fill,fuzz);
  return(status);
}

WandExport MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
  const ChannelType channel,const PixelWand *target,const PixelWand *fill,
  const double fuzz)
{
  /*
    Deprecated alias: opaque paint with invert == MagickFalse.
  */
  return(MagickOpaquePaintImageChannel(wand,channel,target,fill,fuzz,
    MagickFalse));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k P a i n t T r a n s p a r e n t I m a g e                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintTransparentImage() changes any pixel that matches color with
%  the color defined by fill.
%
%  The format of the MagickPaintTransparentImage method is:
%
%      MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
%        const PixelWand *target,const double alpha,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o target: Change this target color to specified opacity value within
%      the image.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is
%      fully transparent.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
% */ WandExport MagickBooleanType MagickPaintTransparentImage(MagickWand *wand, const PixelWand *target,const double alpha,const double fuzz) { return(MagickTransparentPaintImage(wand,target,alpha,fuzz,MagickFalse)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k R a d i a l B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickRadialBlurImage() radial blurs an image. % % The format of the MagickRadialBlurImage method is: % % MagickBooleanType MagickRadialBlurImage(MagickWand *wand, % const double angle) % MagickBooleanType MagickRadialBlurImageChannel(MagickWand *wand, % const ChannelType channel,const double angle) % % A description of each parameter follows: % % o wand: the magick wand. % % o channel: the image channel(s). % % o angle: the angle of the blur in degrees. % */ WandExport MagickBooleanType MagickRadialBlurImage(MagickWand *wand, const double angle) { return(MagickRotationalBlurImage(wand,angle)); } WandExport MagickBooleanType MagickRadialBlurImageChannel(MagickWand *wand, const ChannelType channel,const double angle) { return(MagickRotationalBlurImageChannel(wand,channel,angle)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k R e c o l o r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickRecolorImage() apply color transformation to an image. The method % permits saturation changes, hue rotation, luminance to alpha, and various % other effects. Although variable-sized transformation matrices can be used, % typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA % (or RGBA with offsets). The matrix is similar to those used by Adobe Flash % except offsets are in column 6 rather than 5 (in support of CMYKA images) % and offsets are normalized (divide Flash offset by 255). 
%
%  The format of the MagickRecolorImage method is:
%
%      MagickBooleanType MagickRecolorImage(MagickWand *wand,
%        const size_t order,const double *color_matrix)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o order: the number of columns and rows in the color matrix.
%
%    o color_matrix: An array of doubles representing the color matrix.
%
*/
WandExport MagickBooleanType MagickRecolorImage(MagickWand *wand,
  const size_t order,const double *color_matrix)
{
  Image
    *recolor_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /*
    A missing matrix is an error for the caller, not an exception.
  */
  if (color_matrix == (const double *) NULL)
    return(MagickFalse);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  recolor_image=RecolorImage(wand->images,order,color_matrix,
    wand->exception);
  if (recolor_image != (Image *) NULL)
    {
      ReplaceImageInList(&wand->images,recolor_image);
      return(MagickTrue);
    }
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k R e d u c e N o i s e I m a g e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickReduceNoiseImage() smooths the contours of an image while still
%  preserving edge information.  The algorithm works by replacing each pixel
%  with its neighbor closest in value.  A neighbor is defined by radius.  Use
%  a radius of 0 and ReduceNoise() selects a suitable radius for you.
%
%  The format of the MagickReduceNoiseImage method is:
%
%      MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
% */ WandExport MagickBooleanType MagickReduceNoiseImage(MagickWand *wand, const double radius) { Image *noise_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) ThrowWandException(WandError,"ContainsNoImages",wand->name); noise_image=ReduceNoiseImage(wand->images,radius,wand->exception); if (noise_image == (Image *) NULL) return(MagickFalse); ReplaceImageInList(&wand->images,noise_image); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k M a x i m u m I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickMaximumImages() returns the maximum intensity of an image sequence. % % The format of the MagickMaximumImages method is: % % MagickWand *MagickMaximumImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. % */ WandExport MagickWand *MagickMaximumImages(MagickWand *wand) { Image *maximum_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) return((MagickWand *) NULL); maximum_image=EvaluateImages(wand->images,MaxEvaluateOperator, wand->exception); if (maximum_image == (Image *) NULL) return((MagickWand *) NULL); return(CloneMagickWandFromImages(wand,maximum_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k S e t I m a g e A t t r i b u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSetImageAttribute() associates a property with an image. 
% % The format of the MagickSetImageAttribute method is: % % MagickBooleanType MagickSetImageAttribute(MagickWand *wand, % const char *property,const char *value) % % A description of each parameter follows: % % o wand: the magick wand. % % o property: the property. % % o value: the value. % */ WandExport MagickBooleanType MagickSetImageAttribute(MagickWand *wand, const char *property,const char *value) { return(SetImageProperty(wand->images,property,value)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k S e t I m a g e I n d e x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSetImageIndex() set the current image to the position of the list % specified with the index parameter. % % The format of the MagickSetImageIndex method is: % % MagickBooleanType MagickSetImageIndex(MagickWand *wand, % const ssize_t index) % % A description of each parameter follows: % % o wand: the magick wand. % % o index: the scene number. % */ WandExport MagickBooleanType MagickSetImageIndex(MagickWand *wand, const ssize_t index) { return(MagickSetIteratorIndex(wand,index)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a g i c k S e t I m a g e O p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSetImageOption() associates one or options with a particular image % format (.e.g MagickSetImageOption(wand,"jpeg","perserve","yes"). % % The format of the MagickSetImageOption method is: % % MagickBooleanType MagickSetImageOption(MagickWand *wand, % const char *format,const char *key,const char *value) % % A description of each parameter follows: % % o wand: the magick wand. % % o format: the image format. % % o key: The key. % % o value: The value. 
%
*/
WandExport MagickBooleanType MagickSetImageOption(MagickWand *wand,
  const char *format,const char *key,const char *value)
{
  char
    option[MaxTextExtent];

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /*
    Fix: passing a NULL pointer to a %s conversion is undefined behavior;
    reject missing arguments explicitly instead of formatting them.
  */
  if ((format == (const char *) NULL) || (key == (const char *) NULL) ||
      (value == (const char *) NULL))
    return(MagickFalse);
  (void) FormatLocaleString(option,MaxTextExtent,"%s:%s=%s",format,key,value);
  return(DefineImageOption(wand->image_info,option));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k T r a n s p a r e n t I m a g e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickTransparentImage() changes any pixel that matches color with the
%  color defined by fill.
%
%  The format of the MagickTransparentImage method is:
%
%      MagickBooleanType MagickTransparentImage(MagickWand *wand,
%        const PixelWand *target,const double alpha,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o target: Change this target color to specified opacity value within
%      the image.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is
%      fully transparent.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  MagickBooleanType
    status;

  /*
    Deprecated alias: defer to MagickPaintTransparentImage().
  */
  status=MagickPaintTransparentImage(wand,target,alpha,fuzz);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k R e g i o n O f I n t e r e s t I m a g e                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickRegionOfInterestImage() extracts a region of the image and returns
%  it as a new wand.
%
%  The format of the MagickRegionOfInterestImage method is:
%
%      MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
%        const size_t width,const size_t height,const ssize_t x,
%        const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o width: the region width.
%
%    o height: the region height.
%
%    o x: the region x offset.
%
%    o y: the region y offset.
%
*/
WandExport MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
  const size_t width,const size_t height,const ssize_t x,
  const ssize_t y)
{
  MagickWand
    *region_wand;

  /*
    Deprecated alias: defer to MagickGetImageRegion().
  */
  region_wand=MagickGetImageRegion(wand,width,height,x,y);
  return(region_wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k S e t I m a g e P i x e l s                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImagePixels() accepts pixel data and stores it in the image at
%  the location you specify.  The method returns MagickFalse on success
%  otherwise MagickTrue if an error is encountered.  The pixel data can be
%  either char, short int, int, ssize_t, float, or double in the order
%  specified by map.
% % Suppose your want to upload the first scanline of a 640x480 image from % character data in red-green-blue order: % % MagickSetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels); % % The format of the MagickSetImagePixels method is: % % MagickBooleanType MagickSetImagePixels(MagickWand *wand, % const ssize_t x,const ssize_t y,const size_t columns, % const size_t rows,const char *map,const StorageType storage, % const void *pixels) % % A description of each parameter follows: % % o wand: the magick wand. % % o x, y, columns, rows: These values define the perimeter of a region % of pixels you want to define. % % o map: This string reflects the expected ordering of the pixel array. % It can be any combination or order of R = red, G = green, B = blue, % A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan, % Y = yellow, M = magenta, K = black, I = intensity (for grayscale), % P = pad. % % o storage: Define the data type of the pixels. Float and double types are % expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from % these types: CharPixel, ShortPixel, IntegerPixel, LongPixel, FloatPixel, % or DoublePixel. % % o pixels: This array of values contain the pixel components as defined by % map and type. You must preallocate this array where the expected % length varies depending on the values of width, height, map, and type. % */ WandExport MagickBooleanType MagickSetImagePixels(MagickWand *wand, const ssize_t x,const ssize_t y,const size_t columns, const size_t rows,const char *map,const StorageType storage, const void *pixels) { return(MagickImportImagePixels(wand,x,y,columns,rows,map,storage,pixels)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k W r i t e I m a g e B l o b % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickWriteImageBlob() implements direct to memory image formats. 
It
%  returns the image as a blob and its length.  Use MagickSetFormat() to
%  set the format of the returned blob (GIF, JPEG, PNG, etc.).
%
%  Use MagickRelinquishMemory() to free the blob when you are done with it.
%
%  The format of the MagickWriteImageBlob method is:
%
%      unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o length: the length of the blob.
%
*/
WandExport unsigned char *MagickWriteImageBlob(MagickWand *wand,
  size_t *length)
{
  unsigned char
    *blob;

  /*
    Deprecated alias: defer to MagickGetImageBlob().
  */
  blob=MagickGetImageBlob(wand,length);
  return(blob);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   N e w P i x e l V i e w                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewPixelView() returns a pixel view required for all other methods in the
%  Pixel View API.
%
%  The format of the NewPixelView method is:
%
%      PixelView *NewPixelView(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the wand.
%
*/

/*
  AcquirePixelsThreadSet() allocates a per-thread array of pixel-wand rows
  (one row of number_wands wands for each of number_threads threads) so a
  view iterator can give each OpenMP thread private scratch wands.  Returns
  NULL on allocation failure; a partially built set is released through
  DestroyPixelsThreadSet() (defined elsewhere in this file).
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
  const size_t number_threads)
{
  PixelWand
    ***pixel_wands;

  register ssize_t
    i;

  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  /* Zero the table so a partial failure can be unwound safely. */
  (void) ResetMagickMemory(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_wands[i]=NewPixelWands(number_wands);
    if (pixel_wands[i] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
  }
  return(pixel_wands);
}

/*
  NewPixelView() builds a PixelView spanning the wand's entire first image:
  it acquires a fresh exception, a virtual cache view on wand->images, and
  one row of pixel wands (image width) per OpenMP thread.
*/
WandExport PixelView *NewPixelView(MagickWand *wand)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  /* NOTE(review): sibling wand methods in this file assert WandSignature;
     confirm MagickCoreSignature is intended here. */
  assert(wand->signature == MagickCoreSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /* wand must be assigned before the cache view dereferences it. */
  pixel_view->wand=wand;
  pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
    pixel_view->exception);
  pixel_view->region.width=wand->images->columns;
  pixel_view->region.height=wand->images->rows;
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   N e w P i x e l V i e w R e g i o n                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewPixelViewRegion() returns a pixel view required for all other methods
%  in the Pixel View API.
%
%  The format of the NewPixelViewRegion method is:
%
%      PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixel_wands view.
%
*/
WandExport PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickCoreSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /*
    Fix: assign the wand BEFORE acquiring the cache view.  The struct was
    just zeroed, so the original order dereferenced a NULL wand pointer in
    AcquireVirtualCacheView(pixel_view->wand->images,...).  NewPixelView()
    already uses the correct order.
  */
  pixel_view->wand=wand;
  pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
    pixel_view->exception);
  pixel_view->region.width=width;
  pixel_view->region.height=height;
  pixel_view->region.x=x;
  pixel_view->region.y=y;
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   P i x e l G e t N e x t R o w                                             %
%                                                                             %
% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PixelGetNextRow() returns the next row as an array of pixel wands from the % pixel iterator. % % The format of the PixelGetNextRow method is: % % PixelWand **PixelGetNextRow(PixelIterator *iterator, % size_t *number_wands) % % A description of each parameter follows: % % o iterator: the pixel iterator. % % o number_wands: the number of pixel wands. % */ WandExport PixelWand **PixelGetNextRow(PixelIterator *iterator) { size_t number_wands; return(PixelGetNextIteratorRow(iterator,&number_wands)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P i x e l I t e r a t o r G e t E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PixelIteratorGetException() returns the severity, reason, and description of % any error that occurs when using other methods in this API. % % The format of the PixelIteratorGetException method is: % % char *PixelIteratorGetException(const Pixeliterator *iterator, % ExceptionType *severity) % % A description of each parameter follows: % % o iterator: the pixel iterator. % % o severity: the severity of the error is returned here. % */ WandExport char *PixelIteratorGetException(const PixelIterator *iterator, ExceptionType *severity) { return(PixelGetIteratorException(iterator,severity)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t P i x e l V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelViewIterator() iterates over the pixel view in parallel and calls % your set method for each scanline of the view. The pixel region is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension. 
The pixels are initially
%  undefined and any settings you make in the callback method are
%  automagically synced back to your image.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetPixelViewIterator method is:
%
%      MagickBooleanType SetPixelViewIterator(PixelView *destination,
%        SetPixelViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the pixel view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType SetPixelViewIterator(PixelView *destination,
  SetPixelViewMethod set,void *context)
{
#define SetPixelViewTag  "PixelView/Set"

  ExceptionInfo
    *exception;

  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(destination != (PixelView *) NULL);
  assert(destination->signature == WandSignature);
  if (set == (SetPixelViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /* Writes require DirectClass pixel storage. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  /* NOTE(review): the loop bound is region.height, not region.y+height;
     confirm callers always use a zero y offset with this iterator. */
  for (y=destination->region.y; y < (ssize_t) destination->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict pixels;

    /* A failure in any thread aborts the remaining rows. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->region.x,y,destination->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* Invoke the user callback; it populates this thread's pixel wands. */
    if (set(destination,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the wand colors back into the authentic pixel row. */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_SetPixelViewIterator)
#endif
        proceed=SetImageProgress(destination_image,SetPixelViewTag,progress++,
          destination->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   T r a n s f e r P i x e l V i e w I t e r a t o r                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferPixelViewIterator() iterates over two pixel views in parallel and
%  calls your transfer method for each scanline of the view.  The source
%  pixel region is not confined to the image canvas-- that is you can
%  include negative offsets or widths or heights that exceed the image
%  dimension.  However, the destination pixel view is confined to the image
%  canvas-- that is no negative offsets or widths or heights that exceed the
%  image dimension are permitted.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the TransferPixelViewIterator method is:
%
%      MagickBooleanType TransferPixelViewIterator(PixelView *source,
%        PixelView *destination,TransferPixelViewMethod transfer,
%        void *context)
%
%  A description of each parameter follows:
%
%    o source: the source pixel view.
%
%    o destination: the destination pixel view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType TransferPixelViewIterator(PixelView *source,
  PixelView *destination,TransferPixelViewMethod transfer,void *context)
{
#define TransferPixelViewTag  "PixelView/Transfer"

  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  /* NOTE(review): `destination` is dereferenced below without an assert or
     NULL check, unlike `source`; consider validating it as well. */
  if (transfer == (TransferPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  /* The destination is written to, so it must be DirectClass. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict pixels;

    register IndexPacket
      *magick_restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict destination_pixels;

    if (status == MagickFalse)
      continue;
    /* Read-only source row (virtual pixels permit out-of-canvas regions). */
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    /* Load this thread's source wands from the source row. */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* Writable destination row. */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->region.x,y,destination->region.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* NOTE(review): the destination wands are pre-seeded from the SOURCE
       row (`pixels`/`indexes`), not from destination_pixels/
       destination_indexes.  This makes an identity transfer() a copy, but
       looks like it may be a copy-paste of the source loop — confirm the
       intended semantics before changing. */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* Invoke the user callback with both rows loaded. */
    if (transfer(source,destination,context) == MagickFalse)
      status=MagickFalse;
    /* Write the (possibly updated) destination wands back to the image. */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /* NOTE(review): a destination sync failure inherits the exception
           from source->view; destination->view seems the intended source —
           confirm. */
        InheritException(destination->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_TransferPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,TransferPixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   U p d a t e P i x e l V i e w I t e r a t o r                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UpdatePixelViewIterator() iterates over the pixel view in parallel and
%  calls your update method for each scanline of the view.  The pixel region
%  is confined to the image canvas-- that is no negative offsets or widths
%  or heights that exceed the image dimension are permitted.  Updates to
%  pixels in your callback are automagically synced back to the image.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback update method that must be
%  executed by a single thread at a time.
%
%  The format of the UpdatePixelViewIterator method is:
%
%      MagickBooleanType UpdatePixelViewIterator(PixelView *source,
%        UpdatePixelViewMethod update,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source pixel view.
%
%    o update: the update callback method.
%
%    o context: the user defined context.
%
*/
/*
  Iterate the view row by row (OpenMP-parallel when available), expose each
  scanline to the user callback as pixel wands, and sync any changes the
  callback made back into the image's pixel cache.  Returns MagickTrue on
  success, MagickFalse if the callback or any cache operation failed.
*/
WandExport MagickBooleanType UpdatePixelViewIterator(PixelView *source,
  UpdatePixelViewMethod update,void *context)
{
#define UpdatePixelViewTag  "PixelView/Update"

  ExceptionInfo
    *exception;

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (update == (UpdatePixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  /* pixels will be modified in place, so the image must be DirectClass */
  if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's wand scratch row */

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict pixels;

    /* another iteration already failed; skip remaining work (cannot break
       out of an OpenMP loop) */
    if (status == MagickFalse)
      continue;
    /* writable scanline straight from the pixel cache */
    pixels=GetCacheViewAuthenticPixels(source->view,source->region.x,y,
      source->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(source->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(source->view);
    /* load the scanline into this thread's pixel wands */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* user callback mutates the wands in place */
    if (update(source,context) == MagickFalse)
      status=MagickFalse;
    /* write the (possibly updated) wands back to the scanline */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        SetPixelIndex(indexes+x,PixelGetBlackQuantum(
          source->pixel_wands[id][x]));
    if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress counter is shared across threads; serialize the update */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_UpdatePixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,UpdatePixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
#endif
sparse_distance.c
#ifndef BASE_DIST #error "BASE_DIST not defined for 'sparse_distance.c' file" #elif BASE_DIST < 0 #error "BASE_DIST should be positive or 0" #elif BASE_DIST > 4 #error "BASE_DIST should be less or equal than 4" #endif #include "sparse_distance.h" #include <stdio.h> #include <sched.h> #include <omp.h> // ------------------------------------------------------------------------------------ // Local distance functions that are inlined in the caller's code // Note: BASE_DIST 0 and 1 are 0 if one element is 0 // BASE_DIST == 0 : x_i * y_i // no need to put it here // BASE_DIST == 1 : min(x_i, y_i) static inline double intersection(double x, double y) { return (x < y) ? x : y; } // BASE_DIST == 2 : |x_i - y_i| static inline double totvar(double x, double y) { return (x < y) ? (y-x) : (x-y); } // BASE_DIST == 3 : (x_i - y_i)^2 / (x_i + y_i) static inline double chisquare(double x, double y) { return (x + y) ? ((x-y)*(x-y)/(x+y)) : 0.0; } // BASE_DIST == 4 : (x_i - y_i)^2 static inline double l2(double x, double y) { return (x-y)*(x-y); } // ------------------------------------------------------------------------------------ // Vector and matrix distance computations // vector to vector double v2v( int x_nnz, int *x_indices, double *x_data, int y_nnz, int *y_indices, double *y_data) { int big = 1<<30; double out_val = 0.0; // number of non-zero elements processed in x, y respectively int nx = 0; int ny = 0; while ( 1 ) { // compare the indices of current non-zero vw count in both bofs // current vw index int cur_x_ind, cur_y_ind; // check if we processed all elements of x if (nx < x_nnz) cur_x_ind = x_indices[nx]; else cur_x_ind = big; // all non-zero x elements processed // check if we processed all elements of y if (ny < y_nnz) cur_y_ind = y_indices[ny]; else cur_y_ind = big; // all non-zero y elements processed // check if we finished processing all non-zero elements of *both* vectors if (cur_x_ind == big && cur_y_ind == big) return out_val; if (cur_x_ind < cur_y_ind) 
{// we are further in y than in x => 0 at this index in y #if BASE_DIST == 2 out_val += totvar(x_data[nx], 0.0); // total variation #elif BASE_DIST == 3 out_val += x_data[nx]; // chi-square #elif BASE_DIST == 4 out_val += l2(x_data[nx], 0.0); // l2 #endif nx++; // => linear or min = 0 and advance in x only } else if (cur_x_ind > cur_y_ind) {// reverse case wrt above #if BASE_DIST == 2 out_val += totvar(0.0, y_data[ny]); // total variation #elif BASE_DIST == 3 out_val += y_data[ny]; // chi-square #elif BASE_DIST == 4 out_val += l2(0.0, y_data[ny]); // l2 #endif ny++; } else { // same index => both have non-zero element at this index #if BASE_DIST == 0 out_val += x_data[nx] * y_data[ny]; // use linear #elif BASE_DIST == 1 out_val += intersection(x_data[nx], y_data[ny]); // use intersection #elif BASE_DIST == 2 out_val += totvar(x_data[nx], y_data[ny]); // use total variation #elif BASE_DIST == 3 out_val += chisquare(x_data[nx], y_data[ny]); // use chi-square #elif BASE_DIST == 4 out_val += l2(x_data[nx], y_data[ny]); // use l2 #endif // advance in both nx++; ny++; } } } // vector to matrix void v2m( double *out_values, int n, int *x_indptr, int x_indptr_dim, int *x_indices, int x_indices_dim, double *x_data, int x_data_dim, int *m_indptr, int m_indptr_dim, int *m_indices, int m_indices_dim, double *m_data, int m_data_dim) { // iterate over all columns of m // parallelization with OpenMP: not efficient here because tasks are too small in general int i; for (i=0; i < n; i++) { // compute K(x, m[i]) // in python, list of indices (in the dense vector) of the non-zero elements for m column i: // m_indices[m_indptr[i]:m_indptr[i+1]] (similar for data) int mi_nnz = m_indptr[i+1] - m_indptr[i]; int *mi_indices = m_indices + m_indptr[i]; double *mi_data = m_data + m_indptr[i]; out_values[i] = v2v( x_indptr[1], x_indices, x_data, mi_nnz, mi_indices, mi_data); } } // matrix to matrix void m2m( double *out_mat_test, int num_test_samps, int num_train_samps, int *mtest_indptr, int 
mtest_indptr_dim, int *mtest_indices, int mtest_indices_dim, double *mtest_data, int mtest_data_dim, int *m_indptr, int m_indptr_dim, int *m_indices, int m_indices_dim, double *m_data, int m_data_dim, int num_threads) { // parallelization with OpenMP if (num_threads > 0) omp_set_num_threads(num_threads); else omp_set_num_threads(omp_get_num_procs()); // requests 1 thread per core int i; #pragma omp parallel { #pragma omp for for (i=0; i < num_test_samps; i++) { // distances for test sample i int mti_nnz = mtest_indptr[i+1] - mtest_indptr[i]; int *mti_indices = mtest_indices + mtest_indptr[i]; double *mti_data = mtest_data + mtest_indptr[i]; // compute K(mtest[i], m[j]) int j; for (j=0; j < num_train_samps; j++) { out_mat_test[i*num_train_samps+j] = v2v( mti_nnz, mti_indices, mti_data, m_indptr[j+1] - m_indptr[j], m_indices + m_indptr[j], m_data + m_indptr[j]); //fprintf(stderr,"num_test_samps=%d num_train_samps=%d i=%d j=%d k=%f\n", // num_test_samps, num_train_samps, i, j, out_mat_test[i*num_test_samps+j]); //fflush(stderr); } } } } // Gram matrix void gram( double *out_mat, int num_samps, int *m_indptr, int m_indptr_dim, int *m_indices, int m_indices_dim, double *m_data, int m_data_dim, int num_threads) { // parallelization with OpenMP if (num_threads > 0) omp_set_num_threads(num_threads); else omp_set_num_threads(omp_get_num_procs()); // requests 1 thread per core int i; #pragma omp parallel { #pragma omp for for (i=0; i < num_samps; i++) { // fill lower triangular matrix (including diagonal) //fprintf(stderr,"thread_id = %u, #threads = %u, #procs_available = %u, in parallel: %s\n", // omp_get_thread_num(), omp_get_num_threads(), omp_get_num_procs(), omp_in_parallel()?"yes":"no"); //fflush(stderr); int mi_nnz = m_indptr[i+1] - m_indptr[i]; int *mi_indices = m_indices + m_indptr[i]; double *mi_data = m_data + m_indptr[i]; // compute K(m[i], m[j]) int j; for (j=0; j <= i; j++) out_mat[i*num_samps+j] = v2v( mi_nnz, mi_indices, mi_data, m_indptr[j+1] - m_indptr[j], 
m_indices + m_indptr[j], m_data + m_indptr[j]); } } // symmetrically fill upper triangular matrix (excluding diagonal) int k, p; for (k=1; k < num_samps; k++) for (p=0; p < k; p++) out_mat[p*num_samps+k] = out_mat[k*num_samps+p]; // K[p,k] }
imd_main_risc_2d.c
/******************************************************************************
*
* IMD -- The ITAP Molecular Dynamics Program
*
* Copyright 1996-2011 Institute for Theoretical and Applied Physics,
* University of Stuttgart, D-70550 Stuttgart
*
******************************************************************************/

/******************************************************************************
*
* imd_main_risc_2d.c -- main loop, risc specific part, two dimensions
*
******************************************************************************/

/******************************************************************************
* $Revision$
* $Date$
******************************************************************************/

#include "imd.h"

/*****************************************************************************
*
*  calc_forces
*
*  Zero all per-atom and global force/virial accumulators, then accumulate
*  pair forces over every interacting cell pair via do_forces().
*
*****************************************************************************/

void calc_forces(int steps)
{
  int n, k;

  /* clear global accumulation variables */
  tot_pot_energy = 0.0;
  virial = 0.0;
  vir_xx = 0.0;
  vir_yy = 0.0;
  vir_xy = 0.0;
  /* fix: do_forces() below also accumulates into the out-of-plane
     components; clear them per call as well so they cannot grow across
     successive force computations */
  vir_zz = 0.0;
  vir_yz = 0.0;
  vir_zx = 0.0;
  nfc++;

  /* clear per atom accumulation variables */
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (k=0; k<ncells; ++k) {
    int  i;
    cell *p;
    p = cell_array + k;
    for (i=0; i<p->n; ++i) {
      KRAFT(p,i,X) = 0.0;
      KRAFT(p,i,Y) = 0.0;
      POTENG(p,i)  = 0.0;
#ifdef NNBR
      NBANZ(p,i) = 0;
#endif
#if defined(STRESS_TENS)
      PRESSTENS(p,i,xx) = 0.0;
      PRESSTENS(p,i,yy) = 0.0;
      PRESSTENS(p,i,xy) = 0.0;
#endif
    }
  }

#ifdef RIGID
  /* clear total forces */
  if ( nsuperatoms>0 )
    for(k=0; k<nsuperatoms; k++) {
      superforce[k].x = 0.0;
      superforce[k].y = 0.0;
    }
#endif

  /* compute forces for all pairs of cells */
  for (n=0; n<nlists; ++n) {
    /* fix: vir_zz, vir_yz and vir_zx are passed to do_forces() and
       accumulated there, but were missing from the reduction clause --
       a data race under OpenMP.  All eight accumulators are now reduced. */
#ifdef _OPENMP
#pragma omp parallel for schedule(runtime) reduction(+:tot_pot_energy,virial,vir_xx,vir_yy,vir_xy,vir_zz,vir_yz,vir_zx)
#endif
    for (k=0; k<npairs[n]; ++k) {
      vektor pbc;
      pair   *P;
      P = pairs[n]+k;
      /* periodic-image shift for this cell pair */
      pbc.x = P->ipbc[0] * box_x.x + P->ipbc[1] * box_y.x;
      pbc.y = P->ipbc[0] * box_x.y + P->ipbc[1] * box_y.y;
      /* NOTE(review): this 2D file passes the full 3D set of virial
         components -- confirm against do_forces()'s 2D signature */
      do_forces(cell_array + P->np, cell_array + P->nq, pbc,
                &tot_pot_energy, &virial, &vir_xx, &vir_yy, &vir_zz,
                &vir_yz, &vir_zx, &vir_xy);
    }
  }
}

/******************************************************************************
*
*  fix_cells
*
*  check if each atom is in the correct cell;
*  move atoms that have left their cell
*
******************************************************************************/

void fix_cells(void)
{
  int  i,j,l;
#ifdef CLONE
  int  clone;
#endif
  cell *p, *q;
  ivektor coord;

  /* apply periodic boundary conditions */
  do_boundaries();

  /* for each cell in bulk */
  for (i=cellmin.x; i < cellmax.x; ++i)
    for (j=cellmin.y; j < cellmax.y; ++j) {
      p = PTR_2D_V(cell_array, i, j, cell_dim);

      /* loop over atoms in cell */
      l=0;
      while( l < p->n ) {
        coord = cell_coord( ORT(p,l,X), ORT(p,l,Y) );
        q = PTR_2D_VV(cell_array,coord,cell_dim);

        /* if it's in the wrong cell, move it to the right cell;
           l is NOT advanced after a move, because MOVE_ATOM backfills
           slot l with another atom that still needs checking */
        if (p != q) {
          MOVE_ATOM(q, p, l);
#ifdef CLONE
          if (l < p->n - nclones)
            for (clone=1; clone<nclones; clone++)
              MOVE_ATOM(q, p, l+clone);
          else
            /* we are dealing with the last in the stack: the clones
               collapse onto slot l as atoms are moved out */
            for (clone=1; clone<nclones; clone++)
              MOVE_ATOM(q, p, l);
#endif
        }
        else ++l;
      }
    }
}
buggy_version.c
#include <stdio.h> int main(){ int sum = 0; int DATA_MAG = 100; int H[100]; int scale_factor = 10; #pragma omp parallel for reduction(+: sum) for (int i =0; i < DATA_MAG;i++) { H[i] = i; } int LUT[100]; #pragma omp parallel for reduction(+: sum) for (int i = 0; i < DATA_MAG; i++) { sum += H[i]; LUT[i] = sum * scale_factor; } for (int i = 0; i < 100; i++) { printf("%d \n",LUT[i]); } return 0; }
BallTracking.h
#pragma once //#include <opencv2/core/core.hpp> //#include <opencv2/core/optim.hpp> //#include <iostream> //#include <string> //#include "spdlog/spdlog.h" //#include <ConcurrentQueue.h> //#include <omp.h> //#include <boost/lockfree/queue.hpp> class RotModel : public cv::MinProblemSolver::Function { private: cv::Mat radialFlow; cv::Mat tangenFlow; uint nPoints; //num of points to fit by function float freq; //2PI/nPoints float ampRatio; //radialFlowAmplitude = ampRatio*tangenFlow; public: int mode; static const int MODE_TRACKING = 3; static const int MODE_CALIBRATION = 4; RotModel(int fit_mode = MODE_TRACKING) { mode = fit_mode; } double calc(const double* x)const { /* Calculates SSE between actual and fitted optical flow distributions x = [amplitude_tan, offset_tan, phase_tan] */ float res = 0.0; if (mode == MODE_TRACKING) { #pragma omp parallel for// reduction(+ : res) for (int i = 0; i < nPoints; i++) { float r = pow( radialFlow.at<float>(i) - (x[0] * sin(i*freq + x[2])), 2 ); r += pow( tangenFlow.at<float>(i) - (x[1] + x[0] * cos(i*freq + x[2])), 2 ); res += r; } } /* x = [amplitude_rad, amplitude_tan, offset_tan, phase_tan] */ else { #pragma omp parallel for// reduction(+ : res) for (int i = 0; i < nPoints; i++) { float r = pow( radialFlow.at<float>(0, i) - (x[0] * sin(i*freq + x[3])), 2 ); r += pow( tangenFlow.at<float>(0, i) - (x[2] + x[1] * cos(i*freq + x[3])), 2 ); res += r; } } return res; } int getDims() const { return mode; }; void setDataPoints(cv::Mat& radial, cv::Mat& tangential) { radialFlow = radial; tangenFlow = tangential; float ch = radial.channels(); nPoints = radialFlow.size[0]; freq = 3.14151926 * 2.0 / (float)nPoints; } }; struct BallTrackingParameters { cv::Point2f polarCenter = cv::Point2f(112, 70); float visibleBallRadius = 116; //px uint roiRhoMin = 40; //px uint roiRhoMax = 100; //px uint roiDownscaledWidth = 30; //px uint roiDownscaledRhoMin = 2; //px uint roiDownscaledRhoMax = 13; //px float calibrCXYrad = 100.31; //px/rad float 
calibrCXYtan = 76.85; //px/rad float calibrCZ = 20.63; //px/rad }; class BallTracking { private: cv::Mat prevFrame; cv::Mat prevFit; bool visualize; cv::Ptr<cv::DownhillSolver> pDhSolver; cv::Ptr<RotModel> pRotModel; public: BallTracking(bool enableVisualize, int mode); BallTrackingParameters parameters; cv::Mat debugPlot; cv::Mat update(const cv::Mat& frame, float& fit_quality); ~BallTracking(); };
ocp_nlp_common.c
/* * Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren, * Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor, * Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan, * Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl * * This file is part of acados. * * The 2-Clause BSD License * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE.; */ #include "acados/ocp_nlp/ocp_nlp_common.h" #include <assert.h> #include <stdlib.h> #include <string.h> #include <math.h> // blasfeo #include "blasfeo/include/blasfeo_common.h" #include "blasfeo/include/blasfeo_d_blas.h" // hpipm #include "hpipm/include/hpipm_d_ocp_qp_dim.h" // acados #include "acados/utils/mem.h" // openmp #if defined(ACADOS_WITH_OPENMP) #include <omp.h> #endif /************************************************ * config ************************************************/ int ocp_nlp_config_calculate_size(int N) { int ii; int size = 0; // self size += sizeof(ocp_nlp_config); // qp solver size += 1 * ocp_qp_xcond_solver_config_calculate_size(); // regularization size += ocp_nlp_reg_config_calculate_size(); // dynamics size += N * sizeof(ocp_nlp_dynamics_config *); for (ii = 0; ii < N; ii++) size += ocp_nlp_dynamics_config_calculate_size(); // cost size += (N + 1) * sizeof(ocp_nlp_cost_config *); for (ii = 0; ii <= N; ii++) size += ocp_nlp_cost_config_calculate_size(); // constraints size += (N + 1) * sizeof(ocp_nlp_constraints_config *); for (ii = 0; ii <= N; ii++) size += ocp_nlp_constraints_config_calculate_size(); return size; } ocp_nlp_config *ocp_nlp_config_assign(int N, void *raw_memory) { int ii; char *c_ptr = (char *) raw_memory; ocp_nlp_config *config = (ocp_nlp_config *) c_ptr; c_ptr += sizeof(ocp_nlp_config); config->N = N; // qp solver config->qp_solver = ocp_qp_xcond_solver_config_assign(c_ptr); c_ptr += 
ocp_qp_xcond_solver_config_calculate_size(); // regularization config->regularize = ocp_nlp_reg_config_assign(c_ptr); c_ptr += ocp_nlp_reg_config_calculate_size(); // dynamics config->dynamics = (ocp_nlp_dynamics_config **) c_ptr; c_ptr += N * sizeof(ocp_nlp_dynamics_config *); for (ii = 0; ii < N; ii++) { config->dynamics[ii] = ocp_nlp_dynamics_config_assign(c_ptr); c_ptr += ocp_nlp_dynamics_config_calculate_size(); } // cost config->cost = (ocp_nlp_cost_config **) c_ptr; c_ptr += (N + 1) * sizeof(ocp_nlp_cost_config *); for (ii = 0; ii <= N; ii++) { config->cost[ii] = ocp_nlp_cost_config_assign(c_ptr); c_ptr += ocp_nlp_cost_config_calculate_size(); } // constraints config->constraints = (ocp_nlp_constraints_config **) c_ptr; c_ptr += (N + 1) * sizeof(ocp_nlp_constraints_config *); for (ii = 0; ii <= N; ii++) { config->constraints[ii] = ocp_nlp_constraints_config_assign(c_ptr); c_ptr += ocp_nlp_constraints_config_calculate_size(); } return config; } /************************************************ * dims ************************************************/ static int ocp_nlp_dims_calculate_size_self(int N) { int size = 0; size += sizeof(ocp_nlp_dims); // nlp sizes size += 6 * (N + 1) * sizeof(int); // nv, nx, nu, ni, nz, ns // dynamics size += N * sizeof(void *); // cost size += (N + 1) * sizeof(void *); // constraints size += (N + 1) * sizeof(void *); // regularization size += ocp_nlp_reg_dims_calculate_size(N); size += sizeof(ocp_nlp_reg_dims); size += 8; // initial align return size; } int ocp_nlp_dims_calculate_size(void *config_) { ocp_nlp_config *config = config_; int N = config->N; int ii; int size = 0; // self size += ocp_nlp_dims_calculate_size_self(N); // dynamics for (ii = 0; ii < N; ii++) size += config->dynamics[ii]->dims_calculate_size(config->dynamics[ii]); // cost for (ii = 0; ii <= N; ii++) size += config->cost[ii]->dims_calculate_size(config->cost[ii]); // constraints for (ii = 0; ii <= N; ii++) size += 
config->constraints[ii]->dims_calculate_size(config->constraints[ii]); // qp solver size += config->qp_solver->dims_calculate_size(config->qp_solver, N); return size; } static ocp_nlp_dims *ocp_nlp_dims_assign_self(int N, void *raw_memory) { char *c_ptr = (char *) raw_memory; int ii; // initial align align_char_to(8, &c_ptr); // struct ocp_nlp_dims *dims = (ocp_nlp_dims *) c_ptr; c_ptr += sizeof(ocp_nlp_dims); // nv assign_and_advance_int(N + 1, &dims->nv, &c_ptr); // nx assign_and_advance_int(N + 1, &dims->nx, &c_ptr); // nu assign_and_advance_int(N + 1, &dims->nu, &c_ptr); // ni assign_and_advance_int(N + 1, &dims->ni, &c_ptr); // nz assign_and_advance_int(N + 1, &dims->nz, &c_ptr); // ns assign_and_advance_int(N + 1, &dims->ns, &c_ptr); // dynamics dims->dynamics = (void **) c_ptr; c_ptr += N * sizeof(void *); // cost dims->cost = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); // constraints dims->constraints = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); // regularization dims->regularize = ocp_nlp_reg_dims_assign(N, c_ptr); c_ptr += ocp_nlp_reg_dims_calculate_size(N); /* initialize qp_solver dimensions */ // dims->qp_solver->N = N; // for (ii = 0; ii <= N; ii++) // { // TODO(dimitris): values below are needed for reformulation of QP when soft constraints // are not supported. Make this a bit more transparent as it clushes with nbx/nbu above. // dims->qp_solver->nsbx[ii] = 0; // dims->qp_solver->nsbu[ii] = 0; // dims->qp_solver->nsg[ii] = 0; // } // N dims->N = N; // initialize dimensions to zero by default // nv for(ii=0; ii<=N; ii++) dims->nv[ii] = 0; // nx for(ii=0; ii<=N; ii++) dims->nx[ii] = 0; // nu for(ii=0; ii<=N; ii++) dims->nu[ii] = 0; // ni for(ii=0; ii<=N; ii++) dims->ni[ii] = 0; // nz for(ii=0; ii<=N; ii++) dims->nz[ii] = 0; // ns for(ii=0; ii<=N; ii++) dims->ns[ii] = 0; // TODO initialize dims to zero by default also in modules !!!!!!! 
// assert assert((char *) raw_memory + ocp_nlp_dims_calculate_size_self(N) >= c_ptr); return dims; } ocp_nlp_dims *ocp_nlp_dims_assign(void *config_, void *raw_memory) { ocp_nlp_config *config = config_; int N = config->N; int ii; char *c_ptr = (char *) raw_memory; // self ocp_nlp_dims *dims = ocp_nlp_dims_assign_self(N, c_ptr); c_ptr += ocp_nlp_dims_calculate_size_self(N); // dynamics for (ii = 0; ii < N; ii++) { dims->dynamics[ii] = config->dynamics[ii]->dims_assign(config->dynamics[ii], c_ptr); c_ptr += config->dynamics[ii]->dims_calculate_size(config->dynamics[ii]); } // cost for (ii = 0; ii <= N; ii++) { dims->cost[ii] = config->cost[ii]->dims_assign(config->cost[ii], c_ptr); c_ptr += config->cost[ii]->dims_calculate_size(config->cost[ii]); } // constraints for (ii = 0; ii <= N; ii++) { dims->constraints[ii] = config->constraints[ii]->dims_assign(config->constraints[ii], c_ptr); c_ptr += config->constraints[ii]->dims_calculate_size(config->constraints[ii]); } // qp solver dims->qp_solver = config->qp_solver->dims_assign(config->qp_solver, N, c_ptr); c_ptr += config->qp_solver->dims_calculate_size(config->qp_solver, N); // assert assert((char *) raw_memory + ocp_nlp_dims_calculate_size(config_) >= c_ptr); return dims; } void ocp_nlp_dims_set_opt_vars(void *config_, void *dims_, const char *field, const void* value_array) { // to set dimension nx, nu, nz, ns (number of slacks = number of soft constraints) ocp_nlp_config *config = config_; ocp_nlp_dims *dims = dims_; int ii; int N = config->N; int *int_array = (int *) value_array; /* set ocp_nlp dimension */ if (!strcmp(field, "nx")) { // opt var for (ii = 0; ii <= N; ii++) { // set nx dims->nx[ii] = int_array[ii]; // update nv dims->nv[ii] = dims->nu[ii] + dims->nx[ii] + 2 * dims->ns[ii]; } // cost for (int i = 0; i <= N; i++) { config->cost[i]->dims_set(config->cost[i], dims->cost[i], "nx", &int_array[i]); } // dynamics for (int i = 0; i < N; i++) { config->dynamics[i]->dims_set(config->dynamics[i], 
dims->dynamics[i], "nx", &int_array[i]); } for (int i = 0; i < N; i++) { config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nx1", &int_array[i+1]); } // constraints for (int i = 0; i <= N; i++) { config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i], "nx", &int_array[i]); } // qp solver for (int i = 0; i <= N; i++) { config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nx", &int_array[i]); } // regularization for (ii = 0; ii <= N; ii++) { config->regularize->dims_set(config->regularize, dims->regularize, ii, "nx", &int_array[ii]); } } else if (!strcmp(field, "nu")) { // nlp opt var for (int ii = 0; ii <= N; ii++) { // set nu dims->nu[ii] = int_array[ii]; // update nv dims->nv[ii] = dims->nu[ii] + dims->nx[ii] + 2 * dims->ns[ii]; } // cost for (int i = 0; i <= N; i++) { config->cost[i]->dims_set(config->cost[i], dims->cost[i], "nu", &int_array[i]); } // dynamics for (int i = 0; i < N; i++) { config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nu", &int_array[i]); } for (int i = 0; i < N; i++) { config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nu1", &int_array[i+1]); } // constraints for (int i = 0; i <= N; i++) { config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i], "nu", &int_array[i]); } // qp solver for (int i = 0; i <= N; i++) { config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nu", &int_array[i]); } // regularization for (ii = 0; ii <= N; ii++) { config->regularize->dims_set(config->regularize, dims->regularize, ii, "nu", &int_array[ii]); } } else if (!strcmp(field, "nz")) { // nlp opt var for (int ii = 0; ii <= N; ii++) { // set nz dims->nz[ii] = int_array[ii]; } // cost for (int i = 0; i <= N; i++) { config->cost[i]->dims_set(config->cost[i], dims->cost[i], "nz", &int_array[i]); } // dynamics for (int i = 0; i < N; i++) { config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nz", &int_array[i]); } // 
constraints for (int i = 0; i <= N; i++) { config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i], "nz", &int_array[i]); } } else if (!strcmp(field, "ns")) { // nlp opt var for (int ii = 0; ii <= N; ii++) { // set ns dims->ns[ii] = int_array[ii]; // update nv dims->nv[ii] = dims->nu[ii] + dims->nx[ii] + 2 * dims->ns[ii]; } // cost for (int i = 0; i <= N; i++) { config->cost[i]->dims_set(config->cost[i], dims->cost[i], "ns", &int_array[i]); } // qp solver for (int i = 0; i <= N; i++) { config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "ns", &int_array[i]); } } else { printf("error: dims type not available in module ocp_nlp: %s", field); exit(1); } #if 0 /* set ocp_nlp submodule dimensions */ if (strcmp(field, "ns")) // dynamics do not contain slack/soft constraints { for (int i = 0; i < N; i++) { config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], field, &int_array[i]); } } if (!strcmp(field, "nu")) { for (int i = 0; i < N; i++) { config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nu1", &int_array[i+1]); } } if (!strcmp(field, "nx")) { for (int i = 0; i < N; i++) { config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nx1", &int_array[i+1]); } } for (int i = 0; i <= N; i++) // cost { config->cost[i]->dims_set(config->cost[i], dims->cost[i], field, &int_array[i]); } for (int i = 0; i <= N; i++) // constraints { config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i], field, &int_array[i]); } if (strcmp(field, "nz")) // qp_solver does not contain nz { for (int i = 0; i <= N; i++) // qp_solver { config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, &int_array[i]); } } #endif return; } void ocp_nlp_dims_set_constraints(void *config_, void *dims_, int stage, const char *field, const void* value_) { // to set dimension nbx, nbu, ng, nh, nq (quadratic over nonlinear) ocp_nlp_config *config = config_; ocp_nlp_dims *dims = dims_; int 
*int_value = (int *) value_; int i = stage; // set in constraint module config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i], field, int_value); // update ni in ocp_nlp dimensions config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i], "ni", &dims->ni[i]); // update qp_solver dims if ( (!strcmp(field, "nbx")) || (!strcmp(field, "nbu")) ) { // qp solver config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value); // regularization config->regularize->dims_set(config->regularize, dims->regularize, i, (char *) field, int_value); } else if ( (!strcmp(field, "nsbx")) || (!strcmp(field, "nsbu")) ) { // qp solver config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value); } else if ( (!strcmp(field, "ng")) || (!strcmp(field, "nh")) || (!strcmp(field, "nphi"))) { // update ng_qp_solver in qp_solver int ng_qp_solver; config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i], "ng_qp_solver", &ng_qp_solver); // qp solver config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "ng", &ng_qp_solver); // regularization config->regularize->dims_set(config->regularize, dims->regularize, i, "ng", &ng_qp_solver); } else if ( (!strcmp(field, "nsg")) || (!strcmp(field, "nsh")) || (!strcmp(field, "nsphi"))) { // update ng_qp_solver in qp_solver int nsg_qp_solver; config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i], "nsg_qp_solver", &nsg_qp_solver); // qp solver config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nsg", &nsg_qp_solver); } else if ( (!strcmp(field, "nbxe")) || (!strcmp(field, "nbue")) ) { // qp solver config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value); } else if ( (!strcmp(field, "nge")) || (!strcmp(field, "nhe")) || (!strcmp(field, "nphie"))) { // update ng_qp_solver in qp_solver int ng_qp_solver; config->constraints[i]->dims_get(config->constraints[i], 
dims->constraints[i], "nge_qp_solver", &ng_qp_solver); // qp solver config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nge", &ng_qp_solver); } return; } void ocp_nlp_dims_set_cost(void *config_, void *dims_, int stage, const char *field, const void* value_) { // to set dimension ny (output) ocp_nlp_config *config = config_; ocp_nlp_dims *dims = dims_; int *int_value = (int *) value_; config->cost[stage]->dims_set(config->cost[stage], dims->cost[stage], field, int_value); } void ocp_nlp_dims_set_dynamics(void *config_, void *dims_, int stage, const char *field, const void* value) { // mainly for gnsf dimensions ocp_nlp_config *config = config_; ocp_nlp_dims *dims = dims_; int *int_value = (int *) value; config->dynamics[stage]->dims_set(config->dynamics[stage], dims->dynamics[stage], field, int_value); } /************************************************ * in ************************************************/ int ocp_nlp_in_calculate_size_self(int N) { int size = sizeof(ocp_nlp_in); size += N * sizeof(double); // Ts size += N * sizeof(void *); // dynamics size += (N + 1) * sizeof(void *); // cost size += (N + 1) * sizeof(void *); // constraints return size; } int ocp_nlp_in_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims) { int ii; int N = dims->N; int size = ocp_nlp_in_calculate_size_self(N); // dynamics for (ii = 0; ii < N; ii++) { size += config->dynamics[ii]->model_calculate_size(config->dynamics[ii], dims->dynamics[ii]); } // cost for (ii = 0; ii <= N; ii++) { size += config->cost[ii]->model_calculate_size(config->cost[ii], dims->cost[ii]); } // constraints for (ii = 0; ii <= N; ii++) { size += config->constraints[ii]->model_calculate_size(config->constraints[ii], dims->constraints[ii]); } size += 8; // initial align size += 8; // final align // make_int_multiple_of(64, &size); return size; } ocp_nlp_in *ocp_nlp_in_assign_self(int N, void *raw_memory) { char *c_ptr = (char *) raw_memory; // initial align align_char_to(8, &c_ptr); // 
struct ocp_nlp_in *in = (ocp_nlp_in *) c_ptr;
    c_ptr += sizeof(ocp_nlp_in);

    // Ts
    assign_and_advance_double(N, &in->Ts, &c_ptr);

    // dynamics
    in->dynamics = (void **) c_ptr;
    c_ptr += N * sizeof(void *);

    // cost
    in->cost = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    // constraints
    in->constraints = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    align_char_to(8, &c_ptr);

    return in;
}


/* Assign a fully populated ocp_nlp_in (struct + per-stage module models)
 * inside raw_memory. Layout must mirror ocp_nlp_in_calculate_size. */
ocp_nlp_in *ocp_nlp_in_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, void *raw_memory)
{
    int ii;

    int N = dims->N;

    char *c_ptr = (char *) raw_memory;

    //
    struct ocp_nlp_in *in = ocp_nlp_in_assign_self(N, c_ptr);
    c_ptr += ocp_nlp_in_calculate_size_self(N);

    // dynamics
    for (ii = 0; ii < N; ii++)
    {
        in->dynamics[ii] = config->dynamics[ii]->model_assign(config->dynamics[ii], dims->dynamics[ii], c_ptr);
        c_ptr += config->dynamics[ii]->model_calculate_size(config->dynamics[ii], dims->dynamics[ii]);
    }

    // cost
    for (ii = 0; ii <= N; ii++)
    {
        in->cost[ii] = config->cost[ii]->model_assign(config->cost[ii], dims->cost[ii], c_ptr);
        c_ptr += config->cost[ii]->model_calculate_size(config->cost[ii], dims->cost[ii]);
    }

    // constraints
    for (ii = 0; ii <= N; ii++)
    {
        in->constraints[ii] = config->constraints[ii]->model_assign(config->constraints[ii], dims->constraints[ii], c_ptr);
        c_ptr += config->constraints[ii]->model_calculate_size(config->constraints[ii], dims->constraints[ii]);
    }

    // sanity check: assigned memory must not exceed the computed size
    assert((char *) raw_memory + ocp_nlp_in_calculate_size(config, dims) >= c_ptr);

    return in;
}


/************************************************
 * out
 ************************************************/

/* Bytes needed for an ocp_nlp_out: N+1 vectors for ux, z, lam, t and
 * N vectors for pi (one multiplier per stage transition).
 * Must stay in sync with ocp_nlp_out_assign. */
int ocp_nlp_out_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims)
{
    // extract dims
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;
    int *nz = dims->nz;

    int size = sizeof(ocp_nlp_out);

    size += 4 * (N + 1) * sizeof(struct blasfeo_dvec); // ux, lam, t, z
    size += 1 * N * sizeof(struct blasfeo_dvec); // pi

    for (int ii = 0; ii < N; ii++)
    {
        size += 1 * blasfeo_memsize_dvec(nv[ii]); // ux
        size += 1 * blasfeo_memsize_dvec(nz[ii]); // z
        size += 2 * blasfeo_memsize_dvec(2 * ni[ii]); // lam, t
        size += 1 * blasfeo_memsize_dvec(nx[ii + 1]); // pi
    }
    size += 1 * blasfeo_memsize_dvec(nv[N]); // ux
    size += 1 * blasfeo_memsize_dvec(nz[N]); // z
    size += 2 * blasfeo_memsize_dvec(2 * ni[N]); // lam, t

    size += 8; // initial align
    size += 8; // blasfeo_struct align
    size += 64; // blasfeo_mem align

    make_int_multiple_of(8, &size);

    return size;
}


/* Assign an ocp_nlp_out inside raw_memory and zero-initialize all
 * solution vectors. Layout must mirror ocp_nlp_out_calculate_size. */
ocp_nlp_out *ocp_nlp_out_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, void *raw_memory)
{
    // loop index
    int ii;

    // extract sizes
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;
    int *nz = dims->nz;

    char *c_ptr = (char *) raw_memory;

    // initial align
    align_char_to(8, &c_ptr);

    ocp_nlp_out *out = (ocp_nlp_out *) c_ptr;
    c_ptr += sizeof(ocp_nlp_out);

    // blasfeo_struct align
    align_char_to(8, &c_ptr);

    // blasfeo_dvec_struct
    // ux
    assign_and_advance_blasfeo_dvec_structs(N + 1, &out->ux, &c_ptr);
    // z
    assign_and_advance_blasfeo_dvec_structs(N + 1, &out->z, &c_ptr);
    // pi
    assign_and_advance_blasfeo_dvec_structs(N, &out->pi, &c_ptr);
    // lam
    assign_and_advance_blasfeo_dvec_structs(N + 1, &out->lam, &c_ptr);
    // t
    assign_and_advance_blasfeo_dvec_structs(N + 1, &out->t, &c_ptr);

    // blasfeo_mem align
    align_char_to(64, &c_ptr);

    // blasfeo_dvec
    // ux
    for (int ii = 0; ii <= N; ++ii)
    {
        assign_and_advance_blasfeo_dvec_mem(nv[ii], out->ux + ii, &c_ptr);
    }
    // z
    for (int ii = 0; ii <= N; ++ii)
    {
        assign_and_advance_blasfeo_dvec_mem(nz[ii], out->z + ii, &c_ptr);
    }
    // pi
    for (int ii = 0; ii < N; ++ii)
    {
        assign_and_advance_blasfeo_dvec_mem(nx[ii + 1], out->pi + ii, &c_ptr);
    }
    // lam
    for (int ii = 0; ii <= N; ++ii)
    {
        assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], out->lam + ii, &c_ptr);
    }
    // t
    for (int ii = 0; ii <= N; ++ii)
    {
        assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], out->t + ii, &c_ptr);
    }

    // zero solution
    for(ii=0; ii<N; ii++)
    {
        blasfeo_dvecse(nv[ii], 0.0, out->ux+ii, 0);
        blasfeo_dvecse(nz[ii], 0.0, out->z+ii, 0);
        blasfeo_dvecse(nx[ii+1], 0.0, out->pi+ii, 0);
        blasfeo_dvecse(2*ni[ii], 0.0, out->lam+ii, 0);
        blasfeo_dvecse(2*ni[ii], 0.0, out->t+ii, 0);
    }
    // terminal stage: no pi vector to clear
    ii = N;
    blasfeo_dvecse(nv[ii], 0.0, out->ux+ii, 0);
    blasfeo_dvecse(nz[ii], 0.0, out->z+ii, 0);
    blasfeo_dvecse(2*ni[ii], 0.0, out->lam+ii, 0);
    blasfeo_dvecse(2*ni[ii], 0.0, out->t+ii, 0);

    assert((char *) raw_memory + ocp_nlp_out_calculate_size(config, dims) >= c_ptr);

    return out;
}


/************************************************
 * options
 ************************************************/

/* Bytes needed for ocp_nlp_opts plus all submodule options (QP solver,
 * regularization, per-stage dynamics/cost/constraints).
 * Must stay in sync with ocp_nlp_opts_assign. */
int ocp_nlp_opts_calculate_size(void *config_, void *dims_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int N = dims->N;

    int size = 0;

    size += sizeof(ocp_nlp_opts);

    size += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);

    size += config->regularize->opts_calculate_size();

    // dynamics
    size += N * sizeof(void *);
    for (int ii = 0; ii < N; ii++)
    {
        size += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]);
    }

    // cost
    size += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        size += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]);
    }

    // constraints
    size += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        size += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]);
    }

    size += 2*8; // 2 aligns

    return size;
}


/* Assign ocp_nlp_opts and all submodule options inside raw_memory.
 * Layout must mirror ocp_nlp_opts_calculate_size. */
void *ocp_nlp_opts_assign(void *config_, void *dims_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int N =
dims->N;

    char *c_ptr = (char *) raw_memory;

    align_char_to(8, &c_ptr);

    ocp_nlp_opts *opts = (ocp_nlp_opts *) c_ptr;
    c_ptr += sizeof(ocp_nlp_opts);

    /* pointers to substructures */
    opts->dynamics = (void **) c_ptr;
    c_ptr += N * sizeof(void *);

    opts->cost = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    opts->constraints = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    align_char_to(8, &c_ptr);

    /* substructures */
    opts->qp_solver_opts = qp_solver->opts_assign(qp_solver, dims->qp_solver, c_ptr);
    c_ptr += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);

    opts->regularize = config->regularize->opts_assign(c_ptr);
    c_ptr += config->regularize->opts_calculate_size();

    // dynamics
    for (int ii = 0; ii < N; ii++)
    {
        opts->dynamics[ii] = dynamics[ii]->opts_assign(dynamics[ii], dims->dynamics[ii], c_ptr);
        c_ptr += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]);
    }

    // cost
    for (int ii = 0; ii <= N; ii++)
    {
        opts->cost[ii] = cost[ii]->opts_assign(cost[ii], dims->cost[ii], c_ptr);
        c_ptr += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]);
    }

    // constraints
    for (int ii = 0; ii <= N; ii++)
    {
        opts->constraints[ii] = constraints[ii]->opts_assign(constraints[ii], dims->constraints[ii], c_ptr);
        c_ptr += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]);
    }

    assert((char *) raw_memory + ocp_nlp_opts_calculate_size(config, dims) >= c_ptr);

    return opts;
}


/* Populate ocp_nlp_opts with defaults (fixed-step globalization, full step,
 * no Levenberg-Marquardt) and initialize every submodule's defaults. */
void ocp_nlp_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_opts *opts = opts_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;
    ocp_nlp_reg_config *regularize = config->regularize;

    int ii;

    int N = dims->N;

    opts->reuse_workspace = 1;
#if defined(ACADOS_WITH_OPENMP)
#if defined(ACADOS_NUM_THREADS)
    opts->num_threads = ACADOS_NUM_THREADS;
    // printf("\nocp_nlp: openmp threads from macro = %d\n", opts->num_threads);
#else
    opts->num_threads = omp_get_max_threads();
    // printf("\nocp_nlp: omp_get_max_threads %d", omp_get_max_threads());
#endif
#endif
    // printf("\nocp_nlp: openmp threads = %d\n", opts->num_threads);

    opts->globalization = FIXED_STEP;
    opts->step_length = 1.0;
    opts->levenberg_marquardt = 0.0;

    /* submodules opts */
    // qp solver
    qp_solver->opts_initialize_default(qp_solver, dims->qp_solver, opts->qp_solver_opts);

    // regularization
    regularize->opts_initialize_default(regularize, dims->regularize, opts->regularize);

    // dynamics
    for (ii = 0; ii < N; ii++)
    {
        dynamics[ii]->opts_initialize_default(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
    }

    // cost
    for (ii = 0; ii <= N; ii++)
    {
        cost[ii]->opts_initialize_default(cost[ii], dims->cost[ii], opts->cost[ii]);
    }

    // constraints
    for (ii = 0; ii <= N; ii++)
    {
        constraints[ii]->opts_initialize_default(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
    }

    return;
}


/* Let every submodule re-process its options after the user changed them. */
void ocp_nlp_opts_update(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_opts *opts = opts_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int ii;

    int N = dims->N;

    qp_solver->opts_update(qp_solver, dims->qp_solver, opts->qp_solver_opts);

    // dynamics
    for (ii = 0; ii < N; ii++)
    {
        dynamics[ii]->opts_update(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
    }

    // cost
    for (ii = 0; ii <= N; ii++)
    {
        cost[ii]->opts_update(cost[ii], dims->cost[ii], opts->cost[ii]);
    }

    // constraints
    for (ii = 0; ii <= N; ii++)
    {
        constraints[ii]->opts_update(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
    }

    return;
}


/* Set an NLP-level option by name, or dispatch "qp_<field>" to the QP
 * solver module. Unknown fields abort with an error message. */
void ocp_nlp_opts_set(void *config_, void *opts_, const char *field, void* value)
{
    ocp_nlp_opts *opts = (ocp_nlp_opts *) opts_;
    ocp_nlp_config *config = config_;

    int ii;

    char module[MAX_STR_LEN];
    char *ptr_module = NULL;
    int module_length = 0;

    // extract module name, i.e. substring in field before '_'
    // NOTE(review): the prefix is copied unchecked into module[MAX_STR_LEN];
    // assumes the module prefix is shorter than MAX_STR_LEN — confirm callers.
    char *char_ = strchr(field, '_');
    if (char_!=NULL)
    {
        module_length = char_-field;
        for (ii=0; ii<module_length; ii++)
            module[ii] = field[ii];
        module[module_length] = '\0'; // add end of string
        ptr_module = module;
    }

    // pass options to QP module
    if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) )
    {
        config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, field+module_length+1, value);
    }
    else // nlp opts
    {
        if (!strcmp(field, "reuse_workspace"))
        {
            int* reuse_workspace = (int *) value;
            opts->reuse_workspace = *reuse_workspace;
        }
        else if (!strcmp(field, "num_threads"))
        {
            int* num_threads = (int *) value;
            opts->num_threads = *num_threads;
        }
        else if (!strcmp(field, "step_length"))
        {
            double* step_length = (double *) value;
            opts->step_length = *step_length;
        }
        else if (!strcmp(field, "globalization"))
        {
            char* globalization = (char *) value;
            if (!strcmp(globalization, "fixed_step"))
            {
                opts->globalization = FIXED_STEP;
            }
            else if (!strcmp(globalization, "merit_backtracking"))
            {
                opts->globalization = MERIT_BACKTRACKING;
            }
            else
            {
                printf("\nerror: ocp_nlp_opts_set: not supported value for globalization, got: %s\n", globalization);
                exit(1);
            }
        }
        else if (!strcmp(field, "levenberg_marquardt"))
        {
            double* levenberg_marquardt = (double *) value;
            opts->levenberg_marquardt = *levenberg_marquardt;
        }
        else if (!strcmp(field, "exact_hess"))
        {
            // toggle exact Hessian in all modules at once
            int N = config->N;
            // cost
            for (ii=0; ii<=N; ii++)
                config->cost[ii]->opts_set(config->cost[ii], opts->cost[ii], "exact_hess", value);
            // dynamics
            for (ii=0; ii<N; ii++)
                config->dynamics[ii]->opts_set(config->dynamics[ii], opts->dynamics[ii], "compute_hess", value);
            // constraints
            for (ii=0; ii<=N; ii++)
                config->constraints[ii]->opts_set(config->constraints[ii], opts->constraints[ii], "compute_hess", value);
        }
        // selectively turn on exact hessian contributions
        else if (!strcmp(field, "exact_hess_cost"))
        {
            int N = config->N;
            for (ii=0; ii<=N; ii++)
                config->cost[ii]->opts_set(config->cost[ii], opts->cost[ii], "exact_hess", value);
        }
        else if (!strcmp(field, "exact_hess_dyn"))
        {
            int N = config->N;
            for (ii=0; ii<N; ii++)
                config->dynamics[ii]->opts_set(config->dynamics[ii], opts->dynamics[ii], "compute_hess", value);
        }
        else if (!strcmp(field, "exact_hess_constr"))
        {
            int N = config->N;
            for (ii=0; ii<=N; ii++)
                config->constraints[ii]->opts_set(config->constraints[ii], opts->constraints[ii], "compute_hess", value);
        }
        else
        {
            printf("\nerror: ocp_nlp_opts_set: wrong field: %s\n", field);
            exit(1);
        }
    }

    return;
}


/* Set a stage-local option: the field's prefix before '_' selects the
 * dynamics/cost/constraints module; the rest names the module option. */
void ocp_nlp_opts_set_at_stage(void *config_, void *opts_, int stage, const char *field, void* value)
{
    ocp_nlp_opts *opts = (ocp_nlp_opts *) opts_;
    ocp_nlp_config *config = config_;

    int ii;

    char module[MAX_STR_LEN];
    char *ptr_module = NULL;
    int module_length = 0;

    // extract module name
    // NOTE(review): same unchecked MAX_STR_LEN copy as in ocp_nlp_opts_set.
    char *char_ = strchr(field, '_');
    if (char_!=NULL)
    {
        module_length = char_-field;
        for (ii=0; ii<module_length; ii++)
            module[ii] = field[ii];
        module[module_length] = '\0'; // add end of string
        ptr_module = module;
    }

    // pass options to dynamics module
    if ( ptr_module!=NULL && (!strcmp(ptr_module, "dynamics")) )
    {
        config->dynamics[stage]->opts_set( config->dynamics[stage], opts->dynamics[stage], field+module_length+1, value );
    }
    // pass options to cost module
    else if ( ptr_module!=NULL && (!strcmp(ptr_module, "cost")) )
    {
        config->cost[stage]->opts_set( config->cost[stage], opts->cost[stage], field+module_length+1, value);
    }
    // pass options to constraint module
    else if ( ptr_module!=NULL && (!strcmp(ptr_module, "constraints")) )
    {
        config->constraints[stage]->opts_set( config->constraints[stage], opts->constraints[stage], (char *) field+module_length+1, value);
    }
    else
    {
        printf("\nerror: ocp_nlp_opts_set_at_stage: wrong field: %s\n", field);
        exit(1);
    }

    return;
}


/************************************************
 * memory
 ************************************************/

/* Bytes needed for ocp_nlp_memory: QP in/out, QP solver and regularization
 * memory, all per-stage module memories, NLP residuals and the BLASFEO
 * work vectors/matrices. Must stay in sync with ocp_nlp_memory_assign. */
int ocp_nlp_memory_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts)
{
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    // extract dims
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *nz = dims->nz;
    int *nu = dims->nu;
    int *ni = dims->ni;

    int size = sizeof(ocp_nlp_memory);

    // qp in
    size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // qp out
    size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    // qp solver
    size += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);

    // regularization
    size += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize);

    // dynamics
    size += N * sizeof(void *);
    for (int ii = 0; ii < N; ii++)
    {
        size += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
    }

    // cost
    size += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        size += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
    }

    // constraints
    size += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        size += constraints[ii]->memory_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
    }

    // nlp res
    size += ocp_nlp_res_calculate_size(dims);

    size += (N+1)*sizeof(bool); // set_sim_guess

    size += (N+1)*sizeof(struct blasfeo_dmat); // dzduxt

    size += 6*(N+1)*sizeof(struct blasfeo_dvec); // cost_grad ineq_fun ineq_adj dyn_adj sim_guess z_alg
    size += 1*N*sizeof(struct blasfeo_dvec); // dyn_fun

    for (int ii = 0; ii < N; ii++)
    {
        size += 1*blasfeo_memsize_dmat(nu[ii]+nx[ii], nz[ii]); // dzduxt
        size += 1*blasfeo_memsize_dvec(nz[ii]); // z_alg
        size += 2*blasfeo_memsize_dvec(nv[ii]); // cost_grad ineq_adj
        size += 1*blasfeo_memsize_dvec(nu[ii] + nx[ii]); // dyn_adj
        size += 1*blasfeo_memsize_dvec(nx[ii + 1]); // dyn_fun
        size += 1*blasfeo_memsize_dvec(2 * ni[ii]); // ineq_fun
        size += 1*blasfeo_memsize_dvec(nx[ii] + nz[ii]); // sim_guess
    }
    size += 1*blasfeo_memsize_dmat(nu[N]+nx[N], nz[N]); // dzduxt
    size += 1*blasfeo_memsize_dvec(nz[N]); // z_alg
    size += 2*blasfeo_memsize_dvec(nv[N]); // cost_grad ineq_adj
    size += 1*blasfeo_memsize_dvec(nu[N] + nx[N]); // dyn_adj
    size += 1*blasfeo_memsize_dvec(2 * ni[N]); // ineq_fun
    size += 1*blasfeo_memsize_dvec(nx[N] + nz[N]); // sim_guess

    size += 8; // initial align
    size += 8; // middle align
    size += 8; // blasfeo_struct align
    size += 64; // blasfeo_mem align

    make_int_multiple_of(8, &size);

    return size;
}


/* Assign ocp_nlp_memory inside raw_memory: substructures first, then
 * BLASFEO structs, then 64-byte-aligned BLASFEO data. Layout must
 * mirror ocp_nlp_memory_calculate_size. */
ocp_nlp_memory *ocp_nlp_memory_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts, void *raw_memory)
{
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    // extract sizes
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *nz = dims->nz;
    int *nu = dims->nu;
    int *ni = dims->ni;

    char *c_ptr = (char *) raw_memory;

    // initial align
    align_char_to(8, &c_ptr);

    //
    struct ocp_nlp_memory *mem = (ocp_nlp_memory *) c_ptr;
    c_ptr += sizeof(ocp_nlp_memory);

    /* pointers to substructures */
    // dynamics
    mem->dynamics = (void **) c_ptr;
    c_ptr += N*sizeof(void *);
    // cost
    mem->cost = (void **) c_ptr;
    c_ptr += (N+1)*sizeof(void *);
    // constraints
    mem->constraints = (void **) c_ptr;
    c_ptr += (N+1)*sizeof(void *);

    // middle align
    align_char_to(8, &c_ptr);

    /* substructures */
    // qp in
    mem->qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
    // qp out
    mem->qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
    // QP solver
    mem->qp_solver_mem = qp_solver->memory_assign(qp_solver, dims->qp_solver, opts->qp_solver_opts, c_ptr);
    c_ptr += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
    // regularization
    mem->regularize_mem = config->regularize->memory_assign(config->regularize, dims->regularize, opts->regularize, c_ptr);
    c_ptr += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize);

    // dynamics
    for (int ii = 0; ii < N; ii++)
    {
        mem->dynamics[ii] = dynamics[ii]->memory_assign(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii], c_ptr);
        c_ptr += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
    }

    // cost
    for (int ii = 0; ii <= N; ii++)
    {
        mem->cost[ii] = cost[ii]->memory_assign(cost[ii], dims->cost[ii], opts->cost[ii], c_ptr);
        c_ptr += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
    }

    // constraints
    for (int ii = 0; ii <= N; ii++)
    {
        mem->constraints[ii] = constraints[ii]->memory_assign(constraints[ii], dims->constraints[ii], opts->constraints[ii], c_ptr);
        c_ptr += constraints[ii]->memory_calculate_size( constraints[ii], dims->constraints[ii], opts->constraints[ii]);
    }

    // nlp res
    mem->nlp_res = ocp_nlp_res_assign(dims, c_ptr);
    c_ptr += mem->nlp_res->memsize;

    // blasfeo_struct align
    align_char_to(8, &c_ptr);

    // dzduxt
    assign_and_advance_blasfeo_dmat_structs(N + 1, &mem->dzduxt, &c_ptr);
    // z_alg
    assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->z_alg, &c_ptr);
    // cost_grad
    assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->cost_grad, &c_ptr);
    // ineq_fun
    assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->ineq_fun, &c_ptr);
    // ineq_adj
    assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->ineq_adj, &c_ptr);
    // dyn_fun
    assign_and_advance_blasfeo_dvec_structs(N, &mem->dyn_fun, &c_ptr);
    // dyn_adj
    assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->dyn_adj, &c_ptr);
    // sim_guess
    assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->sim_guess, &c_ptr);
    // set_sim_guess
    assign_and_advance_bool(N+1, &mem->set_sim_guess, &c_ptr);
    for (int ii = 0; ii <= N; ++ii)
    {
        mem->set_sim_guess[ii] = false;
    }

    // blasfeo_mem align
    align_char_to(64, &c_ptr);

    // dzduxt
    for (int ii=0; ii<=N; ii++)
    {
        assign_and_advance_blasfeo_dmat_mem(nu[ii]+nx[ii], nz[ii], mem->dzduxt+ii, &c_ptr);
    }
    // z_alg
    for (int ii=0; ii<=N; ii++)
    {
        blasfeo_create_dvec(nz[ii], mem->z_alg+ii, c_ptr);
        c_ptr += blasfeo_memsize_dvec(nz[ii]);
    }
    // cost_grad
    for (int ii = 0; ii <= N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(nv[ii], mem->cost_grad + ii, &c_ptr);
    }
    // ineq_fun
    for (int ii = 0; ii <= N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], mem->ineq_fun + ii, &c_ptr);
    }
    // ineq_adj
    for (int ii = 0; ii <= N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(nv[ii], mem->ineq_adj + ii, &c_ptr);
    }
    // dyn_fun
    for (int ii = 0; ii < N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(nx[ii + 1], mem->dyn_fun + ii, &c_ptr);
    }
    // dyn_adj
    for (int ii = 0; ii <= N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(nu[ii] + nx[ii], mem->dyn_adj + ii, &c_ptr);
    }
    // sim_guess
    for (int ii = 0; ii <= N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(nx[ii] + nz[ii], mem->sim_guess + ii, &c_ptr);
        // set to 0;
        blasfeo_dvecse(nx[ii] + nz[ii], 0.0, mem->sim_guess+ii, 0);
        // printf("sim_guess ii %d: %p\n", ii, mem->sim_guess+ii);
    }

    // printf("created memory %p\n", mem);
    return mem;
}


/************************************************
 * workspace
 ************************************************/

/* Bytes needed for ocp_nlp_workspace. With reuse_workspace and no OpenMP,
 * all module workspaces overlap in one region sized by the largest module;
 * with OpenMP (or reuse disabled) every module gets its own region.
 * Must stay in sync with ocp_nlp_workspace_assign. */
int ocp_nlp_workspace_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts)
{
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int ii;

    int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;

    int size = 0;

    // nlp
    size += sizeof(ocp_nlp_workspace);

    // tmp_nlp_out
    size +=
ocp_nlp_out_calculate_size(config, dims);

    // weight_merit_fun
    size += ocp_nlp_out_calculate_size(config, dims);

    // array of pointers
    // cost
    size += (N+1)*sizeof(void *);
    // dynamics
    size += N*sizeof(void *);
    // constraints
    size += (N+1)*sizeof(void *);

    // module workspace
    if (opts->reuse_workspace)
    {
#if defined(ACADOS_WITH_OPENMP)
        // with OpenMP, modules run concurrently: each needs its own workspace
        // qp solver
        size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
        // dynamics
        for (ii = 0; ii < N; ii++)
        {
            size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
        }
        // cost
        for (ii = 0; ii <= N; ii++)
        {
            size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }
        // constraints
        for (ii = 0; ii <= N; ii++)
        {
            size += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
        }
#else
        // serial execution: one shared region sized by the largest workspace
        int size_tmp = 0;
        int tmp;
        // qp solver
        tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
        size_tmp = tmp > size_tmp ? tmp : size_tmp;
        // dynamics
        for (ii = 0; ii < N; ii++)
        {
            tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }
        // cost
        for (ii = 0; ii <= N; ii++)
        {
            tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }
        // constraints
        for (ii = 0; ii <= N; ii++)
        {
            tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }
        size += size_tmp;
#endif
    }
    else
    {
        // qp solver
        size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
        // dynamics
        for (ii = 0; ii < N; ii++)
        {
            size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
        }
        // cost
        for (ii = 0; ii <= N; ii++)
        {
            size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }
        // constraints
        for (ii = 0; ii <= N; ii++)
        {
            size += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
        }
    }

    size += 8; // struct align

    return size;
}


/* Assign ocp_nlp_workspace inside raw_memory. The reuse_workspace /
 * OpenMP branching must mirror ocp_nlp_workspace_calculate_size exactly:
 * in the serial reuse branch every module workspace pointer aliases the
 * same region and c_ptr advances only by the maximum size. */
ocp_nlp_workspace *ocp_nlp_workspace_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts, ocp_nlp_memory *mem, void *raw_memory)
{
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;

    char *c_ptr = (char *) raw_memory;

    ocp_nlp_workspace *work = (ocp_nlp_workspace *) c_ptr;
    c_ptr += sizeof(ocp_nlp_workspace);

    /* pointers to substructures */
    //
    work->dynamics = (void **) c_ptr;
    c_ptr += N*sizeof(void *);
    //
    work->cost = (void **) c_ptr;
    c_ptr += (N+1)*sizeof(void *);
    //
    work->constraints = (void **) c_ptr;
    c_ptr += (N+1)*sizeof(void *);

    align_char_to(8, &c_ptr);

    /* substructures */
    // tmp_nlp_out
    work->tmp_nlp_out = ocp_nlp_out_assign(config, dims, c_ptr);
    c_ptr += ocp_nlp_out_calculate_size(config, dims);

    // weight_merit_fun
    work->weight_merit_fun = ocp_nlp_out_assign(config, dims, c_ptr);
    c_ptr += ocp_nlp_out_calculate_size(config, dims);

    if (opts->reuse_workspace)
    {
#if defined(ACADOS_WITH_OPENMP)
        // qp solver
        work->qp_work = (void *) c_ptr;
        c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
        // dynamics
        for (int ii = 0; ii < N; ii++)
        {
            work->dynamics[ii] = c_ptr;
            c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
        }
        // cost
        for (int ii = 0; ii <= N; ii++)
        {
            work->cost[ii] = c_ptr;
            c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }
        // constraints
        for (int ii = 0; ii <= N; ii++)
        {
            work->constraints[ii] = c_ptr;
            c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
        }
#else
        // all module workspaces share one region; advance by the max size only
        int size_tmp = 0;
        int tmp;
        // qp solver
        work->qp_work = (void *) c_ptr;
        tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
        size_tmp = tmp > size_tmp ? tmp : size_tmp;
        // dynamics
        for (int ii = 0; ii < N; ii++)
        {
            work->dynamics[ii] = c_ptr;
            tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }
        // cost
        for (int ii = 0; ii <= N; ii++)
        {
            work->cost[ii] = c_ptr;
            tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }
        // constraints
        for (int ii = 0; ii <= N; ii++)
        {
            work->constraints[ii] = c_ptr;
            tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }
        c_ptr += size_tmp;
#endif
    }
    else
    {
        // qp solver
        work->qp_work = (void *) c_ptr;
        c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
        // dynamics
        for (int ii = 0; ii < N; ii++)
        {
            work->dynamics[ii] = c_ptr;
            c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
        }
        // cost
        for (int ii = 0; ii <= N; ii++)
        {
            work->cost[ii] = c_ptr;
            c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }
        // constraints
        for (int ii = 0; ii <= N; ii++)
        {
            work->constraints[ii] = c_ptr;
            c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
        }
    }

    assert((char *) work + ocp_nlp_workspace_calculate_size(config, dims, opts) >= c_ptr);

    return work;
}


/************************************************
 * functions
 ************************************************/

/* Run every module's initialize hook once per stage (cost, dynamics for
 * stages 0..N-1, constraints), optionally in parallel with OpenMP. */
void ocp_nlp_initialize_qp(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    int ii;

    int N = dims->N;

#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
    for (ii = 0; ii <= N; ii++)
    {
        // cost
        config->cost[ii]->initialize(config->cost[ii], dims->cost[ii], in->cost[ii], opts->cost[ii], mem->cost[ii], work->cost[ii]);
        // dynamics
        if (ii < N)
            config->dynamics[ii]->initialize(config->dynamics[ii], dims->dynamics[ii], in->dynamics[ii], opts->dynamics[ii], mem->dynamics[ii], work->dynamics[ii]);
        // constraints
        config->constraints[ii]->initialize(config->constraints[ii], dims->constraints[ii], in->constraints[ii], opts->constraints[ii], mem->constraints[ii], work->constraints[ii]);
    }

    return;
}


/* Initialize the inequality slack variables t from the current iterate:
 * evaluates the constraint functions and sets t = -ineq_fun per stage. */
void ocp_nlp_initialize_t_slacks(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    int ii;

    struct blasfeo_dvec *ineq_fun;

    int N = dims->N;
    int *ni = dims->ni;
    int *ns = dims->ns;
    int *nx = dims->nx;
    int *nu = dims->nu;

#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
    for (ii = 0; ii <= N; ii++)
    {
        // copy out->ux to tmp_nlp_out->ux, since this is used in compute_fun
        blasfeo_dveccp(nx[ii]+nu[ii]+2*ns[ii], out->ux+ii, 0, work->tmp_nlp_out->ux+ii, 0);

        // evaluate inequalities
        config->constraints[ii]->compute_fun(config->constraints[ii], dims->constraints[ii], in->constraints[ii], opts->constraints[ii], mem->constraints[ii], work->constraints[ii]);

        ineq_fun = config->constraints[ii]->memory_get_fun_ptr(mem->constraints[ii]);
        // t = -ineq_fun
        blasfeo_dveccpsc(2 * ni[ii], -1.0, ineq_fun, 0, out->t + ii, 0);
    }

    return;
}


/* Build the QP Hessian and Jacobian approximations for the current iterate:
 * per stage, zero the Hessian block, add an optional Levenberg-Marquardt
 * diagonal (scaled by Ts[i] for stages 0..N-1), let dynamics/cost/constraints
 * modules add their contributions, then gather gradients/functions/adjoints
 * into the NLP memory. */
void ocp_nlp_approximate_qp_matrices(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    int i;

    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *nu = dims->nu;
    int *ni = dims->ni;

    /* stage-wise multiple shooting lagrangian evaluation */
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
    for (i = 0; i <= N; i++)
    {
        // init Hessian to 0
        blasfeo_dgese(nu[i] + nx[i], nu[i] + nx[i], 0.0, mem->qp_in->RSQrq+i, 0, 0);

        if (i < N)
        {
            // Levenberg Marquardt term: Ts[i] * levenberg_marquardt * eye()
            if (opts->levenberg_marquardt > 0.0)
                blasfeo_ddiare(nu[i] + nx[i], in->Ts[i] * opts->levenberg_marquardt, mem->qp_in->RSQrq+i, 0, 0);

            // dynamics
            config->dynamics[i]->update_qp_matrices(config->dynamics[i], dims->dynamics[i], in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]);
        }
        else
        {
            // Levenberg Marquardt term: 1.0 * levenberg_marquardt * eye()
            if (opts->levenberg_marquardt > 0.0)
                blasfeo_ddiare(nu[i] + nx[i], opts->levenberg_marquardt, mem->qp_in->RSQrq+i, 0, 0);
        }

        // cost
        config->cost[i]->update_qp_matrices(config->cost[i], dims->cost[i], in->cost[i], opts->cost[i], mem->cost[i], work->cost[i]);

        // constraints
        config->constraints[i]->update_qp_matrices(config->constraints[i], dims->constraints[i], in->constraints[i], opts->constraints[i], mem->constraints[i], work->constraints[i]);
    }

    /* collect stage-wise evaluations */
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
    for (i=0; i <= N; i++)
    {
        // nlp mem: cost_grad
        struct blasfeo_dvec *cost_grad = config->cost[i]->memory_get_grad_ptr(mem->cost[i]);
        blasfeo_dveccp(nv[i], cost_grad, 0, mem->cost_grad + i, 0);

        // nlp mem: dyn_fun
        if (i < N)
        {
            struct blasfeo_dvec *dyn_fun = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);
            blasfeo_dveccp(nx[i + 1], dyn_fun, 0, mem->dyn_fun + i, 0);
        }

        // nlp mem: dyn_adj
        if (i < N)
        {
            struct blasfeo_dvec *dyn_adj = config->dynamics[i]->memory_get_adj_ptr(mem->dynamics[i]);
            blasfeo_dveccp(nu[i] + nx[i], dyn_adj, 0, mem->dyn_adj + i, 0);
        }
        else
        {
            blasfeo_dvecse(nu[N] + nx[N], 0.0, mem->dyn_adj + N, 0);
        }
        // add the x-part of the previous stage's dynamics adjoint
        if (i > 0)
        {
            struct blasfeo_dvec *dyn_adj = config->dynamics[i-1]->memory_get_adj_ptr(mem->dynamics[i-1]);
            blasfeo_daxpy(nx[i], 1.0, dyn_adj, nu[i-1]+nx[i-1], mem->dyn_adj+i, nu[i], mem->dyn_adj+i, nu[i]);
        }

        // nlp mem: ineq_fun
        struct blasfeo_dvec *ineq_fun = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
        blasfeo_dveccp(2 * ni[i], ineq_fun, 0, mem->ineq_fun + i, 0);
        // nlp mem: ineq_adj
        struct blasfeo_dvec *ineq_adj = config->constraints[i]->memory_get_adj_ptr(mem->constraints[i]);
        blasfeo_dveccp(nv[i], ineq_adj, 0, mem->ineq_adj + i, 0);
    }

    for (i = 0; i <= N; i++)
    {
        // TODO(rien) where should the update happen??? move to qp update ???
        // TODO(all): fix and move where appropriate
        // if (i<N)
        // {
        //     ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i];
        //     sim_opts *opts = dynamics_opts->sim_solver;
        //     if (opts->scheme != NULL && opts->scheme->type != exact)
        //     {
        //         for (int_t j = 0; j < nx; j++)
        //             BLASFEO_DVECEL(nlp_mem->cost_grad+i, nu+j) += work->sim_out[i]->grad[j];
        //         for (int_t j = 0; j < nu; j++)
        //             BLASFEO_DVECEL(nlp_mem->cost_grad+i, j) += work->sim_out[i]->grad[nx+j];
        //     }
        // }
    }

    return;
}


// update QP rhs for SQP (step prim var, abs dual var)
// TODO(all): move in dynamics, cost, constraints modules ???
/* Copy the collected gradients (g), dynamics residuals (b) and inequality
 * residuals (d) from NLP memory into the QP right-hand side. */
void ocp_nlp_approximate_qp_vectors_sqp(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    int i;

    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;

#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
    for (i = 0; i <= N; i++)
    {
        // g
        blasfeo_dveccp(nv[i], mem->cost_grad + i, 0, mem->qp_in->rqz + i, 0);

        // b
        if (i < N)
            blasfeo_dveccp(nx[i + 1], mem->dyn_fun + i, 0, mem->qp_in->b + i, 0);

        // d
        blasfeo_dveccp(2 * ni[i], mem->ineq_fun + i, 0, mem->qp_in->d + i, 0);
    }

    return;
}


/* Re-evaluate the stage-0 bounds (initial state embedding) and refresh the
 * corresponding inequality residuals in NLP memory and the QP rhs. */
void ocp_nlp_embed_initial_value(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    int *ni = dims->ni;

    // constraints
    config->constraints[0]->bounds_update(config->constraints[0], dims->constraints[0], in->constraints[0], opts->constraints[0], mem->constraints[0], work->constraints[0]);

    // nlp mem: ineq_fun
    struct blasfeo_dvec *ineq_fun = config->constraints[0]->memory_get_fun_ptr(mem->constraints[0]);
    blasfeo_dveccp(2 * ni[0], ineq_fun, 0, mem->ineq_fun, 0);

    // d
    blasfeo_dveccp(2 * ni[0], mem->ineq_fun, 0, mem->qp_in->d, 0);

    return;
}


/* Evaluate the L1 merit function at the iterate stored in work->tmp_nlp_out:
 * cost + weighted dynamics residuals + weighted constraint violations,
 * with weights taken from work->weight_merit_fun. */
double ocp_nlp_evaluate_merit_fun(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    int i, j;

    int N = dims->N;
    int *nx = dims->nx;
    int *ni = dims->ni;

    double merit_fun = 0.0;

    // compute fun value
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
    for (i=0; i<=N; i++)
    {
        // cost
        config->cost[i]->compute_fun(config->cost[i], dims->cost[i], in->cost[i], opts->cost[i], mem->cost[i], work->cost[i]);
    }
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
    for (i=0; i<N; i++)
    {
        // dynamics
        config->dynamics[i]->compute_fun(config->dynamics[i], dims->dynamics[i], in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]);
    }
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
    for (i=0; i<=N; i++)
    {
        // constr
        config->constraints[i]->compute_fun(config->constraints[i], dims->constraints[i], in->constraints[i], opts->constraints[i], mem->constraints[i], work->constraints[i]);
    }

    double *tmp_fun;
    double tmp;
    struct blasfeo_dvec *tmp_fun_vec;

    // total cost term
    double cost_fun = 0.0;
    for(i=0; i<=N; i++)
    {
        tmp_fun = config->cost[i]->memory_get_fun_ptr(mem->cost[i]);
        cost_fun += *tmp_fun;
    }

    // weighted L1 norm of the dynamics residuals
    double dyn_fun = 0.0;
    for(i=0; i<N; i++)
    {
        tmp_fun_vec = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);
        // printf("\nMerit: dyn will multiply tmp_fun, weights\n");
        // blasfeo_print_exp_tran_dvec(nx[i+1], tmp_fun_vec, 0);
        // blasfeo_print_exp_tran_dvec(nx[i+1], work->weight_merit_fun->pi+i, 0);
        for(j=0; j<nx[i+1]; j++)
        {
            // printf("\n%e %e\n", fabs(BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j)), fabs(BLASFEO_DVECEL(tmp_fun_vec, j)));
            dyn_fun += fabs(BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j)) * fabs(BLASFEO_DVECEL(tmp_fun_vec, j));
        }
    }

    // weighted constraint violation term
    double constr_fun = 0.0;
    for(i=0; i<=N; i++)
    {
        // printf("\ni %d\n", i);
        tmp_fun_vec = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
        // blasfeo_print_exp_tran_dvec(2*ni[i], tmp_fun_vec, 0);
        // blasfeo_print_exp_tran_dvec(2*ni[i], work->weight_merit_fun->lam+i, 0);
        for(j=0; j<2*ni[i]; j++)
        {
            tmp = BLASFEO_DVECEL(tmp_fun_vec, j);
            tmp =
tmp>0.0 ? fabs(tmp) : 0.0; // tmp = constraint violation // printf("IN merit fun: ineq i %d, j %d tmp_fun%e, multiplier %e\n", i, j, BLASFEO_DVECEL(tmp_fun_vec, j), BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j)); constr_fun += fabs(BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j)) * tmp; } } merit_fun = cost_fun + dyn_fun + constr_fun; // printf("\nMerit fun: %e cost: %e dyn: %e constr: %e\n", merit_fun, cost_fun, dyn_fun, constr_fun); return merit_fun; } static double ocp_nlp_line_search(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work) { int i; int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; int *ni = dims->ni; double alpha = opts->step_length; double tmp0, tmp1; int j; #if 0 // Line Search Gianluca version // current point for (i = 0; i <= N; i++) blasfeo_dveccp(nv[i], out->ux+i, 0, work->tmp_nlp_out->ux+i, 0); for (i = 0; i < N; i++) blasfeo_dveccp(nx[i+1], out->pi+i, 0, work->tmp_nlp_out->pi+i, 0); for (i = 0; i <= N; i++) blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->tmp_nlp_out->lam+i, 0); // linear update of algebraic variables using state and input sensitivity // if (i < N) // { // blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0, mem->qp_out->ux+i, 0, 1.0, mem->z_alg+i, 0, out->z+i, 0); // } // initialize weights if(mem->sqp_iter[0]==0) { for (i = 0; i < N; i++) blasfeo_dveccp(nx[i+1], out->pi+i, 0, work->weight_merit_fun->pi+i, 0); for (i = 0; i <= N; i++) blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->weight_merit_fun->lam+i, 0); } // update weigths for (i = 0; i < N; i++) { for(j=0; j<nx[i+1]; j++) { tmp0 = fabs(BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j)); tmp1 = 0.5 * (tmp0 + fabs(BLASFEO_DVECEL(mem->qp_out->pi+i, j))); BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j) = tmp0>tmp1 ? 
tmp0 : tmp1; } } for (i = 0; i <= N; i++) { for(j=0; j<2*ni[i]; j++) { tmp0 = fabs(BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j)); tmp1 = 0.5 * (tmp0 + fabs(BLASFEO_DVECEL(mem->qp_out->lam+i, j))); BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j) = tmp0>tmp1 ? tmp0 : tmp1; } } printf("\n\nmerit fun value\n"); double merit_fun0 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work); double alpha_min = 0.1; for (j=0; j<10 & alpha>alpha_min; j++) { for (i = 0; i <= N; i++) blasfeo_daxpy(nv[i], alpha, mem->qp_out->ux+i, 0, out->ux+i, 0, work->tmp_nlp_out->ux+i, 0); printf("\n%d tmp merit fun value\n", j); double merit_fun1 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work); if(merit_fun1 < merit_fun0) { break; } else { alpha *= 0.7; } } printf("\nalpha %f\n", alpha); #endif if (opts->globalization == MERIT_BACKTRACKING) { // Line search version Jonathan // Following Leineweber1999 // copy out (current iterate) to work->tmp_nlp_out for (i = 0; i <= N; i++) blasfeo_dveccp(nv[i], out->ux+i, 0, work->tmp_nlp_out->ux+i, 0); for (i = 0; i < N; i++) blasfeo_dveccp(nx[i+1], out->pi+i, 0, work->tmp_nlp_out->pi+i, 0); for (i = 0; i <= N; i++) blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->tmp_nlp_out->lam+i, 0); // linear update of algebraic variables using state and input sensitivity // if (i < N) // { // blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0, mem->qp_out->ux+i, 0, 1.0, mem->z_alg+i, 0, out->z+i, 0); // } /* initialize (Leineweber1999 M5.1) */ if (mem->sqp_iter[0]==0) { // initialize weights // equality merit weights = abs( eq multipliers ) for (i = 0; i < N; i++) { for (j=0; j<nx[i+1]; j++) { tmp0 = fabs(BLASFEO_DVECEL(out->pi+i, j)); BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j) = tmp0; } } // printf("merit fun: initialize weights lam\n"); for (i = 0; i <= N; i++) { blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->weight_merit_fun->lam+i, 0); // blasfeo_print_dvec(nx[i+1], work->weight_merit_fun->lam+i, 0); } } else { // 
update weights // printf("merit fun: update weights, sqp_iter = %d\n", mem->sqp_iter[0]); for (i = 0; i < N; i++) { for(j=0; j<nx[i+1]; j++) { // abs(lambda) (LW) tmp0 = fabs(BLASFEO_DVECEL(out->pi+i, j)); // .5 * (abs(lambda) + sigma) tmp1 = 0.5 * (tmp0 + BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j)); BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j) = tmp0>tmp1 ? tmp0 : tmp1; } } for (i = 0; i <= N; i++) { for(j=0; j<2*ni[i]; j++) { // mu (LW) tmp0 = BLASFEO_DVECEL(out->lam+i, j); // .5 * (mu + tau) tmp1 = 0.5 * (tmp0 + BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j)); BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j) = tmp0>tmp1 ? tmp0 : tmp1; } } } if (1) // (mem->sqp_iter[0]!=0) // TODO: why does Leineweber do full step in first SQP iter? { double merit_fun0 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work); double alpha_min = 0.1; // TODO(oj): add alpha_min and alpha_reduction factor [0.7] to options. /* actual Line Search*/ alpha = 1.0; // TODO: check out more advanced step search Leineweber1995 for (j=0; alpha>alpha_min; j++) { for (i = 0; i <= N; i++) blasfeo_daxpy(nv[i], alpha, mem->qp_out->ux+i, 0, out->ux+i, 0, work->tmp_nlp_out->ux+i, 0); // printf("\ntmp merit fun value step search iter: %d", j); double merit_fun1 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work); // TODO(oj): also check Armijo-type condition Leinweber1999 (2.35) if (merit_fun1 < merit_fun0) { break; } else { alpha *= 0.7; } } } printf("\nalpha %f\n", alpha); } return alpha; } void ocp_nlp_update_variables_sqp(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work) { int i; int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; int *nu = dims->nu; int *ni = dims->ni; int *nz = dims->nz; // step length double alpha = ocp_nlp_line_search(config, dims, in, out, opts, mem, work); #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (i = 0; i <= N; 
i++) { // step in primal variables blasfeo_daxpy(nv[i], alpha, mem->qp_out->ux + i, 0, out->ux + i, 0, out->ux + i, 0); // update dual variables if (i < N) { blasfeo_dvecsc(nx[i+1], 1.0-alpha, out->pi+i, 0); blasfeo_daxpy(nx[i+1], alpha, mem->qp_out->pi+i, 0, out->pi+i, 0, out->pi+i, 0); } blasfeo_dvecsc(2*ni[i], 1.0-alpha, out->lam+i, 0); blasfeo_daxpy(2*ni[i], alpha, mem->qp_out->lam+i, 0, out->lam+i, 0, out->lam+i, 0); // update slack values blasfeo_dvecsc(2*ni[i], 1.0-alpha, out->t+i, 0); blasfeo_daxpy(2*ni[i], alpha, mem->qp_out->t+i, 0, out->t+i, 0, out->t+i, 0); // linear update of algebraic variables using state and input sensitivity if (i < N) { blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0, mem->qp_out->ux+i, 0, 1.0, mem->z_alg+i, 0, out->z+i, 0); } } return; } /************************************************ * residuals ************************************************/ int ocp_nlp_res_calculate_size(ocp_nlp_dims *dims) { // extract dims int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; // int *nu = dims->nu; int *ni = dims->ni; int size = sizeof(ocp_nlp_res); size += 3 * (N + 1) * sizeof(struct blasfeo_dvec); // res_stat res_ineq res_comp size += 1 * N * sizeof(struct blasfeo_dvec); // res_eq for (int ii = 0; ii < N; ii++) { size += 1 * blasfeo_memsize_dvec(nv[ii]); // res_stat size += 1 * blasfeo_memsize_dvec(nx[ii + 1]); // res_eq size += 2 * blasfeo_memsize_dvec(2 * ni[ii]); // res_ineq res_comp } size += 1 * blasfeo_memsize_dvec(nv[N]); // res_stat size += 2 * blasfeo_memsize_dvec(2 * ni[N]); // res_ineq res_comp size += 8; // initial align size += 8; // blasfeo_struct align size += 64; // blasfeo_mem align make_int_multiple_of(8, &size); return size; } ocp_nlp_res *ocp_nlp_res_assign(ocp_nlp_dims *dims, void *raw_memory) { char *c_ptr = (char *) raw_memory; // extract sizes int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; // int *nu = dims->nu; int *ni = dims->ni; // initial align align_char_to(8, &c_ptr); // struct 
ocp_nlp_res *res = (ocp_nlp_res *) c_ptr; c_ptr += sizeof(ocp_nlp_res); // blasfeo_struct align align_char_to(8, &c_ptr); // res_stat assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_stat, &c_ptr); // res_eq assign_and_advance_blasfeo_dvec_structs(N, &res->res_eq, &c_ptr); // res_ineq assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_ineq, &c_ptr); // res_comp assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_comp, &c_ptr); // blasfeo_mem align align_char_to(64, &c_ptr); // res_stat for (int ii = 0; ii <= N; ii++) { assign_and_advance_blasfeo_dvec_mem(nv[ii], res->res_stat + ii, &c_ptr); } // res_eq for (int ii = 0; ii < N; ii++) { assign_and_advance_blasfeo_dvec_mem(nx[ii + 1], res->res_eq + ii, &c_ptr); } // res_ineq for (int ii = 0; ii <= N; ii++) { assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], res->res_ineq + ii, &c_ptr); } // res_comp for (int ii = 0; ii <= N; ii++) { assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], res->res_comp + ii, &c_ptr); } res->memsize = ocp_nlp_res_calculate_size(dims); return res; } void ocp_nlp_res_compute(ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_res *res, ocp_nlp_memory *mem) { // extract dims int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; int *nu = dims->nu; int *ni = dims->ni; double tmp_res; // res_stat res->inf_norm_res_stat = 0.0; for (int ii = 0; ii <= N; ii++) { blasfeo_daxpy(nv[ii], -1.0, mem->ineq_adj + ii, 0, mem->cost_grad + ii, 0, res->res_stat + ii, 0); blasfeo_daxpy(nu[ii] + nx[ii], -1.0, mem->dyn_adj + ii, 0, res->res_stat + ii, 0, res->res_stat + ii, 0); blasfeo_dvecnrm_inf(nv[ii], res->res_stat + ii, 0, &tmp_res); res->inf_norm_res_stat = tmp_res > res->inf_norm_res_stat ? 
tmp_res : res->inf_norm_res_stat; } // res_eq res->inf_norm_res_eq = 0.0; for (int ii = 0; ii < N; ii++) { blasfeo_dveccp(nx[ii + 1], mem->dyn_fun + ii, 0, res->res_eq + ii, 0); blasfeo_dvecnrm_inf(nx[ii + 1], res->res_eq + ii, 0, &tmp_res); res->inf_norm_res_eq = tmp_res > res->inf_norm_res_eq ? tmp_res : res->inf_norm_res_eq; } // res_ineq res->inf_norm_res_ineq = 0.0; for (int ii = 0; ii <= N; ii++) { blasfeo_daxpy(2 * ni[ii], 1.0, out->t + ii, 0, mem->ineq_fun + ii, 0, res->res_ineq + ii, 0); blasfeo_dvecnrm_inf(2 * ni[ii], res->res_ineq + ii, 0, &tmp_res); res->inf_norm_res_ineq = tmp_res > res->inf_norm_res_ineq ? tmp_res : res->inf_norm_res_ineq; } // res_comp res->inf_norm_res_comp = 0.0; for (int ii = 0; ii <= N; ii++) { blasfeo_dvecmul(2 * ni[ii], out->lam + ii, 0, out->t + ii, 0, res->res_comp + ii, 0); blasfeo_dvecnrm_inf(2 * ni[ii], res->res_comp + ii, 0, &tmp_res); res->inf_norm_res_comp = tmp_res > res->inf_norm_res_comp ? tmp_res : res->inf_norm_res_comp; } // printf("computed residuals g: %e, b: %e, d: %e, m: %e\n", res->inf_norm_res_stat, res->inf_norm_res_eq, // res->inf_norm_res_ineq, res->inf_norm_res_comp); return; } void ocp_nlp_cost_compute(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work) { // extract dims int N = dims->N; double* tmp_cost = NULL; double total_cost = 0.0; for (int ii = 0; ii <= N; ii++) { // set pointers // NOTE(oj): the cost compute function takes the tmp_ux_ptr as input, // since it is also used for globalization, // especially with primal variables that are NOT current SQP iterates. 
config->cost[ii]->memory_set_tmp_ux_ptr(out->ux+ii, mem->cost[ii]); config->cost[ii]->compute_fun(config->cost[ii], dims->cost[ii], in->cost[ii], opts->cost[ii], mem->cost[ii], work->cost[ii]); tmp_cost = config->cost[ii]->memory_get_fun_ptr(mem->cost[ii]); // printf("cost at stage %d = %e, total = %e\n", ii, *tmp_cost, total_cost); total_cost += *tmp_cost; } mem->cost_value = total_cost; // printf("\ncomputed total cost: %e\n", total_cost); return; }
flux.c
/*
 * Author: Mohammed Al Farhan
 * Email:  mohammed.farhan@kaust.edu.sa
 *
 * Incompressible (artificial-compressibility) flux residual kernel.
 * Assembles the residual r[] from three contributions:
 *   1. interior-edge fluxes (Roe-type upwinding, one thread per edge range),
 *   2. solid-surface pressure faces,
 *   3. free-stream (far-field) boundary nodes.
 */
#include <string.h>
#include <stdint.h>
#include <omp.h>
#include <math.h>

#include "inc/ktime.h"
#include "inc/geometry.h"
#include "inc/ker/phy.h"

/* Each surface triangle contributes 1/3 of its (half) cross-product area
   to a node; MAG1 flips the sign for the normal orientation used below. */
#define MAG0 (0.5 / 3)
#define MAG1 (-MAG0)

/*
 * Calculates the residual.
 *
 * All mesh/state data are reached through the flux context struct:
 *   q[]       primal state per node (p, u, v, w), block size bsz
 *   gradx*[]  nodal gradients used to extrapolate states to face midpoints
 *   r[]       output residual, zeroed here before accumulation
 * BETA is the artificial-compressibility constant from inc/ker/phy.h.
 */
void compute_flux(struct flux *restrict flux)
{
  struct ktime ktime;
  setktime(&ktime);

  const size_t bsz = flux->bsz;
  const size_t nfnodes = flux->nfnodes;
  const size_t dofs = flux->dofs;

  const uint32_t snfc = flux->snfc;

  const double pressure = flux->pressure;
  const double velocity_u = flux->velocity_u;
  const double velocity_v = flux->velocity_v;
  const double velocity_w = flux->velocity_w;

  /* Boundary-face normals (magnitude = face area) and node coordinates. */
  const double *restrict f_xyz0 = flux->f_xyz0;
  const double *restrict f_xyz1 = flux->f_xyz1;
  const double *restrict f_xyz2 = flux->f_xyz2;
  const double *restrict xyz0 = flux->xyz0;
  const double *restrict xyz1 = flux->xyz1;
  const double *restrict xyz2 = flux->xyz2;

  /* ie[t]..ie[t+1] is the edge range owned by thread t; part[] maps each
     node to its owning thread so only the owner commits updates. */
  const uint32_t *restrict ie = flux->ie;
  const uint32_t *restrict part = flux->part;
  const uint32_t *restrict snfic = flux->snfic;

  const uint32_t *restrict n0 = flux->n0;
  const uint32_t *restrict n1 = flux->n1;
  const uint32_t *restrict nfptr = flux->nfptr;
  const uint32_t *restrict sn0 = flux->sn0;
  const uint32_t *restrict sn1 = flux->sn1;
  const uint32_t *restrict sn2 = flux->sn2;

  /* Per-edge unit normal (x0,x1,x2) and edge weight/length x3. */
  const double *restrict x0 = flux->x0;
  const double *restrict x1 = flux->x1;
  const double *restrict x2 = flux->x2;
  const double *restrict x3 = flux->x3;

  const double *restrict q = flux->q;
  const double *restrict gradx0 = flux->gradx0;
  const double *restrict gradx1 = flux->gradx1;
  const double *restrict gradx2 = flux->gradx2;

  double *restrict r = flux->r;
  memset(r, 0, dofs * sizeof(double));
  __assume_aligned(r, 64); /* NOTE(review): Intel-compiler intrinsic; non-portable */

  /* Calculates the fluxes on the face and performs the flux balance */
  #pragma omp parallel
  {
    uint32_t t = omp_get_thread_num();

    /* Static edge partition: thread t walks edges [ie[t], ie[t+1]). */
    uint32_t ie0 = ie[t];
    uint32_t ie1 = ie[t+1];

    uint32_t i;
    for(i = ie0; i < ie1; i++)
    {
      uint32_t node0 = n0[i];
      uint32_t node1 = n1[i];

      double xn = x0[i];
      double yn = x1[i];
      double zn = x2[i];
      double ln = x3[i];

      /* Face midpoint between the two edge endpoints. */
      double xmean = 0.5f * (xyz0[node0] + xyz0[node1]);
      double ymean = 0.5f * (xyz1[node0] + xyz1[node1]);
      double zmean = 0.5f * (xyz2[node0] + xyz2[node1]);

      /*
        Now lets get our other 2 vectors.
        For the first vector, use {1,0,0} and subtract off the component
        in the direction of the face normal. If the inner product of
        {1,0,0} with the normal is close to unity, use {0,1,0} instead.
      */
      double X1, Y1, Z1;
      double dot = xn;
      if(fabs(dot) < 0.95f)
      {
        X1 = 1.f - dot * xn;
        Y1 = -dot * yn;
        Z1 = -dot * zn;
      }
      else
      {
        dot = yn;
        X1 = -dot * xn;
        Y1 = 1.f - dot * yn;
        Z1 = -dot * zn;
      }

      /* Normalize the first vector */
      double size = X1 * X1;
      size += Y1 * Y1;
      size += Z1 * Z1;
      size = sqrt(size);
      X1 /= size;
      Y1 /= size;
      Z1 /= size;

      /* Take cross-product of normal and V1 to get V2 */
      double X2 = yn * Z1;
      X2 -= zn * Y1;
      double Y2 = zn * X1;
      Y2 -= xn * Z1;
      double Z2 = xn * Y1;
      Z2 -= yn * X1;

      /* Get variables on "left" and "right" side of face:
         extrapolate nodal state to the face midpoint with the gradients. */
      double rx = xmean - xyz0[node0];
      double ry = ymean - xyz1[node0];
      double rz = zmean - xyz2[node0];

      uint32_t idx0 = bsz * node0;
      uint32_t idx1 = bsz * node1;

      // Pressure
      double pL = q[idx0 + 0] + gradx0[idx0 + 0] * rx;
      pL += gradx1[idx0 + 0] * ry;
      pL += gradx2[idx0 + 0] * rz;
      // Velocity u
      double uL = q[idx0 + 1] + gradx0[idx0 + 1] * rx;
      uL += gradx1[idx0 + 1] * ry;
      uL += gradx2[idx0 + 1] * rz;
      // Velocity v
      double vL = q[idx0 + 2] + gradx0[idx0 + 2] * rx;
      vL += gradx1[idx0 + 2] * ry;
      vL += gradx2[idx0 + 2] * rz;
      // Velocity w
      double wL = q[idx0 + 3] + gradx0[idx0 + 3] * rx;
      wL += gradx1[idx0 + 3] * ry;
      wL += gradx2[idx0 + 3] * rz;

      /* Left contravariant (face-normal) velocity. */
      double ubarL = xn * uL;
      ubarL += yn * vL;
      ubarL += zn * wL;

      rx = xmean - xyz0[node1];
      ry = ymean - xyz1[node1];
      rz = zmean - xyz2[node1];

      // Pressure
      double pR = q[idx1 + 0] + gradx0[idx1 + 0] * rx;
      pR += gradx1[idx1 + 0] * ry;
      pR += gradx2[idx1 + 0] * rz;
      // Velocity u
      double uR = q[idx1 + 1] + gradx0[idx1 + 1] * rx;
      uR += gradx1[idx1 + 1] * ry;
      uR += gradx2[idx1 + 1] * rz;
      // Velocity v
      double vR = q[idx1 + 2] + gradx0[idx1 + 2] * rx;
      vR += gradx1[idx1 + 2] * ry;
      vR += gradx2[idx1 + 2] * rz;
      // Velocity w
      double wR = q[idx1 + 3] + gradx0[idx1 + 3] * rx;
      wR += gradx1[idx1 + 3] * ry;
      wR += gradx2[idx1 + 3] * rz;

      /* Right contravariant velocity. */
      double ubarR = xn * uR;
      ubarR += yn * vR;
      ubarR += zn * wR;

      /* Compute averages */
      //double p = 0.5f * (pL + pR);
      double u = 0.5f * (uL + uR);
      double v = 0.5f * (vL + vR);
      double w = 0.5f * (wL + wR);

      double ubar = xn * u;
      ubar += yn * v;
      ubar += zn * w;

      /* Intermediate phi terms used by both T and T(inverse). */
      double phi1 = xn * BETA;
      phi1 += u * ubar;
      double phi2 = yn * BETA;
      phi2 += v * ubar;
      double phi3 = zn * BETA;
      phi3 += w * ubar;

      double phi4 = Y2 * phi3;
      phi4 -= Z2 * phi2;
      double phi5 = Z2 * phi1;
      phi5 -= X2 * phi3;
      double phi6 = X2 * phi2;
      phi6 -= Y2 * phi1;

      double phi7 = Z1 * phi2;
      phi7 -= Y1 * phi3;
      double phi8 = X1 * phi3;
      phi8 -= Z1 * phi1;
      double phi9 = Y1 * phi1;
      phi9 -= X1 * phi2;

      /* Artificial-compressibility "speed of sound". */
      double c2 = ubar * ubar + BETA;
      double c = sqrt(c2);

      /* Now compute eigenvalues, eigenvectors, and strengths */
      double eig1 = fabs(ubar);
      double eig2 = fabs(ubar);
      double eig3 = fabs(ubar + c);
      double eig4 = fabs(ubar - c);

      double dp = pR - pL;
      double du = uR - uL;
      double dv = vR - vL;
      double dw = wR - wL;

      /* Components of T(inverse) */
      double ti11 = u * phi4;
      ti11 += v * phi5;
      ti11 += w * phi6;
      ti11 = -ti11 / BETA;
      double ti21 = u * phi7;
      ti21 += v * phi8;
      ti21 += w * phi9;
      ti21 = -ti21 / BETA;
      double ti31 = 0.5f * (c - ubar);
      ti31 /= BETA;
      double ti41 = -0.5f * (c + ubar);
      ti41 /= BETA;

      /* jumps (T(inverse) * dq) */
      double dv1 = ti11 * dp;
      dv1 += phi4 * du;
      dv1 += phi5 * dv;
      dv1 += phi6 * dw;
      dv1 /= c2;
      double dv2 = ti21 * dp;
      dv2 += phi7 * du;
      dv2 += phi8 * dv;
      dv2 += phi9 * dw;
      dv2 /= c2;
      double dv3 = 2.f * ti31 * dp;
      dv3 += xn * du;
      dv3 += yn * dv;
      dv3 += zn * dw;
      dv3 *= 0.5f / c2;
      double dv4 = 2.f * ti41 * dp;
      dv4 += xn * du;
      dv4 += yn * dv;
      dv4 += zn * dw;
      dv4 *= 0.5f / c2;

      /* Now get elements of T */
      double r13 = c * BETA;
      double r23 = u * (ubar + c);
      r23 += xn * BETA;
      double r33 = v * (ubar + c);
      r33 += yn * BETA;
      double r43 = w * (ubar + c);
      r43 += zn * BETA;
      double r14 = -c * BETA;
      double r24 = u * (ubar - c);
      r24 += xn * BETA;
      double r34 = v * (ubar - c);
      r34 += yn * BETA;
      double r44 = w * (ubar - c);
      r44 += zn * BETA;

      /* Calculate T * |lambda| * T(inverse): the upwind dissipation term. */
      double t1 = eig3 * r13 * dv3 + eig4 * r14 * dv4;
      double t2 = eig1 * X1 * dv1 + eig2 * X2 * dv2;
      t2 += eig3 * r23 * dv3 + eig4 * r24 * dv4;
      double t3 = eig1 * Y1 * dv1 + eig2 * Y2 * dv2;
      t3 += eig3 * r33 * dv3 + eig4 * r34 * dv4;
      double t4 = eig1 * Z1 * dv1 + eig2 * Z2 * dv2;
      t4 += eig3 * r43 * dv3 + eig4 * r44 * dv4;

      /* Modify to calculate .5(fl + fr) from nodes
         instead of extrapolated ones */
      double fluxp1 = ln * BETA * ubarL;
      double fluxp2 = ln * (uL * ubarL + xn * pL);
      double fluxp3 = ln * (vL * ubarL + yn * pL);
      double fluxp4 = ln * (wL * ubarL + zn * pL);

      /* Now the right side */
      double fluxm1 = ln * BETA * ubarR;
      double fluxm2 = ln * (uR * ubarR + xn * pR);
      double fluxm3 = ln * (vR * ubarR + yn * pR);
      double fluxm4 = ln * (wR * ubarR + zn * pR);

      /* Central average minus scaled dissipation. */
      double res1 = 0.5f * (fluxp1 + fluxm1 - ln * t1);
      double res2 = 0.5f * (fluxp2 + fluxm2 - ln * t2);
      double res3 = 0.5f * (fluxp3 + fluxm3 - ln * t3);
      double res4 = 0.5f * (fluxp4 + fluxm4 - ln * t4);

      /* Commit only to nodes this thread owns (part[node] == t), so two
         threads sharing an edge endpoint never write the same entry. */
      r[idx0 + 0] = (part[node0] == t) ? (r[idx0 + 0] + res1) : r[idx0 + 0];
      r[idx0 + 1] = (part[node0] == t) ? (r[idx0 + 1] + res2) : r[idx0 + 1];
      r[idx0 + 2] = (part[node0] == t) ? (r[idx0 + 2] + res3) : r[idx0 + 2];
      r[idx0 + 3] = (part[node0] == t) ? (r[idx0 + 3] + res4) : r[idx0 + 3];

      r[idx1 + 0] = (part[node1] == t) ? (r[idx1 + 0] - res1) : r[idx1 + 0];
      r[idx1 + 1] = (part[node1] == t) ? (r[idx1 + 1] - res2) : r[idx1 + 1];
      r[idx1 + 2] = (part[node1] == t) ? (r[idx1 + 2] - res3) : r[idx1 + 2];
      r[idx1 + 3] = (part[node1] == t) ? (r[idx1 + 3] - res4) : r[idx1 + 3];
    }
  }

  /* Solid-surface pressure faces.  NOTE(review): snfic appears to group
     faces so the inner parallel loop touches disjoint nodes within one
     group (a coloring scheme) — confirm against the mesh setup, otherwise
     the unguarded r[] updates below would race. */
  uint32_t i;
  for(i = 0; i < snfc; i++)
  {
    uint32_t if0 = snfic[i];
    uint32_t if1 = snfic[i+1];

    uint32_t j;
#pragma omp parallel for
    for(j = if0; j < if1; j++)
    {
      uint32_t node0 = sn0[j];
      uint32_t node1 = sn1[j];
      uint32_t node2 = sn2[j];

      double p1 = q[bsz * node0];
      double p2 = q[bsz * node1];
      double p3 = q[bsz * node2];

      double ax = xyz0[node1] - xyz0[node0];
      double ay = xyz1[node1] - xyz1[node0];
      double az = xyz2[node1] - xyz2[node0];
      double bx = xyz0[node2] - xyz0[node0];
      double by = xyz1[node2] - xyz1[node0];
      double bz = xyz2[node2] - xyz2[node0];

      /*
        Normal points away from grid interior.
        Magnitude is 1/3 area of surface triangle.
      */
      double xn = ay * bz;
      xn -= az * by;
      xn *= MAG1;
      double yn = ax * bz;
      yn -= az * bx;
      yn *= MAG0;
      double zn = ax * by;
      zn -= ay * bx;
      zn *= MAG1;

      /* Weighted pressures: 3/4 own node + 1/8 each neighbor. */
      double pa = 0.125f * (p2 + p3);
      pa += 0.75f * p1;
      double pb = 0.125f * (p3 + p1);
      pb += 0.75f * p2;
      double pc = 0.125f * (p1 + p2);
      pc += 0.75f * p3;

      uint32_t idx;
      idx = bsz * node0;
      r[idx + 1] += xn * pa;
      r[idx + 2] += yn * pa;
      r[idx + 3] += zn * pa;
      idx = bsz * node1;
      r[idx + 1] += xn * pb;
      r[idx + 2] += yn * pb;
      r[idx + 3] += zn * pb;
      idx = bsz * node2;
      r[idx + 1] += xn * pc;
      r[idx + 2] += yn * pc;
      r[idx + 3] += zn * pc;
    }
  }

  /* Do the free boundaries */
#pragma omp parallel for
  for(i = 0; i < nfnodes; i++)
  {
    uint32_t n = nfptr[i];

    /*
      Get normal and "other" 2 vectors. Remember that fxn, fyn and fzn
      have the magnitude of the face contained in them (area).
    */
    double xn = f_xyz0[i];
    double yn = f_xyz1[i];
    double zn = f_xyz2[i];

    double area = xn * xn;
    area += yn * yn;
    area += zn * zn;
    area = sqrt(area);

    xn /= area;
    yn /= area;
    zn /= area;

    /*
      Now lets get our other 2 vectors.
      For the first vector, use {1,0,0} and subtract off the component
      in the direction of the face normal. If the inner product of
      {1,0,0} with the normal is close to unity, use {0,1,0} instead.
    */
    double X1, Y1, Z1;
    double dot = xn;
    if(fabs(dot) < 0.95f)
    {
      X1 = 1.f - dot * xn;
      Y1 = -dot * yn;
      Z1 = -dot * zn;
    }
    else
    {
      dot = yn;
      X1 = -dot * xn;
      Y1 = 1.f - dot * yn;
      Z1 = -dot * zn;
    }

    /* Normalize the first vector (V1) */
    double size = X1 * X1;
    size += Y1 * Y1;
    size += Z1 * Z1;
    size = sqrt(size);
    X1 /= size;
    Y1 /= size;
    Z1 /= size;

    /* Take cross-product of normal with V1 to get V2 */
    double X2 = yn * Z1;
    X2 -= zn * Y1;
    double Y2 = zn * X1;
    Y2 -= xn * Z1;
    double Z2 = xn * Y1;
    Z2 -= yn * X1;

    /* Calculate elements of T and T(inverse) evaluated at free-stream */
    double ubar0 = xn * velocity_u;
    ubar0 += yn * velocity_v;
    ubar0 += zn * velocity_w;

    double c20 = ubar0 * ubar0 + BETA;
    double c0 = sqrt(c20);

    double phi1 = xn * BETA;
    phi1 += velocity_u * ubar0;
    double phi2 = yn * BETA;
    phi2 += velocity_v * ubar0;
    double phi3 = zn * BETA;
    phi3 += velocity_w * ubar0;

    double phi4 = Y2 * phi3;
    phi4 -= Z2 * phi2;
    double phi5 = Z2 * phi1;
    phi5 -= X2 * phi3;
    double phi6 = X2 * phi2;
    phi6 -= Y2 * phi1;

    double phi7 = Z1 * phi2;
    phi7 -= Y1 * phi3;
    double phi8 = X1 * phi3;
    phi8 -= Z1 * phi1;
    double phi9 = Y1 * phi1;
    phi9 -= X1 * phi2;

    double t13 = c0 * BETA;
    double t23 = velocity_u * (ubar0 + c0);
    t23 += xn * BETA;
    double t33 = velocity_v * (ubar0 + c0);
    t33 += yn * BETA;
    double t43 = velocity_w * (ubar0 + c0);
    t43 += zn * BETA;
    double t14 = -c0 * BETA;
    double t24 = velocity_u * (ubar0 - c0);
    t24 += xn * BETA;
    double t34 = velocity_v * (ubar0 - c0);
    t34 += yn * BETA;
    double t44 = velocity_w * (ubar0 - c0);
    t44 += zn * BETA;

    double ti11 = velocity_u * phi4;
    ti11 += velocity_v * phi5;
    ti11 += velocity_w * phi6;
    ti11 = -ti11/BETA;
    double ti21 = velocity_u * phi7;
    ti21 += velocity_v * phi8;
    ti21 += velocity_w * phi9;
    ti21 = -ti21/BETA;
    double ti31 = 0.5f * (c0 - ubar0);
    ti31 /= BETA;
    double ti41 = -0.5f * (c0 + ubar0);
    ti41 /= BETA;

    /* Now, get the variables on the "inside" */
    double pi = q[bsz * n + 0];
    double ui = q[bsz * n + 1];
    double vi = q[bsz * n + 2];
    double wi = q[bsz * n + 3];

    double un = xn * ui;
    un += yn * vi;
    un += zn * wi;

    /* If ubar is negative (inflow), take the reference condition
       from outside (free stream); otherwise use the interior state. */
    double pr, ur, vr, wr;
    if(un > 0.f)
    {
      pr = pi;
      ur = ui;
      vr = vi;
      wr = wi;
    }
    else
    {
      pr = pressure;
      ur = velocity_u;
      vr = velocity_v;
      wr = velocity_w;
    }

    /* Set rhs */
    double rhs1 = ti11 * pr;
    rhs1 += phi4 * ur;
    rhs1 += phi5 * vr;
    rhs1 += phi6 * wr;
    rhs1 /= c20;
    double rhs2 = ti21 * pr;
    rhs2 += phi7 * ur;
    rhs2 += phi8 * vr;
    rhs2 += phi9 * wr;
    rhs2 /= c20;
    double rhs3 = 2.f * ti31 * pi;
    rhs3 += xn * ui;
    rhs3 += yn * vi;
    rhs3 += zn * wi;
    rhs3 = 0.5f * rhs3 / c20;
    double rhs4 = 2.f * ti41 * pressure;
    rhs4 += xn * velocity_u;
    rhs4 += yn * velocity_v;
    rhs4 += zn * velocity_w;
    rhs4 = 0.5f * rhs4 / c20;

    /* Now do matrix multiplication to get values on boundary */
    double pb = t13 * rhs3;
    pb += t14 * rhs4;
    double ub = X1 * rhs1;
    ub += X2 * rhs2;
    ub += t23 * rhs3;
    ub += t24 * rhs4;
    double vb = Y1 * rhs1;
    vb += Y2 * rhs2;
    vb += t33 * rhs3;
    vb += t34 * rhs4;
    double wb = Z1 * rhs1;
    wb += Z2 * rhs2;
    wb += t43 * rhs3;
    wb += t44 * rhs4;

    double ubar = xn * ub;
    ubar += yn * vb;
    ubar += zn * wb;

    /* Accumulate boundary flux (scaled back by the true face area). */
    uint32_t idx = bsz * n;
    r[idx + 0] += area * BETA * ubar;
    r[idx + 1] += area * (ub * ubar + xn * pb);
    r[idx + 2] += area * (vb * ubar + yn * pb);
    r[idx + 3] += area * (wb * ubar + zn * pb);
  }

  compute_time(&ktime, flux->t);
}
EPI_fmt_plug.c
/* * EPiServer module for john 1.7.2 (and possibly later) * Uses hashes/salts found in the tblSID of an EPiServer database installation * * Created by Johannes Gumbel (johannes [at] iforge.cc) * * If you have any questions as to how a function incorporates with john, please refer to formats.h of john * * version 0.1 released on 10 jan 2007 * * See doc/README.format-epi for information on the input file format. * * Updated Dec, 2014, JimF. Added OMP, and allowed more than one hash to be * processed at once (OMP_SCALE stuff). */ #if FMT_EXTERNS_H extern struct fmt_main fmt_EPI; #elif FMT_REGISTERS_H john_register_one(&fmt_EPI); #else #include <string.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "sha.h" #ifdef _OPENMP #include <omp.h> #ifdef __MIC__ #ifndef OMP_SCALE #define OMP_SCALE 8192 #endif #else #ifndef OMP_SCALE #define OMP_SCALE 32768 // Tuned, K8-dual HT #endif #endif // __MIC__ #endif #include "memdbg.h" #define PLAINTEXT_LENGTH 125 #define BINARY_LENGTH 20 #define BINARY_ALIGN sizeof(ARCH_WORD_32) #define SALT_LENGTH 30 #define SALT_ALIGN 4 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static int (*key_len); static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[BINARY_LENGTH / 4]; static char global_salt[SALT_LENGTH+1]; static struct fmt_tests global_tests[] = { {"0x5F1D84A6DE97E2BEFB637A3CB5318AFEF0750B856CF1836BD1D4470175BE 0x4D5EFDFA143EDF74193076F174AC47CEBF2F417F", "Abc.!23"}, // new tests from pass_gen.pl {"0x4F5233704337716F63526A7066344B52784F7A6363316750516A72335668 0x7346DA02479E55973E052FC9A173A3FEA4644FF8","test1"}, {"0x76706335715834565A55784662304F3367756350684F634447777A313642 0xDBD3D2764A376673164962E3EE2AE95AB6ED2759","thatsworking"}, {"0x6F724166466172354A7431316A4842746878434B6632744945574A37524A 0xE1ADE625160BB27C16184795715F1C9EF30C45B0","test3"}, {NULL} }; static void init(struct fmt_main *self) { #if defined (_OPENMP) int omp_t = omp_get_max_threads(); 
self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif key_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*key_len)); saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); MEM_FREE(key_len); } /* * Expects ciphertext of format: 0xHEX*60 0xHEX*40 */ static int valid(char *ciphertext, struct fmt_main *self) { unsigned int len, n; if(!ciphertext) return 0; len = strlen(ciphertext); if(len != 105) return 0; // check fixed positions if(ciphertext[0] != '0' || ciphertext[1] != 'x' || ciphertext[62] != ' ' || ciphertext[63] != '0' || ciphertext[64] != 'x') return 0; for(n = 2; n < 62 && atoi16u[ARCH_INDEX(ciphertext[n])] != 0x7F; ++n); if (n < 62) return 0; for(n = 65; n < 105 && atoi16u[ARCH_INDEX(ciphertext[n])] != 0x7F; ++n); return n == len; } static void _tobin(char* dst, char *src, unsigned int len) { unsigned int n; if(src[0] == '0' && src[1] == 'x') src += sizeof(char)*2; for(n = 0; n < len; ++n) dst[n] = atoi16[ARCH_INDEX(src[n*2])]<<4 | atoi16[ARCH_INDEX(src[n*2+1])]; } static void* get_binary(char *ciphertext) { static ARCH_WORD bin[(BINARY_LENGTH + sizeof(ARCH_WORD) - 1) / sizeof(ARCH_WORD)]; _tobin((char*)bin, (char*)(ciphertext+65), BINARY_LENGTH); return bin; } static void* get_salt(char *ciphertext) { static ARCH_WORD salt[(SALT_LENGTH + sizeof(ARCH_WORD) - 1) / sizeof(ARCH_WORD)]; _tobin((char*)salt, (char*)(ciphertext+2), sizeof(salt)); return salt; } static void set_salt(void *salt) { memcpy(global_salt, salt, SALT_LENGTH); } static void set_key(char *key, int index) { if(!key) return; key_len[index] = strlen(key) + 1; strcpy(saved_key[index], key); } static char* get_key(int index) { return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int i=0; #ifdef _OPENMP 
#pragma omp parallel for private(i) shared(global_salt, saved_key, key_len, crypt_out) #endif #if defined (_OPENMP) || MAX_KEYS_PER_CRYPT>1 for (i = 0; i < count; ++i) #endif { SHA_CTX ctx; SHA1_Init(&ctx); SHA1_Update(&ctx, (unsigned char*)global_salt, SALT_LENGTH-1); SHA1_Update(&ctx, saved_key[i], key_len[i]); SHA1_Final((unsigned char*)crypt_out[i], &ctx); } return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if ( ((ARCH_WORD_32*)binary)[0] == crypt_out[index][0] ) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_LENGTH); } static int cmp_exact(char *source, int index) { return 1; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static int salt_hash(void *salt) { return *(ARCH_WORD_32*)salt & (SALT_HASH_SIZE - 1); } // Define john integration struct fmt_main fmt_EPI = { { // fmt_params "EPI", "EPiServer SID", "SHA1 32/" ARCH_BITS_STR, "", // benchmark comment 0, // benchmark length 0, PLAINTEXT_LENGTH, BINARY_LENGTH, BINARY_ALIGN, SALT_LENGTH, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, { NULL }, global_tests }, { // fmt_methods init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, 
fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
convolution_3x3_packn_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Transform 3x3 fp32 kernel weights into 8x8 Winograd F(6,3) tiles, then
// interleave into packn-fp16 layout for the fp16sa conv below.
// NOTE(review): the interleave loops assume inch and outch are multiples of
// packn (csrr_vlenb()/2) — remainders would be silently dropped; confirm at caller.
static void conv3x3s1_winograd63_transform_kernel_packn_fp16sa_rvv(const Mat& kernel, Mat& kernel_tm_packn, int inch, int outch, const Option& opt)
{
    const int packn = csrr_vlenb() / 2;

    // winograd63 transform kernel
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = pb-pa-inch/pa-64-outch/pb
    kernel_tm_packn.create(inch / packn, 64, outch / packn, (size_t)2u * packn * packn, packn * packn);

    for (int q = 0; q + (packn - 1) < outch; q += packn)
    {
        Mat g0 = kernel_tm_packn.channel(q / packn);

        for (int k = 0; k < 64; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);

            for (int p = 0; p + (packn - 1) < inch; p += packn)
            {
                for (int i = 0; i < packn; i++)
                {
                    for (int j = 0; j < packn; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);
                        g00[0] = (__fp16)k00[k];
                        g00++;
                    }
                }
            }
        }
    }
}

// Winograd F(6,3) 3x3 stride-1 convolution on packn-fp16 data:
// pad input to 6n+2, transform input into 8x8 tiles, batched dot product over
// the 64 tile positions, inverse-transform, then crop back to the output size.
static void conv3x3s1_winograd63_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 6n+2
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 5) / 6 * 6;
    outh = (outh + 5) / 6 * 6;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tiles = outw / 6;
        int h_tiles = outh / 6;
        const int tiles = w_tiles * h_tiles;

        bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        conv3x3s1_winograd63_transform_input_packn_fp16sa_rvv(bottom_blob_bordered, bottom_blob_tm, opt);
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;

        const int tiles = h_tm / 8 * w_tm / 8;

        // permute
        // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        // tiles are regrouped in blocks of 8/4/2/1 so the GEMM below can
        // consume 8 tiles per inner loop when possible
        Mat bottom_blob_tm2;
        if (tiles >= 8)
            bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 4)
            bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 2)
            bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 64; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);

            // tile
            int i = 0;
            for (; i + 7 < tiles; i += 8)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if C906
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr[2] = r0[l + packn * 2];
                        tmpptr[3] = r0[l + packn * 3];
                        tmpptr[4] = r0[l + packn * 4];
                        tmpptr[5] = r0[l + packn * 5];
                        tmpptr[6] = r0[l + packn * 6];
                        tmpptr[7] = r0[l + packn * 7];
                        tmpptr += 8;
                    }

                    r0 += bottom_blob_tm.cstep * packn;
#else
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl);
                    vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl);
                    vfloat16m1_t _val4 = vle16_v_f16m1(r0 + packn * 4, vl);
                    vfloat16m1_t _val5 = vle16_v_f16m1(r0 + packn * 5, vl);
                    vfloat16m1_t _val6 = vle16_v_f16m1(r0 + packn * 6, vl);
                    vfloat16m1_t _val7 = vle16_v_f16m1(r0 + packn * 7, vl);
                    vsseg8e16_v_f16m1x8(tmpptr, vcreate_f16m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl);

                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 8;
#endif
                }
            }
            for (; i + 3 < tiles; i += 4)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if C906
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr[2] = r0[l + packn * 2];
                        tmpptr[3] = r0[l + packn * 3];
                        tmpptr += 4;
                    }

                    r0 += bottom_blob_tm.cstep * packn;
#else
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl);
                    vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl);
                    vsseg4e16_v_f16m1x4(tmpptr, vcreate_f16m1x4(_val0, _val1, _val2, _val3), vl);

                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 4;
#endif
                }
            }
            for (; i + 1 < tiles; i += 2)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if C906
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr += 2;
                    }

                    r0 += bottom_blob_tm.cstep * packn;
#else
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vsseg2e16_v_f16m1x2(tmpptr, vcreate_f16m1x2(_val0, _val1), vl);

                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 2;
#endif
                }
            }
            for (; i < tiles; i++)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
                    vfloat16m1_t _val = vle16_v_f16m1(r0, vl);
                    vse16_v_f16m1(tmpptr, _val, vl);

                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn;
                }
            }
        }

        bottom_blob_tm = Mat();
        // permute end

        top_blob_tm.create(tiles, 64, outch, 2u * elempack, elempack, opt.workspace_allocator);

        // GEMM: for every output channel, accumulate over inch*packn scalar
        // weights, broadcasting each input scalar against a packn-wide weight row
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            __fp16* output0_tm = top_blob_tm.channel(p);

            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int r = 0; r < 64; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                int i = 0;
                for (; i + 7 < tiles; i += 8)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum4 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum5 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum6 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum7 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        __fp16 val2 = *r0++;
                        __fp16 val3 = *r0++;
                        __fp16 val4 = *r0++;
                        __fp16 val5 = *r0++;
                        __fp16 val6 = *r0++;
                        __fp16 val7 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);
                        _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl);
                        _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl);
                        _sum4 = vfmacc_vf_f16m1(_sum4, val4, _w0, vl);
                        _sum5 = vfmacc_vf_f16m1(_sum5, val5, _w0, vl);
                        _sum6 = vfmacc_vf_f16m1(_sum6, val6, _w0, vl);
                        _sum7 = vfmacc_vf_f16m1(_sum7, val7, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);
                    vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl);
                    vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl);
                    vse16_v_f16m1(output0_tm + packn * 4, _sum4, vl);
                    vse16_v_f16m1(output0_tm + packn * 5, _sum5, vl);
                    vse16_v_f16m1(output0_tm + packn * 6, _sum6, vl);
                    vse16_v_f16m1(output0_tm + packn * 7, _sum7, vl);

                    output0_tm += packn * 8;
                }
                for (; i + 3 < tiles; i += 4)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        __fp16 val2 = *r0++;
                        __fp16 val3 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);
                        _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl);
                        _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);
                    vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl);
                    vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl);

                    output0_tm += packn * 4;
                }
                for (; i + 1 < tiles; i += 2)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);

                    output0_tm += packn * 2;
                }
                for (; i < tiles; i++)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum = vfmacc_vf_f16m1(_sum, val, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum, vl);

                    output0_tm += packn;
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
    }
    {
        conv3x3s1_winograd63_transform_output_packn_fp16sa_rvv(top_blob_tm, top_blob_bordered, bias, opt);
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}

// Transform 3x3 fp32 kernel weights into 6x6 Winograd F(4,3) tiles, then
// interleave into packn-fp16 layout (same scheme as the F(6,3) variant above).
static void conv3x3s1_winograd43_transform_kernel_packn_fp16sa_rvv(const Mat& kernel, Mat& kernel_tm_packn, int inch, int outch, const Option& opt)
{
    const int packn = csrr_vlenb() / 2;

    // winograd43 transform kernel
    Mat kernel_tm(6 * 6, inch, outch);

    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h
            float tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U
            for (int j = 0; j < 6; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 36-inch-outch
    // dst = pb-pa-inch/pa-36-outch/pb
    kernel_tm_packn.create(inch / packn, 36, outch / packn, (size_t)2u * packn * packn, packn * packn);

    for (int q = 0; q + (packn - 1) < outch; q += packn)
    {
        Mat g0 = kernel_tm_packn.channel(q / packn);

        for (int k = 0; k < 36; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);

            for (int p = 0; p + (packn - 1) < inch; p += packn)
            {
                for (int i = 0; i < packn; i++)
                {
                    for (int j = 0; j < packn; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);
                        g00[0] = (__fp16)k00[k];
                        g00++;
                    }
                }
            }
        }
    }
}

// Winograd F(4,3) 3x3 stride-1 convolution on packn-fp16 data: identical
// pipeline to the F(6,3) variant but with 6x6 (36-position) tiles and 4n+2 padding.
static void conv3x3s1_winograd43_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tiles = outw / 4;
        int h_tiles = outh / 4;
        const int tiles = w_tiles * h_tiles;

        bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
        conv3x3s1_winograd43_transform_input_packn_fp16sa_rvv(bottom_blob_bordered, bottom_blob_tm, opt);
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = h_tm / 6 * w_tm / 6;

        // permute
        // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
        Mat bottom_blob_tm2;
        if (tiles >= 8)
            bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 4)
            bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 2)
            bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 36; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);

            // tile
            int i = 0;
            for (; i + 7 < tiles; i += 8)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if C906
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr[2] = r0[l + packn * 2];
                        tmpptr[3] = r0[l + packn * 3];
                        tmpptr[4] = r0[l + packn * 4];
                        tmpptr[5] = r0[l + packn * 5];
                        tmpptr[6] = r0[l + packn * 6];
                        tmpptr[7] = r0[l + packn * 7];
                        tmpptr += 8;
                    }

                    r0 += bottom_blob_tm.cstep * packn;
#else
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl);
                    vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl);
                    vfloat16m1_t _val4 = vle16_v_f16m1(r0 + packn * 4, vl);
                    vfloat16m1_t _val5 = vle16_v_f16m1(r0 + packn * 5, vl);
                    vfloat16m1_t _val6 = vle16_v_f16m1(r0 + packn * 6, vl);
                    vfloat16m1_t _val7 = vle16_v_f16m1(r0 + packn * 7, vl);
                    vsseg8e16_v_f16m1x8(tmpptr, vcreate_f16m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl);

                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 8;
#endif
                }
            }
            for (; i + 3 < tiles; i += 4)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if C906
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr[2] = r0[l + packn * 2];
                        tmpptr[3] = r0[l + packn * 3];
                        tmpptr += 4;
                    }

                    r0 += bottom_blob_tm.cstep * packn;
#else
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl);
                    vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl);
                    vsseg4e16_v_f16m1x4(tmpptr, vcreate_f16m1x4(_val0, _val1, _val2, _val3), vl);

                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 4;
#endif
                }
            }
            for (; i + 1 < tiles; i += 2)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if C906
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr += 2;
                    }

                    r0 += bottom_blob_tm.cstep * packn;
#else
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vsseg2e16_v_f16m1x2(tmpptr, vcreate_f16m1x2(_val0, _val1), vl);

                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 2;
#endif
                }
            }
            for (; i < tiles; i++)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
                    vfloat16m1_t _val = vle16_v_f16m1(r0, vl);
                    vse16_v_f16m1(tmpptr, _val, vl);

                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn;
                }
            }
        }

        bottom_blob_tm = Mat();
        // permute end

        top_blob_tm.create(tiles, 36, outch, 2u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            __fp16* output0_tm = top_blob_tm.channel(p);

            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int r = 0; r < 36; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                int i = 0;
                for (; i + 7 < tiles; i += 8)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum4 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum5 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum6 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum7 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        __fp16 val2 = *r0++;
                        __fp16 val3 = *r0++;
                        __fp16 val4 = *r0++;
                        __fp16 val5 = *r0++;
                        __fp16 val6 = *r0++;
                        __fp16 val7 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);
                        _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl);
                        _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl);
                        _sum4 = vfmacc_vf_f16m1(_sum4, val4, _w0, vl);
                        _sum5 = vfmacc_vf_f16m1(_sum5, val5, _w0, vl);
                        _sum6 = vfmacc_vf_f16m1(_sum6, val6, _w0, vl);
                        _sum7 = vfmacc_vf_f16m1(_sum7, val7, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);
                    vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl);
                    vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl);
                    vse16_v_f16m1(output0_tm + packn * 4, _sum4, vl);
                    vse16_v_f16m1(output0_tm + packn * 5, _sum5, vl);
                    vse16_v_f16m1(output0_tm + packn * 6, _sum6, vl);
                    vse16_v_f16m1(output0_tm + packn * 7, _sum7, vl);

                    output0_tm += packn * 8;
                }
                for (; i + 3 < tiles; i += 4)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        __fp16 val2 = *r0++;
                        __fp16 val3 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);
                        _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl);
                        _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);
                    vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl);
                    vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl);

                    output0_tm += packn * 4;
                }
                for (; i + 1 < tiles; i += 2)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);

                    output0_tm += packn * 2;
                }
                for (; i < tiles; i++)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum = vfmacc_vf_f16m1(_sum, val, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum, vl);

                    output0_tm += packn;
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
    }
    {
        conv3x3s1_winograd43_transform_output_packn_fp16sa_rvv(top_blob_tm, top_blob_bordered, bias, opt);
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
krb5-18_fmt_plug.c
/*
 * KRB5 - Enctype 18 (aes256-cts-hmac-sha1-96) cracker patch for JtR
 * Created on August of 2012 by Mougey Camille (CEA/DAM) & Lalet Pierre (CEA/DAM)
 *
 * This format is one of formats saved in KDC database and used during the authentication part
 *
 * This software is Copyright (c) 2012, Mougey Camille (CEA/DAM)
 * Lalet Pierre (CEA/DAM)
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted.
 *
 * Input Format :
 * - user:$krb18$REALMname$hash
 * - user:REALMname$hash
 *
 * Format rewritten Dec, 2014, without use of -lkrb5, by JimF. Now we use 'native' JtR
 * pbkdf2-hmac-sha1() and simple call to 2 AES limb encrypt for entire process. Very
 * simple, and 10x faster, and no obscure -lkrb5 dependency
 */
#if AC_BUILT
#include "autoconfig.h"
#endif

#if FMT_EXTERNS_H
extern struct fmt_main fmt_krb5_18;
#elif FMT_REGISTERS_H
john_register_one(&fmt_krb5_18);
#else

#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "sse-intrinsics.h"
#include "pbkdf2_hmac_sha1.h"
#include <openssl/aes.h>

#ifdef _OPENMP
#include <omp.h>
#ifdef MMX_COEF
#define OMP_SCALE            8
#else
#define OMP_SCALE            32
#endif
#endif

#include "memdbg.h"

#define FORMAT_LABEL         "krb5-18"
#define FORMAT_NAME          "Kerberos 5 db etype 18 aes256-cts-hmac-sha1-96"
#define FORMAT_TAG           "$krb18$"
#define TAG_LENGTH           7
#if MMX_COEF
#define ALGORITHM_NAME       SHA1_ALGORITHM_NAME
#else
#if ARCH_BITS >= 64
#define ALGORITHM_NAME       "64/" ARCH_BITS_STR
#else
#define ALGORITHM_NAME       "32/" ARCH_BITS_STR
#endif
#endif
#define BENCHMARK_COMMENT    ""
#define BENCHMARK_LENGTH     -1
#define PLAINTEXT_LENGTH     64
#define CIPHERTEXT_LENGTH    64
#define BINARY_SIZE          32
#define BINARY_ALIGN         4
/* the salt is the "REALMname" string that precedes the '$' in the hash */
#define SALT_SIZE            CIPHERTEXT_LENGTH
#define SALT_ALIGN           1
#ifdef MMX_COEF
#define MIN_KEYS_PER_CRYPT   MMX_COEF
#define MAX_KEYS_PER_CRYPT   SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT   1
#define MAX_KEYS_PER_CRYPT   1
#endif

static struct fmt_tests kinit_tests[] = {
	{"OLYMPE.OLtest$214bb89cf5b8330112d52189ab05d9d05b03b5a961fe6d06203335ad5f339b26", "password"},
	{FORMAT_TAG "OLYMPE.OLtest$214bb89cf5b8330112d52189ab05d9d05b03b5a961fe6d06203335ad5f339b26", "password"},
	{NULL}
};

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static char saved_salt[SALT_SIZE+1];
static ARCH_WORD_32 (*crypt_out)[16];

/* Scale key count for OMP and allocate per-candidate buffers (tiny allocator,
 * never freed — standard for this plugin generation, hence fmt_default_done). */
static void init(struct fmt_main *pFmt)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();

	pFmt->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	pFmt->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) * pFmt->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * pFmt->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

/* Accepts "[$krb18$]salt$hex64"; hex digits must be lowercase. */
static int valid(char *ciphertext, struct fmt_main *pFmt)
{
	char *p, *q;

	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	p = strstr(p, "$");
	if(p == NULL)
		return 0;
	q = ciphertext;
	if(p - q > SALT_SIZE) /* check salt length */
		return 0;
	q = ++p;
	while (atoi16[ARCH_INDEX(*q)] != 0x7F) {
		if (*q >= 'A' && *q <= 'F') /* support lowercase only */
			return 0;
		q++;
	}

	return !*q && q - p == CIPHERTEXT_LENGTH;
}

/* Canonicalize: always prepend the $krb18$ tag. */
static char *split(char *ciphertext, int index, struct fmt_main *pFmt)
{
	static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + SALT_SIZE + 1];

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		return ciphertext;

	memcpy(out, FORMAT_TAG, TAG_LENGTH);
	strnzcpyn(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + SALT_SIZE + 1);
	return out;
}

/* Salt is everything between the tag and the '$' (expects split() output). */
static void *get_salt(char *ciphertext)
{
	static char out[SALT_SIZE+1];
	char *p, *q;

	memset(&out, 0, sizeof(out));
	p = ciphertext + TAG_LENGTH;
	q = strstr(p, "$");
	strncpy(out, p, q-p);
	out[q-p] = 0;

	return out;
}

static void set_salt(void *salt)
{
	strcpy(saved_salt, salt);
}

static void *get_binary(char *ciphertext)
{
	static unsigned char *out;
	char *p;
	int i = 0;

	if (!out)
		out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);

	p = ciphertext;
	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	p = strstr(p, "$") + 1;
	for (; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

/*
 * key = PBKDF2-HMAC-SHA1(password, salt, 4096 iters, 32 bytes), then two AES-256
 * limbs over the Kerberos "kerberos{...}" n-fold constant (CBC chained by hand).
 */
static int crypt_all(int *pcount, struct db_salt *_salt)
{
	int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		unsigned char key[32], i;
		AES_KEY aeskey;
#ifdef SSE_GROUP_SZ_SHA1
		/* SSE path: derive SSE_GROUP_SZ_SHA1 keys at once */
		ARCH_WORD_32 Key[SSE_GROUP_SZ_SHA1][32/4];
		int lens[SSE_GROUP_SZ_SHA1];
		unsigned char *pin[SSE_GROUP_SZ_SHA1];
		union {
			ARCH_WORD_32 *pout[SSE_GROUP_SZ_SHA1];
			unsigned char *poutc;
		} x;
		for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			x.pout[i] = Key[i];
		}
		pbkdf2_sha1_sse((const unsigned char **)pin, lens, (const unsigned char*)saved_salt, strlen(saved_salt), 4096, &(x.poutc), 32, 0);
#else
		pbkdf2_sha1((const unsigned char*)saved_key[index], strlen(saved_key[index]), (const unsigned char*)saved_salt, strlen(saved_salt), 4096, key, 32, 0);
#endif
		i=0;
#ifdef SSE_GROUP_SZ_SHA1
		for (; i < SSE_GROUP_SZ_SHA1; ++i) {
		memcpy(key, Key[i], 32);
#endif
		AES_set_encrypt_key(key, 256, &aeskey);
		AES_encrypt((unsigned char*)"kerberos{\x9b[+\x93\x13+\x93", (unsigned char*)(crypt_out[index+i]), &aeskey);
		AES_encrypt((unsigned char*)(crypt_out[index+i]), (unsigned char*)&crypt_out[index+i][4], &aeskey);
#ifdef SSE_GROUP_SZ_SHA1
		}
#endif
	}
	return count;
}

static int cmp_all(void *binary, int count)
{
	int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (; index < count; index++)
#endif
		if (crypt_out[index][0] == *(ARCH_WORD_32*)binary)
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Truncating copy: candidates longer than PLAINTEXT_LENGTH are clamped. */
static void set_key(char *key, int index)
{
	int saved_key_length = strlen(key);

	if (saved_key_length > PLAINTEXT_LENGTH)
		saved_key_length = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_key_length);
	saved_key[index][saved_key_length] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

static int get_hash_0(int index) { return *((ARCH_WORD_32*)&crypt_out[index]) & 0xf; }
static int get_hash_1(int index) { return *((ARCH_WORD_32*)&crypt_out[index]) & 0xff; }
static int get_hash_2(int index) { return *((ARCH_WORD_32*)&crypt_out[index]) & 0xfff; }
static int get_hash_3(int index) { return *((ARCH_WORD_32*)&crypt_out[index]) & 0xffff; }
static int get_hash_4(int index) { return *((ARCH_WORD_32*)&crypt_out[index]) & 0xfffff; }
static int get_hash_5(int index) { return *((ARCH_WORD_32*)&crypt_out[index]) & 0xffffff; }
static int get_hash_6(int index) { return *((ARCH_WORD_32*)&crypt_out[index]) & 0x7ffffff; }

struct fmt_main fmt_krb5_18 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		kinit_tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact,
	}
};

#endif /* plugin stanza */
zeroslike_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"

#include <math.h>

/* Fill output_tensor with zeros (fp32), matching the shape of
 * input_tensor.  Returns 0 on success, -1 for unsupported ranks.
 *
 * BUGFIX: the original rank<4 branch wrote zeros into
 * input_tensor->data and never touched the output; zeros-like must
 * write the OUTPUT tensor. */
int ref_zeroslike_fp32(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    /* Rank 1..3: flat fill over every element. */
    if (input_tensor->dim_num < 4)
    {
        float* out_data = output_tensor->data;
        int total_size = input_tensor->elem_num;

        for (int i = 0; i < total_size; i++)
        {
            out_data[i] = 0.f;
        }
        return 0;
    }
    /* Rank 4: zero channel planes in parallel.
     * NOTE(review): dims[0] (batch) is not iterated, so only the first
     * batch's dims[1]*h*w elements are written -- matches the original
     * behavior; confirm batch is always 1 for this op. */
    else if (input_tensor->dim_num == 4)
    {
        int w = input_tensor->dims[3];
        int h = output_tensor->dims[2];
        int channels = input_tensor->dims[1];
        int c_step = h * w;

        float* out_data = output_tensor->data;

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            float* dst = out_data + c_step * q;
            for (int i = 0; i < c_step; i++)
            {
                dst[i] = 0.f;
            }
        }
        return 0;
    }

    return -1; /* unsupported rank (> 4) */
}

/* Fill output_tensor with zeros (uint8).  Same contract and same
 * BUGFIX as ref_zeroslike_fp32: zeros go to the OUTPUT tensor. */
int ref_zeroslike_uint8(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    /* Rank 1..3: flat fill over every element. */
    if (input_tensor->dim_num < 4)
    {
        uint8_t* out_data = output_tensor->data;
        int total_size = input_tensor->elem_num;

        for (int i = 0; i < total_size; i++)
        {
            out_data[i] = 0;
        }
        return 0;
    }
    /* Rank 4: zero channel planes in parallel (batch not iterated,
     * as in the fp32 variant). */
    else if (input_tensor->dim_num == 4)
    {
        int w = input_tensor->dims[3];
        int h = output_tensor->dims[2];
        int channels = input_tensor->dims[1];
        int c_step = h * w;

        uint8_t* out_data = output_tensor->data;

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            uint8_t* dst = out_data + c_step * q;
            for (int i = 0; i < c_step; i++)
            {
                dst[i] = 0;
            }
        }
        return 0;
    }

    return -1; /* unsupported rank (> 4) */
}

/* No per-node state to set up. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* No per-node state to release. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Nothing to precompute before the first run. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Dispatch to the reference kernel matching the input's data type. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;
    struct tensor* output_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    int ret = -1;
    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_zeroslike_fp32(input_tensor, output_tensor, exec_graph->num_thread);
    else if (input_tensor->data_type == TENGINE_DT_UINT8)
        ret = ref_zeroslike_uint8(input_tensor, output_tensor, exec_graph->num_thread);

    return ret;
}

/* Reference implementation: always claims CANDO priority. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    return OPS_SCORE_CANDO;
}

static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

int register_zeroslike_ref_op()
{
    return register_builtin_node_ops(OP_ZEROSLIKE, &hcl_node_ops);
}

int unregister_zeroslike_ref_op()
{
    return unregister_builtin_node_ops(OP_ZEROSLIKE, &hcl_node_ops);
}
omp_smithW_orig.c
/*********************************************************************************
 * Smith–Waterman algorithm
 * Purpose:     Local alignment of nucleotide or protein sequences
 * Authors:     Daniel Holanda, Hanoch Griner, Taynara Pinheiro
 * Compilation: gcc omp_smithW.c -o omp_smithW -fopenmp -DDEBUG
 * Execution:   ./omp_smithW <number_of_threads> <number_of_col> <number_of_rows>
 *********************************************************************************/

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <time.h>

/*--------------------------------------------------------------------
 * Text Tweaks
 */
#define RESET   "\033[0m"
#define BOLDRED "\033[1m\033[31m"      /* Bold Red */
/* End of text tweaks */

/*--------------------------------------------------------------------
 * Constants
 */
#define PATH -1
#define NONE 0
#define UP 1
#define LEFT 2
#define DIAGONAL 3
/* End of constants */

/*--------------------------------------------------------------------
 * Helpers
 */
#define min(x, y) (((x) < (y)) ? (x) : (y))
#define max(a,b) ((a) > (b) ? a : b)
// #define DEBUG
/* End of Helpers */

/*--------------------------------------------------------------------
 * Functions Prototypes
 */
void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos);
int  matchMissmatchScore(long long int i, long long int j);
void backtrack(int* P, long long int maxPos);
void printMatrix(int* matrix);
void printPredecessorMatrix(int* matrix);
void generate(void);
long long int nElement(long long int i);
void calcFirstDiagElement(long long int *i, long long int *si, long long int *sj);
/* End of prototypes */

/*--------------------------------------------------------------------
 * Global Variables
 */
/* Problem dimensions: after the "+1" in main(), m and n are the matrix
 * sizes and the strings a/b hold m-1 and n-1 characters respectively. */
long long int m; /* Columns - Size of string a */
long long int n; /* Lines   - Size of string b */

/* Scoring parameters */
int matchScore = 5;
int missmatchScore = -3;
int gapScore = -4;

/* Strings over the alphabet {A, C, G, T} */
char *a, *b;
/* End of global variables */

/*--------------------------------------------------------------------
 * Function:    main
 */
int main(int argc, char* argv[]) {
    /* BUGFIX: validate argv before use -- the original dereferenced
     * argv[1..3] unconditionally (UB when arguments are missing). */
    if (argc < 4) {
        fprintf(stderr,
                "Usage: %s <number_of_threads> <number_of_col> <number_of_rows>\n",
                argv[0]);
        return EXIT_FAILURE;
    }

    int thread_count = strtol(argv[1], NULL, 10);
    m = strtoll(argv[2], NULL, 10);
    n = strtoll(argv[3], NULL, 10);

#ifdef DEBUG
    printf("\nMatrix[%lld][%lld]\n", n, m);
#endif

    /* Allocates a and b (sequence lengths are the user-supplied m, n). */
    a = malloc(m * sizeof(char));
    b = malloc(n * sizeof(char));

    /* Because now we have zeros (extra row/column of the DP matrix). */
    m++;
    n++;

    /* Allocates similarity matrix H and predecessor matrix P. */
    int *H = calloc(m * n, sizeof(int));
    int *P = calloc(m * n, sizeof(int));

    /* Generate random sequences a and b.
     * (To reproduce the worked example from
     * http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1, use m=11,
     * n=7 with a="CGTGAATTCAT" and b="GACTTAC" instead.) */
    generate();

    /* Start position for backtrack */
    long long int maxPos = 0;

    /* Calculates the similarity matrix */
    long long int i, j;

    /* Gets initial time */
    double initialTime = omp_get_wtime();

    long long int si, sj, ai, aj;

    /* Number of anti-diagonals: (m-1) + (n-1) - 1. */
    long long int nDiag = m + n - 3;
    long long int nEle;

    /* All threads walk the diagonals together; the cells of each
     * diagonal are shared out by the inner "omp for", whose implicit
     * barrier orders one diagonal after the previous. */
#pragma omp parallel num_threads(thread_count) \
    default(none) shared(H, P, maxPos, nDiag) private(nEle, i, si, sj, ai, aj)
    {
        for (i = 1; i <= nDiag; ++i)
        {
            nEle = nElement(i);
            calcFirstDiagElement(&i, &si, &sj);
#pragma omp for
            for (j = 1; j <= nEle; ++j)
            {
                ai = si - j + 1;
                aj = sj + j - 1;
                similarityScore(ai, aj, H, P, &maxPos);
            }
        }
    }

    backtrack(P, maxPos);

    /* Gets final time */
    double finalTime = omp_get_wtime();
    printf("\nElapsed time: %f\n\n", finalTime - initialTime);

#ifdef DEBUG
    printf("\nSimilarity Matrix:\n");
    printMatrix(H);

    printf("\nPredecessor Matrix:\n");
    printPredecessorMatrix(P);
#endif

    /* Frees similarity matrixes */
    free(H);
    free(P);

    /* Frees input arrays */
    free(a);
    free(b);

    return 0;
} /* End of main */

/*--------------------------------------------------------------------
 * Function:    nElement
 * Purpose:     Calculate the number of i-diagonal elements
 */
long long int nElement(long long int i) {
    if (i < m && i < n) {
        /* Number of elements in the diagonal is increasing */
        return i;
    }
    else if (i < max(m, n)) {
        /* Number of elements in the diagonal is stable.
         * BUGFIX: was "long int", which truncates long long m/n. */
        long long int minmn = min(m, n);
        return minmn - 1;
    }
    else {
        /* Number of elements in the diagonal is decreasing.
         * BUGFIX: abs() -> llabs() for long long operands. */
        long long int minmn = min(m, n);
        return 2 * minmn - i + llabs(m - n) - 2;
    }
}

/*--------------------------------------------------------------------
 * Function:    calcFirstDiagElement
 * Purpose:     Calculate the position (si, sj) of the first element
 *              of anti-diagonal *i
 */
void calcFirstDiagElement(long long int *i, long long int *si, long long int *sj) {
    if (*i < n) {
        *si = *i;
        *sj = 1;
    } else {
        *si = n - 1;
        *sj = *i - n + 2;
    }
}

/*--------------------------------------------------------------------
 * Function:    similarityScore
 * Purpose:     Calculate the maximum Similarity-Score H(i,j) and
 *              record the global maximum position for the backtrack
 */
void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos) {
    int up, left, diag;

    /* Index of element (i,j) in the flattened matrix */
    long long int index = m * i + j;

    /* Get element above */
    up = H[index - m] + gapScore;

    /* Get element on the left */
    left = H[index - 1] + gapScore;

    /* Get element on the diagonal */
    diag = H[index - m - 1] + matchMissmatchScore(i, j);

    /* Calculates the maximum */
    int max = NONE;
    int pred = NONE;
    /* === Matrix ===
     *      a[0] ... a[n]
     * b[0]
     * ...
     * b[n]
     *
     * generate 'a' from 'b', if '←' insert e '↑' remove
     * a=GAATTCA
     * b=GACTT-A
     *
     * generate 'b' from 'a', if '←' insert e '↑' remove
     * b=GACTT-A
     * a=GAATTCA
     */
    if (diag > max) { /* same letter ↖ */
        max = diag;
        pred = DIAGONAL;
    }
    if (up > max) { /* remove letter ↑ */
        max = up;
        pred = UP;
    }
    if (left > max) { /* insert letter ← */
        max = left;
        pred = LEFT;
    }

    /* Inserts the value in the similarity and predecessor matrixes */
    H[index] = max;
    P[index] = pred;

    /* Updates maximum score to be used as seed on backtrack.
     * BUGFIX: the original compared H[*maxPos] OUTSIDE the critical
     * section (check-then-act race); both the test and the update must
     * be atomic with respect to other threads. */
#pragma omp critical
    {
        if (max > H[*maxPos]) {
            *maxPos = index;
        }
    }
} /* End of similarityScore */

/*--------------------------------------------------------------------
 * Function:    matchMissmatchScore
 * Purpose:     Similarity function on the alphabet for match/missmatch
 */
int matchMissmatchScore(long long int i, long long int j) {
    if (a[j - 1] == b[i - 1])
        return matchScore;
    else
        return missmatchScore;
} /* End of matchMissmatchScore */

/*--------------------------------------------------------------------
 * Function:    backtrack
 * Purpose:     Modify matrix to print: cells on the best path are
 *              flipped to negative values (multiplied by PATH)
 */
void backtrack(int* P, long long int maxPos) {
    /* BUGFIX: initialize predPos -- if P[maxPos] is NONE on the first
     * iteration, the original read an indeterminate value. */
    long long int predPos = 0;

    /* Backtrack from maxPos towards the start position */
    do {
        if (P[maxPos] == DIAGONAL)
            predPos = maxPos - m - 1;
        else if (P[maxPos] == UP)
            predPos = maxPos - m;
        else if (P[maxPos] == LEFT)
            predPos = maxPos - 1;
        P[maxPos] *= PATH;
        maxPos = predPos;
    } while (P[maxPos] != NONE);
} /* End of backtrack */

/*--------------------------------------------------------------------
 * Function:    printMatrix
 * Purpose:     Print Matrix
 */
void printMatrix(int* matrix) {
    long long int i, j;
    printf("-\t-\t");
    for (j = 0; j < m - 1; j++) {
        printf("%c\t", a[j]);
    }
    printf("\n-\t");
    for (i = 0; i < n; i++) { /* Lines */
        for (j = 0; j < m; j++) {
            if (j == 0 && i > 0) printf("%c\t", b[i - 1]);
            printf("%d\t", matrix[m * i + j]);
        }
        printf("\n");
    }
} /* End of printMatrix */

/*--------------------------------------------------------------------
 * Function:    printPredecessorMatrix
 * Purpose:     Print predecessor matrix (path cells in bold red)
 */
void printPredecessorMatrix(int* matrix) {
    long long int i, j, index;
    printf("    ");
    for (j = 0; j < m - 1; j++) {
        printf("%c ", a[j]);
    }
    printf("\n  ");
    for (i = 0; i < n; i++) { /* Lines */
        for (j = 0; j < m; j++) {
            if (j == 0 && i > 0) printf("%c ", b[i - 1]);
            index = m * i + j;
            if (matrix[index] < 0) {
                printf(BOLDRED);
                if (matrix[index] == -UP)
                    printf("↑ ");
                else if (matrix[index] == -LEFT)
                    printf("← ");
                else if (matrix[index] == -DIAGONAL)
                    printf("↖ ");
                else
                    printf("- ");
                printf(RESET);
            } else {
                if (matrix[index] == UP)
                    printf("↑ ");
                else if (matrix[index] == LEFT)
                    printf("← ");
                else if (matrix[index] == DIAGONAL)
                    printf("↖ ");
                else
                    printf("- ");
            }
        }
        printf("\n");
    }
} /* End of printPredecessorMatrix */

/*--------------------------------------------------------------------
 * Function:    generate
 * Purpose:     Generate random sequences a and b
 */
void generate() {
    /* Random seed */
    srand(time(NULL));

    /* BUGFIX: a and b were allocated BEFORE m and n were incremented,
     * so they hold m-1 and n-1 chars; the original loops ran to m and
     * n, writing one byte past each buffer. */
    long long int i;
    for (i = 0; i < m - 1; i++) {
        int aux = rand() % 4;
        if (aux == 0)
            a[i] = 'A';
        else if (aux == 2)
            a[i] = 'C';
        else if (aux == 3)
            a[i] = 'G';
        else
            a[i] = 'T';
    }

    for (i = 0; i < n - 1; i++) {
        int aux = rand() % 4;
        if (aux == 0)
            b[i] = 'A';
        else if (aux == 2)
            b[i] = 'C';
        else if (aux == 3)
            b[i] = 'G';
        else
            b[i] = 'T';
    }
} /* End of generate */

/*--------------------------------------------------------------------
 * External References:
 * http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1
 * http://pt.slideshare.net/avrilcoghlan/the-smith-waterman-algorithm
 * http://baba.sourceforge.net/
 */
rmse.c
/*************************************************************************/ /** File: rmse.c **/ /** Description: calculate root mean squared error of particular **/ /** clustering. **/ /** Author: Sang-Ha Lee **/ /** University of Virginia. **/ /** **/ /** Note: euclid_dist_2() and find_nearest_point() adopted from **/ /** Minebench code. **/ /** **/ /*************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <float.h> #include <math.h> #include <omp.h> #include "kmeans.h" extern double wtime(void); /*----< euclid_dist_2() >----------------------------------------------------*/ /* multi-dimensional spatial Euclid distance square */ __inline float euclid_dist_2(float *pt1, float *pt2, int numdims) { int i; float ans=0.0; for (i=0; i<numdims; i++) ans += (pt1[i]-pt2[i]) * (pt1[i]-pt2[i]); return(ans); } /*----< find_nearest_point() >-----------------------------------------------*/ __inline int find_nearest_point(float *pt, /* [nfeatures] */ int nfeatures, float **pts, /* [npts][nfeatures] */ int npts) { int index, i; float max_dist=FLT_MAX; /* find the cluster center id with min distance to pt */ for (i=0; i<npts; i++) { float dist; dist = euclid_dist_2(pt, pts[i], nfeatures); /* no need square root */ if (dist < max_dist) { max_dist = dist; index = i; } } return(index); } /*----< rms_err(): calculates RMSE of clustering >-------------------------------------*/ float rms_err (float **feature, /* [npoints][nfeatures] */ int nfeatures, int npoints, float **cluster_centres, /* [nclusters][nfeatures] */ int nclusters) { int i; int nearest_cluster_index; /* cluster center id with min distance to pt */ float sum_euclid = 0.0; /* sum of Euclidean distance squares */ float ret; /* return value */ /* calculate and sum the sqaure of euclidean distance*/ /* #pragma omp parallel for \ shared(feature,cluster_centres) \ firstprivate(npoints,nfeatures,nclusters) \ private(i, nearest_cluster_index) \ schedule 
(static)*/ for (i=0; i<npoints; i++) { nearest_cluster_index = find_nearest_point(feature[i], nfeatures, cluster_centres, nclusters); sum_euclid += euclid_dist_2(feature[i], cluster_centres[nearest_cluster_index], nfeatures); } /* divide by n, then take sqrt */ ret = sqrt(sum_euclid / npoints); return(ret); }
ast-dump-openmp-target-exit-data.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test(int x) { #pragma omp target exit data map(from \ : x) } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-exit-data.c:3:1, line:6:1> line:3:6 test 'void (int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:11, col:15> col:15 used x 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:18, line:6:1> // CHECK-NEXT: `-OMPTargetExitDataDirective {{.*}} <line:4:1, line:5:38> openmp_standalone_directive // CHECK-NEXT: |-OMPMapClause {{.*}} <line:4:30, line:5:37> // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:36> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: `-CapturedStmt {{.*}} <line:4:1> // CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: |-CompoundStmt {{.*}} <col:1> // CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-exit-data.c:4:1) *const restrict'
GB_unaryop__ainv_uint16_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_uint16_fp32
// op(A') function:  GB_tran__ainv_uint16_fp32

// C type:   uint16_t
// A type:   float
// cast:     uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16)
// unaryop:  cij = -aij   (additive inverse)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: additive inverse
#define GB_OP(z, x) \
    z = -x ;

// casting: float -> uint16_t with modular wrap (GB_CAST_UNSIGNED)
#define GB_CASTING(z, x) \
    uint16_t z ; GB_CAST_UNSIGNED(z,x,16) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT16 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_uint16_fp32
(
    uint16_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // elementwise, embarrassingly parallel over the anz entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_uint16_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the actual transpose loop lives in the shared template below
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 32; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
DRB082-declared-in-func-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
A variable is declared inside a function called within a parallel region.
The variable should be shared if it uses static storage.
Data race pair: q@57:3 vs. q@57:3
*/

/* Increment a counter with static storage duration.  Because q is
 * static it is shared by all threads, so concurrent calls race on the
 * unsynchronized read-modify-write -- this race is INTENTIONAL (this
 * is a DataRaceBench "yes" test case); do not "fix" it. */
void foo()
{
  static int q;
  q += 1;   /* data race: unsynchronized q += 1 across threads */
}

int main()
{
#pragma omp parallel
  {
    foo();  /* every thread races on the static q inside foo() */
  }
  return 0;
}
calculate_F.h
#pragma omp target teams distribute parallel for collapse(2) thread_limit(BLOCK_SIZE) for (int col = 1; col < NUM+1; col++) { for (int row = 1; row < NUM+1; row++) { if (col == NUM) { // right boundary, F_ij = u_ij // also do left boundary F(0, row) = u(0, row); F(NUM, row) = u(NUM, row); } else { // u velocities Real u_ij = u(col, row); Real u_ip1j = u(col + 1, row); Real u_ijp1 = u(col, row + 1); Real u_im1j = u(col - 1, row); Real u_ijm1 = u(col, row - 1); // v velocities Real v_ij = v(col, row); Real v_ip1j = v(col + 1, row); Real v_ijm1 = v(col, row - 1); Real v_ip1jm1 = v(col + 1, row - 1); // finite differences Real du2dx, duvdy, d2udx2, d2udy2; du2dx = (((u_ij + u_ip1j) * (u_ij + u_ip1j) - (u_im1j + u_ij) * (u_im1j + u_ij)) + mix_param * (fabs(u_ij + u_ip1j) * (u_ij - u_ip1j) - fabs(u_im1j + u_ij) * (u_im1j - u_ij))) / (FOUR * dx); duvdy = ((v_ij + v_ip1j) * (u_ij + u_ijp1) - (v_ijm1 + v_ip1jm1) * (u_ijm1 + u_ij) + mix_param * (fabs(v_ij + v_ip1j) * (u_ij - u_ijp1) - fabs(v_ijm1 + v_ip1jm1) * (u_ijm1 - u_ij))) / (FOUR * dy); d2udx2 = (u_ip1j - (TWO * u_ij) + u_im1j) / (dx * dx); d2udy2 = (u_ijp1 - (TWO * u_ij) + u_ijm1) / (dy * dy); F(col, row) = u_ij + dt * (((d2udx2 + d2udy2) / Re_num) - du2dx - duvdy + gx); } // end if } }
mandel-omp-for-point.c
/*
 * Sequential Mandelbrot program
 *
 * This program computes and displays all or part of the Mandelbrot
 * set.  By default, it examines all points in the complex plane
 * that have both real and imaginary parts between -2 and 2.
 * Command-line parameters allow zooming in on a specific part of
 * this range.
 *
 * Usage:
 *   mandel [-i maxiter -c x0 y0 -s size -w windowsize]
 * where
 *   maxiter denotes the maximum number of iterations at each point -- by default 1000
 *   x0, y0, and size specify the range to examine (a square
 *     centered at (x0 + iy0) of size 2*size by 2*size -- by default,
 *     a square of size 4 by 4 centered at the origin)
 *   windowsize denotes the size of the image (diplay window) to compute
 *
 * Input:   none, except the optional command-line arguments
 * Output:  a graphical display as described in Wilkinson & Allen,
 *          displayed using the X Window system, plus text output to
 *          standard output showing the above parameters, plus execution
 *          time in seconds.
 *
 * Code based on the original code from Web site for Wilkinson and Allen's
 * text on parallel programming:
 * http://www.cs.uncc.edu/~abw/parallel/par_prog/
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif

#include <sys/time.h>

/* Wall-clock time in microseconds. */
double getusec_() {
        struct timeval time;
        gettimeofday(&time, NULL);
        return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}

#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
                        stamp = stamp/1e6;\
                        printf ("%s: %0.6fs\n",(_m), stamp);

/* Default values for things. */
#define N           2           /* size of problem space (x, y from -N to N) */
#define NPIXELS     800         /* size of display window in pixels */

/* NOTE(review): globals used as loop indices; made private per thread
 * by the private(row,col) clause in mandelbrot() below. */
int row, col;   // variables used to traverse the problem space

/* Structure definition for complex numbers */
typedef struct {
    double real, imag;
} complex;

#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h"     /* has setup(), interact() */
#endif

/* Compute the Mandelbrot iteration count for every pixel, either
 * drawing points to the X display (_DISPLAY_) or storing counts into
 * output[row][col].  Rows are walked by every thread; the columns of
 * each row are shared out by the inner "omp for". */
void mandelbrot(int height, int width, double real_min, double imag_min,
                double scale_real, double scale_imag, int maxiter,
#if _DISPLAY_
                int setup_return,
                Display *display, Window win, GC gc, double scale_color, double min_color)
#else
                int ** output)
#endif
{
    /* Calculate points and save/display */
    #pragma omp parallel private(row,col)
    {
    for (row = 0; row < height; ++row) {
        #pragma omp for schedule(runtime)
        //#pragma omp task
        for (col = 0; col < width; ++col) {
            complex z, c;

            z.real = z.imag = 0;

            /* Scale display coordinates to actual region */
            c.real = real_min + ((double) col * scale_real);
            c.imag = imag_min + ((double) (height-1-row) * scale_imag);
                                        /* height-1-row so y axis displays
                                         * with larger values at top */

            /* Calculate z0, z1, .... until divergence or maximum iterations */
            int k = 0;
            double lengthsq, temp;
            do  {
                temp = z.real*z.real - z.imag*z.imag + c.real;
                z.imag = 2*z.real*z.imag + c.imag;
                z.real = temp;
                lengthsq = z.real*z.real + z.imag*z.imag;
                ++k;
            } while (lengthsq < (N*N) && k < maxiter);

#if _DISPLAY_
            /* Scale color and display point */
            long color = (long) ((k-1) * scale_color) + min_color;
            if (setup_return == EXIT_SUCCESS) {
                #pragma omp critical   /* Xlib calls are not thread-safe */
                {
                XSetForeground (display, gc, color);
                XDrawPoint (display, win, gc, col, row);
                }
            }
#else
            output[row][col]=k;
#endif
        }
    }
    }
}

int main(int argc, char *argv[]) {
    int maxiter = 1000;
    double real_min;
    double real_max;
    double imag_min;
    double imag_max;
    int width = NPIXELS;         /* dimensions of display window */
    int height = NPIXELS;
    double size=N, x0 = 0, y0 = 0;
#if _DISPLAY_
    Display *display;
    Window win;
    GC gc;
    int setup_return;
    long min_color = 0, max_color = 0;
    double scale_color;
#else
    int ** output;
    FILE *fp = NULL;
#endif
    double scale_real, scale_imag;

    /* Process command-line arguments */
    for (int i=1; i<argc; i++) {
        if (strcmp(argv[i], "-i")==0) {
            maxiter = atoi(argv[++i]);
        } else if (strcmp(argv[i], "-w")==0) {
            width = atoi(argv[++i]);
            height = width;
        } else if (strcmp(argv[i], "-s")==0) {
            size = atof(argv[++i]);
        }
#if !_DISPLAY_
        else if (strcmp(argv[i], "-o")==0) {
            if((fp=fopen("mandel.out", "wb"))==NULL) {
                fprintf(stderr, "Unable to open file\n");
                return EXIT_FAILURE;
            }
        }
#endif
        else if (strcmp(argv[i], "-c")==0) {
            x0 = atof(argv[++i]);
            y0 = atof(argv[++i]);
        } else {
#if _DISPLAY_
            fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
            fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
            fprintf(stderr, "       -o to write computed image to disk (default no file generated)\n");
#endif
            fprintf(stderr, "       -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
            fprintf(stderr, "       -w to specify the size of the display window (default 800x800 pixels)\n");
#else
            fprintf(stderr, "       -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
            fprintf(stderr, "       -c to specify the center x0+iy0 of the square to compute (default origin)\n");
            fprintf(stderr, "       -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
            return EXIT_FAILURE;
        }
    }

    real_min = x0 - size;
    real_max = x0 + size;
    imag_min = y0 - size;
    imag_max = y0 + size;

    /* Produce text output */
    fprintf(stdout, "\n");
    fprintf(stdout, "Mandelbrot program\n");
    fprintf(stdout, "center = (%g, %g), size = %g\n",
            (real_max + real_min)/2, (imag_max + imag_min)/2,
            (real_max - real_min)/2);
    fprintf(stdout, "maximum iterations = %d\n", maxiter);
    fprintf(stdout, "\n");

#if _DISPLAY_
    /* Initialize for graphical display */
    setup_return = setup(width, height, &display, &win, &gc, &min_color, &max_color);
    if (setup_return != EXIT_SUCCESS) {
        fprintf(stderr, "Unable to initialize display, continuing\n");
        return EXIT_FAILURE;
    }
#else
    /* NOTE(review): output rows are never freed -- leaked at exit. */
    output = malloc(height*sizeof(int *));
    for (int row = 0; row < height; ++row)
        output[row] = malloc(width*sizeof(int));
#endif

    /* Compute factors to scale computational region to window */
    scale_real = (double) (real_max - real_min) / (double) width;
    scale_imag = (double) (imag_max - imag_min) / (double) height;

#if _DISPLAY_
    /* Compute factor for color scaling */
    scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif

    /* Start timing */
    double stamp;
    START_COUNT_TIME;

#if _DISPLAY_
    mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
               setup_return, display, win, gc, scale_color, min_color);
#else
    mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
               output);
#endif

    /* End timing */
    STOP_COUNT_TIME("Total execution time");

    /* Be sure all output is written */
#if _DISPLAY_
    if (setup_return == EXIT_SUCCESS) {
        XFlush (display);
    }
#else
    if (fp != NULL) {
        for (int row = 0; row < height; ++row)
            if(fwrite(output[row], sizeof(int), width, fp) != width) {
                fprintf(stderr, "Output file not written correctly\n");
            }
    }
#endif

#if _DISPLAY_
    /* Wait for user response, then exit program */
    if (setup_return == EXIT_SUCCESS) {
        interact(display, &win, width, height, real_min, real_max, imag_min, imag_max);
    }
    return EXIT_SUCCESS;
#endif
/* NOTE(review): when !_DISPLAY_, main falls off the end -- C99 defines
 * this as returning 0 for main, so it is well-defined, just implicit. */
}
lock_scalability.c
/** * \file * \brief Lock scalability benchmark. */ /* * Copyright (c) 2007, 2008, 2009, ETH Zurich. * All rights reserved. * * This file is distributed under the terms in the attached LICENSE file. * If you do not find this file, copies can be found by writing to: * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group. */ #include <stdio.h> #include <omp.h> #include <stdlib.h> // Use spinlocks if defined, mutexes otherwise #define SPINLOCKS #ifdef POSIX #include <pthread.h> #include <stdint.h> #ifdef SPINLOCKS /** \brief spinlock */ typedef volatile uint64_t spinlock_t __attribute__ ((aligned(64))); static inline void acquire_spinlock(spinlock_t * volatile lock) { __asm__ __volatile__( "0:\n\t" "xor %%rax,%%rax\n\t" "lock bts %%rax,(%0)\n\t" "jc 0b\n\t" : : "S" (lock) : "rax" ); } static inline void release_spinlock(spinlock_t * volatile lock) { *lock = 0; } #endif static inline uint64_t rdtsc(void) { uint64_t eax, edx; __asm volatile ("rdtsc" : "=a" (eax), "=d" (edx)); return (edx << 32) | eax; } #endif int main(int argc, char *argv[]) { int i=0; bomp_bomp_init(4); omp_set_num_threads(4); #ifndef POSIX #ifndef SPINLOCKS static struct thread_mutex lock = THREAD_MUTEX_INITIALIZER; #else static spinlock_t lock = 0; #endif #else #ifdef SPINLOCKS static spinlock_t lock = 0; #else static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; #endif #endif uint64_t begin = rdtsc(); #pragma omp parallel { #pragma omp for private(i) for(i=0;i<1000000;i++) { #ifdef SPINLOCKS acquire_spinlock(&lock); release_spinlock(&lock); #else thread_mutex_lock(&lock); thread_mutex_unlock(&lock); #endif } } uint64_t end = rdtsc(); printf("took %lu\n", end - begin); }
GB_select_phase1.c
//------------------------------------------------------------------------------
// GB_select_phase1: count entries in each vector for C=select(A,thunk)
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// NOTE(review): this is a template body meant to be #include'd inside an
// enclosing function; A, A_ek_slicing, A_ntasks, A_nthreads, Cp, Zp, Wfirst,
// Wlast and ithunk are expected to be in scope at the point of inclusion,
// and the GB_*_SELECTOR macro selects the compiled variant — confirm against
// the including file (GB_selector).

// The ek-slicing arrays partition A into A_ntasks tasks: task tid handles
// vectors kfirst_Aslice [tid] .. klast_Aslice [tid], starting at entry
// pstart_Aslice [tid].
const int64_t *restrict kfirst_Aslice = A_ek_slicing ;
const int64_t *restrict klast_Aslice  = A_ek_slicing + A_ntasks ;
const int64_t *restrict pstart_Aslice = A_ek_slicing + A_ntasks * 2 ;

#if defined ( GB_ENTRY_SELECTOR )

    //--------------------------------------------------------------------------
    // entry selector
    //--------------------------------------------------------------------------

    ASSERT (GB_JUMBLED_OK (A)) ;

    // The count of live entries kth vector A(:,k) is reduced to the kth scalar
    // Cp(k).  Each thread computes the reductions on roughly the same number
    // of entries, which means that a vector A(:,k) may be reduced by more than
    // one thread.  The first vector A(:,kfirst) reduced by thread tid may be
    // partial, where the prior thread tid-1 (and other prior threads) may also
    // do some of the reductions for this same vector A(:,kfirst).  The thread
    // tid reduces all vectors A(:,k) for k in the range kfirst+1 to klast-1.
    // The last vector A(:,klast) reduced by thread tid may also be partial.
    // Thread tid+1, and following threads, may also do some of the reduces for
    // A(:,klast).

    //--------------------------------------------------------------------------
    // get A
    //--------------------------------------------------------------------------

    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ah = A->h ;
    const int64_t *restrict Ai = A->i ;
    const GB_ATYPE *restrict Ax = (GB_ATYPE *) A->x ;
    size_t asize = A->type->size ;
    int64_t avlen = A->vlen ;
    int64_t avdim = A->vdim ;
    ASSERT (GB_JUMBLED_OK (A)) ;

    //--------------------------------------------------------------------------
    // reduce each slice
    //--------------------------------------------------------------------------

    // each thread reduces its own part in parallel
    int tid ;
    #pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < A_ntasks ; tid++)
    {

        // if kfirst > klast then thread tid does no work at all
        int64_t kfirst = kfirst_Aslice [tid] ;
        int64_t klast  = klast_Aslice  [tid] ;
        Wfirst [tid] = 0 ;
        Wlast  [tid] = 0 ;

        //----------------------------------------------------------------------
        // reduce vectors kfirst to klast
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // find the part of A(:,k) to be reduced by this thread
            //------------------------------------------------------------------

            GB_GET_J ;      // int64_t j = GBH (Ah, k) ; but for user selectop only
            int64_t pA, pA_end ;
            GB_get_pA (&pA, &pA_end, tid, k,
                kfirst, klast, pstart_Aslice, Ap, avlen) ;

            //------------------------------------------------------------------
            // count entries in Ax [pA ... pA_end-1]
            //------------------------------------------------------------------

            int64_t cjnz = 0 ;
            for ( ; pA < pA_end ; pA++)
            {
                if (GB_TEST_VALUE_OF_ENTRY (pA)) cjnz++ ;
            }

            // The first and last vectors of this slice may be shared with
            // neighboring tasks, so their partial counts go to Wfirst/Wlast
            // and are merged below; interior vectors are owned exclusively by
            // this task and their count is final, so it goes to Cp directly.
            if (k == kfirst)
            {
                Wfirst [tid] = cjnz ;
            }
            else if (k == klast)
            {
                Wlast [tid] = cjnz ;
            }
            else
            {
                Cp [k] = cjnz ;
            }
        }
    }

    //--------------------------------------------------------------------------
    // reduce the first and last vector of each slice using a single thread
    //--------------------------------------------------------------------------

    GB_ek_slice_merge1 (Cp, Wfirst, Wlast, A_ek_slicing, A_ntasks) ;

#else

    //--------------------------------------------------------------------------
    // positional selector (tril, triu, diag, offdiag, resize)
    //--------------------------------------------------------------------------

    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ah = A->h ;
    const int64_t *restrict Ai = A->i ;
    int64_t anvec = A->nvec ;
    int64_t avlen = A->vlen ;
    ASSERT (!GB_JUMBLED (A)) ;

    //--------------------------------------------------------------------------
    // tril, triu, diag, offdiag, resize: binary search in each vector
    //--------------------------------------------------------------------------

    int64_t k ;
    #pragma omp parallel for num_threads(A_nthreads) schedule(guided)
    for (k = 0 ; k < anvec ; k++)
    {

        //----------------------------------------------------------------------
        // get A(:,k)
        //----------------------------------------------------------------------

        int64_t pA_start = GBP (Ap, k, avlen) ;
        int64_t pA_end = GBP (Ap, k+1, avlen) ;
        int64_t p = pA_start ;
        int64_t cjnz = 0 ;
        int64_t ajnz = pA_end - pA_start ;
        bool found = false ;
        if (ajnz > 0)
        {

            //------------------------------------------------------------------
            // search for the entry A(i,k)
            //------------------------------------------------------------------

            int64_t ifirst = GBI (Ai, pA_start, avlen) ;
            int64_t ilast = GBI (Ai, pA_end-1, avlen) ;

            // The target row index i is the diagonal offset for the
            // positional selectors, or the resize cutoff for resize.
            #if defined ( GB_RESIZE_SELECTOR )
            int64_t i = ithunk ;
            #else
            int64_t j = GBH (Ah, k) ;
            int64_t i = j-ithunk ;
            #endif

            if (i < ifirst)
            {
                // all entries in A(:,k) come after i
                ;
            }
            else if (i > ilast)
            {
                // all entries in A(:,k) come before i
                p = pA_end ;
            }
            else if (ajnz == avlen)
            {
                // A(:,k) is dense
                found = true ;
                p += i ;
                ASSERT (GBI (Ai, p, avlen) == i) ;
            }
            else
            {
                // binary search for A (i,k)
                int64_t pright = pA_end - 1 ;
                GB_SPLIT_BINARY_SEARCH (i, Ai, p, pright, found) ;
            }

            #if defined ( GB_TRIL_SELECTOR )

                // keep p to pA_end-1
                cjnz = pA_end - p ;

            #elif defined ( GB_TRIU_SELECTOR ) \
               || defined ( GB_RESIZE_SELECTOR )

                // if found, keep pA_start to p
                // else keep pA_start to p-1
                if (found)
                {
                    p++ ;
                    // now in both cases, keep pA_start to p-1
                }
                // keep pA_start to p-1
                cjnz = p - pA_start ;

            #elif defined ( GB_DIAG_SELECTOR )

                // if found, keep p
                // else keep nothing
                cjnz = found ;
                if (!found) p = -1 ;
                // if (cjnz >= 0) keep p, else keep nothing

            #elif defined ( GB_OFFDIAG_SELECTOR )

                // if found, keep pA_start to p-1 and p+1 to pA_end-1
                // else keep pA_start to pA_end
                cjnz = ajnz - found ;
                if (!found)
                {
                    p = pA_end ;
                    // now just keep pA_start to p-1; p+1 to pA_end is
                    // now empty
                }
                // in both cases, keep pA_start to p-1 and
                // p+1 to pA_end-1.  If the entry is not found, then
                // p == pA_end, and all entries are kept.

            #endif
        }

        //----------------------------------------------------------------------
        // log the result for the kth vector
        //----------------------------------------------------------------------

        Zp [k] = p ;
        Cp [k] = cjnz ;
    }

    //--------------------------------------------------------------------------
    // compute Wfirst and Wlast for each task
    //--------------------------------------------------------------------------

    // Wfirst [0..A_ntasks-1] and Wlast [0..A_ntasks-1] are required for
    // constructing C_start_slice [0..A_ntasks-1] in GB_selector.

    for (int tid = 0 ; tid < A_ntasks ; tid++)
    {

        // if kfirst > klast then task tid does no work at all
        int64_t kfirst = kfirst_Aslice [tid] ;
        int64_t klast  = klast_Aslice  [tid] ;
        Wfirst [tid] = 0 ;
        Wlast  [tid] = 0 ;

        if (kfirst <= klast)
        {
            // Wfirst: entries kept in the part of the (possibly shared)
            // first vector that is owned by this task.
            int64_t pA_start = pstart_Aslice [tid] ;
            int64_t pA_end = GBP (Ap, kfirst+1, avlen) ;
            pA_end = GB_IMIN (pA_end, pstart_Aslice [tid+1]) ;
            if (pA_start < pA_end)
            {
                #if defined ( GB_TRIL_SELECTOR )

                    // keep Zp [kfirst] to pA_end-1
                    int64_t p = GB_IMAX (Zp [kfirst], pA_start) ;
                    Wfirst [tid] = GB_IMAX (0, pA_end - p) ;

                #elif defined ( GB_TRIU_SELECTOR ) \
                   || defined ( GB_RESIZE_SELECTOR )

                    // keep pA_start to Zp [kfirst]-1
                    int64_t p = GB_IMIN (Zp [kfirst], pA_end) ;
                    Wfirst [tid] = GB_IMAX (0, p - pA_start) ;

                #elif defined ( GB_DIAG_SELECTOR )

                    // task that owns the diagonal entry does this work
                    int64_t p = Zp [kfirst] ;
                    Wfirst [tid] = (pA_start <= p && p < pA_end) ? 1 : 0 ;

                #elif defined ( GB_OFFDIAG_SELECTOR )

                    // keep pA_start to Zp [kfirst]-1
                    int64_t p = GB_IMIN (Zp [kfirst], pA_end) ;
                    Wfirst [tid] = GB_IMAX (0, p - pA_start) ;

                    // keep Zp [kfirst]+1 to pA_end-1
                    p = GB_IMAX (Zp [kfirst]+1, pA_start) ;
                    Wfirst [tid] += GB_IMAX (0, pA_end - p) ;

                #endif
            }
        }

        if (kfirst < klast)
        {
            // Wlast: same computation for the task's last (shared) vector.
            int64_t pA_start = GBP (Ap, klast, avlen) ;
            int64_t pA_end = pstart_Aslice [tid+1] ;
            if (pA_start < pA_end)
            {
                #if defined ( GB_TRIL_SELECTOR )

                    // keep Zp [klast] to pA_end-1
                    int64_t p = GB_IMAX (Zp [klast], pA_start) ;
                    Wlast [tid] = GB_IMAX (0, pA_end - p) ;

                #elif defined ( GB_TRIU_SELECTOR ) \
                   || defined ( GB_RESIZE_SELECTOR )

                    // keep pA_start to Zp [klast]-1
                    int64_t p = GB_IMIN (Zp [klast], pA_end) ;
                    Wlast [tid] = GB_IMAX (0, p - pA_start) ;

                #elif defined ( GB_DIAG_SELECTOR )

                    // task that owns the diagonal entry does this work
                    int64_t p = Zp [klast] ;
                    Wlast [tid] = (pA_start <= p && p < pA_end) ? 1 : 0 ;

                #elif defined ( GB_OFFDIAG_SELECTOR )

                    // keep pA_start to Zp [klast]-1
                    int64_t p = GB_IMIN (Zp [klast], pA_end) ;
                    Wlast [tid] = GB_IMAX (0, p - pA_start) ;

                    // keep Zp [klast]+1 to pA_end-1
                    p = GB_IMAX (Zp [klast]+1, pA_start) ;
                    Wlast [tid] += GB_IMAX (0, pA_end - p) ;

                #endif
            }
        }
    }

#endif
nanort.h
// // NanoRT, single header only modern ray tracing kernel. // // // Notes : The number of primitives are up to 2G. If you want to render large // data, please split data into chunks(~ 2G prims) and use NanoSG scene graph // library(`${nanort}/examples/nanosg`). // /* The MIT License (MIT) Copyright (c) 2015 - 2018 Light Transport Entertainment, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef NANORT_H_ #define NANORT_H_ #include <algorithm> #include <cassert> #include <cmath> #include <cstdio> #include <cstdlib> #include <cstring> #include <functional> #include <limits> #include <memory> #include <queue> #include <string> #include <vector> // compiler macros // // NANORT_USE_CPP11_FEATURE : Enable C++11 feature // NANORT_ENABLE_PARALLEL_BUILD : Enable parallel BVH build. // NANORT_ENABLE_SERIALIZATION : Enable serialization feature for built BVH. // // Parallelized BVH build is supported on C++11 thread version. // OpenMP version is not fully tested. 
// thus turn off if you face a problem when building BVH in parallel. // #define NANORT_ENABLE_PARALLEL_BUILD // Some constants #define kNANORT_MIN_PRIMITIVES_FOR_PARALLEL_BUILD (1024 * 8) #define kNANORT_SHALLOW_DEPTH (4) // will create 2**N subtrees #ifdef NANORT_USE_CPP11_FEATURE // Assume C++11 compiler has thread support. // In some situation(e.g. embedded system, JIT compilation), thread feature // may not be available though... #include <atomic> #include <mutex> #include <thread> #define kNANORT_MAX_THREADS (256) // Parallel build should work well for C++11 version, thus force enable it. #ifndef NANORT_ENABLE_PARALLEL_BUILD #define NANORT_ENABLE_PARALLEL_BUILD #endif #endif namespace nanort { // RayType typedef enum { RAY_TYPE_NONE = 0x0, RAY_TYPE_PRIMARY = 0x1, RAY_TYPE_SECONDARY = 0x2, RAY_TYPE_DIFFUSE = 0x4, RAY_TYPE_REFLECTION = 0x8, RAY_TYPE_REFRACTION = 0x10 } RayType; #ifdef __clang__ #pragma clang diagnostic push #if __has_warning("-Wzero-as-null-pointer-constant") #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" #endif #endif // ---------------------------------------------------------------------------- // Small vector class useful for multi-threaded environment. // // stack_container.h // // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // This allocator can be used with STL containers to provide a stack buffer // from which to allocate memory and overflows onto the heap. This stack buffer // would be allocated on the stack and allows us to avoid heap operations in // some situations. // // STL likes to make copies of allocators, so the allocator itself can't hold // the data. Instead, we make the creator responsible for creating a // StackAllocator::Source which contains the data. 
Copying the allocator // merely copies the pointer to this shared source, so all allocators created // based on our allocator will share the same stack buffer. // // This stack buffer implementation is very simple. The first allocation that // fits in the stack buffer will use the stack buffer. Any subsequent // allocations will not use the stack buffer, even if there is unused room. // This makes it appropriate for array-like containers, but the caller should // be sure to reserve() in the container up to the stack buffer size. Otherwise // the container will allocate a small array which will "use up" the stack // buffer. template <typename T, size_t stack_capacity> class StackAllocator : public std::allocator<T> { public: typedef typename std::allocator<T>::pointer pointer; typedef typename std::allocator<T>::size_type size_type; // Backing store for the allocator. The container owner is responsible for // maintaining this for as long as any containers using this allocator are // live. struct Source { Source() : used_stack_buffer_(false) {} // Casts the buffer in its right type. T *stack_buffer() { return reinterpret_cast<T *>(stack_buffer_); } const T *stack_buffer() const { return reinterpret_cast<const T *>(stack_buffer_); } // // IMPORTANT: Take care to ensure that stack_buffer_ is aligned // since it is used to mimic an array of T. // Be careful while declaring any unaligned types (like bool) // before stack_buffer_. // // The buffer itself. It is not of type T because we don't want the // constructors and destructors to be automatically called. Define a POD // buffer of the right size instead. char stack_buffer_[sizeof(T[stack_capacity])]; // Set when the stack buffer is used for an allocation. We do not track // how much of the buffer is used, only that somebody is using it. bool used_stack_buffer_; }; // Used by containers when they want to refer to an allocator of type U. 
template <typename U> struct rebind { typedef StackAllocator<U, stack_capacity> other; }; // For the straight up copy c-tor, we can share storage. StackAllocator(const StackAllocator<T, stack_capacity> &rhs) : source_(rhs.source_) {} // ISO C++ requires the following constructor to be defined, // and std::vector in VC++2008SP1 Release fails with an error // in the class _Container_base_aux_alloc_real (from <xutility>) // if the constructor does not exist. // For this constructor, we cannot share storage; there's // no guarantee that the Source buffer of Ts is large enough // for Us. // TODO(Google): If we were fancy pants, perhaps we could share storage // iff sizeof(T) == sizeof(U). template <typename U, size_t other_capacity> StackAllocator(const StackAllocator<U, other_capacity> &other) : source_(NULL) { (void)other; } explicit StackAllocator(Source *source) : source_(source) {} // Actually do the allocation. Use the stack buffer if nobody has used it yet // and the size requested fits. Otherwise, fall through to the standard // allocator. pointer allocate(size_type n, void *hint = 0) { if (source_ != NULL && !source_->used_stack_buffer_ && n <= stack_capacity) { source_->used_stack_buffer_ = true; return source_->stack_buffer(); } else { return std::allocator<T>::allocate(n, hint); } } // Free: when trying to free the stack buffer, just mark it as free. For // non-stack-buffer pointers, just fall though to the standard allocator. void deallocate(pointer p, size_type n) { if (source_ != NULL && p == source_->stack_buffer()) source_->used_stack_buffer_ = false; else std::allocator<T>::deallocate(p, n); } private: Source *source_; }; // A wrapper around STL containers that maintains a stack-sized buffer that the // initial capacity of the vector is based on. Growing the container beyond the // stack capacity will transparently overflow onto the heap. The container must // support reserve(). 
// // WATCH OUT: the ContainerType MUST use the proper StackAllocator for this // type. This object is really intended to be used only internally. You'll want // to use the wrappers below for different types. template <typename TContainerType, int stack_capacity> class StackContainer { public: typedef TContainerType ContainerType; typedef typename ContainerType::value_type ContainedType; typedef StackAllocator<ContainedType, stack_capacity> Allocator; // Allocator must be constructed before the container! StackContainer() : allocator_(&stack_data_), container_(allocator_) { // Make the container use the stack allocation by reserving our buffer size // before doing anything else. container_.reserve(stack_capacity); } // Getters for the actual container. // // Danger: any copies of this made using the copy constructor must have // shorter lifetimes than the source. The copy will share the same allocator // and therefore the same stack buffer as the original. Use std::copy to // copy into a "real" container for longer-lived objects. ContainerType &container() { return container_; } const ContainerType &container() const { return container_; } // Support operator-> to get to the container. This allows nicer syntax like: // StackContainer<...> foo; // std::sort(foo->begin(), foo->end()); ContainerType *operator->() { return &container_; } const ContainerType *operator->() const { return &container_; } #ifdef UNIT_TEST // Retrieves the stack source so that that unit tests can verify that the // buffer is being used properly. 
const typename Allocator::Source &stack_data() const { return stack_data_; } #endif protected: typename Allocator::Source stack_data_; unsigned char pad_[7]; Allocator allocator_; ContainerType container_; // DISALLOW_EVIL_CONSTRUCTORS(StackContainer); StackContainer(const StackContainer &); void operator=(const StackContainer &); }; // StackVector // // Example: // StackVector<int, 16> foo; // foo->push_back(22); // we have overloaded operator-> // foo[0] = 10; // as well as operator[] template <typename T, size_t stack_capacity> class StackVector : public StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >, stack_capacity> { public: StackVector() : StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >, stack_capacity>() {} // We need to put this in STL containers sometimes, which requires a copy // constructor. We can't call the regular copy constructor because that will // take the stack buffer from the original. Here, we create an empty object // and make a stack buffer of its own. StackVector(const StackVector<T, stack_capacity> &other) : StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >, stack_capacity>() { this->container().assign(other->begin(), other->end()); } StackVector<T, stack_capacity> &operator=( const StackVector<T, stack_capacity> &other) { this->container().assign(other->begin(), other->end()); return *this; } // Vectors are commonly indexed, which isn't very convenient even with // operator-> (using "->at()" does exception stuff we don't want). 
T &operator[](size_t i) { return this->container().operator[](i); } const T &operator[](size_t i) const { return this->container().operator[](i); } }; // ---------------------------------------------------------------------------- template <typename T = float> class real3 { public: real3() {} real3(T x) { v[0] = x; v[1] = x; v[2] = x; } real3(T xx, T yy, T zz) { v[0] = xx; v[1] = yy; v[2] = zz; } explicit real3(const T *p) { v[0] = p[0]; v[1] = p[1]; v[2] = p[2]; } inline T x() const { return v[0]; } inline T y() const { return v[1]; } inline T z() const { return v[2]; } real3 operator*(T f) const { return real3(x() * f, y() * f, z() * f); } real3 operator-(const real3 &f2) const { return real3(x() - f2.x(), y() - f2.y(), z() - f2.z()); } real3 operator*(const real3 &f2) const { return real3(x() * f2.x(), y() * f2.y(), z() * f2.z()); } real3 operator+(const real3 &f2) const { return real3(x() + f2.x(), y() + f2.y(), z() + f2.z()); } real3 &operator+=(const real3 &f2) { v[0] += f2.x(); v[1] += f2.y(); v[2] += f2.z(); return (*this); } real3 operator/(const real3 &f2) const { return real3(x() / f2.x(), y() / f2.y(), z() / f2.z()); } real3 operator-() const { return real3(-x(), -y(), -z()); } T operator[](int i) const { return v[i]; } T &operator[](int i) { return v[i]; } T v[3]; // T pad; // for alignment(when T = float) }; template <typename T> inline real3<T> operator*(T f, const real3<T> &v) { return real3<T>(v.x() * f, v.y() * f, v.z() * f); } template <typename T> inline real3<T> vneg(const real3<T> &rhs) { return real3<T>(-rhs.x(), -rhs.y(), -rhs.z()); } template <typename T> inline T vlength(const real3<T> &rhs) { return std::sqrt(rhs.x() * rhs.x() + rhs.y() * rhs.y() + rhs.z() * rhs.z()); } template <typename T> inline real3<T> vnormalize(const real3<T> &rhs) { real3<T> v = rhs; T len = vlength(rhs); if (std::fabs(len) > std::numeric_limits<T>::epsilon()) { T inv_len = static_cast<T>(1.0) / len; v.v[0] *= inv_len; v.v[1] *= inv_len; v.v[2] *= inv_len; } 
return v; } template <typename T> inline real3<T> vcross(const real3<T> a, const real3<T> b) { real3<T> c; c[0] = a[1] * b[2] - a[2] * b[1]; c[1] = a[2] * b[0] - a[0] * b[2]; c[2] = a[0] * b[1] - a[1] * b[0]; return c; } template <typename T> inline T vdot(const real3<T> a, const real3<T> b) { return a[0] * b[0] + a[1] * b[1] + a[2] * b[2]; } template <typename T> inline real3<T> vsafe_inverse(const real3<T> v) { real3<T> r; #ifdef NANORT_USE_CPP11_FEATURE if (std::fabs(v[0]) < std::numeric_limits<T>::epsilon()) { r[0] = std::numeric_limits<T>::infinity() * std::copysign(static_cast<T>(1), v[0]); } else { r[0] = static_cast<T>(1.0) / v[0]; } if (std::fabs(v[1]) < std::numeric_limits<T>::epsilon()) { r[1] = std::numeric_limits<T>::infinity() * std::copysign(static_cast<T>(1), v[1]); } else { r[1] = static_cast<T>(1.0) / v[1]; } if (std::fabs(v[2]) < std::numeric_limits<T>::epsilon()) { r[2] = std::numeric_limits<T>::infinity() * std::copysign(static_cast<T>(1), v[2]); } else { r[2] = static_cast<T>(1.0) / v[2]; } #else if (std::fabs(v[0]) < std::numeric_limits<T>::epsilon()) { T sgn = (v[0] < static_cast<T>(0)) ? static_cast<T>(-1) : static_cast<T>(1); r[0] = std::numeric_limits<T>::infinity() * sgn; } else { r[0] = static_cast<T>(1.0) / v[0]; } if (std::fabs(v[1]) < std::numeric_limits<T>::epsilon()) { T sgn = (v[1] < static_cast<T>(0)) ? static_cast<T>(-1) : static_cast<T>(1); r[1] = std::numeric_limits<T>::infinity() * sgn; } else { r[1] = static_cast<T>(1.0) / v[1]; } if (std::fabs(v[2]) < std::numeric_limits<T>::epsilon()) { T sgn = (v[2] < static_cast<T>(0)) ? 
static_cast<T>(-1) : static_cast<T>(1); r[2] = std::numeric_limits<T>::infinity() * sgn; } else { r[2] = static_cast<T>(1.0) / v[2]; } #endif return r; } template <typename real> inline const real *get_vertex_addr(const real *p, const size_t idx, const size_t stride_bytes) { return reinterpret_cast<const real *>( reinterpret_cast<const unsigned char *>(p) + idx * stride_bytes); } template <typename T = float> class Ray { public: Ray() : min_t(static_cast<T>(0.0)), max_t(std::numeric_limits<T>::max()), type(RAY_TYPE_NONE) { org[0] = static_cast<T>(0.0); org[1] = static_cast<T>(0.0); org[2] = static_cast<T>(0.0); dir[0] = static_cast<T>(0.0); dir[1] = static_cast<T>(0.0); dir[2] = static_cast<T>(-1.0); } T org[3]; // must set T dir[3]; // must set T min_t; // minimum ray hit distance. T max_t; // maximum ray hit distance. T inv_dir[3]; // filled internally int dir_sign[3]; // filled internally unsigned int type; // ray type // TODO(LTE): Align sizeof(Ray) }; template <typename T = float> class BVHNode { public: BVHNode() {} BVHNode(const BVHNode &rhs) { bmin[0] = rhs.bmin[0]; bmin[1] = rhs.bmin[1]; bmin[2] = rhs.bmin[2]; flag = rhs.flag; bmax[0] = rhs.bmax[0]; bmax[1] = rhs.bmax[1]; bmax[2] = rhs.bmax[2]; axis = rhs.axis; data[0] = rhs.data[0]; data[1] = rhs.data[1]; } BVHNode &operator=(const BVHNode &rhs) { bmin[0] = rhs.bmin[0]; bmin[1] = rhs.bmin[1]; bmin[2] = rhs.bmin[2]; flag = rhs.flag; bmax[0] = rhs.bmax[0]; bmax[1] = rhs.bmax[1]; bmax[2] = rhs.bmax[2]; axis = rhs.axis; data[0] = rhs.data[0]; data[1] = rhs.data[1]; return (*this); } ~BVHNode() {} T bmin[3]; T bmax[3]; int flag; // 1 = leaf node, 0 = branch node int axis; // leaf // data[0] = npoints // data[1] = index // // branch // data[0] = child[0] // data[1] = child[1] unsigned int data[2]; }; template <class H> class IntersectComparator { public: bool operator()(const H &a, const H &b) const { return a.t < b.t; } }; /// BVH build option. 
template <typename T = float> struct BVHBuildOptions { T cost_t_aabb; unsigned int min_leaf_primitives; unsigned int max_tree_depth; unsigned int bin_size; unsigned int shallow_depth; unsigned int min_primitives_for_parallel_build; // Cache bounding box computation. // Requires more memory, but BVHbuild can be faster. bool cache_bbox; unsigned char pad[3]; // Set default value: Taabb = 0.2 BVHBuildOptions() : cost_t_aabb(static_cast<T>(0.2)), min_leaf_primitives(4), max_tree_depth(256), bin_size(64), shallow_depth(kNANORT_SHALLOW_DEPTH), min_primitives_for_parallel_build( kNANORT_MIN_PRIMITIVES_FOR_PARALLEL_BUILD), cache_bbox(false) {} }; /// BVH build statistics. class BVHBuildStatistics { public: unsigned int max_tree_depth; unsigned int num_leaf_nodes; unsigned int num_branch_nodes; float build_secs; // Set default value: Taabb = 0.2 BVHBuildStatistics() : max_tree_depth(0), num_leaf_nodes(0), num_branch_nodes(0), build_secs(0.0f) {} }; /// BVH trace option. class BVHTraceOptions { public: // Hit only for face IDs in indexRange. // This feature is good to mimic something like glDrawArrays() unsigned int prim_ids_range[2]; // Prim ID to skip for avoiding self-intersection // -1 = no skipping unsigned int skip_prim_id; bool cull_back_face; unsigned char pad[3]; ///< Padding(not used) BVHTraceOptions() { prim_ids_range[0] = 0; prim_ids_range[1] = 0x7FFFFFFF; // Up to 2G face IDs. 
skip_prim_id = static_cast<unsigned int>(-1); cull_back_face = false; } }; template <typename T> class BBox { public: real3<T> bmin; real3<T> bmax; BBox() { bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<T>::max(); bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<T>::max(); } }; template <typename T> class NodeHit { public: NodeHit() : t_min(std::numeric_limits<T>::max()), t_max(-std::numeric_limits<T>::max()), node_id(static_cast<unsigned int>(-1)) {} NodeHit(const NodeHit<T> &rhs) { t_min = rhs.t_min; t_max = rhs.t_max; node_id = rhs.node_id; } NodeHit &operator=(const NodeHit<T> &rhs) { t_min = rhs.t_min; t_max = rhs.t_max; node_id = rhs.node_id; return (*this); } ~NodeHit() {} T t_min; T t_max; unsigned int node_id; }; template <typename T> class NodeHitComparator { public: inline bool operator()(const NodeHit<T> &a, const NodeHit<T> &b) { return a.t_min < b.t_min; } }; template <typename T> class BVHAccel { public: BVHAccel() : pad0_(0) { (void)pad0_; } ~BVHAccel() {} /// /// Build BVH for input primitives. /// template <class P, class Pred> bool Build(const unsigned int num_primitives, const P &p, const Pred &pred, const BVHBuildOptions<T> &options = BVHBuildOptions<T>()); /// /// Get statistics of built BVH tree. Valid after Build() /// BVHBuildStatistics GetStatistics() const { return stats_; } #if defined(NANORT_ENABLE_SERIALIZATION) /// /// Dump built BVH to the file. 
  ///
  bool Dump(const char *filename) const;
  bool Dump(FILE *fp) const;

  ///
  /// Load BVH binary
  ///
  bool Load(const char *filename);
  bool Load(FILE *fp);
#endif

  void Debug();

  ///
  /// Traverse into BVH along ray and find closest hit point & primitive if
  /// found
  ///
  template <class I, class H>
  bool Traverse(const Ray<T> &ray, const I &intersector, H *isect,
                const BVHTraceOptions &options = BVHTraceOptions()) const;

#if 0
  /// Multi-hit ray traversal
  /// Returns `max_intersections` frontmost intersections
  template<class I, class H, class Comp>
  bool MultiHitTraverse(const Ray<T> &ray,
                        int max_intersections,
                        const I &intersector,
                        StackVector<H, 128> *isects,
                        const BVHTraceOptions &options = BVHTraceOptions()) const;
#endif

  ///
  /// List up nodes which intersect along the ray.
  /// This function is useful for two-level BVH traversal.
  ///
  template <class I>
  bool ListNodeIntersections(const Ray<T> &ray, int max_intersections,
                             const I &intersector,
                             StackVector<NodeHit<T>, 128> *hits) const;

  const std::vector<BVHNode<T> > &GetNodes() const { return nodes_; }
  const std::vector<unsigned int> &GetIndices() const { return indices_; }

  ///
  /// Returns bounding box of built BVH.
  ///
  void BoundingBox(T bmin[3], T bmax[3]) const {
    if (nodes_.empty()) {
      // No tree built yet: report an empty (inverted) box.
      bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<T>::max();
      bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<T>::max();
    } else {
      // The root node's box bounds the whole tree.
      bmin[0] = nodes_[0].bmin[0];
      bmin[1] = nodes_[0].bmin[1];
      bmin[2] = nodes_[0].bmin[2];
      bmax[0] = nodes_[0].bmax[0];
      bmax[1] = nodes_[0].bmax[1];
      bmax[2] = nodes_[0].bmax[2];
    }
  }

  bool IsValid() const { return nodes_.size() > 0; }

 private:
#if defined(NANORT_ENABLE_PARALLEL_BUILD)
  typedef struct {
    unsigned int left_idx;
    unsigned int right_idx;
    unsigned int offset;
  } ShallowNodeInfo;

  // Used only during BVH construction
  std::vector<ShallowNodeInfo> shallow_node_infos_;

  /// Builds shallow BVH tree recursively.
  template <class P, class Pred>
  unsigned int BuildShallowTree(std::vector<BVHNode<T> > *out_nodes,
                                unsigned int left_idx, unsigned int right_idx,
                                unsigned int depth,
                                unsigned int max_shallow_depth, const P &p,
                                const Pred &pred);
#endif

  /// Builds BVH tree recursively.
  template <class P, class Pred>
  unsigned int BuildTree(BVHBuildStatistics *out_stat,
                         std::vector<BVHNode<T> > *out_nodes,
                         unsigned int left_idx, unsigned int right_idx,
                         unsigned int depth, const P &p, const Pred &pred);

  template <class I>
  bool TestLeafNode(const BVHNode<T> &node, const Ray<T> &ray,
                    const I &intersector) const;

  template <class I>
  bool TestLeafNodeIntersections(
      const BVHNode<T> &node, const Ray<T> &ray, const int max_intersections,
      const I &intersector,
      std::priority_queue<NodeHit<T>, std::vector<NodeHit<T> >,
                          NodeHitComparator<T> > *isect_pq) const;

#if 0
  template<class I, class H, class Comp>
  bool MultiHitTestLeafNode(std::priority_queue<H, std::vector<H>, Comp> *isect_pq,
                            int max_intersections,
                            const BVHNode<T> &node, const Ray<T> &ray,
                            const I &intersector) const;
#endif

  std::vector<BVHNode<T> > nodes_;
  std::vector<unsigned int> indices_;  // max 4G triangles.
  std::vector<BBox<T> > bboxes_;
  BVHBuildOptions<T> options_;
  BVHBuildStatistics stats_;
  unsigned int pad0_;
};

// Predefined SAH predicator for triangle.
template <typename T = float>
class TriangleSAHPred {
 public:
  TriangleSAHPred(
      const T *vertices, const unsigned int *faces,
      size_t vertex_stride_bytes)  // e.g.
12 for sizeof(float) * XYZ : axis_(0), pos_(static_cast<T>(0.0)), vertices_(vertices), faces_(faces), vertex_stride_bytes_(vertex_stride_bytes) {} TriangleSAHPred(const TriangleSAHPred<T> &rhs) : axis_(rhs.axis_), pos_(rhs.pos_), vertices_(rhs.vertices_), faces_(rhs.faces_), vertex_stride_bytes_(rhs.vertex_stride_bytes_) {} TriangleSAHPred<T> &operator=(const TriangleSAHPred<T> &rhs) { axis_ = rhs.axis_; pos_ = rhs.pos_; vertices_ = rhs.vertices_; faces_ = rhs.faces_; vertex_stride_bytes_ = rhs.vertex_stride_bytes_; return (*this); } void Set(int axis, T pos) const { axis_ = axis; pos_ = pos; } bool operator()(unsigned int i) const { int axis = axis_; T pos = pos_; unsigned int i0 = faces_[3 * i + 0]; unsigned int i1 = faces_[3 * i + 1]; unsigned int i2 = faces_[3 * i + 2]; real3<T> p0(get_vertex_addr<T>(vertices_, i0, vertex_stride_bytes_)); real3<T> p1(get_vertex_addr<T>(vertices_, i1, vertex_stride_bytes_)); real3<T> p2(get_vertex_addr<T>(vertices_, i2, vertex_stride_bytes_)); T center = p0[axis] + p1[axis] + p2[axis]; return (center < pos * static_cast<T>(3.0)); } private: mutable int axis_; mutable T pos_; const T *vertices_; const unsigned int *faces_; const size_t vertex_stride_bytes_; }; // Predefined Triangle mesh geometry. template <typename T = float> class TriangleMesh { public: TriangleMesh( const T *vertices, const unsigned int *faces, const size_t vertex_stride_bytes) // e.g. 12 for sizeof(float) * XYZ : vertices_(vertices), faces_(faces), vertex_stride_bytes_(vertex_stride_bytes) {} /// Compute bounding box for `prim_index`th triangle. /// This function is called for each primitive in BVH build. 
void BoundingBox(real3<T> *bmin, real3<T> *bmax, unsigned int prim_index) const { (*bmin)[0] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0], vertex_stride_bytes_)[0]; (*bmin)[1] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0], vertex_stride_bytes_)[1]; (*bmin)[2] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0], vertex_stride_bytes_)[2]; (*bmax)[0] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0], vertex_stride_bytes_)[0]; (*bmax)[1] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0], vertex_stride_bytes_)[1]; (*bmax)[2] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0], vertex_stride_bytes_)[2]; for (unsigned int i = 1; i < 3; i++) { for (unsigned int k = 0; k < 3; k++) { if ((*bmin)[static_cast<int>(k)] > get_vertex_addr<T>(vertices_, faces_[3 * prim_index + i], vertex_stride_bytes_)[k]) { (*bmin)[static_cast<int>(k)] = get_vertex_addr<T>( vertices_, faces_[3 * prim_index + i], vertex_stride_bytes_)[k]; } if ((*bmax)[static_cast<int>(k)] < get_vertex_addr<T>(vertices_, faces_[3 * prim_index + i], vertex_stride_bytes_)[k]) { (*bmax)[static_cast<int>(k)] = get_vertex_addr<T>( vertices_, faces_[3 * prim_index + i], vertex_stride_bytes_)[k]; } } } } const T *vertices_; const unsigned int *faces_; const size_t vertex_stride_bytes_; }; template <typename T = float> class TriangleIntersection { public: T u; T v; // Required member variables. T t; unsigned int prim_id; }; template <typename T = float, class H = TriangleIntersection<T> > class TriangleIntersector { public: TriangleIntersector(const T *vertices, const unsigned int *faces, const size_t vertex_stride_bytes) // e.g. // vertex_stride_bytes // = 12 = sizeof(float) // * 3 : vertices_(vertices), faces_(faces), vertex_stride_bytes_(vertex_stride_bytes) {} // For Watertight Ray/Triangle Intersection. 
// Per-ray constants computed in PrepareTraversal(): shear factors
  // (Sx, Sy, Sz) and the permuted axis indices (kx, ky, kz).
  typedef struct {
    T Sx;
    T Sy;
    T Sz;
    int kx;
    int ky;
    int kz;
  } RayCoeff;

  /// Do ray intersection stuff for `prim_index` th primitive and return hit
  /// distance `t`,
  /// barycentric coordinate `u` and `v`.
  /// Returns true if there's intersection.
  bool Intersect(T *t_inout, const unsigned int prim_index) const {
    if ((prim_index < trace_options_.prim_ids_range[0]) ||
        (prim_index >= trace_options_.prim_ids_range[1])) {
      return false;
    }

    // Self-intersection test.
    if (prim_index == trace_options_.skip_prim_id) {
      return false;
    }

    const unsigned int f0 = faces_[3 * prim_index + 0];
    const unsigned int f1 = faces_[3 * prim_index + 1];
    const unsigned int f2 = faces_[3 * prim_index + 2];

    const real3<T> p0(get_vertex_addr(vertices_, f0 + 0, vertex_stride_bytes_));
    const real3<T> p1(get_vertex_addr(vertices_, f1 + 0, vertex_stride_bytes_));
    const real3<T> p2(get_vertex_addr(vertices_, f2 + 0, vertex_stride_bytes_));

    // Translate the triangle so the ray origin is at (0, 0, 0).
    const real3<T> A = p0 - ray_org_;
    const real3<T> B = p1 - ray_org_;
    const real3<T> C = p2 - ray_org_;

    // Shear and scale the vertices into ray-aligned space.
    const T Ax = A[ray_coeff_.kx] - ray_coeff_.Sx * A[ray_coeff_.kz];
    const T Ay = A[ray_coeff_.ky] - ray_coeff_.Sy * A[ray_coeff_.kz];
    const T Bx = B[ray_coeff_.kx] - ray_coeff_.Sx * B[ray_coeff_.kz];
    const T By = B[ray_coeff_.ky] - ray_coeff_.Sy * B[ray_coeff_.kz];
    const T Cx = C[ray_coeff_.kx] - ray_coeff_.Sx * C[ray_coeff_.kz];
    const T Cy = C[ray_coeff_.ky] - ray_coeff_.Sy * C[ray_coeff_.kz];

    // Scaled barycentric (edge-function) coordinates.
    T U = Cx * By - Cy * Bx;
    T V = Ax * Cy - Ay * Cx;
    T W = Bx * Ay - By * Ax;

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wfloat-equal"
#endif

    // Fall back to test against edges using double precision.
    if (U == static_cast<T>(0.0) || V == static_cast<T>(0.0) ||
        W == static_cast<T>(0.0)) {
      double CxBy = static_cast<double>(Cx) * static_cast<double>(By);
      double CyBx = static_cast<double>(Cy) * static_cast<double>(Bx);
      U = static_cast<T>(CxBy - CyBx);

      double AxCy = static_cast<double>(Ax) * static_cast<double>(Cy);
      double AyCx = static_cast<double>(Ay) * static_cast<double>(Cx);
      V = static_cast<T>(AxCy - AyCx);

      double BxAy = static_cast<double>(Bx) * static_cast<double>(Ay);
      double ByAx = static_cast<double>(By) * static_cast<double>(Ax);
      W = static_cast<T>(BxAy - ByAx);
    }

    // A hit requires U, V, W to share a sign (or be zero).
    if (trace_options_.cull_back_face) {
      if (U < static_cast<T>(0.0) || V < static_cast<T>(0.0) ||
          W < static_cast<T>(0.0))
        return false;
    } else {
      if ((U < static_cast<T>(0.0) || V < static_cast<T>(0.0) ||
           W < static_cast<T>(0.0)) &&
          (U > static_cast<T>(0.0) || V > static_cast<T>(0.0) ||
           W > static_cast<T>(0.0))) {
        return false;
      }
    }

    T det = U + V + W;
    if (det == static_cast<T>(0.0)) return false;

#ifdef __clang__
#pragma clang diagnostic pop
#endif

    // Scaled hit distance; normalized by 1/det below.
    const T Az = ray_coeff_.Sz * A[ray_coeff_.kz];
    const T Bz = ray_coeff_.Sz * B[ray_coeff_.kz];
    const T Cz = ray_coeff_.Sz * C[ray_coeff_.kz];
    const T D = U * Az + V * Bz + W * Cz;

    const T rcpDet = static_cast<T>(1.0) / det;
    T tt = D * rcpDet;

    // Reject hits beyond the current closest hit or before the ray start.
    if (tt > (*t_inout)) {
      return false;
    }

    if (tt < t_min_) {
      return false;
    }

    (*t_inout) = tt;
    // Use Thomas-Mueller style barycentric coord.
    // U + V + W = 1.0 and interp(p) = U * p0 + V * p1 + W * p2
    // We want interp(p) = (1 - u - v) * p0 + u * p1 + v * p2;
    // => u = V, v = W.
    u_ = V * rcpDet;
    v_ = W * rcpDet;

    return true;
  }

  /// Returns the nearest hit distance.
  T GetT() const { return t_; }

  /// Update is called when initializing intersection and nearest hit is found.
  void Update(T t, unsigned int prim_idx) const {
    t_ = t;
    prim_id_ = prim_idx;
  }

  /// Prepare BVH traversal(e.g. compute inverse ray direction)
  /// This function is called only once in BVH traversal.
void PrepareTraversal(const Ray<T> &ray, const BVHTraceOptions &trace_options) const { ray_org_[0] = ray.org[0]; ray_org_[1] = ray.org[1]; ray_org_[2] = ray.org[2]; // Calculate dimension where the ray direction is maximal. ray_coeff_.kz = 0; T absDir = std::fabs(ray.dir[0]); if (absDir < std::fabs(ray.dir[1])) { ray_coeff_.kz = 1; absDir = std::fabs(ray.dir[1]); } if (absDir < std::fabs(ray.dir[2])) { ray_coeff_.kz = 2; absDir = std::fabs(ray.dir[2]); } ray_coeff_.kx = ray_coeff_.kz + 1; if (ray_coeff_.kx == 3) ray_coeff_.kx = 0; ray_coeff_.ky = ray_coeff_.kx + 1; if (ray_coeff_.ky == 3) ray_coeff_.ky = 0; // Swap kx and ky dimension to preserve widing direction of triangles. if (ray.dir[ray_coeff_.kz] < static_cast<T>(0.0)) std::swap(ray_coeff_.kx, ray_coeff_.ky); // Calculate shear constants. ray_coeff_.Sx = ray.dir[ray_coeff_.kx] / ray.dir[ray_coeff_.kz]; ray_coeff_.Sy = ray.dir[ray_coeff_.ky] / ray.dir[ray_coeff_.kz]; ray_coeff_.Sz = static_cast<T>(1.0) / ray.dir[ray_coeff_.kz]; trace_options_ = trace_options; t_min_ = ray.min_t; u_ = static_cast<T>(0.0); v_ = static_cast<T>(0.0); } /// Post BVH traversal stuff. /// Fill `isect` if there is a hit. void PostTraversal(const Ray<T> &ray, bool hit, H *isect) const { if (hit && isect) { (*isect).t = t_; (*isect).u = u_; (*isect).v = v_; (*isect).prim_id = prim_id_; } (void)ray; } private: const T *vertices_; const unsigned int *faces_; const size_t vertex_stride_bytes_; mutable real3<T> ray_org_; mutable RayCoeff ray_coeff_; mutable BVHTraceOptions trace_options_; mutable T t_min_; mutable T t_; mutable T u_; mutable T v_; mutable unsigned int prim_id_; }; // // Robust BVH Ray Traversal : http://jcgt.org/published/0002/02/02/paper.pdf // // NaN-safe min and max function. template <class T> const T &safemin(const T &a, const T &b) { return (a < b) ? a : b; } template <class T> const T &safemax(const T &a, const T &b) { return (a > b) ? 
a : b; } // // SAH functions // struct BinBuffer { explicit BinBuffer(unsigned int size) { bin_size = size; bin.resize(2 * 3 * size); clear(); } void clear() { memset(&bin[0], 0, sizeof(size_t) * 2 * 3 * bin_size); } std::vector<size_t> bin; // (min, max) * xyz * binsize unsigned int bin_size; unsigned int pad0; }; template <typename T> inline T CalculateSurfaceArea(const real3<T> &min, const real3<T> &max) { real3<T> box = max - min; return static_cast<T>(2.0) * (box[0] * box[1] + box[1] * box[2] + box[2] * box[0]); } template <typename T> inline void GetBoundingBoxOfTriangle(real3<T> *bmin, real3<T> *bmax, const T *vertices, const unsigned int *faces, unsigned int index) { unsigned int f0 = faces[3 * index + 0]; unsigned int f1 = faces[3 * index + 1]; unsigned int f2 = faces[3 * index + 2]; real3<T> p[3]; p[0] = real3<T>(&vertices[3 * f0]); p[1] = real3<T>(&vertices[3 * f1]); p[2] = real3<T>(&vertices[3 * f2]); (*bmin) = p[0]; (*bmax) = p[0]; for (int i = 1; i < 3; i++) { (*bmin)[0] = std::min((*bmin)[0], p[i][0]); (*bmin)[1] = std::min((*bmin)[1], p[i][1]); (*bmin)[2] = std::min((*bmin)[2], p[i][2]); (*bmax)[0] = std::max((*bmax)[0], p[i][0]); (*bmax)[1] = std::max((*bmax)[1], p[i][1]); (*bmax)[2] = std::max((*bmax)[2], p[i][2]); } } template <typename T, class P> inline void ContributeBinBuffer(BinBuffer *bins, // [out] const real3<T> &scene_min, const real3<T> &scene_max, unsigned int *indices, unsigned int left_idx, unsigned int right_idx, const P &p) { T bin_size = static_cast<T>(bins->bin_size); // Calculate extent real3<T> scene_size, scene_inv_size; scene_size = scene_max - scene_min; for (int i = 0; i < 3; ++i) { assert(scene_size[i] >= static_cast<T>(0.0)); if (scene_size[i] > static_cast<T>(0.0)) { scene_inv_size[i] = bin_size / scene_size[i]; } else { scene_inv_size[i] = static_cast<T>(0.0); } } // Clear bin data std::fill(bins->bin.begin(), bins->bin.end(), 0); // memset(&bins->bin[0], 0, sizeof(2 * 3 * bins->bin_size)); size_t idx_bmin[3]; size_t 
idx_bmax[3]; for (size_t i = left_idx; i < right_idx; i++) { // // Quantize the position into [0, BIN_SIZE) // // q[i] = (int)(p[i] - scene_bmin) / scene_size // real3<T> bmin; real3<T> bmax; p.BoundingBox(&bmin, &bmax, indices[i]); // GetBoundingBoxOfTriangle(&bmin, &bmax, vertices, faces, indices[i]); real3<T> quantized_bmin = (bmin - scene_min) * scene_inv_size; real3<T> quantized_bmax = (bmax - scene_min) * scene_inv_size; // idx is now in [0, BIN_SIZE) for (int j = 0; j < 3; ++j) { int q0 = static_cast<int>(quantized_bmin[j]); if (q0 < 0) q0 = 0; int q1 = static_cast<int>(quantized_bmax[j]); if (q1 < 0) q1 = 0; idx_bmin[j] = static_cast<unsigned int>(q0); idx_bmax[j] = static_cast<unsigned int>(q1); if (idx_bmin[j] >= bin_size) idx_bmin[j] = static_cast<unsigned int>(bin_size) - 1; if (idx_bmax[j] >= bin_size) idx_bmax[j] = static_cast<unsigned int>(bin_size) - 1; assert(idx_bmin[j] < bin_size); assert(idx_bmax[j] < bin_size); // Increment bin counter bins->bin[0 * (bins->bin_size * 3) + static_cast<size_t>(j) * bins->bin_size + idx_bmin[j]] += 1; bins->bin[1 * (bins->bin_size * 3) + static_cast<size_t>(j) * bins->bin_size + idx_bmax[j]] += 1; } } } template <typename T> inline T SAH(size_t ns1, T leftArea, size_t ns2, T rightArea, T invS, T Taabb, T Ttri) { T sah; sah = static_cast<T>(2.0) * Taabb + (leftArea * invS) * static_cast<T>(ns1) * Ttri + (rightArea * invS) * static_cast<T>(ns2) * Ttri; return sah; } template <typename T> inline bool FindCutFromBinBuffer(T *cut_pos, // [out] xyz int *minCostAxis, // [out] const BinBuffer *bins, const real3<T> &bmin, const real3<T> &bmax, size_t num_primitives, T costTaabb) { // should be in [0.0, 1.0] const T kEPS = std::numeric_limits<T>::epsilon(); // * epsScale; size_t left, right; real3<T> bsize, bstep; real3<T> bminLeft, bmaxLeft; real3<T> bminRight, bmaxRight; T saLeft, saRight, saTotal; T pos; T minCost[3]; T costTtri = static_cast<T>(1.0) - costTaabb; (*minCostAxis) = 0; bsize = bmax - bmin; bstep = bsize * 
(static_cast<T>(1.0) / bins->bin_size);
  saTotal = CalculateSurfaceArea(bmin, bmax);

  T invSaTotal = static_cast<T>(0.0);
  if (saTotal > kEPS) {
    invSaTotal = static_cast<T>(1.0) / saTotal;
  }

  for (int j = 0; j < 3; ++j) {
    //
    // Compute SAH cost for the right side of each cell of the bbox.
    // Exclude both extreme side of the bbox.
    //
    //  i:      0    1    2    3
    //     +----+----+----+----+----+
    //     |    |    |    |    |    |
    //     +----+----+----+----+----+
    //

    T minCostPos = bmin[j] + static_cast<T>(1.0) * bstep[j];
    minCost[j] = std::numeric_limits<T>::max();

    left = 0;
    right = num_primitives;
    bminLeft = bminRight = bmin;
    bmaxLeft = bmaxRight = bmax;

    for (int i = 0; i < static_cast<int>(bins->bin_size) - 1; ++i) {
      // Running counts: primitives whose min is left of the cut, and whose
      // max is right of it.
      left += bins->bin[0 * (3 * bins->bin_size) +
                        static_cast<size_t>(j) * bins->bin_size +
                        static_cast<size_t>(i)];
      right -= bins->bin[1 * (3 * bins->bin_size) +
                         static_cast<size_t>(j) * bins->bin_size +
                         static_cast<size_t>(i)];

      assert(left <= num_primitives);
      assert(right <= num_primitives);

      //
      // Split pos bmin + (i + 1) * (bsize / BIN_SIZE)
      // +1 for i since we want a position on right side of the cell.
      //
      pos = bmin[j] + (i + static_cast<T>(1.0)) * bstep[j];
      bmaxLeft[j] = pos;
      bminRight[j] = pos;

      saLeft = CalculateSurfaceArea(bminLeft, bmaxLeft);
      saRight = CalculateSurfaceArea(bminRight, bmaxRight);

      T cost =
          SAH(left, saLeft, right, saRight, invSaTotal, costTaabb, costTtri);
      if (cost < minCost[j]) {
        //
        // Update the min cost
        //
        minCost[j] = cost;
        minCostPos = pos;
        // minCostAxis = j;
      }
    }

    cut_pos[j] = minCostPos;
  }

  // cut_axis = minCostAxis;
  // cut_pos = minCostPos;

  // Find min cost axis
  T cost = minCost[0];
  (*minCostAxis) = 0;

  if (cost > minCost[1]) {
    (*minCostAxis) = 1;
    cost = minCost[1];
  }
  if (cost > minCost[2]) {
    (*minCostAxis) = 2;
    cost = minCost[2];
  }

  return true;
}

#ifdef _OPENMP
/// OpenMP variant of scene bounds computation over indices
/// [left_index, right_index).
template <typename T, class P>
void ComputeBoundingBoxOMP(real3<T> *bmin, real3<T> *bmax,
                           const unsigned int *indices,
                           unsigned int left_index, unsigned int right_index,
                           const P &p) {
  // Seed with the first primitive's box.
  { p.BoundingBox(bmin, bmax, indices[left_index]); }

  T local_bmin[3] = {(*bmin)[0], (*bmin)[1], (*bmin)[2]};
  T local_bmax[3] = {(*bmax)[0], (*bmax)[1], (*bmax)[2]};

  unsigned int n = right_index - left_index;

#pragma omp parallel firstprivate(local_bmin, local_bmax) if (n > (1024 * 128))
  {
#pragma omp parallel for
    for (int i = int(left_index); i < int(right_index); i++) {  // for each faces
      unsigned int idx = indices[i];

      real3<T> bbox_min, bbox_max;
      p.BoundingBox(&bbox_min, &bbox_max, idx);

      for (int k = 0; k < 3; k++) {  // xyz
        if ((*bmin)[k] > bbox_min[k]) (*bmin)[k] = bbox_min[k];
        if ((*bmax)[k] < bbox_max[k]) (*bmax)[k] = bbox_max[k];
      }
    }

    // NOTE(review): the nested duplicated `if` checks below look redundant;
    // verify against upstream whether a different merge was intended.
#pragma omp critical
    {
      for (int k = 0; k < 3; k++) {
        if (local_bmin[k] < (*bmin)[k]) {
          {
            if (local_bmin[k] < (*bmin)[k]) (*bmin)[k] = local_bmin[k];
          }
        }

        if (local_bmax[k] > (*bmax)[k]) {
          {
            if (local_bmax[k] > (*bmax)[k]) (*bmax)[k] = local_bmax[k];
          }
        }
      }
    }
  }
}
#endif

#ifdef NANORT_USE_CPP11_FEATURE
/// std::thread variant of scene bounds computation: splits
/// [left_index, right_index) across hardware threads and merges the
/// per-thread partial boxes afterwards.
template <typename T, class P>
inline void ComputeBoundingBoxThreaded(real3<T> *bmin, real3<T> *bmax,
                                       const unsigned int *indices,
                                       unsigned int left_index,
                                       unsigned int
right_index, const P &p) { unsigned int n = right_index - left_index; size_t num_threads = std::min(size_t(kNANORT_MAX_THREADS), std::max(1UL, size_t(std::thread::hardware_concurrency()))); if (n < num_threads) { num_threads = n; } std::vector<std::thread> workers; size_t ndiv = n / num_threads; std::vector<T> local_bmins(3 * num_threads); // 3 = xyz std::vector<T> local_bmaxs(3 * num_threads); // 3 = xyz for (size_t t = 0; t < num_threads; t++) { workers.emplace_back(std::thread([&, t]() { size_t si = left_index + t * ndiv; size_t ei = std::min(left_index + (t + 1) * ndiv, size_t(right_index)); local_bmins[3 * t + 0] = std::numeric_limits<T>::infinity(); local_bmins[3 * t + 1] = std::numeric_limits<T>::infinity(); local_bmins[3 * t + 2] = std::numeric_limits<T>::infinity(); local_bmaxs[3 * t + 0] = -std::numeric_limits<T>::infinity(); local_bmaxs[3 * t + 1] = -std::numeric_limits<T>::infinity(); local_bmaxs[3 * t + 2] = -std::numeric_limits<T>::infinity(); for (size_t i = si; i < ei; i++) { // for each faces unsigned int idx = indices[i]; real3<T> bbox_min, bbox_max; p.BoundingBox(&bbox_min, &bbox_max, idx); for (int k = 0; k < 3; k++) { // xyz if (local_bmins[3 * t + k] > bbox_min[k]) local_bmins[3 * t + k] = bbox_min[k]; if (local_bmaxs[3 * t + k] < bbox_max[k]) local_bmaxs[3 * t + k] = bbox_max[k]; } } })); } for (auto &t : workers) { t.join(); } // merge bbox for (int k = 0; k < 3; k++) { (*bmin)[k] = local_bmins[k]; (*bmax)[k] = local_bmaxs[k]; } for (size_t t = 1; t < num_threads; t++) { for (int k = 0; k < 3; k++) { if (local_bmins[3 * t + k] < (*bmin)[k]) { (*bmin)[k] = local_bmins[3 * t + k]; } if (local_bmaxs[3 * t + k] > (*bmax)[k]) { (*bmax)[k] = local_bmaxs[3 * t + k]; } } } } #endif template <typename T, class P> inline void ComputeBoundingBox(real3<T> *bmin, real3<T> *bmax, const unsigned int *indices, unsigned int left_index, unsigned int right_index, const P &p) { { unsigned int idx = indices[left_index]; p.BoundingBox(bmin, bmax, idx); } { for 
(unsigned int i = left_index + 1; i < right_index; i++) { // for each primitives unsigned int idx = indices[i]; real3<T> bbox_min, bbox_max; p.BoundingBox(&bbox_min, &bbox_max, idx); for (int k = 0; k < 3; k++) { // xyz if ((*bmin)[k] > bbox_min[k]) (*bmin)[k] = bbox_min[k]; if ((*bmax)[k] < bbox_max[k]) (*bmax)[k] = bbox_max[k]; } } } } template <typename T> inline void GetBoundingBox(real3<T> *bmin, real3<T> *bmax, const std::vector<BBox<T> > &bboxes, unsigned int *indices, unsigned int left_index, unsigned int right_index) { { unsigned int i = left_index; unsigned int idx = indices[i]; (*bmin)[0] = bboxes[idx].bmin[0]; (*bmin)[1] = bboxes[idx].bmin[1]; (*bmin)[2] = bboxes[idx].bmin[2]; (*bmax)[0] = bboxes[idx].bmax[0]; (*bmax)[1] = bboxes[idx].bmax[1]; (*bmax)[2] = bboxes[idx].bmax[2]; } T local_bmin[3] = {(*bmin)[0], (*bmin)[1], (*bmin)[2]}; T local_bmax[3] = {(*bmax)[0], (*bmax)[1], (*bmax)[2]}; { for (unsigned int i = left_index; i < right_index; i++) { // for each faces unsigned int idx = indices[i]; for (int k = 0; k < 3; k++) { // xyz T minval = bboxes[idx].bmin[k]; T maxval = bboxes[idx].bmax[k]; if (local_bmin[k] > minval) local_bmin[k] = minval; if (local_bmax[k] < maxval) local_bmax[k] = maxval; } } for (int k = 0; k < 3; k++) { (*bmin)[k] = local_bmin[k]; (*bmax)[k] = local_bmax[k]; } } } // // -- // #if defined(NANORT_ENABLE_PARALLEL_BUILD) template <typename T> template <class P, class Pred> unsigned int BVHAccel<T>::BuildShallowTree(std::vector<BVHNode<T> > *out_nodes, unsigned int left_idx, unsigned int right_idx, unsigned int depth, unsigned int max_shallow_depth, const P &p, const Pred &pred) { assert(left_idx <= right_idx); unsigned int offset = static_cast<unsigned int>(out_nodes->size()); if (stats_.max_tree_depth < depth) { stats_.max_tree_depth = depth; } real3<T> bmin, bmax; #if defined(NANORT_USE_CPP11_FEATURE) && defined(NANORT_ENABLE_PARALLEL_BUILD) ComputeBoundingBoxThreaded(&bmin, &bmax, &indices_.at(0), left_idx, right_idx, p); #else 
ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), left_idx, right_idx, p);
#endif

  unsigned int n = right_idx - left_idx;
  if ((n <= options_.min_leaf_primitives) ||
      (depth >= options_.max_tree_depth)) {
    // Create leaf node.
    BVHNode<T> leaf;

    leaf.bmin[0] = bmin[0];
    leaf.bmin[1] = bmin[1];
    leaf.bmin[2] = bmin[2];

    leaf.bmax[0] = bmax[0];
    leaf.bmax[1] = bmax[1];
    leaf.bmax[2] = bmax[2];

    assert(left_idx < std::numeric_limits<unsigned int>::max());

    leaf.flag = 1;  // leaf
    leaf.data[0] = n;         // number of primitives in this leaf
    leaf.data[1] = left_idx;  // offset into indices_

    out_nodes->push_back(leaf);  // atomic update

    stats_.num_leaf_nodes++;

    return offset;
  }

  //
  // Create branch node.
  //
  if (depth >= max_shallow_depth) {
    // Delay to build tree
    ShallowNodeInfo info;
    info.left_idx = left_idx;
    info.right_idx = right_idx;
    info.offset = offset;
    shallow_node_infos_.push_back(info);

    // Add dummy node.
    BVHNode<T> node;
    node.axis = -1;
    node.flag = -1;
    out_nodes->push_back(node);

    return offset;

  } else {
    //
    // TODO(LTE): multi-threaded SAH computation, or use simple object median or
    // spatial median for shallow tree to speeding up the parallel build.
    //

    //
    // Compute SAH and find best split axis and position
    //
    int min_cut_axis = 0;
    T cut_pos[3] = {0.0, 0.0, 0.0};

    BinBuffer bins(options_.bin_size);
    ContributeBinBuffer(&bins, bmin, bmax, &indices_.at(0), left_idx,
                        right_idx, p);
    FindCutFromBinBuffer(cut_pos, &min_cut_axis, &bins, bmin, bmax, n,
                         options_.cost_t_aabb);

    // Try all 3 axis until good cut position available.
    unsigned int mid_idx = left_idx;
    int cut_axis = min_cut_axis;

    for (int axis_try = 0; axis_try < 3; axis_try++) {
      unsigned int *begin = &indices_[left_idx];
      unsigned int *end =
          &indices_[right_idx - 1] + 1;  // mimics end() iterator.
      unsigned int *mid = 0;

      // try min_cut_axis first.
      cut_axis = (min_cut_axis + axis_try) % 3;

      // @fixme { We want something like: std::partition(begin, end,
      // pred(cut_axis, cut_pos[cut_axis])); }
      pred.Set(cut_axis, cut_pos[cut_axis]);

      //
      // Split at (cut_axis, cut_pos)
      // indices_ will be modified.
      //
      mid = std::partition(begin, end, pred);

      mid_idx = left_idx + static_cast<unsigned int>((mid - begin));
      if ((mid_idx == left_idx) || (mid_idx == right_idx)) {
        // Can't split well.
        // Switch to object median(which may create unoptimized tree, but
        // stable)
        mid_idx = left_idx + (n >> 1);

        // Try another axis if there's axis to try.

      } else {
        // Found good cut. exit loop.
        break;
      }
    }

    BVHNode<T> node;
    node.axis = cut_axis;
    node.flag = 0;  // 0 = branch

    out_nodes->push_back(node);

    unsigned int left_child_index = 0;
    unsigned int right_child_index = 0;

    left_child_index = BuildShallowTree(out_nodes, left_idx, mid_idx, depth + 1,
                                        max_shallow_depth, p, pred);

    right_child_index = BuildShallowTree(out_nodes, mid_idx, right_idx,
                                         depth + 1, max_shallow_depth, p, pred);

    // Patch the branch node now that both children exist.
    (*out_nodes)[offset].data[0] = left_child_index;
    (*out_nodes)[offset].data[1] = right_child_index;

    (*out_nodes)[offset].bmin[0] = bmin[0];
    (*out_nodes)[offset].bmin[1] = bmin[1];
    (*out_nodes)[offset].bmin[2] = bmin[2];

    (*out_nodes)[offset].bmax[0] = bmax[0];
    (*out_nodes)[offset].bmax[1] = bmax[1];
    (*out_nodes)[offset].bmax[2] = bmax[2];
  }

  stats_.num_branch_nodes++;

  return offset;
}
#endif

/// Builds a BVH subtree over indices [left_idx, right_idx) recursively,
/// appending nodes to `out_nodes` and returning the subtree root's offset.
/// indices_ is partitioned in place by the SAH predicate.
template <typename T>
template <class P, class Pred>
unsigned int BVHAccel<T>::BuildTree(BVHBuildStatistics *out_stat,
                                    std::vector<BVHNode<T> > *out_nodes,
                                    unsigned int left_idx,
                                    unsigned int right_idx, unsigned int depth,
                                    const P &p, const Pred &pred) {
  assert(left_idx <= right_idx);

  unsigned int offset = static_cast<unsigned int>(out_nodes->size());

  if (out_stat->max_tree_depth < depth) {
    out_stat->max_tree_depth = depth;
  }

  real3<T> bmin, bmax;
  if (!bboxes_.empty()) {
    // Use cached per-primitive boxes when available.
    GetBoundingBox(&bmin, &bmax, bboxes_, &indices_.at(0), left_idx, right_idx);
  } else {
    ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), left_idx, right_idx, p);
  }

  unsigned int n = right_idx - left_idx;
  if ((n <= options_.min_leaf_primitives) ||
      (depth >= options_.max_tree_depth)) {
    // Create leaf node.
BVHNode<T> leaf;

    leaf.bmin[0] = bmin[0];
    leaf.bmin[1] = bmin[1];
    leaf.bmin[2] = bmin[2];

    leaf.bmax[0] = bmax[0];
    leaf.bmax[1] = bmax[1];
    leaf.bmax[2] = bmax[2];

    assert(left_idx < std::numeric_limits<unsigned int>::max());

    leaf.flag = 1;  // leaf
    leaf.data[0] = n;         // number of primitives in this leaf
    leaf.data[1] = left_idx;  // offset into indices_

    out_nodes->push_back(leaf);  // atomic update

    out_stat->num_leaf_nodes++;

    return offset;
  }

  //
  // Create branch node.
  //

  //
  // Compute SAH and find best split axis and position
  //
  int min_cut_axis = 0;
  T cut_pos[3] = {0.0, 0.0, 0.0};

  BinBuffer bins(options_.bin_size);
  ContributeBinBuffer(&bins, bmin, bmax, &indices_.at(0), left_idx, right_idx,
                      p);
  FindCutFromBinBuffer(cut_pos, &min_cut_axis, &bins, bmin, bmax, n,
                       options_.cost_t_aabb);

  // Try all 3 axis until good cut position available.
  unsigned int mid_idx = left_idx;
  int cut_axis = min_cut_axis;

  for (int axis_try = 0; axis_try < 3; axis_try++) {
    unsigned int *begin = &indices_[left_idx];
    unsigned int *end = &indices_[right_idx - 1] + 1;  // mimics end() iterator.
    unsigned int *mid = 0;

    // try min_cut_axis first.
    cut_axis = (min_cut_axis + axis_try) % 3;

    pred.Set(cut_axis, cut_pos[cut_axis]);

    //
    // Split at (cut_axis, cut_pos)
    // indices_ will be modified.
    //
    mid = std::partition(begin, end, pred);

    mid_idx = left_idx + static_cast<unsigned int>((mid - begin));
    if ((mid_idx == left_idx) || (mid_idx == right_idx)) {
      // Can't split well.
      // Switch to object median(which may create unoptimized tree, but
      // stable)
      mid_idx = left_idx + (n >> 1);

      // Try another axis to find better cut.

    } else {
      // Found good cut. exit loop.
      break;
    }
  }

  BVHNode<T> node;
  node.axis = cut_axis;
  node.flag = 0;  // 0 = branch

  out_nodes->push_back(node);

  unsigned int left_child_index = 0;
  unsigned int right_child_index = 0;

  left_child_index =
      BuildTree(out_stat, out_nodes, left_idx, mid_idx, depth + 1, p, pred);

  right_child_index =
      BuildTree(out_stat, out_nodes, mid_idx, right_idx, depth + 1, p, pred);

  // Patch the branch node now that both children exist.
  {
    (*out_nodes)[offset].data[0] = left_child_index;
    (*out_nodes)[offset].data[1] = right_child_index;

    (*out_nodes)[offset].bmin[0] = bmin[0];
    (*out_nodes)[offset].bmin[1] = bmin[1];
    (*out_nodes)[offset].bmin[2] = bmin[2];

    (*out_nodes)[offset].bmax[0] = bmax[0];
    (*out_nodes)[offset].bmax[1] = bmax[1];
    (*out_nodes)[offset].bmax[2] = bmax[2];
  }

  out_stat->num_branch_nodes++;

  return offset;
}

/// Build the BVH over `num_primitives` primitives using accessor `p` and SAH
/// predicate `pred`. Returns false when there is nothing to build.
template <typename T>
template <class P, class Pred>
bool BVHAccel<T>::Build(unsigned int num_primitives, const P &p,
                        const Pred &pred, const BVHBuildOptions<T> &options) {
  options_ = options;
  stats_ = BVHBuildStatistics();

  nodes_.clear();
  bboxes_.clear();

  assert(options_.bin_size > 1);

  if (num_primitives == 0) {
    return false;
  }

  unsigned int n = num_primitives;

  //
  // 1. Create triangle indices(this will be permuted in BuildTree)
  //
  indices_.resize(n);

#if defined(NANORT_USE_CPP11_FEATURE)
  {
    // NOTE(review): `1UL` vs size_t portability — see
    // ComputeBoundingBoxThreaded.
    size_t num_threads = std::min(
        size_t(kNANORT_MAX_THREADS),
        std::max(1UL, size_t(std::thread::hardware_concurrency())));

    if (n < num_threads) {
      num_threads = n;
    }

    std::vector<std::thread> workers;

    size_t ndiv = n / num_threads;

    for (size_t t = 0; t < num_threads; t++) {
      workers.emplace_back(std::thread([&, t]() {
        // NOTE(review): when n is not divisible by num_threads, indices past
        // num_threads * ndiv appear uncovered by any slice — verify.
        size_t si = t * ndiv;
        size_t ei = std::min((t + 1) * ndiv, size_t(n));

        for (size_t k = si; k < ei; k++) {
          indices_[k] = static_cast<unsigned int>(k);
        }
      }));
    }

    for (auto &t : workers) {
      t.join();
    }
  }
#else

#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < static_cast<int>(n); i++) {
    indices_[static_cast<size_t>(i)] = static_cast<unsigned int>(i);
  }
#endif  // !NANORT_USE_CPP11_FEATURE

  //
  // 2.
Compute bounding box(optional). // real3<T> bmin, bmax; if (options.cache_bbox) { bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<T>::max(); bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<T>::max(); bboxes_.resize(n); for (size_t i = 0; i < n; i++) { // for each primitived unsigned int idx = indices_[i]; BBox<T> bbox; p.BoundingBox(&(bbox.bmin), &(bbox.bmax), static_cast<unsigned int>(i)); bboxes_[idx] = bbox; for (int k = 0; k < 3; k++) { // xyz if (bmin[k] > bbox.bmin[k]) { bmin[k] = bbox.bmin[k]; } if (bmax[k] < bbox.bmax[k]) { bmax[k] = bbox.bmax[k]; } } } } else { #if defined(NANORT_USE_CPP11_FEATURE) ComputeBoundingBoxThreaded(&bmin, &bmax, &indices_.at(0), 0, n, p); #elif defined(_OPENMP) ComputeBoundingBoxOMP(&bmin, &bmax, &indices_.at(0), 0, n, p); #else ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), 0, n, p); #endif } // // 3. Build tree // #if defined(NANORT_ENABLE_PARALLEL_BUILD) #if defined(NANORT_USE_CPP11_FEATURE) // Do parallel build for enoughly large dataset. if (n > options.min_primitives_for_parallel_build) { BuildShallowTree(&nodes_, 0, n, /* root depth */ 0, options.shallow_depth, p, pred); // [0, n) assert(shallow_node_infos_.size() > 0); // Build deeper tree in parallel std::vector<std::vector<BVHNode<T> > > local_nodes( shallow_node_infos_.size()); std::vector<BVHBuildStatistics> local_stats(shallow_node_infos_.size()); size_t num_threads = std::min(size_t(kNANORT_MAX_THREADS), std::max(1UL, size_t(std::thread::hardware_concurrency()))); if (shallow_node_infos_.size() < num_threads) { num_threads = shallow_node_infos_.size(); } std::vector<std::thread> workers; std::atomic<uint32_t> i(0); for (size_t t = 0; t < num_threads; t++) { workers.emplace_back(std::thread([&, t]() { uint32_t idx = 0; while ((idx = (i++)) < shallow_node_infos_.size()) { // Create thread-local copy of Pred since some mutable variables are // modified during SAH computation. 
const Pred local_pred = pred; unsigned int left_idx = shallow_node_infos_[size_t(idx)].left_idx; unsigned int right_idx = shallow_node_infos_[size_t(idx)].right_idx; BuildTree(&(local_stats[size_t(idx)]), &(local_nodes[size_t(idx)]), left_idx, right_idx, options.shallow_depth, p, local_pred); } })); } for (auto &t : workers) { t.join(); } // Join local nodes for (size_t i = 0; i < local_nodes.size(); i++) { assert(!local_nodes[i].empty()); size_t offset = nodes_.size(); // Add offset to child index(for branch node). for (size_t j = 0; j < local_nodes[i].size(); j++) { if (local_nodes[i][j].flag == 0) { // branch local_nodes[i][j].data[0] += offset - 1; local_nodes[i][j].data[1] += offset - 1; } } // replace nodes_[shallow_node_infos_[i].offset] = local_nodes[i][0]; // Skip root element of the local node. nodes_.insert(nodes_.end(), local_nodes[i].begin() + 1, local_nodes[i].end()); } // Join statistics for (size_t i = 0; i < local_nodes.size(); i++) { stats_.max_tree_depth = std::max(stats_.max_tree_depth, local_stats[i].max_tree_depth); stats_.num_leaf_nodes += local_stats[i].num_leaf_nodes; stats_.num_branch_nodes += local_stats[i].num_branch_nodes; } } else { // Single thread. BuildTree(&stats_, &nodes_, 0, n, /* root depth */ 0, p, pred); // [0, n) } #elif defined(_OPENMP) // Do parallel build for enoughly large dataset. 
if (n > options.min_primitives_for_parallel_build) { BuildShallowTree(&nodes_, 0, n, /* root depth */ 0, options.shallow_depth, p, pred); // [0, n) assert(shallow_node_infos_.size() > 0); // Build deeper tree in parallel std::vector<std::vector<BVHNode<T> > > local_nodes( shallow_node_infos_.size()); std::vector<BVHBuildStatistics> local_stats(shallow_node_infos_.size()); #pragma omp parallel for for (int i = 0; i < static_cast<int>(shallow_node_infos_.size()); i++) { unsigned int left_idx = shallow_node_infos_[size_t(i)].left_idx; unsigned int right_idx = shallow_node_infos_[size_t(i)].right_idx; const Pred local_pred = pred; BuildTree(&(local_stats[size_t(i)]), &(local_nodes[size_t(i)]), left_idx, right_idx, options.shallow_depth, p, local_pred); } // Join local nodes for (size_t i = 0; i < local_nodes.size(); i++) { assert(!local_nodes[size_t(i)].empty()); size_t offset = nodes_.size(); // Add offset to child index(for branch node). for (size_t j = 0; j < local_nodes[i].size(); j++) { if (local_nodes[i][j].flag == 0) { // branch local_nodes[i][j].data[0] += offset - 1; local_nodes[i][j].data[1] += offset - 1; } } // replace nodes_[shallow_node_infos_[i].offset] = local_nodes[i][0]; // Skip root element of the local node. 
nodes_.insert(nodes_.end(), local_nodes[i].begin() + 1, local_nodes[i].end()); } // Join statistics for (size_t i = 0; i < local_nodes.size(); i++) { stats_.max_tree_depth = std::max(stats_.max_tree_depth, local_stats[i].max_tree_depth); stats_.num_leaf_nodes += local_stats[i].num_leaf_nodes; stats_.num_branch_nodes += local_stats[i].num_branch_nodes; } } else { // Single thread BuildTree(&stats_, &nodes_, 0, n, /* root depth */ 0, p, pred); // [0, n) } #else // !NANORT_ENABLE_PARALLEL_BUILD { BuildTree(&stats_, &nodes_, 0, n, /* root depth */ 0, p, pred); // [0, n) } #endif #else // !_OPENMP // Single thread BVH build { BuildTree(&stats_, &nodes_, 0, n, /* root depth */ 0, p, pred); // [0, n) } #endif return true; } template <typename T> void BVHAccel<T>::Debug() { for (size_t i = 0; i < indices_.size(); i++) { printf("index[%d] = %d\n", int(i), int(indices_[i])); } for (size_t i = 0; i < nodes_.size(); i++) { printf("node[%d] : bmin %f, %f, %f, bmax %f, %f, %f\n", int(i), nodes_[i].bmin[0], nodes_[i].bmin[1], nodes_[i].bmin[1], nodes_[i].bmax[0], nodes_[i].bmax[1], nodes_[i].bmax[1]); } } #if defined(NANORT_ENABLE_SERIALIZATION) template <typename T> bool BVHAccel<T>::Dump(const char *filename) const { FILE *fp = fopen(filename, "wb"); if (!fp) { // fprintf(stderr, "[BVHAccel] Cannot write a file: %s\n", filename); return false; } size_t numNodes = nodes_.size(); assert(nodes_.size() > 0); size_t numIndices = indices_.size(); size_t r = 0; r = fwrite(&numNodes, sizeof(size_t), 1, fp); assert(r == 1); r = fwrite(&nodes_.at(0), sizeof(BVHNode<T>), numNodes, fp); assert(r == numNodes); r = fwrite(&numIndices, sizeof(size_t), 1, fp); assert(r == 1); r = fwrite(&indices_.at(0), sizeof(unsigned int), numIndices, fp); assert(r == numIndices); fclose(fp); return true; } template <typename T> bool BVHAccel<T>::Dump(FILE *fp) const { size_t numNodes = nodes_.size(); assert(nodes_.size() > 0); size_t numIndices = indices_.size(); size_t r = 0; r = fwrite(&numNodes, 
sizeof(size_t), 1, fp); assert(r == 1); r = fwrite(&nodes_.at(0), sizeof(BVHNode<T>), numNodes, fp); assert(r == numNodes); r = fwrite(&numIndices, sizeof(size_t), 1, fp); assert(r == 1); r = fwrite(&indices_.at(0), sizeof(unsigned int), numIndices, fp); assert(r == numIndices); return true; } template <typename T> bool BVHAccel<T>::Load(const char *filename) { FILE *fp = fopen(filename, "rb"); if (!fp) { // fprintf(stderr, "Cannot open file: %s\n", filename); return false; } size_t numNodes; size_t numIndices; size_t r = 0; r = fread(&numNodes, sizeof(size_t), 1, fp); assert(r == 1); assert(numNodes > 0); nodes_.resize(numNodes); r = fread(&nodes_.at(0), sizeof(BVHNode<T>), numNodes, fp); assert(r == numNodes); r = fread(&numIndices, sizeof(size_t), 1, fp); assert(r == 1); indices_.resize(numIndices); r = fread(&indices_.at(0), sizeof(unsigned int), numIndices, fp); assert(r == numIndices); fclose(fp); return true; } template <typename T> bool BVHAccel<T>::Load(FILE *fp) { size_t numNodes; size_t numIndices; size_t r = 0; r = fread(&numNodes, sizeof(size_t), 1, fp); assert(r == 1); assert(numNodes > 0); nodes_.resize(numNodes); r = fread(&nodes_.at(0), sizeof(BVHNode<T>), numNodes, fp); assert(r == numNodes); r = fread(&numIndices, sizeof(size_t), 1, fp); assert(r == 1); indices_.resize(numIndices); r = fread(&indices_.at(0), sizeof(unsigned int), numIndices, fp); assert(r == numIndices); return true; } #endif template <typename T> inline bool IntersectRayAABB(T *tminOut, // [out] T *tmaxOut, // [out] T min_t, T max_t, const T bmin[3], const T bmax[3], real3<T> ray_org, real3<T> ray_inv_dir, int ray_dir_sign[3]); template <> inline bool IntersectRayAABB<float>(float *tminOut, // [out] float *tmaxOut, // [out] float min_t, float max_t, const float bmin[3], const float bmax[3], real3<float> ray_org, real3<float> ray_inv_dir, int ray_dir_sign[3]) { float tmin, tmax; const float min_x = ray_dir_sign[0] ? bmax[0] : bmin[0]; const float min_y = ray_dir_sign[1] ? 
                                        bmax[1] : bmin[1];
  const float min_z = ray_dir_sign[2] ? bmax[2] : bmin[2];
  const float max_x = ray_dir_sign[0] ? bmin[0] : bmax[0];
  const float max_y = ray_dir_sign[1] ? bmin[1] : bmax[1];
  const float max_z = ray_dir_sign[2] ? bmin[2] : bmax[2];

  // X
  const float tmin_x = (min_x - ray_org[0]) * ray_inv_dir[0];
  // MaxMult robust BVH traversal(up to 4 ulp).
  // 1.0000000000000004 for double precision.
  const float tmax_x = (max_x - ray_org[0]) * ray_inv_dir[0] * 1.00000024f;

  // Y
  const float tmin_y = (min_y - ray_org[1]) * ray_inv_dir[1];
  const float tmax_y = (max_y - ray_org[1]) * ray_inv_dir[1] * 1.00000024f;

  // Z
  const float tmin_z = (min_z - ray_org[2]) * ray_inv_dir[2];
  const float tmax_z = (max_z - ray_org[2]) * ray_inv_dir[2] * 1.00000024f;

  // safemax/safemin keep NaNs (from 0 * inf on degenerate axes) from
  // poisoning the interval.
  tmin = safemax(tmin_z, safemax(tmin_y, safemax(tmin_x, min_t)));
  tmax = safemin(tmax_z, safemin(tmax_y, safemin(tmax_x, max_t)));

  if (tmin <= tmax) {
    (*tminOut) = tmin;
    (*tmaxOut) = tmax;

    return true;
  }

  return false;  // no hit
}

// Double-precision specialization of the slab test above; identical logic,
// different robustness multiplier.
template <>
inline bool IntersectRayAABB<double>(double *tminOut,  // [out]
                                     double *tmaxOut,  // [out]
                                     double min_t, double max_t,
                                     const double bmin[3],
                                     const double bmax[3],
                                     real3<double> ray_org,
                                     real3<double> ray_inv_dir,
                                     int ray_dir_sign[3]) {
  double tmin, tmax;

  const double min_x = ray_dir_sign[0] ? bmax[0] : bmin[0];
  const double min_y = ray_dir_sign[1] ? bmax[1] : bmin[1];
  const double min_z = ray_dir_sign[2] ? bmax[2] : bmin[2];
  const double max_x = ray_dir_sign[0] ? bmin[0] : bmax[0];
  const double max_y = ray_dir_sign[1] ? bmin[1] : bmax[1];
  const double max_z = ray_dir_sign[2] ? bmin[2] : bmax[2];

  // X
  const double tmin_x = (min_x - ray_org[0]) * ray_inv_dir[0];
  // MaxMult robust BVH traversal(up to 4 ulp).
  const double tmax_x =
      (max_x - ray_org[0]) * ray_inv_dir[0] * 1.0000000000000004;

  // Y
  const double tmin_y = (min_y - ray_org[1]) * ray_inv_dir[1];
  const double tmax_y =
      (max_y - ray_org[1]) * ray_inv_dir[1] * 1.0000000000000004;

  // Z
  const double tmin_z = (min_z - ray_org[2]) * ray_inv_dir[2];
  const double tmax_z =
      (max_z - ray_org[2]) * ray_inv_dir[2] * 1.0000000000000004;

  tmin = safemax(tmin_z, safemax(tmin_y, safemax(tmin_x, min_t)));
  tmax = safemin(tmax_z, safemin(tmax_y, safemin(tmax_x, max_t)));

  if (tmin <= tmax) {
    (*tminOut) = tmin;
    (*tmaxOut) = tmax;

    return true;
  }

  return false;  // no hit
}

// Intersect the ray against every primitive referenced by leaf `node`,
// keeping the nearest hit in `intersector`'s state. Returns true if any
// primitive was hit closer than the current best distance.
template <typename T>
template <class I>
inline bool BVHAccel<T>::TestLeafNode(const BVHNode<T> &node, const Ray<T> &ray,
                                      const I &intersector) const {
  bool hit = false;

  // Leaf encoding: data[0] = primitive count, data[1] = offset into indices_.
  unsigned int num_primitives = node.data[0];
  unsigned int offset = node.data[1];

  T t = intersector.GetT();  // current hit distance

  real3<T> ray_org;
  ray_org[0] = ray.org[0];
  ray_org[1] = ray.org[1];
  ray_org[2] = ray.org[2];

  real3<T> ray_dir;
  ray_dir[0] = ray.dir[0];
  ray_dir[1] = ray.dir[1];
  ray_dir[2] = ray.dir[2];

  for (unsigned int i = 0; i < num_primitives; i++) {
    unsigned int prim_idx = indices_[i + offset];

    T local_t = t;
    if (intersector.Intersect(&local_t, prim_idx)) {
      // Update isect state
      t = local_t;

      intersector.Update(t, prim_idx);
      hit = true;
    }
  }

  return hit;
}

#if 0
// TODO(LTE): Implement
template <typename T>
template<class I, class H, class Comp>
bool BVHAccel<T>::MultiHitTestLeafNode(
    std::priority_queue<H, std::vector<H>, Comp> *isect_pq,
    int max_intersections,
    const BVHNode<T> &node, const Ray<T> &ray,
    const I &intersector) const {

  bool hit = false;

  unsigned int num_primitives = node.data[0];
  unsigned int offset = node.data[1];

  T t = std::numeric_limits<T>::max();

  if (isect_pq->size() >= static_cast<size_t>(max_intersections)) {
    t = isect_pq->top().t; // current furthest hit distance
  }

  real3<T> ray_org;
  ray_org[0] = ray.org[0];
  ray_org[1] = ray.org[1];
  ray_org[2] = ray.org[2];

  real3<T>
      ray_dir;
  ray_dir[0] = ray.dir[0];
  ray_dir[1] = ray.dir[1];
  ray_dir[2] = ray.dir[2];

  for (unsigned int i = 0; i < num_primitives; i++) {
    unsigned int prim_idx = indices_[i + offset];

    T local_t = t, u = 0.0f, v = 0.0f;
    if (intersector.Intersect(&local_t, &u, &v, prim_idx)) {
      // Update isect state
      if ((local_t > ray.min_t)) {

        if (isect_pq->size() < static_cast<size_t>(max_intersections)) {
          H isect;
          t = local_t;
          isect.t = t;
          isect.u = u;
          isect.v = v;
          isect.prim_id = prim_idx;
          isect_pq->push(isect);

          // Update t to furthest distance.
          t = ray.max_t;

          hit = true;
        } else {
          if (local_t < isect_pq->top().t) {
            // delete furthest intersection and add new intersection.
            isect_pq->pop();

            // NOTE(review): this local `H hit` shadows the outer bool `hit`,
            // so the assignment below never reaches the return value — fix
            // the name before enabling this #if 0 block.
            H hit;
            hit.t = local_t;
            hit.u = u;
            hit.v = v;
            hit.prim_id = prim_idx;
            isect_pq->push(hit);

            // Update furthest hit distance
            t = isect_pq->top().t;

            hit = true;
          }
        }
      }
    }
  }

  return hit;
}
#endif

// Traverse the BVH for the nearest hit along `ray`.
// `intersector` supplies the primitive test and accumulates the best hit;
// `isect` receives the final hit record via PostTraversal. Returns true when
// something was hit closer than ray.max_t.
template <typename T>
template <class I, class H>
bool BVHAccel<T>::Traverse(const Ray<T> &ray, const I &intersector, H *isect,
                           const BVHTraceOptions &options) const {
  const int kMaxStackDepth = 512;

  T hit_t = ray.max_t;

  // Explicit traversal stack; node 0 is the root.
  int node_stack_index = 0;
  unsigned int node_stack[512];
  node_stack[0] = 0;

  // Init isect info as no hit
  intersector.Update(hit_t, static_cast<unsigned int>(-1));

  intersector.PrepareTraversal(ray, options);

  int dir_sign[3];
  dir_sign[0] = ray.dir[0] < static_cast<T>(0.0) ? 1 : 0;
  dir_sign[1] = ray.dir[1] < static_cast<T>(0.0) ? 1 : 0;
  dir_sign[2] = ray.dir[2] < static_cast<T>(0.0) ? 1 : 0;

  real3<T> ray_inv_dir;
  real3<T> ray_dir;
  ray_dir[0] = ray.dir[0];
  ray_dir[1] = ray.dir[1];
  ray_dir[2] = ray.dir[2];

  ray_inv_dir = vsafe_inverse(ray_dir);

  real3<T> ray_org;
  ray_org[0] = ray.org[0];
  ray_org[1] = ray.org[1];
  ray_org[2] = ray.org[2];

  T min_t = std::numeric_limits<T>::max();
  T max_t = -std::numeric_limits<T>::max();

  while (node_stack_index >= 0) {
    unsigned int index = node_stack[node_stack_index];
    const BVHNode<T> &node = nodes_[index];

    node_stack_index--;

    bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin,
                                node.bmax, ray_org, ray_inv_dir, dir_sign);

    if (node.flag == 0) {  // branch node
      if (hit) {
        int order_near = dir_sign[node.axis];
        int order_far = 1 - order_near;

        // Traverse near first.
        node_stack[++node_stack_index] = node.data[order_far];
        node_stack[++node_stack_index] = node.data[order_near];
      }
    } else {  // leaf node
      if (hit) {
        if (TestLeafNode(node, ray, intersector)) {
          // Shrink the search interval so farther subtrees are culled.
          hit_t = intersector.GetT();
        }
      }
    }
  }

  assert(node_stack_index < kMaxStackDepth);

  bool hit = (intersector.GetT() < ray.max_t);
  intersector.PostTraversal(ray, hit, isect);

  return hit;
}

// Collect up to `max_intersections` nearest overlaps of leaf `node`'s
// primitives into the priority queue (furthest entry at the top).
// NOTE(review): `hit` is never set, so this always returns false; the caller
// (ListNodeIntersections) ignores the return value.
template <typename T>
template <class I>
inline bool BVHAccel<T>::TestLeafNodeIntersections(
    const BVHNode<T> &node, const Ray<T> &ray, const int max_intersections,
    const I &intersector,
    std::priority_queue<NodeHit<T>, std::vector<NodeHit<T> >,
                        NodeHitComparator<T> > *isect_pq) const {
  bool hit = false;

  unsigned int num_primitives = node.data[0];
  unsigned int offset = node.data[1];

  real3<T> ray_org;
  ray_org[0] = ray.org[0];
  ray_org[1] = ray.org[1];
  ray_org[2] = ray.org[2];

  real3<T> ray_dir;
  ray_dir[0] = ray.dir[0];
  ray_dir[1] = ray.dir[1];
  ray_dir[2] = ray.dir[2];

  intersector.PrepareTraversal(ray);

  for (unsigned int i = 0; i < num_primitives; i++) {
    unsigned int prim_idx = indices_[i + offset];

    T min_t, max_t;
    if (intersector.Intersect(&min_t, &max_t, prim_idx)) {
      // Always add to isect lists.
      NodeHit<T> isect;
      isect.t_min = min_t;
      isect.t_max = max_t;
      isect.node_id = prim_idx;

      if (isect_pq->size() < static_cast<size_t>(max_intersections)) {
        isect_pq->push(isect);
      } else {
        if (min_t < isect_pq->top().t_min) {
          // delete the furthest intersection and add a new intersection.
          isect_pq->pop();

          isect_pq->push(isect);
        }
      }
    }
  }

  return hit;
}

// Collect the `max_intersections` nearest node overlaps along `ray` into
// `hits`, sorted nearest-first. Returns true when at least one overlap was
// found.
template <typename T>
template <class I>
bool BVHAccel<T>::ListNodeIntersections(
    const Ray<T> &ray, int max_intersections, const I &intersector,
    StackVector<NodeHit<T>, 128> *hits) const {
  const int kMaxStackDepth = 512;

  T hit_t = ray.max_t;

  int node_stack_index = 0;
  unsigned int node_stack[512];
  node_stack[0] = 0;

  // Stores furthest intersection at top
  std::priority_queue<NodeHit<T>, std::vector<NodeHit<T> >,
                      NodeHitComparator<T> >
      isect_pq;

  (*hits)->clear();

  int dir_sign[3];
  dir_sign[0] = ray.dir[0] < static_cast<T>(0.0) ? 1 : 0;
  dir_sign[1] = ray.dir[1] < static_cast<T>(0.0) ? 1 : 0;
  dir_sign[2] = ray.dir[2] < static_cast<T>(0.0) ? 1 : 0;

  real3<T> ray_inv_dir;
  real3<T> ray_dir;
  ray_dir[0] = ray.dir[0];
  ray_dir[1] = ray.dir[1];
  ray_dir[2] = ray.dir[2];

  ray_inv_dir = vsafe_inverse(ray_dir);

  real3<T> ray_org;
  ray_org[0] = ray.org[0];
  ray_org[1] = ray.org[1];
  ray_org[2] = ray.org[2];

  T min_t, max_t;

  while (node_stack_index >= 0) {
    unsigned int index = node_stack[node_stack_index];
    const BVHNode<T> &node = nodes_[static_cast<size_t>(index)];

    node_stack_index--;

    bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin,
                                node.bmax, ray_org, ray_inv_dir, dir_sign);

    if (node.flag == 0) {  // branch node
      if (hit) {
        int order_near = dir_sign[node.axis];
        int order_far = 1 - order_near;

        // Traverse near first.
        node_stack[++node_stack_index] = node.data[order_far];
        node_stack[++node_stack_index] = node.data[order_near];
      }
    } else {  // leaf node
      if (hit) {
        TestLeafNodeIntersections(node, ray, max_intersections, intersector,
                                  &isect_pq);
      }
    }
  }

  assert(node_stack_index < kMaxStackDepth);
  (void)kMaxStackDepth;

  if (!isect_pq.empty()) {
    // Store intersection in reverse order(make it frontmost order)
    size_t n = isect_pq.size();
    (*hits)->resize(n);
    for (size_t i = 0; i < n; i++) {
      const NodeHit<T> &isect = isect_pq.top();
      (*hits)[n - i - 1] = isect;
      isect_pq.pop();
    }
    return true;
  }

  return false;
}

#if 0
// TODO(LTE): Implement
template <typename T>
template<class I, class H, class Comp>
bool BVHAccel<T>::MultiHitTraverse(const Ray<T> &ray,
                                   int max_intersections,
                                   const I &intersector,
                                   StackVector<H, 128> *hits,
                                   const BVHTraceOptions& options) const {
  const int kMaxStackDepth = 512;

  T hit_t = ray.max_t;

  int node_stack_index = 0;
  unsigned int node_stack[512];
  node_stack[0] = 0;

  // Stores furthest intersection at top
  std::priority_queue<H, std::vector<H>, Comp> isect_pq;

  (*hits)->clear();

  // Init isect info as no hit
  intersector.Update(hit_t, static_cast<unsigned int>(-1));

  intersector.PrepareTraversal(ray, options);

  // NOTE(review): dir_sign is int[3] but these branches cast to T — clean up
  // before enabling this #if 0 block.
  int dir_sign[3];
  dir_sign[0] = ray.dir[0] < static_cast<T>(0.0) ? static_cast<T>(1) : static_cast<T>(0);
  dir_sign[1] = ray.dir[1] < static_cast<T>(0.0) ? static_cast<T>(1) : static_cast<T>(0);
  dir_sign[2] = ray.dir[2] < static_cast<T>(0.0) ? static_cast<T>(1) : static_cast<T>(0);

  real3<T> ray_inv_dir;
  real3<T> ray_dir;
  ray_dir[0] = ray.dir[0];
  ray_dir[1] = ray.dir[1];
  ray_dir[2] = ray.dir[2];

  ray_inv_dir = vsafe_inverse(ray_dir);

  real3<T> ray_org;
  ray_org[0] = ray.org[0];
  ray_org[1] = ray.org[1];
  ray_org[2] = ray.org[2];

  T min_t, max_t;

  while (node_stack_index >= 0) {
    unsigned int index = node_stack[node_stack_index];
    const BVHNode<T> &node = nodes_[static_cast<size_t>(index)];

    node_stack_index--;

    bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin,
                                node.bmax, ray_org, ray_inv_dir, dir_sign);

    if (node.flag == 0) {  // branch node
      if (hit) {
        int order_near = dir_sign[node.axis];
        int order_far = 1 - order_near;

        // Traverse near first.
        node_stack[++node_stack_index] = node.data[order_far];
        node_stack[++node_stack_index] = node.data[order_near];
      }
    } else {  // leaf node
      if (hit) {
        if (MultiHitTestLeafNode(&isect_pq, max_intersections, node, ray,
                                 intersector)) {
          // Only update `hit_t` when queue is full.
          if (isect_pq.size() >= static_cast<size_t>(max_intersections)) {
            hit_t = isect_pq.top().t;
          }
        }
      }
    }
  }

  assert(node_stack_index < kMaxStackDepth);
  (void)kMaxStackDepth;

  if (!isect_pq.empty()) {
    // Store intersection in reverse order(make it frontmost order)
    size_t n = isect_pq.size();
    (*hits)->resize(n);
    for (size_t i = 0; i < n; i++) {
      const H &isect = isect_pq.top();
      (*hits)[n - i - 1] = isect;
      isect_pq.pop();
    }
    return true;
  }

  return false;
}
#endif

#ifdef __clang__
#pragma clang diagnostic pop
#endif

}  // namespace nanort

#endif  // NANORT_H_
GB_binop__pow_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__pow_int32 // A.*B function (eWiseMult): GB_AemultB__pow_int32 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__pow_int32 // C+=b function (dense accum): GB_Cdense_accumb__pow_int32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pow_int32 // C=scalar+B GB_bind1st__pow_int32 // C=scalar+B' GB_bind1st_tran__pow_int32 // C=A+scalar GB_bind2nd__pow_int32 // C=A'+scalar GB_bind2nd_tran__pow_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = GB_pow_int32 (aij, bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij 
= Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_pow_int32 (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_INT32 || GxB_NO_POW_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__pow_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__pow_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__pow_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__pow_int32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__pow_int32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT 
TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__pow_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t bij = Bx [p] ; Cx [p] = GB_pow_int32 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__pow_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; Cx [p] = GB_pow_int32 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ 
int32_t aij = Ax [pA] ; \ Cx [pC] = GB_pow_int32 (x, aij) ; \ } GrB_Info GB_bind1st_tran__pow_int32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = GB_pow_int32 (aij, y) ; \ } GrB_Info GB_bind2nd_tran__pow_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
Mapping.h
//===--------- Mapping.h - OpenMP device runtime mapping helpers -- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Queries about the thread/warp/block/kernel mapping of the executing thread.
//
//===----------------------------------------------------------------------===//

#ifndef OMPTARGET_MAPPING_H
#define OMPTARGET_MAPPING_H

#include "Types.h"

namespace _OMP {

namespace mapping {

#pragma omp declare target

inline constexpr uint32_t MaxThreadsPerTeam = 1024;

#pragma omp end declare target

/// Initialize the mapping machinery.
void init(bool IsSPMD);

/// Return true if the kernel is executed in SPMD mode.
bool isSPMDMode();

/// Return true if the kernel is executed in generic mode.
bool isGenericMode();

/// Return true if the executing thread is the main thread in generic mode.
bool isMainThreadInGenericMode();
bool isMainThreadInGenericMode(bool IsSPMD);

/// Return true if the executing thread has the lowest Id of the active threads
/// in the warp.
bool isLeaderInWarp();

/// Return a mask describing all active threads in the warp.
LaneMaskTy activemask();

/// Return a mask describing all threads with a smaller Id in the warp.
LaneMaskTy lanemaskLT();

/// Return a mask describing all threads with a larger Id in the warp.
LaneMaskTy lanemaskGT();

/// Return the thread Id in the warp, in [0, getWarpSize()).
uint32_t getThreadIdInWarp();

/// Return the thread Id in the block, in [0, getBlockSize()).
uint32_t getThreadIdInBlock();

/// Return the warp id in the block.
uint32_t getWarpId();

/// Return the warp size, thus number of threads in the warp.
uint32_t getWarpSize();

/// Return the number of warps in the block.
uint32_t getNumberOfWarpsInBlock();

/// Return the block Id in the kernel, in [0, getNumberOfBlocks()).
uint32_t getBlockId();

/// Return the block size, thus number of threads in the block.
uint32_t getBlockSize();

/// Return the number of blocks in the kernel.
uint32_t getNumberOfBlocks();

/// Return the kernel size, thus number of threads in the kernel.
uint32_t getKernelSize();

/// Return the number of processing elements on the device.
uint32_t getNumberOfProcessorElements();

} // namespace mapping

} // namespace _OMP

#endif
hello_gomp.c
/******************************************************************************
 * FILE: omp_hello.c
 * DESCRIPTION:
 *   OpenMP "Hello World". The initial thread forks a parallel region; every
 *   thread in the team reports its own id via omp_get_thread_num(), and the
 *   master thread (id 0) additionally reports the team size obtained from
 *   omp_get_num_threads().
 * AUTHOR: Blaise Barney 5/99
 * LAST REVISED: 04/06/05
 ******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main (int argc, char *argv[])
{
  int team_size, my_id;

  /* Fork a team of threads; each thread gets private copies of both
   * variables so the assignments below do not race. */
  #pragma omp parallel private(team_size, my_id)
  {
    /* Every thread announces itself. */
    my_id = omp_get_thread_num();
    printf("Hello World from thread = %d\n", my_id);

    /* The master thread alone reports how many threads were forked. */
    if (my_id == 0)
    {
      team_size = omp_get_num_threads();
      printf("Number of threads = %d\n", team_size);
    }

    printf("This is in thread %d\n", omp_get_thread_num());
  }  /* implicit barrier + join back to the master thread */

  return 0;
}
gd.c
#include "completion.h"
#include "gradient.h"

#include "../csf.h"

#include <math.h>

#include "../thd_info.h"



/******************************************************************************
 * PRIVATE FUNCTIONS
 *****************************************************************************/



/******************************************************************************
 * PUBLIC FUNCTIONS
 *****************************************************************************/

/*
 * Tensor completion via batch gradient descent with line search.
 *
 * Each epoch computes the full gradient of the regularized squared loss over
 * the training tensor, takes the steepest-descent direction (the negative
 * gradient), and delegates the step-size choice to tc_line_search(). Runs for
 * at most ws->max_its epochs or until tc_converge() signals convergence.
 *
 * train    : training tensor; factorized in place into `model`.
 * validate : held-out tensor used only for convergence reporting.
 * model    : factor matrices (one per mode); updated by the line search.
 * ws       : workspace holding hyperparameters, timers, and scratch buffers.
 *
 * NOTE(review): relies on assert() — presumably <assert.h> arrives via one of
 * the project headers above; confirm.
 */
void splatt_tc_gd(
    sptensor_t * train,
    sptensor_t const * const validate,
    tc_model * const model,
    tc_ws * const ws)
{
  /* convert training data to CSF-ALLMODE */
  double * opts = splatt_default_opts();
  opts[SPLATT_OPTION_CSF_ALLOC] = SPLATT_CSF_ONEMODE;
  opts[SPLATT_OPTION_TILE] = SPLATT_NOTILE;
  splatt_csf * csf = csf_alloc(train, opts);
  /* the gradient kernel below assumes an untiled CSF */
  assert(csf->ntiles == 1);

  idx_t const nmodes = train->nmodes;

  /* allocate gradients: one dims[m] x rank dense buffer per mode, plus a
   * matching buffer for the (negated) search direction */
  val_t * gradients[MAX_NMODES];
  val_t * directions[MAX_NMODES];
  for(idx_t m=0; m < nmodes; ++m) {
    gradients[m] = splatt_malloc(model->dims[m] * model->rank *
        sizeof(**gradients));
    directions[m] = splatt_malloc(model->dims[m] * model->rank *
        sizeof(**directions));
  }

  /* objective = squared loss + Frobenius-norm regularization */
  val_t loss = tc_loss_sq(train, model, ws);
  val_t frobsq = tc_frob_sq(model, ws);
  val_t prev_obj = loss + frobsq;
  /* epoch 0: report the starting objective */
  tc_converge(train, validate, model, loss, frobsq, 0, ws);

  timer_start(&ws->tc_time);
  /* foreach epoch */
  for(idx_t e=1; e < ws->max_its+1; ++e) {
    tc_gradient(csf, model, ws, gradients);

    /* direction is the negative gradient */
    #pragma omp parallel
    {
      for(idx_t m=0; m < model->nmodes; ++m) {
        idx_t const N = model->dims[m] * model->rank;
        val_t const * const restrict grad = gradients[m];
        val_t * const restrict direc = directions[m];
        /* nowait: modes are independent, so threads may run ahead into the
         * next mode's chunk without a barrier */
        #pragma omp for schedule(static) nowait
        for(idx_t x=0; x < N; ++x) {
          direc[x] = -grad[x];
        }
      }
    }

    /* line search updates the model and returns the new loss/regularization
     * terms through the out-parameters */
    tc_line_search(train, model, ws, prev_obj, gradients, directions, &loss,
        &frobsq);
    prev_obj = loss + frobsq;

    printf(" time-grad: %0.3fs time-line: %0.3fs\n", ws->grad_time.seconds,
        ws->line_time.seconds);

    if(tc_converge(train, validate, model, loss, frobsq, e, ws)) {
      break;
    }
  }

  /* cleanup */
  for(idx_t m=0; m < nmodes; ++m) {
    splatt_free(gradients[m]);
    splatt_free(directions[m]);
  }
  csf_free(csf, opts);
  splatt_free_opts(opts);
}
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 8; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; 
t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
cpu_kernels.h
#pragma once #include <torchaudio/csrc/rnnt/cpu/math.h> #include <torchaudio/csrc/rnnt/options.h> #include <torchaudio/csrc/rnnt/types.h> #include <cstring> #include <limits> #include <vector> namespace torchaudio { namespace rnnt { namespace cpu { template <typename DTYPE> struct LogProbs { DTYPE skip_; // blank. DTYPE emit_; // target. LogProbs(DTYPE skip, DTYPE emit) : skip_(skip), emit_(emit) {} DTYPE& skip() { return skip_; } DTYPE& emit() { return emit_; } const DTYPE& skip() const { return skip_; } const DTYPE& emit() const { return emit_; } }; // TensorView: view a block of allocated memory as a tensor. template <typename DTYPE> class TensorView { public: TensorView(const std::vector<int>& dims, DTYPE* data) : dims_(dims), data_(data) { strides_.resize(dims.size()); strides_.back() = 1; for (int i = dims.size() - 2; i >= 0; --i) { strides_[i] = strides_[i + 1] * dims[i + 1]; } } DTYPE& operator()(const std::vector<int>& indices) { CHECK_EQ(indices.size(), dims_.size()); int index = indices.back(); for (int i = indices.size() - 2; i >= 0; --i) { index += indices[i] * strides_[i]; } return data_[index]; } void SetZero() { int size = dims_[0] * strides_[0]; std::memset(data_, 0, sizeof(DTYPE) * size); } private: std::vector<int> dims_; std::vector<int> strides_; DTYPE* data_; }; template <typename DTYPE, typename CAST_DTYPE> status_t LogSumExp2D(int N, int D, const DTYPE* logits, CAST_DTYPE* outputs) { for (int i = 0; i < N * D; i += D) { CAST_DTYPE max = logits[i]; for (int j = 1; j < D; ++j) { max = std::max(max, CAST_DTYPE(logits[i + j])); } CAST_DTYPE sum = 0; for (int j = 0; j < D; ++j) { sum = sum + std::exp(CAST_DTYPE(logits[i + j]) - max); } outputs[i / D] = max + std::log(sum); } return SUCCESS; } template <typename DTYPE, typename CAST_DTYPE> void ComputeLogProbsOneSequence( const Options& options, TensorView<const DTYPE>& logits, const int* targets, int srcLen, int tgtLen, TensorView<const CAST_DTYPE>& denom, TensorView<LogProbs<CAST_DTYPE>>& 
logProbs) { const int& T = srcLen; const int& U = tgtLen; const int& blank = options.blank_; for (int t = 0; t < T; ++t) { for (int u = 0; u < U; ++u) { if (u < U - 1) { logProbs({t, u}).emit() = CAST_DTYPE(logits({t, u, targets[u]})) - denom({t, u}); } logProbs({t, u}).skip() = CAST_DTYPE(logits({t, u, blank})) - denom({t, u}); } } } template <typename DTYPE, typename CAST_DTYPE> status_t ComputeLogProbs( const Options& options, const DTYPE* logits, const int* targets, const int* srcLengths, const int* tgtLengths, const CAST_DTYPE* denominators, CAST_DTYPE* logProbs) { std::vector<TensorView<const DTYPE>> seqLogits; std::vector<const int*> seqTargets; std::vector<TensorView<const CAST_DTYPE>> seqDenoms; std::vector<TensorView<LogProbs<CAST_DTYPE>>> seqlogProbs; const int& B = options.batchSize_; const int& maxT = options.maxSrcLen_; const int& maxU = options.maxTgtLen_; const int& D = options.numTargets_; for (int b = 0; b < B; ++b) { seqLogits.push_back( TensorView<const DTYPE>({maxT, maxU, D}, logits + b * maxT * maxU * D)); seqTargets.push_back(targets + b * (maxU - 1)); seqDenoms.push_back(TensorView<const CAST_DTYPE>( {maxT, maxU}, denominators + b * maxT * maxU)); seqlogProbs.push_back(TensorView<LogProbs<CAST_DTYPE>>( {maxT, maxU}, reinterpret_cast<LogProbs<CAST_DTYPE>*>(logProbs) + b * maxT * maxU)); } //#pragma omp parallel for for (int b = 0; b < B; ++b) { // use max 2 * B threads. ComputeLogProbsOneSequence<DTYPE, CAST_DTYPE>( /*options=*/options, /*logits=*/seqLogits[b], /*targets=*/seqTargets[b], /*srcLen=*/srcLengths[b], /*tgtLen=*/tgtLengths[b] + 1, // with prepended blank. /*denom=*/seqDenoms[b], /*logProbs=*/seqlogProbs[b]); } return SUCCESS; } template <typename DTYPE> DTYPE ComputeAlphaOneSequence( const Options& options, TensorView<const LogProbs<DTYPE>>& logProbs, int srcLen, int tgtLen, TensorView<DTYPE>& alpha) { const int& T = srcLen; const int& U = tgtLen; alpha({0, 0}) = DTYPE(0); for (int t = 1; t < T; ++t) { // u == 0. 
alpha({t, 0}) = alpha({t - 1, 0}) + logProbs({t - 1, 0}).skip(); } for (int u = 1; u < U; ++u) { // t == 0. alpha({0, u}) = alpha({0, u - 1}) + logProbs({0, u - 1}).emit(); } for (int t = 1; t < T; ++t) { for (int u = 1; u < U; ++u) { alpha({t, u}) = math::lse( alpha({t - 1, u}) + logProbs({t - 1, u}).skip(), alpha({t, u - 1}) + logProbs({t, u - 1}).emit()); } } DTYPE forward_score = alpha({T - 1, U - 1}) + logProbs({T - 1, U - 1}).skip(); return forward_score; } template <typename DTYPE> DTYPE ComputeBetaOneSequence( const Options& options, TensorView<const LogProbs<DTYPE>>& logProbs, int srcLen, int tgtLen, TensorView<DTYPE>& beta) { const int& T = srcLen; const int& U = tgtLen; beta({T - 1, U - 1}) = logProbs({T - 1, U - 1}).skip(); for (int t = T - 2; t >= 0; --t) { // u == U - 1. beta({t, U - 1}) = beta({t + 1, U - 1}) + logProbs({t, U - 1}).skip(); } for (int u = U - 2; u >= 0; --u) { // t == T - 1. beta({T - 1, u}) = beta({T - 1, u + 1}) + logProbs({T - 1, u}).emit(); } for (int t = T - 2; t >= 0; --t) { for (int u = U - 2; u >= 0; --u) { beta({t, u}) = math::lse( beta({t + 1, u}) + logProbs({t, u}).skip(), beta({t, u + 1}) + logProbs({t, u}).emit()); } } DTYPE backward_score = beta({0, 0}); return backward_score; } template <typename DTYPE> DTYPE ComputeAlphaOrBetaOneSequence( int thread, const Options& options, TensorView<const LogProbs<DTYPE>>& logProbs, int srcLen, int tgtLen, TensorView<DTYPE>& alpha, TensorView<DTYPE>& beta) { if (thread & 1) { return ComputeAlphaOneSequence<DTYPE>( /*options=*/options, /*logProbs=*/logProbs, /*srcLen=*/srcLen, /*tgtLen=*/tgtLen, /*alpha=*/alpha); } else { return ComputeBetaOneSequence<DTYPE>( /*options=*/options, /*logProbs=*/logProbs, /*srcLen=*/srcLen, /*tgtLen=*/tgtLen, /*beta=*/beta); } } template <typename DTYPE, typename CAST_DTYPE> void ComputeAlphasBetas( const Options& options, const CAST_DTYPE* logProbs, const int* srcLengths, const int* tgtLengths, CAST_DTYPE* alphas, CAST_DTYPE* betas, DTYPE* costs) { 
std::vector<TensorView<const LogProbs<CAST_DTYPE>>> seqlogProbs; std::vector<TensorView<CAST_DTYPE>> seq_alphas; std::vector<TensorView<CAST_DTYPE>> seq_betas; const int& B = options.batchSize_; const int& maxT = options.maxSrcLen_; const int& maxU = options.maxTgtLen_; for (int b = 0; b < B; ++b) { seqlogProbs.push_back(TensorView<const LogProbs<CAST_DTYPE>>( {maxT, maxU}, reinterpret_cast<LogProbs<CAST_DTYPE>*>( const_cast<CAST_DTYPE*>(logProbs)) + b * maxT * maxU)); seq_alphas.push_back( TensorView<CAST_DTYPE>({maxT, maxU}, alphas + b * maxT * maxU)); seq_betas.push_back( TensorView<CAST_DTYPE>({maxT, maxU}, betas + b * maxT * maxU)); } std::vector<CAST_DTYPE> scores(B << 1); //#pragma omp parallel for for (int t = 0; t < (B << 1); ++t) { // use max 2 * B threads. int i = (t >> 1); scores[t] = ComputeAlphaOrBetaOneSequence<CAST_DTYPE>( /*thread=*/t, /*options=*/options, /*logProbs=*/seqlogProbs[i], /*srcLen=*/srcLengths[i], /*tgtLen=*/tgtLengths[i] + 1, // with prepended blank. /*alpha=*/seq_alphas[i], /*beta=*/seq_betas[i]); } for (int b = 0; b < B; ++b) { costs[b] = -scores[b << 1]; } } template <typename DTYPE, typename CAST_DTYPE> void ComputeGradientsOneSequence( const Options& options, TensorView<const DTYPE>& logits, const int* targets, int srcLen, int tgtLen, TensorView<const CAST_DTYPE>& denom, TensorView<const CAST_DTYPE>& alpha, TensorView<const CAST_DTYPE>& beta, TensorView<DTYPE>& gradients) { // don't set gradients to zero to here as gradients might reuse memory from // logits const int& T = srcLen; const int& U = tgtLen; const int& D = options.numTargets_; const int& blank = options.blank_; const CAST_DTYPE clamp = options.clamp_; CAST_DTYPE cost = -beta({0, 0}); // Note - below gradient is different from numpy_transducer, since we // compute log_softmax more efficiently within the loss, to save memory The // details of the below implementation / equations can be found in Sec 3.2 // (function merging) in below paper: // 
https://www.microsoft.com/en-us/research/uploads/prod/2019/10/RNNT.pdf for (int t = 0; t < T; ++t) { for (int u = 0; u < U; ++u) { CAST_DTYPE c = alpha({t, u}) + cost - denom({t, u}); for (int d = 0; d < D; ++d) { CAST_DTYPE g = CAST_DTYPE(logits({t, u, d})) + c; if (d == blank && t == T - 1 && u == U - 1) { // last blank transition. gradients({t, u, d}) = std::exp(g + beta({t, u})) - std::exp(g); } else if (d == blank && t < T - 1) { gradients({t, u, d}) = std::exp(g + beta({t, u})) - std::exp(g + beta({t + 1, u})); } else if (u < U - 1 && d == targets[u]) { gradients({t, u, d}) = std::exp(g + beta({t, u})) - std::exp(g + beta({t, u + 1})); } else { gradients({t, u, d}) = std::exp(g + beta({t, u})); } if (clamp > 0) { gradients({t, u, d}) = math::min(CAST_DTYPE(gradients({t, u, d})), clamp); gradients({t, u, d}) = math::max(CAST_DTYPE(gradients({t, u, d})), -clamp); } } } } // zero out the rest of the gradients, necessary when reusing logits memory // check the memory location to see if it's necessary if (&gradients({0, 0, 0}) == &logits({0, 0, 0})) { const int& maxT = options.maxSrcLen_; const int& maxU = options.maxTgtLen_; for (int t = T; t < maxT; ++t) { for (int u = 0; u < maxU; ++u) { for (int d = 0; d < D; ++d) { gradients({t, u, d}) = 0.; } } } for (int t = 0; t < T; ++t) { for (int u = U; u < maxU; ++u) { for (int d = 0; d < D; ++d) { gradients({t, u, d}) = 0.; } } } } } template <typename DTYPE, typename CAST_DTYPE> void ComputeGradients( const Options& options, const DTYPE* logits, const int* targets, const int* srcLengths, const int* tgtLengths, const CAST_DTYPE* denominators, const CAST_DTYPE* alphas, const CAST_DTYPE* betas, DTYPE* gradients) { std::vector<TensorView<const DTYPE>> seqLogits; std::vector<const int*> seqTargets; std::vector<TensorView<const CAST_DTYPE>> seqDenoms; std::vector<TensorView<const CAST_DTYPE>> seq_alphas; std::vector<TensorView<const CAST_DTYPE>> seq_betas; std::vector<TensorView<DTYPE>> seq_gradients; const int& B = 
options.batchSize_; const int& maxT = options.maxSrcLen_; const int& maxU = options.maxTgtLen_; const int& D = options.numTargets_; for (int b = 0; b < B; ++b) { seqLogits.push_back( TensorView<const DTYPE>({maxT, maxU, D}, logits + b * maxT * maxU * D)); seqTargets.push_back(targets + b * (maxU - 1)); seqDenoms.push_back(TensorView<const CAST_DTYPE>( {maxT, maxU}, denominators + b * maxT * maxU)); seq_alphas.push_back( TensorView<const CAST_DTYPE>({maxT, maxU}, alphas + b * maxT * maxU)); seq_betas.push_back( TensorView<const CAST_DTYPE>({maxT, maxU}, betas + b * maxT * maxU)); seq_gradients.push_back( TensorView<DTYPE>({maxT, maxU, D}, gradients + b * maxT * maxU * D)); } //#pragma omp parallel for for (int b = 0; b < B; ++b) { // use max 2 * B threads. ComputeGradientsOneSequence<DTYPE, CAST_DTYPE>( /*options=*/options, /*logits=*/seqLogits[b], /*targets=*/seqTargets[b], /*srcLen=*/srcLengths[b], /*tgtLen=*/tgtLengths[b] + 1, // with prepended blank. /*denom=*/seqDenoms[b], /*alpha=*/seq_alphas[b], /*beta=*/seq_betas[b], /*gradients=*/seq_gradients[b]); } } template <typename DTYPE, typename CAST_DTYPE> void ComputeAlphas( const Options& options, const CAST_DTYPE* logProbs, const int* srcLengths, const int* tgtLengths, CAST_DTYPE* alphas) { std::vector<TensorView<const LogProbs<CAST_DTYPE>>> seqlogProbs; std::vector<TensorView<CAST_DTYPE>> seq_alphas; const int& B = options.batchSize_; const int& maxT = options.maxSrcLen_; const int& maxU = options.maxTgtLen_; for (int b = 0; b < B; ++b) { seqlogProbs.push_back(TensorView<const LogProbs<CAST_DTYPE>>( {maxT, maxU}, reinterpret_cast<LogProbs<CAST_DTYPE>*>( const_cast<CAST_DTYPE*>(logProbs)) + b * maxT * maxU)); seq_alphas.push_back( TensorView<CAST_DTYPE>({maxT, maxU}, alphas + b * maxT * maxU)); } std::vector<CAST_DTYPE> scores(B << 1); //#pragma omp parallel for for (int i = 0; i < B; ++i) { // use max 2 * B threads. 
ComputeAlphaOneSequence<DTYPE>( options, /*logProbs=*/seqlogProbs[i], /*srcLen=*/srcLengths[i], /*tgtLen=*/tgtLengths[i] + 1, // with prepended blank. /*alpha=*/seq_alphas[i]); } } template <typename DTYPE, typename CAST_DTYPE> void ComputeBetas( const Options& options, const CAST_DTYPE* logProbs, const int* srcLengths, const int* tgtLengths, CAST_DTYPE* costs, CAST_DTYPE* betas) { std::vector<TensorView<const LogProbs<CAST_DTYPE>>> seqlogProbs; std::vector<TensorView<CAST_DTYPE>> seq_betas; const int& B = options.batchSize_; const int& maxT = options.maxSrcLen_; const int& maxU = options.maxTgtLen_; for (int b = 0; b < B; ++b) { seqlogProbs.push_back(TensorView<const LogProbs<CAST_DTYPE>>( {maxT, maxU}, reinterpret_cast<LogProbs<CAST_DTYPE>*>( const_cast<CAST_DTYPE*>(logProbs)) + b * maxT * maxU)); seq_betas.push_back( TensorView<CAST_DTYPE>({maxT, maxU}, betas + b * maxT * maxU)); } std::vector<CAST_DTYPE> scores(B << 1); //#pragma omp parallel for for (int i = 0; i < B; ++i) { // use max 2 * B threads. ComputeBetaOneSequence<DTYPE>( options, /*logProbs=*/seqlogProbs[i], /*srcLen=*/srcLengths[i], /*tgtLen=*/tgtLengths[i] + 1, // with prepended blank. /*betas=*/seq_betas[i]); } } } // namespace cpu } // namespace rnnt } // namespace torchaudio
master_taskloop_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 -triple x86_64-unknown-unknown %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify=expected,omp50 -triple x86_64-unknown-unknown %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 -triple x86_64-unknown-unknown %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify=expected,omp50 -triple x86_64-unknown-unknown %s -Wuninitialized void xxx(int argc) { int x; // expected-note {{initialize the variable 'x' to silence this warning}} #pragma omp master taskloop simd for (int i = 0; i < 10; ++i) argc = x; // expected-warning {{variable 'x' is uninitialized when used here}} } // expected-error@+1 {{unexpected OpenMP directive '#pragma omp master taskloop simd'}} #pragma omp master taskloop simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp master taskloop simd'}} #pragma omp master taskloop simd foo void test_no_clause(void) { int i; #pragma omp master taskloop simd for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp master taskloop simd' must be a for loop}} #pragma omp master taskloop simd ++i; } void test_branch_protected_scope(void) { int i = 0; L1: ++i; int x[24]; #pragma omp parallel #pragma omp master taskloop simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause(void) { int i; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop simd' are ignored}} #pragma omp master taskloop simd foo bar for (i = 0; i < 16; ++i) ; // expected-error@+1 {{directive '#pragma omp master taskloop simd' 
cannot contain more than one 'nogroup' clause}} #pragma omp master taskloop simd nogroup nogroup for (i = 0; i < 16; ++i) ; } void test_non_identifiers(void) { int i, x; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop simd' are ignored}} #pragma omp master taskloop simd; for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp master taskloop simd' are ignored}} #pragma omp parallel #pragma omp master taskloop simd linear(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop simd' are ignored}} #pragma omp master taskloop simd private(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop simd' are ignored}} #pragma omp master taskloop simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(void); void test_collapse(void) { int i; #pragma omp parallel // expected-error@+1 {{expected '('}} #pragma omp master taskloop simd collapse for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp master taskloop simd collapse( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp master taskloop simd collapse() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp master taskloop simd collapse(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp master taskloop simd collapse(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+2 {{extra tokens at the end of '#pragma omp master taskloop simd' 
are ignored}} // expected-error@+1 {{expected '('}} #pragma omp master taskloop simd collapse 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp master taskloop simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp master taskloop simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp master taskloop simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop simd', but found only 1}} #pragma omp parallel // expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp master taskloop simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp master taskloop simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp master taskloop simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp master 
taskloop simd', but found only 1}} #pragma omp parallel #pragma omp master taskloop simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp master taskloop simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop simd', but found only 1}} #pragma omp parallel // expected-error@+1 {{integer constant expression}} #pragma omp master taskloop simd collapse(2.5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{integer constant expression}} #pragma omp master taskloop simd collapse(foo()) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp master taskloop simd collapse(-5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp master taskloop simd collapse(0) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp master taskloop simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; } void test_private(void) { int i; #pragma omp parallel // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp master taskloop simd private( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp master taskloop simd private(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp master taskloop 
/*
 * NOTE(review): Clang '-verify' diagnostic tests for the OpenMP
 * '#pragma omp master taskloop simd' combined directive, covering the
 * private / lastprivate / firstprivate clauses, simdlen-vs-safelen
 * checking, loop-iteration-variable type restrictions, and the OpenMP 5.0
 * 'nontemporal' clause (omp45-error vs omp50-error variants).
 * The 'expected-error@+N' / 'expected-note@+N' comments are verifier
 * directives tied to RELATIVE line offsets -- do not insert, delete, or
 * reflow lines inside these functions, or the test semantics change.
 * This chunk begins mid-function (the tail of a 'private' clause test is
 * cut at the top); all tokens below are preserved byte-for-byte.
 */
simd private(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp master taskloop simd private() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp master taskloop simd private(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp master taskloop simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp master taskloop simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp master taskloop simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp master taskloop simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_lastprivate(void) { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp master taskloop simd lastprivate( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp master taskloop simd lastprivate(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp master taskloop simd lastprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp master taskloop simd lastprivate() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp master taskloop simd lastprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp master taskloop simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp master taskloop simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp master taskloop simd lastprivate(x, y) 
for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp master taskloop simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_firstprivate(void) { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp master taskloop simd firstprivate( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp master taskloop simd firstprivate(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp master taskloop simd firstprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp master taskloop simd firstprivate() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp master taskloop simd firstprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp master taskloop simd firstprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp master taskloop simd lastprivate(x) firstprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp master taskloop simd lastprivate(x, y) firstprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp master taskloop simd lastprivate(x, y, z) firstprivate(x, y, z) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp master taskloop simd simdlen(64) safelen(8) for (i = 0; i < 16; ++i) ; } void test_loop_messages(void) { float a[100], b[100], c[100]; #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp master taskloop simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = 
a[(int)fi] + b[(int)fi]; } #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp master taskloop simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } // expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}} #pragma omp master taskloop simd for (__int128 ii = 0; ii < 10; ii++) { c[ii] = a[ii] + b[ii]; } } void test_nontemporal(void) { int i; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp master taskloop simd nontemporal( for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp master taskloop simd nontemporal(, for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} expected-error@+1 2 {{expected expression}} #pragma omp master taskloop simd nontemporal(, ) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} expected-error@+1 {{expected expression}} #pragma omp master taskloop simd nontemporal() for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} expected-error@+1 {{expected expression}} #pragma omp master taskloop simd nontemporal(int) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} omp50-error@+1 {{expected variable name}} #pragma omp master taskloop simd nontemporal(0) for (i = 0; i < 16; ++i) ; // 
omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp master taskloop simd nontemporal(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp master taskloop simd nontemporal(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp master taskloop simd nontemporal(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp master taskloop simd nontemporal(x :) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} #pragma omp master taskloop simd nontemporal(x :, ) for (i = 0; i < 16; ++i) ; // omp50-note@+2 {{defined as nontemporal}} // omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}} #pragma omp master taskloop simd nontemporal(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} 
#pragma omp master taskloop simd private(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} #pragma omp master taskloop simd nontemporal(x) private(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} #pragma omp master taskloop simd nontemporal(x, y : 0) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} #pragma omp master taskloop simd nontemporal(x) lastprivate(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} #pragma omp master taskloop simd lastprivate(x) nontemporal(x) for (i = 0; i < 16; ++i) ; }
declare_simd_ast_print.c
/*
 * NOTE(review): Clang AST-print round-trip test for stacked
 * '#pragma omp declare simd' directives on a single declaration (add_1),
 * exercising aligned/simdlen/inbranch/notinbranch/uniform/linear clauses
 * and PCH serialization (second RUN line).  The RUN lines drive the test
 * and the CHECK / CHECK-NEXT lines are FileCheck patterns matched against
 * the printed AST in order -- every token below is semantic for the test,
 * so the content is preserved byte-for-byte.
 */
// RUN: %clang_cc1 -verify -fopenmp -ast-print %s | FileCheck %s // RUN: %clang_cc1 -fopenmp -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp -include-pch %t -fsyntax-only -verify %s -ast-print | FileCheck %s // expected-no-diagnostics #ifndef HEADER #define HEADER #pragma omp declare simd aligned(b : 64) #pragma omp declare simd simdlen(32) aligned(d, b) #pragma omp declare simd inbranch, uniform(d) linear(val(s1, s2) : 32) #pragma omp declare simd notinbranch simdlen(2), uniform(s1, s2) linear(d: s1) void add_1(float *d, int s1, float *s2, double b[]) __attribute__((cold)); // CHECK: #pragma omp declare simd notinbranch simdlen(2) uniform(s1, s2) linear(val(d): s1) // CHECK-NEXT: #pragma omp declare simd inbranch uniform(d) linear(val(s1): 32) linear(val(s2): 32) // CHECK-NEXT: #pragma omp declare simd simdlen(32) aligned(d) aligned(b) // CHECK-NEXT: #pragma omp declare simd aligned(b: 64) // CHECK-NEXT: void add_1(float *d, int s1, float *s2, double b[]) __attribute__((cold)) #endif
convolution_5x5.h
/*
 * NOTE(review): ncnn ARM NEON 5x5 convolution kernels (C++, despite the
 * corpus's .c hint -- uses 'Mat&' references and OpenMP parallel-for over
 * output channels).
 *
 * conv5x5s1_neon -- stride-1 5x5 convolution.  For each output channel p it
 * fills the output with the bias, then accumulates contributions from every
 * input channel q.  The main loop processes TWO output rows at a time
 * (outptr/outptr2 reading rows r0..r5) with a single-row tail loop
 * (r0..r4), and each has three code paths:
 *   - AArch64: NEON intrinsics (vld1q/vextq/vfmaq_laneq) over 4-wide
 *     column groups;
 *   - ARMv7: a hand-written inline-asm loop -- operand numbers in the asm
 *     body (%14..%24 resp. %18..%24) index the constraint lists below it,
 *     so statement order, register choices (q7..q15), and clobbers are
 *     load-bearing;
 *   - scalar fallback for the per-row remainder columns (partially
 *     NEON-assisted when __ARM_NEON is set).
 *
 * conv5x5s2_neon -- stride-2 variant: vld2q de-interleaves even/odd input
 * columns, input pointers advance by 8 per 4 outputs and by 'tailstep'
 * (= w - 2*outw + w) per row; otherwise the same three-path structure with
 * a single asm loop and scalar remainder.
 *
 * Because the intrinsic/asm sequences are order-sensitive, the code below
 * is left byte-identical; only this documentation header is added.
 */
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif // __ARM_NEON static void conv5x5s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; out.fill(bias0); for (int q=0; q<inch; q++) { float* outptr = out; float* outptr2 = outptr + outw; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch*25 + q*25; const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* r3 = img0 + w*3; const float* r4 = img0 + w*4; const float* r5 = img0 + w*5; const float* k0 = kernel0; const float* k1 = kernel0 + 5; const float* k2 = kernel0 + 10; const float* k3 = kernel0 + 15; const float* k4 = kernel0 + 20; #if __ARM_NEON float32x4_t _k0123 = vld1q_f32(kernel0); float32x4_t _k4567 = vld1q_f32(kernel0+4); float32x4_t _k891011 = vld1q_f32(kernel0+8); float32x4_t _k12131415 = vld1q_f32(kernel0+12); float32x4_t _k16171819 = vld1q_f32(kernel0+16); float32x4_t _k20212223 = vld1q_f32(kernel0+20); float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]); #endif // __ARM_NEON int i = 0; for (; i+1 < outh; i+=2) { #if __ARM_NEON int nn = outw >> 2; int remain = outw - (nn << 2); #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _sum = vld1q_f32(outptr); float32x4_t _sum2 = vld1q_f32(outptr2); float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r04 = vld1q_f32(r0 + 4); float32x4_t _r01 = vextq_f32(_r00, _r04, 1); float32x4_t _r02 = vextq_f32(_r00, _r04, 2); float32x4_t _r03 = vextq_f32(_r00, _r04, 3); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r14 = vld1q_f32(r1 + 4); float32x4_t _r11 = vextq_f32(_r10, _r14, 1); float32x4_t _r12 = vextq_f32(_r10, _r14, 2); float32x4_t _r13 = vextq_f32(_r10, _r14, 3); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r24 = vld1q_f32(r2 + 4); float32x4_t _r21 = vextq_f32(_r20, _r24, 1); float32x4_t _r22 = vextq_f32(_r20, _r24, 2); float32x4_t _r23 = vextq_f32(_r20, _r24, 3); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r34 = vld1q_f32(r3 + 4); float32x4_t _r31 = vextq_f32(_r30, _r34, 1); float32x4_t _r32 = vextq_f32(_r30, _r34, 2); float32x4_t _r33 = vextq_f32(_r30, _r34, 
3); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r44 = vld1q_f32(r4 + 4); float32x4_t _r41 = vextq_f32(_r40, _r44, 1); float32x4_t _r42 = vextq_f32(_r40, _r44, 2); float32x4_t _r43 = vextq_f32(_r40, _r44, 3); float32x4_t _r50 = vld1q_f32(r5); float32x4_t _r54 = vld1q_f32(r5 + 4); float32x4_t _r51 = vextq_f32(_r50, _r54, 1); float32x4_t _r52 = vextq_f32(_r50, _r54, 2); float32x4_t _r53 = vextq_f32(_r50, _r54, 3); _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0); _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1); _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2); _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3); _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0); _sum = vfmaq_laneq_f32(_sum, _r10, _k4567, 1); _sum = vfmaq_laneq_f32(_sum, _r11, _k4567, 2); _sum = vfmaq_laneq_f32(_sum, _r12, _k4567, 3); _sum = vfmaq_laneq_f32(_sum, _r13, _k891011, 0); _sum = vfmaq_laneq_f32(_sum, _r14, _k891011, 1); _sum = vfmaq_laneq_f32(_sum, _r20, _k891011, 2); _sum = vfmaq_laneq_f32(_sum, _r21, _k891011, 3); _sum = vfmaq_laneq_f32(_sum, _r22, _k12131415, 0); _sum = vfmaq_laneq_f32(_sum, _r23, _k12131415, 1); _sum = vfmaq_laneq_f32(_sum, _r24, _k12131415, 2); _sum = vfmaq_laneq_f32(_sum, _r30, _k12131415, 3); _sum = vfmaq_laneq_f32(_sum, _r31, _k16171819, 0); _sum = vfmaq_laneq_f32(_sum, _r32, _k16171819, 1); _sum = vfmaq_laneq_f32(_sum, _r33, _k16171819, 2); _sum = vfmaq_laneq_f32(_sum, _r34, _k16171819, 3); _sum = vfmaq_laneq_f32(_sum, _r40, _k20212223, 0); _sum = vfmaq_laneq_f32(_sum, _r41, _k20212223, 1); _sum = vfmaq_laneq_f32(_sum, _r42, _k20212223, 2); _sum = vfmaq_laneq_f32(_sum, _r43, _k20212223, 3); _sum = vfmaq_laneq_f32(_sum, _r44, _k24242424, 0); _sum2 = vfmaq_laneq_f32(_sum2, _r10, _k0123, 0); _sum2 = vfmaq_laneq_f32(_sum2, _r11, _k0123, 1); _sum2 = vfmaq_laneq_f32(_sum2, _r12, _k0123, 2); _sum2 = vfmaq_laneq_f32(_sum2, _r13, _k0123, 3); _sum2 = vfmaq_laneq_f32(_sum2, _r14, _k4567, 0); _sum2 = vfmaq_laneq_f32(_sum2, _r20, _k4567, 1); _sum2 = vfmaq_laneq_f32(_sum2, _r21, _k4567, 2); 
_sum2 = vfmaq_laneq_f32(_sum2, _r22, _k4567, 3); _sum2 = vfmaq_laneq_f32(_sum2, _r23, _k891011, 0); _sum2 = vfmaq_laneq_f32(_sum2, _r24, _k891011, 1); _sum2 = vfmaq_laneq_f32(_sum2, _r30, _k891011, 2); _sum2 = vfmaq_laneq_f32(_sum2, _r31, _k891011, 3); _sum2 = vfmaq_laneq_f32(_sum2, _r32, _k12131415, 0); _sum2 = vfmaq_laneq_f32(_sum2, _r33, _k12131415, 1); _sum2 = vfmaq_laneq_f32(_sum2, _r34, _k12131415, 2); _sum2 = vfmaq_laneq_f32(_sum2, _r40, _k12131415, 3); _sum2 = vfmaq_laneq_f32(_sum2, _r41, _k16171819, 0); _sum2 = vfmaq_laneq_f32(_sum2, _r42, _k16171819, 1); _sum2 = vfmaq_laneq_f32(_sum2, _r43, _k16171819, 2); _sum2 = vfmaq_laneq_f32(_sum2, _r44, _k16171819, 3); _sum2 = vfmaq_laneq_f32(_sum2, _r50, _k20212223, 0); _sum2 = vfmaq_laneq_f32(_sum2, _r51, _k20212223, 1); _sum2 = vfmaq_laneq_f32(_sum2, _r52, _k20212223, 2); _sum2 = vfmaq_laneq_f32(_sum2, _r53, _k20212223, 3); _sum2 = vfmaq_laneq_f32(_sum2, _r54, _k24242424, 0); vst1q_f32(outptr, _sum); vst1q_f32(outptr2, _sum2); r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; outptr += 4; outptr2 += 4; } #else if (nn > 0) { asm volatile( // "veor q13, q13 \n" // "veor q14, q14 \n" "pld [%1, #128] \n" "vld1.f32 {d14-d15}, [%1] \n"// q7 = out "0: \n" // q11 = rx1 / rx3 // q12 = rx2 // q13 q14 = intermediate sum register "pld [%2, #128] \n" "vld1.f32 {d16-d17}, [%2] \n"// q8 = out2 "pld [%4, #256] \n" // r1 "vld1.f32 {d18-d21}, [%4] \n"// q9 q10 = r10 r14 "add %4, #16 \n" "vext.32 q11, q9, q10, #1 \n"// r11 "vmul.f32 q13, q9, %e19[1] \n" "vmla.f32 q8, q9, %e18[0] \n" "vext.32 q12, q9, q10, #2 \n"// r12 "vmla.f32 q7, q11, %f19[0] \n" "vmul.f32 q14, q11, %e18[1] \n" "vext.32 q11, q9, q10, #3 \n"// r13 "vmla.f32 q13, q12, %f19[1] \n" "vmla.f32 q8, q12, %f18[0] \n" "vmla.f32 q7, q11, %e20[0] \n" "vmla.f32 q14, q11, %f18[1] \n" "pld [%5, #256] \n" "vmla.f32 q13, q10, %e20[1] \n" "vmla.f32 q8, q10, %e19[0] \n" // r2 "vld1.f32 {d18-d21}, [%5] \n"// q9 q10 = r20 r24 "add %5, #16 \n" "vext.32 q11, q9, q10, #1 \n"// r21 
"vmla.f32 q7, q9, %f20[0] \n" "vmla.f32 q14, q9, %e19[1] \n" "vext.32 q12, q9, q10, #2 \n"// r22 "vmla.f32 q13, q11, %f20[1] \n" "vmla.f32 q8, q11, %f19[0] \n" "vext.32 q11, q9, q10, #3 \n"// r23 "vmla.f32 q7, q12, %e21[0] \n" "vmla.f32 q14, q12, %f19[1] \n" "vmla.f32 q13, q11, %e21[1] \n" "vmla.f32 q8, q11, %e20[0] \n" "pld [%6, #256] \n" "vmla.f32 q7, q10, %f21[0] \n" "vmla.f32 q14, q10, %e20[1] \n" // r3 "vld1.f32 {d18-d21}, [%6] \n"// q9 q10 = r30 r34 "add %6, #16 \n" "vext.32 q11, q9, q10, #1 \n"// r31 "vmla.f32 q13, q9, %f21[1] \n" "vmla.f32 q8, q9, %f20[0] \n" "vext.32 q12, q9, q10, #2 \n"// r32 "vmla.f32 q7, q11, %e22[0] \n" "vmla.f32 q14, q11, %f20[1] \n" "vext.32 q11, q9, q10, #3 \n"// r33 "vmla.f32 q13, q12, %e22[1] \n" "vmla.f32 q8, q12, %e21[0] \n" "vmla.f32 q7, q11, %f22[0] \n" "vmla.f32 q14, q11, %e21[1] \n" "pld [%7, #256] \n" "vmla.f32 q13, q10, %f22[1] \n" "vmla.f32 q8, q10, %f21[0] \n" // r4 "vld1.f32 {d18-d21}, [%7] \n"// q9 q10 = r40 r44 "add %7, #16 \n" "vext.32 q11, q9, q10, #1 \n"// r41 "vmla.f32 q7, q9, %e23[0] \n" "vmla.f32 q14, q9, %f21[1] \n" "vext.32 q12, q9, q10, #2 \n"// r42 "vmla.f32 q13, q11, %e23[1] \n" "vmla.f32 q8, q11, %e22[0] \n" "vext.32 q11, q9, q10, #3 \n"// r43 "vmla.f32 q7, q12, %f23[0] \n" "vmla.f32 q14, q12, %e22[1] \n" "vmla.f32 q13, q11, %f23[1] \n" "vmla.f32 q8, q11, %f22[0] \n" "pld [%3, #256] \n" "vmla.f32 q7, q10, %e24[0] \n" "vmla.f32 q14, q10, %f22[1] \n" // r0 and r5 "vld1.f32 {d18-d21}, [%3] \n"// q9 q10 = r00 r04 "add %3, #16 \n" "vext.32 q11, q9, q10, #1 \n"// r01 "vmla.f32 q13, q11, %e18[1] \n" "vext.32 q12, q9, q10, #2 \n"// r02 "vmla.f32 q7, q12, %f18[0] \n" "vext.32 q11, q9, q10, #3 \n"// r03 "pld [%8, #256] \n" "vmla.f32 q13, q11, %f18[1] \n" // r5 "vld1.f32 {d22-d25}, [%8] \n"// q11 q12 = r50 r54 "add %8, #16 \n" "vmla.f32 q8, q11, %e23[0] \n" "vmla.f32 q14, q12, %e24[0] \n" "vmla.f32 q7, q9, %e18[0] \n" "vmla.f32 q13, q10, %e19[0] \n" "vext.32 q9, q11, q12, #1 \n"// r51 "vext.32 q10, q11, q12, #2 \n"// 
r52 "vmla.f32 q14, q9, %e23[1] \n" "vext.32 q9, q11, q12, #3 \n"// r53 "vmla.f32 q8, q10, %f23[0] \n" "vmla.f32 q14, q9, %f23[1] \n" "vadd.f32 q7, q7, q13 \n" // "veor q13, q13 \n" "vst1.f32 {d14-d15}, [%1]! \n" "vadd.f32 q8, q8, q14 \n" "pld [%1, #128] \n" "vld1.f32 {d14-d15}, [%1] \n"// q7 = out // "veor q14, q14 \n" "vst1.f32 {d16-d17}, [%2]! \n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(outptr2), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3), // %6 "=r"(r4), // %7 "=r"(r5) // %8 : "0"(nn), "1"(outptr), "2"(outptr2), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "7"(r4), "8"(r5), "w"(_k0123), // %18 "w"(_k4567), // %19 "w"(_k891011), // %20 "w"(_k12131415), // %21 "w"(_k16171819), // %22 "w"(_k20212223), // %23 "w"(_k24242424) // %24 : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = 0; float sum2 = 0; #if __ARM_NEON float32x4_t _r1 = vld1q_f32(r1); float32x4_t _k1 = vld1q_f32(k1); float32x4_t _sum = vmulq_f32(_r1, _k1); float32x4_t _sum2 = vmulq_f32(_r1, _k0123); float32x4_t _r2 = vld1q_f32(r2); float32x4_t _k2 = vld1q_f32(k2); _sum = vmlaq_f32(_sum, _r2, _k2); _sum2 = vmlaq_f32(_sum2, _r2, _k1); float32x4_t _r3 = vld1q_f32(r3); float32x4_t _k3 = vld1q_f32(k3); _sum = vmlaq_f32(_sum, _r3, _k3); _sum2 = vmlaq_f32(_sum2, _r3, _k2); float32x4_t _r4 = vld1q_f32(r4); _sum = vmlaq_f32(_sum, _r4, _k20212223); _sum2 = vmlaq_f32(_sum2, _r4, _k3); float32x4_t _r0 = vld1q_f32(r0); _sum = vmlaq_f32(_sum, _r0, _k0123); float32x4_t _r5 = vld1q_f32(r5); _sum2 = vmlaq_f32(_sum2, _r5, _k20212223); float32x4_t _k_t4; _k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0); _k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1); _k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2); _k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3); float32x4_t _r_t4; _r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0); _r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1); _r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2); _r_t4 = 
vsetq_lane_f32(r3[4], _r_t4, 3); _sum = vmlaq_f32(_sum, _r_t4, _k_t4); sum = r4[4] * k4[4]; _r_t4 = vextq_f32(_r_t4, _r_t4, 1); _r_t4 = vsetq_lane_f32(r4[4], _r_t4, 3); _sum2 = vmlaq_f32(_sum2, _r_t4, _k_t4); sum2 = r5[4] * k4[4]; float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2)); float32x2_t _ss_ss2 = vpadd_f32(_ss, _ss2); sum += vget_lane_f32(_ss_ss2, 0); sum2 += vget_lane_f32(_ss_ss2, 1); #else sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; sum2 += r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r1[3] * k0[3]; sum2 += r1[4] * k0[4]; sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r2[3] * k1[3]; sum2 += r2[4] * k1[4]; sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; sum2 += r3[3] * k2[3]; sum2 += r3[4] * k2[4]; sum2 += r4[0] * k3[0]; sum2 += r4[1] * k3[1]; sum2 += r4[2] * k3[2]; sum2 += r4[3] * k3[3]; sum2 += r4[4] * k3[4]; sum2 += r5[0] * k4[0]; sum2 += r5[1] * k4[1]; sum2 += r5[2] * k4[2]; sum2 += r5[3] * k4[3]; sum2 += r5[4] * k4[4]; #endif // __ARM_NEON *outptr += sum; *outptr2 += sum2; r0++; r1++; r2++; r3++; r4++; r5++; outptr++; outptr2++; } r0 += 4 + w; r1 += 4 + w; r2 += 4 + w; r3 += 4 + w; r4 += 4 + w; r5 += 4 + w; outptr += outw; outptr2 += outw; } for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw - (nn << 2); #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if 
__aarch64__ for (; nn>0; nn--) { float32x4_t _sum = vld1q_f32(outptr); float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r04 = vld1q_f32(r0 + 4); float32x4_t _r01 = vextq_f32(_r00, _r04, 1); float32x4_t _r02 = vextq_f32(_r00, _r04, 2); float32x4_t _r03 = vextq_f32(_r00, _r04, 3); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r14 = vld1q_f32(r1 + 4); float32x4_t _r11 = vextq_f32(_r10, _r14, 1); float32x4_t _r12 = vextq_f32(_r10, _r14, 2); float32x4_t _r13 = vextq_f32(_r10, _r14, 3); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r24 = vld1q_f32(r2 + 4); float32x4_t _r21 = vextq_f32(_r20, _r24, 1); float32x4_t _r22 = vextq_f32(_r20, _r24, 2); float32x4_t _r23 = vextq_f32(_r20, _r24, 3); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r34 = vld1q_f32(r3 + 4); float32x4_t _r31 = vextq_f32(_r30, _r34, 1); float32x4_t _r32 = vextq_f32(_r30, _r34, 2); float32x4_t _r33 = vextq_f32(_r30, _r34, 3); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r44 = vld1q_f32(r4 + 4); float32x4_t _r41 = vextq_f32(_r40, _r44, 1); float32x4_t _r42 = vextq_f32(_r40, _r44, 2); float32x4_t _r43 = vextq_f32(_r40, _r44, 3); _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0); _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1); _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2); _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3); _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0); _sum = vfmaq_laneq_f32(_sum, _r10, _k4567, 1); _sum = vfmaq_laneq_f32(_sum, _r11, _k4567, 2); _sum = vfmaq_laneq_f32(_sum, _r12, _k4567, 3); _sum = vfmaq_laneq_f32(_sum, _r13, _k891011, 0); _sum = vfmaq_laneq_f32(_sum, _r14, _k891011, 1); _sum = vfmaq_laneq_f32(_sum, _r20, _k891011, 2); _sum = vfmaq_laneq_f32(_sum, _r21, _k891011, 3); _sum = vfmaq_laneq_f32(_sum, _r22, _k12131415, 0); _sum = vfmaq_laneq_f32(_sum, _r23, _k12131415, 1); _sum = vfmaq_laneq_f32(_sum, _r24, _k12131415, 2); _sum = vfmaq_laneq_f32(_sum, _r30, _k12131415, 3); _sum = vfmaq_laneq_f32(_sum, _r31, _k16171819, 0); _sum = vfmaq_laneq_f32(_sum, _r32, _k16171819, 1); _sum = 
vfmaq_laneq_f32(_sum, _r33, _k16171819, 2); _sum = vfmaq_laneq_f32(_sum, _r34, _k16171819, 3); _sum = vfmaq_laneq_f32(_sum, _r40, _k20212223, 0); _sum = vfmaq_laneq_f32(_sum, _r41, _k20212223, 1); _sum = vfmaq_laneq_f32(_sum, _r42, _k20212223, 2); _sum = vfmaq_laneq_f32(_sum, _r43, _k20212223, 3); _sum = vfmaq_laneq_f32(_sum, _r44, _k24242424, 0); vst1q_f32(outptr, _sum); r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; outptr += 4; } #else if (nn > 0) { asm volatile( // "veor q15, q15 \n"// _sum3 = 0; "pld [%1, #128] \n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2] \n"// _r00 = vld1q_f32(r0+j); "add %2, #16 \n" "0: \n" "vld1.f32 {d14-d15}, [%1] \n"// _sum = vld1q_f32(outptr+j); "veor q13, q13 \n"// _sum2 = 0; "veor q14, q14 \n"// _sum3 = 0; "vext.32 q10, q8, q9, #1 \n"// _r01 "vext.32 q11, q8, q9, #2 \n"// _r02 "vext.32 q12, q8, q9, #3 \n"// _r03 "vmla.f32 q7, q8, %e14[0] \n" "vmla.f32 q13, q10, %e14[1] \n" "pld [%3, #256] \n" "vmla.f32 q14, q11, %f14[0] \n" "vmul.f32 q15, q12, %f14[1] \n" "vmla.f32 q7, q9, %e15[0] \n" "vld1.f32 {d16-d19}, [%3] \n" "add %3, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vext.32 q12, q8, q9, #3 \n" "vmla.f32 q7, q8, %e15[1] \n" "vmla.f32 q13, q10, %f15[0] \n" "pld [%4, #256] \n" "vmla.f32 q14, q11, %f15[1] \n" "vmla.f32 q15, q12, %e16[0] \n" "vmla.f32 q7, q9, %e16[1] \n" "vld1.f32 {d16-d19}, [%4] \n" "add %4, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vext.32 q12, q8, q9, #3 \n" "vmla.f32 q7, q8, %f16[0] \n" "vmla.f32 q13, q10, %f16[1] \n" "pld [%5, #256] \n" "vmla.f32 q14, q11, %e17[0] \n" "vmla.f32 q15, q12, %e17[1] \n" "vmla.f32 q7, q9, %f17[0] \n" "vld1.f32 {d16-d19}, [%5] \n" "add %5, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vext.32 q12, q8, q9, #3 \n" "vmla.f32 q7, q8, %f17[1] \n" "vmla.f32 q13, q10, %e18[0] \n" "pld [%6, #256] \n" "vmla.f32 q14, q11, %e18[1] \n" "vmla.f32 q15, q12, %f18[0] \n" "vmla.f32 q7, q9, %f18[1] \n" "vld1.f32 {d16-d19}, [%6] \n" "add 
%6, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vext.32 q12, q8, q9, #3 \n" "vmla.f32 q7, q8, %e19[0] \n" "vmla.f32 q13, q10, %e19[1] \n" "vmla.f32 q14, q11, %f19[0] \n" "vmla.f32 q15, q12, %f19[1] \n" "vmla.f32 q7, q9, %e20[0] \n" "vadd.f32 q14, q14, q15 \n" "vadd.f32 q7, q7, q13 \n" // "veor q15, q15 \n"// _sum3 = 0; "pld [%2, #256] \n" "vadd.f32 q7, q7, q14 \n" "vld1.f32 {d16-d19}, [%2] \n"// _r00 = vld1q_f32(r0+j); "add %2, #16 \n" "vst1.f32 {d14-d15}, [%1]! \n" "pld [%1, #128] \n" "subs %0, #1 \n" "bne 0b \n" "sub %2, #16 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4) // %6 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k891011), // %16 "w"(_k12131415), // %17 "w"(_k16171819), // %18 "w"(_k20212223), // %19 "w"(_k24242424) // %20 : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = 0; #if __ARM_NEON float32x4_t _r0 = vld1q_f32(r0); float32x4_t _sum = vmulq_f32(_r0, _k0123); float32x4_t _r1 = vld1q_f32(r1); _sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1)); float32x4_t _r2 = vld1q_f32(r2); _sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2)); float32x4_t _r3 = vld1q_f32(r3); _sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3)); float32x4_t _r4 = vld1q_f32(r4); _sum = vmlaq_f32(_sum, _r4, _k20212223); float32x4_t _k_t4; _k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0); _k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1); _k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2); _k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3); float32x4_t _r_t4; _r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0); _r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1); _r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2); _r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3); _sum = vmlaq_f32(_sum, _r_t4, _k_t4); sum = r4[4] * k4[4]; float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); sum += 
vget_lane_f32(_ss, 0); #else sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; #endif *outptr += sum; r0++; r1++; r2++; r3++; r4++; outptr++; } r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; } } } } static void conv5x5s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2*outw + w; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; out.fill(bias0); for (int q=0; q<inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch*25 + q*25; const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* r3 = img0 + w*3; const float* r4 = img0 + w*4; const float* k0 = kernel0; const float* k1 = kernel0 + 5; const float* k2 = kernel0 + 10; const float* k3 = kernel0 + 15; const float* k4 = kernel0 + 20; #if __ARM_NEON float32x4_t _k0123 = vld1q_f32(kernel0); float32x4_t _k4567 = vld1q_f32(kernel0+4); float32x4_t _k891011 = vld1q_f32(kernel0+8); float32x4_t _k12131415 = vld1q_f32(kernel0+12); float32x4_t _k16171819 = vld1q_f32(kernel0+16); float32x4_t _k20212223 = vld1q_f32(kernel0+20); float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]); #endif // __ARM_NEON for (int i = 0; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw - (nn << 2); #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _sum = vld1q_f32(outptr); float32x4x2_t _r00_02461357 = vld2q_f32(r0); float32x4x2_t _r00nx2 = vld2q_f32(r0 + 8); float32x4_t _r0_8101214 = _r00nx2.val[0];// 8 10 12 14 float32x4_t _r0_9111315 = _r00nx2.val[1];// 9 11 13 15 float32x4_t _r00 = _r00_02461357.val[0];// 0 2 4 6 float32x4_t _r01 = _r00_02461357.val[1];// 1 3 5 7 float32x4_t _r02 = vextq_f32(_r00, _r0_8101214, 1);// 2 4 6 8 float32x4_t _r03 = vextq_f32(_r01, _r0_9111315, 1);// 3 5 7 9 float32x4_t _r04 = vextq_f32(_r00, _r0_8101214, 2);// 4 6 8 10 float32x4x2_t _r10_02461357 = vld2q_f32(r1); float32x4x2_t _r10nx2 = vld2q_f32(r1 + 8); float32x4_t _r1_8101214 = _r10nx2.val[0]; float32x4_t _r1_9111315 = _r10nx2.val[1]; float32x4_t _r10 = _r10_02461357.val[0]; float32x4_t _r11 = _r10_02461357.val[1]; float32x4_t _r12 = vextq_f32(_r10, _r1_8101214, 1); float32x4_t _r13 = vextq_f32(_r11, _r1_9111315, 1); float32x4_t _r14 = vextq_f32(_r10, _r1_8101214, 2); float32x4x2_t _r20_02461357 = 
vld2q_f32(r2); float32x4x2_t _r20nx2 = vld2q_f32(r2 + 8); float32x4_t _r2_8101214 = _r20nx2.val[0]; float32x4_t _r2_9111315 = _r20nx2.val[1]; float32x4_t _r20 = _r20_02461357.val[0]; float32x4_t _r21 = _r20_02461357.val[1]; float32x4_t _r22 = vextq_f32(_r20, _r2_8101214, 1); float32x4_t _r23 = vextq_f32(_r21, _r2_9111315, 1); float32x4_t _r24 = vextq_f32(_r20, _r2_8101214, 2); float32x4x2_t _r30_02461357 = vld2q_f32(r3); float32x4x2_t _r30nx2 = vld2q_f32(r3 + 8); float32x4_t _r3_8101214 = _r30nx2.val[0]; float32x4_t _r3_9111315 = _r30nx2.val[1]; float32x4_t _r30 = _r30_02461357.val[0]; float32x4_t _r31 = _r30_02461357.val[1]; float32x4_t _r32 = vextq_f32(_r30, _r3_8101214, 1); float32x4_t _r33 = vextq_f32(_r31, _r3_9111315, 1); float32x4_t _r34 = vextq_f32(_r30, _r3_8101214, 2); float32x4x2_t _r40_02461357 = vld2q_f32(r4); float32x4x2_t _r40nx2 = vld2q_f32(r4 + 8); float32x4_t _r4_8101214 = _r40nx2.val[0]; float32x4_t _r4_9111315 = _r40nx2.val[1]; float32x4_t _r40 = _r40_02461357.val[0]; float32x4_t _r41 = _r40_02461357.val[1]; float32x4_t _r42 = vextq_f32(_r40, _r4_8101214, 1); float32x4_t _r43 = vextq_f32(_r41, _r4_9111315, 1); float32x4_t _r44 = vextq_f32(_r40, _r4_8101214, 2); _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0); _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1); _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2); _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3); _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0); _sum = vfmaq_laneq_f32(_sum, _r10, _k4567, 1); _sum = vfmaq_laneq_f32(_sum, _r11, _k4567, 2); _sum = vfmaq_laneq_f32(_sum, _r12, _k4567, 3); _sum = vfmaq_laneq_f32(_sum, _r13, _k891011, 0); _sum = vfmaq_laneq_f32(_sum, _r14, _k891011, 1); _sum = vfmaq_laneq_f32(_sum, _r20, _k891011, 2); _sum = vfmaq_laneq_f32(_sum, _r21, _k891011, 3); _sum = vfmaq_laneq_f32(_sum, _r22, _k12131415, 0); _sum = vfmaq_laneq_f32(_sum, _r23, _k12131415, 1); _sum = vfmaq_laneq_f32(_sum, _r24, _k12131415, 2); _sum = vfmaq_laneq_f32(_sum, _r30, _k12131415, 3); _sum = 
vfmaq_laneq_f32(_sum, _r31, _k16171819, 0); _sum = vfmaq_laneq_f32(_sum, _r32, _k16171819, 1); _sum = vfmaq_laneq_f32(_sum, _r33, _k16171819, 2); _sum = vfmaq_laneq_f32(_sum, _r34, _k16171819, 3); _sum = vfmaq_laneq_f32(_sum, _r40, _k20212223, 0); _sum = vfmaq_laneq_f32(_sum, _r41, _k20212223, 1); _sum = vfmaq_laneq_f32(_sum, _r42, _k20212223, 2); _sum = vfmaq_laneq_f32(_sum, _r43, _k20212223, 3); _sum = vfmaq_laneq_f32(_sum, _r44, _k24242424, 0); vst1q_f32(outptr, _sum); r0 += 8; r1 += 8; r2 += 8; r3 += 8; r4 += 8; outptr += 4; } #else if (nn > 0) { asm volatile( "veor q15, q15 \n"// _sump3 = 0; "pld [%1, #128] \n" "veor q13, q13 \n"// _sump2 = 0; "pld [%2, #256] \n" "veor q14, q14 \n"// _sump3 = 0; "vld2.f32 {d16-d19}, [%2]! \n"// q8 = 0 2 4 6 q9 = 1 3 5 7 "pld [%2, #256] \n" "vld2.f32 {d20-d23}, [%2] \n"// q10 = 8 10 12 14 q11 = 9 11 13 15 "0: \n" "vld1.f32 {d14-d15}, [%1] \n"// q7 = outptr "vext.32 q12, q8, q10, #1 \n"// q12 = 2 4 6 8 "vext.32 q11, q9, q11, #1 \n"// q11 = 3 5 7 9 "vext.32 q10, q8, q10, #2 \n"// q10 = 4 6 8 10 "vmla.f32 q7, q8, %e14[0] \n" "vmla.f32 q13, q9, %e14[1] \n" "pld [%3, #256] \n" "vmla.f32 q14, q12, %f14[0] \n" "vmla.f32 q15, q11, %f14[1] \n" "vmla.f32 q7, q10, %e15[0] \n" "vld2.f32 {d16-d19}, [%3]! \n" "pld [%3, #256] \n" "vld2.f32 {d20-d23}, [%3] \n" "vext.32 q12, q8, q10, #1 \n" "vext.32 q11, q9, q11, #1 \n" "vext.32 q10, q8, q10, #2 \n" "vmla.f32 q7, q8, %e15[1] \n" "vmla.f32 q13, q9, %f15[0] \n" "pld [%4, #256] \n" "vmla.f32 q14, q12, %f15[1] \n" "vmla.f32 q15, q11, %e16[0] \n" "vmla.f32 q7, q10, %e16[1] \n" "vld2.f32 {d16-d19}, [%4]! \n" "pld [%4, #256] \n" "vld2.f32 {d20-d23}, [%4] \n" "vext.32 q12, q8, q10, #1 \n" "vext.32 q11, q9, q11, #1 \n" "vext.32 q10, q8, q10, #2 \n" "vmla.f32 q7, q8, %f16[0] \n" "vmla.f32 q13, q9, %f16[1] \n" "pld [%5, #256] \n" "vmla.f32 q14, q12, %e17[0] \n" "vmla.f32 q15, q11, %e17[1] \n" "vmla.f32 q7, q10, %f17[0] \n" "vld2.f32 {d16-d19}, [%5]! 
\n" "pld [%5, #256] \n" "vld2.f32 {d20-d23}, [%5] \n" "vext.32 q12, q8, q10, #1 \n" "vext.32 q11, q9, q11, #1 \n" "vext.32 q10, q8, q10, #2 \n" "vmla.f32 q7, q8, %f17[1] \n" "vmla.f32 q13, q9, %e18[0] \n" "pld [%6, #256] \n" "vmla.f32 q14, q12, %e18[1] \n" "vmla.f32 q15, q11, %f18[0] \n" "vmla.f32 q7, q10, %f18[1] \n" "vld2.f32 {d16-d19}, [%6]! \n" "pld [%6, #256] \n" "vld2.f32 {d20-d23}, [%6] \n" "vext.32 q12, q8, q10, #1 \n" "vext.32 q11, q9, q11, #1 \n" "vext.32 q10, q8, q10, #2 \n" "vmla.f32 q7, q8, %e19[0] \n" "vmla.f32 q13, q9, %e19[1] \n" "vmla.f32 q14, q12, %f19[0] \n" "vmla.f32 q15, q11, %f19[1] \n" "vmla.f32 q7, q10, %e20[0] \n" "pld [%2, #256] \n" "vld2.f32 {d16-d19}, [%2]! \n"// q8 = 0 2 4 6 q9 = 1 3 5 7 "vadd.f32 q14, q14, q15 \n" "vadd.f32 q7, q7, q13 \n" "veor q15, q15 \n"// _sump3 = 0; "veor q13, q13 \n"// _sump2 = 0; "pld [%2, #256] \n" "vadd.f32 q7, q7, q14 \n" "vld2.f32 {d20-d23}, [%2] \n"// q10 = 8 10 12 14 q11 = 9 11 13 15 "veor q14, q14 \n"// _sump3 = 0; "vst1.f32 {d14-d15}, [%1]! 
\n" "pld [%1, #128] \n" "subs %0, #1 \n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4) // %6 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k891011), // %16 "w"(_k12131415), // %17 "w"(_k16171819), // %18 "w"(_k20212223), // %19 "w"(_k24242424) // %20 : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = 0; #if __ARM_NEON float32x4_t _r0 = vld1q_f32(r0); float32x4_t _sum = vmulq_f32(_r0, _k0123); float32x4_t _r1 = vld1q_f32(r1); _sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1)); float32x4_t _r2 = vld1q_f32(r2); _sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2)); float32x4_t _r3 = vld1q_f32(r3); _sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3)); float32x4_t _r4 = vld1q_f32(r4); _sum = vmlaq_f32(_sum, _r4, _k20212223); sum += r0[4] * k0[4]; sum += r1[4] * k1[4]; sum += r2[4] * k2[4]; sum += r3[4] * k3[4]; sum += r4[4] * k4[4]; float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); sum += vget_lane_f32(_ss, 0); #else sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; #endif *outptr += sum; r0 += 2; r1 += 2; r2 += 2; r3 += 2; r4 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; r4 += tailstep; } } } }
vect-simd-clone-5.c
/* { dg-require-effective-target vect_simd_clones } */
/* { dg-additional-options "-fopenmp-simd" } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */

/* Vectorizer test: the "omp declare simd" clone of foo() must be usable
   from inside an "omp simd" loop.  check_vect()/abort() come from
   tree-vect.h (testsuite support header).  */

#include "tree-vect.h"

#ifndef N
#define N 1024
#endif

/* Static-storage arrays (zero-initialized): written by bar(), checked by
   main().  */
int d[N], e[N];

/* SIMD clone of foo: 4 lanes (simdlen), never called under a mask
   (notinbranch), b identical across lanes (uniform), c advancing by 3 per
   iteration (linear(c:3)).  */
#pragma omp declare simd simdlen(4) notinbranch uniform(b) linear(c:3)
__attribute__((noinline)) long long int
foo (int a, int b, int c)
{
  return a + b + c;
}

/* Fill d[] via the simd clone of foo and accumulate i into e[].
   noinline/noclone keep the call intact so the vectorizer must actually
   select the declared simd clone.  */
__attribute__((noinline, noclone)) void
bar ()
{
  int i;
#pragma omp simd
  for (i = 0; i < N; ++i)
    {
      d[i] = foo (i, 123, i * 3);
      e[i] = e[i] + i;
    }
}

int
main ()
{
  int i;
  check_vect ();  /* skip on hardware lacking the required SIMD ISA */
  bar ();
  /* foo (i, 123, i * 3) == i * 4 + 123; e[] started zeroed, so e[i] == i.  */
  for (i = 0; i < N; i++)
    if (d[i] != i * 4 + 123 || e[i] != i)
      abort ();
  return 0;
}
omp_crit.c
// note not doing O0 below as to ensure we get tbaa
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 -disable-llvm-optzns %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O2 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O3 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi

// note not doing O0 below as to ensure we get tbaa
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 -Xclang -disable-llvm-optzns %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O2 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O3 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi

// NOTE(review): the first RUN line above passes -disable-llvm-optzns without
// -Xclang, unlike the -enzyme-inline=1 group -- confirm this is intentional
// (the bare cc1 flag may be rejected by the clang driver).

#include <stdio.h>
#include <math.h>
#include <assert.h>
#include "test_utils.h"

// Enzyme entry point: reverse-mode AD of the function passed as the first
// argument.  Resolved by the Enzyme LLVM pass, not by linked code.
double __enzyme_autodiff(void*, ...);

// Sum of squares of a[0..N-1], reduced across OpenMP threads: each thread
// accumulates a private partial sum over its chunk of the "omp for" loop,
// then folds it into the shared result inside a critical section.  This is
// the pattern under test (parallel-for + critical in the primal).
float omp(float* a, int N) {
  float res = 0.0;
  #pragma omp parallel
  {
    double thread_res = 0.0;
    #pragma omp for
    for (int i=0; i<N; i++) {
      thread_res += a[i] * a[i];
    }
    #pragma omp critical
    {
      res += thread_res;
    }
  }
  return res;
}

// Differentiates omp() with Enzyme and checks d/da[i] of sum(a^2) == 2*a[i].
int main(int argc, char** argv) {
  int N = 20;
  float a[N];
  for(int i=0; i<N; i++) {
    a[i] = i+1;
  }

  // Gradient buffer: must be zero-initialized before __enzyme_autodiff,
  // which accumulates into it.
  float d_a[N];
  for(int i=0; i<N; i++)
    d_a[i] = 0.0f;

  //omp(*a, N);
  printf("ran omp\n");

  __enzyme_autodiff((void*)omp, a, d_a, N);

  for(int i=0; i<N; i++) {
    printf("a[%d]=%f d_a[%d]=%f\n", i, a[i], i, d_a[i]);
  }

  // a[i] == i+1, so the expected gradient is 2*(i+1).
  for(int i=0; i<N; i++) {
    APPROX_EQ(d_a[i], 2.0f*(i+1), 1e-10);
  }
  return 0;
}
GB_binop__rminus_int32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): generated specialization of the RMINUS (reverse-minus)
// operator for int32_t.  The GB_* macros below configure the shared template
// files (the #include'd *.c bodies) that supply each kernel's inner loops.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):        GB (_AaddB__rminus_int32)
// A.*B function (eWiseMult):      GB (_AemultB_08__rminus_int32)
// A.*B function (eWiseMult):      GB (_AemultB_02__rminus_int32)
// A.*B function (eWiseMult):      GB (_AemultB_04__rminus_int32)
// A.*B function (eWiseMult):      GB (_AemultB_bitmap__rminus_int32)
// A*D function (colscale):        GB (_AxD__rminus_int32)
// D*A function (rowscale):        GB (_DxB__rminus_int32)
// C+=B function (dense accum):    GB (_Cdense_accumB__rminus_int32)
// C+=b function (dense accum):    GB (_Cdense_accumb__rminus_int32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_int32)
// C=A+B function (dense ewise3):  GB (_Cdense_ewise3_noaccum__rminus_int32)
// C=scalar+B                      GB (_bind1st__rminus_int32)
// C=scalar+B'                     GB (_bind1st_tran__rminus_int32)
// C=A+scalar                      GB (_bind2nd__rminus_int32)
// C=A'+scalar                     GB (_bind2nd_tran__rminus_int32)

// C type:    int32_t
// A type:    int32_t
// A pattern? 0
// B type:    int32_t
// B pattern? 0

// BinaryOp: cij = (bij - aij)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// (the trailing backslash after the 0 deliberately splices the following
// blank line into the macro; kept exactly as generated)
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: note the reversed operands (RMINUS: z = y - x)
#define GB_BINOP(z,x,y,i,j) \
    z = (y - x) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RMINUS || GxB_NO_INT32 || GxB_NO_RMINUS_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__rminus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__rminus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__rminus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rminus_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the block above already returned; kept
    // as generated (the generator emits it for variants where the block
    // above is compiled out).
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rminus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__rminus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__rminus_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // eWiseUnion substitutes alpha/beta for entries present in only one input
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__rminus_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__rminus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__rminus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__rminus_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__rminus_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // Bb is the bitmap of B; skip entries not present
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (bij - x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__rminus_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (y - aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij - x) ;                       \
}

GrB_Info GB (_bind1st_tran__rminus_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (y - aij) ;                       \
}

GrB_Info GB (_bind2nd_tran__rminus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
floorplan.ref.c
#include <sys/time.h> #include <time.h> #include <stdio.h> static unsigned long long current_time_ns() { #ifdef __MACH__ clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); unsigned long long s = 1000000000ULL * (unsigned long long)mts.tv_sec; return (unsigned long long)mts.tv_nsec + s; #else struct timespec t ={0,0}; clock_gettime(CLOCK_MONOTONIC, &t); unsigned long long s = 1000000000ULL * (unsigned long long)t.tv_sec; return (((unsigned long long)t.tv_nsec)) + s; #endif } /**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. 
*/ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /**********************************************************************************************/ /* Original code from the Application Kernel Matrix by Cray */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include "app-desc.h" #include "bots.h" #define ROWS 64 #define COLS 64 #define DMAX 64 #define max(a, b) ((a > b) ? a : b) #define min(a, b) ((a < b) ? a : b) int solution = -1; typedef int coor[2]; typedef char ibrd[ROWS * COLS]; typedef char (*pibrd)[COLS]; FILE * inputFile; struct cell { int n; coor *alt; int top; int bot; int lhs; int rhs; int left; int above; int next; }; struct cell * gcells; int MIN_AREA; ibrd BEST_BOARD; coor MIN_FOOTPRINT; int N; /* compute all possible locations for nw corner for cell */ static int starts(int id, int shape, coor *NWS, struct cell *cells) { int i, n, top, bot, lhs, rhs; int rows, cols, left, above; /* size of cell */ rows = cells[id].alt[shape][0]; cols = cells[id].alt[shape][1]; /* the cells to the left and above */ left = cells[id].left; above = cells[id].above; /* if there is a vertical and horizontal dependence */ if ((left >= 0) && (above >= 0)) { top = cells[above].bot + 1; lhs = cells[left].rhs + 1; bot = top + rows; rhs = lhs + cols; /* if footprint of cell touches the cells to the left and above */ if ((top <= cells[left].bot) && (bot >= cells[left].top) && (lhs <= cells[above].rhs) && (rhs >= cells[above].lhs)) { n = 1; NWS[0][0] = top; NWS[0][1] = lhs; } else { n = 0; } /* if there is only a horizontal dependence */ } else if (left >= 0) { /* highest initial row is top of cell to the left - rows */ top = max(cells[left].top - rows + 1, 0); /* lowest initial row is bottom of cell to the left */ bot = min(cells[left].bot, ROWS); n = bot - top + 1; for (i = 0; i < n; i++) { 
NWS[i][0] = i + top; NWS[i][1] = cells[left].rhs + 1; } } else { /* leftmost initial col is lhs of cell above - cols */ lhs = max(cells[above].lhs - cols + 1, 0); /* rightmost initial col is rhs of cell above */ rhs = min(cells[above].rhs, COLS); n = rhs - lhs + 1; for (i = 0; i < n; i++) { NWS[i][0] = cells[above].bot + 1; NWS[i][1] = i + lhs; } } return (n); } /* lay the cell down on the board in the rectangular space defined by the cells top, bottom, left, and right edges. If the cell can not be layed down, return 0; else 1. */ static int lay_down(int id, ibrd board, struct cell *cells) { int i, j, top, bot, lhs, rhs; top = cells[id].top; bot = cells[id].bot; lhs = cells[id].lhs; rhs = cells[id].rhs; for (i = top; i <= bot; i++) { for (j = lhs; j <= rhs; j++) { if (board[i * COLS + j] == 0) board[i * COLS + j] = (char)id; else return(0); } } return (1); } #define read_integer(file,var) \ if ( fscanf(file, "%d", &var) == EOF ) {\ bots_message(" Bogus input file\n");\ exit(-1);\ } static void read_inputs() { int i, j, n; read_integer(inputFile,n); N = n; gcells = (struct cell *) malloc((n + 1) * sizeof(struct cell)); gcells[0].n = 0; gcells[0].alt = 0; gcells[0].top = 0; gcells[0].bot = 0; gcells[0].lhs = -1; gcells[0].rhs = -1; gcells[0].left = 0; gcells[0].above = 0; gcells[0].next = 0; for (i = 1; i < n + 1; i++) { read_integer(inputFile, gcells[i].n); gcells[i].alt = (coor *) malloc(gcells[i].n * sizeof(coor)); for (j = 0; j < gcells[i].n; j++) { read_integer(inputFile, gcells[i].alt[j][0]); read_integer(inputFile, gcells[i].alt[j][1]); } read_integer(inputFile, gcells[i].left); read_integer(inputFile, gcells[i].above); read_integer(inputFile, gcells[i].next); } if (!feof(inputFile)) { read_integer(inputFile, solution); } } static void write_outputs() { int i, j; bots_message("Minimum area = %d\n\n", MIN_AREA); for (i = 0; i < MIN_FOOTPRINT[0]; i++) { for (j = 0; j < MIN_FOOTPRINT[1]; j++) { if (BEST_BOARD[i * COLS + j] == 0) {bots_message(" ");} else 
bots_message("%c", 'A' + BEST_BOARD[i * COLS + j] - 1); } bots_message("\n"); } } static int add_cell(int id, coor FOOTPRINT, ibrd BOARD, struct cell *CELLS, int dummy_level) { int i, j, nn, area, nnc,nnl; ibrd board; coor footprint, NWS[DMAX]; nnc = nnl = 0; /* for each possible shape */ for (i = 0; i < CELLS[id].n; i++) { /* compute all possible locations for nw corner */ nn = starts(id, i, NWS, CELLS); nnl += nn; /* for all possible locations */ for (j = 0; j < nn; j++) { #pragma omp task untied private(board, footprint,area) firstprivate(NWS,i,j,id,nn) shared(FOOTPRINT,BOARD,CELLS,MIN_AREA,MIN_FOOTPRINT,N,BEST_BOARD,nnc,bots_verbose_mode) { struct cell cells[N+1]; memcpy(cells,CELLS,sizeof(struct cell)*(N+1)); /* extent of shape */ cells[id].top = NWS[j][0]; cells[id].bot = cells[id].top + cells[id].alt[i][0] - 1; cells[id].lhs = NWS[j][1]; cells[id].rhs = cells[id].lhs + cells[id].alt[i][1] - 1; memcpy(board, BOARD, sizeof(ibrd)); /* if the cell cannot be layed down, prune search */ if (! lay_down(id, board, cells)) { bots_debug("Chip %d, shape %d does not fit\n", id, i); goto _end; } /* calculate new footprint of board and area of footprint */ footprint[0] = (FOOTPRINT[0] > cells[id].bot+1) ? FOOTPRINT[0] : cells[id].bot + 1; footprint[1] = (FOOTPRINT[1] > cells[id].rhs+1) ? 
FOOTPRINT[1] : cells[id].rhs + 1; area = footprint[0] * footprint[1]; /* if last cell */ if (cells[id].next == 0) { /* if area is minimum, update global values */ if (area < MIN_AREA) { #pragma omp critical if (area < MIN_AREA) { MIN_AREA = area; MIN_FOOTPRINT[0] = footprint[0]; MIN_FOOTPRINT[1] = footprint[1]; memcpy(BEST_BOARD, board, sizeof(ibrd)); bots_debug("N %d\n", MIN_AREA); } } /* if area is less than best area */ } else if (area < MIN_AREA) { #pragma omp atomic nnc += add_cell(cells[id].next, footprint, board,cells, 0); /* if area is greater than or equal to best area, prune search */ } else { bots_debug("T %d, %d\n", area, MIN_AREA); } _end:; } } } #pragma omp taskwait ; return nnc+nnl; } ibrd board; void floorplan_init (char *filename) { int i,j; inputFile = fopen(filename, "r"); if(NULL == inputFile) { bots_message("Couldn't open %s file for reading\n", filename); exit(1); } /* read input file and initialize global minimum area */ read_inputs(); MIN_AREA = ROWS * COLS; /* initialize board is empty */ for (i = 0; i < ROWS; i++) for (j = 0; j < COLS; j++) board[i * COLS + j] = 0; } void compute_floorplan (void) { const unsigned long long full_program_start = current_time_ns(); { coor footprint; /* footprint of initial board is zero */ footprint[0] = 0; footprint[1] = 0; bots_message("Computing floorplan "); #pragma omp parallel { #pragma omp single bots_number_of_tasks = add_cell(1, footprint, board, gcells, 0); } bots_message(" completed!\n"); } ; const unsigned long long full_program_end = current_time_ns(); printf("full_program %llu ns\n", full_program_end - full_program_start); } void floorplan_end (void) { /* write results */ write_outputs(); exit(0); } int floorplan_verify (void) { if (solution != -1 ) return MIN_AREA == solution ? BOTS_RESULT_SUCCESSFUL : BOTS_RESULT_UNSUCCESSFUL; else return BOTS_RESULT_NA; }
GB_binop__first_fc32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__first_fc32) // A.*B function (eWiseMult): GB (_AemultB_08__first_fc32) // A.*B function (eWiseMult): GB (_AemultB_02__first_fc32) // A.*B function (eWiseMult): GB (_AemultB_04__first_fc32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__first_fc32) // A*D function (colscale): GB (_AxD__first_fc32) // D*A function (rowscale): GB (_DxB__first_fc32) // C+=B function (dense accum): GB (_Cdense_accumB__first_fc32) // C+=b function (dense accum): GB (_Cdense_accumb__first_fc32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_fc32) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: GxB_FC32_t // A type: GxB_FC32_t // B,b type: GxB_FC32_t // BinaryOp: cij = aij #define GB_ATYPE \ GxB_FC32_t #define GB_BTYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] 
// NOTE(review): this is part of an auto-generated SuiteSparse:GraphBLAS file
// (Generator/GB_binop.c instantiated for the FIRST operator on GxB_FC32_t).
// Do not hand-edit the logic; regenerate instead.  Each wrapper below only
// sets type/operator macros and pulls in a shared algorithm template.

// aij = Ax [pA]: FIRST uses its first argument, so A's value is read.
#define GB_GETA(aij,Ax,pA,A_iso)  \
    GxB_FC32_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]: FIRST ignores its second argument, so this expands to nothing.
#define GB_GETB(bij,Bx,pB,B_iso)  \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    GxB_FC32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: z = first(x,y) = x
#define GB_BINOP(z,x,y,i,j) \
    z = x ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FIRST || GxB_NO_FC32 || GxB_NO_FIRST_FC32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// FIRST is none of these, so this variant is compiled out.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__first_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__first_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body disabled for FIRST: C += B with the FIRST accum is a no-op on C,
    // so the template is compiled out and success is returned directly.
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__first_fc32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body disabled for FIRST, same reason as _Cdense_accumB above
    #if 0
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__first_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__first_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__first_fc32)
(
    GrB_Matrix C, const int C_sparsity,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace declared here so GB_FREE_WORK can release it on all exits
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__first_fc32)
(
    GrB_Matrix C, const int C_sparsity, const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__first_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (GB_BINOP_FLIP is 0 for FIRST, so only this branch is compiled.)
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__first_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__first_fc32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0
// disabled: bind1st with FIRST reduces to a scalar broadcast and is handled
// generically elsewhere, so no specialized kernel is generated.
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        // the two stray ';' below are the empty GB_GETB/GB_BINOP expansions
        ;
        ;
        Cx [p] = x ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0
// disabled: bind2nd with FIRST is a plain copy of A, handled generically.
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        GxB_FC32_t aij = GBX (Ax, p, false) ;
        Cx [p] = aij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)   \
{                           \
    ;                       \
    ;                       \
    Cx [pC] = x ;           \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC32_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = aij ;                             \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
convolution_3x3_pack8to1_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Transform a 3x3 convolution kernel into the Winograd F(6x6,3x3) domain
// (8x8 per kernel tap) and interleave it for the pack8-in / pack1-out
// fp16 arithmetic path.  Output layout: 8a-inch/8a-64-outch.
static void conv3x3s1_winograd64_transform_kernel_pack8to1_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt)
{
    // winograd63 transform kernel
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // G matrix of the Winograd F(6,3) transform: G * g * G^T maps a 3x3
    // tap to an 8x8 tile
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : tmp = G * g  (8x3 intermediate)
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v : kernel_tm = tmp * G^T  (8x8 result)
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = 8a-inch/8a-64-outch;
    // outch is split into groups of 8 plus single-channel leftovers
    kernel_tm_pack8to1.create(8 * inch / 8, 64, outch / 8 + outch % 8, (size_t)2u * 8, 8);

    int p = 0;
    // main path: 8 output channels at a time, converted float -> __fp16
    for (; p + 7 < outch; p += 8)
    {
        const Mat k0 = kernel_tm.channel(p);
        const Mat k1 = kernel_tm.channel(p + 1);
        const Mat k2 = kernel_tm.channel(p + 2);
        const Mat k3 = kernel_tm.channel(p + 3);
        const Mat k4 = kernel_tm.channel(p + 4);
        const Mat k5 = kernel_tm.channel(p + 5);
        const Mat k6 = kernel_tm.channel(p + 6);
        const Mat k7 = kernel_tm.channel(p + 7);

        Mat g0 = kernel_tm_pack8to1.channel(p / 8);

        for (int k = 0; k < 64; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);

            // assumes inch is a multiple of 8 on this path (pack8 input)
            for (int q = 0; q + 7 < inch; q += 8)
            {
                for (int i = 0; i < 8; i++)
                {
                    g00[0] = (__fp16)k0.row(q + i)[k];
                    g00[1] = (__fp16)k1.row(q + i)[k];
                    g00[2] = (__fp16)k2.row(q + i)[k];
                    g00[3] = (__fp16)k3.row(q + i)[k];
                    g00[4] = (__fp16)k4.row(q + i)[k];
                    g00[5] = (__fp16)k5.row(q + i)[k];
                    g00[6] = (__fp16)k6.row(q + i)[k];
                    g00[7] = (__fp16)k7.row(q + i)[k];

                    g00 += 8;
                }
            }
        }
    }
    // tail path: one output channel at a time
    for (; p < outch; p++)
    {
        const Mat k0 = kernel_tm.channel(p);

        Mat g0 = kernel_tm_pack8to1.channel(p / 8 + p % 8);

        for (int k = 0; k < 64; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);

            for (int q = 0; q + 7 < inch; q += 8)
            {
                for (int i = 0; i < 8; i++)
                {
                    g00[0] = (__fp16)k0.row(q + i)[k];

                    g00 += 1;
                }
            }
        }
    }
}

// Winograd F(6x6,3x3) fp16 convolution: pack8 input blob -> pack1 output
// blob.  Three phases: transform input tiles, batched dot product against
// the pre-transformed kernel, transform output tiles back.  The hot loops
// are hand-written AArch64 NEON fp16 assembly.
static void conv3x3s1_winograd64_pack8to1_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 6n+2: each 8x8 input tile produces a 6x6 output tile with
    // 2 pixels of overlap, so the padded input is the rounded-up output
    // size plus a 2-pixel border
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 5) / 6 * 6;
    outh = (outh + 5) / 6 * 6;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tiles = outw / 6;
        int h_tiles = outh / 6;
        const int tiles = w_tiles * h_tiles;

        bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        conv3x3s1_winograd64_transform_input_pack8_fp16sa_neon(bottom_blob_bordered, bottom_blob_tm, opt);
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;

        const int tiles = h_tm / 8 * w_tm / 8;

        // permute
        // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        // re-layout tiles into runs of 8 / 4 / 1 so the dot kernels read
        // contiguous memory
        Mat bottom_blob_tm2;
        if (tiles >= 8)
            bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 4)
            bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 64; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);

            // tile
            int i = 0;
            for (; i + 7 < tiles; i += 8)
            {
                __fp16* tm2p = tm2.row<__fp16>(i / 8);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 8;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 8x8 (ld4 de-interleave + uzp1/uzp2 merge)
                    asm volatile(
                        "prfm pldl1keep, [%0, #512] \n"
                        "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
                        "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n"
                        "sub %0, %0, #64 \n"
                        "uzp1 v16.8h, v0.8h, v4.8h \n"
                        "uzp2 v20.8h, v0.8h, v4.8h \n"
                        "uzp1 v17.8h, v1.8h, v5.8h \n"
                        "uzp2 v21.8h, v1.8h, v5.8h \n"
                        "uzp1 v18.8h, v2.8h, v6.8h \n"
                        "uzp2 v22.8h, v2.8h, v6.8h \n"
                        "uzp1 v19.8h, v3.8h, v7.8h \n"
                        "uzp2 v23.8h, v3.8h, v7.8h \n"
                        "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
                        "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
                        : "=r"(r0),  // %0
                        "=r"(tm2p) // %1
                        : "0"(r0),
                        "1"(tm2p)
                        : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");

                    r0 += bottom_blob_tm.cstep * 8;
                }
            }
            for (; i + 3 < tiles; i += 4)
            {
                __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 8;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 8x4 (st4 interleaving store)
                    asm volatile(
                        "prfm pldl1keep, [%0, #256] \n"
                        "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
                        "st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
                        : "=r"(r0),  // %0
                        "=r"(tm2p) // %1
                        : "0"(r0),
                        "1"(tm2p)
                        : "memory", "v0", "v1", "v2", "v3");

                    r0 += bottom_blob_tm.cstep * 8;
                }
            }
            for (; i < tiles; i++)
            {
                __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + i % 4);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 8;

                for (int q = 0; q < inch; q++)
                {
                    // single-tile copy of the 8-wide pack
                    asm volatile(
                        "prfm pldl1keep, [%0, #128] \n"
                        "ld1 {v0.8h}, [%0] \n"
                        "st1 {v0.8h}, [%1], #16 \n"
                        : "=r"(r0),  // %0
                        "=r"(tm2p) // %1
                        : "0"(r0),
                        "1"(tm2p)
                        : "memory", "v0");

                    r0 += bottom_blob_tm.cstep * 8;
                }
            }
        }

        bottom_blob_tm = Mat();
        // permute end

        // dot result is pack1 fp16
        top_blob_tm.create(tiles, 64, outch, 2u, 1, opt.workspace_allocator);

        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 3;

        // main path: 8 output channels per iteration
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int p = pp * 8;

            __fp16* output0_tm = top_blob_tm.channel(p);
            __fp16* output1_tm = top_blob_tm.channel(p + 1);
            __fp16* output2_tm = top_blob_tm.channel(p + 2);
            __fp16* output3_tm = top_blob_tm.channel(p + 3);
            __fp16* output4_tm = top_blob_tm.channel(p + 4);
            __fp16* output5_tm = top_blob_tm.channel(p + 5);
            __fp16* output6_tm = top_blob_tm.channel(p + 6);
            __fp16* output7_tm = top_blob_tm.channel(p + 7);

            const Mat kernel01_tm = kernel_tm.channel(p / 8);

            for (int r = 0; r < 64; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                int i = 0;
                for (; i + 7 < tiles; i += 8)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8);
                    const __fp16* kptr = kernel01_tm.row<const __fp16>(r);

                    int nn = inch; // inch always > 0

                    // 8 tiles x 8 outch: accumulators v24-v31, fmla by
                    // kernel lane broadcast
                    asm volatile(
                        "eor v24.16b, v24.16b, v24.16b \n"
                        "eor v25.16b, v25.16b, v25.16b \n"
                        "eor v26.16b, v26.16b, v26.16b \n"
                        "eor v27.16b, v27.16b, v27.16b \n"
                        "eor v28.16b, v28.16b, v28.16b \n"
                        "eor v29.16b, v29.16b, v29.16b \n"
                        "eor v30.16b, v30.16b, v30.16b \n"
                        "eor v31.16b, v31.16b, v31.16b \n"
                        "0: \n"
                        "prfm pldl1keep, [%9, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%9], #64 \n"
                        "prfm pldl1keep, [%10, #512] \n"
                        "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%10], #64 \n"
                        "fmla v24.8h, v16.8h, v0.h[0] \n"
                        "fmla v25.8h, v16.8h, v0.h[1] \n"
                        "fmla v26.8h, v16.8h, v0.h[2] \n"
                        "fmla v27.8h, v16.8h, v0.h[3] \n"
                        "fmla v28.8h, v16.8h, v0.h[4] \n"
                        "fmla v29.8h, v16.8h, v0.h[5] \n"
                        "fmla v30.8h, v16.8h, v0.h[6] \n"
                        "fmla v31.8h, v16.8h, v0.h[7] \n"
                        "fmla v24.8h, v17.8h, v1.h[0] \n"
                        "fmla v25.8h, v17.8h, v1.h[1] \n"
                        "fmla v26.8h, v17.8h, v1.h[2] \n"
                        "fmla v27.8h, v17.8h, v1.h[3] \n"
                        "fmla v28.8h, v17.8h, v1.h[4] \n"
                        "fmla v29.8h, v17.8h, v1.h[5] \n"
                        "fmla v30.8h, v17.8h, v1.h[6] \n"
                        "fmla v31.8h, v17.8h, v1.h[7] \n"
                        "prfm pldl1keep, [%9, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%9], #64 \n"
                        "fmla v24.8h, v18.8h, v2.h[0] \n"
                        "fmla v25.8h, v18.8h, v2.h[1] \n"
                        "fmla v26.8h, v18.8h, v2.h[2] \n"
                        "fmla v27.8h, v18.8h, v2.h[3] \n"
                        "fmla v28.8h, v18.8h, v2.h[4] \n"
                        "fmla v29.8h, v18.8h, v2.h[5] \n"
                        "fmla v30.8h, v18.8h, v2.h[6] \n"
                        "fmla v31.8h, v18.8h, v2.h[7] \n"
                        "prfm pldl1keep, [%10, #512] \n"
                        "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%10], #64 \n"
                        "fmla v24.8h, v19.8h, v3.h[0] \n"
                        "fmla v25.8h, v19.8h, v3.h[1] \n"
                        "fmla v26.8h, v19.8h, v3.h[2] \n"
                        "fmla v27.8h, v19.8h, v3.h[3] \n"
                        "fmla v28.8h, v19.8h, v3.h[4] \n"
                        "fmla v29.8h, v19.8h, v3.h[5] \n"
                        "fmla v30.8h, v19.8h, v3.h[6] \n"
                        "fmla v31.8h, v19.8h, v3.h[7] \n"
                        "fmla v24.8h, v20.8h, v4.h[0] \n"
                        "fmla v25.8h, v20.8h, v4.h[1] \n"
                        "fmla v26.8h, v20.8h, v4.h[2] \n"
                        "fmla v27.8h, v20.8h, v4.h[3] \n"
                        "fmla v28.8h, v20.8h, v4.h[4] \n"
                        "fmla v29.8h, v20.8h, v4.h[5] \n"
                        "fmla v30.8h, v20.8h, v4.h[6] \n"
                        "fmla v31.8h, v20.8h, v4.h[7] \n"
                        "fmla v24.8h, v21.8h, v5.h[0] \n"
                        "fmla v25.8h, v21.8h, v5.h[1] \n"
                        "fmla v26.8h, v21.8h, v5.h[2] \n"
                        "fmla v27.8h, v21.8h, v5.h[3] \n"
                        "fmla v28.8h, v21.8h, v5.h[4] \n"
                        "fmla v29.8h, v21.8h, v5.h[5] \n"
                        "fmla v30.8h, v21.8h, v5.h[6] \n"
                        "fmla v31.8h, v21.8h, v5.h[7] \n"
                        "fmla v24.8h, v22.8h, v6.h[0] \n"
                        "fmla v25.8h, v22.8h, v6.h[1] \n"
                        "fmla v26.8h, v22.8h, v6.h[2] \n"
                        "fmla v27.8h, v22.8h, v6.h[3] \n"
                        "fmla v28.8h, v22.8h, v6.h[4] \n"
                        "fmla v29.8h, v22.8h, v6.h[5] \n"
                        "fmla v30.8h, v22.8h, v6.h[6] \n"
                        "fmla v31.8h, v22.8h, v6.h[7] \n"
                        "subs %w0, %w0, #1 \n"
                        "fmla v24.8h, v23.8h, v7.h[0] \n"
                        "fmla v25.8h, v23.8h, v7.h[1] \n"
                        "fmla v26.8h, v23.8h, v7.h[2] \n"
                        "fmla v27.8h, v23.8h, v7.h[3] \n"
                        "fmla v28.8h, v23.8h, v7.h[4] \n"
                        "fmla v29.8h, v23.8h, v7.h[5] \n"
                        "fmla v30.8h, v23.8h, v7.h[6] \n"
                        "fmla v31.8h, v23.8h, v7.h[7] \n"
                        "bne 0b \n"
                        "st1 {v24.8h}, [%1], #16 \n"
                        "st1 {v25.8h}, [%2], #16 \n"
                        "st1 {v26.8h}, [%3], #16 \n"
                        "st1 {v27.8h}, [%4], #16 \n"
                        "st1 {v28.8h}, [%5], #16 \n"
                        "st1 {v29.8h}, [%6], #16 \n"
                        "st1 {v30.8h}, [%7], #16 \n"
                        "st1 {v31.8h}, [%8], #16 \n"
                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(output1_tm), // %2
                        "=r"(output2_tm), // %3
                        "=r"(output3_tm), // %4
                        "=r"(output4_tm), // %5
                        "=r"(output5_tm), // %6
                        "=r"(output6_tm), // %7
                        "=r"(output7_tm), // %8
                        "=r"(r0),         // %9
                        "=r"(kptr)        // %10
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(output1_tm),
                        "3"(output2_tm),
                        "4"(output3_tm),
                        "5"(output4_tm),
                        "6"(output5_tm),
                        "7"(output6_tm),
                        "8"(output7_tm),
                        "9"(r0),
                        "10"(kptr)
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
                }
                for (; i + 3 < tiles; i += 4)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4);
                    const __fp16* kptr = kernel01_tm.row<const __fp16>(r);

                    int nn = inch; // inch always > 0

                    // 4 tiles x 8 outch: same scheme with .4h accumulators
                    asm volatile(
                        "eor v24.16b, v24.16b, v24.16b \n"
                        "eor v25.16b, v25.16b, v25.16b \n"
                        "eor v26.16b, v26.16b, v26.16b \n"
                        "eor v27.16b, v27.16b, v27.16b \n"
                        "eor v28.16b, v28.16b, v28.16b \n"
                        "eor v29.16b, v29.16b, v29.16b \n"
                        "eor v30.16b, v30.16b, v30.16b \n"
                        "eor v31.16b, v31.16b, v31.16b \n"
                        "0: \n"
                        "prfm pldl1keep, [%9, #256] \n"
                        "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%9], #32 \n"
                        "prfm pldl1keep, [%10, #512] \n"
                        "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%10], #64 \n"
                        "fmla v24.4h, v16.4h, v0.h[0] \n"
                        "fmla v25.4h, v16.4h, v0.h[1] \n"
                        "fmla v26.4h, v16.4h, v0.h[2] \n"
                        "fmla v27.4h, v16.4h, v0.h[3] \n"
                        "fmla v28.4h, v16.4h, v0.h[4] \n"
                        "fmla v29.4h, v16.4h, v0.h[5] \n"
                        "fmla v30.4h, v16.4h, v0.h[6] \n"
                        "fmla v31.4h, v16.4h, v0.h[7] \n"
                        "fmla v24.4h, v17.4h, v1.h[0] \n"
                        "fmla v25.4h, v17.4h, v1.h[1] \n"
                        "fmla v26.4h, v17.4h, v1.h[2] \n"
                        "fmla v27.4h, v17.4h, v1.h[3] \n"
                        "fmla v28.4h, v17.4h, v1.h[4] \n"
                        "fmla v29.4h, v17.4h, v1.h[5] \n"
                        "fmla v30.4h, v17.4h, v1.h[6] \n"
                        "fmla v31.4h, v17.4h, v1.h[7] \n"
                        "prfm pldl1keep, [%9, #256] \n"
                        "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%9], #32 \n"
                        "fmla v24.4h, v18.4h, v2.h[0] \n"
                        "fmla v25.4h, v18.4h, v2.h[1] \n"
                        "fmla v26.4h, v18.4h, v2.h[2] \n"
                        "fmla v27.4h, v18.4h, v2.h[3] \n"
                        "fmla v28.4h, v18.4h, v2.h[4] \n"
                        "fmla v29.4h, v18.4h, v2.h[5] \n"
                        "fmla v30.4h, v18.4h, v2.h[6] \n"
                        "fmla v31.4h, v18.4h, v2.h[7] \n"
                        "prfm pldl1keep, [%10, #512] \n"
                        "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%10], #64 \n"
                        "fmla v24.4h, v19.4h, v3.h[0] \n"
                        "fmla v25.4h, v19.4h, v3.h[1] \n"
                        "fmla v26.4h, v19.4h, v3.h[2] \n"
                        "fmla v27.4h, v19.4h, v3.h[3] \n"
                        "fmla v28.4h, v19.4h, v3.h[4] \n"
                        "fmla v29.4h, v19.4h, v3.h[5] \n"
                        "fmla v30.4h, v19.4h, v3.h[6] \n"
                        "fmla v31.4h, v19.4h, v3.h[7] \n"
                        "fmla v24.4h, v20.4h, v4.h[0] \n"
                        "fmla v25.4h, v20.4h, v4.h[1] \n"
                        "fmla v26.4h, v20.4h, v4.h[2] \n"
                        "fmla v27.4h, v20.4h, v4.h[3] \n"
                        "fmla v28.4h, v20.4h, v4.h[4] \n"
                        "fmla v29.4h, v20.4h, v4.h[5] \n"
                        "fmla v30.4h, v20.4h, v4.h[6] \n"
                        "fmla v31.4h, v20.4h, v4.h[7] \n"
                        "fmla v24.4h, v21.4h, v5.h[0] \n"
                        "fmla v25.4h, v21.4h, v5.h[1] \n"
                        "fmla v26.4h, v21.4h, v5.h[2] \n"
                        "fmla v27.4h, v21.4h, v5.h[3] \n"
                        "fmla v28.4h, v21.4h, v5.h[4] \n"
                        "fmla v29.4h, v21.4h, v5.h[5] \n"
                        "fmla v30.4h, v21.4h, v5.h[6] \n"
                        "fmla v31.4h, v21.4h, v5.h[7] \n"
                        "fmla v24.4h, v22.4h, v6.h[0] \n"
                        "fmla v25.4h, v22.4h, v6.h[1] \n"
                        "fmla v26.4h, v22.4h, v6.h[2] \n"
                        "fmla v27.4h, v22.4h, v6.h[3] \n"
                        "fmla v28.4h, v22.4h, v6.h[4] \n"
                        "fmla v29.4h, v22.4h, v6.h[5] \n"
                        "fmla v30.4h, v22.4h, v6.h[6] \n"
                        "fmla v31.4h, v22.4h, v6.h[7] \n"
                        "subs %w0, %w0, #1 \n"
                        "fmla v24.4h, v23.4h, v7.h[0] \n"
                        "fmla v25.4h, v23.4h, v7.h[1] \n"
                        "fmla v26.4h, v23.4h, v7.h[2] \n"
                        "fmla v27.4h, v23.4h, v7.h[3] \n"
                        "fmla v28.4h, v23.4h, v7.h[4] \n"
                        "fmla v29.4h, v23.4h, v7.h[5] \n"
                        "fmla v30.4h, v23.4h, v7.h[6] \n"
                        "fmla v31.4h, v23.4h, v7.h[7] \n"
                        "bne 0b \n"
                        "st1 {v24.4h}, [%1], #8 \n"
                        "st1 {v25.4h}, [%2], #8 \n"
                        "st1 {v26.4h}, [%3], #8 \n"
                        "st1 {v27.4h}, [%4], #8 \n"
                        "st1 {v28.4h}, [%5], #8 \n"
                        "st1 {v29.4h}, [%6], #8 \n"
                        "st1 {v30.4h}, [%7], #8 \n"
                        "st1 {v31.4h}, [%8], #8 \n"
                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(output1_tm), // %2
                        "=r"(output2_tm), // %3
                        "=r"(output3_tm), // %4
                        "=r"(output4_tm), // %5
                        "=r"(output5_tm), // %6
                        "=r"(output6_tm), // %7
                        "=r"(output7_tm), // %8
                        "=r"(r0),         // %9
                        "=r"(kptr)        // %10
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(output1_tm),
                        "3"(output2_tm),
                        "4"(output3_tm),
                        "5"(output4_tm),
                        "6"(output5_tm),
                        "7"(output6_tm),
                        "8"(output7_tm),
                        "9"(r0),
                        "10"(kptr)
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
                }
                for (; i < tiles; i++)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4);
                    const __fp16* kptr = kernel01_tm.row<const __fp16>(r);

                    int nn = inch; // inch always > 0

                    // 1 tile x 8 outch: single v30 accumulator, lane stores
                    asm volatile(
                        "eor v30.16b, v30.16b, v30.16b \n"
                        "0: \n"
                        "prfm pldl1keep, [%9, #128] \n"
                        "ld1 {v0.8h}, [%9], #16 \n"
                        "prfm pldl1keep, [%10, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%10], #64 \n"
                        "fmla v30.8h, v16.8h, v0.h[0] \n"
                        "fmla v30.8h, v17.8h, v0.h[1] \n"
                        "prfm pldl1keep, [%10, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%10], #64 \n"
                        "fmla v30.8h, v18.8h, v0.h[2] \n"
                        "fmla v30.8h, v19.8h, v0.h[3] \n"
                        "subs %w0, %w0, #1 \n"
                        "fmla v30.8h, v20.8h, v0.h[4] \n"
                        "fmla v30.8h, v21.8h, v0.h[5] \n"
                        "fmla v30.8h, v22.8h, v0.h[6] \n"
                        "fmla v30.8h, v23.8h, v0.h[7] \n"
                        "bne 0b \n"
                        "st1 {v30.h}[0], [%1], #2 \n"
                        "st1 {v30.h}[1], [%2], #2 \n"
                        "st1 {v30.h}[2], [%3], #2 \n"
                        "st1 {v30.h}[3], [%4], #2 \n"
                        "st1 {v30.h}[4], [%5], #2 \n"
                        "st1 {v30.h}[5], [%6], #2 \n"
                        "st1 {v30.h}[6], [%7], #2 \n"
                        "st1 {v30.h}[7], [%8], #2 \n"
                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(output1_tm), // %2
                        "=r"(output2_tm), // %3
                        "=r"(output3_tm), // %4
                        "=r"(output4_tm), // %5
                        "=r"(output5_tm), // %6
                        "=r"(output6_tm), // %7
                        "=r"(output7_tm), // %8
                        "=r"(r0),         // %9
                        "=r"(kptr)        // %10
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(output1_tm),
                        "3"(output2_tm),
                        "4"(output3_tm),
                        "5"(output4_tm),
                        "6"(output5_tm),
                        "7"(output6_tm),
                        "8"(output7_tm),
                        "9"(r0),
                        "10"(kptr)
                        : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30");
                }
            }
        }

        remain_outch_start += nn_outch << 3;

        // tail path: one output channel per iteration
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = remain_outch_start; p < outch; p++)
        {
            __fp16* output0_tm = top_blob_tm.channel(p);

            const Mat kernel0_tm = kernel_tm.channel(p / 8 + p % 8);

            for (int r = 0; r < 64; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                int i = 0;
                for (; i + 7 < tiles; i += 8)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8);
                    const __fp16* kptr = kernel0_tm.row<const __fp16>(r);

                    int nn = inch; // inch always > 0

                    // 8 tiles x 1 outch
                    asm volatile(
                        "eor v30.16b, v30.16b, v30.16b \n"
                        "0: \n"
                        "prfm pldl1keep, [%2, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%2], #64 \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v0.8h}, [%3], #16 \n"
                        "fmla v30.8h, v16.8h, v0.h[0] \n"
                        "fmla v30.8h, v17.8h, v0.h[1] \n"
                        "prfm pldl1keep, [%2, #512] \n"
                        "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%2], #64 \n"
                        "fmla v30.8h, v18.8h, v0.h[2] \n"
                        "fmla v30.8h, v19.8h, v0.h[3] \n"
                        "subs %w0, %w0, #1 \n"
                        "fmla v30.8h, v20.8h, v0.h[4] \n"
                        "fmla v30.8h, v21.8h, v0.h[5] \n"
                        "fmla v30.8h, v22.8h, v0.h[6] \n"
                        "fmla v30.8h, v23.8h, v0.h[7] \n"
                        "bne 0b \n"
                        "st1 {v30.8h}, [%1], #16 \n"
                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(r0),         // %2
                        "=r"(kptr)        // %3
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(r0),
                        "3"(kptr)
                        : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30");
                }
                for (; i + 3 < tiles; i += 4)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4);
                    const __fp16* kptr = kernel0_tm.row<const __fp16>(r);

                    int nn = inch; // inch always > 0

                    // 4 tiles x 1 outch
                    asm volatile(
                        "eor v30.16b, v30.16b, v30.16b \n"
                        "0: \n"
                        "prfm pldl1keep, [%2, #256] \n"
                        "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v0.8h}, [%3], #16 \n"
                        "fmla v30.4h, v16.4h, v0.h[0] \n"
                        "fmla v30.4h, v17.4h, v0.h[1] \n"
                        "prfm pldl1keep, [%2, #256] \n"
                        "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n"
                        "fmla v30.4h, v18.4h, v0.h[2] \n"
                        "fmla v30.4h, v19.4h, v0.h[3] \n"
                        "subs %w0, %w0, #1 \n"
                        "fmla v30.4h, v20.4h, v0.h[4] \n"
                        "fmla v30.4h, v21.4h, v0.h[5] \n"
                        "fmla v30.4h, v22.4h, v0.h[6] \n"
                        "fmla v30.4h, v23.4h, v0.h[7] \n"
                        "bne 0b \n"
                        "st1 {v30.4h}, [%1], #8 \n"
                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(r0),         // %2
                        "=r"(kptr)        // %3
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(r0),
                        "3"(kptr)
                        : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30");
                }
                for (; i < tiles; i++)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4);
                    const __fp16* kptr = kernel0_tm.row<const __fp16>(r);

                    // 1 tile x 1 outch: intrinsics, horizontal add of the
                    // 8-lane fp16 accumulator (summed in fp32 for accuracy)
                    float16x8_t _sum0 = vdupq_n_f16((__fp16)0.f);

                    for (int q = 0; q < inch; q++)
                    {
                        float16x8_t _r0 = vld1q_f16(r0);
                        float16x8_t _k0 = vld1q_f16(kptr);
                        _sum0 = vfmaq_f16(_sum0, _r0, _k0);

                        kptr += 8;
                        r0 += 8;
                    }

                    __fp16 sum0 = vaddvq_f32(vcvt_f32_f16(vadd_f16(vget_low_f16(_sum0), vget_high_f16(_sum0))));

                    output0_tm[0] = sum0;
                    output0_tm++;
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, 2u, 1, opt.workspace_allocator);
    }
    {
        conv3x3s1_winograd64_transform_output_fp16sa_neon(top_blob_tm, top_blob_bordered, bias, opt);
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
GB_binop__second_int16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): this translation unit instantiates the SECOND binary operator
// (z = y) for int16_t.  All numeric loops live in the #included template
// files; the macros below are the only operator/type-specific pieces, so the
// templates expand into type-specialized kernels at compile time.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__second_int16)
// A.*B function (eWiseMult):       GB (_AemultB_08__second_int16)
// A.*B function (eWiseMult):       GB (_AemultB_02__second_int16)
// A.*B function (eWiseMult):       GB (_AemultB_04__second_int16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__second_int16)
// A*D function (colscale):         GB (_AxD__second_int16)
// D*A function (rowscale):         GB (_DxB__second_int16)
// C+=B function (dense accum):     GB (_Cdense_accumB__second_int16)
// C+=b function (dense accum):     GB (_Cdense_accumb__second_int16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__second_int16)
// C=scalar+B                       GB ((none))
// C=scalar+B'                      GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                      GB ((none))

// C type:   int16_t
// A type:   int16_t
// B,b type: int16_t
// BinaryOp: cij = bij

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA] -- intentionally empty: SECOND never reads A's values
#define GB_GETA(aij,Ax,pA,A_iso) \
    ;

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int16_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: SECOND returns its second argument, x is ignored
#define GB_BINOP(z,x,y,i,j) \
    z = y ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    1

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SECOND || GxB_NO_INT16 || GxB_NO_SECOND_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// not generated for SECOND: this variant exists only for the ops listed below
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__second_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__second_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__second_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns (kept as generated)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__second_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__second_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__second_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix slicing workspace; freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__second_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__second_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__second_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__second_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// not generated for SECOND: bind1st(x,B) would just copy B
#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = bij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// not generated for SECOND: bind2nd(A,y) would broadcast the scalar y
#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        // GB_GETA expands to nothing for SECOND, hence the bare semicolons
        ;
        ;
        Cx [p] = y ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    int16_t aij = GBX (Ax, pA, false) ;     \
    Cx [pC] = aij ;                         \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)       \
{                               \
    ; ;                         \
    Cx [pC] = y ;               \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
convolution_1x1_packnto1_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sgemm_packnto1_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; const int size = w * h; Mat bottom_im2col = bottom_blob; bottom_im2col.w = size; bottom_im2col.h = 1; im2col_sgemm_packnto1_fp16sa_rvv(bottom_im2col, top_blob, kernel, _bias, opt); } static void conv1x1s2_sgemm_packnto1_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { const int packn = csrr_vlenb() / 2; const word_type vl = vsetvl_e16m1(packn); int w = bottom_blob.w; int channels = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; const int tailstep = (w - 2 * outw + w) * packn; Mat bottom_blob_shrinked; bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < channels; p++) { const __fp16* r0 = bottom_blob.channel(p); __fp16* outptr = bottom_blob_shrinked.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { vfloat16m1_t _val = vle16_v_f16m1(r0, vl); vse16_v_f16m1(outptr, _val, vl); r0 += packn * 2; 
outptr += packn; } r0 += tailstep; } } conv1x1s1_sgemm_packnto1_fp16sa_rvv(bottom_blob_shrinked, top_blob, kernel, _bias, opt); }
GB_unop__identity_uint16_uint32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): IDENTITY operator with a uint32_t -> uint16_t typecast; the
// "unary op" itself is a no-op, so each kernel reduces to a narrowing copy.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint16_uint32)
// op(A') function:  GB (_unop_tran__identity_uint16_uint32)

// C type:   uint16_t
// A type:   uint32_t
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: pass-through)
#define GB_OP(z, x) \
    z = x ;

// casting (narrowing: uint32_t value truncated to uint16_t)
#define GB_CAST(z, aij) \
    uint16_t z = (uint16_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint32_t aij = Ax [pA] ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    uint16_t z = (uint16_t) aij ;   \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_uint16_uint32)
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // sparse/full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint32_t aij = Ax [p] ;
            uint16_t z = (uint16_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint32_t aij = Ax [p] ;
            uint16_t z = (uint16_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_uint16_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__plus_int32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// NOTE(review): this translation unit instantiates the PLUS binary operator
// (z = x + y) for int32_t.  All numeric loops live in the #included template
// files; the macros below are the only operator/type-specific pieces.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__plus_int32
// A.*B function (eWiseMult):       GB_AemultB__plus_int32
// A*D function (colscale):         GB_AxD__plus_int32
// D*A function (rowscale):         GB_DxB__plus_int32
// C+=B function (dense accum):     GB_Cdense_accumB__plus_int32
// C+=b function (dense accum):     GB_Cdense_accumb__plus_int32
// C+=A+B function (dense ewise3):  GB_Cdense_ewise3_accum__plus_int32
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__plus_int32
// C=scalar+B                       GB_bind1st__plus_int32
// C=scalar+B'                      GB_bind1st_tran__plus_int32
// C=A+scalar                       GB_bind2nd__plus_int32
// C=A'+scalar                      GB_bind2nd_tran__plus_int32

// C type:   int32_t
// A type:   int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij + bij)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = (x + y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// (none) here: no CBLAS axpy for integer types
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PLUS || GxB_NO_INT32 || GxB_NO_PLUS_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB_Cdense_ewise3_accum__plus_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__plus_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__plus_int32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__plus_int32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns (kept as generated)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__plus_int32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__plus_int32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__plus_int32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__plus_int32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__plus_int32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int32_t bij = Bx [p] ;
        Cx [p] = (x + bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__plus_int32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int32_t aij = Ax [p] ;
        Cx [p] = (aij + y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int32_t aij = Ax [pA] ;         \
    Cx [pC] = (x + aij) ;           \
}

GrB_Info GB_bind1st_tran__plus_int32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int32_t aij = Ax [pA] ;         \
    Cx [pC] = (aij + y) ;           \
}

GrB_Info GB_bind2nd_tran__plus_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__identity_int8_int16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): IDENTITY operator with an int16_t -> int8_t typecast; the
// "unary op" itself is a no-op, so each kernel reduces to a narrowing copy.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_int8_int16)
// op(A') function:  GB (_unop_tran__identity_int8_int16)

// C type:   int8_t
// A type:   int16_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: pass-through)
#define GB_OP(z, x) \
    z = x ;

// casting (narrowing: int16_t value truncated to int8_t)
#define GB_CAST(z, aij) \
    int8_t z = (int8_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    int16_t aij = Ax [pA] ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    int8_t z = (int8_t) aij ;       \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_int8_int16)
(
    int8_t *Cx,                 // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // sparse/full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int16_t aij = Ax [p] ;
            int8_t z = (int8_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int16_t aij = Ax [p] ;
            int8_t z = (int8_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_int8_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
kernel.h
/* * kernel.h * * Created on: Oct 9, 2016 * Updated on: Oct 20, 2016 * Author: Yimin Zhong */ #ifndef FMM_KERNEL_H #define FMM_KERNEL_H #include "tree.h" #include <functional> #include "Eigen/Dense" #ifdef RUN_OMP #include "omp.h" #endif using Eigen::MatrixXd; using Eigen::VectorXd; class kernel { public: tree t; VectorXd chargeTree; std::function<double(point&, point&)> eval; int rank; MatrixXd R[8]; int nChebyshev; VectorXd chebyNode; MatrixXd tNode; kernel() { nChebyshev = 0; rank = 0; } ~kernel() {} void initialize(int _nChebyshev, vector<point>& _source, vector<point>& _target, VectorXd _charge, int _nSource, int _nTarget, int _rank, int _maxLevel) { // populate the kd-tree. t.populate(_source, _target, _nSource, _nTarget, _rank, _maxLevel); nChebyshev = _nChebyshev; chargeTree = _charge; // nChebyshev^3 nodes are used for interpolation. rank = nChebyshev * nChebyshev * nChebyshev; chebyNode = VectorXd::Zero(nChebyshev); getStandardChebyNodes(nChebyshev, chebyNode); tNode = MatrixXd::Zero(nChebyshev, nChebyshev); getStandardChebyPoly(nChebyshev, nChebyshev, chebyNode, tNode); getTransfer(nChebyshev, chebyNode, tNode, R); } void run(VectorXd& potentialMatrix) { #ifdef RUN_OMP #pragma omp parallel #endif { #ifdef RUN_OMP #pragma omp single #endif RUN("up-pass", upPass(0)); } #ifdef RUN_OMP #pragma omp taskwait #endif potentialMatrix = VectorXd::Zero(t.nTarget); #ifdef RUN_OMP #pragma omp parallel #endif { #ifdef RUN_OMP #pragma omp single #endif RUN("down-pass", downPass(0, potentialMatrix)); } #ifdef RUN_OMP #pragma omp taskwait #endif } protected: void getStandardChebyNodes(int _nChebyshev, VectorXd& _chebyNode) { _chebyNode = VectorXd::Zero(_nChebyshev); for (int i = 0; i < _nChebyshev; ++i) { _chebyNode(i) = -cos((i + 0.5) * M_PI/_nChebyshev); } } void getStandardChebyPoly(int _nChebyPoly, int _N, VectorXd& _x, MatrixXd& _T) { _T = MatrixXd::Zero(_N, _nChebyPoly); _T.col(0) = VectorXd::Ones(_N); if (_nChebyPoly > 1) { _T.col(1) = _x; for (int i = 2; i < 
_nChebyPoly; ++i) { _T.col(i) = 2.0 * _x.cwiseProduct(_T.col(i - 1)) - _T.col(i - 2); } } } void getTransferFromParentChebyshevToChildrenChebyshev(int _nChebyshev, VectorXd& _chebyNode, MatrixXd& _tNode, MatrixXd& _transfer) { VectorXd childChebyNode (2 * _nChebyshev); childChebyNode.segment(0, nChebyshev) = 0.5 *(_chebyNode - VectorXd::Ones(_nChebyshev)); childChebyNode.segment(nChebyshev, nChebyshev) = 0.5 *(_chebyNode + VectorXd::Ones(_nChebyshev)); getStandardChebyPoly(_nChebyshev, 2 * _nChebyshev, childChebyNode, _transfer); _transfer = (2.0 * _transfer * _tNode.transpose() - MatrixXd::Ones(2 * _nChebyshev, _nChebyshev))/_nChebyshev; } void getTransfer(int _nChebyshev, VectorXd& _chebyNode, MatrixXd& _tNode, MatrixXd* R) { MatrixXd S; S = MatrixXd::Zero(2 * _nChebyshev, _nChebyshev); getTransferFromParentChebyshevToChildrenChebyshev(_nChebyshev, _chebyNode, _tNode, S); MatrixXd Transfer[2]; Transfer[0] = S.block(0, 0, _nChebyshev, _nChebyshev); Transfer[1] = S.block(_nChebyshev, 0, _nChebyshev, _nChebyshev); int _rank = _nChebyshev * _nChebyshev * _nChebyshev; for (int i = 0; i < 8; ++i) { R[i] = MatrixXd::Zero(_rank, _rank); } // follow bit representaion. 
for (int i = 0; i < _nChebyshev; ++i) { for (int j =0; j < _nChebyshev; ++j) { for (int k = 0; k < _nChebyshev; ++k) { for (int l = 0; l < _nChebyshev; ++l) { for (int m = 0; m < _nChebyshev; ++m) { for (int n = 0; n < _nChebyshev; ++n) { for (int id = 0; id < 8; ++id) { int bit[3]; bit[0] = (id >> 0) & 1; bit[1] = (id >> 1) & 1; bit[2] = (id >> 2) & 1; R[id](i * _nChebyshev * _nChebyshev + j * _nChebyshev + k, l * _nChebyshev * _nChebyshev + m * _nChebyshev + n) = Transfer[bit[2]](i, l) * Transfer[bit[1]](j, m) * Transfer[bit[0]](k, n); } } } } } } } } void getScaledChebyNode(int _nChebyNode, VectorXd& _chebyNode, point& center, point& radius, vector<point>& _scaledCnode) { for (int i = 0; i < _nChebyNode; ++i) { _scaledCnode.push_back(point(center.x + radius.x * _chebyNode(i), center.y + radius.y * _chebyNode(i), center.z + radius.z * _chebyNode(i))); } } void getCharge(int rootId) { node& n = t.dict[rootId]; if(n.chargeComputed){ return; } else{ n.chargeComputed = true; n.charge = MatrixXd::Zero(n.nSource,1); for(int k=0;k<n.nSource; ++k){ n.charge.row(k) = chargeTree.row(n.sourceIndex[k]); } } } void getTransferParentToChildren(int _nChebyNode, vector<point>& _tree, vector<int>& _index, point& _center, point& _radius, VectorXd& _chebyNode, MatrixXd& _tNode, MatrixXd& R) { int N = (int) _index.size(); VectorXd standlocation[3]; standlocation[0].resize(N); standlocation[1].resize(N); standlocation[2].resize(N); for (int i = 0; i < N; ++i) { standlocation[0](i) = (_tree[_index[i]].x - _center.x)/_radius.x; standlocation[1](i) = (_tree[_index[i]].y - _center.y)/_radius.y; standlocation[2](i) = (_tree[_index[i]].z - _center.z)/_radius.z; } MatrixXd Transfer[3]; for (int k = 0; k < 3; ++k) { getStandardChebyPoly(_nChebyNode, N, standlocation[k], Transfer[k]); Transfer[k] = (2.0 * Transfer[k] * _tNode.transpose() - MatrixXd::Ones(N, _nChebyNode))/_nChebyNode; } int _rank = _nChebyNode * _nChebyNode * _nChebyNode; R = MatrixXd::Zero(N, _rank); for (int k = 0; k < N; 
++k) { for (int i = 0; i < _nChebyNode; ++i) { for (int j = 0; j <_nChebyNode; ++j) { for (int l = 0; l< _nChebyNode; ++l) { R(k, l*_nChebyNode * _nChebyNode + j*_nChebyNode + i) = Transfer[0](k, i) * Transfer[1](k, j) * Transfer[2](k, l); } } } } } void kernelEval(vector<point>& _source, vector<point>& _target, MatrixXd& K) { K = MatrixXd::Zero(_target.size(), _source.size()); for (size_t _s = 0; _s < _source.size(); ++_s) { for (size_t _t = 0; _t < _target.size(); ++_t) { K(_t, _s) = this->eval(_source[_s], _target[_t]); } } } void kernelEvalIndex(vector<int>& _sourceIndex, vector<int>& _targetIndex, MatrixXd& K) { K = MatrixXd::Zero(_targetIndex.size(), _sourceIndex.size()); for (size_t _s = 0; _s < _sourceIndex.size(); ++_s) { for (size_t _t = 0; _t < _targetIndex.size(); ++_t) { K(_t, _s) = this->eval( this->t.sourceTree[_sourceIndex[_s]], this->t.targetTree[_targetIndex[_t]] ); } } } void kernelEvalChebyshev(int _M, vector<point>& _xv, int _N, vector<point>& _yv, MatrixXd& K) { vector<point> sourceVec; vector<point> targetVec; K = MatrixXd::Zero(_M * _M * _M, _N* _N * _N); for (int k = 0; k < _M; k++) { for (int j = 0; j < _M; j++) { for (int i = 0; i < _M; i++) { point np(_xv[i].x , _xv[j].y, _xv[k].z); sourceVec.push_back(np); } } } for (int k = 0; k < _N; k++) { for (int j = 0; j < _N; j++) { for (int i = 0; i < _N; i++) { point np(_yv[i].x , _yv[j].y, _yv[k].z); targetVec.push_back(np); } } } kernelEval(sourceVec, targetVec, K); } void upPass(int rootId) { node& n = t.dict[rootId]; n.scaledCnode.clear(); n.nodeCharge = MatrixXd::Zero(rank, 1); n.nodePotential = MatrixXd::Zero(rank, 1); getScaledChebyNode(nChebyshev, chebyNode, n.center, n.radius, n.scaledCnode); if (n.isLeaf) { // lazy getCharge(rootId); getTransferParentToChildren(nChebyshev, t.sourceTree, n.sourceIndex, n.center, n.radius, chebyNode, tNode, n.R); getTransferParentToChildren(nChebyshev, t.targetTree, n.targetIndex, n.center, n.radius, chebyNode, tNode, n.L); n.nodeCharge += 
n.R.transpose() * n.charge; } else { for (int i = 0; i < 8; ++i) { #ifdef RUN_OMP #pragma omp task shared(n) firstprivate(i) #endif upPass(n.child[i]); } #ifdef RUN_OMP #pragma omp taskwait #endif for (int i = 0; i < 8; ++i) { if (!t.dict[n.child[i]].isEmpty) { n.nodeCharge += R[i].transpose() * t.dict[n.child[i]].nodeCharge; } } } } void downPass(int rootId, VectorXd& potential) { node& n = t.dict[rootId]; MatrixXd K; VectorXd temp; if (n.parent != -1) { /* * V list */ for (int i : n.vList) { if (!t.dict[i].isEmpty) { kernelEvalChebyshev(nChebyshev, t.dict[i].scaledCnode, nChebyshev, n.scaledCnode, K); n.nodePotential += K * t.dict[i].nodeCharge; } } /* * X List */ for (int i : n.xList) { if (!t.dict[i].isEmpty) { kernelEvalChebyshev(nChebyshev, t.dict[i].scaledCnode, nChebyshev, n.scaledCnode, K); n.nodePotential += K * t.dict[i].nodeCharge; } } /* * L2L */ node& p = t.dict[n.parent]; n.nodePotential += this->R[n.nodeIndex] * p.nodePotential; } if (n.isLeaf && n.nTarget != 0) { n.potential = MatrixXd::Zero(n.nTarget, 1); /* * U List */ for (int i : n.uList) { if (!t.dict[i].isEmpty) { getCharge(i); kernelEvalIndex(t.dict[i].sourceIndex, n.targetIndex, K); n.potential += K * t.dict[i].charge; } } /* * W List */ for (int i : n.wList) { if (!t.dict[i].isEmpty) { getCharge(i); kernelEvalIndex(t.dict[i].sourceIndex, n.targetIndex, K); n.potential += K * t.dict[i].charge; } } /* * L2T */ n.potential += n.L * n.nodePotential; /* * Finalize, caution: * * omp should be fine here, because no two threads will write to the same place at the same time. */ for (int i = 0; i < n.nTarget; i++) { potential.row(n.targetIndex[i]) += n.potential.row(i); } } if (!n.isLeaf) { for (int i = 0; i < 8; ++i) { #ifdef RUN_OMP #pragma omp task shared(n, potential) firstprivate(i) #endif downPass(n.child[i], potential); } #ifdef RUN_OMP #pragma omp taskwait #endif } } }; #endif //FMM_KERNEL_H
GB_binop__pow_uint8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// NOTE(review): GB(f) appears to be a name-mangling macro from GB.h that
// produces the exported kernel name -- confirm against GB.h.

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__pow_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_08__pow_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_02__pow_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_04__pow_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__pow_uint8)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__pow_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__pow_uint8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__pow_uint8)
// C=scalar+B                       GB (_bind1st__pow_uint8)
// C=scalar+B'                      GB (_bind1st_tran__pow_uint8)
// C=A+scalar                       GB (_bind2nd__pow_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__pow_uint8)

// C type:   uint8_t
// A type:   uint8_t
// A pattern? 0
// B type:   uint8_t
// B pattern? 0

// BinaryOp: cij = GB_pow_uint8 (aij, bij)

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_pow_uint8 (x, y) ;

// true if the binop must be flipped (pow is not commutative; see the
// flipxy handling in GB (_AemultB_02__pow_uint8) below)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_POW || GxB_NO_UINT8 || GxB_NO_POW_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__pow_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pow_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pow_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pow_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read by the template when is_eWiseUnion is true
    uint8_t alpha_scalar ;
    uint8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__pow_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__pow_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__pow_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__pow_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__pow_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_pow_uint8 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__pow_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_pow_uint8 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_pow_uint8 (x, aij) ;           \
}

GrB_Info GB (_bind1st_tran__pow_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE to the type of A for subsequent templates
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_pow_uint8 (aij, y) ;           \
}

GrB_Info GB (_bind2nd_tran__pow_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__lnot_int64_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_int64_uint32
// op(A') function:  GB_tran__lnot_int64_uint32

// C type:   int64_t
// A type:   uint32_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij)): the input is first cast to int64_t, then the
// logical NOT is applied to the cast value (same result as !(aij != 0))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT64 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)):  apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_int64_uint32
(
    int64_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')):  transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_int64_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // numerical phase only; the transpose pattern is handled by the template
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
convolution_sgemm_pack8_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void im2col_sgemm_pack8_fp16sa_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { // Mat bottom_im2col(size, maxk, inch, 16u, 8, opt.workspace_allocator); const int size = bottom_im2col.w; const int maxk = bottom_im2col.h; const int inch = bottom_im2col.c; const int outch = top_blob.c; const __fp16* bias = _bias; // permute Mat tmp; if (size >= 12) tmp.create(12 * maxk, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, 16u, 8, opt.workspace_allocator); else if (size >= 8) tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 16u, 8, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 16u, 8, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 16u, 8, opt.workspace_allocator); else tmp.create(maxk, inch, size, 16u, 8, opt.workspace_allocator); { int nn_size = size / 12; int remain_size_start = 0; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 12; __fp16* tmpptr = tmp.channel(i / 12); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const 
__fp16*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { // transpose 12x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0], #64 \n" "ld4 {v16.8h, v17.8h, v18.8h, v19.8h}, [%0] \n" "sub %0, %0, #128 \n" "uzp1 v20.8h, v0.8h, v4.8h \n" // 0 "uzp1 v21.8h, v16.8h, v1.8h \n" // 1 "uzp1 v22.8h, v5.8h, v17.8h \n" // 2 "uzp1 v23.8h, v2.8h, v6.8h \n" // 3 "uzp1 v24.8h, v18.8h, v3.8h \n" // 4 "uzp1 v25.8h, v7.8h, v19.8h \n" // 5 "uzp2 v26.8h, v0.8h, v4.8h \n" // 6 "uzp2 v27.8h, v16.8h, v1.8h \n" // 7 "uzp2 v28.8h, v5.8h, v17.8h \n" // 8 "uzp2 v29.8h, v2.8h, v6.8h \n" // 9 "uzp2 v30.8h, v18.8h, v3.8h \n" // 10 "uzp2 v31.8h, v7.8h, v19.8h \n" // 11 "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" "st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n" "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); img0 += size * 8; } } } remain_size_start += nn_size * 12; nn_size = (size - remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { // transpose 8x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub %0, %0, #64 \n" "uzp1 v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h \n" "st1 
{v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); img0 += size * 8; } } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); img0 += size * 8; } } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h}, [%0] \n" "st1 {v0.8h, v1.8h}, [%1], #32 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1"); img0 += size * 8; } } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 8; for (int k 
= 0; k < maxk; k++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); img0 += size * 8; } } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16* outptr0 = top_blob.channel(p); const __fp16 zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const __fp16* biasptr = bias ? bias + p * 8 : zeros; int i = 0; for (; i + 11 < size; i += 12) { const __fp16* tmpptr = tmp.channel(i / 12); const __fp16* kptr0 = kernel.channel(p); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v20.8h}, [%8] \n" "mov v21.16b, v20.16b \n" "mov v22.16b, v20.16b \n" "mov v23.16b, v20.16b \n" "mov v24.16b, v20.16b \n" "mov v25.16b, v20.16b \n" "mov v26.16b, v20.16b \n" "mov v27.16b, v20.16b \n" "mov v28.16b, v20.16b \n" "mov v29.16b, v20.16b \n" "mov v30.16b, v20.16b \n" "mov v31.16b, v20.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123 "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w0123 "fmla v20.8h, v12.8h, v0.h[0] \n" "fmla v21.8h, v12.8h, v0.h[1] \n" "fmla v22.8h, v12.8h, v0.h[2] \n" "fmla v23.8h, v12.8h, v0.h[3] \n" "fmla v24.8h, v12.8h, v0.h[4] \n" "fmla v25.8h, v12.8h, v0.h[5] \n" "fmla v26.8h, v12.8h, v0.h[6] \n" "fmla v27.8h, v12.8h, v0.h[7] \n" "fmla v28.8h, v12.8h, v1.h[0] \n" "fmla v29.8h, v12.8h, v1.h[1] \n" "fmla v30.8h, v12.8h, v1.h[2] \n" "fmla v31.8h, v12.8h, v1.h[3] \n" "fmla v20.8h, v13.8h, v1.h[4] \n" "fmla v21.8h, v13.8h, v1.h[5] \n" "fmla v22.8h, v13.8h, v1.h[6] \n" "fmla v23.8h, v13.8h, v1.h[7] \n" "fmla v24.8h, v13.8h, v2.h[0] \n" "fmla v25.8h, v13.8h, v2.h[1] \n" "fmla v26.8h, v13.8h, v2.h[2] \n" "fmla v27.8h, v13.8h, v2.h[3] \n" "fmla v28.8h, v13.8h, v2.h[4] \n" "fmla v29.8h, v13.8h, v2.h[5] \n" "fmla v30.8h, v13.8h, v2.h[6] \n" "fmla v31.8h, v13.8h, v2.h[7] \n" "prfm pldl1keep, [%2, 
#512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567 "fmla v20.8h, v14.8h, v3.h[0] \n" "fmla v21.8h, v14.8h, v3.h[1] \n" "fmla v22.8h, v14.8h, v3.h[2] \n" "fmla v23.8h, v14.8h, v3.h[3] \n" "fmla v24.8h, v14.8h, v3.h[4] \n" "fmla v25.8h, v14.8h, v3.h[5] \n" "fmla v26.8h, v14.8h, v3.h[6] \n" "fmla v27.8h, v14.8h, v3.h[7] \n" "fmla v28.8h, v14.8h, v4.h[0] \n" "fmla v29.8h, v14.8h, v4.h[1] \n" "fmla v30.8h, v14.8h, v4.h[2] \n" "fmla v31.8h, v14.8h, v4.h[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%3], #64 \n" // w4567 "fmla v20.8h, v15.8h, v4.h[4] \n" "fmla v21.8h, v15.8h, v4.h[5] \n" "fmla v22.8h, v15.8h, v4.h[6] \n" "fmla v23.8h, v15.8h, v4.h[7] \n" "fmla v24.8h, v15.8h, v5.h[0] \n" "fmla v25.8h, v15.8h, v5.h[1] \n" "fmla v26.8h, v15.8h, v5.h[2] \n" "fmla v27.8h, v15.8h, v5.h[3] \n" "fmla v28.8h, v15.8h, v5.h[4] \n" "fmla v29.8h, v15.8h, v5.h[5] \n" "fmla v30.8h, v15.8h, v5.h[6] \n" "fmla v31.8h, v15.8h, v5.h[7] \n" "fmla v20.8h, v16.8h, v6.h[0] \n" "fmla v21.8h, v16.8h, v6.h[1] \n" "fmla v22.8h, v16.8h, v6.h[2] \n" "fmla v23.8h, v16.8h, v6.h[3] \n" "fmla v24.8h, v16.8h, v6.h[4] \n" "fmla v25.8h, v16.8h, v6.h[5] \n" "fmla v26.8h, v16.8h, v6.h[6] \n" "fmla v27.8h, v16.8h, v6.h[7] \n" "fmla v28.8h, v16.8h, v7.h[0] \n" "fmla v29.8h, v16.8h, v7.h[1] \n" "fmla v30.8h, v16.8h, v7.h[2] \n" "fmla v31.8h, v16.8h, v7.h[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r891011 "fmla v20.8h, v17.8h, v7.h[4] \n" "fmla v21.8h, v17.8h, v7.h[5] \n" "fmla v22.8h, v17.8h, v7.h[6] \n" "fmla v23.8h, v17.8h, v7.h[7] \n" "fmla v24.8h, v17.8h, v8.h[0] \n" "fmla v25.8h, v17.8h, v8.h[1] \n" "fmla v26.8h, v17.8h, v8.h[2] \n" "fmla v27.8h, v17.8h, v8.h[3] \n" "fmla v28.8h, v17.8h, v8.h[4] \n" "fmla v29.8h, v17.8h, v8.h[5] \n" "fmla v30.8h, v17.8h, v8.h[6] \n" "fmla v31.8h, v17.8h, v8.h[7] \n" "fmla v20.8h, v18.8h, v9.h[0] \n" "fmla v21.8h, v18.8h, v9.h[1] \n" "fmla v22.8h, v18.8h, v9.h[2] \n" "fmla 
v23.8h, v18.8h, v9.h[3] \n" "fmla v24.8h, v18.8h, v9.h[4] \n" "fmla v25.8h, v18.8h, v9.h[5] \n" "fmla v26.8h, v18.8h, v9.h[6] \n" "fmla v27.8h, v18.8h, v9.h[7] \n" "fmla v28.8h, v18.8h, v10.h[0] \n" "fmla v29.8h, v18.8h, v10.h[1] \n" "fmla v30.8h, v18.8h, v10.h[2] \n" "fmla v31.8h, v18.8h, v10.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v20.8h, v19.8h, v10.h[4] \n" "fmla v21.8h, v19.8h, v10.h[5] \n" "fmla v22.8h, v19.8h, v10.h[6] \n" "fmla v23.8h, v19.8h, v10.h[7] \n" "fmla v24.8h, v19.8h, v11.h[0] \n" "fmla v25.8h, v19.8h, v11.h[1] \n" "fmla v26.8h, v19.8h, v11.h[2] \n" "fmla v27.8h, v19.8h, v11.h[3] \n" "fmla v28.8h, v19.8h, v11.h[4] \n" "fmla v29.8h, v19.8h, v11.h[5] \n" "fmla v30.8h, v19.8h, v11.h[6] \n" "fmla v31.8h, v19.8h, v11.h[7] \n" "bne 0b \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" "st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n" "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < size; i += 8) { const __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); const __fp16* kptr0 = kernel.channel(p); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v16.8h}, [%8] \n" "mov v17.16b, v16.16b \n" "mov v18.16b, v16.16b \n" "mov v19.16b, v16.16b \n" "mov v20.16b, v16.16b \n" "mov v21.16b, v16.16b \n" "mov v22.16b, v16.16b \n" "mov v23.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v17.8h, v8.8h, v0.h[1] \n" "fmla v18.8h, v8.8h, v0.h[2] \n" "fmla v19.8h, 
v8.8h, v0.h[3] \n" "fmla v20.8h, v8.8h, v0.h[4] \n" "fmla v21.8h, v8.8h, v0.h[5] \n" "fmla v22.8h, v8.8h, v0.h[6] \n" "fmla v23.8h, v8.8h, v0.h[7] \n" "fmla v16.8h, v9.8h, v1.h[0] \n" "fmla v17.8h, v9.8h, v1.h[1] \n" "fmla v18.8h, v9.8h, v1.h[2] \n" "fmla v19.8h, v9.8h, v1.h[3] \n" "fmla v20.8h, v9.8h, v1.h[4] \n" "fmla v21.8h, v9.8h, v1.h[5] \n" "fmla v22.8h, v9.8h, v1.h[6] \n" "fmla v23.8h, v9.8h, v1.h[7] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567 "fmla v16.8h, v10.8h, v2.h[0] \n" "fmla v17.8h, v10.8h, v2.h[1] \n" "fmla v18.8h, v10.8h, v2.h[2] \n" "fmla v19.8h, v10.8h, v2.h[3] \n" "fmla v20.8h, v10.8h, v2.h[4] \n" "fmla v21.8h, v10.8h, v2.h[5] \n" "fmla v22.8h, v10.8h, v2.h[6] \n" "fmla v23.8h, v10.8h, v2.h[7] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v11.8h, v3.h[0] \n" "fmla v17.8h, v11.8h, v3.h[1] \n" "fmla v18.8h, v11.8h, v3.h[2] \n" "fmla v19.8h, v11.8h, v3.h[3] \n" "fmla v20.8h, v11.8h, v3.h[4] \n" "fmla v21.8h, v11.8h, v3.h[5] \n" "fmla v22.8h, v11.8h, v3.h[6] \n" "fmla v23.8h, v11.8h, v3.h[7] \n" "fmla v16.8h, v12.8h, v4.h[0] \n" "fmla v17.8h, v12.8h, v4.h[1] \n" "fmla v18.8h, v12.8h, v4.h[2] \n" "fmla v19.8h, v12.8h, v4.h[3] \n" "fmla v20.8h, v12.8h, v4.h[4] \n" "fmla v21.8h, v12.8h, v4.h[5] \n" "fmla v22.8h, v12.8h, v4.h[6] \n" "fmla v23.8h, v12.8h, v4.h[7] \n" "fmla v16.8h, v13.8h, v5.h[0] \n" "fmla v17.8h, v13.8h, v5.h[1] \n" "fmla v18.8h, v13.8h, v5.h[2] \n" "fmla v19.8h, v13.8h, v5.h[3] \n" "fmla v20.8h, v13.8h, v5.h[4] \n" "fmla v21.8h, v13.8h, v5.h[5] \n" "fmla v22.8h, v13.8h, v5.h[6] \n" "fmla v23.8h, v13.8h, v5.h[7] \n" "fmla v16.8h, v14.8h, v6.h[0] \n" "fmla v17.8h, v14.8h, v6.h[1] \n" "fmla v18.8h, v14.8h, v6.h[2] \n" "fmla v19.8h, v14.8h, v6.h[3] \n" "fmla v20.8h, v14.8h, v6.h[4] \n" "fmla v21.8h, v14.8h, v6.h[5] \n" "fmla v22.8h, v14.8h, v6.h[6] \n" "fmla v23.8h, v14.8h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, 
v15.8h, v7.h[0] \n" "fmla v17.8h, v15.8h, v7.h[1] \n" "fmla v18.8h, v15.8h, v7.h[2] \n" "fmla v19.8h, v15.8h, v7.h[3] \n" "fmla v20.8h, v15.8h, v7.h[4] \n" "fmla v21.8h, v15.8h, v7.h[5] \n" "fmla v22.8h, v15.8h, v7.h[6] \n" "fmla v23.8h, v15.8h, v7.h[7] \n" "bne 0b \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i + 3 < size; i += 4) { const __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const __fp16* kptr0 = kernel.channel(p); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v16.8h}, [%8] \n" "mov v17.16b, v16.16b \n" "mov v18.16b, v16.16b \n" "mov v19.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v17.8h, v8.8h, v1.h[0] \n" "fmla v18.8h, v8.8h, v2.h[0] \n" "fmla v19.8h, v8.8h, v3.h[0] \n" "fmla v16.8h, v9.8h, v0.h[1] \n" "fmla v17.8h, v9.8h, v1.h[1] \n" "fmla v18.8h, v9.8h, v2.h[1] \n" "fmla v19.8h, v9.8h, v3.h[1] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v10.8h, v0.h[2] \n" "fmla v17.8h, v10.8h, v1.h[2] \n" "fmla v18.8h, v10.8h, v2.h[2] \n" "fmla v19.8h, v10.8h, v3.h[2] \n" "fmla v16.8h, v11.8h, v0.h[3] \n" "fmla v17.8h, v11.8h, v1.h[3] \n" "fmla v18.8h, v11.8h, v2.h[3] \n" "fmla v19.8h, v11.8h, v3.h[3] \n" "fmla v16.8h, v12.8h, v0.h[4] \n" "fmla v17.8h, v12.8h, v1.h[4] \n" "fmla v18.8h, v12.8h, v2.h[4] \n" "fmla v19.8h, v12.8h, v3.h[4] \n" "fmla v16.8h, v13.8h, v0.h[5] \n" 
"fmla v17.8h, v13.8h, v1.h[5] \n" "fmla v18.8h, v13.8h, v2.h[5] \n" "fmla v19.8h, v13.8h, v3.h[5] \n" "fmla v16.8h, v14.8h, v0.h[6] \n" "fmla v17.8h, v14.8h, v1.h[6] \n" "fmla v18.8h, v14.8h, v2.h[6] \n" "fmla v19.8h, v14.8h, v3.h[6] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, v15.8h, v0.h[7] \n" "fmla v17.8h, v15.8h, v1.h[7] \n" "fmla v18.8h, v15.8h, v2.h[7] \n" "fmla v19.8h, v15.8h, v3.h[7] \n" "bne 0b \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } for (; i + 1 < size; i += 2) { const __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const __fp16* kptr0 = kernel.channel(p); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v16.8h}, [%8] \n" "mov v17.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r01 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v17.8h, v8.8h, v1.h[0] \n" "fmla v16.8h, v9.8h, v0.h[1] \n" "fmla v17.8h, v9.8h, v1.h[1] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v10.8h, v0.h[2] \n" "fmla v17.8h, v10.8h, v1.h[2] \n" "fmla v16.8h, v11.8h, v0.h[3] \n" "fmla v17.8h, v11.8h, v1.h[3] \n" "fmla v16.8h, v12.8h, v0.h[4] \n" "fmla v17.8h, v12.8h, v1.h[4] \n" "fmla v16.8h, v13.8h, v0.h[5] \n" "fmla v17.8h, v13.8h, v1.h[5] \n" "fmla v16.8h, v14.8h, v0.h[6] \n" "fmla v17.8h, v14.8h, v1.h[6] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, v15.8h, v0.h[7] \n" "fmla v17.8h, v15.8h, v1.h[7] \n" "bne 0b \n" "st1 {v16.8h, v17.8h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), 
"2"(tmpptr),
                "3"(kptr0),
                "r"(biasptr) // %8
                : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17");
        }
        // Remainder: one output pixel at a time (size not a multiple of 2).
        for (; i < size; i++)
        {
            // Index into tmp reflects the 12/8/4/2/1 column tiling used when
            // tmp was packed above.
            const __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
            const __fp16* kptr0 = kernel.channel(p);

            int nn = inch * maxk; // inch always > 0

            // Accumulate one 8-wide fp16 output vector (v16) over all
            // inch*maxk kernel taps; v16 starts from the bias vector (%8).
            asm volatile(
                "ld1    {v16.8h}, [%8]              \n"
                "0:                                 \n"
                "prfm   pldl1keep, [%2, #128]       \n"
                "ld1    {v0.8h}, [%2], #16          \n" // r0
                "prfm   pldl1keep, [%3, #512]       \n"
                "ld1    {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
                "fmla   v16.8h, v8.8h, v0.h[0]      \n"
                "fmla   v16.8h, v9.8h, v0.h[1]      \n"
                "prfm   pldl1keep, [%3, #512]       \n"
                "ld1    {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
                "fmla   v16.8h, v10.8h, v0.h[2]     \n"
                "fmla   v16.8h, v11.8h, v0.h[3]     \n"
                "fmla   v16.8h, v12.8h, v0.h[4]     \n"
                "fmla   v16.8h, v13.8h, v0.h[5]     \n"
                "subs   %w0, %w0, #1                \n"
                "fmla   v16.8h, v14.8h, v0.h[6]     \n"
                "fmla   v16.8h, v15.8h, v0.h[7]     \n"
                "bne    0b                          \n"
                "st1    {v16.8h}, [%1], #16         \n"
                : "=r"(nn),      // %0
                "=r"(outptr0), // %1
                "=r"(tmpptr),  // %2
                "=r"(kptr0)    // %3
                : "0"(nn),
                "1"(outptr0),
                "2"(tmpptr),
                "3"(kptr0),
                "r"(biasptr) // %8
                : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16");
        }
    }
}

// Repack the convolution weights for the pack8 fp16 SGEMM kernel above.
// Weights are stored as fp16 with 8 input channels x 8 output channels
// interleaved per spatial tap, so the inner asm loops can load them with
// sequential 64-byte ld1 instructions.
// Assumes inch and outch are both multiples of 8 (pack8 layout) -- the
// loops silently skip any remainder, so callers must guarantee this.
static void convolution_im2col_sgemm_transform_kernel_pack8_fp16sa_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // interleave
    // src = maxk-inch-outch
    // dst = 8b-8a-maxk-inch/8a-outch/8b
    Mat kernel = _kernel.reshape(maxk, inch, outch);
    kernel_tm.create(64 * maxk, inch / 8, outch / 8, (size_t)2u); // 2u = fp16 element size

    for (int q = 0; q + 7 < outch; q += 8)
    {
        Mat g0 = kernel_tm.channel(q / 8);

        for (int p = 0; p + 7 < inch; p += 8)
        {
            __fp16* g00 = g0.row<__fp16>(p / 8);

            for (int k = 0; k < maxk; k++)
            {
                // 8 input channels (i) x 8 output channels (j) per tap,
                // narrowing float -> fp16 on the fly.
                for (int i = 0; i < 8; i++)
                {
                    for (int j = 0; j < 8; j++)
                    {
                        const float* k00 = kernel.channel(q + j).row(p + i);

                        g00[0] = (__fp16)k00[k];

                        g00++;
                    }
                }
            }
        }
    }
}

// Full convolution driver: lowers the input to column form (im2col) with
// pack8 fp16 storage, then calls the SGEMM kernel to produce top_blob.
// Handles arbitrary kernel size, stride and dilation via the im2col step.
static void convolution_im2col_sgemm_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int size = outw * outh;
    const int maxk = kernel_w * kernel_h;

    // im2col: one row of bottom_im2col per (channel, kernel tap), each row
    // holding the 8-packed input values for every output position.
    Mat bottom_im2col(size, maxk, inch, 16u, 8, opt.workspace_allocator);
    {
        // gap skips from the end of one output row's samples to the start
        // of the next input row (in fp16 elements, pack8).
        const int gap = (w * stride_h - outw * stride_w) * 8;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            __fp16* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const __fp16* sptr = img.row<const __fp16>(dilation_h * u) + dilation_w * v * 8;

                    for (int i = 0; i < outh; i++)
                    {
                        // Copy 4 / 2 / 1 output positions per iteration.
                        int j = 0;
                        for (; j + 3 < outw; j += 4)
                        {
                            float16x8_t _val0 = vld1q_f16(sptr);
                            float16x8_t _val1 = vld1q_f16(sptr + stride_w * 8);
                            float16x8_t _val2 = vld1q_f16(sptr + stride_w * 16);
                            float16x8_t _val3 = vld1q_f16(sptr + stride_w * 24);
                            vst1q_f16(ptr, _val0);
                            vst1q_f16(ptr + 8, _val1);
                            vst1q_f16(ptr + 16, _val2);
                            vst1q_f16(ptr + 24, _val3);

                            sptr += stride_w * 32;
                            ptr += 32;
                        }
                        for (; j + 1 < outw; j += 2)
                        {
                            float16x8_t _val0 = vld1q_f16(sptr);
                            float16x8_t _val1 = vld1q_f16(sptr + stride_w * 8);
                            vst1q_f16(ptr, _val0);
                            vst1q_f16(ptr + 8, _val1);

                            sptr += stride_w * 16;
                            ptr += 16;
                        }
                        for (; j < outw; j++)
                        {
                            float16x8_t _val = vld1q_f16(sptr);
                            vst1q_f16(ptr, _val);

                            sptr += stride_w * 8;
                            ptr += 8;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack8_fp16sa_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
convolutionbnrelu_1x1.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Fused 1x1 convolution + per-channel affine transform + ReLU, stride 1.
// For each output channel p: out = max(0, b[p] * (conv(x) + bias[p]) + a[p]).
// a/b are per-output-channel coefficients (presumably folded batch-norm
// offset/scale -- TODO confirm against the calling layer).
static void convbnrelu1x1s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt, const Mat& _a_data, const Mat& _b_data)
{
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;
    const float* a_data = _a_data;
    const float* b_data = _b_data;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        float a = a_data[p];
        float b = b_data[p];

        const float bias0 = bias ? bias[p] : 0.f;

        // Seed the accumulator plane with the convolution bias.
        out.fill(bias0);

        // Accumulate 4 input channels per pass.
        int q = 0;
        for (; q+3<inch; q+=4)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);
            const float* img1 = bottom_blob.channel(q+1);
            const float* img2 = bottom_blob.channel(q+2);
            const float* img3 = bottom_blob.channel(q+3);

            // 1x1 kernel: one weight per (outch, inch) pair.
            const float* kernel0 = kernel + p*inch + q;
            const float k0 = kernel0[0];
            const float k1 = kernel0[1];
            const float k2 = kernel0[2];
            const float k3 = kernel0[3];

            const float* r0 = img0;
            const float* r1 = img1;
            const float* r2 = img2;
            const float* r3 = img3;

            int size = outw * outh;

            int remain = size;

            for (; remain>0; remain--)
            {
                float sum = *r0 * k0;
                float sum1 = *r1 * k1;
                float sum2 = *r2 * k2;
                float sum3 = *r3 * k3;

                *outptr += sum + sum1 + sum2 + sum3;

                r0++;
                r1++;
                r2++;
                r3++;
                outptr++;
            }
        }
        // Remaining input channels, one at a time.
        for (; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch + q;
            const float k0 = kernel0[0];

            const float* r0 = img0;

            int size = outw * outh;

            int remain = size;

            for (; remain>0; remain--)
            {
                float sum = *r0 * k0;

                *outptr += sum;

                r0++;
                outptr++;
            }
        }

        // Apply the affine transform (b*x + a) and clamp negatives (ReLU).
        int remain = outw * outh;
        float* outptr = out;
        for (; remain>0; remain--)
        {
            *outptr = b*(*outptr)+a;
            if(*outptr < 0)
                *outptr = 0;
            outptr++;
        }
    }
}

// Same fused 1x1 conv + affine + ReLU as convbnrelu1x1s1_sse, but with
// stride 2: input is sampled every other column, and tailstep skips the
// remainder of the current input row plus one full row between output rows.
static void convbnrelu1x1s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt, const Mat& _a_data, const Mat& _b_data)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // Advance from the last sampled pixel of a row to the start of the
    // next sampled row (stride_h == 2).
    const int tailstep = w - 2*outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;
    const float* a_data = _a_data;
    const float* b_data = _b_data;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        float a = a_data[p];
        float b = b_data[p];

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        // Accumulate 4 input channels per pass.
        int q = 0;
        for (; q+3<inch; q+=4)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);
            const float* img1 = bottom_blob.channel(q+1);
            const float* img2 = bottom_blob.channel(q+2);
            const float* img3 = bottom_blob.channel(q+3);

            const float* kernel0 = kernel + p*inch + q;
            const float k0 = kernel0[0];
            const float k1 = kernel0[1];
            const float k2 = kernel0[2];
            const float k3 = kernel0[3];

            const float* r0 = img0;
            const float* r1 = img1;
            const float* r2 = img2;
            const float* r3 = img3;

            for (int i = 0; i < outh; i++)
            {
                int remain = outw;

                for (; remain>0; remain--)
                {
                    float sum = *r0 * k0;
                    float sum1 = *r1 * k1;
                    float sum2 = *r2 * k2;
                    float sum3 = *r3 * k3;

                    *outptr += sum + sum1 + sum2 + sum3;

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;
                    outptr++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
                r3 += tailstep;
            }
        }
        // Remaining input channels, one at a time.
        for (; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch + q;
            const float k0 = kernel0[0];

            const float* r0 = img0;

            for (int i = 0; i < outh; i++)
            {
                int remain = outw;

                for (; remain>0; remain--)
                {
                    float sum = *r0 * k0;

                    *outptr += sum;

                    r0 += 2;
                    outptr++;
                }

                r0 += tailstep;
            }
        }

        // Apply the affine transform (b*x + a) and clamp negatives (ReLU).
        int remain = outw * outh;
        float* outptr = out;
        for (; remain>0; remain--)
        {
            *outptr = b*(*outptr)+a;
            if(*outptr < 0)
                *outptr = 0;
            outptr++;
        }
    }
}
transform.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M % % T R R A A NN N SS F O O R R MM MM % % T RRRR AAAAA N N N SSS FFF O O RRRR M M M % % T R R A A N NN SS F O O R R M M % % T R R A A N N SSSSS F OOO R R M M % % % % % % MagickCore Image Transform Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/memory_.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/resource_.h" #include "MagickCore/resize.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o O r i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoOrientImage() adjusts an image so that its orientation is suitable for % viewing (i.e. top-left orientation). % % The format of the AutoOrientImage method is: % % Image *AutoOrientImage(const Image *image, % const OrientationType orientation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o orientation: Current image orientation. % % o exception: Return any errors or warnings in this structure. 
% */
MagickExport Image *AutoOrientImage(const Image *image,
  const OrientationType orientation,ExceptionInfo *exception)
{
  Image
    *oriented;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Map each EXIF-style orientation onto the transform that restores a
    top-left layout; unrecognized orientations fall back to a plain clone.
  */
  if (orientation == TopRightOrientation)
    oriented=FlopImage(image,exception);
  else
    if (orientation == BottomRightOrientation)
      oriented=RotateImage(image,180.0,exception);
    else
      if (orientation == BottomLeftOrientation)
        oriented=FlipImage(image,exception);
      else
        if (orientation == LeftTopOrientation)
          oriented=TransposeImage(image,exception);
        else
          if (orientation == RightTopOrientation)
            oriented=RotateImage(image,90.0,exception);
          else
            if (orientation == RightBottomOrientation)
              oriented=TransverseImage(image,exception);
            else
              if (orientation == LeftBottomOrientation)
                oriented=RotateImage(image,270.0,exception);
              else
                oriented=CloneImage(image,0,0,MagickTrue,exception);
  /*
    The pixels are now upright; record that so the transform is not
    re-applied downstream.
  */
  if (oriented != (Image *) NULL)
    oriented->orientation=TopLeftOrientation;
  return(oriented);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C h o p   I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ChopImage() removes a region of an image and collapses the image to occupy
%  the removed portion.
%
%  The format of the ChopImage method is:
%
%      Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o chop_info: Define the region of the image to chop.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
  ExceptionInfo *exception)
{
#define ChopImageTag  "Chop/Image"

  CacheView
    *chop_view,
    *image_view;

  Image
    *chop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    extent;

  ssize_t
    y;

  /*
    Check chop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(chop_info != (RectangleInfo *) NULL);
  if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
      ((chop_info->y+(ssize_t) chop_info->height) < 0) ||
      (chop_info->x > (ssize_t) image->columns) ||
      (chop_info->y > (ssize_t) image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /*
    Clamp the chop rectangle to the image bounds; negative offsets shrink
    the rectangle rather than reaching outside the image.
  */
  extent=(*chop_info);
  if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
    extent.width=(size_t) ((ssize_t) image->columns-extent.x);
  if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
    extent.height=(size_t) ((ssize_t) image->rows-extent.y);
  if (extent.x < 0)
    {
      extent.width-=(size_t) (-extent.x);
      extent.x=0;
    }
  if (extent.y < 0)
    {
      extent.height-=(size_t) (-extent.y);
      extent.y=0;
    }
  /*
    The result is the original image minus the chopped rows and columns.
  */
  chop_image=CloneImage(image,image->columns-extent.width,image->rows-
    extent.height,MagickTrue,exception);
  if (chop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Extract chop image: copy the rows above the chopped band, skipping the
    chopped columns within each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  chop_view=AcquireAuthenticCacheView(chop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,chop_image,extent.y,1)
#endif
  for (y=0; y < (ssize_t) extent.y; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Copy only pixels outside the chopped column range. */
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (chop_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(chop_image,channel,p[i],q);
          }
          q+=GetPixelChannels(chop_image);
        }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Extract chop image: copy the rows below the chopped band, shifted up by
    the chopped height.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,chop_image,image->rows-(extent.y+extent.height),1)
#endif
  for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (chop_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(chop_image,channel,p[i],q);
          }
          q+=GetPixelChannels(chop_image);
        }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  chop_view=DestroyCacheView(chop_view);
  image_view=DestroyCacheView(image_view);
  chop_image->type=image->type;
  if (status == MagickFalse)
    chop_image=DestroyImage(chop_image);
  return(chop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n s o l i d a t e   C M Y K   I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConsolidateCMYKImages() consolidates separate C, M, Y, and K planes into a
%  single image.
%
%  The format of the ConsolidateCMYKImages method is:
%
%      Image *ConsolidateCMYKImages(const Image *images,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConsolidateCMYKImages(const Image *images,
  ExceptionInfo *exception)
{
  CacheView
    *cmyk_view,
    *image_view;

  Image
    *cmyk_image,
    *cmyk_images;

  register ssize_t
    j;

  ssize_t
    y;

  /*
    Consolidate separate C, M, Y, and K planes into a single image.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cmyk_images=NewImageList();
  /*
    Each group of 4 consecutive images in the list supplies the C, M, Y and
    K planes of one output image.
  */
  for (j=0; j < (ssize_t) GetImageListLength(images); j+=4)
  {
    register ssize_t
      i;

    assert(images != (Image *) NULL);
    /* NOTE(review): cmyk_image is not destroyed when SetImageStorageClass
       fails below, which looks like a leak -- verify against upstream. */
    cmyk_image=CloneImage(images,0,0,MagickTrue,
      exception);
    if (cmyk_image == (Image *) NULL)
      break;
    if (SetImageStorageClass(cmyk_image,DirectClass,exception) == MagickFalse)
      break;
    (void) SetImageColorspace(cmyk_image,CMYKColorspace,exception);
    for (i=0; i < 4; i++)
    {
      image_view=AcquireVirtualCacheView(images,exception);
      cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
      for (y=0; y < (ssize_t) images->rows; y++)
      {
        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
        q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
          exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          break;
        for (x=0; x < (ssize_t) images->columns; x++)
        {
          Quantum
            pixel;

          /* Plane intensity is inverted: dark source pixels mean more ink. */
          pixel=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
          switch (i)
          {
            case 0: SetPixelCyan(cmyk_image,pixel,q); break;
            case 1: SetPixelMagenta(cmyk_image,pixel,q); break;
            case 2: SetPixelYellow(cmyk_image,pixel,q); break;
            case 3: SetPixelBlack(cmyk_image,pixel,q); break;
            default: break;
          }
          p+=GetPixelChannels(images);
          q+=GetPixelChannels(cmyk_image);
        }
        if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
          break;
      }
      cmyk_view=DestroyCacheView(cmyk_view);
      image_view=DestroyCacheView(image_view);
      /* Advance to the next plane image in the list. */
      images=GetNextImageInList(images);
      if (images == (Image *) NULL)
        break;
    }
    AppendImageToList(&cmyk_images,cmyk_image);
  }
  return(cmyk_images);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C r o p   I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CropImage() extracts a region of the image starting at the offset defined
%  by geometry.  Region must be fully defined, and no special handling of
%  geometry flags is performed.
%
%  The format of the CropImage method is:
%
%      Image *CropImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to crop with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
  ExceptionInfo *exception)
{
#define CropImageTag  "Crop/Image"

  CacheView
    *crop_view,
    *image_view;

  Image
    *crop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;

  RectangleInfo
    bounding_box,
    page;

  ssize_t
    y;

  /*
    Check crop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The virtual canvas (page) defines the coordinate system the geometry is
    expressed in; fall back to the pixel extent when no page is set.
  */
  bounding_box=image->page;
  if ((bounding_box.width == 0) || (bounding_box.height == 0))
    {
      bounding_box.width=image->columns;
      bounding_box.height=image->rows;
    }
  page=(*geometry);
  if (page.width == 0)
    page.width=bounding_box.width;
  if (page.height == 0)
    page.height=bounding_box.height;
  if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
      ((bounding_box.y-page.y) >= (ssize_t) page.height) ||
      ((page.x-bounding_box.x) > (ssize_t) image->columns) ||
      ((page.y-bounding_box.y) > (ssize_t) image->rows))
    {
      /*
        Crop is not within virtual canvas, return 1 pixel transparent image.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      crop_image=CloneImage(image,1,1,MagickTrue,exception);
      if (crop_image == (Image *) NULL)
        return((Image *) NULL);
      crop_image->background_color.alpha=(MagickRealType) TransparentAlpha;
      crop_image->alpha_trait=BlendPixelTrait;
      (void) SetImageBackgroundColor(crop_image,exception);
      crop_image->page=bounding_box;
      crop_image->page.x=(-1);
      crop_image->page.y=(-1);
      if (crop_image->dispose == BackgroundDispose)
        crop_image->dispose=NoneDispose;
      return(crop_image);
    }
  /*
    Translate the crop rectangle from virtual-canvas coordinates into pixel
    coordinates, clipping against the image on each axis.
  */
  if ((page.x < 0) && (bounding_box.x >= 0))
    {
      page.width+=page.x-bounding_box.x;
      page.x=0;
    }
  else
    {
      page.width-=bounding_box.x-page.x;
      page.x-=bounding_box.x;
      if (page.x < 0)
        page.x=0;
    }
  if ((page.y < 0) && (bounding_box.y >= 0))
    {
      page.height+=page.y-bounding_box.y;
      page.y=0;
    }
  else
    {
      page.height-=bounding_box.y-page.y;
      page.y-=bounding_box.y;
      if (page.y < 0)
        page.y=0;
    }
  if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
    page.width=image->columns-page.x;
  if ((geometry->width != 0) && (page.width > geometry->width))
    page.width=geometry->width;
  if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
    page.height=image->rows-page.y;
  if ((geometry->height != 0) && (page.height > geometry->height))
    page.height=geometry->height;
  bounding_box.x+=page.x;
  bounding_box.y+=page.y;
  if ((page.width == 0) || (page.height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    Initialize crop image attributes.
  */
  crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
  if (crop_image == (Image *) NULL)
    return((Image *) NULL);
  crop_image->page.width=image->page.width;
  crop_image->page.height=image->page.height;
  offset.x=(ssize_t) (bounding_box.x+bounding_box.width);
  offset.y=(ssize_t) (bounding_box.y+bounding_box.height);
  if ((offset.x > (ssize_t) image->page.width) ||
      (offset.y > (ssize_t) image->page.height))
    {
      crop_image->page.width=bounding_box.width;
      crop_image->page.height=bounding_box.height;
    }
  crop_image->page.x=bounding_box.x;
  crop_image->page.y=bounding_box.y;
  /*
    Crop image: copy the selected rectangle row by row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,crop_image,crop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) crop_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
      1,exception);
    q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) crop_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait crop_traits=GetPixelChannelTraits(crop_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (crop_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(crop_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(crop_image);
    }
    if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CropImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  crop_view=DestroyCacheView(crop_view);
  image_view=DestroyCacheView(image_view);
  crop_image->type=image->type;
  if (status == MagickFalse)
    crop_image=DestroyImage(crop_image);
  return(crop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C r o p   I m a g e   T o   T i l e s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CropImageToTiles() crops a single image, into a possible list of tiles.
%  This may include a single sub-region of the image.  This basically applies
%  all the normal geometry flags for Crop.
%
%      Image *CropImageToTiles(const Image *image,
%        const char *crop_geometry,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image The transformed image is returned as this parameter.
%
%    o crop_geometry: A crop geometry string.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline double MagickRound(double x)
{
  /*
    Round the fraction to nearest integer.
  */
  if ((x-floor(x)) < (ceil(x)-x))
    return(floor(x));
  return(ceil(x));
}

MagickExport Image *CropImageToTiles(const Image *image,
  const char *crop_geometry,ExceptionInfo *exception)
{
  Image
    *next,
    *crop_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  crop_image=NewImageList();
  next=NewImageList();
  flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
  if ((flags & AreaValue) != 0)
    {
      PointInfo
        delta,
        offset;

      RectangleInfo
        crop;

      size_t
        height,
        width;

      /*
        Crop into NxM tiles (@ flag).  geometry.width/height is the tile
        count; delta is the floating-point tile pitch.
      */
      width=image->columns;
      height=image->rows;
      if (geometry.width == 0)
        geometry.width=1;
      if (geometry.height == 0)
        geometry.height=1;
      if ((flags & AspectValue) == 0)
        {
          width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      else
        {
          width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      delta.x=(double) width/geometry.width;
      delta.y=(double) height/geometry.height;
      if (delta.x < 1.0)
        delta.x=1.0;
      if (delta.y < 1.0)
        delta.y=1.0;
      for (offset.y=0; offset.y < (double) height; )
      {
        if ((flags & AspectValue) == 0)
          {
            crop.y=(ssize_t) MagickRound((double) (offset.y-
              (geometry.y > 0 ? 0 : geometry.y)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) MagickRound((double) (offset.y+
              (geometry.y < 0 ? 0 : geometry.y)));
          }
        else
          {
            crop.y=(ssize_t) MagickRound((double) (offset.y-
              (geometry.y > 0 ? geometry.y : 0)));
            offset.y+=delta.y;   /* increment now to find width */
            /*
              NOTE(review): the test below is "geometry.y < -1" while the
              matching x computation uses "geometry.x < 0" -- confirm the
              asymmetry is intentional before changing either.
            */
            crop.height=(size_t) MagickRound((double) (offset.y+(geometry.y <
              -1 ? geometry.y : 0)));
          }
        crop.height-=crop.y;
        crop.y+=image->page.y;
        for (offset.x=0; offset.x < (double) width; )
        {
          if ((flags & AspectValue) == 0)
            {
              crop.x=(ssize_t) MagickRound((double) (offset.x-
                (geometry.x > 0 ? 0 : geometry.x)));
              offset.x+=delta.x;   /* increment now to find height */
              crop.width=(size_t) MagickRound((double) (offset.x+
                (geometry.x < 0 ? 0 : geometry.x)));
            }
          else
            {
              crop.x=(ssize_t) MagickRound((double) (offset.x-
                (geometry.x > 0 ? geometry.x : 0)));
              offset.x+=delta.x;   /* increment now to find height */
              crop.width=(size_t) MagickRound((double) (offset.x+
                (geometry.x < 0 ? geometry.x : 0)));
            }
          crop.width-=crop.x;
          crop.x+=image->page.x;
          next=CropImage(image,&crop,exception);
          if (next != (Image *) NULL)
            AppendImageToList(&crop_image,next);
        }
      }
      ClearMagickException(exception);
      return(crop_image);
    }
  if (((geometry.width == 0) && (geometry.height == 0)) ||
      ((flags & XValue) != 0) || ((flags & YValue) != 0))
    {
      /*
        Crop a single region at +X+Y.
      */
      crop_image=CropImage(image,&geometry,exception);
      if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
        {
          crop_image->page.width=geometry.width;
          crop_image->page.height=geometry.height;
          crop_image->page.x-=geometry.x;
          crop_image->page.y-=geometry.y;
        }
      return(crop_image);
    }
  if ((image->columns > geometry.width) || (image->rows > geometry.height))
    {
      RectangleInfo
        page;

      size_t
        height,
        width;

      ssize_t
        x,
        y;

      /*
        Crop into tiles of fixed size WxH, marching across the virtual
        canvas.
      */
      page=image->page;
      if (page.width == 0)
        page.width=image->columns;
      if (page.height == 0)
        page.height=image->rows;
      width=geometry.width;
      if (width == 0)
        width=page.width;
      height=geometry.height;
      if (height == 0)
        height=page.height;
      next=NewImageList();
      for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
      {
        for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
        {
          geometry.width=width;
          geometry.height=height;
          geometry.x=x;
          geometry.y=y;
          next=CropImage(image,&geometry,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      return(crop_image);
    }
  /*
    Image is smaller than the tile size: return an unmodified copy.
  */
  return(CloneImage(image,0,0,MagickTrue,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   E x c e r p t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExcerptImage() returns a excerpt of the image as defined by the geometry.
%
%  The format of the ExcerptImage method is:
%
%      Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to extend with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *ExcerptImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { #define ExcerptImageTag "Excerpt/Image" CacheView *excerpt_view, *image_view; Image *excerpt_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Allocate excerpt image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue, exception); if (excerpt_image == (Image *) NULL) return((Image *) NULL); /* Excerpt each row. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,excerpt_image,excerpt_image->rows,1) #endif for (y=0; y < (ssize_t) excerpt_image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y, geometry->width,1,exception); q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) excerpt_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait excerpt_traits=GetPixelChannelTraits(excerpt_image,channel); if ((traits == UndefinedPixelTrait) || (excerpt_traits == 
UndefinedPixelTrait)) continue; SetPixelChannel(excerpt_image,channel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(excerpt_image); } if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ExcerptImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } excerpt_view=DestroyCacheView(excerpt_view); image_view=DestroyCacheView(image_view); excerpt_image->type=image->type; if (status == MagickFalse) excerpt_image=DestroyImage(excerpt_image); return(excerpt_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E x t e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExtentImage() extends the image as defined by the geometry, gravity, and % image background color. Set the (x,y) offset of the geometry to move the % original image relative to the extended image. % % The format of the ExtentImage method is: % % Image *ExtentImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to extend with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ExtentImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { Image *extent_image; MagickBooleanType status; /* Allocate extent image. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue, exception); if (extent_image == (Image *) NULL) return((Image *) NULL); status=SetImageBackgroundColor(extent_image,exception); if (status == MagickFalse) { extent_image=DestroyImage(extent_image); return((Image *) NULL); } status=CompositeImage(extent_image,image,image->compose,MagickTrue, -geometry->x,-geometry->y,exception); return(extent_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l i p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FlipImage() creates a vertical mirror image by reflecting the pixels % around the central x-axis. % % The format of the FlipImage method is: % % Image *FlipImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception) { #define FlipImageTag "Flip/Image" CacheView *flip_view, *image_view; Image *flip_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); flip_image=CloneImage(image,0,0,MagickTrue,exception); if (flip_image == (Image *) NULL) return((Image *) NULL); /* Flip image. */ status=MagickTrue; progress=0; page=image->page; image_view=AcquireVirtualCacheView(image,exception); flip_view=AcquireAuthenticCacheView(flip_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,flip_image,flip_image->rows,1) #endif for (y=0; y < (ssize_t) flip_image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y- 1),flip_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) flip_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait flip_traits=GetPixelChannelTraits(flip_image,channel); if ((traits == UndefinedPixelTrait) || (flip_traits == UndefinedPixelTrait)) continue; SetPixelChannel(flip_image,channel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(flip_image); } if 
(SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,FlipImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } flip_view=DestroyCacheView(flip_view); image_view=DestroyCacheView(image_view); flip_image->type=image->type; if (page.height != 0) page.y=(ssize_t) (page.height-flip_image->rows-page.y); flip_image->page=page; if (status == MagickFalse) flip_image=DestroyImage(flip_image); return(flip_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FlopImage() creates a horizontal mirror image by reflecting the pixels % around the central y-axis. % % The format of the FlopImage method is: % % Image *FlopImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception) { #define FlopImageTag "Flop/Image" CacheView *flop_view, *image_view; Image *flop_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); flop_image=CloneImage(image,0,0,MagickTrue,exception); if (flop_image == (Image *) NULL) return((Image *) NULL); /* Flop each row. 
*/ status=MagickTrue; progress=0; page=image->page; image_view=AcquireVirtualCacheView(image,exception); flop_view=AcquireAuthenticCacheView(flop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,flop_image,flop_image->rows,1) #endif for (y=0; y < (ssize_t) flop_image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } q+=GetPixelChannels(flop_image)*flop_image->columns; for (x=0; x < (ssize_t) flop_image->columns; x++) { register ssize_t i; q-=GetPixelChannels(flop_image); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait flop_traits=GetPixelChannelTraits(flop_image,channel); if ((traits == UndefinedPixelTrait) || (flop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(flop_image,channel,p[i],q); } p+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,FlopImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } flop_view=DestroyCacheView(flop_view); image_view=DestroyCacheView(image_view); flop_image->type=image->type; if (page.width != 0) page.x=(ssize_t) (page.width-flop_image->columns-page.x); flop_image->page=page; if (status == MagickFalse) flop_image=DestroyImage(flop_image); return(flop_image); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R o l l I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RollImage() offsets an image as defined by x_offset and y_offset.
%
%  The format of the RollImage method is:
%
%      Image *RollImage(const Image *image,const ssize_t x_offset,
%        const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x_offset: the number of columns to roll in the horizontal direction.
%
%    o y_offset: the number of rows to roll in the vertical direction.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static MagickBooleanType CopyImageRegion(Image *destination,const Image *source,
  const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy,
  const ssize_t dx,const ssize_t dy,ExceptionInfo *exception)
{
  /*
    Copy a columns-by-rows block of pixels from (sx,sy) in source to (dx,dy)
    in destination, one scanline at a time.
  */
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  if (columns == 0)
    return(MagickTrue);
  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source,exception);
  destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,destination,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Transfer scanline.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
    q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      /*
        Copy only channels defined in both images.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait source_traits=GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((source_traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(destination,channel,p[i],q);
      }
      p+=GetPixelChannels(source);
      q+=GetPixelChannels(destination);
    }
    sync=SyncCacheViewAuthenticPixels(destination_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}

MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag  "Roll/Image"

  Image
    *roll_image;

  MagickStatusType
    status;

  RectangleInfo
    offset;

  /*
    Initialize roll image attributes.  The roll is performed as four
    quadrant copies after normalizing the offsets into [0,columns) and
    [0,rows).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  roll_image=CloneImage(image,0,0,MagickTrue,exception);
  if (roll_image == (Image *) NULL)
    return((Image *) NULL);
  offset.x=x_offset;
  offset.y=y_offset;
  while (offset.x < 0)
    offset.x+=(ssize_t) image->columns;
  while (offset.x >= (ssize_t) image->columns)
    offset.x-=(ssize_t) image->columns;
  while (offset.y < 0)
    offset.y+=(ssize_t) image->rows;
  while (offset.y >= (ssize_t) image->rows)
    offset.y-=(ssize_t) image->rows;
  /*
    Roll image: move each of the four quadrants to its wrapped position.
  */
  status=CopyImageRegion(roll_image,image,(size_t) offset.x,
    (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
    offset.y,0,0,exception);
  (void) SetImageProgress(image,RollImageTag,0,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
    (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
    exception);
  (void) SetImageProgress(image,RollImageTag,1,3);
  status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
    offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,2,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
    offset.y,0,0,offset.x,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,3,3);
  roll_image->type=image->type;
  if (status == MagickFalse)
    roll_image=DestroyImage(roll_image);
  return(roll_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S h a v e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShaveImage() shaves pixels from the image edges.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  The format of the ShaveImage method is:
%
%      Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o shave_image: Method ShaveImage returns a pointer to the shaved
%      image.  A null image is returned if there is a memory shortage or
%      if the image width or height is zero.
%
%    o image: the image.
%
%    o shave_info: Specifies a pointer to a RectangleInfo which defines the
%      region of the image to crop.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
  const RectangleInfo *shave_info,ExceptionInfo *exception)
{
  Image
    *shave_image;

  RectangleInfo
    geometry;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (((2*shave_info->width) >= image->columns) ||
      ((2*shave_info->height) >= image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /*
    Crop the interior region: the image less the shave margin on each edge.
  */
  SetGeometry(image,&geometry);
  geometry.width-=2*shave_info->width;
  geometry.height-=2*shave_info->height;
  geometry.x=(ssize_t) shave_info->width+image->page.x;
  geometry.y=(ssize_t) shave_info->height+image->page.y;
  shave_image=CropImage(image,&geometry,exception);
  if (shave_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Shrink the virtual canvas by the shaved margins.  Guard the unsigned
    subtractions: the size check above bounds image->columns/rows, not the
    cropped page, so an unconditional subtraction could wrap the size_t
    page dimensions to huge values when the page is smaller than twice the
    shave margin.
  */
  if (shave_image->page.width > (2*shave_info->width))
    shave_image->page.width-=2*shave_info->width;
  else
    shave_image->page.width=0;
  if (shave_image->page.height > (2*shave_info->height))
    shave_image->page.height-=2*shave_info->height;
  else
    shave_image->page.height=0;
  shave_image->page.x-=(ssize_t) shave_info->width;
  shave_image->page.y-=(ssize_t) shave_info->height;
  return(shave_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S p l i c e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpliceImage() splices a solid color into the image as defined by the
%  geometry.
% % The format of the SpliceImage method is: % % Image *SpliceImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to splice with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SpliceImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { #define SpliceImageTag "Splice/Image" CacheView *image_view, *splice_view; Image *splice_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo splice_geometry; ssize_t columns, y; /* Allocate splice image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); splice_geometry=(*geometry); splice_image=CloneImage(image,image->columns+splice_geometry.width, image->rows+splice_geometry.height,MagickTrue,exception); if (splice_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(splice_image,DirectClass,exception) == MagickFalse) { splice_image=DestroyImage(splice_image); return((Image *) NULL); } if ((IsPixelInfoGray(&splice_image->background_color) == MagickFalse) && (IsGrayColorspace(splice_image->colorspace) != MagickFalse)) (void) SetImageColorspace(splice_image,sRGBColorspace,exception); if ((splice_image->background_color.alpha_trait != UndefinedPixelTrait) && (splice_image->alpha_trait == UndefinedPixelTrait)) (void) SetImageAlpha(splice_image,OpaqueAlpha,exception); (void) SetImageBackgroundColor(splice_image,exception); /* Respect image geometry. 
*/ switch (image->gravity) { default: case UndefinedGravity: case NorthWestGravity: break; case NorthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; break; } case NorthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; break; } case WestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.width/2; break; } case CenterGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case EastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case SouthWestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } } /* Splice image. 
*/ status=MagickTrue; progress=0; columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns); image_view=AcquireVirtualCacheView(image,exception); splice_view=AcquireAuthenticCacheView(splice_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,splice_image,splice_geometry.y,1) #endif for (y=0; y < (ssize_t) splice_geometry.y; y++) { register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q+=GetPixelChannels(splice_image); for ( ; x < (ssize_t) splice_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) 
|| (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SpliceImageTag,progress, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,splice_image,splice_image->rows,2) #endif for (y=(ssize_t) (splice_geometry.y+splice_geometry.height); y < (ssize_t) splice_image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; if ((y < 0) || (y >= (ssize_t)splice_image->rows)) continue; p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height, splice_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } 
SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q+=GetPixelChannels(splice_image); for ( ; x < (ssize_t) splice_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SpliceImageTag,progress, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } splice_view=DestroyCacheView(splice_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) splice_image=DestroyImage(splice_image); return(splice_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImage() is a convenience method that behaves like ResizeImage() or % CropImage() but accepts 
scaling and/or cropping information as a region
%  geometry specification.  If the operation fails, the original image handle
%  is left as is.
%
%  This should only be used for single images.
%
%  This function destroys what it assumes to be a single image list.
%  If the input image is part of a larger list, all other images in that list
%  will be simply 'lost', not destroyed.
%
%  Also if the crop generates a list of images only the first image is resized.
%  And finally if the crop succeeds and the resize failed, you will get a
%  cropped image, as well as a 'false' or 'failed' report.
%
%  This function should probably be deprecated in favor of direct calls
%  to CropImageToTiles() or ResizeImage(), as appropriate.
%
%  The format of the TransformImage method is:
%
%      MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
%        const char *image_geometry,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.  The transformed image is returned as this parameter.
%
%    o crop_geometry: A crop geometry string.  This geometry defines a
%      subregion of the image to crop.
%
%    o image_geometry: An image geometry string.  This geometry defines the
%      final size of the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType TransformImage(Image **image,
  const char *crop_geometry,const char *image_geometry,ExceptionInfo *exception)
{
  Image
    *resize_image,
    *transform_image;

  RectangleInfo
    geometry;

  assert(image != (Image **) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  transform_image=(*image);
  if (crop_geometry != (const char *) NULL)
    {
      Image
        *crop_image;

      /*
        Crop image to a user specified size.
      */
      crop_image=CropImageToTiles(*image,crop_geometry,exception);
      if (crop_image == (Image *) NULL)
        transform_image=CloneImage(*image,0,0,MagickTrue,exception);
      else
        {
          /*
            Crop succeeded: release the original and keep only the first
            tile of the crop result (remaining tiles, if any, are not kept
            by this path -- see note above).
          */
          transform_image=DestroyImage(transform_image);
          transform_image=GetFirstImageInList(crop_image);
        }
      *image=transform_image;
    }
  if (image_geometry == (const char *) NULL)
    return(MagickTrue);
  /*
    Scale image to a user specified size.
  */
  (void) ParseRegionGeometry(transform_image,image_geometry,&geometry,
    exception);
  if ((transform_image->columns == geometry.width) &&
      (transform_image->rows == geometry.height))
    return(MagickTrue);  /* already the requested size; nothing to do */
  resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
    transform_image->filter,exception);
  if (resize_image == (Image *) NULL)
    return(MagickFalse);  /* NB: a successful crop is retained in *image */
  transform_image=DestroyImage(transform_image);
  transform_image=resize_image;
  *image=transform_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s p o s e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransposeImage() creates a horizontal mirror image by reflecting the pixels
%  around the central y-axis while rotating them by 90 degrees.
%
%  The format of the TransposeImage method is:
%
%      Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag  "Transpose/Image"

  CacheView
    *image_view,
    *transpose_view;

  Image
    *transpose_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Destination has swapped dimensions: rows x columns */
  transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transpose_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transpose image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,transpose_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Read source row (rows-y-1) and write it as the 1-pixel-wide column at
      x offset (rows-y-1) of the transposed image.
    */
    p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
      0,1,transpose_image->rows,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* Copy every channel defined in both source and destination */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait transpose_traits=GetPixelChannelTraits(transpose_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (transpose_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(transpose_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(transpose_image);
    }
    if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransposeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transpose_view=DestroyCacheView(transpose_view);
  image_view=DestroyCacheView(image_view);
  transpose_image->type=image->type;
  /* Page geometry rotates with the pixels */
  page=transpose_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  transpose_image->page=page;
  if (status == MagickFalse)
    transpose_image=DestroyImage(transpose_image);
  return(transpose_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s v e r s e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransverseImage() creates a vertical mirror image by reflecting the pixels
%  around the central x-axis while rotating them by 270 degrees.
%
%  The format of the TransverseImage method is:
%
%      Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag  "Transverse/Image"

  CacheView
    *image_view,
    *transverse_view;

  Image
    *transverse_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Destination has swapped dimensions: rows x columns */
  transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transverse_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transverse image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,transverse_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Read source row y; write it as the column at x offset (rows-y-1) of
      the destination.
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-1),
      0,1,transverse_image->rows,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      q starts one pixel past the end of the destination column and is
      stepped backwards each iteration, reversing the pixel order.
    */
    q+=GetPixelChannels(transverse_image)*image->columns;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      q-=GetPixelChannels(transverse_image);
      /* Copy every channel defined in both source and destination */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait transverse_traits=GetPixelChannelTraits(transverse_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (transverse_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(transverse_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
    }
    sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransverseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transverse_view=DestroyCacheView(transverse_view);
  image_view=DestroyCacheView(image_view);
  transverse_image->type=image->type;
  /* Page geometry rotates with the pixels and is mirrored within the page */
  page=transverse_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  if (page.width != 0)
    page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
  if (page.height != 0)
    page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
  transverse_image->page=page;
  if (status == MagickFalse)
    transverse_image=DestroyImage(transverse_image);
  return(transverse_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r i m I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TrimImage() trims pixels from the image edges.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  The format of the TrimImage method is:
%
%      Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
  Image
    *trimmed_image;

  RectangleInfo
    bounds;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Determine the rectangle to retain after trimming the edges.
  */
  bounds=GetImageBoundingBox(image,exception);
  if ((bounds.width != 0) && (bounds.height != 0))
    {
      /*
        Usual case: crop to the bounding box, offset by the page origin.
      */
      bounds.x+=image->page.x;
      bounds.y+=image->page.y;
      return(CropImage(image,&bounds,exception));
    }
  /*
    Degenerate bounding box: return a 1x1 transparent stand-in image with
    a sentinel page offset of (-1,-1).
  */
  trimmed_image=CloneImage(image,1,1,MagickTrue,exception);
  if (trimmed_image == (Image *) NULL)
    return((Image *) NULL);
  trimmed_image->background_color.alpha=(MagickRealType) TransparentAlpha;
  trimmed_image->alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(trimmed_image,exception);
  trimmed_image->page=image->page;
  trimmed_image->page.x=(-1);
  trimmed_image->page.y=(-1);
  return(trimmed_image);
}
DRB018-plusplus-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
Data race on outLen due to ++ operation.
Adding private (outLen) can avoid race condition. But it is wrong semantically.
Data races on outLen also cause output[outLen++] to have data races.

Data race pairs (we allow two pairs to preserve the original code pattern):
  1. outLen@72 vs. outLen@72
  2. output[]@72 vs. output[]@72
*/

#include <stdlib.h>
#include <stdio.h>

/* Buffers shared (and raced on) by all threads of the parallel loop */
int input[1000];
int output[1000];

int main()
{
  int i ;
  int inLen=1000 ;
  int outLen = 0;

  /* Serial initialization: input[i] == i */
  for (i=0; i<inLen; ++i)
    input[i]= i;

  /*
    INTENTIONAL data race (this is a DataRaceBench "yes-race" kernel):
    outLen++ is an unsynchronized read-modify-write performed concurrently
    by every thread, so both the counter update and the output[] store race.
    Do NOT "fix" this -- the race is the benchmark's ground truth.
  */
#pragma omp parallel for schedule(dynamic)
  for (i=0; i<inLen; ++i)
  {
    output[outLen++] = input[i] ;
  }

  printf("output[500]=%d\n",output[500]);
  return 0;
}
omp_binding_forex.c
#include <omp.h>
#include <stdio.h>
#include <sched.h>

/*
 * Demo: repeatedly fork an OpenMP parallel region (99 times) and have each
 * member thread report its 1-based rank, the team size, and the CPU it is
 * currently scheduled on -- useful for observing thread-to-core binding.
 */
int main( int argc, char**argv ) {
  int pass = 1;

  while (pass < 100) {
    #pragma omp parallel
    {
      int rank = omp_get_thread_num() + 1;
      int team = omp_get_num_threads();
      int cpu  = sched_getcpu();

      printf( "Running from thread %d of %d on cpu %2d!\n", rank, team, cpu );
    }
    ++pass;
  }
  return 0;
}
nco_rgr.c
/* $Header$ */

/* Purpose: NCO regridding utilities */

/* Copyright (C) 2015--present Charlie Zender
   This file is part of NCO, the netCDF Operators. NCO is free software.
   You may redistribute and/or modify NCO under the terms of the
   3-Clause BSD License with exceptions described in the LICENSE file */

#include "nco_rgr.h" /* Regridding */

extern double min_dbl(double a, double b);
extern double max_dbl(double a, double b);
inline double min_dbl(double a, double b){return (a < b) ? a : b;}
inline double max_dbl(double a, double b){return (a > b) ? a : b;}

int /* O [enm] Return code */
nco_rgr_ctl /* [fnc] Control regridding logic */
(rgr_sct * const rgr, /* I/O [sct] Regridding structure */
 trv_tbl_sct * const trv_tbl) /* I/O [sct] Traversal Table */
{
  /* Purpose: Control regridding logic */
  int rcd=NCO_NOERR;
  const char fnc_nm[]="nco_rgr_ctl()";

  nco_bool flg_grd=False; /* [flg] Create SCRIP-format grid file */
  nco_bool flg_map=False; /* [flg] Create ESMF-format mapfile */
  nco_bool flg_nfr=False; /* [flg] Infer SCRIP-format grid file */
  nco_bool flg_smf=False; /* [flg] ESMF regridding (unused) */
  nco_bool flg_s1d=False; /* [flg] Unpack sparse-1D CLM/ELM variables */
  nco_bool flg_tps=False; /* [flg] Tempest regridding (unused) */
  nco_bool flg_vrt=False; /* [flg] Interpolate to new vertical grid */
  nco_bool flg_wgt=False; /* [flg] Regrid with external weights */

  /* Main control branching occurs here
     Branching complexity and utility will increase as regridding features are added */
  if(rgr->flg_grd) flg_grd=True;
  if(rgr->flg_grd_src && rgr->flg_grd_dst && rgr->flg_wgt) flg_map=True;
  if(rgr->flg_nfr) flg_nfr=True;
  if(rgr->flg_wgt && !(rgr->flg_grd_src && rgr->flg_grd_dst)) flg_wgt=True;
  if(rgr->flg_s1d) flg_s1d=True;
  if(rgr->fl_vrt) flg_vrt=True;
  /* ESMF and Tempest on-line paths are never enabled above, so guard them */
  assert(!flg_smf);
  assert(!flg_tps);

  /* NB: rcd is overwritten by each step that executes below; only the
     last-executed step's return code propagates to the caller */

  /* Create SCRIP-format grid file */
  if(flg_grd) rcd=nco_grd_mk(rgr);

  /* Create ESMF-format map file */
  if(flg_map) rcd=nco_map_mk(rgr);

  /* Infer SCRIP-format grid file from data file */
  if(flg_nfr) rcd=nco_grd_nfr(rgr);

  /* Interpolate data file to new vertical grid */
  if(flg_vrt) rcd=nco_ntp_vrt(rgr,trv_tbl);

  /* Unpack sparse-1D CLM/ELM variables into full file */
  if(flg_s1d) rcd=nco_s1d_unpack(rgr,trv_tbl);

  /* Regrid data horizontally using weights from mapping file */
  if(flg_wgt) rcd=nco_rgr_wgt(rgr,trv_tbl);

  /* Regrid using ESMF library
     20150701: On-line weight generation with ESMF never worked well and was abandoned */
  if(flg_smf){
#ifdef ENABLE_ESMF
    (void)fprintf(stderr,"%s: %s calling nco_rgr_esmf() to generate and apply regridding map\n",nco_prg_nm_get(),fnc_nm);
    rcd=nco_rgr_esmf(rgr);
    /* Close output and free dynamic memory */
    (void)nco_fl_out_cls(rgr->fl_out,rgr->fl_out_tmp,rgr->out_id);
#else /* !ENABLE_ESMF */
    (void)fprintf(stderr,"%s: ERROR %s reports attempt to use ESMF regridding without built-in support. Re-configure with --enable_esmf.\n",nco_prg_nm_get(),fnc_nm);
    nco_exit(EXIT_FAILURE);
#endif /* !ENABLE_ESMF */
  } /* !flg_smf */

  /* Regrid using TempestRemap regridding
     20180314: Weight generation with Tempest is implemented off-line via ncremap, not internally on-line
     However, do not deprecate this since TempestRemap2 has a library that could be accessed on-line */
  if(flg_tps) rcd=nco_rgr_tps(rgr);

  return rcd;
} /* end nco_rgr_ctl() */

rgr_sct * /* O [sct] Pointer to free'd regridding structure */
nco_rgr_free /* [fnc] Deallocate regridding structure */
(rgr_sct *rgr) /* I/O [sct] Regridding structure */
{
  /* Purpose: Free all dynamic memory in regridding structure */

  /* free() standalone command-line arguments */
  if(rgr->cmd_ln) rgr->cmd_ln=(char *)nco_free(rgr->cmd_ln);
  if(rgr->grd_ttl) rgr->grd_ttl=(char *)nco_free(rgr->grd_ttl);
  if(rgr->fl_grd_src) rgr->fl_grd_src=(char *)nco_free(rgr->fl_grd_src);
  if(rgr->fl_grd_dst) rgr->fl_grd_dst=(char *)nco_free(rgr->fl_grd_dst);
  if(rgr->fl_hrz) rgr->fl_hrz=(char *)nco_free(rgr->fl_hrz);
  if(rgr->fl_in) rgr->fl_in=(char *)nco_free(rgr->fl_in);
  if(rgr->fl_map) rgr->fl_map=(char
*)nco_free(rgr->fl_map);
  if(rgr->fl_msh) rgr->fl_msh=(char *)nco_free(rgr->fl_msh);
  if(rgr->fl_out) rgr->fl_out=(char *)nco_free(rgr->fl_out);
  if(rgr->fl_out_tmp) rgr->fl_out_tmp=(char *)nco_free(rgr->fl_out_tmp);
  if(rgr->fl_vrt) rgr->fl_vrt=(char *)nco_free(rgr->fl_vrt);
  if(rgr->var_nm) rgr->var_nm=(char *)nco_free(rgr->var_nm);
  if(rgr->xtn_var) rgr->xtn_var=(char **)nco_sng_lst_free(rgr->xtn_var,rgr->xtn_nbr);

  /* free() strings associated with grid properties */
  if(rgr->fl_grd) rgr->fl_grd=(char *)nco_free(rgr->fl_grd);
  if(rgr->fl_hnt_dst) rgr->fl_hnt_dst=(char *)nco_free(rgr->fl_hnt_dst);
  if(rgr->fl_hnt_src) rgr->fl_hnt_src=(char *)nco_free(rgr->fl_hnt_src);
  if(rgr->fl_skl) rgr->fl_skl=(char *)nco_free(rgr->fl_skl);
  if(rgr->fl_ugrid) rgr->fl_ugrid=(char *)nco_free(rgr->fl_ugrid);

  /* Tempest */
  if(rgr->drc_tps) rgr->drc_tps=(char *)nco_free(rgr->drc_tps);

  /* free() memory used to construct KVMs */
  if(rgr->rgr_nbr > 0) rgr->rgr_arg=nco_sng_lst_free(rgr->rgr_arg,rgr->rgr_nbr);

  /* free() memory copied from KVMs */
  if(rgr->area_nm) rgr->area_nm=(char *)nco_free(rgr->area_nm);
  if(rgr->bnd_nm) rgr->bnd_nm=(char *)nco_free(rgr->bnd_nm);
  if(rgr->bnd_tm_nm) rgr->bnd_tm_nm=(char *)nco_free(rgr->bnd_tm_nm);
  if(rgr->col_nm_in) rgr->col_nm_in=(char *)nco_free(rgr->col_nm_in);
  if(rgr->col_nm_out) rgr->col_nm_out=(char *)nco_free(rgr->col_nm_out);
  if(rgr->frc_nm) rgr->frc_nm=(char *)nco_free(rgr->frc_nm);
  if(rgr->ilev_nm_in) rgr->ilev_nm_in=(char *)nco_free(rgr->ilev_nm_in);
  if(rgr->ilev_nm_out) rgr->ilev_nm_out=(char *)nco_free(rgr->ilev_nm_out);
  if(rgr->lat_bnd_nm) rgr->lat_bnd_nm=(char *)nco_free(rgr->lat_bnd_nm);
  if(rgr->lat_nm_in) rgr->lat_nm_in=(char *)nco_free(rgr->lat_nm_in);
  if(rgr->lat_nm_out) rgr->lat_nm_out=(char *)nco_free(rgr->lat_nm_out);
  if(rgr->lat_vrt_nm) rgr->lat_vrt_nm=(char *)nco_free(rgr->lat_vrt_nm);
  if(rgr->lat_wgt_nm) rgr->lat_wgt_nm=(char *)nco_free(rgr->lat_wgt_nm);
  if(rgr->lev_nm_in) rgr->lev_nm_in=(char *)nco_free(rgr->lev_nm_in);
  if(rgr->lev_nm_out) rgr->lev_nm_out=(char *)nco_free(rgr->lev_nm_out);
  if(rgr->lon_bnd_nm) rgr->lon_bnd_nm=(char *)nco_free(rgr->lon_bnd_nm);
  if(rgr->lon_nm_in) rgr->lon_nm_in=(char *)nco_free(rgr->lon_nm_in);
  if(rgr->lon_nm_out) rgr->lon_nm_out=(char *)nco_free(rgr->lon_nm_out);
  if(rgr->lon_vrt_nm) rgr->lon_vrt_nm=(char *)nco_free(rgr->lon_vrt_nm);
  if(rgr->msk_nm) rgr->msk_nm=(char *)nco_free(rgr->msk_nm);
  if(rgr->plev_nm_in) rgr->plev_nm_in=(char *)nco_free(rgr->plev_nm_in);
  if(rgr->vrt_nm) rgr->vrt_nm=(char *)nco_free(rgr->vrt_nm);

  /* Lastly, free() regrid structure itself */
  if(rgr) rgr=(rgr_sct *)nco_free(rgr);

  /* NOTE(review): presumably nco_free() returns NULL (NCO convention), so
     callers receive NULL and must not dereference the result -- confirm */
  return rgr;
} /* end nco_rgr_free() */

rgr_sct * /* O [sct] Regridding structure */
nco_rgr_ini /* [fnc] Initialize regridding structure */
(const char * const cmd_ln, /* I [sng] Command-line */
 const int in_id, /* I [id] Input netCDF file ID */
 char **rgr_arg, /* [sng] Regridding arguments */
 const int rgr_arg_nbr, /* [nbr] Number of regridding arguments */
 char * const rgr_in, /* I [sng] File containing fields to be regridded */
 char * const rgr_out, /* I [sng] File containing regridded fields */
 char * const rgr_grd_src, /* I [sng] File containing input grid */
 char * const rgr_grd_dst, /* I [sng] File containing destination grid */
 char * const rgr_hrz, /* I [sng] File containing horizontal coordinate grid */
 char * const rgr_map, /* I [sng] File containing mapping weights from source to destination grid */
 char * const rgr_var, /* I [sng] Variable for special regridding treatment */
 char * const rgr_vrt, /* I [sng] File containing vertical coordinate grid */
 const double wgt_vld_thr, /* I [frc] Weight threshold for valid destination value */
 char **xtn_var, /* [sng] I Extensive variables */
 const int xtn_nbr) /* [nbr] I Number of extensive variables */
{
  /* Purpose: Initialize regridding structure */
  const char fnc_nm[]="nco_rgr_ini()";

  rgr_sct *rgr;

  /* Allocate */
  rgr=(rgr_sct *)nco_malloc(sizeof(rgr_sct));

  /* Initialize variables directly or
indirectly set via command-line (except for key-value arguments) */ rgr->cmd_ln=strdup(cmd_ln); /* [sng] Command-line */ rgr->flg_usr_rqs=False; /* [flg] User requested regridding */ rgr->out_id=int_CEWI; /* [id] Output netCDF file ID */ rgr->in_id=in_id; /* [id] Input netCDF file ID */ rgr->rgr_arg=rgr_arg; /* [sng] Regridding arguments */ rgr->rgr_nbr=rgr_arg_nbr; /* [nbr] Number of regridding arguments */ rgr->drc_tps=NULL; /* [sng] Directory where Tempest grids, meshes, and weights are stored */ rgr->flg_grd_src= rgr_grd_src ? True : False; /* [flg] User-specified input grid */ rgr->fl_grd_src=rgr_grd_src; /* [sng] File containing input grid */ rgr->flg_grd_dst= rgr_grd_dst ? True : False; /* [flg] User-specified destination grid */ rgr->fl_grd_dst=rgr_grd_dst; /* [sng] File containing destination grid */ rgr->fl_in=rgr_in; /* [sng] File containing fields to be regridded */ rgr->fl_out=rgr_out; /* [sng] File containing regridded fields */ rgr->fl_out_tmp=NULL_CEWI; /* [sng] Temporary file containing regridded fields */ rgr->flg_wgt= rgr_map ? True : False; /* [flg] User-specified mapping weights */ rgr->fl_map=rgr_map; /* [sng] File containing mapping weights from source to destination grid */ rgr->fl_hrz=rgr_hrz; /* [sng] [sng] File containing horizontal coordinate grid (for S1D) */ rgr->fl_vrt=rgr_vrt; /* [sng] [sng] File containing vertical coordinate grid */ rgr->var_nm=rgr_var; /* [sng] Variable for special regridding treatment */ rgr->xtn_var=xtn_var; /* [sng] Extensive variables */ rgr->xtn_nbr=xtn_nbr; /* [nbr] Number of extensive variables */ /* Did user explicitly request regridding? 
*/ if(rgr_arg_nbr > 0 || rgr_grd_src != NULL || rgr_grd_dst != NULL || rgr_map != NULL || rgr_vrt != NULL) rgr->flg_usr_rqs=True; /* Initialize arguments after copying */ if(!rgr->fl_out) rgr->fl_out=(char *)strdup("/data/zender/rgr/rgr_out.nc"); if(!rgr->fl_grd_dst) rgr->fl_grd_dst=(char *)strdup("/data/zender/scrip/grids/remap_grid_T42.nc"); // if(!rgr->var_nm) rgr->var_nm=(char *)strdup("ORO"); if(nco_dbg_lvl_get() >= nco_dbg_crr){ (void)fprintf(stderr,"%s: INFO %s reports ",nco_prg_nm_get(),fnc_nm); (void)fprintf(stderr,"flg_usr_rqs = %d, ",rgr->flg_usr_rqs); (void)fprintf(stderr,"rgr_nbr = %d, ",rgr->rgr_nbr); (void)fprintf(stderr,"fl_grd_src = %s, ",rgr->fl_grd_src ? rgr->fl_grd_src : "NULL"); (void)fprintf(stderr,"fl_grd_dst = %s, ",rgr->fl_grd_dst ? rgr->fl_grd_dst : "NULL"); (void)fprintf(stderr,"fl_hrz = %s, ",rgr->fl_hrz ? rgr->fl_hrz : "NULL"); (void)fprintf(stderr,"fl_in = %s, ",rgr->fl_in ? rgr->fl_in : "NULL"); (void)fprintf(stderr,"fl_out = %s, ",rgr->fl_out ? rgr->fl_out : "NULL"); (void)fprintf(stderr,"fl_out_tmp = %s, ",rgr->fl_out_tmp ? rgr->fl_out_tmp : "NULL"); (void)fprintf(stderr,"fl_map = %s, ",rgr->fl_map ? rgr->fl_map : "NULL"); (void)fprintf(stderr,"fl_vrt = %s, ",rgr->fl_vrt ? 
rgr->fl_vrt : "NULL"); (void)fprintf(stderr,"\n"); } /* endif dbg */ /* Flags */ if(wgt_vld_thr == NC_MIN_DOUBLE){ rgr->flg_rnr=False; }else if(wgt_vld_thr >= 0.0 && wgt_vld_thr <= 1.0){ /* NB: Weight thresholds of 0.0 or nearly zero can lead to underflow or divide-by-zero errors */ // const double wgt_vld_thr_min=1.0e-10; /* [frc] Minimum weight threshold for valid destination value */ rgr->flg_rnr=True; rgr->wgt_vld_thr=wgt_vld_thr; }else{ (void)fprintf(stderr,"%s: ERROR weight threshold must be in [0.0,1.0] and user supplied wgt_vld_thr = %g\n",nco_prg_nm_get(),wgt_vld_thr); nco_exit(EXIT_FAILURE); } /* endif */ /* Parse extended kvm options */ char *sng_fnl=NULL; int cnv_nbr; /* [nbr] Number of elements converted by sscanf() */ int rgr_var_idx; /* [idx] Index over rgr_lst (i.e., all names explicitly specified in all "--rgr var1[,var2]=val" options) */ int rgr_var_nbr=0; kvm_sct *rgr_lst=NULL; /* [sct] List of all regrid specifications */ if(rgr_arg_nbr > 0){ /* Join arguments together */ sng_fnl=nco_join_sng(rgr_arg,rgr_arg_nbr); rgr_lst=nco_arg_mlt_prs(sng_fnl); if(sng_fnl) sng_fnl=(char *)nco_free(sng_fnl); /* Count number of keys */ for(rgr_var_idx=0;(rgr_lst+rgr_var_idx)->key;rgr_var_idx++,rgr_var_nbr++);/* !rgr_var_idx */ } /* !rgr_arg_nbr */ /* NULL-initialize key-value properties required for string variables */ rgr->area_nm=NULL; /* [sng] Name of variable containing gridcell area */ rgr->bnd_nm=NULL; /* [sng] Name of dimension to employ for spatial bounds */ rgr->bnd_tm_nm=NULL; /* [sng] Name of dimension to employ for temporal bounds */ rgr->col_nm_in=NULL; /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */ rgr->col_nm_out=NULL; /* [sng] Name of horizontal spatial output dimension on unstructured grid */ rgr->frc_nm=NULL; /* [sng] Name of variable containing gridcell fraction */ rgr->ilev_nm_in=NULL; /* [sng] Name of input dimension to recognize as vertical dimension at layer interfaces */ rgr->ilev_nm_out=NULL; /* 
[sng] Name of output vertical dimension at layer interfaces */ rgr->lat_bnd_nm=NULL; /* [sng] Name of rectangular boundary variable for latitude */ rgr->lat_dmn_nm=NULL; /* [sng] Name of latitude dimension in inferred grid */ rgr->lat_nm_in=NULL; /* [sng] Name of input dimension to recognize as latitude */ rgr->lat_nm_out=NULL; /* [sng] Name of output dimension for latitude */ rgr->lat_vrt_nm=NULL; /* [sng] Name of non-rectangular boundary variable for latitude */ rgr->lat_wgt_nm=NULL; /* [sng] Name of variable containing latitude weights */ rgr->lev_nm_in=NULL; /* [sng] Name of input dimension to recognize as vertical dimension at layer midpoints */ rgr->lev_nm_out=NULL; /* [sng] Name of output vertical dimension at layer midpoints */ rgr->lon_bnd_nm=NULL; /* [sng] Name of rectangular boundary variable for longitude */ rgr->lon_dmn_nm=NULL; /* [sng] Name of longitude dimension in inferred grid */ rgr->lon_nm_in=NULL; /* [sng] Name of dimension to recognize as longitude */ rgr->lon_nm_out=NULL; /* [sng] Name of output dimension for longitude */ rgr->lon_vrt_nm=NULL; /* [sng] Name of non-rectangular boundary variable for longitude */ rgr->msk_nm=NULL; /* [sng] Name of variable containing destination mask */ rgr->plev_nm_in=NULL; /* [sng] Name of input variable recognize as pure-pressure coordinate */ rgr->sgs_frc_nm=NULL; /* [sng] Name of variable sub-gridscale fraction */ rgr->sgs_msk_nm=NULL; /* [sng] Name of variable sub-gridscale mask */ rgr->vrt_nm=NULL; /* [sng] Name of dimension to employ for vertices */ /* Initialize key-value properties used in grid and weight generation */ rgr->area_mth=1; /* [enm] Method to compute grid cell area */ rgr->edg_typ=nco_edg_nil; /* [enm] Edge/Arc-type for triangle edges */ rgr->fl_grd=NULL; /* [sng] Name of SCRIP grid file to create */ rgr->fl_hnt_dst=NULL; /* [sng] ERWG hint destination */ rgr->fl_hnt_src=NULL; /* [sng] ERWG hint source */ rgr->fl_msh=NULL; /* [sng] Name of SCRIP intersection mesh file to create */ 
rgr->fl_skl=NULL; /* [sng] Name of skeleton data file to create */ rgr->fl_ugrid=NULL; /* [sng] Name of UGRID grid file to create */ rgr->flg_area_out=True; /* [flg] Add area to output */ rgr->flg_cf_units=False; /* [flg] Generate CF-compliant (breaks ERWG 7.1.0r-) units fields in SCRIP-format grid files */ rgr->flg_cll_msr=True; /* [flg] Add cell_measures attribute */ rgr->flg_crv=False; /* [flg] Use curvilinear coordinates */ rgr->flg_dgn_area=False; /* [flg] Diagnose rather than copy inferred area */ rgr->flg_dgn_bnd=False; /* [flg] Diagnose rather than copy inferred bounds */ rgr->flg_erwg_units=True; /* [flg] Generate ERWG 7.1.0r-compliant SCRIP-format grid files */ rgr->flg_grd=False; /* [flg] Create SCRIP-format grid file */ rgr->flg_msk_out=False; /* [flg] Add mask to output */ rgr->flg_nfr=False; /* [flg] Infer SCRIP-format grid file */ rgr->flg_s1d=False; /* [flg] Unpack sparse-1D CLM/ELM variables */ rgr->flg_stg=True; /* [flg] Write staggered grid with FV output */ rgr->grd_ttl=strdup("None given (supply with --rgr grd_ttl=\"Grid Title\")"); /* [enm] Grid title */ rgr->grd_typ=nco_grd_2D_eqa; /* [enm] Grid type */ rgr->idx_dbg=0; /* [idx] Index of gridcell for debugging */ rgr->lat_drc=nco_grd_lat_drc_s2n; /* [enm] Latitude grid direction */ rgr->lat_typ=nco_grd_lat_eqa; /* [enm] Latitude grid type */ rgr->lon_typ=nco_grd_lon_Grn_ctr; /* [enm] Longitude grid type */ rgr->lat_nbr=180; /* [nbr] Number of latitudes in destination grid */ rgr->lon_nbr=360; /* [nbr] Number of longitudes in destination grid */ rgr->lat_crv=0.0; /* [dgr] Latitudinal curvilinearity */ rgr->lon_crv=0.0; /* [dgr] Longitudinal curvilinearity */ rgr->lat_sth=NC_MAX_DOUBLE; /* [dgr] Latitude of southern edge of grid */ rgr->lon_wst=NC_MAX_DOUBLE; /* [dgr] Longitude of western edge of grid */ rgr->lat_nrt=NC_MAX_DOUBLE; /* [dgr] Latitude of northern edge of grid */ rgr->lon_est=NC_MAX_DOUBLE; /* [dgr] Longitude of eastern edge of grid */ rgr->msk_var=NULL; /* [sng] Mask-template 
variable */ rgr->ply_tri_mth=nco_ply_tri_mth_csz; /* [enm] Polygon-to-triangle decomposition method */ rgr->sgs_nrm=1.0; /* [sng] Sub-gridscale normalization */ rgr->tst=0L; /* [enm] Generic key for testing (undocumented) */ rgr->ntp_mth=nco_ntp_log; /* [enm] Interpolation method */ rgr->xtr_mth=nco_xtr_fll_ngh; /* [enm] Extrapolation method */ rgr->xtr_nsp=8; /* [sng] Extrapolation number of source points */ rgr->xtr_xpn=2.0; /* [sng] Exponent of distance in extrapolation (absolute value) */ rgr->wgt_typ=nco_wgt_con; /* [enm] Weight generation method */ /* Parse key-value properties */ char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */ for(rgr_var_idx=0;rgr_var_idx<rgr_var_nbr;rgr_var_idx++){ if(!strcmp(rgr_lst[rgr_var_idx].key,"grid") || !strcasecmp(rgr_lst[rgr_var_idx].key,"scrip")){ rgr->fl_grd=(char *)strdup(rgr_lst[rgr_var_idx].val); rgr->flg_grd=True; continue; } /* !grid */ if(!strcmp(rgr_lst[rgr_var_idx].key,"hnt_dst") || !strcmp(rgr_lst[rgr_var_idx].key,"fl_hnt_dst")){ rgr->fl_hnt_dst=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !hnt_dst */ if(!strcmp(rgr_lst[rgr_var_idx].key,"hnt_src") || !strcmp(rgr_lst[rgr_var_idx].key,"fl_hnt_src")){ rgr->fl_hnt_src=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !hnt_src */ if(!strcmp(rgr_lst[rgr_var_idx].key,"msk_var") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_var") || !strcmp(rgr_lst[rgr_var_idx].key,"mask") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_variable")){ rgr->msk_var=(char *)strdup(rgr_lst[rgr_var_idx].val); rgr->flg_msk_out=True; continue; } /* !msk_var */ if(!strcmp(rgr_lst[rgr_var_idx].key,"msh") || !strcmp(rgr_lst[rgr_var_idx].key,"mesh")){ rgr->fl_msh=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !msh */ if(!strcmp(rgr_lst[rgr_var_idx].key,"skl")){ rgr->fl_skl=(char *)strdup(rgr_lst[rgr_var_idx].val); rgr->flg_grd=True; continue; } /* !skl */ if(!strcasecmp(rgr_lst[rgr_var_idx].key,"ugrid")){ rgr->fl_ugrid=(char *)strdup(rgr_lst[rgr_var_idx].val); 
rgr->flg_nfr=True; continue; } /* !ugrid */ if(!strcasecmp(rgr_lst[rgr_var_idx].key,"fl_hrz") || !strcasecmp(rgr_lst[rgr_var_idx].key,"hrz")){ rgr->fl_hrz=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !hrz */ if(!strcasecmp(rgr_lst[rgr_var_idx].key,"fl_vrt") || !strcasecmp(rgr_lst[rgr_var_idx].key,"vrt")){ rgr->fl_vrt=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !vrt */ if(!strcmp(rgr_lst[rgr_var_idx].key,"no_area") || !strcmp(rgr_lst[rgr_var_idx].key,"no_area_out")){ rgr->flg_area_out=False; continue; } /* !area */ if(!strcmp(rgr_lst[rgr_var_idx].key,"no_msk") || !strcmp(rgr_lst[rgr_var_idx].key,"no_msk_out") || !strcmp(rgr_lst[rgr_var_idx].key,"no_mask") || !strcmp(rgr_lst[rgr_var_idx].key,"no_mask_out")){ rgr->flg_msk_out=False; continue; } /* !msk */ if(!strcmp(rgr_lst[rgr_var_idx].key,"msk_out") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_out")){ rgr->flg_msk_out=True; continue; } /* !mask */ if(!strcmp(rgr_lst[rgr_var_idx].key,"cell_measures") || !strcmp(rgr_lst[rgr_var_idx].key,"cll_msr")){ rgr->flg_cll_msr=True; continue; } /* !cell_measures */ if(!strcmp(rgr_lst[rgr_var_idx].key,"no_cell_measures") || !strcmp(rgr_lst[rgr_var_idx].key,"no_cll_msr")){ rgr->flg_cll_msr=False; continue; } /* !cell_measures */ if(!strcmp(rgr_lst[rgr_var_idx].key,"curvilinear") || !strcmp(rgr_lst[rgr_var_idx].key,"crv")){ rgr->flg_crv=True; continue; } /* !curvilinear */ if(!strcmp(rgr_lst[rgr_var_idx].key,"diagnose_area") || !strcmp(rgr_lst[rgr_var_idx].key,"dgn_area")){ rgr->flg_dgn_area=True; continue; } /* !diagnose_area */ if(!strcmp(rgr_lst[rgr_var_idx].key,"diagnose_bounds") || !strcmp(rgr_lst[rgr_var_idx].key,"dgn_bnd")){ rgr->flg_dgn_bnd=True; continue; } /* !diagnose_bounds */ if(!strcmp(rgr_lst[rgr_var_idx].key,"cf_units") || !strcmp(rgr_lst[rgr_var_idx].key,"CF_units")){ rgr->flg_cf_units=True; rgr->flg_erwg_units=False; continue; } /* !erwg_units */ if(!strcmp(rgr_lst[rgr_var_idx].key,"cell_area_quad")){ rgr->area_mth=2; continue; } /* !area_nco 
*/ if(!strcmp(rgr_lst[rgr_var_idx].key,"cell_area_nco")){ rgr->area_mth=1; continue; } /* !area_nco */ if(!strcmp(rgr_lst[rgr_var_idx].key,"edg_typ") || !strcmp(rgr_lst[rgr_var_idx].key,"tri_arc") || !strcmp(rgr_lst[rgr_var_idx].key,"vrt_cnc")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"grt_crc") || !strcasecmp(rgr_lst[rgr_var_idx].val,"gtc") || !strcasecmp(rgr_lst[rgr_var_idx].val,"great_circle") || !strcasecmp(rgr_lst[rgr_var_idx].val,"geodesic") || !strcasecmp(rgr_lst[rgr_var_idx].val,"orthodrome")){ rgr->edg_typ=nco_edg_gtc; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"sml_crc") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ltr") || !strcasecmp(rgr_lst[rgr_var_idx].val,"small_circle") || !strcasecmp(rgr_lst[rgr_var_idx].val,"latitude_triangle") || !strcasecmp(rgr_lst[rgr_var_idx].val,"true")){ rgr->edg_typ=nco_edg_smc; (void)fprintf(stderr,"%s: WARNING Requested to run with small-circle edges. This option has not yet been tested and validated. Use only at your own risk.\n",nco_prg_nm_get()); }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"crt") || !strcasecmp(rgr_lst[rgr_var_idx].val,"cartesian") || !strcasecmp(rgr_lst[rgr_var_idx].val,"planar") || !strcasecmp(rgr_lst[rgr_var_idx].val,"flat")){ rgr->edg_typ=nco_edg_crt; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !edg_typ */ if(!strcmp(rgr_lst[rgr_var_idx].key,"erwg_units") || !strcmp(rgr_lst[rgr_var_idx].key,"esmf_units") || !strcmp(rgr_lst[rgr_var_idx].key,"degrees")){ rgr->flg_cf_units=False; rgr->flg_erwg_units=True; continue; } /* !erwg_units */ if(!strcmp(rgr_lst[rgr_var_idx].key,"infer") || !strcmp(rgr_lst[rgr_var_idx].key,"nfr")){ rgr->flg_nfr=True; continue; } /* !infer */ if(!strcmp(rgr_lst[rgr_var_idx].key,"no_stagger") || !strcmp(rgr_lst[rgr_var_idx].key,"no_stg")){ rgr->flg_stg=False; continue; } /* !stagger */ 
if(!strcmp(rgr_lst[rgr_var_idx].key,"grd_ttl") || !strcmp(rgr_lst[rgr_var_idx].key,"ttl")){ if(rgr->grd_ttl) rgr->grd_ttl=(char *)nco_free(rgr->grd_ttl); rgr->grd_ttl=(char *)strdup(rgr_lst[rgr_var_idx].val); /* 20180828 Replace unquoted tildes with spaces (like LaTeX, NCL) so ncremap users can put tildes in place of spaces in ttl 20180905 Reverted this since quoting command in ncremap is superior solution */ if(False){ size_t ttl_lng=strlen(rgr->grd_ttl); for(size_t ttl_idx=0L;ttl_idx<ttl_lng;ttl_idx++) if(rgr->grd_ttl[ttl_idx] == '~'){ if(ttl_idx == 0L) rgr->grd_ttl[ttl_idx]=' '; // Always convert tilde to space if first character else if(rgr->grd_ttl[ttl_idx-1L] != '\\') rgr->grd_ttl[ttl_idx]=' '; // Convert tilde in other locations unless backslash-quoted } /* !tilde */ } /* !0 */ continue; } /* !grd_ttl */ if(!strcmp(rgr_lst[rgr_var_idx].key,"idx_dbg")){ rgr->idx_dbg=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd); continue; } /* !idx_dbg */ if(!strcmp(rgr_lst[rgr_var_idx].key,"latlon")){ cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%ld,%ld",&rgr->lat_nbr,&rgr->lon_nbr); assert(cnv_nbr == 2); continue; } /* !latlon */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lonlat")){ cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%ld,%ld",&rgr->lon_nbr,&rgr->lat_nbr); assert(cnv_nbr == 2); continue; } /* !lonlat */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nbr")){ rgr->lat_nbr=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd); continue; } /* !lat_nbr */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_nbr")){ rgr->lon_nbr=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd); continue; } /* !lon_nbr */ if(!strcasecmp(rgr_lst[rgr_var_idx].key,"snwe")){ 
cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%lf,%lf,%lf,%lf",&rgr->lat_sth,&rgr->lat_nrt,&rgr->lon_wst,&rgr->lon_est); if(cnv_nbr != 4) (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); assert(cnv_nbr == 4); if(cnv_nbr != 4) abort(); /* CEWI Use cnv_nbr at least once outside of assert() to avoid gcc 4.8.2 set-but-not-used warning */ continue; } /* !snwe */ if(!strcasecmp(rgr_lst[rgr_var_idx].key,"wesn")){ if(cnv_nbr != 4) cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%lf,%lf,%lf,%lf",&rgr->lon_wst,&rgr->lon_est,&rgr->lat_sth,&rgr->lat_nrt); assert(cnv_nbr == 4); continue; } /* !wesn */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_crv")){ rgr->lat_crv=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); continue; } /* !lat_crv */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_crv")){ rgr->lon_crv=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); continue; } /* !lon_crv */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_sth")){ rgr->lat_sth=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); // rgr->lat_typ=nco_grd_lat_bb; continue; } /* !lat_sth */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_wst")){ rgr->lon_wst=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); rgr->lon_typ=nco_grd_lon_bb; continue; } /* !lon_wst */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nrt")){ rgr->lat_nrt=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); //rgr->lat_typ=nco_grd_lat_bb; continue; } /* !lat_nrt */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_est")){ 
rgr->lon_est=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); rgr->lon_typ=nco_grd_lon_bb; continue; } /* !lon_est */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_drc")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"s2n") || !strcasecmp(rgr_lst[rgr_var_idx].val,"south2north") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ston") || !strcasecmp(rgr_lst[rgr_var_idx].val,"southnorth")){ rgr->lat_drc=nco_grd_lat_drc_s2n; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"n2s") || !strcasecmp(rgr_lst[rgr_var_idx].val,"north2south") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ntos") || !strcasecmp(rgr_lst[rgr_var_idx].val,"northsouth")){ rgr->lat_drc=nco_grd_lat_drc_n2s; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !lat_drc */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_typ")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"cap") || !strcasecmp(rgr_lst[rgr_var_idx].val,"fv") || !strcasecmp(rgr_lst[rgr_var_idx].val,"fix") || !strcasecmp(rgr_lst[rgr_var_idx].val,"yarmulke")){ rgr->lat_typ=nco_grd_lat_fv; rgr->grd_typ=nco_grd_2D_fv; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"eqa") || !strcasecmp(rgr_lst[rgr_var_idx].val,"rgl") || !strcasecmp(rgr_lst[rgr_var_idx].val,"unf") || !strcasecmp(rgr_lst[rgr_var_idx].val,"uni")){ rgr->lat_typ=nco_grd_lat_eqa; rgr->grd_typ=nco_grd_2D_eqa; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"gss")){ rgr->lat_typ=nco_grd_lat_gss; rgr->grd_typ=nco_grd_2D_gss; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !lat_typ */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_typ")){ 
if(!strcasecmp(rgr_lst[rgr_var_idx].val,"180_wst") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wst_180")) rgr->lon_typ=nco_grd_lon_180_wst; else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"180_ctr") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ctr_180")) rgr->lon_typ=nco_grd_lon_180_ctr; else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"Grn_wst") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wst_Grn")) rgr->lon_typ=nco_grd_lon_Grn_wst; else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"Grn_ctr") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ctr_Grn")) rgr->lon_typ=nco_grd_lon_Grn_ctr; else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !lon_typ */ if(!strcmp(rgr_lst[rgr_var_idx].key,"area_nm")){ rgr->area_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !area_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"bnd_nm")){ rgr->bnd_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !bnd_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"bnd_tm_nm")){ rgr->bnd_tm_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !bnd_tm_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"col_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"col_nm")){ rgr->col_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !col_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"col_nm_out")){ rgr->col_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !col_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"frc_nm")){ rgr->frc_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !frc_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"ilev_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"ilev_nm")){ rgr->ilev_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !ilev_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"ilev_nm_out")){ rgr->ilev_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !ilev_nm_out */ 
if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_bnd_nm")){ rgr->lat_bnd_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_bnd_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_dmn_nm") || !strcmp(rgr_lst[rgr_var_idx].key,"lat_dmn")){ rgr->lat_dmn_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_dmn_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"lat_nm")){ rgr->lat_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nm_out")){ rgr->lat_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_vrt_nm")){ rgr->lat_vrt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_vrt_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_wgt_nm")){ rgr->lat_wgt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_wgt_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lev_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"lev_nm")){ rgr->lev_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lev_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lev_nm_out")){ rgr->lev_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lev_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_bnd_nm")){ rgr->lon_bnd_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_bnd_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_dmn_nm") || !strcmp(rgr_lst[rgr_var_idx].key,"lon_dmn")){ rgr->lon_dmn_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_dmn_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"lon_nm")){ rgr->lon_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_nm_out")){ rgr->lon_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_vrt_nm")){ rgr->lon_vrt_nm=(char 
*)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_vrt_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"plev_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"plev_nm")){ rgr->plev_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !plev_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"ply_tri")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"csz")){ rgr->ply_tri_mth=nco_ply_tri_mth_csz; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"ctr") || !strcasecmp(rgr_lst[rgr_var_idx].val,"centroid") || !strcasecmp(rgr_lst[rgr_var_idx].val,"snl") || !strcasecmp(rgr_lst[rgr_var_idx].val,"mat")){ rgr->ply_tri_mth=nco_ply_tri_mth_ctr; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !ply_tri */ if(!strcmp(rgr_lst[rgr_var_idx].key,"sgs_frc_nm")){ rgr->sgs_frc_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !sgs_frc */ if(!strcmp(rgr_lst[rgr_var_idx].key,"sgs_msk_nm")){ rgr->sgs_msk_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !sgs_msk */ if(!strcmp(rgr_lst[rgr_var_idx].key,"sgs_nrm")){ rgr->sgs_nrm=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); continue; } /* !sgs_nrm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"tst")){ rgr->tst=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd); continue; } /* !tst */ if(!strcmp(rgr_lst[rgr_var_idx].key,"msk_nm") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_nm")){ rgr->msk_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); rgr->flg_msk_out=True; continue; } /* !msk_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"vrt_nm")){ rgr->vrt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !vrt_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"vrt_ntp") || 
!strcmp(rgr_lst[rgr_var_idx].key,"ntp_mth")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"lin") || !strcasecmp(rgr_lst[rgr_var_idx].val,"linear") || !strcasecmp(rgr_lst[rgr_var_idx].val,"lnr")){ rgr->ntp_mth=nco_ntp_lnr; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"log") || !strcasecmp(rgr_lst[rgr_var_idx].val,"logarithmic") || !strcasecmp(rgr_lst[rgr_var_idx].val,"lgr")){ rgr->ntp_mth=nco_ntp_log; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !ntp_mth */ if(!strcmp(rgr_lst[rgr_var_idx].key,"xtr_mth")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"nrs_ngh") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ngh") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nearest_neighbor") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nn")){ rgr->xtr_mth=nco_xtr_fll_ngh; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"mss_val") || !strcasecmp(rgr_lst[rgr_var_idx].val,"msv") || !strcasecmp(rgr_lst[rgr_var_idx].val,"fll_val") || !strcasecmp(rgr_lst[rgr_var_idx].val,"missing_value")){ rgr->xtr_mth=nco_xtr_fll_msv; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !xtr_mth */ if(!strcmp(rgr_lst[rgr_var_idx].key,"xtr_nsp") || !strcmp(rgr_lst[rgr_var_idx].key,"xtr_nbr_src_pnt") || !strcmp(rgr_lst[rgr_var_idx].key,"number_source_points") || !strcmp(rgr_lst[rgr_var_idx].key,"extrapolation_number_source_points")){ rgr->xtr_nsp=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd); continue; } /* !xtr_nsp */ if(!strcmp(rgr_lst[rgr_var_idx].key,"xtr_xpn") || !strcmp(rgr_lst[rgr_var_idx].key,"extrapolation_exponent") || 
!strcmp(rgr_lst[rgr_var_idx].key,"exponent_of_distance_in_extrapolation")){ rgr->xtr_xpn=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); continue; } /* !xtr_xpn */ if(!strcmp(rgr_lst[rgr_var_idx].key,"wgt_typ") || !strcmp(rgr_lst[rgr_var_idx].key,"weight_type")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"con") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nco_con") || !strcasecmp(rgr_lst[rgr_var_idx].val,"conservative") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wgt_con")) rgr->wgt_typ=nco_wgt_con; else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"dwe") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nco_dwe") || !strcasecmp(rgr_lst[rgr_var_idx].val,"distance_weighted") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wgt_dwe")) rgr->wgt_typ=nco_wgt_dwe; else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"bln") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nco_bln") || !strcasecmp(rgr_lst[rgr_var_idx].val,"bilinear") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wgt_bln")) rgr->wgt_typ=nco_wgt_bln; else { (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !wgt_typ */ (void)fprintf(stderr,"%s: ERROR %s reports unrecognized key-value option to --rgr switch: %s\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key); nco_exit(EXIT_FAILURE); } /* end for */ /* Eliminate sticky wickets: Give nfr precedence over grd */ if(rgr->flg_nfr && rgr->flg_grd) rgr->flg_grd=False; /* Revert to defaults for any names not specified on command-line */ if(!rgr->area_nm) rgr->area_nm=(char *)strdup("area"); /* [sng] Name of variable containing gridcell area */ if(!rgr->bnd_nm) rgr->bnd_nm=(char *)strdup("nvertices"); /* [sng] Name of dimension to employ for spatial bounds */ /* NB: CESM uses nbnd and ilev for temporal and vertical bounds, respectively (CESM outputs no 
horizontal spatial bounds). NCO defaults to nbnd for all bounds with two endpoints. */ if(!rgr->bnd_tm_nm) rgr->bnd_tm_nm=(char *)strdup("nbnd"); /* [sng] Name of dimension to employ for temporal bounds */ if(!rgr->col_nm_in) rgr->col_nm_in=(char *)strdup("ncol"); /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */ if(!rgr->frc_nm) rgr->frc_nm=(char *)strdup("frac_b"); /* [sng] Name of variable containing gridcell fraction */ if(!rgr->ilev_nm_in) rgr->ilev_nm_in=(char *)strdup("ilev"); /* [sng] Name of input dimension to recognize as vertical dimension at layer interfaces */ if(!rgr->lat_bnd_nm) rgr->lat_bnd_nm=(char *)strdup("lat_bnds"); /* [sng] Name of rectangular boundary variable for latitude */ if(!rgr->lat_nm_in) rgr->lat_nm_in=(char *)strdup("lat"); /* [sng] Name of input dimension to recognize as latitude */ if(!rgr->lev_nm_in) rgr->lev_nm_in=(char *)strdup("lev"); /* [sng] Name of input dimension to recognize as vertical dimension at layer midpoints */ if(!rgr->lat_vrt_nm) rgr->lat_vrt_nm=(char *)strdup("lat_vertices"); /* [sng] Name of non-rectangular boundary variable for latitude */ if(!rgr->lat_wgt_nm) rgr->lat_wgt_nm=(char *)strdup("gw"); /* [sng] Name of variable containing latitude weights */ if(!rgr->lon_bnd_nm) rgr->lon_bnd_nm=(char *)strdup("lon_bnds"); /* [sng] Name of rectangular boundary variable for longitude */ if(!rgr->lon_nm_in) rgr->lon_nm_in=(char *)strdup("lon"); /* [sng] Name of dimension to recognize as longitude */ if(!rgr->lon_vrt_nm) rgr->lon_vrt_nm=(char *)strdup("lon_vertices"); /* [sng] Name of non-rectangular boundary variable for longitude */ if(!rgr->msk_nm) rgr->msk_nm=(char *)strdup("mask"); /* [sng] Name of variable containing destination mask */ if(!rgr->vrt_nm) rgr->vrt_nm=(char *)strdup("nv"); /* [sng] Name of dimension to employ for vertices */ if(!rgr->plev_nm_in) rgr->plev_nm_in=(char *)strdup("plev"); /* [sng] Name of variable to recognize as pure pressure coordinate */ /* 
Derived from defaults and command-line arguments */ // On second thought, do not strdup() these here. This way, NULL means user never specified lon/lat-out names // if(!rgr->col_nm_out) rgr->col_nm_out=(char *)strdup("ncol"); /* [sng] Name of dimension to output as horizontal spatial dimension on unstructured grid */ // if(!rgr->lat_nm_out) rgr->lat_nm_out=(char *)strdup("lat"); /* [sng] Name of dimension to output as latitude */ // if(!rgr->lon_nm_out) rgr->lon_nm_out=(char *)strdup("lon"); /* [sng] Name of dimension to output as longitude */ // if(!rgr->lat_nm_out) rgr->lat_nm_out=(char *)strdup(rgr_lat_nm_in); /* [sng] Name of output dimension for latitude */ // if(!rgr->lon_nm_out) rgr->lon_nm_out=(char *)strdup(rgr_lon_nm_in); /* [sng] Name of output dimension for longitude */ /* Free kvms */ if(rgr_lst) rgr_lst=nco_kvm_lst_free(rgr_lst,rgr_var_nbr); return rgr; } /* end nco_rgr_ini() */ int /* O [enm] Return code */ nco_ntp_vrt /* [fnc] Interpolate vertically */ (rgr_sct * const rgr, /* I/O [sct] Regridding structure */ trv_tbl_sct * const trv_tbl) /* I/O [sct] Traversal Table */ { /* Purpose: Interpolate fields to new vertical grid specified in a vertical file */ const char fnc_nm[]="nco_ntp_vrt()"; /* [sng] Function name */ char *fl_tpl; /* [sng] Template file (vertical grid file) */ char *fl_pth_lcl=NULL; int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */ int fll_md_old; /* [enm] Old fill mode */ int in_id; /* I [id] Input netCDF file ID */ int tpl_id; /* [id] Input netCDF file ID (for vertical grid template) */ int md_open; /* [enm] Mode flag for nc_open() call */ int out_id; /* I [id] Output netCDF file ID */ int rcd=NC_NOERR; int dmn_idx; /* [idx] Dimension index */ int rec_idx; /* [idx] Record dimension index */ nco_bool FL_RTR_RMT_LCN; nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ 
nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stderr,"%s: INFO %s obtaining vertical grid from %s\n",nco_prg_nm_get(),fnc_nm,rgr->fl_vrt); /* Duplicate (because nco_fl_mk_lcl() free()'s its fl_in) */ fl_tpl=(char *)strdup(rgr->fl_vrt); /* Make sure file is on local system and is readable or die trying */ fl_tpl=nco_fl_mk_lcl(fl_tpl,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); /* Open file using appropriate buffer size hints and verbosity */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; rcd+=nco_fl_open(fl_tpl,md_open,&bfr_sz_hnt,&tpl_id); /* Formula-terms for hybrid pressure vertical grid on unstructured CAM/EAM horizontal grid: prs_mdp[time,lev,col]=P0*hyam[lev] +PS[time,col]*hybm[lev] prs_ntf[time,lev,col]=P0*hyai[ilev]+PS[time,col]*hybi[ilev] */ /* Formula-terms for hybrid pressure vertical grid on ECMWF RLL horizontal grid: prs_mdp[time,lev,lat,lon]=hyam[lev] +exp(lnsp[time,lat,lon])*hybm[lev] prs_ntf[time,lev,lat,lon]=hyai[ilev]+exp(lnsp[time,lat,lon])*hybi[ilev] */ /* For simplicity and code re-use, all single-variable (not hybrid-variable) coordinate systems adopt "lev" semantics This includes pure pressure coordinates and eventually will include sigma, depth, and height coordinates Only hybrid coordinates will refer to the "ilev" levels and indices All single coordinate systems will refer to "lev" levels and indices */ int dpt_id; /* [id] Ocean depth ID */ int hyai_id=NC_MIN_INT; /* [id] Hybrid A coefficient at layer interfaces ID */ int hyam_id=NC_MIN_INT; /* [id] Hybrid A coefficient at layer midpoints ID */ int hybi_id=NC_MIN_INT; /* [id] Hybrid B coefficient at layer interfaces ID */ int hybm_id=NC_MIN_INT; /* [id] Hybrid B coefficient at layer midpoints ID */ int ilev_id=NC_MIN_INT; /* 
[id] Interface pressure ID */ int lev_id=NC_MIN_INT; /* [id] Midpoint pressure ID */ int p0_id=NC_MIN_INT; /* [id] Reference pressure ID */ int ps_id=NC_MIN_INT; /* [id] Surface pressure ID */ int plev_id; /* [id] Air pressure ID */ nco_bool flg_grd_hyb_cameam=False; /* [flg] Hybrid coordinate vertical grid uses CAM/EAM conventions */ nco_bool flg_grd_hyb_ecmwf=False; /* [flg] Hybrid coordinate vertical grid uses ECMWF conventions */ nco_bool flg_grd_in_dpt=False; /* [flg] Input depth coordinate vertical grid */ nco_bool flg_grd_in_hyb=False; /* [flg] Input hybrid coordinate vertical grid */ nco_bool flg_grd_in_prs=False; /* [flg] Input pressure coordinate vertical grid */ nco_bool flg_grd_out_dpt=False; /* [flg] Output depth coordinate vertical grid */ nco_bool flg_grd_out_hyb=False; /* [flg] Output hybrid coordinate vertical grid */ nco_bool flg_grd_out_prs=False; /* [flg] Output pressure coordinate vertical grid */ nco_bool flg_vrt_tm=False; /* [flg] Output depends on time-varying vertical grid */ nco_grd_vrt_typ_enm nco_vrt_grd_in=nco_vrt_grd_nil; /* [enm] Vertical grid type for input grid */ nco_grd_vrt_typ_enm nco_vrt_grd_out=nco_vrt_grd_nil; /* [enm] Vertical grid type for output grid */ nco_ntp_typ_enm ntp_mth=rgr->ntp_mth; /* [enm] Interpolation method */ nco_xtr_typ_enm xtr_mth=rgr->xtr_mth; /* [enm] Extrapolation method */ /* Determine output grid type */ if((rcd=nco_inq_varid_flg(tpl_id,"hyai",&hyai_id)) == NC_NOERR){ nco_vrt_grd_out=nco_vrt_grd_hyb; /* EAM */ flg_grd_out_hyb=True; }else if((rcd=nco_inq_varid_flg(tpl_id,"plev",&plev_id)) == NC_NOERR){ nco_vrt_grd_out=nco_vrt_grd_prs; /* NCEP */ flg_grd_out_prs=True; }else if((rcd=nco_inq_varid_flg(tpl_id,"depth",&dpt_id)) == NC_NOERR){ nco_vrt_grd_out=nco_vrt_grd_dpt; /* MPAS */ flg_grd_out_dpt=True; }else{ /* !hyai */ (void)fprintf(stdout,"%s: ERROR %s Unable to locate hybrid-sigma/pressure or pure-pressure vertical grid coordinate information in vertical grid file\n",nco_prg_nm_get(),fnc_nm); 
(void)fprintf(stdout,"%s: HINT ensure vertical grid coordinate file contains a valid vertical grid coordinate\n",nco_prg_nm_get()); return NCO_ERR; } /* !hyai */ if(flg_grd_out_hyb){ rcd=nco_inq_varid(tpl_id,"hyai",&hyai_id); rcd=nco_inq_varid(tpl_id,"hyam",&hyam_id); rcd=nco_inq_varid(tpl_id,"hybi",&hybi_id); rcd=nco_inq_varid(tpl_id,"hybm",&hybm_id); rcd=nco_inq_varid(tpl_id,"P0",&p0_id); rcd=nco_inq_varid_flg(tpl_id,"ilev",&ilev_id); rcd=nco_inq_varid_flg(tpl_id,"lev",&lev_id); rcd=nco_inq_varid_flg(tpl_id,"PS",&ps_id); } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ rcd=nco_inq_varid(tpl_id,"plev",&lev_id); } /* !flg_grd_out_prs */ if(flg_grd_out_dpt){ rcd=nco_inq_varid(tpl_id,"depth",&lev_id); } /* !flg_grd_out_dpt */ const int hyai_id_tpl=hyai_id; /* [id] Hybrid A coefficient at layer interfaces ID */ const int hyam_id_tpl=hyam_id; /* [id] Hybrid A coefficient at layer midpoints ID */ const int hybi_id_tpl=hybi_id; /* [id] Hybrid B coefficient at layer interfaces ID */ const int hybm_id_tpl=hybm_id; /* [id] Hybrid B coefficient at layer midpoints ID */ const int p0_id_tpl=p0_id; /* [id] Reference pressure ID */ const int ilev_id_tpl=ilev_id; /* [id] Interface pressure ID */ const int lev_id_tpl=lev_id; /* [id] Midpoint pressure ID */ const int ps_id_tpl=ps_id; /* [id] Surface pressure ID */ char *ilev_nm_in=NULL; /* [sng] Interface level name */ char *lev_nm_in; char *ilev_nm_out; char *lev_nm_out; char *plev_nm_in; /* [sng] Pure-pressure coordnate name */ char dmn_nm[NC_MAX_NAME]; /* [sng] Dimension name */ int *dmn_ids_in=NULL; /* [nbr] Input file dimension IDs */ int *dmn_ids_out=NULL; /* [nbr] Output file dimension IDs */ int *dmn_ids_rec=NULL; /* [id] Unlimited dimension IDs */ int dmn_nbr_ps; /* [nbr] Number of dimensions in PS variable */ int dmn_nbr_in; /* [nbr] Number of dimensions in input file */ int dmn_nbr_out; /* [nbr] Number of dimensions in output file */ int dmn_id_ilev_out=NC_MIN_INT; /* [id] Dimension ID for interface level in output file */ 
int dmn_id_lev_out=NC_MIN_INT; /* [id] Dimension ID for midpoint level in output file */ int dmn_id_ilev_in=NC_MIN_INT; /* [id] Dimension ID for interface level in file to be interpolated */ int dmn_id_lev_in=NC_MIN_INT; /* [id] Dimension ID for midpoint level in file to be interpolated */ int dmn_id_tm_in=NC_MIN_INT; /* [id] Dimension ID for time in file to be interpolated */ int dmn_nbr_rec; /* [nbr] Number of unlimited dimensions */ int dmn_idx_tm_in=NC_MIN_INT; /* [idx] Index of record coordinate in input hybrid coordinate PS field */ long *dmn_cnt_in=NULL; long *dmn_cnt_out=NULL; long *dmn_srt=NULL; long ilev_nbr_in; long lev_nbr_in; long ilev_nbr_out; long lev_nbr_out; long tm_idx=0L; /* [idx] Current timestep */ long tm_nbr=1L; /* [idx] Number of timesteps in vertical grid */ long tm_nbr_in=1L; /* [nbr] Number of timesteps in input vertical grid definition */ long tm_nbr_out=1L; /* [nbr] Number of timesetps in output vertical grid definition */ size_t grd_idx; /* [idx] Gridcell index */ size_t grd_sz_in=1L; /* [nbr] Number of elements in single layer of input grid */ size_t grd_sz_out=1L; /* [nbr] Number of elements in single layer of output grid */ size_t idx_fst; /* [idx] Index-offset to current surface pressure timeslice */ if(flg_grd_out_hyb){ /* Interrogate hyai/hyam to obtain ilev/lev dimensions */ rcd=nco_inq_vardimid(tpl_id,hyai_id,&dmn_id_ilev_out); rcd=nco_inq_vardimid(tpl_id,hyam_id,&dmn_id_lev_out); rcd=nco_inq_dimlen(tpl_id,dmn_id_ilev_out,&ilev_nbr_out); rcd=nco_inq_dimlen(tpl_id,dmn_id_lev_out,&lev_nbr_out); rcd=nco_inq_dimname(tpl_id,dmn_id_ilev_out,dmn_nm); ilev_nm_out=strdup(dmn_nm); rcd=nco_inq_dimname(tpl_id,dmn_id_lev_out,dmn_nm); lev_nm_out=strdup(dmn_nm); /* Interrogate PS, if any, for horizontal dimensions */ if(ps_id_tpl != NC_MIN_INT){ rcd=nco_inq_varndims(tpl_id,ps_id,&dmn_nbr_ps); dmn_nbr_out=dmn_nbr_ps; dmn_ids_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int)); dmn_cnt_out=(long *)nco_malloc((dmn_nbr_out+1)*sizeof(long)); 
dmn_srt=(long *)nco_malloc((dmn_nbr_out+1)*sizeof(long)); rcd=nco_inq_vardimid(tpl_id,ps_id,dmn_ids_out); rcd=nco_inq_unlimdims(tpl_id,&dmn_nbr_rec,(int *)NULL); if(dmn_nbr_rec > 0){ dmn_ids_rec=(int *)nco_malloc(dmn_nbr_rec*sizeof(int)); rcd=nco_inq_unlimdims(tpl_id,&dmn_nbr_rec,dmn_ids_rec); } /* !dmn_nbr_rec */ for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ rcd=nco_inq_dimlen(tpl_id,dmn_ids_out[dmn_idx],dmn_cnt_out+dmn_idx); /* 20190330: Allow possibility that PS has time dimension > 1 We want horizontal not temporal dimensions to contribute to grd_sz Temporal dimension is usually unlimited Only multiply grd_sz by fixed (non-unlimited) dimension sizes Corner-case exception when PS spatial dimension on unstructured grid is unlimited */ for(rec_idx=0;rec_idx<dmn_nbr_rec;rec_idx++) if(dmn_ids_out[dmn_idx] == dmn_ids_rec[rec_idx]) break; if(rec_idx == dmn_nbr_rec || dmn_nbr_out == 1) grd_sz_out*=dmn_cnt_out[dmn_idx]; if(rec_idx != dmn_nbr_rec && dmn_nbr_out > 1 && dmn_cnt_out[dmn_idx] > 1L){ tm_nbr_out=dmn_cnt_out[dmn_idx]; if(tm_nbr_out > 1L) flg_vrt_tm=True; } /* tm_nbr_out > 1 */ dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ if(dmn_ids_rec) dmn_ids_rec=(int *)nco_free(dmn_ids_rec); } /* !ps_id_tpl */ } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ /* Interrogate plev to obtain plev dimensions */ rcd=nco_inq_vardimid(tpl_id,lev_id,&dmn_id_lev_out); rcd=nco_inq_dimlen(tpl_id,dmn_id_lev_out,&lev_nbr_out); rcd=nco_inq_dimname(tpl_id,dmn_id_lev_out,dmn_nm); ilev_nbr_out=lev_nbr_out; } /* !flg_grd_out_prs */ double *hyai_out=NULL; /* [frc] Hybrid A coefficient at layer interfaces on output grid */ double *hyam_out=NULL; /* [frc] Hybrid A coefficient at layer midpoints on output grid */ double *hybi_out=NULL; /* [frc] Hybrid B coefficient at layer interfaces on output grid */ double *hybm_out=NULL; /* [frc] Hybrid B coefficient at layer midpoints on output grid */ double *ilev_out=NULL; /* [hPa] Interface pressure on output grid */ double *lev_out=NULL; /* [hPa] Midpoint pressure 
on output grid */ double *ps_out=NULL; /* [Pa] Surface pressure on output grid */ double *prs_mdp_out=NULL; /* [Pa] Midpoint pressure on output grid */ double *prs_ntf_out=NULL; /* [Pa] Interface pressure on output grid */ double p0_out; /* [Pa] Reference pressure on output grid */ long ilev_idx; /* [idx] Interface level index */ long lev_idx; /* [idx] Level index */ const nc_type crd_typ_out=NC_DOUBLE; nc_type var_typ_rgr; /* [enm] Variable type used during regridding */ var_typ_rgr=NC_DOUBLE; /* NB: Perform interpolation in double precision */ if(flg_grd_out_hyb){ hyai_out=(double *)nco_malloc(ilev_nbr_out*nco_typ_lng(var_typ_rgr)); hyam_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr)); hybi_out=(double *)nco_malloc(ilev_nbr_out*nco_typ_lng(var_typ_rgr)); hybm_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr)); ilev_out=(double *)nco_malloc(ilev_nbr_out*nco_typ_lng(var_typ_rgr)); lev_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr)); rcd=nco_get_var(tpl_id,hyai_id,hyai_out,crd_typ_out); rcd=nco_get_var(tpl_id,hyam_id,hyam_out,crd_typ_out); rcd=nco_get_var(tpl_id,hybi_id,hybi_out,crd_typ_out); rcd=nco_get_var(tpl_id,hybm_id,hybm_out,crd_typ_out); rcd=nco_get_var(tpl_id,p0_id,&p0_out,crd_typ_out); if(ilev_id_tpl != NC_MIN_INT){ rcd=nco_get_var(tpl_id,ilev_id,ilev_out,crd_typ_out); }else{ /* p0 is in Pa but ilev traditionally given in hPa */ for(ilev_idx=0;ilev_idx<ilev_nbr_out;ilev_idx++) ilev_out[ilev_idx]=p0_out*(hyai_out[ilev_idx]+hybi_out[ilev_idx])/100.0; } /* !ilev_id_tpl */ if(lev_id_tpl != NC_MIN_INT){ rcd=nco_get_var(tpl_id,lev_id,lev_out,crd_typ_out); }else{ /* p0 is in Pa but lev traditionally given in hPa */ for(lev_idx=0;lev_idx<lev_nbr_out;lev_idx++) lev_out[lev_idx]=p0_out*(hyam_out[lev_idx]+hybm_out[lev_idx])/100.0; } /* !ilev_id_tpl */ } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ lev_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr)); rcd=nco_get_var(tpl_id,lev_id,lev_out,crd_typ_out); } /* 
!flg_grd_out_prs */ /* For vertical interpolation (unlike horizontal regridding), the destination grid is known a priori Straightforward copy all variables and attributes that define grid from fl_tpl to output would work in theory, but would not allow dynamic identification and relabeling of names */ /* if(flg_grd_out_hyb){ const int vrt_grd_lst_nbr=8; const char *vrt_grd_lst[]={"/hyai","/hyam","/hybi","/hybm","/ilev","/lev","/P0","/PS"}; } if(flg_grd_out_prs){ const int vrt_grd_lst_nbr=1; const char *vrt_grd_lst[]={"/plev"}; } */ /* Above this line, fl_tpl and tpl_id refer to vertical coordinate file (i.e., template file) Below this line, fl_in and in_id refer to input file to be vertically regridded Do not close template file until all grid variables have been copied For maximum efficiency, do this after defining all interpolated variables in output That way no file needs to exit define mode or enter data mode more than once However this requires keeping template file, input data file, and output file simulataneously open */ in_id=rgr->in_id; out_id=rgr->out_id; /* Determine input grid type */ if(rgr->plev_nm_in) plev_nm_in=rgr->plev_nm_in; if((rcd=nco_inq_varid_flg(in_id,"hyai",&hyai_id)) == NC_NOERR){ nco_vrt_grd_in=nco_vrt_grd_hyb; /* EAM */ flg_grd_in_hyb=True; }else if((rcd=nco_inq_varid_flg(in_id,plev_nm_in,&plev_id)) == NC_NOERR){ nco_vrt_grd_in=nco_vrt_grd_prs; /* NCEP */ flg_grd_in_prs=True; }else if((rcd=nco_inq_varid_flg(in_id,"depth",&dpt_id)) == NC_NOERR){ nco_vrt_grd_in=nco_vrt_grd_dpt; /* NCEP */ flg_grd_in_dpt=True; }else{ /* !hyai */ (void)fprintf(stdout,"%s: ERROR %s Unable to locate hybrid-sigma/pressure or pure-pressure vertical grid coordinate information in input file\n",nco_prg_nm_get(),fnc_nm); (void)fprintf(stdout,"%s: HINT only invoke vertical interpolation on files that contain variables with vertical dimensions, and with known vertical coordinate variable names. 
These default to \"hyai\" for hybrid, \"plev\" for pressure, \"depth\" for depth. See http://nco.sf.net/nco.html#lev_nm for options to change these names at run-time, e.g., \"--rgr plev_nm=vrt_nm\"\n",nco_prg_nm_get()); return NCO_ERR; } /* !hyai */ /* Sanity checks: One type of input and one type of output grid detected */ assert(!(flg_grd_in_hyb && flg_grd_in_prs)); assert(!(flg_grd_in_hyb && flg_grd_in_dpt)); assert(!(flg_grd_in_prs && flg_grd_in_dpt)); assert(flg_grd_in_hyb || flg_grd_in_prs || flg_grd_in_dpt); assert(!(flg_grd_out_hyb && flg_grd_out_prs)); assert(!(flg_grd_out_hyb && flg_grd_out_dpt)); assert(!(flg_grd_out_prs && flg_grd_out_dpt)); assert(flg_grd_out_hyb || flg_grd_out_prs || flg_grd_out_dpt); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: DEBUG Input grid flags : flg_grd_in_hyb = %d, flg_grd_in_prs = %d, flg_grd_in_dpt = %d\n",nco_prg_nm_get(),flg_grd_in_hyb,flg_grd_in_prs,flg_grd_in_dpt); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: DEBUG Output grid flags: flg_grd_out_hyb = %d, flg_grd_out_prs = %d, flg_grd_out_dpt = %d\n",nco_prg_nm_get(),flg_grd_out_hyb,flg_grd_out_prs,flg_grd_out_dpt); /* 20191219: This block is not used, deprecate it? Or use once new coordinates like altitude, depth supported? 
*/ nco_vrt_ntp_typ_enm nco_vrt_ntp_typ=nco_ntp_nil; /* Vertical interpolation type */ if(nco_vrt_grd_in == nco_vrt_grd_hyb && nco_vrt_grd_out == nco_vrt_grd_hyb) nco_vrt_ntp_typ=nco_ntp_hyb_to_hyb; if(nco_vrt_grd_in == nco_vrt_grd_hyb && nco_vrt_grd_out == nco_vrt_grd_prs) nco_vrt_ntp_typ=nco_ntp_hyb_to_prs; if(nco_vrt_grd_in == nco_vrt_grd_prs && nco_vrt_grd_out == nco_vrt_grd_hyb) nco_vrt_ntp_typ=nco_ntp_prs_to_hyb; if(nco_vrt_grd_in == nco_vrt_grd_prs && nco_vrt_grd_out == nco_vrt_grd_prs) nco_vrt_ntp_typ=nco_ntp_prs_to_prs; assert(nco_vrt_ntp_typ != nco_ntp_nil); /* Variables on input grid, i.e., on grid in data file to be interpolated */ if(flg_grd_in_hyb){ rcd=nco_inq_varid(in_id,"hyai",&hyai_id); rcd=nco_inq_varid(in_id,"hyam",&hyam_id); rcd=nco_inq_varid(in_id,"hybi",&hybi_id); rcd=nco_inq_varid(in_id,"hybm",&hybm_id); /* 20190602: ECMWF hybrid vertical grid parameters and dimensions differ from CAM/EAM: ECMWF defines vertical dimensions "nhym" and "nhyi" specifically for hy[ab][im] and uses "lev" and "lev_2" for all other variables, whereas CAM/EAM uses same dimensions "lev" and "ilev" for all vertical variables including hybrid coefficients ECMWF provides "hya?" as a constant in Pa and "hyb?" as a dimensionless coefficient of PS, whereas CAM/EAM provides "hya?" and "hyb?" both as dimensionless coefficients of P0 and PS ECMWF provides "lev" and "lev_2" with midpoint and surface pressure indices (not values), respectively, whereas CAM/EAM provides "lev" and "ilev" coordinate values in hPa ECMWF provides dimensionless "lnsp" for log(surface pressure) whereas CAM/EAM provides "PS" for surface pressure in Pa ECMWF "lnsp" has degenerate level dimension "lev_2" whereas CAM/EAM "PS" has no "ilev" dimension ECMWF uses hya? 
instead of reference pressure whereas CAM/EAM provides "P0" in hPa */ if((rcd=nco_inq_varid_flg(in_id,"lnsp",&ps_id)) == NC_NOERR) flg_grd_hyb_ecmwf=True; else if((rcd=nco_inq_varid_flg(in_id,"PS",&ps_id)) == NC_NOERR) flg_grd_hyb_cameam=True; else{ (void)fprintf(stderr,"%s: ERROR %s Unable to find surface pressure variable required for hybrid grid in input file\n",nco_prg_nm_get(),fnc_nm); abort(); } /* !rcd */ if(flg_grd_hyb_cameam){ rcd=nco_inq_varid(in_id,"P0",&p0_id); ilev_id=NC_MIN_INT; lev_id=NC_MIN_INT; if(ilev_id_tpl == NC_MIN_INT) rcd=nco_inq_varid_flg(in_id,"ilev",&ilev_id); if(lev_id_tpl == NC_MIN_INT) rcd=nco_inq_varid_flg(in_id,"lev",&lev_id); } /* !flg_grd_hyb_cameam */ /* 20190603: We require ECMWF IFS input to have a "lev" coordinate so we can use "lev" dimension not "nhyb" */ if(flg_grd_hyb_ecmwf) rcd=nco_inq_varid(in_id,"lev",&lev_id); } /* !flg_grd_in_hyb */ if(flg_grd_in_prs){ rcd=nco_inq_varid(in_id,plev_nm_in,&lev_id); if((rcd=nco_inq_varid_flg(in_id,"PS",&ps_id)) == NC_NOERR){ /* Output file creation procedure discriminates between input surface pressure dimensioned as CAM/EAM vs. ECMWF */ flg_grd_hyb_cameam=True; if(flg_grd_out_hyb && (ps_id_tpl == NC_MIN_INT)) (void)fprintf(stderr,"%s: INFO %s detects variable PS (canonical name for spatially varying surface pressure field in hybrid grids) in pure-pressure input data file. PS will be copied directly from pure-pressure grid input dataset to, and used to construct the pressures of, the output hybrid-coordinate data file.\n",nco_prg_nm_get(),fnc_nm); if(flg_grd_out_hyb && (ps_id_tpl != NC_MIN_INT)) (void)fprintf(stderr,"%s: INFO %s detects variable PS (canonical name for spatially varying surface pressure field in hybrid grids) in both vertical-grid file and pure-pressure input data file. The vertical grid-file takes precedence. PS will be copied directly from vertical-grid file to, and used to construct the pressures of, the output hybrid-coordinate data file. 
PS in input pure-pressure file will be ignored.\n",nco_prg_nm_get(),fnc_nm); }else{ if(flg_grd_out_hyb && (ps_id_tpl == NC_MIN_INT)){ (void)fprintf(stderr,"%s: ERROR %s does not find variable PS (canonical name for spatially varying surface pressure field in hybrid grids) in pure-pressure input data file or in vertical grid-file for hybrid-pressure output. PS must be present in at least one of these files in order to construct the output hybrid-coordinate pressures.\nHINT: Append a valid PS to the inpud data file or vertical grid-file.\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !ps_id_tpl */ } /* !ps_id */ } /* !flg_grd_in_prs */ if(flg_grd_in_dpt){ rcd=nco_inq_varid(in_id,"depth",&lev_id); } /* !flg_grd_in_dpt */ const int ilev_id_in=ilev_id; /* [id] Interface pressure ID */ const int lev_id_in=lev_id; /* [id] Midpoint pressure ID */ const int ps_id_in=ps_id; /* [id] Surface pressure ID */ /* Identify all record-dimensions in input file */ rcd=nco_inq_unlimdims(in_id,&dmn_nbr_rec,(int *)NULL); if(dmn_nbr_rec > 0){ dmn_ids_rec=(int *)nco_malloc(dmn_nbr_rec*sizeof(int)); rcd+=nco_inq_unlimdims(in_id,&dmn_nbr_rec,dmn_ids_rec); } /* !dmn_nbr_rec */ if(flg_grd_in_hyb){ /* Get hybrid vertical information first */ rcd=nco_inq_varndims(in_id,ps_id,&dmn_nbr_in); rcd=nco_inq_vardimid(in_id,hyai_id,&dmn_id_ilev_in); if(flg_grd_hyb_cameam) rcd=nco_inq_vardimid(in_id,hyam_id,&dmn_id_lev_in); if(flg_grd_hyb_ecmwf) rcd=nco_inq_vardimid(in_id,lev_id,&dmn_id_lev_in); rcd=nco_inq_dimlen(in_id,dmn_id_ilev_in,&ilev_nbr_in); rcd=nco_inq_dimlen(in_id,dmn_id_lev_in,&lev_nbr_in); rcd=nco_inq_dimname(in_id,dmn_id_ilev_in,dmn_nm); ilev_nm_in=strdup(dmn_nm); rcd=nco_inq_dimname(in_id,dmn_id_lev_in,dmn_nm); lev_nm_in=strdup(dmn_nm); } /* !flg_grd_in_hyb */ if(flg_grd_in_prs){ /* Interrogate plev to obtain plev dimensions */ rcd=nco_inq_vardimid(in_id,lev_id,&dmn_id_lev_in); rcd=nco_inq_dimlen(in_id,dmn_id_lev_in,&lev_nbr_in); rcd=nco_inq_dimname(in_id,dmn_id_lev_in,dmn_nm); 
lev_nm_in=strdup(dmn_nm); /* Define horizontal grid if no PS is provided (i.e., pure-pressure to pure-pressure interpolation) */ if(!flg_grd_out_hyb){ /* Problem: What is horizontal grid size of pressure grid file? Algorithm: Examine first multi-dimensional variable that includes plev dimension Assume horizontal dimensions vary more rapidly than (i.e., follow) plev Compute horizontal grid size accordingly Set output horizontal size to input horizontal size */ int var_nbr; /* [nbr] Number of variables in file */ int var_idx; /* [idx] Index over variables in file */ rcd=nco_inq(in_id,&dmn_nbr_in,&var_nbr,(int *)NULL,(int *)NULL); dmn_ids_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_cnt_in=(long *)nco_malloc(dmn_nbr_in*sizeof(long)); for(var_idx=0;var_idx<var_nbr;var_idx++){ rcd=nco_inq_varndims(in_id,var_idx,&dmn_nbr_in); rcd=nco_inq_vardimid(in_id,var_idx,dmn_ids_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++) if(dmn_ids_in[dmn_idx] == dmn_id_lev_in) break; /* Does current variable have lev dimension? */ if(dmn_idx < dmn_nbr_in){ /* Yes. Do any dimensions vary more rapidly than lev? */ if(dmn_idx < dmn_nbr_in-1){ /* Yes. 
Assume remaining dimension are horizontal spatial dimensions */ char var_nm[NC_MAX_NAME+1L]; (void)nc_inq_varname(in_id,var_idx,var_nm); for(int dmn_idx_hrz=dmn_idx+1;dmn_idx_hrz<dmn_nbr_in;dmn_idx_hrz++){ rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx_hrz],dmn_cnt_in+dmn_idx_hrz); grd_sz_in*=dmn_cnt_in[dmn_idx_hrz]; } /* !dmn_idx_hrz */ break; } /* !dmn_idx */ } /* !dmn_idx */ } /* !var_idx */ assert(var_idx != var_nbr); grd_sz_out=grd_sz_in; } /* !flg_grd_out_hyb */ } /* !flg_grd_in_prs */ double *hyai_in=NULL; /* [frc] Hybrid A coefficient at layer interfaces on input grid */ double *hyam_in=NULL; /* [frc] Hybrid A coefficient at layer midpoints on input grid */ double *hybi_in=NULL; /* [frc] Hybrid B coefficient at layer interfaces on input grid */ double *hybm_in=NULL; /* [frc] Hybrid B coefficient at layer midpoints on input grid */ double *lev_in=NULL; /* [Pa] Air pressure on input grid */ double *prs_mdp_in=NULL; /* [Pa] Midpoint pressure on input grid */ double *prs_ntf_in=NULL; /* [Pa] Interface pressure on input grid */ double *ps_in=NULL; /* [Pa] Surface pressure on input grid */ double p0_in; /* [Pa] Reference pressure on input grid */ if(flg_grd_in_hyb){ hyai_in=(double *)nco_malloc(ilev_nbr_in*nco_typ_lng(var_typ_rgr)); hyam_in=(double *)nco_malloc(lev_nbr_in*nco_typ_lng(var_typ_rgr)); hybi_in=(double *)nco_malloc(ilev_nbr_in*nco_typ_lng(var_typ_rgr)); hybm_in=(double *)nco_malloc(lev_nbr_in*nco_typ_lng(var_typ_rgr)); rcd=nco_get_var(in_id,hyai_id,hyai_in,crd_typ_out); rcd=nco_get_var(in_id,hyam_id,hyam_in,crd_typ_out); rcd=nco_get_var(in_id,hybi_id,hybi_in,crd_typ_out); rcd=nco_get_var(in_id,hybm_id,hybm_in,crd_typ_out); if(flg_grd_hyb_cameam) rcd=nco_get_var(in_id,p0_id,&p0_in,crd_typ_out); /* ECMWF distributes IFS forecasts with lnsp = log(surface pressure) */ if(flg_grd_hyb_ecmwf){ /* Decompose ECMWF hya? convention into CAM/EAM-like product of P0 and hya? 
*/ p0_in=100000.0; for(size_t idx=0;idx<lev_nbr_in;idx++){ hyai_in[idx]/=p0_in; hyam_in[idx]/=p0_in; } /* !idx */ } /* flg_grd_hyb_ecmwf */ } /* !flg_grd_in_hyb */ if(flg_grd_in_prs){ lev_in=(double *)nco_malloc(lev_nbr_in*nco_typ_lng(var_typ_rgr)); rcd=nco_get_var(in_id,lev_id,lev_in,crd_typ_out); } /* !flg_grd_in_prs */ /* Always obtain surface pressure if input or output grid is hybrid */ if(flg_grd_in_hyb || flg_grd_out_hyb){ /* Copy horizontal grid information from input file LHS variables were set above if PS is in template file */ if(ps_id_tpl == NC_MIN_INT){ /* NB: dmn_nbr_in/out in this block refer only to horizontal dimensions necessary to define PS */ rcd=nco_inq_varndims(in_id,ps_id,&dmn_nbr_in); /* This is harmlessly repeated for hybrid input files */ dmn_ids_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_cnt_in=(long *)nco_malloc((dmn_nbr_in+1)*sizeof(long)); if(!dmn_srt) dmn_srt=(long *)nco_malloc((dmn_nbr_in+1)*sizeof(long)); /* NB: Only allocate dmn_srt once */ rcd=nco_inq_vardimid(in_id,ps_id,dmn_ids_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx],dmn_cnt_in+dmn_idx); /* 20190330: Allow possibility that PS has time dimension > 1 We want horizontal not temporal dimensions to contribute to grd_sz Temporal dimension is usually unlimited Only multiply grd_sz by fixed (non-unlimited) dimension sizes Corner-case exception when PS spatial dimension on unstructured grid is unlimited */ for(rec_idx=0;rec_idx<dmn_nbr_rec;rec_idx++) if(dmn_ids_in[dmn_idx] == dmn_ids_rec[rec_idx]) break; if(rec_idx == dmn_nbr_rec || dmn_nbr_in == 1) grd_sz_in*=dmn_cnt_in[dmn_idx]; if(rec_idx != dmn_nbr_rec && dmn_nbr_in > 1 && dmn_cnt_in[dmn_idx] > 1L){ dmn_id_tm_in=dmn_ids_in[dmn_idx]; dmn_idx_tm_in=dmn_idx; tm_nbr_in=dmn_cnt_in[dmn_idx_tm_in]; if(tm_nbr_in > 1L) flg_vrt_tm=True; } /* tm_nbr_in > 1 */ dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ /* Given all input PS information, define output PS information */ 
dmn_nbr_ps=dmn_nbr_out=dmn_nbr_in; dmn_ids_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int)); dmn_cnt_out=(long *)nco_malloc((dmn_nbr_out+1)*sizeof(long)); /* fxm: next line works for hyb_in and is buggy for prs_in */ memcpy(dmn_ids_out,dmn_ids_in,dmn_nbr_in*sizeof(int)); memcpy(dmn_cnt_out,dmn_cnt_in,dmn_nbr_in*sizeof(long)); grd_sz_out=grd_sz_in; tm_nbr_out=tm_nbr_in; }else{ /* !ps_id_tpl */ /* 20200825: We have already defined grd_sz_out if PS is in template file We have already defined grd_sz_in and grd_sz_out := grd_sz_in when PS not in template file We have already defined grd_sz_in if input file is pure-pressure However, we have not yet defined grd_sz_in if input file is hybrid Expectation is that grd_sz_in (from input file) = grd_sz_out (from template file) An independent check on this would examine dimension sizes in input file Such a check would immediately flag horizontal mismatches between vertical file and input file The check could not rely on PS being present in input file The check could/should examine the first horizontal variable in input file This would require a lot of code, so we just assume it is true */ grd_sz_in=grd_sz_out; } /* !ps_id_tpl */ /* Timestep sequencing NB: tm_nbr_??? variables count timesteps in vertical grid definitions These are not necessarily the same as the number of timesteps in either file Time-invariant hybrid or pure-pressure coordinates are valid vertical grids for timeseries Usually hybrid grids have as many timesteps in the grids as in the timeseries Usually pressure grids are time-invariant (as of 20190511 time-varying pure pressure grids are still not supported) This implementation interpolates timeseries to/from time-invariant vertical grids in one OpenMP call! */ if(tm_nbr_in > 1L || tm_nbr_out > 1L){ if(tm_nbr_in > tm_nbr_out) assert((float)tm_nbr_in/(float)tm_nbr_out == tm_nbr_in/tm_nbr_out); else assert((float)tm_nbr_out/(float)tm_nbr_in == tm_nbr_out/tm_nbr_in); } /* !tm_nbr_in */ tm_nbr=tm_nbr_in > tm_nbr_out ? 
tm_nbr_in : tm_nbr_out; /* Sanity checks */ if(grd_sz_in != grd_sz_out || tm_nbr_in != tm_nbr_out) (void)fprintf(stdout,"%s: ERROR %s reports that temporal or horizontal spatial dimensions differ: grd_sz_in = %ld != %ld = grd_sz_out, and/or tm_nbr_in = %ld != %ld = tm_nbr_out\n",nco_prg_nm_get(),fnc_nm,grd_sz_in,grd_sz_out,tm_nbr_in,tm_nbr_out); assert(grd_sz_in == grd_sz_out); assert(tm_nbr_in == tm_nbr_out); ps_in=(double *)nco_malloc_dbg(tm_nbr_in*grd_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() ps_in value buffer"); /* Surface pressure comes from either hybrid vertical grid-files, hybrid data files, or pressure data files that provide surface pressure */ if(flg_grd_in_hyb || (flg_grd_in_prs && ps_id_tpl == NC_MIN_INT)) rcd=nco_get_var(in_id,ps_id,ps_in,crd_typ_out); /* ECMWF distributes IFS forecasts with lnsp = log(surface pressure) */ if(flg_grd_hyb_ecmwf){ /* Convert ECMWF-provided log(surface_pressure) to surface_pressure */ const size_t ps_sz_in=tm_nbr_in*grd_sz_in; /* [nbr] Number of elements in ps_in */ for(size_t idx=0;idx<ps_sz_in;idx++) ps_in[idx]=exp(ps_in[idx]); } /* flg_grd_hyb_ecmwf */ /* Finally have enough information to allocate output pressure grid */ ps_out=(double *)nco_malloc_dbg(tm_nbr_out*grd_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() ps_out value buffer"); /* Get PS from output horizontal grid, if available, otherwise copy from input horizontal grid */ if(ps_id_tpl != NC_MIN_INT){ rcd=nco_get_var(tpl_id,ps_id_tpl,ps_out,crd_typ_out); /* NB: Here we read from tpl_id one last time */ }else{ memcpy(ps_out,ps_in,tm_nbr_in*grd_sz_in*nco_typ_lng(var_typ_rgr)); } /* !ps_id_tpl */ } /* ! 
*/ /* Compare input and output surface pressure fields to determine whether subterranean extrapolation required */ nco_bool flg_add_msv_att; /* [flg] Extrapolation requires _FillValue */ flg_add_msv_att=False; /* Extrapolation type xtr_fll_msv may cause need to create _FillValue attributes */ if(xtr_mth == nco_xtr_fll_msv){ const size_t ps_sz=tm_nbr*grd_sz_in; // [nbr] Size of surface-pressure field double *prs_max_in=NULL; /* [Pa] Maximum midpoint pressure on input grid */ double *prs_max_out=NULL; /* [Pa] Maximum midpoint pressure on output grid */ double *prs_min_in=NULL; /* [Pa] Minimum midpoint pressure on input grid */ double *prs_min_out=NULL; /* [Pa] Minimum midpoint pressure on output grid */ long idx_lev_max; // [idx] Index of midpoint level with greatest pressure long idx_lev_min; // [idx] Index of midpoint level with lowest pressure size_t idx; // [idx] Counting index prs_max_in=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_max_in value buffer"); prs_max_out=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_max_out value buffer"); prs_min_in=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_min_in value buffer"); prs_min_out=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_min_out value buffer"); if(flg_grd_in_hyb){ // fxm: assumes hybrid grid has least/greatest pressure at top/bottom level idx_lev_max=lev_nbr_in-1; idx_lev_min=0L; for(tm_idx=0;tm_idx<tm_nbr;tm_idx++){ idx_fst=tm_idx*grd_sz_in; for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++){ prs_max_in[grd_idx+idx_fst]=p0_in*hyam_in[idx_lev_max]+ps_in[idx_fst+grd_idx]*hybm_in[idx_lev_max]; prs_min_in[grd_idx+idx_fst]=p0_in*hyam_in[idx_lev_min]+ps_in[idx_fst+grd_idx]*hybm_in[idx_lev_min]; } /* !grd_idx */ } /* !tm_idx */ } /* !flg_grd_in_hyb */ if(flg_grd_out_hyb){ // fxm: assumes hybrid grid has least/greatest pressure at top/bottom level 
idx_lev_max=lev_nbr_out-1; idx_lev_min=0L; for(tm_idx=0;tm_idx<tm_nbr;tm_idx++){ idx_fst=tm_idx*grd_sz_out; for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++){ prs_max_out[grd_idx+idx_fst]=p0_out*hyam_out[idx_lev_max]+ps_out[idx_fst+grd_idx]*hybm_out[idx_lev_max]; prs_min_out[grd_idx+idx_fst]=p0_out*hyam_out[idx_lev_min]+ps_out[idx_fst+grd_idx]*hybm_out[idx_lev_min]; } /* !grd_idx */ } /* !tm_idx */ } /* !flg_grd_out_hyb */ if(flg_grd_in_prs){ double lev_in_max; double lev_in_min; if(lev_in[0] < lev_in[1]) lev_in_max=lev_in[lev_nbr_in-1]; else lev_in_max=lev_in[0]; if(lev_in[0] < lev_in[1]) lev_in_min=lev_in[0]; else lev_in_max=lev_in[lev_nbr_in-1]; for(size_t idx_in=0;idx_in<ps_sz;idx_in++) prs_max_in[idx_in]=lev_in_max; for(size_t idx_in=0;idx_in<ps_sz;idx_in++) prs_min_in[idx_in]=lev_in_min; } /* !flg_grd_in_prs */ if(flg_grd_out_prs){ double lev_out_max; double lev_out_min; if(lev_out[0] < lev_out[1]) lev_out_max=lev_out[lev_nbr_out-1]; else lev_out_max=lev_out[0]; if(lev_out[0] < lev_out[1]) lev_out_min=lev_out[0]; else lev_out_min=lev_out[lev_nbr_out-1]; for(size_t idx_out=0;idx_out<ps_sz;idx_out++) prs_max_out[idx_out]=lev_out_max; for(size_t idx_out=0;idx_out<ps_sz;idx_out++) prs_min_out[idx_out]=lev_out_min; } /* !flg_grd_out_prs */ for(idx=0;idx<ps_sz;idx++) if(prs_max_out[idx] > prs_max_in[idx]) break; if(idx < ps_sz) flg_add_msv_att=True; for(idx=0;idx<ps_sz;idx++) if(prs_min_out[idx] < prs_min_in[idx]) break; if(idx < ps_sz) flg_add_msv_att=True; if(flg_add_msv_att && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports at least one point in at least one output level requires extrapolation (not interpolation). 
Will ensure that all interpolated fields have _FillValue attribute.\n",nco_prg_nm_get(),fnc_nm); if(prs_max_in) prs_max_in=(double *)nco_free(prs_max_in); if(prs_max_out) prs_max_out=(double *)nco_free(prs_max_out); if(prs_min_in) prs_min_in=(double *)nco_free(prs_min_in); if(prs_min_out) prs_min_out=(double *)nco_free(prs_min_out); } /* !xtr_mth */ /* Lay-out regridded file */ //(void)fprintf(stdout,"%s: DEBUG quark1 dmn_nbr_out = %d, dmn_nbr_ps = %d\n",nco_prg_nm_get(),dmn_nbr_out,dmn_nbr_ps); /* Use explicitly specified output names, if any, otherwise use template names (either explicitly specified or discovered by fuzzing) */ if(rgr->lev_nm_out) lev_nm_out=rgr->lev_nm_out; if(rgr->ilev_nm_out){ if(flg_grd_out_hyb) ilev_nm_out=rgr->ilev_nm_out; if(flg_grd_out_prs) lev_nm_out=rgr->ilev_nm_out; } /* !ilev_nm_out */ if(flg_grd_out_prs){ /* Unless user explicitly specifies output name, use same name as input */ if(!rgr->lev_nm_out) lev_nm_out=(char *)strdup(plev_nm_in); /* Hybrid-sigma/pressure interface variables, if any, must also be output to pure-pressure files on lev grid */ ilev_nm_out=(char *)strdup(lev_nm_out); } /* !flg_grd_out_prs */ /* Define new vertical dimensions before all else */ if(flg_grd_out_hyb){ rcd=nco_def_dim(out_id,ilev_nm_out,ilev_nbr_out,&dmn_id_ilev_out); rcd=nco_def_dim(out_id,lev_nm_out,lev_nbr_out,&dmn_id_lev_out); /* Horizontal dimensions necessary to define PS variable */ for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ if(ps_id_tpl != NC_MIN_INT){ rcd=nco_inq_dimname(tpl_id,dmn_ids_out[dmn_idx],dmn_nm); }else{ rcd=nco_inq_dimname(in_id,dmn_ids_in[dmn_idx],dmn_nm); rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx],dmn_cnt_out+dmn_idx); } /* !ps_id_tpl */ if(flg_grd_hyb_cameam) rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_ids_out+dmn_idx); /* 20190602: ECMWF IFS PS variable has degenerate vertical dimension (lev_2). 
Avoid re-definition */ if(flg_grd_hyb_ecmwf) if(strcmp(dmn_nm,ilev_nm_out)) if(strcmp(dmn_nm,lev_nm_out)) rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_ids_out+dmn_idx); } /* !dmn_idx */ } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ rcd=nco_def_dim(out_id,lev_nm_out,lev_nbr_out,&dmn_id_lev_out); } /* !flg_grd_out_prs */ /* Do not extract grid variables (that are also extensive variables) like ilev, lev, hyai, hyam, hybi, hybm */ /* Exception list source: CAM: hyai, hyam, hybi, hybm, ilev, lev, P0, PS EAM: hyai, hyam, hybi, hybm, ilev, lev, P0, PS ECMWF: hyai, hyam, hybi, hybm, lev, lnsp NCEP: plev */ const int var_xcl_lst_nbr=10; /* [nbr] Number of objects on exclusion list */ const char *var_xcl_lst[]={"/hyai","/hyam","/hybi","/hybm","/ilev","/lev","/P0","/plev","/PS","/lnsp"}; int var_cpy_nbr=0; /* [nbr] Number of copied variables */ int var_rgr_nbr=0; /* [nbr] Number of regridded variables */ int var_xcl_nbr=0; /* [nbr] Number of deleted variables */ int var_crt_nbr=0; /* [nbr] Number of created variables */ long idx; /* [idx] Generic index */ unsigned int idx_tbl; /* [idx] Counter for traversal table */ const unsigned int trv_nbr=trv_tbl->nbr; /* [idx] Number of traversal table entries */ for(idx=0;idx<var_xcl_lst_nbr;idx++){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++) if(!strcmp(trv_tbl->lst[idx_tbl].nm_fll,var_xcl_lst[idx])) break; if(idx_tbl < trv_nbr){ if(trv_tbl->lst[idx_tbl].flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); var_xcl_nbr++; } /* endif */ trv_tbl->lst[idx_tbl].flg_xtr=False; } /* !idx_tbl */ } /* !idx */ /* 20191001: Do not automatically define plev_nm_in in pressure-grid output files The variable named lev_nm_out in the input data file is always defined in the output file So if plev_nm_in == lev_nm_out it will be defined anyway */ if(flg_grd_in_prs && 
flg_grd_out_prs && strcmp(plev_nm_in,lev_nm_out)){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++) if(!strcmp(trv_tbl->lst[idx_tbl].nm,plev_nm_in)) break; if(idx_tbl < trv_nbr){ if(trv_tbl->lst[idx_tbl].flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); var_xcl_nbr++; } /* endif */ trv_tbl->lst[idx_tbl].flg_xtr=False; } /* !idx_tbl */ } /* !idx */ char *var_nm; /* [sng] Variable name */ int *dmn_id_in=NULL; /* [id] Dimension IDs */ int *dmn_id_out=NULL; /* [id] Dimension IDs */ int var_id_in; /* [id] Variable ID */ int var_id_out; /* [id] Variable ID */ nc_type var_typ_out; /* [enm] Variable type to write to disk */ nco_bool PCK_ATT_CPY=True; /* [flg] Copy attributes "scale_factor", "add_offset" */ int shuffle; /* [flg] Turn-on shuffle filter */ int deflate; /* [flg] Turn-on deflate filter */ deflate=(int)True; shuffle=NC_SHUFFLE; dfl_lvl=rgr->dfl_lvl; fl_out_fmt=rgr->fl_out_fmt; /* Define new coordinates and grid variables in regridded file */ const int dmn_nbr_0D=0; /* [nbr] Rank of 0-D grid variables (scalars) */ const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */ //const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */ //const int dmn_nbr_3D=3; /* [nbr] Rank of 3-D grid variables */ //const int dmn_nbr_grd_max=dmn_nbr_3D; /* [nbr] Maximum rank of grid variables */ if(flg_grd_out_hyb){ rcd+=nco_def_var(out_id,"hyai",crd_typ_out,dmn_nbr_1D,&dmn_id_ilev_out,&hyai_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hyai_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,"hyam",crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&hyam_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hyam_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,"hybi",crd_typ_out,dmn_nbr_1D,&dmn_id_ilev_out,&hybi_id); if(dfl_lvl > 0) 
(void)nco_def_var_deflate(out_id,hybi_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,"hybm",crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&hybm_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hybm_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,ilev_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_ilev_out,&ilev_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ilev_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lev_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&lev_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lev_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,"P0",crd_typ_out,dmn_nbr_0D,(int *)NULL,&p0_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,p0_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; // for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ // rcd=nco_inq_dimname(out_id,dmn_ids_out[dmn_idx],dmn_nm); // (void)fprintf(stdout,"%s: DEBUG quark5 dmn_nbr_out = %d, dmn_nbr_ps = %d, dmn_idx = %d, dmn_ids_out[%d] = %d, dmn_nm = %s\n",nco_prg_nm_get(),dmn_nbr_out,dmn_nbr_ps,dmn_idx,dmn_idx,dmn_ids_out[dmn_idx],dmn_nm); // } /* !dmn_idx */ if(flg_grd_hyb_cameam) rcd+=nco_def_var(out_id,"PS",crd_typ_out,dmn_nbr_ps,dmn_ids_out,&ps_id); if(flg_grd_hyb_ecmwf){ /* Remove degenerate ECMWF vertical dimension so that output PS has dmn_nbr_ps-1 not dmn_nbr_ps dimensions */ int dmn_nbr_out_ecmwf=0; for(dmn_idx=0;dmn_idx<dmn_nbr_ps;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_ids_in[dmn_idx],dmn_nm); if(strcmp(dmn_nm,ilev_nm_out) && strcmp(dmn_nm,lev_nm_out) && strcmp(dmn_nm,"lev_2")) rcd=nco_inq_dimid(out_id,dmn_nm,dmn_ids_out+dmn_nbr_out_ecmwf++); } /* !dmn_idx */ rcd+=nco_def_var(out_id,"PS",crd_typ_out,dmn_nbr_out_ecmwf,dmn_ids_out,&ps_id); } /* !flg_grd_hyb_ecmwf */ if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ps_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; (void)nco_att_cpy(tpl_id,out_id,hyai_id_tpl,hyai_id,PCK_ATT_CPY); (void)nco_att_cpy(tpl_id,out_id,hyam_id_tpl,hyam_id,PCK_ATT_CPY); 
(void)nco_att_cpy(tpl_id,out_id,hybi_id_tpl,hybi_id,PCK_ATT_CPY); (void)nco_att_cpy(tpl_id,out_id,hybm_id_tpl,hybm_id,PCK_ATT_CPY); if(p0_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,p0_id_tpl,p0_id,PCK_ATT_CPY); /* p0 not expected to be in ECMWF grids */ if(ilev_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,ilev_id_tpl,ilev_id,PCK_ATT_CPY); else if(ilev_id_in != NC_MIN_INT) (void)nco_att_cpy(in_id,out_id,ilev_id_in,ilev_id,PCK_ATT_CPY); if(lev_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,lev_id_tpl,lev_id,PCK_ATT_CPY); else if(lev_id_in != NC_MIN_INT) (void)nco_att_cpy(in_id,out_id,lev_id_in,lev_id,PCK_ATT_CPY); if(ps_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,ps_id_tpl,ps_id,PCK_ATT_CPY); else (void)nco_att_cpy(in_id,out_id,ps_id_in,ps_id,PCK_ATT_CPY); } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ rcd+=nco_def_var(out_id,lev_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&lev_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lev_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; (void)nco_att_cpy(tpl_id,out_id,lev_id_tpl,lev_id,PCK_ATT_CPY); dmn_id_ilev_out=dmn_id_lev_out; } /* !flg_grd_out_prs */ /* No further access to template file, close it */ nco_close(tpl_id); /* Remove local copy of file */ if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_tpl); char *dmn_nm_cp; /* [sng] Dimension name as char * to reduce indirection */ nco_bool has_ilev; /* [flg] Contains interface level dimension */ nco_bool has_lev; /* [flg] Contains midpoint level dimension */ nco_bool has_tm; /* [flg] Contains time dimension */ nco_bool need_prs_ntf=False; /* [flg] At least one variable to regrid is on interface levels */ nco_bool need_prs_mdp=False; /* [flg] At least one variable to regrid is on midpoint levels */ trv_sct trv; /* [sct] Traversal table object structure to reduce indirection */ /* Define regridding flag for each variable */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && 
trv.flg_xtr){ dmn_nbr_in=trv_tbl->lst[idx_tbl].nbr_dmn; has_ilev=False; has_lev=False; for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ /* Pre-determine flags necessary during next loop */ dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm; /* fxm: Generalize to include any variable containing coordinates with "standard_name" = "atmosphere_hybrid_sigma_pressure_coordinate" */ if(!has_ilev && ilev_nm_in) has_ilev=!strcmp(dmn_nm_cp,ilev_nm_in); if(!has_lev) has_lev=!strcmp(dmn_nm_cp,lev_nm_in); } /* end loop over dimensions */ /* Regrid variables that contain either vertical dimension */ if(has_ilev || has_lev){ trv_tbl->lst[idx_tbl].flg_rgr=True; var_rgr_nbr++; if(has_ilev) need_prs_ntf=True; if(has_lev) need_prs_mdp=True; } /* endif */ assert(!(has_ilev && has_lev)); /* Copy all variables that are not regridded or omitted */ if(!trv_tbl->lst[idx_tbl].flg_rgr) var_cpy_nbr++; } /* end nco_obj_typ_var */ } /* end idx_tbl */ if(!var_rgr_nbr) (void)fprintf(stdout,"%s: WARNING %s reports no variables fit interpolation criteria. The vertical interpolator expects something to interpolate, and variables not interpolated are copied straight to output. HINT: If the name(s) of the input vertical grid dimensions (e.g., ilev and lev) do not match NCO's preset defaults (case-insensitive unambiguous forms and abbreviations of \"ilev\", \"lev\", and/or \"plev\", respectively) then change the dimension names that NCO looks for. Instructions are at http://nco.sf.net/nco.html#regrid. For hybrid-pressure coordinate grids, ensure that the \"ilev\" and \"lev\" variable names are known with, e.g., \"ncks --rgr ilev_nm=interface_level --rgr lev_nm=midpoint_level\" or \"ncremap -R '--rgr ilev=interface_level --rgr lev=midpoint_level'\". 
For pure pressure grids, ensure the \"plev\" coordinate name is defined with, e.g., \"ncks --rgr plev_nm=pressure_level\" or \"ncremap -R '--rgr plev=pressure_level'\".\n",nco_prg_nm_get(),fnc_nm); if(nco_dbg_lvl_get() >= nco_dbg_fl){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr) (void)fprintf(stderr,"Interpolate %s? %s\n",trv.nm,trv.flg_rgr ? "Yes" : "No"); } /* end idx_tbl */ } /* end dbg */ /* Pre-allocate dimension ID and cnt/srt space */ int dmn_nbr_max; /* [nbr] Maximum number of dimensions variable can have in input or output */ rcd+=nco_inq_ndims(in_id,&dmn_nbr_max); dmn_id_in=(int *)nco_malloc(dmn_nbr_max*sizeof(int)); dmn_id_out=(int *)nco_malloc(dmn_nbr_max*sizeof(int)); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); aed_sct aed_mtd_fll_val; char *att_nm_fll_val=strdup("_FillValue"); int flg_pck; /* [flg] Variable is packed on disk */ nco_bool has_mss_val; /* [flg] Has numeric missing value attribute */ float mss_val_flt; double mss_val_dbl; if(flg_add_msv_att){ aed_mtd_fll_val.att_nm=att_nm_fll_val; aed_mtd_fll_val.mode=aed_create; aed_mtd_fll_val.sz=1L; mss_val_dbl=NC_FILL_DOUBLE; mss_val_flt=NC_FILL_FLOAT; } /* !flg_add_msv_att */ /* Define interpolated and copied variables in output file */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ var_nm=trv.nm; /* Preserve input type in output type */ var_typ_out=trv.var_typ; dmn_nbr_in=trv.nbr_dmn; dmn_nbr_out=trv.nbr_dmn; rcd=nco_inq_varid(in_id,var_nm,&var_id_in); rcd=nco_inq_varid_flg(out_id,var_nm,&var_id_out); /* If variable has not been defined, define it */ if(rcd != NC_NOERR){ 
if(trv.flg_rgr){ /* Interpolate */ rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); rcd=nco_inq_var_packing(in_id,var_id_in,&flg_pck); if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports variable \"%s\" is packed so results unpredictable. HINT: If regridded values seems weird, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,var_nm); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm); if(ilev_nm_in && !strcmp(dmn_nm,ilev_nm_in)){ /* Change ilev dimension */ dmn_id_out[dmn_idx]=dmn_id_ilev_out; dmn_cnt_out[dmn_idx]=ilev_nbr_out; }else if(!strcmp(dmn_nm,lev_nm_in)){ /* Change lev dimension */ dmn_id_out[dmn_idx]=dmn_id_lev_out; dmn_cnt_out[dmn_idx]=lev_nbr_out; }else{ /* Dimensions ilev/lev_nm_in have already been defined as ilev/lev_nm_out, replicate all other dimensions */ rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx); } /* !ilev */ if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_out+dmn_idx); /* Check-for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt_out[dmn_idx]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_id_out+dmn_idx); } /* !rcd */ } /* !dmn_idx */ }else{ /* !flg_rgr */ /* Replicate non-interpolated variables */ rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm); rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx); if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_out+dmn_idx); /* Check-for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt_out[dmn_idx]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_id_out+dmn_idx); 
} /* !rcd */ } /* !dmn_idx */ } /* !flg_rgr */ rcd=nco_def_var(out_id,var_nm,var_typ_out,dmn_nbr_out,dmn_id_out,&var_id_out); /* Duplicate netCDF4 settings when possible */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC){ /* Deflation */ if(dmn_nbr_out > 0){ int dfl_lvl_in; /* [enm] Deflate level [0..9] */ rcd=nco_inq_var_deflate(in_id,var_id_in,&shuffle,&deflate,&dfl_lvl_in); /* Copy original deflation settings */ if(deflate || shuffle) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl_in); /* Overwrite HDF Lempel-Ziv compression level, if requested */ if(dfl_lvl == 0) deflate=(int)False; else deflate=(int)True; /* Turn-off shuffle when uncompressing otherwise chunking requests may fail */ if(dfl_lvl == 0) shuffle=NC_NOSHUFFLE; /* Shuffle never, to my knowledge, increases filesize, so shuffle by default when manually deflating */ if(dfl_lvl >= 0) shuffle=NC_SHUFFLE; if(dfl_lvl >= 0) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl); } /* !dmn_nbr_out */ } /* !NC_FORMAT_NETCDF4 */ (void)nco_att_cpy(in_id,out_id,var_id_in,var_id_out,PCK_ATT_CPY); /* Variables with subterranean levels and missing-value extrapolation must have _FillValue attribute */ if(flg_add_msv_att && trv.flg_rgr){ has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,&mss_val_dbl); if(!has_mss_val){ nco_bool flg_att_chg; /* [flg] _FillValue attribute was written */ aed_mtd_fll_val.var_nm=var_nm; aed_mtd_fll_val.id=var_id_out; aed_mtd_fll_val.type=var_typ_out; if(var_typ_out == NC_FLOAT) aed_mtd_fll_val.val.fp=&mss_val_flt; else if(var_typ_out == NC_DOUBLE) aed_mtd_fll_val.val.dp=&mss_val_dbl; flg_att_chg=nco_aed_prc(out_id,var_id_out,aed_mtd_fll_val); if(!flg_att_chg && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: WARNING %s reports unsuccessful attempt to create _FillValue attribute for variable %s\n",nco_prg_nm_get(),fnc_nm,var_nm); } /* !has_mss_val */ } /* !flg_add_msv_att */ } /* !rcd */ } /* !var */ } /* end 
idx_tbl */ /* Free pre-allocated array space */ if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); if(dmn_ids_rec) dmn_ids_rec=(int *)nco_free(dmn_ids_rec); /* Turn-off default filling behavior to enhance efficiency */ nco_set_fill(out_id,NC_NOFILL,&fll_md_old); /* Begin data mode */ (void)nco_enddef(out_id); /* Copy all grid variables */ if(flg_grd_out_hyb){ (void)nco_put_var(out_id,hyai_id,hyai_out,crd_typ_out); (void)nco_put_var(out_id,hyam_id,hyam_out,crd_typ_out); (void)nco_put_var(out_id,hybi_id,hybi_out,crd_typ_out); (void)nco_put_var(out_id,hybm_id,hybm_out,crd_typ_out); (void)nco_put_var(out_id,ilev_id,ilev_out,crd_typ_out); (void)nco_put_var(out_id,lev_id,lev_out,crd_typ_out); (void)nco_put_var(out_id,p0_id,&p0_out,crd_typ_out); (void)nco_put_var(out_id,ps_id,ps_out,crd_typ_out); } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ (void)nco_put_var(out_id,lev_id,lev_out,crd_typ_out); } /* !flg_grd_out_prs */ nco_bool flg_ntp_log=True; /* [flg] Interpolate in log(vertical_coordinate) */ if(ntp_mth == nco_ntp_lnr) flg_ntp_log=False; size_t idx_in; /* [idx] Index into 3D input variables */ size_t idx_out; /* [idx] Index into 3D output variables */ size_t var_sz_in; /* [nbr] Number of elements in variable (will be self-multiplied) */ size_t var_sz_out; /* [nbr] Number of elements in variable (will be self-multiplied) */ /* Interpolate or copy variable values */ double *var_val_dbl_in=NULL; double *var_val_dbl_out=NULL; double *prs_ntp_in; /* [Pa] Interpolated pressure array on input grid */ double *prs_ntp_out; /* [Pa] Interpolated pressure array on output grid */ int lvl_idx_in; /* [idx] Level index on input grid */ int lvl_idx_out; /* [idx] Level index on output grid */ int lvl_nbr_in; /* [nbr] Number of levels for current interpolated variable on 
input grid */ int lvl_nbr_out; /* [nbr] Number of levels for current interpolated variable on output grid */ int thr_idx; /* [idx] Thread index */ size_t grd_nbr=grd_sz_in; /* [nbr] Horizonal grid size */ size_t idx_dbg=rgr->idx_dbg; /* Using naked stdin/stdout/stderr in parallel region generates warning Copy appropriate filehandle to variable scoped as shared in parallel clause */ FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */ /* Repeating above documentation for the forgetful: NB: tm_nbr is max(timesteps) in vertical grid definitions, not number of records in either file This implementation interpolates timeseries to/from time-invariant vertical grids in one OpenMP call! */ for(tm_idx=0;tm_idx<tm_nbr;tm_idx++){ /* Index-offset to current surface pressure timeslice */ idx_fst=tm_idx*grd_sz_in; if(need_prs_mdp){ /* Allocated and define midpoint pressures */ if(tm_idx == 0) prs_mdp_in=(double *)nco_malloc_dbg(grd_sz_in*lev_nbr_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_mdp_in value buffer"); if(tm_idx == 0) prs_mdp_out=(double *)nco_malloc_dbg(grd_sz_out*lev_nbr_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_mdp_out value buffer"); if(flg_grd_in_hyb) for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++) for(lev_idx=0;lev_idx<lev_nbr_in;lev_idx++) prs_mdp_in[grd_idx+lev_idx*grd_sz_in]=p0_in*hyam_in[lev_idx]+ps_in[idx_fst+grd_idx]*hybm_in[lev_idx]; if(flg_grd_out_hyb) for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++) for(lev_idx=0;lev_idx<lev_nbr_out;lev_idx++) prs_mdp_out[grd_idx+lev_idx*grd_sz_out]=p0_out*hyam_out[lev_idx]+ps_out[idx_fst+grd_idx]*hybm_out[lev_idx]; if(flg_grd_in_prs) for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++) for(lev_idx=0;lev_idx<lev_nbr_in;lev_idx++) prs_mdp_in[grd_idx+lev_idx*grd_sz_in]=lev_in[lev_idx]; if(flg_grd_out_prs) for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++) for(lev_idx=0;lev_idx<lev_nbr_out;lev_idx++) prs_mdp_out[grd_idx+lev_idx*grd_sz_out]=lev_out[lev_idx]; if(flg_ntp_log){ var_sz_in=grd_sz_in*lev_nbr_in; 
for(idx_in=0;idx_in<var_sz_in;idx_in++) prs_mdp_in[idx_in]=log(prs_mdp_in[idx_in]); var_sz_out=grd_sz_out*lev_nbr_out; for(idx_out=0;idx_out<var_sz_out;idx_out++) prs_mdp_out[idx_out]=log(prs_mdp_out[idx_out]); } /* !flg_ntp_log */ } /* !need_prs_mdp */ if(need_prs_ntf){ /* Allocate and define interface pressures */ if(tm_idx == 0) prs_ntf_in=(double *)nco_malloc_dbg(grd_sz_in*ilev_nbr_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_ntf_in value buffer"); if(tm_idx == 0) prs_ntf_out=(double *)nco_malloc_dbg(grd_sz_out*ilev_nbr_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_ntf_out value buffer"); if(flg_grd_in_hyb) for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++) for(ilev_idx=0;ilev_idx<ilev_nbr_in;ilev_idx++) prs_ntf_in[grd_idx+ilev_idx*grd_sz_in]=p0_in*hyai_in[ilev_idx]+ps_in[idx_fst+grd_idx]*hybi_in[ilev_idx]; if(flg_grd_out_hyb) for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++) for(ilev_idx=0;ilev_idx<ilev_nbr_out;ilev_idx++) prs_ntf_out[grd_idx+ilev_idx*grd_sz_out]=p0_out*hyai_out[ilev_idx]+ps_out[idx_fst+grd_idx]*hybi_out[ilev_idx]; if(flg_grd_in_prs) for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++) for(ilev_idx=0;ilev_idx<ilev_nbr_in;ilev_idx++) prs_ntf_in[grd_idx+ilev_idx*grd_sz_in]=lev_in[ilev_idx]; if(flg_grd_out_prs) for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++) for(ilev_idx=0;ilev_idx<ilev_nbr_out;ilev_idx++) prs_ntf_out[grd_idx+ilev_idx*grd_sz_out]=lev_out[ilev_idx]; if(flg_ntp_log){ var_sz_in=grd_sz_in*ilev_nbr_in; for(idx_in=0;idx_in<var_sz_in;idx_in++) prs_ntf_in[idx_in]=log(prs_ntf_in[idx_in]); var_sz_out=grd_sz_out*ilev_nbr_out; for(idx_out=0;idx_out<var_sz_out;idx_out++) prs_ntf_out[idx_out]=log(prs_ntf_out[idx_out]); } /* !flg_ntp_log */ } /* !need_prs_ntf */ /* Set firstprivate variables to initial values */ has_ilev=False; has_lev=False; has_tm=False; if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"Interpolation progress: # means interpolated, ~ means copied\n"); #ifdef __GNUG__ # define GCC_LIB_VERSION ( __GNUC__ * 100 + 
__GNUC_MINOR__ * 10 + __GNUC_PATCHLEVEL__ ) # if GCC_LIB_VERSION < 490 # define GXX_OLD_OPENMP_SHARED_TREATMENT 1 # endif /* 480 */ # if GCC_LIB_VERSION >= 900 # define GXX_WITH_OPENMP5_GPU_SUPPORT 1 # endif /* 900 */ #endif /* !__GNUC__ */ #if defined( __INTEL_COMPILER) # pragma omp parallel for default(none) firstprivate(has_ilev,has_lev,has_tm,var_val_dbl_in,var_val_dbl_out) private(dmn_cnt_in,dmn_cnt_out,dmn_id_in,dmn_id_out,dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dmn_nm,dmn_srt,grd_idx,has_mss_val,idx_in,idx_out,idx_tbl,in_id,lvl_idx_in,lvl_idx_out,lvl_nbr_in,lvl_nbr_out,mss_val_dbl,prs_ntp_in,prs_ntp_out,rcd,thr_idx,trv,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr) shared(dmn_id_ilev_in,dmn_id_ilev_out,dmn_id_lev_in,dmn_id_lev_out,dmn_id_tm_in,flg_ntp_log,flg_vrt_tm,fnc_nm,grd_nbr,idx_dbg,ilev_nbr_in,ilev_nbr_out,lev_nbr_in,lev_nbr_out,out_id,prs_mdp_in,prs_mdp_out,prs_ntf_in,prs_ntf_out,tm_idx,xtr_mth) #else /* !__INTEL_COMPILER */ # ifdef GXX_OLD_OPENMP_SHARED_TREATMENT # pragma omp parallel for default(none) firstprivate(has_ilev,has_lev,has_tm,var_val_dbl_in,var_val_dbl_out) private(dmn_cnt_in,dmn_cnt_out,dmn_id_in,dmn_id_out,dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dmn_nm,dmn_srt,grd_idx,has_mss_val,idx_in,idx_out,idx_tbl,in_id,lvl_idx_in,lvl_idx_out,lvl_nbr_in,lvl_nbr_out,mss_val_dbl,prs_ntp_in,prs_ntp_out,rcd,thr_idx,trv,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr) shared(dmn_id_ilev_in,dmn_id_ilev_out,dmn_id_lev_in,dmn_id_lev_out,dmn_id_tm_in,flg_ntp_log,flg_vrt_tm,fnc_nm,grd_nbr,idx_dbg,ilev_nbr_in,ilev_nbr_out,lev_nbr_in,lev_nbr_out,out_id,prs_mdp_in,prs_mdp_out,prs_ntf_in,prs_ntf_out,tm_idx,xtr_mth) # else /* !old g++ */ # if defined(GXX_WITH_OPENMP5_GPU_SUPPORT) && 0 # pragma omp target teams distribute parallel for # else # pragma omp parallel for firstprivate(has_ilev,has_lev,has_tm,var_val_dbl_in,var_val_dbl_out) 
private(dmn_cnt_in,dmn_cnt_out,dmn_id_in,dmn_id_out,dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dmn_nm,dmn_srt,grd_idx,has_mss_val,idx_in,idx_out,idx_tbl,in_id,lvl_idx_in,lvl_idx_out,lvl_nbr_in,lvl_nbr_out,mss_val_dbl,prs_ntp_in,prs_ntp_out,rcd,thr_idx,trv,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr) shared(dmn_id_ilev_in,dmn_id_ilev_out,dmn_id_lev_in,dmn_id_lev_out,dmn_id_tm_in,flg_ntp_log,flg_vrt_tm,grd_nbr,idx_dbg,ilev_nbr_in,ilev_nbr_out,lev_nbr_in,lev_nbr_out,out_id,prs_mdp_in,prs_mdp_out,prs_ntf_in,prs_ntf_out,tm_idx,xtr_mth) # endif /* !GCC > 9.0 */ # endif /* !GCC < 4.9 */ #endif /* !__INTEL_COMPILER */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; thr_idx=omp_get_thread_num(); in_id=trv_tbl->in_id_arr[thr_idx]; #ifdef _OPENMP if(nco_dbg_lvl_get() >= nco_dbg_grp && !thr_idx && !idx_tbl) (void)fprintf(fp_stdout,"%s: INFO %s reports regrid loop uses %d thread%s\n",nco_prg_nm_get(),fnc_nm,omp_get_num_threads(),(omp_get_num_threads() > 1) ? "s" : ""); if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s: INFO thread = %d, idx_tbl = %d, nm = %s\n",nco_prg_nm_get(),thr_idx,idx_tbl,trv.nm); #endif /* !_OPENMP */ if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s%s ",trv.flg_rgr ? "#" : "~",trv.nm); if(trv.flg_rgr){ /* Interpolate variable */ var_nm=trv.nm; var_typ_rgr=NC_DOUBLE; /* NB: Perform regridding in double precision */ var_typ_out=trv.var_typ; /* NB: Output type in file is same as input type */ var_sz_in=1L; var_sz_out=1L; rcd=nco_inq_varid(in_id,var_nm,&var_id_in); rcd=nco_inq_varid(out_id,var_nm,&var_id_out); rcd=nco_inq_varndims(in_id,var_id_in,&dmn_nbr_in); rcd=nco_inq_varndims(out_id,var_id_out,&dmn_nbr_out); dmn_nbr_max= dmn_nbr_in > dmn_nbr_out ? 
dmn_nbr_in : dmn_nbr_out; dmn_id_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_id_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int)); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* max() for both input and output grids */ dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); rcd=nco_inq_vardimid(out_id,var_id_out,dmn_id_out); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_in+dmn_idx); if(dmn_id_in[dmn_idx] == dmn_id_ilev_in) has_ilev=True; if(dmn_id_in[dmn_idx] == dmn_id_lev_in) has_lev=True; if(dmn_id_in[dmn_idx] == dmn_id_tm_in) has_tm=True; if(flg_vrt_tm && has_tm && dmn_id_in[dmn_idx] == dmn_id_tm_in){ dmn_cnt_in[dmn_idx]=1L; dmn_srt[dmn_idx]=tm_idx; }else{ dmn_srt[dmn_idx]=0L; } /* !flg_vrt_tm */ var_sz_in*=dmn_cnt_in[dmn_idx]; } /* !dmn_idx */ var_val_dbl_in=(double *)nco_malloc_dbg(var_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() input value buffer"); rcd=nco_get_vara(in_id,var_id_in,dmn_srt,dmn_cnt_in,var_val_dbl_in,var_typ_rgr); for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ /* Dimension count vector is same as input except for lvl dimension */ dmn_cnt_out[dmn_idx]=dmn_cnt_in[dmn_idx]; if(has_ilev && dmn_id_out[dmn_idx] == dmn_id_ilev_out) dmn_cnt_out[dmn_idx]=ilev_nbr_out; if(has_lev && dmn_id_out[dmn_idx] == dmn_id_lev_out) dmn_cnt_out[dmn_idx]=lev_nbr_out; var_sz_out*=dmn_cnt_out[dmn_idx]; } /* end loop over dimensions */ var_val_dbl_out=(double *)nco_malloc_dbg(var_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() output value buffer"); /* Missing value setup */ has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,&mss_val_dbl); if(!has_mss_val) mss_val_dbl=NC_FILL_DOUBLE; if(has_ilev){ /* Interpolate current variable from input interface pressure grid to output interface pressure grid */ lvl_nbr_in=ilev_nbr_in; lvl_nbr_out=ilev_nbr_out; prs_ntp_in=prs_ntf_in; 
prs_ntp_out=prs_ntf_out; }else{ /* Interpolate current variable from input midpoint pressure grid to output midpoint pressure grid */ lvl_nbr_in=lev_nbr_in; lvl_nbr_out=lev_nbr_out; prs_ntp_in=prs_mdp_in; prs_ntp_out=prs_mdp_out; } /* !ilev */ /* Procedure: Extract input/output coordinate/data arrays into 1D column order This enables actual interpolation code to be written for, or take advantage of, 1D interpolation routines After interpolating into 1D sequential memory, copy back to ND output and repeat */ double *crd_in=NULL; /* Input vertical coordinate (must be monotonic) */ double *crd_out=NULL; /* Output vertical coordinate (must be monotonic) */ double *dat_in=NULL; /* Input data (to be interpolated) on input vertical coordinate grid */ double *dat_out=NULL; /* Output data (interpolated) output vertical coordinate grid (i.e., the answer) */ double *crd_in_mnt; /* Input vertical coordinate reversed if necessary to be monotonically increasing */ double *crd_out_mnt; /* Output vertical coordinate reversed if necessary to be monotonically increasing */ double *dat_in_mnt; /* Input data (to be interpolated) reversed if necessary along with input grid */ double *dat_out_mnt; /* Output data (interpolated) reversed if necessary along with output grid */ nco_xtr_sct xtr_LHS; nco_xtr_sct xtr_RHS; size_t brk_lft_idx; size_t brk_rgt_idx; size_t in_idx; size_t in_nbr; size_t out_nbr; size_t out_idx; /* Default extrapolation uses nearest valid neighbor */ xtr_LHS.xtr_fll=True; xtr_LHS.xtr_vrb=False; xtr_LHS.typ_fll=xtr_mth; xtr_RHS.xtr_fll=True; xtr_RHS.xtr_vrb=False; xtr_RHS.typ_fll=xtr_mth; /* Special-case extrapolation methods allowed for all except missing-value extrapolation types */ if(xtr_mth != nco_xtr_fll_msv){ if(!strcmp(var_nm,"T") || !strcmp(var_nm,"ta")) xtr_RHS.typ_fll=nco_xtr_fll_tpt; else if(!strcmp(var_nm,"Z3") || !strcmp(var_nm,"zg")) xtr_LHS.typ_fll=xtr_RHS.typ_fll=nco_xtr_fll_gph; } /* !xtr_mth */ crd_in=(double *)nco_malloc(lvl_nbr_in*sizeof(double)); 
crd_out=(double *)nco_malloc(lvl_nbr_out*sizeof(double)); dat_in=(double *)nco_malloc(lvl_nbr_in*sizeof(double)); dat_out=(double *)nco_malloc(lvl_nbr_out*sizeof(double)); in_nbr=lvl_nbr_in; out_nbr=lvl_nbr_out; nco_bool in_ncr; /* [flg] Input coordinate monotonically increases */ nco_bool out_ncr; /* [flg] Output coordinate monotonically increases */ /* Determine monotonicity direction only once, based on first vertical column */ if(prs_ntp_in[grd_nbr]-prs_ntp_in[0] > 0.0) in_ncr=True; else in_ncr=False; out_ncr=True; if(out_nbr > 1) if(prs_ntp_out[grd_nbr]-prs_ntp_out[0] < 0.0) out_ncr=False; /* If necessary, allocate (once, and re-use it) additional memory to hold reversed arrays */ if(!in_ncr){ crd_in_mnt=(double *)nco_malloc(lvl_nbr_in*sizeof(double)); dat_in_mnt=(double *)nco_malloc(lvl_nbr_in*sizeof(double)); } /* !in_ncr */ if(!out_ncr){ crd_out_mnt=(double *)nco_malloc(lvl_nbr_out*sizeof(double)); dat_out_mnt=(double *)nco_malloc(lvl_nbr_out*sizeof(double)); } /* !out_ncr */ /* Constants and parameters for extrapolation */ const double gamma_moist=6.5/10000.0; /* [K/Pa] Temperature extrapolation assumes constant moist adiabatic lower atmosphere lapse rate dT/dp=constant=(6.5 K)/(100 mb) = (6.5 K)/(10000 Pa) */ const double Rd_rcp_g0=287.0/9.81; /* [K/Pa] Geopotential height extrapolation uses hypsometric equation Z2-Z1=(Rd*Tv_avg/g0)*ln(p1/p2)=(Rd*Tv_avg/g0)*(ln(p1)-ln(p2)) */ const double tpt_vrt_avg=288.0; /* [K] Mean virtual temperature assumed for geopotential height extrapolation */ nco_bool FIRST_WARNING_LHS; /* [flg] First warning for LHS extrapolation */ nco_bool FIRST_WARNING_RHS; /* [flg] First warning for RHS extrapolation */ if(tm_idx == 0){ /* Only print extrapolation warnings for first timestep to prevent noisy output NB: Algorithm prevents any warnings for extrapolations that appear after first timestep */ FIRST_WARNING_LHS=True; FIRST_WARNING_RHS=True; } /* !tm_idx */ /* Outer loop over columns */ for(grd_idx=0;grd_idx<grd_nbr;grd_idx++){ 
/* Initialize pseudo-1D variables with consecutive memory addresses to avoid indirection */ for(lvl_idx_in=0;lvl_idx_in<lvl_nbr_in;lvl_idx_in++){ idx_in=grd_idx+lvl_idx_in*grd_nbr; crd_in[lvl_idx_in]=prs_ntp_in[idx_in]; dat_in[lvl_idx_in]=var_val_dbl_in[idx_in]; } /* !lvl_idx_in */ for(lvl_idx_out=0;lvl_idx_out<lvl_nbr_out;lvl_idx_out++){ idx_out=grd_idx+lvl_idx_out*grd_nbr; crd_out[lvl_idx_out]=prs_ntp_out[idx_out]; } /* !lvl_idx_out */ /* Interpolation code easier to write/debug if crd_in and crd_out both monotonically increase However, monotonically decreasing coordinates useful in many cases, such as depth coordinate, and pressure levels arranged largest to smallest (favored by CMIP) Next code block reverses array(s) if necessary so coordinates monotonically increase Code uses crd_in_mnt, dat_in_mnt, crd_out_mnt where "_mnt" reminds of "monotonically increasing" assumption Following code lifted from CSZ's libcsz.a library source code ~/sw/c++/vec.hh */ if(in_ncr){ crd_in_mnt=crd_in; dat_in_mnt=dat_in; }else{ for(in_idx=0;in_idx<in_nbr;in_idx++){ crd_in_mnt[in_idx]=crd_in[in_nbr-in_idx-1]; dat_in_mnt[in_idx]=dat_in[in_nbr-in_idx-1]; } /* !in_idx */ } /* !in_ncr */ if(out_ncr){ crd_out_mnt=crd_out; dat_out_mnt=dat_out; }else{ for(out_idx=0;out_idx<out_nbr;out_idx++) crd_out_mnt[out_idx]=crd_out[out_nbr-out_idx-1]; } /* !out_ncr */ // Initialize bracketing index brk_lft_idx=0; // Loop over desired output coordinates for(out_idx=0;out_idx<out_nbr;out_idx++){ // Order of conditions is important since second condition is illegal if brk_lft_idx >= in_nbr while((brk_lft_idx < in_nbr) && (crd_in_mnt[brk_lft_idx] < crd_out_mnt[out_idx])){ brk_lft_idx++; } // !while brk_lft_idx--; // Handle identity interpolation separately to preserve symmetry in extrapolation code if(brk_lft_idx != in_nbr-1){ if(crd_in_mnt[brk_lft_idx+1] == crd_out_mnt[out_idx]){ dat_out_mnt[out_idx]=dat_in_mnt[brk_lft_idx+1]; if(brk_lft_idx == -1) brk_lft_idx=0; // Reset brk_lft_idx to 0 so next while 
loop works continue; // Jump to next iteration } // !crd_in_mnt } // !brk_lft_idx if(brk_lft_idx == -1){ // LHS Extrapolation required // Degenerate case: crd_out_mnt[out_idx] < crd_in_mnt[0] brk_lft_idx=0; // Reset brk_lft_idx to 0 so next while loop works if(xtr_LHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: WARNING %s reports variable %s column %lu output value dat_out_mnt[%lu] at coordinate crd_out_mnt[%lu] = %g requires LHS extrapolation beyond leftmost valid coordinate at crd_in_mnt[%lu] = %g. Nearest valid datum is dat_in_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,var_nm,grd_idx,out_idx,out_idx,crd_out_mnt[out_idx],brk_lft_idx,crd_in_mnt[brk_lft_idx],brk_lft_idx,dat_in_mnt[brk_lft_idx]); // Extrapolation options are presented in decreasing order of preference if(!xtr_LHS.xtr_fll){ (void)fprintf(fp_stdout,"%s: ERROR %s Full LHS extrapolation required but not permitted\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; } /* !xtr_LHS.xtr_fll */ switch(xtr_LHS.typ_fll){ case nco_xtr_fll_nil: dat_out_mnt[out_idx]=0.0; break; case nco_xtr_fll_msv: dat_out_mnt[out_idx]=mss_val_dbl; break; case nco_xtr_fll_ngh: dat_out_mnt[out_idx]=dat_in_mnt[0]; break; case nco_xtr_fll_lnr: dat_out_mnt[out_idx]=dat_in_mnt[0]- (crd_in_mnt[0]-crd_out_mnt[out_idx])* (dat_in_mnt[1]-dat_in_mnt[0])/(crd_in_mnt[1]-crd_in_mnt[0]); break; case nco_xtr_fll_gph: if(flg_ntp_log) /* Coordinates are already logarithmic in pressure */ dat_out_mnt[out_idx]=dat_in_mnt[0]+ Rd_rcp_g0*tpt_vrt_avg*(crd_in_mnt[0]-crd_out_mnt[out_idx]); else /* Interpolate with logarithm of pressure coordinates */ dat_out_mnt[out_idx]=dat_in_mnt[0]+ Rd_rcp_g0*tpt_vrt_avg*log(crd_in_mnt[0]/crd_out_mnt[out_idx]); if(FIRST_WARNING_LHS) (void)fprintf(fp_stdout,"%s: INFO %s geopotential height extrapolated upward towards space using hypsometric equation with constant global mean virtual temperature = %g for variable %s\n",nco_prg_nm_get(),fnc_nm,tpt_vrt_avg,var_nm); FIRST_WARNING_LHS=False; break; default: (void)fprintf(fp_stdout,"%s: 
ERROR %s Unknown xtr_LHS.typ_fll\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; break; } // !xtr_LHS.typ_fll if(xtr_LHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: INFO %s LHS extrapolation yields dat_out_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,out_idx,dat_out_mnt[out_idx]); }else if(brk_lft_idx < in_nbr-1){ // Normal case: crd_out_mnt is interpolable brk_rgt_idx=brk_lft_idx+1; // NB: brk_rgt_idx is ALWAYS greater than brk_lft_idx // This simulaneously meets two criteria: // 1. Divide-by-zero errors are impossible in the next step // 2. The identity interpolation is satisfied since crd_dlt == 0.0: // i.e., If crd_out_mnt[idx] == crd_in_mnt[brk_lft_idx] then dat_out_mnt[out_idx] := dat_in_mnt[brk_lft_idx] // Linearly interpolate dat_out_mnt[out_idx]= dat_in_mnt[brk_lft_idx]+ (crd_out_mnt[out_idx]-crd_in_mnt[brk_lft_idx])* (dat_in_mnt[brk_rgt_idx]-dat_in_mnt[brk_lft_idx])/ (crd_in_mnt[brk_rgt_idx]-crd_in_mnt[brk_lft_idx]); }else if(brk_lft_idx == in_nbr-1){ // RHS Extrapolation required // Degenerate case: brk_lft_idx is last element of crd_in_mnt brk_rgt_idx=brk_lft_idx; if(xtr_RHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: WARNING %s reports variable %s column %lu output value dat_out_mnt[%lu] at coordinate crd_out_mnt[%lu] = %g requires RHS extrapolation beyond rightmost valid coordinate at crd_in_mnt[%lu] = %g. 
Nearest valid datum is dat_in_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,var_nm,grd_idx,out_idx,out_idx,crd_out_mnt[out_idx],brk_rgt_idx,crd_in_mnt[brk_rgt_idx],brk_rgt_idx,dat_in_mnt[brk_rgt_idx]); // Extrapolation options are presented in decreasing order of preference if(!xtr_RHS.xtr_fll){ (void)fprintf(fp_stdout,"%s: ERROR %s Full RHS extrapolation required but not permitted\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; } /* !xtr_RHS.xtr_fll */ switch(xtr_RHS.typ_fll){ case nco_xtr_fll_nil: dat_out_mnt[out_idx]=0.0; break; case nco_xtr_fll_msv: dat_out_mnt[out_idx]=mss_val_dbl; break; case nco_xtr_fll_ngh: dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]; break; case nco_xtr_fll_lnr: dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]+ (crd_out_mnt[out_idx]-crd_in_mnt[in_nbr-1])* (dat_in_mnt[in_nbr-1]-dat_in_mnt[in_nbr-2])/ (crd_in_mnt[in_nbr-1]-crd_in_mnt[in_nbr-2]); break; case nco_xtr_fll_tpt: if(flg_ntp_log) /* Exponentiate so coordinates are linear in pressure */ dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]+ (exp(crd_out_mnt[out_idx])-exp(crd_in_mnt[in_nbr-1]))*gamma_moist; else /* Coordinates are already linear in pressure */ dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]+ (crd_out_mnt[out_idx]-crd_in_mnt[in_nbr-1])*gamma_moist; if(FIRST_WARNING_RHS) (void)fprintf(fp_stdout,"%s: INFO %s temperature extrapolated toward/into surface assuming constant moist adiabatic lapse rate = %g K/(100 mb) for variable %s\n",nco_prg_nm_get(),fnc_nm,gamma_moist*10000.0,var_nm); FIRST_WARNING_RHS=False; break; case nco_xtr_fll_gph: if(flg_ntp_log) /* Coordinates are already logarithmic in pressure */ dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]- Rd_rcp_g0*tpt_vrt_avg*(crd_out_mnt[out_idx]-crd_in_mnt[in_nbr-1]); else /* Interpolate with logarithm of pressure coordinates */ dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]- Rd_rcp_g0*tpt_vrt_avg*log(crd_out_mnt[out_idx]/crd_in_mnt[in_nbr-1]); if(FIRST_WARNING_RHS) (void)fprintf(fp_stdout,"%s: INFO %s geopotential height extrapolated toward/into surface using 
hypsometric equation with constant global mean virtual temperature = %g for variable %s\n",nco_prg_nm_get(),fnc_nm,tpt_vrt_avg,var_nm); FIRST_WARNING_RHS=False; break; default: (void)fprintf(fp_stdout,"%s: ERROR %s Unknown xtr_RHS\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; break; } // !xtr_RHS.typ_fll if(xtr_RHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: INFO %s RHS extrapolation yields dat_out_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,out_idx,dat_out_mnt[out_idx]); }else{ (void)fprintf(fp_stdout,"%s: ERROR %s Unforeseen value of brk_lft_idx\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; } // !RHS } // !out_idx /* Un-reverse output data to be on original grid */ if(!out_ncr) for(out_idx=0;out_idx<out_nbr;out_idx++) dat_out[out_idx]=dat_out_mnt[out_nbr-out_idx-1]; // End of vec.hh code /* Copy answers into output array */ for(lvl_idx_out=0;lvl_idx_out<lvl_nbr_out;lvl_idx_out++){ idx_out=grd_idx+lvl_idx_out*grd_nbr; var_val_dbl_out[idx_out]=dat_out[lvl_idx_out]; } /* !lvl_idx_out */ if(nco_dbg_lvl_get() >= nco_dbg_io && grd_idx == idx_dbg){ (void)fprintf(fp_stdout,"%s: DEBUG %s variable %s at idx_dbg = %lu\n",nco_prg_nm_get(),fnc_nm,var_nm,idx_dbg); for(out_idx=0;out_idx<out_nbr;out_idx++){ (void)fprintf(fp_stdout,"out_idx = %lu dat_out = %g\n",out_idx,dat_out[out_idx]); } /* !out_idx */ } /* !dbg */ } /* !grd_idx */ if(crd_in) crd_in=(double *)nco_free(crd_in); if(crd_out) crd_out=(double *)nco_free(crd_out); if(dat_in) dat_in=(double *)nco_free(dat_in); if(dat_out) dat_out=(double *)nco_free(dat_out); if(!in_ncr){ if(crd_in_mnt) crd_in_mnt=(double *)nco_free(crd_in_mnt); if(dat_in_mnt) dat_in_mnt=(double *)nco_free(dat_in_mnt); } /* !in_ncr */ if(!out_ncr){ if(crd_out_mnt) crd_out_mnt=(double *)nco_free(crd_out_mnt); if(dat_out_mnt) dat_out_mnt=(double *)nco_free(dat_out_mnt); } /* !out_ncr */ #pragma omp critical { /* begin OpenMP critical */ rcd=nco_put_vara(out_id,var_id_out,dmn_srt,dmn_cnt_out,var_val_dbl_out,var_typ_rgr); } /* end OpenMP critical */ 
if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); if(var_val_dbl_out) var_val_dbl_out=(double *)nco_free(var_val_dbl_out); if(var_val_dbl_in) var_val_dbl_in=(double *)nco_free(var_val_dbl_in); }else{ /* !trv.flg_rgr */ /* Use standard NCO copy routine for variables that are not regridded 20190511: Copy them only once */ if(tm_idx == 0){ #pragma omp critical { /* begin OpenMP critical */ (void)nco_cpy_var_val(in_id,out_id,(FILE *)NULL,(md5_sct *)NULL,trv.nm,trv_tbl); } /* end OpenMP critical */ } /* !tm_idx */ } /* !flg_rgr */ } /* !xtr */ } /* end (OpenMP parallel for) loop over idx_tbl */ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"\n"); if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s completion report: Variables interpolated = %d, copied unmodified = %d, omitted = %d, created = %d\n",nco_prg_nm_get(),fnc_nm,var_rgr_nbr,var_cpy_nbr,var_xcl_nbr,var_crt_nbr); } /* !tm_idx */ if(att_nm_fll_val) att_nm_fll_val=(char *)nco_free(att_nm_fll_val); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_ids_in) dmn_ids_in=(int *)nco_free(dmn_ids_in); if(dmn_ids_out) dmn_ids_out=(int *)nco_free(dmn_ids_out); if(ilev_nm_in) ilev_nm_in=(char *)nco_free(ilev_nm_in); if(lev_nm_in) lev_nm_in=(char *)nco_free(lev_nm_in); if(hyai_in) hyai_in=(double *)nco_free(hyai_in); if(hyam_in) hyam_in=(double *)nco_free(hyam_in); if(hybi_in) hybi_in=(double *)nco_free(hybi_in); if(hybm_in) hybm_in=(double *)nco_free(hybm_in); if(ps_in) ps_in=(double *)nco_free(ps_in); if(prs_mdp_in) prs_mdp_in=(double *)nco_free(prs_mdp_in); if(prs_ntf_in) prs_ntf_in=(double *)nco_free(prs_ntf_in); if(hyai_out) hyai_out=(double *)nco_free(hyai_out); if(hyam_out) hyam_out=(double *)nco_free(hyam_out); if(hybi_out) hybi_out=(double *)nco_free(hybi_out); 
if(hybm_out) hybm_out=(double *)nco_free(hybm_out); if(ilev_out) ilev_out=(double *)nco_free(ilev_out); if(lev_in) lev_in=(double *)nco_free(lev_in); if(lev_out) lev_out=(double *)nco_free(lev_out); if(ps_out) ps_out=(double *)nco_free(ps_out); if(prs_mdp_out) prs_mdp_out=(double *)nco_free(prs_mdp_out); if(prs_ntf_out) prs_ntf_out=(double *)nco_free(prs_ntf_out); return rcd; } /* !nco_ntp_vrt() */ int /* O [enm] Return code */ nco_rgr_wgt /* [fnc] Regrid with external weights */ (rgr_sct * const rgr, /* I/O [sct] Regridding structure */ trv_tbl_sct * const trv_tbl) /* I/O [sct] Traversal Table */ { /* Purpose: Regrid fields using external weights contained in a mapfile Examine ESMF, SCRIP, Tempest map-files: ncks --cdl -M -m ${DATA}/scrip/rmp_T42_to_POP43_conserv.nc | m ncks --cdl -M -m ${DATA}/maps/map_t42_to_fv129x256_aave.20150621.nc | m ncks --cdl -M -m ${DATA}/maps/map_ne30np4_to_ne120np4_tps.20150618.nc | m Test ESMF, SCRIP, Tempest map-files: ncks -D 5 -O --map=${DATA}/scrip/rmp_T42_to_POP43_conserv.nc ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc ncks -D 5 -O --map=${DATA}/maps/map_t42_to_fv129x256_aave.20150621.nc ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc ncks -D 5 -O --map=${DATA}/maps/map_ne30np4_to_ne120np4_tps.20150618.nc ${DATA}/ne30/rgr/ne30_1D.nc ~/foo.nc Mapfile formats ESMF, GRIDSPEC, SCRIP, and UGRID described here: http://www.earthsystemmodeling.org/esmf_releases/public/ESMF_6_3_0rp1/ESMF_refdoc/node3.html#sec:fileformat:scrip Conventions: grid_size: Number of gridcells (product of lat*lon) address: Source and destination index for each link pair num_links: Number of unique address pairs in remapping, i.e., size of sparse matrix num_wgts: Number of weights per vertice for given remapping (we only handle num_wgts == 1 below) = 1 Bilinear Destination grid value determined by weights times known source grid values at vertices of source quadrilateral that bounds destination point P One weight per vertice guarantees fxm but is not conservative Bilinear requires 
logically rectangular grid = 1 Distance-based: Distance-weighted uses values at num_neighbors points The weight is inversely proportional to the angular distance from the destination point to each neighbor on the source grid = 3 Second-order conservative: Described in Jones, P. W. (1999), Monthly Weather Review, 127, 2204-2210 First-order conservative schemes assume fluxes are constant within gridcell Destination fluxes are simple summations of sources fluxes weighted by overlap areas Old clm and bds remappers use a first-order algorithm Second-order improves this by using a first-order Taylor expansion of flux Source flux is centroid value plus directional offset determined by dot product of directional gradient and vector pointing from vertice to centroid. Three weights per vertice are centroid weight, weight times local theta-gradient from centroid to vertice, and weight times local phi-gradient from centroid to vertice. = 4 Bicubic: The four weights are gradients in each direction plus a cross-gradient term Same principle as bilinear, but more weights per vertice Bicubic requires logically rectangular grid wgt: Maximum number of source cells contributing to destination cell is not a dimension in SCRIP remapping files because SCRIP stores everying in 1-D sparse matrix arrays Definition of sparse matrix formulations and normalization terminology, SCRIP manual p. 
8, 13, 16: for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ // Remap source function f = 1 in all unmasked source gridcells, zero elsewhere, to function F on destination grid // Normalization: fractional area (fracarea) (F = 1 where destination overlaps umasked source grid) dst[ddr_dst[lnk_idx]]+=src[ddr_src[lnk_idx]]*remap_matrix[lnk_idx,0]; // Normalization: destination area (destarea) (weights in each destination cell sum to its area frcation) dst[ddr_dst[lnk_idx]]+=src[ddr_src[lnk_idx]]*remap_matrix[lnk_idx,0]/dst_area[ddr_dst[lnk_idx]]; // Normalization: none (F = angular area that participates in remapping) dst[ddr_dst[lnk_idx]]+=src[ddr_src[lnk_idx]]*remap_matrix[lnk_idx,0]/(dst_area[ddr_dst[lnk_idx]]*dst_frc[ddr_dst[lnk_idx]); } // end loop over lnk Documentation: NCL special cases described in popRemap.ncl, e.g., at https://github.com/yyr/ncl/blob/master/ni/src/examples/gsun/popRemap.ncl ESMF Regridding Status: https://www.earthsystemcog.org/projects/esmf Sample regrid T42->POP43, SCRIP: ncks -O --map=${DATA}/scrip/rmp_T42_to_POP43_conserv.nc ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc */ const char fnc_nm[]="nco_rgr_wgt()"; /* [sng] Function name */ char *fl_in; char *fl_pth_lcl=NULL; const double rdn2dgr=180.0/M_PI; const double dgr2rdn=M_PI/180.0; const double eps_rlt=1.0e-14; /* [frc] Round-off error tolerance */ double lat_wgt_ttl=0.0; /* [frc] Actual sum of quadrature weights */ double area_out_ttl=0.0; /* [frc] Exact sum of area */ int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */ int fll_md_old; /* [enm] Old fill mode */ int in_id; /* I [id] Input netCDF file ID */ int md_open; /* [enm] Mode flag for nc_open() call */ int out_id; /* I [id] Output netCDF file ID */ int rcd=NC_NOERR; int dmn_idx; /* [idx] Dimension index */ int dst_grid_corners_id; /* [id] Destination grid corners dimension ID */ int dst_grid_rank_id; /* [id] Destination grid rank dimension ID */ int dst_grid_size_id; 
/* [id] Destination grid size dimension ID */ int num_links_id; /* [id] Number of links dimension ID */ int num_wgts_id=NC_MIN_INT; /* [id] Number of weights dimension ID */ int src_grid_corners_id; /* [id] Source grid corners dimension ID */ int src_grid_rank_id; /* [id] Source grid rank dimension ID */ int src_grid_size_id; /* [id] Source grid size dimension ID */ long int lat_idx; long int lon_idx; short int bnd_idx; nco_bool FL_RTR_RMT_LCN; nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ nco_bool flg_dgn_area_out=False; /* [flg] Diagnose area_out from grid boundaries */ nco_bool flg_bnd_1D_usable=False; /* [flg] Usable 1D cell vertices exist */ nco_bool flg_stg=rgr->flg_stg; /* [flg] Write staggered grid with FV output */ nco_grd_2D_typ_enm nco_grd_2D_typ=nco_grd_2D_nil; /* [enm] Two-dimensional grid-type enum */ nco_grd_lat_typ_enm nco_grd_lat_typ=nco_grd_lat_nil; /* [enm] Latitude grid-type enum */ nco_grd_lon_typ_enm nco_grd_lon_typ=nco_grd_lon_nil; /* [enm] Longitude grid-type enum */ nco_mpf_sct mpf; size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stderr,"%s: INFO %s obtaining mapping weights from %s\n",nco_prg_nm_get(),fnc_nm,rgr->fl_map); /* Duplicate (because nco_fl_mk_lcl() free()'s fl_in) */ fl_in=(char *)strdup(rgr->fl_map); /* Make sure file is on local system and is readable or die trying */ fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); /* Open file using appropriate buffer size hints and verbosity */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id); /* Identify mapping file type using string generated by weight-generator: 
ESMF: title = "ESMF Offline Regridding Weight Generator" ESMF_weight_only: title = "ESMF Regrid Weight Generator" NCO: Title = "netCDF Operators (NCO) Offline Regridding Weight Generator" SCRIP: conventions = "SCRIP" Tempest: Title = "TempestRemap Offline Regridding Weight Generator" */ char *att_val; char *att_cnv_val=NULL; char *att_gnr_val=NULL; char *att_ttl_val=NULL; char *cnv_sng=NULL; /* netCDF standard is uppercase Conventions, though some models user lowercase */ char att_sng_Cnv[]="Conventions"; /* [sng] Unidata standard string (uppercase) */ char att_sng_cnv[]="conventions"; /* [sng] Unidata non-standard string (lowercase) */ char att_sng_gnr[]="weight_generator"; /* [sng] CMIP6 standard string */ char att_sng_Ttl[]="Title"; /* [sng] NCO and Tempest use "Title" attribute, and Tempest does not use "Conventions" */ char att_sng_ttl[]="title"; /* [sng] ERWG 7.1 weight_only uses "title" not "Conventions" attribute */ char name0_sng[]="name0"; /* [sng] Attribute where Tempest stores least-rapidly-varying dimension name */ nco_rgr_mpf_typ_enm nco_rgr_mpf_typ=nco_rgr_mpf_nil; /* [enm] Type of remapping file */ nco_rgr_typ_enm nco_rgr_typ=nco_rgr_grd_nil; /* [enm] Type of grid conversion */ /* Look for map-type signature in [cC]onventions or [tT]itle attribute */ att_cnv_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_cnv); if(!att_cnv_val) att_cnv_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_Cnv); att_gnr_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_gnr); att_ttl_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_ttl); if(!att_ttl_val) att_ttl_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_Ttl); /* Either "[cC]onventions" or "[tT]itle" attribute determines map-file type... 
*/ if(att_cnv_val && strstr(att_cnv_val,"SCRIP")) nco_rgr_mpf_typ=nco_rgr_mpf_SCRIP; if(nco_rgr_mpf_typ == nco_rgr_mpf_nil && att_ttl_val){ if(strstr(att_ttl_val,"ESMF Offline Regridding Weight Generator")) nco_rgr_mpf_typ=nco_rgr_mpf_ESMF; else if(strstr(att_ttl_val,"netCDF Operators")) nco_rgr_mpf_typ=nco_rgr_mpf_NCO; else if(strstr(att_ttl_val,"Tempest")) nco_rgr_mpf_typ=nco_rgr_mpf_Tempest; else if(strstr(att_ttl_val,"ESMF Regrid Weight Generator")) nco_rgr_mpf_typ=nco_rgr_mpf_ESMF_weight_only; } /* !att_ttl_val */ if(nco_rgr_mpf_typ == nco_rgr_mpf_nil && att_cnv_val){ if(strstr(att_cnv_val,"NCO")) nco_rgr_mpf_typ=nco_rgr_mpf_NCO; } /* !att_gnr_val */ if(nco_rgr_mpf_typ == nco_rgr_mpf_nil && att_gnr_val){ if(strstr(att_gnr_val,"NCO")) nco_rgr_mpf_typ=nco_rgr_mpf_NCO; } /* !att_gnr_val */ if(nco_rgr_mpf_typ == nco_rgr_mpf_nil){ (void)fprintf(stderr,"%s: WARNING %s unable to discern map-file type from global attributes \"[cC]onventions\" = \"%s\" and/or \"[tT]itle\" = \"%s\" and/or \"weight_generator\" = \"%s\"\n",nco_prg_nm_get(),fnc_nm,att_cnv_val ? att_cnv_val : "",att_ttl_val ? att_ttl_val : "",att_gnr_val ? 
att_gnr_val : ""); nco_rgr_mpf_typ=nco_rgr_mpf_unknown; } /* !nco_rgr_mpf_typ */ if(att_cnv_val) att_cnv_val=(char *)nco_free(att_cnv_val); if(att_gnr_val) att_gnr_val=(char *)nco_free(att_gnr_val); if(att_ttl_val) att_ttl_val=(char *)nco_free(att_ttl_val); switch(nco_rgr_mpf_typ){ case nco_rgr_mpf_SCRIP: rcd+=nco_inq_dimid(in_id,"src_grid_size",&src_grid_size_id); rcd+=nco_inq_dimid(in_id,"dst_grid_size",&dst_grid_size_id); rcd+=nco_inq_dimid(in_id,"src_grid_corners",&src_grid_corners_id); rcd+=nco_inq_dimid(in_id,"dst_grid_corners",&dst_grid_corners_id); rcd+=nco_inq_dimid(in_id,"src_grid_rank",&src_grid_rank_id); rcd+=nco_inq_dimid(in_id,"dst_grid_rank",&dst_grid_rank_id); rcd+=nco_inq_dimid(in_id,"num_links",&num_links_id); rcd+=nco_inq_dimid(in_id,"num_wgts",&num_wgts_id); break; case nco_rgr_mpf_ESMF_weight_only: rcd+=nco_inq_dimid(in_id,"n_s",&num_links_id); break; case nco_rgr_mpf_ESMF: case nco_rgr_mpf_NCO: case nco_rgr_mpf_Tempest: case nco_rgr_mpf_unknown: rcd+=nco_inq_dimid(in_id,"n_a",&src_grid_size_id); rcd+=nco_inq_dimid(in_id,"n_b",&dst_grid_size_id); rcd+=nco_inq_dimid(in_id,"nv_a",&src_grid_corners_id); rcd+=nco_inq_dimid(in_id,"nv_b",&dst_grid_corners_id); rcd+=nco_inq_dimid(in_id,"src_grid_rank",&src_grid_rank_id); rcd+=nco_inq_dimid(in_id,"dst_grid_rank",&dst_grid_rank_id); if(nco_rgr_mpf_typ != nco_rgr_mpf_Tempest){ rcd+=nco_inq_dimid_flg(in_id,"num_wgts",&num_wgts_id); if(rcd != NC_NOERR){ if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s reports map-file does not contain \"num_wgts\" dimension. ERWG always produces this as an orphan dimension, so post-processing could have removed it without harming other map-file fields. 
No harm, no foul.\n",nco_prg_nm_get(),fnc_nm); rcd=NC_NOERR; } /* !rcd */ } /* !nco_rgr_mpf_Tempest */ rcd+=nco_inq_dimid(in_id,"n_s",&num_links_id); break; default: (void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unknown map-file type\n",nco_prg_nm_get(),fnc_nm); nco_dfl_case_generic_err(); /* NB: This return never executes because nco_dfl_case_generic_err() calls exit() Return placed here to suppress clang -Wsometimes-uninitialized warnings This is done many other times throughout the code, though explained only once, here */ return NCO_ERR; break; } /* end switch */ /* Use dimension IDs to get dimension sizes */ rcd+=nco_inq_dimlen(in_id,num_links_id,&mpf.num_links); if(nco_rgr_mpf_typ != nco_rgr_mpf_ESMF_weight_only){ rcd+=nco_inq_dimlen(in_id,src_grid_size_id,&mpf.src_grid_size); rcd+=nco_inq_dimlen(in_id,dst_grid_size_id,&mpf.dst_grid_size); rcd+=nco_inq_dimlen(in_id,src_grid_corners_id,&mpf.src_grid_corners); rcd+=nco_inq_dimlen(in_id,dst_grid_corners_id,&mpf.dst_grid_corners); rcd+=nco_inq_dimlen(in_id,src_grid_rank_id,&mpf.src_grid_rank); rcd+=nco_inq_dimlen(in_id,dst_grid_rank_id,&mpf.dst_grid_rank); /* TempestRemap does not generate num_wgts */ if(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest || num_wgts_id == NC_MIN_INT){ mpf.num_wgts=int_CEWI; }else{ rcd+=nco_inq_dimlen(in_id,num_wgts_id,&mpf.num_wgts); } /* !num_wgts_id */ assert(mpf.src_grid_size < INT_MAX && mpf.dst_grid_size < INT_MAX); }else{ mpf.src_grid_size=long_CEWI; mpf.dst_grid_size=long_CEWI; mpf.src_grid_corners=long_CEWI; mpf.dst_grid_corners=long_CEWI; mpf.src_grid_rank=long_CEWI; mpf.dst_grid_rank=long_CEWI; mpf.num_wgts=int_CEWI; } /* !ESMF_weight_only */ cnv_sng=strdup("normalization"); nco_rgr_nrm_typ_enm nco_rgr_nrm_typ=nco_rgr_nrm_nil; att_val=nco_char_att_get(in_id,NC_GLOBAL,cnv_sng); if(att_val){ if(strstr(att_val,"fracarea")) nco_rgr_nrm_typ=nco_rgr_nrm_fracarea; /* 20190912: map_gx1v6T_to_1x1_bilin.nc and map_0.1T_tripole_to_0.1x0.1_bilin.nc store "fracarea" in 
normalization attribute. I think NCAR created both maps for POP, probably by running ERWG with option --norm_type=fracarea. Hence "fracarea" seems to be the NCAR-way of guaranteeing that ESMF re-normalization is not performed by default. */ if(strstr(att_val,"destarea")) nco_rgr_nrm_typ=nco_rgr_nrm_destarea; /* ESMF conserve "aave" and bilinear "bilin" generate "destarea" by default */ if(strstr(att_val,"none")) nco_rgr_nrm_typ=nco_rgr_nrm_none; if(att_val) att_val=(char *)nco_free(att_val); }else{ /* 20150712: Tempest does not store a normalization attribute 20170620: ESMF weight_only does not store a normalization attribute 20190312: NCO does not yet store a normalization attribute */ if(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest || nco_rgr_mpf_typ == nco_rgr_mpf_NCO || nco_rgr_mpf_typ == nco_rgr_mpf_unknown || nco_rgr_mpf_typ == nco_rgr_mpf_ESMF_weight_only) nco_rgr_nrm_typ=nco_rgr_nrm_unknown; } /* endif normalization */ assert(nco_rgr_nrm_typ != nco_rgr_nrm_nil); if(cnv_sng) cnv_sng=(char *)nco_free(cnv_sng); cnv_sng=strdup("map_method"); nco_rgr_mth_typ_enm nco_rgr_mth_typ=nco_rgr_mth_nil; att_val=nco_char_att_get(in_id,NC_GLOBAL,cnv_sng); if(att_val){ if(strcasestr(att_val,"Conservative")) nco_rgr_mth_typ=nco_rgr_mth_conservative; if(strcasestr(att_val,"Bilinear")) nco_rgr_mth_typ=nco_rgr_mth_bilinear; if(strcasestr(att_val,"none")) nco_rgr_mth_typ=nco_rgr_mth_none; if(att_val) att_val=(char *)nco_free(att_val); }else{ /* Tempest does not store a map_method attribute */ if(nco_rgr_mpf_typ == nco_rgr_mpf_NCO || nco_rgr_mpf_typ == nco_rgr_mpf_Tempest || nco_rgr_mpf_typ == nco_rgr_mpf_unknown) nco_rgr_mth_typ=nco_rgr_mth_unknown; } /* endif */ if(nco_rgr_mth_typ == nco_rgr_mth_nil) (void)fprintf(stdout,"%s: WARNING %s reports map global attribute %s = %s does not match SCRIP/ESMF conventions that support only values of \"Conservative\" and \"Bilinear\" for this attribute. 
Proceeding anyway...\n",nco_prg_nm_get(),fnc_nm,cnv_sng,att_val); if(cnv_sng) cnv_sng=(char *)nco_free(cnv_sng); if(nco_dbg_lvl_get() >= nco_dbg_scl){ (void)fprintf(stderr,"%s: INFO %s regridding input metadata and grid sizes: ",nco_prg_nm_get(),fnc_nm); (void)fprintf(stderr,"mapfile_generator = %s, map_method = %s, normalization = %s, src_grid_size = n_a = %li, dst_grid_size = n_b = %li, src_grid_corners = nv_a = %li, dst_grid_corners = nv_b = %li, src_grid_rank = %li, dst_grid_rank = %li, num_links = n_s = %li, num_wgts = %li\n",nco_rgr_mpf_sng(nco_rgr_mpf_typ),nco_rgr_mth_sng(nco_rgr_mth_typ),nco_rgr_nrm_sng(nco_rgr_nrm_typ),mpf.src_grid_size,mpf.dst_grid_size,mpf.src_grid_corners,mpf.dst_grid_corners,mpf.src_grid_rank,mpf.dst_grid_rank,mpf.num_links,mpf.num_wgts); } /* endif dbg */ /* 20190726: Allow normalization type to be "none" for bilinear regridding which UKMO SCRIP files set to "none"*/ if(nco_rgr_mth_typ == nco_rgr_mth_conservative && nco_rgr_nrm_typ == nco_rgr_nrm_none){ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports requested normalization type = %s is not yet supported. Specifically, masks specified by a mask variable (dst_grid_imask,mask_b) are ignored. More specifically, any destination mask information is assumed to be built into the weight array so that no source points will contribute to masked locations. 
Talk to Charlie if you want this changed.\n",nco_prg_nm_get(),fnc_nm,nco_rgr_nrm_sng(nco_rgr_nrm_typ)); nco_exit(EXIT_FAILURE); } /* !msk */ /* Got to here in bullet-proofing code for weight-only map-files */ if(nco_rgr_mpf_typ == nco_rgr_mpf_ESMF_weight_only) (void)fprintf(stderr,"%s: WARNING %s reached end of ESMF_weight_only section\n",nco_prg_nm_get(),fnc_nm); assert(nco_rgr_mpf_typ != nco_rgr_mpf_ESMF_weight_only); /* Set type of grid conversion */ if(mpf.src_grid_rank == 1 && mpf.dst_grid_rank == 1) nco_rgr_typ=nco_rgr_grd_1D_to_1D; if(mpf.src_grid_rank == 1 && mpf.dst_grid_rank == 2) nco_rgr_typ=nco_rgr_grd_1D_to_2D; if(mpf.src_grid_rank == 2 && mpf.dst_grid_rank == 1) nco_rgr_typ=nco_rgr_grd_2D_to_1D; if(mpf.src_grid_rank == 2 && mpf.dst_grid_rank == 2) nco_rgr_typ=nco_rgr_grd_2D_to_2D; assert(nco_rgr_typ != nco_rgr_grd_nil); /* Save typing later */ nco_bool flg_grd_in_1D=False; nco_bool flg_grd_in_2D=False; nco_bool flg_grd_out_1D=False; nco_bool flg_grd_out_2D=False; if(nco_rgr_typ == nco_rgr_grd_1D_to_1D || nco_rgr_typ == nco_rgr_grd_1D_to_2D) flg_grd_in_1D=True; if(nco_rgr_typ == nco_rgr_grd_2D_to_1D || nco_rgr_typ == nco_rgr_grd_2D_to_2D) flg_grd_in_2D=True; if(nco_rgr_typ == nco_rgr_grd_1D_to_1D || nco_rgr_typ == nco_rgr_grd_2D_to_1D) flg_grd_out_1D=True; if(nco_rgr_typ == nco_rgr_grd_1D_to_2D || nco_rgr_typ == nco_rgr_grd_2D_to_2D) flg_grd_out_2D=True; int dmn_nbr_hrz_crd; /* [nbr] Number of horizontal dimensions in output grid */ if(flg_grd_out_2D) dmn_nbr_hrz_crd=2; else dmn_nbr_hrz_crd=1; /* Obtain grid values necessary to compute output latitude and longitude coordinates */ int area_dst_id; /* [id] Area variable ID */ int col_src_adr_id; /* [id] Source address (col) variable ID */ int dmn_sz_in_int_id; /* [id] Source grid dimension sizes ID */ int dmn_sz_out_int_id; /* [id] Destination grid dimension sizes ID */ int dst_grd_crn_lat_id; /* [id] Destination grid corner latitudes variable ID */ int dst_grd_crn_lon_id; /* [id] Destination grid corner 
longitudes variable ID */ int dst_grd_ctr_lat_id; /* [id] Destination grid center latitudes variable ID */ int dst_grd_ctr_lon_id; /* [id] Destination grid center longitudes variable ID */ int frc_dst_id; /* [id] Fraction variable ID */ int msk_dst_id=NC_MIN_INT; /* [id] Mask variable ID */ int row_dst_adr_id; /* [id] Destination address (row) variable ID */ int wgt_raw_id; /* [id] Remap matrix variable ID */ switch(nco_rgr_mpf_typ){ /* Obtain fields whose name depends on mapfile type */ case nco_rgr_mpf_SCRIP: rcd+=nco_inq_varid(in_id,"dst_grid_area",&area_dst_id); /* ESMF: area_b */ rcd+=nco_inq_varid(in_id,"dst_grid_center_lon",&dst_grd_ctr_lon_id); /* ESMF: xc_b */ rcd+=nco_inq_varid(in_id,"dst_grid_center_lat",&dst_grd_ctr_lat_id); /* ESMF: yc_b */ rcd+=nco_inq_varid(in_id,"dst_grid_corner_lon",&dst_grd_crn_lon_id); /* ESMF: xv_b */ rcd+=nco_inq_varid(in_id,"dst_grid_corner_lat",&dst_grd_crn_lat_id); /* ESMF: yv_b */ rcd+=nco_inq_varid(in_id,"dst_grid_frac",&frc_dst_id); /* ESMF: frac_b */ rcd+=nco_inq_varid(in_id,"dst_address",&row_dst_adr_id); /* ESMF: row */ rcd+=nco_inq_varid(in_id,"src_address",&col_src_adr_id); /* ESMF: col */ rcd+=nco_inq_varid(in_id,"remap_matrix",&wgt_raw_id); /* NB: remap_matrix[num_links,num_wgts] != S[n_s] */ break; case nco_rgr_mpf_ESMF: case nco_rgr_mpf_ESMF_weight_only: case nco_rgr_mpf_NCO: case nco_rgr_mpf_Tempest: case nco_rgr_mpf_unknown: if(nco_rgr_mpf_typ != nco_rgr_mpf_ESMF_weight_only){ rcd+=nco_inq_varid(in_id,"area_b",&area_dst_id); /* SCRIP: dst_grid_area */ rcd+=nco_inq_varid(in_id,"xc_b",&dst_grd_ctr_lon_id); /* SCRIP: dst_grid_center_lon */ rcd+=nco_inq_varid(in_id,"yc_b",&dst_grd_ctr_lat_id); /* SCRIP: dst_grid_center_lat */ rcd+=nco_inq_varid(in_id,"xv_b",&dst_grd_crn_lon_id); /* SCRIP: dst_grid_corner_lon */ rcd+=nco_inq_varid(in_id,"yv_b",&dst_grd_crn_lat_id); /* SCRIP: dst_grid_corner_lat */ rcd+=nco_inq_varid(in_id,"frac_b",&frc_dst_id); /* SCRIP: dst_grid_frac */ } /* !nco_rgr_mpf_ESMF_weight_only */ 
rcd+=nco_inq_varid(in_id,"row",&row_dst_adr_id); /* SCRIP: dst_address */ rcd+=nco_inq_varid(in_id,"col",&col_src_adr_id); /* SCRIP: src_address */ rcd+=nco_inq_varid(in_id,"S",&wgt_raw_id); /* NB: remap_matrix[num_links,num_wgts] != S[n_s] */ break; default: (void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unknown map file type\n",nco_prg_nm_get(),fnc_nm); nco_dfl_case_generic_err(); /* NB: This return never executes because nco_dfl_case_generic_err() calls exit() Return placed here to suppress clang -Wsometimes-uninitialized warnings This is done many other times throughout the code, though explained only once, here */ return NCO_ERR; break; } /* end switch */ /* Obtain fields whose presence depends on mapfile type */ nco_bool flg_msk_out=rgr->flg_msk_out; /* [flg] Add mask to output */ msk_dst_id=NC_MIN_INT; if(flg_msk_out){ switch(nco_rgr_mpf_typ){ case nco_rgr_mpf_ESMF: case nco_rgr_mpf_NCO: rcd+=nco_inq_varid(in_id,"mask_b",&msk_dst_id); /* SCRIP: dst_grid_imask */ break; case nco_rgr_mpf_SCRIP: rcd+=nco_inq_varid(in_id,"dst_grid_imask",&msk_dst_id); /* ESMF: mask_b */ break; case nco_rgr_mpf_Tempest: case nco_rgr_mpf_unknown: /* 20190315: TempestRemap did not propagate mask_b (or mask_a) until ~201902 */ rcd+=nco_inq_varid_flg(in_id,"mask_b",&msk_dst_id); if(rcd == NC_ENOTVAR){ (void)fprintf(stderr,"%s: INFO %s reports map-file lacks mask_b. %sContinuing anyway without masks...\n",nco_prg_nm_get(),fnc_nm,(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest) ? "Probably this TempestRemap map-file was created before ~201902 when TR began to propagate mask_a/b variables." 
: ""); } /* !rcd */ rcd=NC_NOERR; break; default: (void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unknown map-file type\n",nco_prg_nm_get(),fnc_nm); nco_dfl_case_generic_err(); } /* !nco_rgr_mpf_typ */ if(msk_dst_id == NC_MIN_INT) flg_msk_out=False; } /* !flg_msk_out */ /* Obtain fields whose names are independent of mapfile type */ rcd+=nco_inq_varid(in_id,"src_grid_dims",&dmn_sz_in_int_id); rcd+=nco_inq_varid(in_id,"dst_grid_dims",&dmn_sz_out_int_id); int lon_psn_src; /* [idx] Ordinal position of longitude in rectangular source grid dimension-size array */ int lat_psn_src; /* [idx] Ordinal position of latitude in rectangular source grid dimension-size array */ int lon_psn_dst=int_CEWI; /* [idx] Ordinal position of longitude in rectangular destination grid dimension-size array */ int lat_psn_dst=int_CEWI; /* [idx] Ordinal position of latitude in rectangular destination grid dimension-size array */ if(flg_grd_in_2D){ lon_psn_src=0; /* SCRIP introduced [lon,lat] convention because more natural for Fortran */ lat_psn_src=1; if(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest){ /* Until 20150814, Tempest stored [src/dst]_grid_dims as [lat,lon] unlike SCRIP's [lon,lat] order Newer behavior follows SCRIP [lon,lat] order Challenge: Support both older and newer Tempest mapfiles Tempest (unlike SCRIP and ESMF) annotates mapfile [src/dst]_grid_dims with attributes that identify axis to which each element of [src/dst]_grid_dims refers Solution: Use Tempest mapfile [src/dst]_grid_dims attributes "name0" and/or "name1" to determine if axes' positions follow old order */ att_val=nco_char_att_get(in_id,dmn_sz_in_int_id,name0_sng); if(att_val){ if(strstr(att_val,"lat")){ lon_psn_src=1; lat_psn_src=0; } /* !lat */ if(att_val) att_val=(char *)nco_free(att_val); } /* end rcd && att_typ */ } /* !Tempest */ } /* !flg_grd_in_2D */ if(flg_grd_out_2D){ lon_psn_dst=0; lat_psn_dst=1; if(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest){ 
att_val=nco_char_att_get(in_id,dmn_sz_in_int_id,name0_sng); if(att_val){ if(strstr(att_val,"lat")){ lon_psn_dst=1; lat_psn_dst=0; } /* !lat */ if(att_val) att_val=(char *)nco_free(att_val); } /* end rcd && att_typ */ } /* !Tempest */ } /* !flg_grd_out_2D */ const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */ const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */ const int dmn_nbr_3D=3; /* [nbr] Rank of 3-D grid variables */ const int dmn_nbr_grd_max=dmn_nbr_3D; /* [nbr] Maximum rank of grid variables */ double *area_out; /* [sr] Area of destination grid */ double *frc_out=NULL; /* [frc] Fraction of destination grid */ double *lat_bnd_out=NULL_CEWI; /* [dgr] Latitude boundaries of rectangular destination grid */ double *lat_crn_out=NULL; /* [dgr] Latitude corners of rectangular destination grid */ double *lat_ctr_out=NULL_CEWI; /* [dgr] Latitude centers of rectangular destination grid */ double *lat_ntf_out=NULL; /* [dgr] Latitude interfaces of rectangular destination grid */ double *lat_wgt_out=NULL; /* [dgr] Latitude weights of rectangular destination grid */ double *lon_bnd_out=NULL_CEWI; /* [dgr] Longitude boundaries of rectangular destination grid */ double *lon_crn_out=NULL; /* [dgr] Longitude corners of rectangular destination grid */ double *lon_ctr_out=NULL_CEWI; /* [dgr] Longitude centers of rectangular destination grid */ double *lon_ntf_out=NULL; /* [dgr] Longitude interfaces of rectangular destination grid */ double *slat_ctr_out=NULL_CEWI; /* [dgr] Latitude centers of staggered FV destination grid */ double *slat_wgt_out=NULL_CEWI; /* [frc] Latitude weights of staggered FV destination grid */ double *slon_ctr_out=NULL_CEWI; /* [dgr] Longitude centers of staggered FV destination grid */ double *wgt_raw; /* [frc] Remapping weights */ int *col_src_adr; /* [idx] Source address (col) */ int *row_dst_adr; /* [idx] Destination address (row) */ int *msk_out=NULL; /* [flg] Mask on destination grid */ int *dmn_sz_in_int; /* [nbr] Array of 
dimension sizes of source grid */ int *dmn_sz_out_int; /* [nbr] Array of dimension sizes of destination grid */ long *dmn_cnt_in=NULL; long *dmn_cnt_out=NULL; long *dmn_cnt=NULL; long *dmn_srt=NULL; long *dmn_srd=NULL; long idx; /* [idx] Counting index for unrolled grids */ /* Allocate space to hold dimension metadata for destination grid */ dmn_srt=(long *)nco_malloc(dmn_nbr_grd_max*sizeof(long)); dmn_cnt=(long *)nco_malloc(dmn_nbr_grd_max*sizeof(long)); dmn_srd=(long *)nco_malloc(dmn_nbr_grd_max*sizeof(long)); dmn_srt[0]=0L; dmn_cnt[0]=mpf.src_grid_rank; dmn_sz_in_int=(int *)nco_malloc(mpf.src_grid_rank*nco_typ_lng((nc_type)NC_INT)); rcd=nco_get_vara(in_id,dmn_sz_in_int_id,dmn_srt,dmn_cnt,dmn_sz_in_int,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=mpf.dst_grid_rank; dmn_sz_out_int=(int *)nco_malloc(mpf.dst_grid_rank*nco_typ_lng((nc_type)NC_INT)); rcd=nco_get_vara(in_id,dmn_sz_out_int_id,dmn_srt,dmn_cnt,dmn_sz_out_int,(nc_type)NC_INT); /* Check-for and workaround faulty Tempest and MPAS-O/I grid sizes */ if(flg_grd_in_1D && (mpf.src_grid_size != dmn_sz_in_int[0])){ (void)fprintf(stdout,"%s: INFO %s reports input grid dimension sizes disagree: mpf.src_grid_size = %ld != %d = dmn_sz_in[0]. Problem may be caused by incorrect src_grid_dims variable. This is a known issue with some TempestRemap mapfiles generated prior to ~20150901, and in some ESMF mapfiles for MPAS-O/I. This problem can be safely ignored if workaround succeeds. Attempting workaround ...\n",nco_prg_nm_get(),fnc_nm,mpf.src_grid_size,dmn_sz_in_int[0]); dmn_sz_in_int[0]=mpf.src_grid_size; } /* !bug */ if(flg_grd_out_1D && (mpf.dst_grid_size != dmn_sz_out_int[0])){ (void)fprintf(stdout,"%s: INFO %s reports output grid dimension sizes disagree: mpf.dst_grid_size = %ld != %d = dmn_sz_out[0]. Problem may be caused by incorrect dst_grid_dims variable. This is a known issue with some TempestRemap mapfiles generated prior to ~20150901, and in some ESMF mapfiles for MPAS-O/I. 
This problem can be safely ignored if workaround succeeds. Attempting workaround ...\n",nco_prg_nm_get(),fnc_nm,mpf.dst_grid_size,dmn_sz_out_int[0]); dmn_sz_out_int[0]=mpf.dst_grid_size; } /* !bug */ long col_nbr_in; /* [idx] Number of columns in source grid */ long lon_nbr_in; /* [idx] Number of longitudes in rectangular source grid */ long lat_nbr_in; /* [idx] Number of latitudes in rectangular source grid */ const size_t grd_sz_in=mpf.src_grid_size; /* [nbr] Number of elements in single layer of input grid */ const size_t grd_sz_out=mpf.dst_grid_size; /* [nbr] Number of elements in single layer of output grid */ if(flg_grd_in_1D){ col_nbr_in=dmn_sz_in_int[0]; lon_nbr_in=dmn_sz_in_int[0]; lat_nbr_in=dmn_sz_in_int[0]; }else if(flg_grd_in_2D){ col_nbr_in=0; lon_nbr_in=dmn_sz_in_int[lon_psn_src]; lat_nbr_in=dmn_sz_in_int[lat_psn_src]; /* Sanity-check */ assert(lat_nbr_in*lon_nbr_in == (long)grd_sz_in); } /* !src_grid_rank */ const int bnd_tm_nbr_out=2; /* [nbr] Number of boundaries for output time */ int bnd_nbr_out=int_CEWI; /* [nbr] Number of boundaries for output time and rectangular grid coordinates, and number of vertices for output non-rectangular grid coordinates */ long col_nbr_out=long_CEWI; /* [nbr] Number of columns in destination grid */ long lon_nbr_out=long_CEWI; /* [nbr] Number of longitudes in rectangular destination grid */ long lat_nbr_out=long_CEWI; /* [nbr] Number of latitudes in rectangular destination grid */ long slat_nbr_out=long_CEWI; /* [nbr] Number of latitudes in staggered FV grid destination grid */ long slon_nbr_out=long_CEWI; /* [nbr] Number of longitudes in staggered FV grid destination grid */ if(flg_grd_out_1D){ bnd_nbr_out=mpf.dst_grid_corners; col_nbr_out=dmn_sz_out_int[0]; lat_nbr_out=dmn_sz_out_int[0]; lon_nbr_out=dmn_sz_out_int[0]; /* Sanity-check */ assert(col_nbr_out == (long)grd_sz_out); }else if(flg_grd_out_2D){ col_nbr_out=lat_nbr_out*lon_nbr_out; lat_nbr_out=dmn_sz_out_int[lat_psn_dst]; 
lon_nbr_out=dmn_sz_out_int[lon_psn_dst]; slat_nbr_out=lat_nbr_out-1L; slon_nbr_out=lon_nbr_out; /* Sanity-check */ assert(lat_nbr_out*lon_nbr_out == (long)grd_sz_out); } /* !dst_grid_rank */ /* Ensure coordinates are in degrees not radians for simplicity and CF-compliance NB: ${DATA}/scrip/rmp_T42_to_POP43_conserv.nc has [xy]?_a in degrees and [xy]?_b in radians! */ nco_bool flg_crd_rdn=False; /* [flg] Destination coordinates are in radians not degrees */ char unt_sng[]="units"; /* [sng] netCDF-standard units attribute name */ att_val=nco_char_att_get(in_id,dst_grd_ctr_lat_id,unt_sng); if(att_val){ /* Match "radian" and "radians" */ if(strstr(att_val,"radian")) flg_crd_rdn=True; if(att_val) att_val=(char *)nco_free(att_val); } /* end rcd && att_typ */ nco_bool flg_grd_out_crv=False; /* [flg] Curvilinear coordinates */ nco_bool flg_grd_out_rct=False; /* [flg] Rectangular coordinates */ const nc_type crd_typ_out=NC_DOUBLE; if(flg_grd_out_2D){ lon_ctr_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out)); lat_ctr_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out)); lon_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*grd_sz_out*nco_typ_lng(crd_typ_out)); lat_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*grd_sz_out*nco_typ_lng(crd_typ_out)); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_out; rcd=nco_get_vara(in_id,dst_grd_ctr_lon_id,dmn_srt,dmn_cnt,lon_ctr_out,crd_typ_out); rcd=nco_get_vara(in_id,dst_grd_ctr_lat_id,dmn_srt,dmn_cnt,lat_ctr_out,crd_typ_out); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=grd_sz_out; dmn_cnt[1]=mpf.dst_grid_corners; rcd=nco_get_vara(in_id,dst_grd_crn_lon_id,dmn_srt,dmn_cnt,lon_crn_out,crd_typ_out); rcd=nco_get_vara(in_id,dst_grd_crn_lat_id,dmn_srt,dmn_cnt,lat_crn_out,crd_typ_out); /* User may specify curvilinear grid (with --rgr crv). Otherwise, manually test for curvilinear source grid. 
*/ flg_grd_out_crv=rgr->flg_crv; /* [flg] Curvilinear coordinates */ if(flg_grd_out_crv){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO Output grid specified to be %s\n",nco_prg_nm_get(),flg_grd_out_crv ? "Curvilinear" : "Rectangular"); }else{ long idx_tst=long_CEWI; /* [idx] Index of first latitude or longitude */ for(idx=0;idx<(long)grd_sz_out;idx++){ if(idx%lon_nbr_out == 0) idx_tst=idx; if(lat_ctr_out[idx] != lat_ctr_out[idx_tst]) break; // (void)fprintf(stdout,"%s: DEBUG lat_ctr_out[%li] = %g, lat_ctr_out[%li] = %g\n",nco_prg_nm_get(),idx,lat_ctr_out[idx],idx_tst,lat_ctr_out[idx_tst]); /* fxm: also test lon */ } /* !rectangular */ if(idx != (long)grd_sz_out) flg_grd_out_crv=True; else flg_grd_out_rct=True; if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO Output grid detected to be %s\n",nco_prg_nm_get(),flg_grd_out_crv ? "Curvilinear" : "Rectangular"); } /* !flg_grd_out_crv */ if(flg_grd_out_crv) bnd_nbr_out=mpf.dst_grid_corners; if(flg_grd_out_rct) bnd_nbr_out=2; /* NB: Assumes rectangular latitude and longitude and is invalid for other quadrilaterals */ } /* !flg_grd_out_2D */ if(nco_dbg_lvl_get() >= nco_dbg_scl){ (void)fprintf(stderr,"%s: INFO %s grid conversion type = %s with expected input and prescribed output grid sizes: ",nco_prg_nm_get(),fnc_nm,nco_rgr_grd_sng(nco_rgr_typ)); (void)fprintf(stderr,"lat_in = %li, lon_in = %li, col_in = %li, lat_out = %li, lon_out = %li, col_out = %li\n",lat_nbr_in,lon_nbr_in,col_nbr_in,lat_nbr_out,lon_nbr_out,col_nbr_out); } /* endif dbg */ /* Allocate space for and obtain coordinates */ if(flg_grd_out_1D){ lon_ctr_out=(double *)nco_malloc(col_nbr_out*nco_typ_lng(crd_typ_out)); lat_ctr_out=(double *)nco_malloc(col_nbr_out*nco_typ_lng(crd_typ_out)); lon_bnd_out=(double *)nco_malloc(col_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out)); lat_bnd_out=(double *)nco_malloc(col_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out)); } /* !flg_grd_out_1D */ if(flg_grd_out_rct){ if(lat_ctr_out) 
lat_ctr_out=(double *)nco_free(lat_ctr_out); if(lon_ctr_out) lon_ctr_out=(double *)nco_free(lon_ctr_out); if(lat_crn_out) lat_crn_out=(double *)nco_free(lat_crn_out); if(lon_crn_out) lon_crn_out=(double *)nco_free(lon_crn_out); lon_ctr_out=(double *)nco_malloc(lon_nbr_out*nco_typ_lng(crd_typ_out)); lat_ctr_out=(double *)nco_malloc(lat_nbr_out*nco_typ_lng(crd_typ_out)); lon_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*lon_nbr_out*nco_typ_lng(crd_typ_out)); lat_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*lat_nbr_out*nco_typ_lng(crd_typ_out)); lat_wgt_out=(double *)nco_malloc(lat_nbr_out*nco_typ_lng(crd_typ_out)); lon_ntf_out=(double *)nco_malloc((lon_nbr_out+1L)*nco_typ_lng(crd_typ_out)); lat_ntf_out=(double *)nco_malloc((lat_nbr_out+1L)*nco_typ_lng(crd_typ_out)); lon_bnd_out=(double *)nco_malloc(lon_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out)); lat_bnd_out=(double *)nco_malloc(lat_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out)); } /* !flg_grd_out_rct */ /* Arrays unroll into all longitudes for first latitude, then second latitude, ... 
Obtain longitudes by reading first block contiguously (unstrided) Obtain latitudes by reading unrolled data with stride of lon_nbr */ if(flg_grd_out_1D){ dmn_srt[0]=0L; dmn_cnt[0]=col_nbr_out; rcd=nco_get_vara(in_id,dst_grd_ctr_lon_id,dmn_srt,dmn_cnt,lon_ctr_out,crd_typ_out); dmn_srt[0]=0L; dmn_cnt[0]=col_nbr_out; rcd=nco_get_vara(in_id,dst_grd_ctr_lat_id,dmn_srt,dmn_cnt,lat_ctr_out,crd_typ_out); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=col_nbr_out; dmn_cnt[1]=bnd_nbr_out; rcd=nco_get_vara(in_id,dst_grd_crn_lon_id,dmn_srt,dmn_cnt,lon_bnd_out,crd_typ_out); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=col_nbr_out; dmn_cnt[1]=bnd_nbr_out; rcd=nco_get_vara(in_id,dst_grd_crn_lat_id,dmn_srt,dmn_cnt,lat_bnd_out,crd_typ_out); if(flg_crd_rdn){ for(idx=0;idx<col_nbr_out;idx++){ lon_ctr_out[idx]*=rdn2dgr; lat_ctr_out[idx]*=rdn2dgr; } /* !idx */ for(idx=0;idx<col_nbr_out*bnd_nbr_out;idx++){ lon_bnd_out[idx]*=rdn2dgr; lat_bnd_out[idx]*=rdn2dgr; } /* !idx */ } /* !rdn */ /* Is 1D interface information usable? Yes, unless if all interfaces are zeros NB: fxm Better algorithm for "usable" is that not all interfaces in any cell are equal */ flg_bnd_1D_usable=True; for(idx=0;idx<col_nbr_out*bnd_nbr_out;idx++) if(lon_bnd_out[idx] != 0.0) break; if(idx == col_nbr_out*bnd_nbr_out){ flg_bnd_1D_usable=False; }else{ for(idx=0;idx<col_nbr_out*bnd_nbr_out;idx++) if(lat_bnd_out[idx] != 0.0) break; if(idx == col_nbr_out*bnd_nbr_out) flg_bnd_1D_usable=False; } /* !usable */ if(nco_dbg_lvl_get() >= nco_dbg_crr){ for(idx=0;idx<lat_nbr_out;idx++){ (void)fprintf(stdout,"lat[%li] = %g, vertices = ",idx,lat_ctr_out[idx]); for(bnd_idx=0;bnd_idx<bnd_nbr_out;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lat_bnd_out[bnd_nbr_out*idx+bnd_idx],bnd_idx == bnd_nbr_out-1 ? 
"]\n" : ", "); } /* end loop over lat */ for(idx=0;idx<lon_nbr_out;idx++){ (void)fprintf(stdout,"lon[%li] = %g, vertices = ",idx,lon_ctr_out[idx]); for(bnd_idx=0;bnd_idx<bnd_nbr_out;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lon_bnd_out[bnd_nbr_out*idx+bnd_idx],bnd_idx == bnd_nbr_out-1 ? "]\n" : ", "); } /* end loop over lon */ } /* endif dbg */ } /* !flg_grd_out_1D */ if(flg_grd_out_rct){ /* fxm: sub-sample these from the already-read ctr/crn arrays */ dmn_srt[0L]=0L; dmn_cnt[0L]=lon_nbr_out; rcd=nco_get_vara(in_id,dst_grd_ctr_lon_id,dmn_srt,dmn_cnt,lon_ctr_out,crd_typ_out); dmn_srt[0L]=0L; dmn_cnt[0L]=lat_nbr_out; dmn_srd[0L]=lon_nbr_out; rcd=nco_get_vars(in_id,dst_grd_ctr_lat_id,dmn_srt,dmn_cnt,dmn_srd,lat_ctr_out,crd_typ_out); dmn_srt[0L]=dmn_srt[1]=0L; dmn_cnt[0L]=lon_nbr_out; dmn_cnt[1]=mpf.dst_grid_corners; rcd=nco_get_vara(in_id,dst_grd_crn_lon_id,dmn_srt,dmn_cnt,lon_crn_out,crd_typ_out); dmn_srt[0L]=0L; dmn_cnt[0L]=lat_nbr_out; dmn_srd[0L]=lon_nbr_out; dmn_srt[1]=0L; dmn_cnt[1]=mpf.dst_grid_corners; dmn_srd[1]=1L; rcd=nco_get_vars(in_id,dst_grd_crn_lat_id,dmn_srt,dmn_cnt,dmn_srd,lat_crn_out,crd_typ_out); if(flg_crd_rdn){ for(idx=0L;idx<lon_nbr_out;idx++) lon_ctr_out[idx]*=rdn2dgr; for(idx=0L;idx<lat_nbr_out;idx++) lat_ctr_out[idx]*=rdn2dgr; for(idx=0L;idx<lon_nbr_out*mpf.dst_grid_corners;idx++) lon_crn_out[idx]*=rdn2dgr; for(idx=0L;idx<lat_nbr_out*mpf.dst_grid_corners;idx++) lat_crn_out[idx]*=rdn2dgr; } /* !rdn */ } /* !flg_grd_out_rct */ if(flg_grd_out_crv){ if(flg_crd_rdn){ for(idx=0L;idx<(long)grd_sz_out;idx++) lon_ctr_out[idx]*=rdn2dgr; for(idx=0L;idx<(long)grd_sz_out;idx++) lat_ctr_out[idx]*=rdn2dgr; for(idx=0L;idx<(long)grd_sz_out*mpf.dst_grid_corners;idx++) lon_crn_out[idx]*=rdn2dgr; for(idx=0L;idx<(long)grd_sz_out*mpf.dst_grid_corners;idx++) lat_crn_out[idx]*=rdn2dgr; } /* !rdn */ } /* !flg_grd_out_crv */ /* Allocate space for and obtain area, fraction, and mask, which are needed for both 1D and 2D grids */ area_out=(double 
*)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out)); dmn_srt[0L]=0L; dmn_cnt[0L]=grd_sz_out; rcd=nco_get_vara(in_id,area_dst_id,dmn_srt,dmn_cnt,area_out,crd_typ_out); frc_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out)); dmn_srt[0L]=0L; dmn_cnt[0L]=grd_sz_out; rcd=nco_get_vara(in_id,frc_dst_id,dmn_srt,dmn_cnt,frc_out,crd_typ_out); if(msk_dst_id != NC_MIN_INT){ msk_out=(int *)nco_malloc(grd_sz_out*nco_typ_lng(NC_INT)); dmn_srt[0L]=0L; dmn_cnt[0L]=grd_sz_out; rcd=nco_get_vara(in_id,msk_dst_id,dmn_srt,dmn_cnt,msk_out,(nc_type)NC_INT); } /* !msk */ /* Derive 2D interface boundaries from lat and lon grid-center values NB: Procedures to derive interfaces from midpoints on rectangular grids are theoretically possible However, ESMF often outputs interfaces values (e.g., yv_b) for midpoint coordinates (e.g., yc_b) For example, ACME standard map from ne120np4 to 181x360 has yc_b[0] = yv_b[0] = -90.0 Latitude = -90 is, by definition, not a midpoint coordinate This appears to be an artifact of the non-physical representation of the FV grid, i.e., a grid center located at the pole where longitudes collapse in the model, but cannot be represented as collapsed on a rectangular 2D grid with non-zero areas. Unfortunately, ESMF supports this nonsense by labeling the grid center as at the pole so that applications can easily diagnose an FV grid when they read-in datasets. A superior application could diagnose FV just fine from actual non-polar gridcell centers Maybe ESMF could introduce a flag or something to indicate/avoid this special case? 
Safer to read boundary interfaces directly from grid corner/vertex arrays in map file Derivation of boundaries xv_b, yv_b from _correct_ xc_b, yc_b is as follows Do not implement this procedure until resolving midpoint/center issue described above: lon_ntf_out[0L]=0.5*(lon_ctr_out[0L]+lon_ctr_out[lon_nbr_out-1L])-180.0; // Extrapolation lat_ntf_out[0L]=lat_ctr_out[0L]-0.5*(lat_ctr_out[1L]-lat_ctr_out[0L]); // Extrapolation for(idx=1L;idx<lon_nbr_out;idx++) lon_ntf_out[idx]=0.5*(lon_ctr_out[idx-1L]+lon_ctr_out[idx]); for(idx=1L;idx<lat_nbr_out;idx++) lat_ntf_out[idx]=0.5*(lat_ctr_out[idx-1L]+lat_ctr_out[idx]); lon_ntf_out[lon_nbr_out]=lon_ntf_out[0L]+360.0; lat_ntf_out[lat_nbr_out]=lat_ctr_out[lat_nbr_out-1L]+0.5*(lat_ctr_out[lat_nbr_out-1L]-lat_ctr_out[lat_nbr_out-2L]); */ if(flg_grd_out_rct){ double lon_spn; /* [dgr] Longitude span */ double lat_spn; /* [dgr] Latitude span */ nco_bool flg_s2n=True; /* I [enm] Latitude grid-direction is South-to-North */ if(lat_ctr_out[1L] < lat_ctr_out[0L]) flg_s2n=False; /* Obtain 1-D rectangular interfaces from unrolled 1-D vertex arrays */ for(idx=0L;idx<lon_nbr_out;idx++) lon_ntf_out[idx]=lon_crn_out[mpf.dst_grid_corners*idx]; /* 20201009 The four possible CCW RLL orderings start with the ul, ll, lr, or ur vertex NCO grid generators store vertices in order (0,1,2,3)=(ul,ll,lr,ur) NCO final latitude is in upper vertices (0,3) for S2N grids, lower vertices (1,2) for N2S grids NCO final longitude is in RHS vertices (2,3) for S2N and N2S grids Need generic algorithm to pick easternmost longitude for any of the four CCW orderings What is ESMF vertex ordering? or does ESMF always copy from input grid? Most grid generators probably start with ul or ll so vertex 2 is good choice for easternmost */ // lon_ntf_out[lon_nbr_out]=lon_crn_out[mpf.dst_grid_corners*lon_nbr_out-(mpf.dst_grid_corners-1L)]; // ESMF?
lon_ntf_out[lon_nbr_out]=lon_crn_out[mpf.dst_grid_corners*lon_nbr_out-2L]; // NCO lr if(lon_ntf_out[lon_nbr_out-1] == lon_ntf_out[lon_nbr_out]) lon_ntf_out[lon_nbr_out]=lon_crn_out[mpf.dst_grid_corners*lon_nbr_out-1L]; // NCO ur if(lon_ntf_out[lon_nbr_out-1] == lon_ntf_out[lon_nbr_out]) lon_ntf_out[lon_nbr_out]=lon_crn_out[mpf.dst_grid_corners*lon_nbr_out-3L]; // NCO ll assert(lon_ntf_out[lon_nbr_out-1] != lon_ntf_out[lon_nbr_out]); lon_spn=lon_ntf_out[lon_nbr_out]-lon_ntf_out[0L]; for(idx=0L;idx<lat_nbr_out;idx++) lat_ntf_out[idx]=lat_crn_out[mpf.dst_grid_corners*idx]; if(flg_s2n) lat_ntf_out[lat_nbr_out]=max_dbl(lat_crn_out[mpf.dst_grid_corners*lat_nbr_out-1L],lat_crn_out[mpf.dst_grid_corners*lat_nbr_out-2L]); else lat_ntf_out[lat_nbr_out]=min_dbl(lat_crn_out[mpf.dst_grid_corners*lat_nbr_out-1L],lat_crn_out[mpf.dst_grid_corners*lat_nbr_out-2L]); assert(lat_ntf_out[lat_nbr_out] != lat_ntf_out[lat_nbr_out-1]); lat_spn=fabs(lat_ntf_out[lat_nbr_out]-lat_ntf_out[0L]); /* Place 1-D rectangular interfaces into 2-D coordinate boundaries */ for(idx=0L;idx<lon_nbr_out;idx++){ lon_bnd_out[2L*idx]=lon_ntf_out[idx]; lon_bnd_out[2L*idx+1L]=lon_ntf_out[idx+1L]; } /* !lon_nbr_out */ for(idx=0L;idx<lat_nbr_out;idx++){ lat_bnd_out[2L*idx]=lat_ntf_out[idx]; lat_bnd_out[2L*idx+1L]=lat_ntf_out[idx+1L]; } /* !lat_nbr_out */ if(nco_dbg_lvl_get() >= nco_dbg_crr){ for(idx=0L;idx<lon_nbr_out;idx++) (void)fprintf(stdout,"lon[%li] = [%g, %g, %g]\n",idx,lon_bnd_out[2L*idx],lon_ctr_out[idx],lon_bnd_out[2L*idx+1L]); for(idx=0L;idx<lat_nbr_out;idx++) (void)fprintf(stdout,"lat[%li] = [%g, %g, %g]\n",idx,lat_bnd_out[2L*idx],lat_ctr_out[idx],lat_bnd_out[2L*idx+1L]); } /* endif dbg */ /* Global or regional grid? 
*/ nco_grd_xtn_enm nco_grd_xtn; /* [enm] Extent of grid */ if((float)lon_spn == 360.0f && (float)lat_spn == 180.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn; /* Diagnose type of latitude output grid by testing second latitude center against formulae */ double lat_ctr_tst_eqa; double lat_ctr_tst_fv; if(flg_s2n) lat_ctr_tst_eqa=lat_ntf_out[0L]+lat_spn*1.5/lat_nbr_out; else lat_ctr_tst_eqa=lat_ntf_out[0L]-lat_spn*1.5/lat_nbr_out; if(flg_s2n) lat_ctr_tst_fv=lat_ntf_out[0L]+lat_spn/(lat_nbr_out-1L); else lat_ctr_tst_fv=lat_ntf_out[0L]-lat_spn/(lat_nbr_out-1L); double lat_ctr_tst_gss; /* In diagnosing grids, agreement to slightly worse than single-precision is "good enough for government work" Hence some comparisons cast from double to float before comparison 20150526: T42 grid from SCRIP and related maps, and NCL-generated Gaussian grids for CESM, are accurate to at most ~eight digits 20150611: map_ne120np4_to_fv801x1600_bilin.150418.nc has yc_b[1600]=-89.775000006 not expected exact value lat_ctr[1]=-89.775000000000006 20170521: T62 grid from NCEP-NCAR Reanalysis 1 is worse than single precision, has yc_[192]=-86.6531 not expected exact value lat_ctr[1]=-86.6532 */ if((float)lat_ctr_out[1L] == (float)lat_ctr_tst_eqa) nco_grd_lat_typ=nco_grd_lat_eqa; if((float)lat_ctr_out[1L] == (float)lat_ctr_tst_fv) nco_grd_lat_typ=nco_grd_lat_fv; double *wgt_Gss_out=NULL; // [frc] Gaussian weights double precision if(nco_grd_lat_typ == nco_grd_lat_nil){ /* Check for Gaussian grid */ double *lat_sin_out; // [frc] Sine of Gaussian latitudes double precision lat_sin_out=(double *)nco_malloc(lat_nbr_out*sizeof(double)); wgt_Gss_out=(double *)nco_malloc(lat_nbr_out*sizeof(double)); (void)nco_lat_wgt_gss(lat_nbr_out,flg_s2n,lat_sin_out,wgt_Gss_out); lat_ctr_tst_gss=rdn2dgr*asin(lat_sin_out[1L]); /* Gaussian weights on output grid will be double-precision accurate Grid itself is kept as user-specified so area diagnosed by ESMF_RegridWeightGen may be slightly 
inconsistent with weights */ if(nco_dbg_lvl_get() >= nco_dbg_sbr) (void)fprintf(stderr,"%s: INFO %s reports lat_ctr_out[1] = %g, lat_ctr_tst_gss = %g\n",nco_prg_nm_get(),fnc_nm,lat_ctr_out[1L],lat_ctr_tst_gss); if((float)lat_ctr_out[1L] == (float)lat_ctr_tst_gss) nco_grd_lat_typ=nco_grd_lat_gss; if(lat_sin_out) lat_sin_out=(double *)nco_free(lat_sin_out); } /* !Gaussian */ if(nco_grd_lat_typ == nco_grd_lat_nil){ /* If still of unknown type, this 2D grid may be weird This occurs, e.g., with POP3 destination grid Change gridtype from nil (which means not-yet-set) to unknown (which means none of the others matched) */ nco_grd_lat_typ=nco_grd_lat_unk; } /* !nil */ /* Currently grd_lat_typ and grd_2D_typ are equivalent, though that may be relaxed in future */ if(nco_grd_lat_typ == nco_grd_lat_unk) nco_grd_2D_typ=nco_grd_2D_unk; else if(nco_grd_lat_typ == nco_grd_lat_gss) nco_grd_2D_typ=nco_grd_2D_gss; else if(nco_grd_lat_typ == nco_grd_lat_fv) nco_grd_2D_typ=nco_grd_2D_fv; else if(nco_grd_lat_typ == nco_grd_lat_eqa) nco_grd_2D_typ=nco_grd_2D_eqa; else assert(False); if(nco_grd_lon_typ == nco_grd_lon_nil){ /* NB: Longitude grid diagnosis is susceptible to mistakes when input mapfile embeds common faulty grids, e.g., ACME *150418* FV maps map_ne30np4_to_fv129x256_aave.150418.nc is diagnosed as regional grid of unknown type because of input grid flaws map_ne30np4_to_fv129x256_aave.20150901.nc is (correctly) diagnosed as global grid of with lon_Grn_ctr */ if( (float)lon_ctr_out[0L] == 0.0f && (float)lon_ctr_out[1L] == (float)(lon_ctr_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_Grn_ctr; else if((float)lon_ctr_out[0L] == -180.0f && (float)lon_ctr_out[1L] == (float)(lon_ctr_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_180_ctr; else if((float)lon_ntf_out[0L] == 0.0f && (float)lon_ntf_out[1L] == (float)(lon_ntf_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_Grn_wst; else if((float)lon_ntf_out[0L] == -180.0f && (float)lon_ntf_out[1L] == 
(float)(lon_ntf_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_180_wst; else if((float)lon_ctr_out[1L] == (float)(lon_ctr_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_bb; else nco_grd_lon_typ=nco_grd_lon_unk; } /* !nco_grd_lon_typ */ if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s diagnosed output latitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lat_sng(nco_grd_lat_typ)); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s diagnosed output longitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lon_sng(nco_grd_lon_typ)); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s diagnosed output grid-extent: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_xtn_sng(nco_grd_xtn)); if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ slat_ctr_out=(double *)nco_malloc(slat_nbr_out*nco_typ_lng(crd_typ_out)); slat_wgt_out=(double *)nco_malloc(slat_nbr_out*nco_typ_lng(crd_typ_out)); slon_ctr_out=(double *)nco_malloc(slon_nbr_out*nco_typ_lng(crd_typ_out)); for(idx=0L;idx<slat_nbr_out;idx++){ slat_ctr_out[idx]=lat_ntf_out[idx+1L]; slat_wgt_out[idx]=fabs(sin(dgr2rdn*lat_ctr_out[idx+1L])-sin(dgr2rdn*lat_ctr_out[idx])); /* fabs() ensures positive area in n2s grids */ } /* !lat_nbr_out */ for(idx=0L;idx<slon_nbr_out;idx++){ slon_ctr_out[idx]=lon_ntf_out[idx]; } /* !lat_nbr_out */ } /* !nco_grd_lat_fv */ switch(nco_grd_lat_typ){ case nco_grd_lat_eqa: case nco_grd_lat_fv: for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_out[idx]=fabs(sin(dgr2rdn*lat_bnd_out[2*idx+1L])-sin(dgr2rdn*lat_bnd_out[2*idx])); /* fabs() ensures positive area in n2s grids */ break; case nco_grd_lat_gss: for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_out[idx]=wgt_Gss_out[idx]; if(wgt_Gss_out) wgt_Gss_out=(double *)nco_free(wgt_Gss_out); break; case nco_grd_lat_unk: for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_out[idx]=0.0; if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: WARNING %s reports unknown output latitude grid-type. 
Unable to guess what latitude weights should be.\n",nco_prg_nm_get(),fnc_nm); break; default: nco_dfl_case_generic_err(); break; } /* end nco_grd_lat_typ switch */ /* Fuzzy test of latitude weight normalization */ double lat_wgt_ttl_xpc; /* [frc] Expected sum of latitude weights */ lat_wgt_ttl=0.0; for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_ttl+=lat_wgt_out[idx]; lat_wgt_ttl_xpc=fabs(sin(dgr2rdn*lat_bnd_out[2L*(lat_nbr_out-1L)+1L])-sin(dgr2rdn*lat_bnd_out[0L])); /* fabs() ensures positive area in n2s grids */ if(nco_grd_lat_typ != nco_grd_lat_unk){ assert(1.0-lat_wgt_ttl/lat_wgt_ttl_xpc < eps_rlt); if(lat_wgt_ttl_xpc < 0.0) abort(); /* CEWI Use lat_wgt_ttl_xpc at least once outside of assert() to avoid gcc 4.8.2 set-but-not-used warning */ } /* !nco_grd_lat_unk */ } /* !flg_grd_out_rct */ /* When possible, ensure area_out is non-zero 20150722: ESMF documentation says "The grid area array is only output when the conservative remapping option is used" Actually, ESMF does (always?) output area, but area == 0.0 unless conservative remapping is used 20150721: ESMF bilinear interpolation map ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.150418.nc has area == 0.0 20150710: Tempest regionally refined grids like bilinearly interpolated CONUS for ACME RRM has area_out == 0 20150821: ESMF always outputs area_out == 0.0 for bilinear interpolation Check whether NCO must diagnose and provide its own area_out */ /* If area_out contains any zero... */ for(idx=0;idx<(long)grd_sz_out;idx++) if(area_out[idx] == 0.0) break; if(idx != (long)grd_sz_out){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO Output grid detected with zero-valued output area(s) at idx = %ld (and likely others, too).\n",nco_prg_nm_get(),idx); } /* !zero */ for(idx=0;idx<(long)grd_sz_out;idx++) if(area_out[idx] != 0.0) break; if(idx == (long)grd_sz_out){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports area_out from mapfile is everywhere zero. 
This is expected for bilinearly interpolated output maps produced by ESMF_RegridWeightGen. ",nco_prg_nm_get(),fnc_nm); if(flg_grd_out_2D && flg_grd_out_rct && (bnd_nbr_out == 2 || bnd_nbr_out == 4)){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"Since the destination grid provides cell bounds information, NCO will diagnose area (and output it as a variable named \"%s\") from the destination gridcell boundaries. NCO diagnoses quadrilateral area for rectangular output grids from a formula that assumes that cell boundaries follow arcs of constant latitude and longitude. This differs from the area of cells with boundaries that follow great circle arcs (used by, e.g., ESMF_RegridWeightGen and TempestRemap). Be warned that NCO correctly diagnoses area for all convex polygons, yet not for most concave polygons. To determine whether the diagnosed areas are fully consistent with the output grid, one must know such exact details. If your grid has analytic areas that NCO does not yet diagnose correctly from provided cell boundaries, please contact us.\n",rgr->area_nm); flg_dgn_area_out=True; }else if(flg_grd_out_2D && flg_grd_out_crv && (bnd_nbr_out == 2 || bnd_nbr_out == 4)){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"Since the destination grid provides cell bounds information, NCO will diagnose area (and output it as a variable named \"%s\") from the destination gridcell boundaries. NCO diagnoses quadrilateral area for curvilinear output grids from formulae that assume that cell boundaries follow great circle arcs (as do, e.g., ESMF_RegridWeightGen and TempestRemap). This differs from the area of cells with boundaries that follow lines of constant latitude or longitude. Be warned that NCO correctly diagnoses area for all convex polygons, yet not for most concave polygons. To determine whether the diagnosed areas are fully consistent with the output grid, one must know such exact details. 
If your grid has analytic areas that NCO does not yet diagnose correctly from provided cell boundaries, please contact us.\n",rgr->area_nm); flg_dgn_area_out=True; }else if(flg_grd_out_1D && flg_bnd_1D_usable){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"Since the destination grid provides cell bounds information, NCO will diagnose area (and output it as a variable name \"%s\") from the destination gridcell boundaries. NCO diagnoses spherical polygon area for unstructured output grids from formulae that assume that cell boundaries follow great circle arcs (as do, e.g., ESMFRegridWeightGen and TempestRemap). This differs from the area of cells with boundaries that follow lines of constant latitude or longitude. Be warned that NCO correctly diagnoses area for all convex polygons, yet not for most concave polygons. To determine whether the diagnosed areas are fully consistent with the output grid, one must know such exact details. If your grid has analytic areas that NCO does not yet diagnose correctly from provided cell boundaries, please contact us.\n",rgr->area_nm); flg_dgn_area_out=True; }else{ /* !1D */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"However, NCO cannot find enough boundary information, or it is too stupid about spherical trigonometry, to diagnose area_out. NCO will output an area variable (named \"%s\") copied from the input mapfile. 
This area will be everywhere zero.\n",rgr->area_nm); } /* !2D */ } /* !area */ if(flg_dgn_area_out){ if(flg_grd_out_1D && flg_bnd_1D_usable){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"INFO: Diagnosing area_out for 1D grid\n"); /* Area of unstructured grids requires spherical trigonometry */ nco_sph_plg_area(rgr,lat_bnd_out,lon_bnd_out,col_nbr_out,bnd_nbr_out,area_out); } /* !1D */ if(flg_grd_out_crv){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"INFO: Diagnosing area_out for curvilinear grid\n"); /* Area of curvilinear grids requires spherical trigonometry */ nco_sph_plg_area(rgr,lat_crn_out,lon_crn_out,grd_sz_out,bnd_nbr_out,area_out); } /* !flg_grd_out_crv */ if(flg_grd_out_rct && nco_grd_2D_typ != nco_grd_2D_unk){ /* Mr. Enenstein and George O. Abell taught me the area of spherical zones Spherical zone area is exact and faithful to underlying rectangular equi-angular grid However, ESMF and Tempest approximate spherical polygons as connected by great circle arcs fxm: Distinguish spherical zone shapes (e.g., equi-angular) from great circle arcs (e.g., unstructured polygons) */ for(lat_idx=0;lat_idx<lat_nbr_out;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr_out;lon_idx++) area_out[lat_idx*lon_nbr_out+lon_idx]=fabs(dgr2rdn*(lon_bnd_out[2*lon_idx+1]-lon_bnd_out[2*lon_idx])*(sin(dgr2rdn*lat_bnd_out[2*lat_idx+1])-sin(dgr2rdn*lat_bnd_out[2*lat_idx]))); /* fabs() ensures positive area in n2s grids */ } /* !spherical zones */ } /* !flg_dgn_area_out */ if(rgr->tst == -1){ /* Passing --rgr tst=-1 causes regridder to fail here This failure should cause host climo script to abort */ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports regridder instructed to fail here. This tests failure mode in climo scripts...\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !tst */ /* Verify frc_out is sometimes non-zero ESMF: "The grid frac arrays (frac_a and frac_b) are calculated by ESMF_RegridWeightGen. 
For conservative remapping, the grid frac array returns the area fraction of the grid cell which participates in the remapping. For bilinear and patch remapping, the destination grid frac array [frac_b] is one where the grid point participates in the remapping and zero otherwise. For bilinear and patch remapping, the source grid frac array is always set to zero." SCRIP: Similar to ESMF For both ESMF+SCRIP frac_[ab] are computed by the weight-generation algorithm and are not specified as part of the input grids How does an input ocean grid indicate that, say, half the gridcell is land and half ocean? Does it use the area variable to tell the weight generation algorithm that a gridcell is fractional? In other words does it use grid_imask=1 and grid_area=0.5*full_gridcell_area and, e.g., T=273.0? */ for(idx=0;idx<(long)grd_sz_out;idx++) if(frc_out[idx] != 0.0) break; if(idx == (long)grd_sz_out){ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports frc_out == frac_b contains all zeros\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !always zero */ /* Test whether frc_out is ever zero... */ for(idx=0;idx<(long)grd_sz_out;idx++) if(frc_out[idx] == 0.0) break; if(nco_dbg_lvl_get() >= nco_dbg_std) if(idx != (long)grd_sz_out) (void)fprintf(stdout,"%s: INFO %s reports frc_out == frac_b contains zero-elements (e.g., at 1D idx=%ld)\n",nco_prg_nm_get(),fnc_nm,idx); /* Normalizing by frc_out is redundant iff frc_out == 1.0, so we can save time without sacrificing accuracy However, frc_out is often (e.g., for CS <-> RLL maps) close but not equal to unity (ESMF_RegridWeightGen issue?) 
Hence, decide whether to normalize by frc_out by diagnosing the furthest excursion of frc_out from unity */ nco_bool flg_frc_out_one=True; /* [flg] Destination gridcell fraction frc_out == frac_b is in [1-epsilon,frc_out,1+epsilon] */ nco_bool flg_frc_out_wrt=False; /* [flg] Write destination gridcell fraction frc_out == frac_b to regridded files */ double frc_out_dff_one; /* [frc] Deviation of frc_out from 1.0 */ double frc_out_dff_one_max=0.0; /* [frc] Maximum deviation of frc_out from 1.0 */ long idx_max_dvn; /* [idx] Index of maximum deviation from 1.0 */ for(idx=0;idx<(long)grd_sz_out;idx++){ frc_out_dff_one=fabs(frc_out[idx]-1.0); if(frc_out_dff_one > frc_out_dff_one_max){ frc_out_dff_one_max=frc_out_dff_one; idx_max_dvn=idx; } /* !max */ } /* !idx */ if(frc_out_dff_one_max > eps_rlt) flg_frc_out_one=False; nco_bool flg_frc_nrm=False; /* [flg] Must normalize by frc_out == frac_b because frc_out is not always unity and specified normalization is destarea or none */ if(!flg_frc_out_one && /* If fraction is sometimes "far" from 1.0 and ... */ ((nco_rgr_mpf_typ == nco_rgr_mpf_ESMF && nco_rgr_mth_typ == nco_rgr_mth_conservative && (nco_rgr_nrm_typ == nco_rgr_nrm_destarea || nco_rgr_nrm_typ == nco_rgr_nrm_none)) || /* ESMF map-file specifies conservative regridding with "destarea" or "none" or ... */ (nco_rgr_mpf_typ != nco_rgr_mpf_ESMF)) /* 20191003: Weight-generator does not adhere to ESMF "normalization type" convention */ && True){ flg_frc_nrm=True; /* Avoid writing frc_out unless discrepancies are particularly egregious Otherwise would frc_out for standard remaps like ne30->fv129x256 for which eps=2.46e-13 */ double eps_rlt_wrt_thr=3.0e-13; /* 20181104: Never write frac_b for CMIP6! */ /* if(frc_out_dff_one_max > eps_rlt_wrt_thr) flg_frc_out_wrt=True; */ if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s reports global metadata specifies conservative remapping with normalization of type = %s. 
Furthermore, destination fractions frc_dst = dst_frac = frac_b = frc_out contain non-unity elements (maximum deviation from unity of %g exceeds hard-coded (in variable eps_rlt) relative-epsilon threshold of %g for frc_out[%ld] = %g). Thus normalization issues will be explicitly treated. Will apply \'destarea\' normalization (i.e., divide by non-zero frc_out[dst_idx]) to all regridded arrays.\n",nco_prg_nm_get(),fnc_nm,nco_rgr_nrm_sng(nco_rgr_nrm_typ),frc_out_dff_one_max,eps_rlt,idx_max_dvn,frc_out[idx_max_dvn]); if(nco_dbg_lvl_get() >= nco_dbg_std && flg_frc_out_wrt) (void)fprintf(stdout,"%s: INFO %s Maximum deviation %g exceeds threshold of %g that triggers automatic writing of fractional destination area as variable named frac_b in regridded output.\n",nco_prg_nm_get(),fnc_nm,frc_out_dff_one_max,eps_rlt_wrt_thr); } /* !sometimes non-unity */ if(flg_frc_nrm && rgr->flg_rnr){ // 20190918: Weaken from WARNING to INFO because NCO no longer renormalizes when using "destarea" maps unless specifically requested to with --rnr_thr (void)fprintf(stdout,"%s: INFO %s reports manual request to renormalize fields to preserve mean-values (rather than integral values) in destination gridcells that are incompletely covered by valid data in source gridcells (i.e., non-unity frc_dst = dst_frac = frac_b)\n",nco_prg_nm_get(),fnc_nm); //(void)fprintf(stdout,"%s: INFO %s reports manual request (with --rnr) to renormalize fields with non-unity frc_dst = dst_frac = frac_b at same time global metadata specifies normalization type = %s. Normalizing twice can be an error, depending on intent of each. 
Charlie is all ears on how NCO should handle this :)\n",nco_prg_nm_get(),fnc_nm,nco_rgr_nrm_sng(nco_rgr_nrm_typ)); //nco_exit(EXIT_FAILURE); } /* !flg_rnr */ /* Detailed summary of 2D grids now available including quality-checked coordinates and area */ if(flg_grd_out_2D && nco_dbg_lvl_get() >= nco_dbg_sbr){ lat_wgt_ttl=0.0; area_out_ttl=0.0; if(flg_grd_out_rct){ (void)fprintf(stderr,"%s: INFO %s reports destination rectangular latitude grid:\n",nco_prg_nm_get(),fnc_nm); for(idx=0;idx<lat_nbr_out;idx++) lat_wgt_ttl+=lat_wgt_out[idx]; } /* !flg_grd_out_rct */ for(lat_idx=0;lat_idx<lat_nbr_out;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr_out;lon_idx++) area_out_ttl+=area_out[lat_idx*lon_nbr_out+lon_idx]; (void)fprintf(stdout,"lat_wgt_ttl = %20.15f, frc_lat_wgt = %20.15f, area_ttl = %20.15f, frc_area = %20.15f\n",lat_wgt_ttl,lat_wgt_ttl/2.0,area_out_ttl,area_out_ttl/(4.0*M_PI)); if(flg_grd_out_rct){ for(idx=0;idx<lon_nbr_out;idx++) (void)fprintf(stdout,"lon[%li] = [%g, %g, %g]\n",idx,lon_bnd_out[2*idx],lon_ctr_out[idx],lon_bnd_out[2*idx+1]); for(idx=0;idx<lat_nbr_out;idx++) (void)fprintf(stdout,"lat[%li] = [%g, %g, %g]\n",idx,lat_bnd_out[2*idx],lat_ctr_out[idx],lat_bnd_out[2*idx+1]); for(idx=0;idx<lat_nbr_out;idx++) (void)fprintf(stdout,"lat[%li], wgt[%li] = %20.15f, %20.15f\n",idx,idx,lat_ctr_out[idx],lat_wgt_out[idx]); } /* !flg_grd_out_rct */ if(nco_dbg_lvl_get() > nco_dbg_crr) for(lat_idx=0;lat_idx<lat_nbr_out;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr_out;lon_idx++) (void)fprintf(stdout,"lat[%li] = %g, lon[%li] = %g, area[%li,%li] = %g\n",lat_idx,lat_ctr_out[lat_idx],lon_idx,lon_ctr_out[lon_idx],lat_idx,lon_idx,area_out[lat_idx*lon_nbr_out+lon_idx]); assert(area_out_ttl > 0.0); assert(area_out_ttl <= 4.0*M_PI + 5.0e-15); } /* !flg_grd_out_2D && !dbg */ /* Allocate space for and obtain weights and addresses */ wgt_raw=(double *)nco_malloc_dbg(mpf.num_links*nco_typ_lng(NC_DOUBLE),fnc_nm,"Unable to malloc() value buffer for remapping weights"); col_src_adr=(int 
*)nco_malloc_dbg(mpf.num_links*nco_typ_lng(NC_INT),fnc_nm,"Unable to malloc() value buffer for remapping addresses"); row_dst_adr=(int *)nco_malloc_dbg(mpf.num_links*nco_typ_lng(NC_INT),fnc_nm,"Unable to malloc() value buffer for remapping addresses"); /* Obtain remap matrix addresses and weights from map file */ dmn_srt[0]=0L; dmn_cnt[0]=mpf.num_links; rcd=nco_get_vara(in_id,col_src_adr_id,dmn_srt,dmn_cnt,col_src_adr,NC_INT); rcd=nco_get_vara(in_id,row_dst_adr_id,dmn_srt,dmn_cnt,row_dst_adr,NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=mpf.num_links; if(nco_rgr_mpf_typ != nco_rgr_mpf_SCRIP){ rcd=nco_get_vara(in_id,wgt_raw_id,dmn_srt,dmn_cnt,wgt_raw,NC_DOUBLE); }else{ /* SCRIP mapfiles store 2D weight array remap_matrix[num_links,num_wgts] Apply only first weight for first-order conservative accuracy (i.e., area overlap) Apply all three weights for second-order conservative accuracy (by including gradients from centroid to vertices) */ dmn_srd[0]=1L; dmn_srt[1]=0L; dmn_cnt[1]=1L; dmn_srd[1]=mpf.num_wgts; rcd=nco_get_vars(in_id,wgt_raw_id,dmn_srt,dmn_cnt,dmn_srd,wgt_raw,NC_DOUBLE); } /* !SCRIP */ /* Pre-subtract one from row/column addresses (stored, by convention, as Fortran indices) to optimize access with C indices */ size_t lnk_nbr; /* [nbr] Number of links */ size_t lnk_idx; /* [idx] Link index */ lnk_nbr=mpf.num_links; for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) row_dst_adr[lnk_idx]--; for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) col_src_adr[lnk_idx]--; if(nco_dbg_lvl_get() >= nco_dbg_io){ (void)fprintf(stdout,"idx row_dst col_src wgt_raw\n"); for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) (void)fprintf(stdout,"%li %d %d %g\n",lnk_idx,row_dst_adr[lnk_idx],col_src_adr[lnk_idx],wgt_raw[lnk_idx]); } /* endif dbg */ /* Free memory associated with input file */ if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt) dmn_cnt=(long *)nco_free(dmn_cnt); if(dmn_srd) dmn_srd=(long *)nco_free(dmn_srd); /* Close input netCDF file */ nco_close(in_id); /* Remove local copy of file */ 
if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in); /* Above this line, fl_in and in_id refer to map file Below this line, fl_in and in_id refer to input file to be regridded */ /* Initialize */ in_id=rgr->in_id; out_id=rgr->out_id; /* Sanity check that input data file matches expectations from mapfile */ char *col_nm_in=rgr->col_nm_in; /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */ char *lat_nm_in=rgr->lat_nm_in; /* [sng] Name of input dimension to recognize as latitude */ char *lon_nm_in=rgr->lon_nm_in; /* [sng] Name of input dimension to recognize as longitude */ int dmn_id_col=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_lat; /* [id] Dimension ID */ int dmn_id_lon; /* [id] Dimension ID */ /* 20160503 Discover coordinates via CF Convention if indicated This copies method used in nco_grd_nfr() */ /* Begin CF-coordinates block */ cf_crd_sct *cf=NULL; char *rgr_var; /* [sng] Variable for special regridding treatment */ nco_bool flg_cf=False; /* [flg] Follow CF Coordinates convention to find and infer grid */ rgr_var=rgr->var_nm; if(rgr_var){ /* Infer grid from special variable Intended to be variable that has both horizontal dimensions and "coordinates" attribute, e.g., ncks --cdl -m ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc | grep coordinates 4LFTX_221_SPDY_S113:coordinates = "gridlat_221 gridlon_221" ; Usage: ncks -O -D 3 --rgr infer --rgr_var=ALBDO_221_SFC_S113 --rgr grid=${HOME}/grd_narr.nc ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc ~/foo.nc */ char crd_sng[]="coordinates"; /* CF-standard coordinates attribute name */ cf=(cf_crd_sct *)nco_malloc(sizeof(cf_crd_sct)); cf->crd=False; /* [flg] CF coordinates information is complete */ cf->crd_id[0]=NC_MIN_INT; /* [id] Coordinate ID, first */ cf->crd_id[1]=NC_MIN_INT; /* [id] Coordinate ID, second */ cf->crd_nm[0]=NULL; /* [sng] Coordinate name, first */ cf->crd_nm[1]=NULL; /* [sng] Coordinate name, second */ cf->crd_sng=NULL; /* [sng] Coordinates 
attribute value */ cf->dmn_id[0]=NC_MIN_INT; /* [id] Dimension ID, first */ cf->dmn_id[1]=NC_MIN_INT; /* [id] Dimension ID, second */ cf->dmn_nm[0]=NULL; /* [sng] Dimension name, first */ cf->dmn_nm[1]=NULL; /* [sng] Dimension name, second */ cf->unt_sng[0]=NULL; /* [sng] Units string, first coordinate */ cf->unt_sng[1]=NULL; /* [sng] Units string, second coordinate */ cf->var_id=NC_MIN_INT; /* [id] Coordinate variable ID */ cf->var_nm=NULL; /* [sng] Coordinates variable name */ cf->var_type=NC_NAT; /* [enm] Coordinates variable type */ if((rcd=nco_inq_varid_flg(in_id,rgr_var,&cf->var_id)) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports special \"coordinates\" variable %s not found. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var); goto skp_cf; } /* !rcd */ cf->crd_sng=nco_char_att_get(in_id,cf->var_id,crd_sng); if(cf->crd_sng){ cf->crd=True; }else{ /* !rcd && att_typ */ (void)fprintf(stderr,"%s: WARNING %s reports coordinates variable %s does not have character-valued \"coordinates\" attribute. 
Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var); goto skp_cf; } /* !rcd && att_typ */ /* Valid coordinates attribute requires two coordinate names separated by space character */ char *crd_nm[NCO_MAX_CRD_PER_VAR]; /* [sng] Coordinate name start position */ char *crd_dpl; /* [sng] Modifiable duplicate of coordinates string */ char *spc_ptr; /* [sng] Pointer to space character (' ') */ int crd_nbr=0; /* [nbr] Number of names in coordinates attribute */ int crd_spt=0; /* [nbr] Number of "spatial-like" (that include "degree" in units) coordinates */ int crd_idx=0; /* [idx] Counter for coordinate names */ for(crd_idx=0;crd_idx<NCO_MAX_CRD_PER_VAR;crd_idx++) crd_nm[crd_idx]=NULL; crd_dpl=(char *)strdup(cf->crd_sng); /* Search for spaces starting from end of string */ while((spc_ptr=strrchr(crd_dpl,' '))){ crd_nm[crd_nbr]=spc_ptr+1L; crd_nbr++; /* NUL-terminate so next search ends here */ *spc_ptr='\0'; } /* !sbs_ptr */ /* Final coordinate name begins where coordinate string starts */ crd_nm[crd_nbr]=crd_dpl; /* Change crd_nbr from 0-based index to actual coordinate number */ crd_nbr++; if(crd_nbr < 2){ (void)fprintf(stderr,"%s: WARNING %s found only %d coordinate(s) in \"coordinates\" attribute \"%s\", at least two are required. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,crd_nbr,cf->crd_sng); goto skp_cf; } /* !crd_nbr */ /* If more than two coordinate names are present, choose first two (searching backwards from end) with "degree" in units attributes, otherwise just choose first two */ crd_idx=crd_spt=0; while(crd_spt < 2 && crd_idx < crd_nbr){ cf->crd_nm[crd_spt]=crd_nm[crd_idx]; if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[crd_spt],&cf->crd_id[crd_spt])) == NC_NOERR){ cf->unt_sng[crd_spt]=nco_char_att_get(in_id,cf->crd_id[crd_spt],unt_sng); if(cf->unt_sng[crd_spt]){ if(strcasestr(cf->unt_sng[crd_spt],"degree")){ /* Increment count of spatial-like coordinates... 
*/ crd_spt++; }else{ /* ...or free() memory allocated during search */ cf->unt_sng[crd_spt]=(char *)nco_free(cf->unt_sng[crd_spt]); } /* !strcasestr() */ crd_idx++; } /* !rcd && att_typ */ } /* !rcd */ } /* !crd_spt */ /* If while()-loop above was successful, our search is over Otherwise, use first two coordinate names regardless of units, and print more diagnostics */ if(crd_spt < 2){ cf->crd_nm[0]=crd_nm[0]; cf->crd_nm[1]=crd_nm[1]; if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[0],&cf->crd_id[0])) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s not found. Turning-off CF coordinates search for this file.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0]); goto skp_cf; } /* !rcd */ if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[1],&cf->crd_id[1])) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s not found. Turning-off CF coordinates search for this file.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1]); goto skp_cf; } /* !rcd */ cf->unt_sng[0]=nco_char_att_get(in_id,cf->crd_id[0],unt_sng); if(cf->unt_sng[0]){ if(!strcasestr(cf->unt_sng[0],"degrees_")) (void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->unt_sng[0]); } /* !rcd && att_typ */ cf->unt_sng[1]=nco_char_att_get(in_id,cf->crd_id[1],unt_sng); if(cf->unt_sng[1]){ if(!strcasestr(cf->unt_sng[1],"degrees_")) (void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1],cf->unt_sng[1]); } /* !rcd && att_typ */ } /* !crd_spt */ int crd_rnk; /* [nbr] Coordinate rank */ rcd=nco_inq_varndims(in_id,cf->crd_id[0],&crd_rnk); if(crd_rnk != 2){ (void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s has %i dimension(s). 
Skipping CF coordinates method.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],crd_rnk); goto skp_cf; } /* !crd_rnk */ rcd=nco_inq_vardimid(in_id,cf->crd_id[0],cf->dmn_id); cf->dmn_nm[0]=(char *)nco_malloc(NC_MAX_NAME*sizeof(NC_CHAR)); cf->dmn_nm[1]=(char *)nco_malloc(NC_MAX_NAME*sizeof(NC_CHAR)); rcd=nco_inq_dimname(in_id,cf->dmn_id[0],cf->dmn_nm[0]); rcd=nco_inq_dimname(in_id,cf->dmn_id[1],cf->dmn_nm[1]); /* "coordinates" convention does not guarantee lat, lon are specified in that order Use "units" values, if any, to determine order In absence of "units", assume order is lat, lon */ nco_bool crd0_is_lat=False; /* [flg] First coordinate is latitude */ nco_bool crd0_is_lon=False; /* [flg] First coordinate is longitude */ nco_bool crd1_is_lat=False; /* [flg] Second coordinate is latitude */ nco_bool crd1_is_lon=False; /* [flg] Second coordinate is longitude */ if(cf->unt_sng[0]){ if(!strcasecmp(cf->unt_sng[0],"degrees_north")) crd0_is_lat=True; if(!strcasecmp(cf->unt_sng[0],"degrees_east")) crd0_is_lon=True; } /* endif */ if(cf->unt_sng[1]){ if(!strcasecmp(cf->unt_sng[1],"degrees_north")) crd1_is_lat=True; if(!strcasecmp(cf->unt_sng[1],"degrees_east")) crd1_is_lon=True; } /* endif */ assert((crd0_is_lat && crd1_is_lon) || (crd0_is_lon && crd1_is_lat)); int idx_lat; int idx_lon; if(crd0_is_lat && crd1_is_lon){ idx_lat=0; idx_lon=1; }else{ idx_lat=1; idx_lon=0; } /* endif */ /* Dimensions and coordinates have been vetted. Store as primary lookup names. Dimensions are always returned in order [LRV,MRV]=[0,1] LRV is along-track direction, and MRV is across-track (at least in NASA data) Internally we label LRV as "lat" and MRV as "lon" so that code looks similar for curvilinear and rectangular grids */ dmn_id_lat=cf->dmn_id[0]; dmn_id_lon=cf->dmn_id[1]; /* Subtlety: lat_nm_in is coordinate (variable+dimension) name when specified from command-line (as in nco_grd_nfr()), dimension name when found through CF-method (as in nco_rgr_wgt()). 
This confusing distinction could be avoided by passing command-line dimension names through-to nco_rgr_wgt(). However, that route would require complex priorities for what to do when passing command-line coordinate names not dimension names and visa-versa. */ lat_nm_in=strdup(cf->dmn_nm[0]); lon_nm_in=strdup(cf->dmn_nm[1]); //lat_nm_in=strdup(cf->crd_nm[idx_lat]); //lon_nm_in=strdup(cf->crd_nm[idx_lon]); /* Next four lines unnecessary in nco_rgr_wgt() which only needs dimension names (it reads input coordinates from map-file not data-file) */ //lat_ctr_id=cf->crd_id[idx_lat]; //lon_ctr_id=cf->crd_id[idx_lon]; //lat_dmn_nm=strdup(cf->dmn_nm[0]); //lon_dmn_nm=strdup(cf->dmn_nm[1]); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s \"coordinates\" attribute \"%s\" points to coordinates %s and %s. Latitude coordinate \"%s\" has dimensions \"%s\" and \"%s\". Longitude coordinate \"%s\" has dimensions \"%s\" and \"%s\".\n",nco_prg_nm_get(),fnc_nm,rgr_var,cf->crd_sng,cf->crd_nm[0],cf->crd_nm[1],cf->crd_nm[idx_lat],cf->dmn_nm[idx_lat],cf->dmn_nm[idx_lon],cf->crd_nm[idx_lon],cf->dmn_nm[idx_lat],cf->dmn_nm[idx_lon]); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s Coordinates %s and %s \"units\" values are \"%s\" and \"%s\", respectively.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->crd_nm[1],cf->unt_sng[0] ? cf->unt_sng[0] : "(non-existent)",cf->unt_sng[1] ? 
cf->unt_sng[1] : "(non-existent)"); /* Clean-up CF coordinates memory */ if(crd_dpl) crd_dpl=(char *)nco_free(crd_dpl); if(cf->crd_sng) cf->crd_sng=(char *)nco_free(cf->crd_sng); if(cf->dmn_nm[0]) cf->dmn_nm[0]=(char *)nco_free(cf->dmn_nm[0]); if(cf->dmn_nm[1]) cf->dmn_nm[1]=(char *)nco_free(cf->dmn_nm[1]); if(cf->unt_sng[0]) cf->unt_sng[0]=(char *)nco_free(cf->unt_sng[0]); if(cf->unt_sng[1]) cf->unt_sng[1]=(char *)nco_free(cf->unt_sng[1]); // if(foo) foo=(char *)nco_free(foo); } /* !rgr_var */ /* goto skp_cf */ skp_cf: /* free() any abandoned cf structure now */ if(!flg_cf) if(cf) cf=(cf_crd_sct *)nco_free(cf); rcd=NC_NOERR; /* End CF-coordinates block */ if(flg_grd_in_1D){ long col_nbr_in_dat; /* [nbr] Number of columns in input datafile */ /* Check default or command-line option first, then search usual suspects, and if that fails then guess unstructured dimension is dimension in input file with size n_a expected by input map file, suggested by PJCS Using internal database names first ensures users can pick between multiple dimensions of size n_a 20180313: fxm New PJCS algorithm is superior, should eliminate internal database for unstructured grids? 
Database is necessary for 2D grids because otherwise no good way to disambiguate latitude from longitude */ if(col_nm_in && (rcd=nco_inq_dimid_flg(in_id,col_nm_in,&dmn_id_col)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(in_id,"lndgrid",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("lndgrid"); /* CLM */ else if((rcd=nco_inq_dimid_flg(in_id,"nCells",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("nCells"); /* MPAS-O/I */ else if((rcd=nco_inq_dimid_flg(in_id,"nEdges",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("nEdges"); /* MPAS-O/I */ else if((rcd=nco_inq_dimid_flg(in_id,"ncol_d",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("ncol_d"); /* EAM dynamics grid */ else if((rcd=nco_inq_dimid_flg(in_id,"ncol_p",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("ncol_d"); /* EAM physics grid */ else if((rcd=nco_inq_dimid_flg(in_id,"sounding_id",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("sounding_id"); /* OCO2 */ /* 20180605: Database matches to above names may be false-positives ALM/CLM/CTSM/ELM store all possible dimension names that archived variables could use NCO only prints dimensions used in variables, while ncdump prints all dimensions From ncdump we find usually unused ALM/CLM/CTSM/ELM dimensions: gridcell, lndunit, column, pft, levurb, numrad, levsno Check that matched dimension has expected size: */ if(dmn_id_col != NC_MIN_INT){ rcd=nco_inq_dimlen(in_id,dmn_id_col,&col_nbr_in_dat); if(col_nbr_in != col_nbr_in_dat){ dmn_id_col=NC_MIN_INT; if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s database-prioritized unstructured dimension candidate \"%s\" has size not expected by supplied map-file: mapfile col_nbr_in = %ld != %ld = col_nbr_in from datafile. 
HINT: Check that source grid (i.e., \"grid A\") used to create mapfile matches grid on which data are stored in input datafile.\n",nco_prg_nm_get(),fnc_nm,col_nm_in,col_nbr_in,col_nbr_in_dat); } /* !col_nbr_in */ }else{ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s expects data on an unstructured grid yet cannot find a dimension name that matches the usual suspects for unstructured dimensions (ncol, gridcell, lndgrid, nCells, nEdges, sounding_id). Consider specifying horizontal dimension name to ncks with \"--rgr col_nm=foo\" or to ncremap with \"ncremap -R '--rgr col_nm=foo'\", and consider requesting the NCO project to add this horizontal dimension name to its internal database.\n",nco_prg_nm_get(),fnc_nm); } /* !dmn_id_col */ if(dmn_id_col == NC_MIN_INT){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s Proceeding with fallback algorithm to guess unstructured dimension as first dimension in data file of equal size to that expected by supplied map-file...\n",nco_prg_nm_get(),fnc_nm); /* 20180312: Unstructured dimension must have same size as input map file, suggested by PJCS */ int *dmn_ids_in; /* [nbr] Input file dimension IDs */ int dmn_nbr_in; /* [nbr] Number of dimensions in input file */ const int flg_prn=0; /* [enm] Parent flag */ rcd=nco_inq_dimids(in_id,&dmn_nbr_in,NULL,flg_prn); dmn_ids_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); rcd=nco_inq_dimids(in_id,NULL,dmn_ids_in,flg_prn); /* Find dimension, if any, with same size as map "a" src_grid_dims[0] = n_a dimension */ for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ dmn_id_col=dmn_ids_in[dmn_idx]; rcd=nco_inq_dimlen(in_id,dmn_id_col,&col_nbr_in_dat); if(col_nbr_in == col_nbr_in_dat){ rcd=nco_inq_dimname(in_id,dmn_id_col,col_nm_in); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s found that dimension %s in datafile has same size (n_a = %ld) expected by map-file. 
Assuming %s is the unstructured dimension.\n",nco_prg_nm_get(),fnc_nm,col_nm_in,col_nbr_in,col_nm_in); break; } /* !col_nbr_in */ } /* !dmn_idx */ if(dmn_ids_in) dmn_ids_in=(int *)nco_free(dmn_ids_in); if(dmn_idx == dmn_nbr_in){ dmn_id_col=NC_MIN_INT; (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") expects data on an unstructured grid but cannot find a dimension in the input data file (or, with ncremap, a possibly already subsetted intermediate file) that matches the size of the unstructured dimension in the supplied map-file = src_grd_dims[0] = n_a = %ld.\nHINT: Ensure at least one member of the variable extraction list has a spatial dimension of size = %ld\n",nco_prg_nm_get(),fnc_nm,col_nbr_in,col_nbr_in); nco_exit(EXIT_FAILURE); } /* !dmn_idx */ } /* !col_nm_in */ } /* !1D */ if(flg_grd_in_2D){ long lat_nbr_in_dat; /* [nbr] Number of latitudes in input datafile */ if(lat_nm_in && (rcd=nco_inq_dimid_flg(in_id,lat_nm_in,&dmn_id_lat)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(in_id,"latitude",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("latitude"); else if((rcd=nco_inq_dimid_flg(in_id,"lat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("lat"); else if((rcd=nco_inq_dimid_flg(in_id,"Latitude",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Latitude"); else if((rcd=nco_inq_dimid_flg(in_id,"Lat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Lat"); else if((rcd=nco_inq_dimid_flg(in_id,"south_north",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("south_north"); /* WRF */ else if((rcd=nco_inq_dimid_flg(in_id,"south_north_stag",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("south_north_stag"); else if((rcd=nco_inq_dimid_flg(in_id,"YDim:location",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("YDim:location"); /* AIRS L3 */ else if((rcd=nco_inq_dimid_flg(in_id,"YDim:MOD_Grid_monthly_CMG_VI",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("YDim:MOD_Grid_monthly_CMG_VI"); /* MODIS MOD13C2 */ else if((rcd=nco_inq_dimid_flg(in_id,"natrack",&dmn_id_lat)) == 
NC_NOERR) lat_nm_in=strdup("natrack"); /* MODIS DeepBlue SeaWiFS L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"nj",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nj"); /* CICE RTM */ else if((rcd=nco_inq_dimid_flg(in_id,"nlat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nlat"); /* POP */ else if((rcd=nco_inq_dimid_flg(in_id,"nscan",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nscan"); /* AMSR, TRMM */ else if((rcd=nco_inq_dimid_flg(in_id,"nTimes",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nTimes"); /* OMI L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"number_of_lines",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("number_of_lines"); /* DSCOVR L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"GeoTrack",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("GeoTrack"); /* AIRS L2 DAP NC */ else if((rcd=nco_inq_dimid_flg(in_id,"GeoTrack:L2_Standard_atmospheric&surface_product",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("GeoTrack:L2_Standard_atmospheric&surface_product"); /* AIRS L2 HDF */ else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Along_Swath:mod04",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Cell_Along_Swath:mod04"); /* MODIS MOD04 L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Along_Swath_mod04",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Cell_Along_Swath_mod04"); /* MODIS MOD04 L2 (ncl_convert2nc changes colon to underscore) */ else if((rcd=nco_inq_dimid_flg(in_id,"CO_Latitude",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("CO_Latitude"); else if((rcd=nco_inq_dimid_flg(in_id,"j",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("j"); /* CMIP5 NorESM1 ocean */ else if((rcd=nco_inq_dimid_flg(in_id,"latitude0",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("latitude0"); /* Oxford */ else if((rcd=nco_inq_dimid_flg(in_id,"y",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("y"); /* NEMO */ else if((rcd=nco_inq_dimid_flg(in_id,"x",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("x"); /* NSIDC polar stereographic (NB: unfortunate incompatible conflict between NEMO & NSIDC names) */ else 
if((rcd=nco_inq_dimid_flg(in_id,"y1",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("y1"); /* NSIDC EASE */ else if((rcd=nco_inq_dimid_flg(in_id,"ygrid_0",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("ygrid_0"); /* NWS HRRR */ else{ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports unable to find latitude dimension in input file. Tried the usual suspects. HINT: Inform regridder of input latitude dimension name with \"ncks --rgr lat_nm_in=name\" or \"ncremap -R '--rgr lat_nm_in=name'\"\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !lat */ rcd=nco_inq_dimlen(in_id,dmn_id_lat,&lat_nbr_in_dat); if(lat_nbr_in != lat_nbr_in_dat){ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports mapfile and data file dimension sizes disagree: mapfile lat_nbr_in = %ld != %ld = lat_nbr_in from datafile. HINT: Check that source grid (i.e., \"grid A\") used to create mapfile matches grid on which data are stored in input datafile.\n",nco_prg_nm_get(),fnc_nm,lat_nbr_in,lat_nbr_in_dat); nco_exit(EXIT_FAILURE); } /* !err */ long lon_nbr_in_dat; /* [nbr] Number of longitudes in input datafile */ if(lon_nm_in && (rcd=nco_inq_dimid_flg(in_id,lon_nm_in,&dmn_id_lon)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(in_id,"longitude",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("longitude"); else if((rcd=nco_inq_dimid_flg(in_id,"lon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("lon"); else if((rcd=nco_inq_dimid_flg(in_id,"Longitude",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Longitude"); else if((rcd=nco_inq_dimid_flg(in_id,"Lon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Lon"); else if((rcd=nco_inq_dimid_flg(in_id,"west_east",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("west_east"); /* WRF */ else if((rcd=nco_inq_dimid_flg(in_id,"west_east_stag",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("west_east_stag"); else if((rcd=nco_inq_dimid_flg(in_id,"XDim:location",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("XDim:location"); /* AIRS L3 */ else 
if((rcd=nco_inq_dimid_flg(in_id,"XDim:MOD_Grid_monthly_CMG_VI",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("XDim:MOD_Grid_monthly_CMG_VI"); /* MODIS MOD13C2 */ else if((rcd=nco_inq_dimid_flg(in_id,"ni",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("ni"); /* CICE RTM */ else if((rcd=nco_inq_dimid_flg(in_id,"nlon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("nlon"); /* POP */ else if((rcd=nco_inq_dimid_flg(in_id,"npix",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("npix"); /* AMSR */ else if((rcd=nco_inq_dimid_flg(in_id,"npixel",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("npixel"); /* TRMM */ else if((rcd=nco_inq_dimid_flg(in_id,"nxtrack",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("nxtrack"); /* MODIS DeepBlue SeaWiFS L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"nXtrack",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("nXtrack"); /* OMI L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"number_of_pixels",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("number_of_pixels"); /* DSCOVR L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"GeoXTrack",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("GeoXTrack"); /* AIRS L2 DAP NC */ else if((rcd=nco_inq_dimid_flg(in_id,"GeoXTrack:L2_Standard_atmospheric&surface_product",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("GeoXTrack:L2_Standard_atmospheric&surface_product"); /* AIRS L2 HDF */ else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Across_Swath:mod04",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Cell_Across_Swath:mod04"); /* MODIS MOD04 L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Across_Swath_mod04",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Cell_Across_Swath_mod04"); /* MODIS MOD04 L2 (ncl_convert2nc changes colon to underscore) */ else if((rcd=nco_inq_dimid_flg(in_id,"i",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("i"); /* CMIP5 NorESM1 ocean */ else if((rcd=nco_inq_dimid_flg(in_id,"longitude0",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("longitude0"); /* Oxford */ else if((rcd=nco_inq_dimid_flg(in_id,"x",&dmn_id_lon)) == NC_NOERR) 
lon_nm_in=strdup("x"); /* NEMO */ else if((rcd=nco_inq_dimid_flg(in_id,"y",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("y"); /* NSIDC polar stereographic (NB: unfortunate incompatible conflict between NEMO & NSIDC names) */ else if((rcd=nco_inq_dimid_flg(in_id,"x1",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("x1"); /* NSIDC EASE */ else if((rcd=nco_inq_dimid_flg(in_id,"xgrid_0",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("xgrid_0"); /* NWS HRRR */ else{ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports unable to find longitude dimension in input file. Tried the usual suspects. HINT: Inform regridder of input longitude dimension name with \"ncks --rgr lon_nm_in=name\" or \"ncremap -R '--rgr lon_nm_in=name'\"\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !lat */ rcd=nco_inq_dimlen(in_id,dmn_id_lon,&lon_nbr_in_dat); if(lon_nbr_in != lon_nbr_in_dat){ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports mapfile and data file dimension sizes disagree: mapfile lon_nbr_in = %ld != %ld = lon_nbr_in from datafile. 
HINT: Check that source grid (i.e., \"grid A\") used to create mapfile matches grid on which data are stored in input datafile.\n",nco_prg_nm_get(),fnc_nm,lon_nbr_in,lon_nbr_in_dat); nco_exit(EXIT_FAILURE); } /* !err */ } /* !2D */ /* Do not extract grid variables (that are also extensive variables) like lon, lat, area, and masks If necessary, use remap data to diagnose them from scratch Other extensive variables (like counts, population) will be extracted and summed not averaged */ /* Exception list source: ALM/CLM: landmask (20170504: Debatable, including erroneous mask may be better than completely excluding an expected mask) (20170504: must keep landfrac since regridded by ncremap for SGS option) AMSR: Latitude, Longitude CAM, CERES, CMIP5: lat, lon CAM, CMIP5: gw, lat_bnds, lon_bnds CAM-FV: slon, slat, w_stag (w_stag is weights for slat grid, analagous to gw for lat grid) CAM-SE, EAM, MOSART: area CICE: latt_bounds, lont_bounds, latu_bounds, lonu_bounds, TLAT, TLON, ULAT, ULON (NB: CICE uses ?LON and POP uses ?LONG) (aice is ice area, tmask is state-variable mask, both not currently excluded, although all binary masks like tmask should be recomputed on new grid) DSCOVR L2: latitude, longitude ESMF: gridcell_area GPM: S1_Latitude, S1_Longitude HIRDLS: Latitude MAR/RACMO: LAT, LON MLS: CO_Latitude MPAS-O/I/LI: areaCell, latCell, lonCell and others that are all handled by separated MPAS convention implementation below NCO: lat_vertices, lon_vertices NEMO: nav_lat, nav_lon NWS HRRR: gridlat_0, gridlon_0 OCO2: latitude_bnds, longitude_bnds OMI DOMINO: Latitude, LatitudeCornerpoints, Longitude, LongitudeCornerpoints Oxford: global_latitude0, global_longitude0, latitude0, longitude0 POP: TLAT, TLONG, ULAT, ULONG (NB: CICE uses ?LON and POP uses ?LONG) (POP does not archive spatial bounds) TRMM: Latitude, Longitude UV-CDAT regridder: bounds_lat, bounds_lon Unknown: XLAT_M, XLONG_M WRF: XLAT, XLONG */ const int var_xcl_lst_nbr=49; /* [nbr] Number of objects on 
exclusion list */ const char *var_xcl_lst[]={"/area","/gridcell_area","/gw","/LAT","/lat","/Latitude","/latitude","/nav_lat","/global_latitude0","gridlat_0","/latitude0","/slat","/TLAT","/ULAT","/XLAT","/XLAT_M","/CO_Latitude","/S1_Latitude","/lat_bnds","/lat_vertices","/latt_bounds","/latu_bounds","/latitude_bnds","/LatitudeCornerpoints","/bounds_lat","/LON","/lon","/Longitude","/longitude","/nav_lon","/global_longitude0","gridlon_0","/longitude0","/slon","/TLON","/TLONG","/ULON","/ULONG","/XLONG","/XLONG_M","/CO_Longitude","/S1_Longitude","/lon_bnds","/lon_vertices","/lont_bounds","/lonu_bounds","/longitude_bnds","/LongitudeCornerpoints","/bounds_lon","/w_stag"}; int var_cpy_nbr=0; /* [nbr] Number of copied variables */ int var_rgr_nbr=0; /* [nbr] Number of regridded variables */ int var_xcl_nbr=0; /* [nbr] Number of deleted variables */ int var_crt_nbr=0; /* [nbr] Number of created variables */ int var_xtn_nbr=0; /* [nbr] Number of extensive variables */ unsigned int idx_tbl; /* [idx] Counter for traversal table */ const unsigned int trv_nbr=trv_tbl->nbr; /* [idx] Number of traversal table entries */ for(idx=0;idx<var_xcl_lst_nbr;idx++){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++) if(!strcmp(trv_tbl->lst[idx_tbl].nm_fll,var_xcl_lst[idx])) break; if(idx_tbl < trv_nbr){ if(trv_tbl->lst[idx_tbl].flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); var_xcl_nbr++; } /* endif */ trv_tbl->lst[idx_tbl].flg_xtr=False; } /* endif */ } /* !idx */ cnv_sct *cnv; /* [sct] Convention structure */ /* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */ cnv=nco_cnv_ini(in_id); if(cnv->MPAS){ /* 20160228: MPAS has a host of mysterious grid and extensive variables that should probably not be regridded 20180206: Add from MPAS-LI xCell, yCell, zCell, and [xyz]Edge, and [xyz]Vertex 20180917: Restrict 
exclusion list to a subset of variables with nCells-dimension Six nCells-variables may be valuable when regridded to lat/lon mpas_xcl_lst in nco_rgr_wgt() and MPAS var_xcl_lst in nco_var_is_fix() differ by these six variables: areaCell for comparison to area(lat,lon) cellMask for area-weighted mask maxLevelCell for area-weighted underwater topographic mask xCell, yCell, zCell for area-weighted cartesian coordinates 20180918: Regridder currently only works on cell-based coordinates Decided regridder will omit not copy fields on vertex- or edge-based coordinates until it can regrid them Regridding vertex- or edge-based fields would require new sparse matrix for vertices or edges How would ERWG or TempestRemap handle that? MPAS geophysical variables on vertex-based (not cell-based) coordinates include: avg_airStressVertexUGeo_1, avg_airStressVertexVGeo_1, uOceanVelocityVertexGeo_1, uVelocityGeo_1, vOceanVelocityVertexGeo_1, vVelocityGeo_1 MPAS geophysical variables on edge-based (not cell-based) coordinates include: principalStress1Var_1, principalStress2Var_1 */ const int mpas_xcl_lst_nbr=35; const char *mpas_xcl_lst[]={"/angleEdge","/areaTriangle","/cellsOnCell","/cellsOnEdge","/cellsOnVertex","/dcEdge","/dvEdge","/edgeMask","/edgesOnCell","/edgesOnEdge","/edgesOnVertex","/indexToCellID","/indexToEdgeID","/indexToVertexID","/kiteAreasOnVertex","/latCell","/latEdge","/latVertex","/lonCell","/lonEdge","/lonVertex","/maxLevelEdgeTop","/meshDensity","/nEdgesOnCell","/nEdgesOnEdge","/vertexMask","/verticesOnCell","/verticesOnEdge","/weightsOnEdge","/xEdge","/yEdge","/zEdge","/xVertex","/yVertex","/zVertex"}; for(idx=0;idx<mpas_xcl_lst_nbr;idx++){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++) if(!strcmp(trv_tbl->lst[idx_tbl].nm_fll,mpas_xcl_lst[idx])) break; if(idx_tbl < trv_nbr){ if(trv_tbl->lst[idx_tbl].flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined MPAS exclusion-list 
variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); var_xcl_nbr++; } /* endif */ trv_tbl->lst[idx_tbl].flg_xtr=False; } /* endif */ } /* !idx */ } /* !MPAS */ char *dmn_nm_cp; /* [sng] Dimension name as char * to reduce indirection */ int dmn_nbr_in; /* [nbr] Number of dimensions in input variable */ int dmn_nbr_out; /* [nbr] Number of dimensions in output variable */ nco_bool has_lon; /* [flg] Contains longitude dimension */ nco_bool has_lat; /* [flg] Contains latitude dimension */ trv_sct trv; /* [sct] Traversal table object structure to reduce indirection */ /* Define regridding flag for each variable */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; dmn_nbr_in=trv_tbl->lst[idx_tbl].nbr_dmn; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ has_lon=False; has_lat=False; if(flg_grd_in_2D){ for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ /* Pre-determine flags necessary during next loop */ dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm; /* fxm: Generalize to include any variable containing two coordinates with "standard_name" = "latitude" and "longitude" */ if(!has_lon) has_lon=!strcmp(dmn_nm_cp,lon_nm_in); if(!has_lat) has_lat=!strcmp(dmn_nm_cp,lat_nm_in); } /* end loop over dimensions */ } /* !flg_grd_in_2D */ for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm; /* Regrid variables containing the horizontal spatial dimension on 1D grids, and both latitude and longitude on 2D grids */ if(!strcmp(dmn_nm_cp,col_nm_in) || (has_lon && has_lat)){ trv_tbl->lst[idx_tbl].flg_rgr=True; var_rgr_nbr++; break; } /* endif */ } /* end loop over dimensions */ if(dmn_idx == dmn_nbr_in){ /* Not regridded, so must be omitted or copied... 
*/ if(flg_grd_in_2D && (has_lon || has_lat)){ /* Single spatial dimensional variables on 2D input grids are likely extensive (e.g., grd_mrd_lng from bds) These could be salvaged with explicit rules or implicit assumptions */ trv_tbl->lst[idx_tbl].flg_xtr=False; var_xcl_nbr++; if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) extensive-seeming (e.g., 1D spatial variable in 2D input grid, or 2D spatial variable without primary grid dimensions from multi-grid file (e.g., west_east_stag or south_north_stag instead of west_east or south_north)) variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); }else{ /* !omitted */ /* Copy all variables that are not regridded or omitted */ var_cpy_nbr++; } /* !omitted */ } /* endif not regridded */ } /* end nco_obj_typ_var */ } /* end idx_tbl */ if(!var_rgr_nbr) (void)fprintf(stdout,"%s: WARNING %s reports no variables fit regridding criteria. The regridder expects something to regrid, and variables not regridded are copied straight to output. HINT: If the name(s) of the input horizontal spatial dimensions to be regridded (e.g., latitude and longitude or column) do not match NCO's preset defaults (case-insensitive unambiguous forms and abbreviations of \"latitude\", \"longitude\", and \"ncol\", respectively) then change the dimension names that NCO looks for. 
Instructions are at http://nco.sf.net/nco.html#regrid, e.g., \"ncks --rgr col=lndgrid --rgr lat=north\" or \"ncremap -R '--rgr col=lndgrid --rgr lat=north'\".\n",nco_prg_nm_get(),fnc_nm); for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.flg_rgr){ for(int xtn_idx=0;xtn_idx<rgr->xtn_nbr;xtn_idx++){ /* 20150927: Extensive variable treatments are still in alpha-development Currently testing on AIRS TSurfStd_ct (by summing not averaging) In future may consider variables that need more complex (non-summing) extensive treatment MPAS-O/I has a zillion of these [xyz]Cell, cellsOnCell, fCell, indexToCellID, maxLevelCell, meshDensity Not to mention the variables that depend on nEdges and nVertices... */ if(!strcmp(trv.nm,rgr->xtn_var[xtn_idx])){ trv_tbl->lst[idx_tbl].flg_xtn=True; var_xtn_nbr++; if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO Variable %s will be treated as extensive (summed not averaged)\n",nco_prg_nm_get(),trv.nm_fll); } /* !strcmp */ } /* !xtn_idx */ } /* !flg_rgr */ } /* !idx_tbl */ if(nco_dbg_lvl_get() >= nco_dbg_sbr){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr) (void)fprintf(stderr,"Regrid %s? %s\n",trv.nm,trv.flg_rgr ? 
"Yes" : "No"); } /* end idx_tbl */ } /* end dbg */ /* Lay-out regridded file */ aed_sct aed_mtd; char *area_nm_out; char *att_nm; char *bnd_nm_out; char *bnd_tm_nm_out; char *col_nm_out; char *frc_nm_out; char *lat_bnd_nm_out; char *lat_dmn_nm_out; char *lat_nm_out; char *lat_wgt_nm; char *lon_bnd_nm_out; char *lon_dmn_nm_out; char *lon_nm_out; char *msk_nm_out; char *slat_nm_out=NULL; char *slat_wgt_nm_out=NULL; char *slon_nm_out=NULL; int dmn_id_bnd; /* [id] Dimension ID */ int dmn_id_bnd_tm; /* [id] Dimension ID */ int dmn_id_slat; /* [id] Dimension ID */ int dmn_id_slon; /* [id] Dimension ID */ int area_out_id; /* [id] Variable ID for area */ int frc_out_id; /* [id] Variable ID for fraction */ int lon_out_id; /* [id] Variable ID for longitude */ int lat_out_id; /* [id] Variable ID for latitude */ int lat_wgt_id; /* [id] Variable ID for latitude weight */ int lon_bnd_id; /* [id] Variable ID for lon_bnds/lon_vertices */ int lat_bnd_id; /* [id] Variable ID for lat_bnds/lat_vertices */ int msk_out_id; /* [id] Variable ID for mask */ int slat_out_id; /* [id] Variable ID for staggered latitude */ int slat_wgt_id; /* [id] Variable ID for staggered latitude weight */ int slon_out_id; /* [id] Variable ID for staggered longitude */ int dmn_ids_out[dmn_nbr_grd_max]; /* [id] Dimension IDs array for output variable */ long dmn_srt_out[dmn_nbr_grd_max]; long dmn_cnt_tuo[dmn_nbr_grd_max]; /* Name output dimensions/variables */ area_nm_out=rgr->area_nm; bnd_tm_nm_out=rgr->bnd_tm_nm; frc_nm_out=rgr->frc_nm; lat_bnd_nm_out=rgr->lat_bnd_nm; lat_wgt_nm=rgr->lat_wgt_nm; lon_bnd_nm_out=rgr->lon_bnd_nm; msk_nm_out=rgr->msk_nm; /* Use explicitly specified output names, if any, otherwise use input names (either explicitly specified or discovered by fuzzing) */ if(rgr->col_nm_out) col_nm_out=rgr->col_nm_out; else col_nm_out=col_nm_in; if(rgr->lat_dmn_nm) lat_dmn_nm_out=rgr->lat_dmn_nm; else lat_dmn_nm_out=lat_nm_in; if(rgr->lon_dmn_nm) lon_dmn_nm_out=rgr->lon_dmn_nm; else 
lon_dmn_nm_out=lon_nm_in; if(rgr->lat_nm_out) lat_nm_out=rgr->lat_nm_out; else lat_nm_out=lat_nm_in; if(rgr->lon_nm_out) lon_nm_out=rgr->lon_nm_out; else lon_nm_out=lon_nm_in; if(flg_grd_out_1D){ bnd_nm_out=rgr->vrt_nm; lat_bnd_nm_out=rgr->lat_vrt_nm; lon_bnd_nm_out=rgr->lon_vrt_nm; } /* !flg_grd_out_1D */ if(flg_grd_out_crv){ bnd_nm_out=rgr->bnd_nm; } /* !flg_grd_out_crv */ if(flg_grd_out_rct){ bnd_nm_out=rgr->bnd_tm_nm; /* NB: default to bnd_tm_nm for spatial bounds */ } /* !flg_grd_out_rct */ if(flg_grd_out_2D){ lat_bnd_nm_out=rgr->lat_bnd_nm; lon_bnd_nm_out=rgr->lon_bnd_nm; } /* !flg_grd_out_2D */ if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ slat_nm_out=strdup("slat"); slat_wgt_nm_out=strdup("w_stag"); slon_nm_out=strdup("slon"); } /* !nco_grd_lat_fv */ /* Ensure temporal bounds dimension name is distinct from spatial bounds when their sizes differ */ if(bnd_nbr_out != bnd_tm_nbr_out){ if(!strcmp(bnd_nm_out,bnd_tm_nm_out)){ (void)fprintf(stdout,"%s: INFO %s reports spatial and temporal output bounds dimensions are identical (and named \"%s\") by default for rectangular output grids because both can be stored as 2D arrays. That cannot work for this mapping because temporal and spatial bounds dimensions sizes differ (bnd_nbr_out = %d, bnd_tm_nbr_out = %d). Using fall-back spatial bounds name \"%s\" instead. 
HINT: You may change one or both manually with \"ncks --rgr bnd_nm=name\" or \"ncks --rgr bnd_tm_nm=name\", or, using ncremap, with \"ncremap -R '--rgr bnd_nm=name'\" or \"ncremap -R '--rgr bnd_tm_nm=name'\"\n",nco_prg_nm_get(),fnc_nm,bnd_tm_nm_out,bnd_nbr_out,bnd_tm_nbr_out,bnd_nm_out); } /* !strcmp() */ } /* !bnd_nbr_out */ /* Persistent metadata */ aed_sct aed_mtd_crd; char *att_val_crd=NULL; char *att_nm_crd=NULL; att_nm_crd=strdup("coordinates"); aed_mtd_crd.att_nm=att_nm_crd; if(flg_grd_out_1D || flg_grd_out_crv) aed_mtd_crd.mode=aed_overwrite; else aed_mtd_crd.mode=aed_delete; aed_mtd_crd.type=NC_CHAR; aed_mtd_crd.sz=strlen(lat_nm_out)+strlen(lon_nm_out)+1L; att_val_crd=(char *)nco_malloc((aed_mtd_crd.sz+1L)*nco_typ_lng(aed_mtd_crd.type)); (void)sprintf(att_val_crd,"%s %s",lat_nm_out,lon_nm_out); aed_mtd_crd.val.cp=att_val_crd; /* Reminder: Regridder area_out options, e.g., --rgr area_out, set flg_area_out to control adding "area" variable to regridded output Regridder cll_msr options, --rgr cll_msr, set flg_cll_msr to control adding "cell_measures" attribute to regridded output ncks & ncra cll_msr options, --cll_msr, set EXTRACT_CLL_MSR to control adding "cell_measures" variables (e.g., area) to extraction list of input file EXTRACT_CLL_MSR supercedes --rgr area_out in determining whether to add "area" to regridded output */ nco_bool flg_area_out=rgr->flg_area_out; /* [flg] Add area to output */ nco_bool flg_cll_msr=rgr->flg_cll_msr; /* [flg] Add cell_measures attribute */ aed_sct aed_mtd_cll_msr; char *att_nm_cll_msr=NULL; char *att_val_cll_msr=NULL; if(flg_cll_msr){ att_nm_cll_msr=strdup("cell_measures"); aed_mtd_cll_msr.att_nm=att_nm_cll_msr; aed_mtd_cll_msr.mode=aed_overwrite; aed_mtd_cll_msr.type=NC_CHAR; att_val_cll_msr=(char *)nco_malloc((strlen(area_nm_out)+6L+1L)*nco_typ_lng(aed_mtd_cll_msr.type)); (void)sprintf(att_val_cll_msr,"area: %s",area_nm_out); aed_mtd_cll_msr.sz=strlen(att_val_cll_msr); aed_mtd_cll_msr.val.cp=att_val_cll_msr; } /* 
!flg_cll_msr */ /* Define new horizontal dimensions before all else */ if(flg_grd_out_1D){ rcd+=nco_def_dim(out_id,col_nm_out,col_nbr_out,&dmn_id_col); } /* !flg_grd_out_1D */ if(flg_grd_out_2D){ rcd+=nco_def_dim(out_id,lat_dmn_nm_out,lat_nbr_out,&dmn_id_lat); rcd+=nco_def_dim(out_id,lon_dmn_nm_out,lon_nbr_out,&dmn_id_lon); if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ rcd+=nco_def_dim(out_id,slat_nm_out,slat_nbr_out,&dmn_id_slat); rcd+=nco_def_dim(out_id,slon_nm_out,slon_nbr_out,&dmn_id_slon); } /* !nco_grd_lat_fv */ } /* !flg_grd_out_2D */ /* If dimension has not been defined, define it */ rcd=nco_inq_dimid_flg(out_id,bnd_tm_nm_out,&dmn_id_bnd_tm); if(rcd != NC_NOERR) rcd=nco_def_dim(out_id,bnd_tm_nm_out,bnd_tm_nbr_out,&dmn_id_bnd_tm); /* If dimension has not been defined, define it */ rcd=nco_inq_dimid_flg(out_id,bnd_nm_out,&dmn_id_bnd); if(rcd != NC_NOERR) rcd=nco_def_dim(out_id,bnd_nm_out,bnd_nbr_out,&dmn_id_bnd); char dmn_nm[NC_MAX_NAME]; /* [sng] Dimension name */ char *var_nm; /* [sng] Variable name */ int *dmn_id_in=NULL; /* [id] Dimension IDs */ int *dmn_id_out=NULL; /* [id] Dimension IDs */ int var_id_in; /* [id] Variable ID */ int var_id_out; /* [id] Variable ID */ nc_type var_typ_out; /* [enm] Variable type to write to disk */ nc_type var_typ_rgr; /* [enm] Variable type used during regridding */ nco_bool PCK_ATT_CPY=True; /* [flg] Copy attributes "scale_factor", "add_offset" */ int shuffle; /* [flg] Turn-on shuffle filter */ int deflate; /* [flg] Turn-on deflate filter */ deflate=(int)True; shuffle=NC_SHUFFLE; dfl_lvl=rgr->dfl_lvl; fl_out_fmt=rgr->fl_out_fmt; /* Define new coordinates and grid variables in regridded file */ if(flg_grd_out_1D){ rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&lat_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&lon_out_id); if(dfl_lvl > 0) 
(void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; dmn_ids_out[0]=dmn_id_col; dmn_ids_out[1]=dmn_id_bnd; rcd+=nco_def_var(out_id,lat_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; dmn_ids_out[0]=dmn_id_col; dmn_ids_out[1]=dmn_id_bnd; rcd+=nco_def_var(out_id,lon_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; if(flg_area_out){ rcd+=nco_def_var(out_id,area_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&area_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_area_out */ if(flg_frc_out_wrt){ rcd+=nco_def_var(out_id,frc_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&frc_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,frc_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_frc_out_wrt */ if(flg_msk_out){ rcd+=nco_def_var(out_id,msk_nm_out,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_col,&msk_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_msk_out */ } /* !flg_grd_out_1D */ if(flg_grd_out_crv){ dmn_ids_out[0]=dmn_id_lat; dmn_ids_out[1]=dmn_id_lon; rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; if(flg_area_out){ rcd+=nco_def_var(out_id,area_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&area_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_area_out */ if(flg_frc_out_wrt){ 
rcd+=nco_def_var(out_id,frc_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&frc_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,frc_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_frc_out_wrt */ if(flg_msk_out){ rcd+=nco_def_var(out_id,msk_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&msk_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_msk_out */ dmn_ids_out[0]=dmn_id_lat; dmn_ids_out[1]=dmn_id_lon; dmn_ids_out[2]=dmn_id_bnd; rcd+=nco_def_var(out_id,lat_bnd_nm_out,crd_typ_out,dmn_nbr_3D,dmn_ids_out,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lon_bnd_nm_out,crd_typ_out,dmn_nbr_3D,dmn_ids_out,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_grd_out_crv */ if(flg_grd_out_rct){ rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lat,&lat_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lon,&lon_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ rcd+=nco_def_var(out_id,slat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_slat,&slat_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,slat_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,slat_wgt_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_slat,&slat_wgt_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,slat_wgt_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,slon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_slon,&slon_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,slon_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !nco_grd_lat_fv */ dmn_ids_out[0]=dmn_id_lat; 
dmn_ids_out[1]=dmn_id_bnd; rcd+=nco_def_var(out_id,lat_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; dmn_ids_out[0]=dmn_id_lon; dmn_ids_out[1]=dmn_id_bnd; rcd+=nco_def_var(out_id,lon_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lat_wgt_nm,crd_typ_out,dmn_nbr_1D,&dmn_id_lat,&lat_wgt_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_wgt_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; dmn_ids_out[0]=dmn_id_lat; dmn_ids_out[1]=dmn_id_lon; if(flg_area_out){ rcd+=nco_def_var(out_id,area_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&area_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_area_out */ if(flg_frc_out_wrt){ rcd+=nco_def_var(out_id,frc_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&frc_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,frc_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_frc_out_wrt */ if(flg_msk_out){ rcd+=nco_def_var(out_id,msk_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&msk_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_msk_out */ } /* !flg_grd_out_rct */ /* Pre-allocate dimension ID and cnt/srt space */ int dmn_nbr_max; /* [nbr] Maximum number of dimensions variable can have in input or output */ int dmn_in_fst; /* [idx] Offset of input- relative to output-dimension due to non-MRV dimension insertion */ int dmn_nbr_rec; /* [nbr] Number of unlimited dimensions */ int *dmn_ids_rec=NULL; /* [id] Unlimited dimension IDs */ rcd+=nco_inq_ndims(in_id,&dmn_nbr_max); dmn_nbr_max++; /* Safety in case regridding adds dimension */ dmn_id_in=(int *)nco_malloc(dmn_nbr_max*sizeof(int)); dmn_id_out=(int *)nco_malloc(dmn_nbr_max*sizeof(int)); 
dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); dmn_cnt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* Identify all record-dimensions in input file */ rcd+=nco_inq_unlimdims(in_id,&dmn_nbr_rec,dmn_ids_rec); if(dmn_nbr_rec > 0){ dmn_ids_rec=(int *)nco_malloc(dmn_nbr_rec*sizeof(int)); rcd+=nco_inq_unlimdims(in_id,&dmn_nbr_rec,dmn_ids_rec); } /* !dmn_nbr_rec */ int flg_pck; /* [flg] Variable is packed on disk */ nco_bool has_mss_val; /* [flg] Has numeric missing value attribute */ double mss_val_dbl; /* Define regridded and copied variables in output file */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv_tbl->lst[idx_tbl].flg_mrv=True; trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ var_nm=trv.nm; /* Preserve input type in output type */ var_typ_out=trv.var_typ; /* Demote DP to SP to save space. fxm: missing value type will then be inconsistent if copied without demotion */ //if(trv.var_typ == NC_DOUBLE) var_typ_out=NC_FLOAT; else var_typ_out=trv.var_typ; dmn_nbr_in=trv.nbr_dmn; dmn_nbr_out=trv.nbr_dmn; rcd=nco_inq_varid(in_id,var_nm,&var_id_in); rcd=nco_inq_varid_flg(out_id,var_nm,&var_id_out); /* If variable has not been defined, define it */ if(rcd != NC_NOERR){ if(trv.flg_rgr){ /* Regrid */ rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); dmn_in_fst=0; rcd=nco_inq_var_packing(in_id,var_id_in,&flg_pck); if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports variable \"%s\" is packed so results unpredictable. HINT: If regridded values seems weird, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,var_nm); has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,(double *)NULL); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm); /* Is horizontal dimension last, i.e., most-rapidly-varying? 
*/ if(flg_grd_in_1D && !strcmp(dmn_nm,col_nm_in)){ if(dmn_idx != dmn_nbr_in-1){ /* Unstructured input grid has col in non-MRV location (expect this with, e.g., MPAS-O/I native grid dimension-ordering */ (void)fprintf(stdout,"%s: WARNING %s reports unstructured grid spatial coordinate %s is (zero-based) dimension %d of input variable to be regridded %s which has %d dimensions. The NCO regridder does not support unstructured spatial dimensions that are not the last (i.e., most rapidly varying) dimension of an input variable, so results are likely garbage.\nHINT: Re-arrange input file dimensions to place horizontal dimension(s) last with, e.g., \'ncpdq -a time,lev,%s in.nc out.nc\' prior to calling the regridder. E3SM users: If this is an MPAS dataset with a new (unknown to ncremap) dimension, please ask Charlie to add the dimension to the ncremap dimension permutation list.\n",nco_prg_nm_get(),fnc_nm,dmn_nm,dmn_idx,var_nm,dmn_nbr_in,dmn_nm); trv_tbl->lst[idx_tbl].flg_mrv=False; } /* !dmn_idx */ } /* !flg_grd_in_1D */ if(flg_grd_in_2D && (!strcmp(dmn_nm,lat_nm_in) || !strcmp(dmn_nm,lon_nm_in))){ /* Are horizontal dimensions most-rapidly-varying? */ if(dmn_idx != dmn_nbr_in-1 && dmn_idx != dmn_nbr_in-2){ /* NB: Lat/lon input grid has lat/lon in non-MRV location (expect this with, e.g., AIRS L2 grid dimension-ordering */ (void)fprintf(stdout,"%s: WARNING %s reports lat-lon grid spatial coordinate %s is (zero-based) dimension %d of input variable to be regridded %s which has %d dimensions. 
The NCO regridder does not support rectangular lat-lon dimension(s) that are not the last two (i.e., most rapidly varying) dimensions of an input variable, so results are likely garbage.\nHINT: Re-arrange input file dimensions to place horizontal dimensions last with, e.g., \'ncpdq -a time,lev,lat,lon in.nc out.nc\' prior to calling the regridder.\n",nco_prg_nm_get(),fnc_nm,dmn_nm,dmn_idx,var_nm,dmn_nbr_in); trv_tbl->lst[idx_tbl].flg_mrv=False; } /* !dmn_idx */ } /* !flg_grd_in_2D */ if(flg_grd_out_1D){ if((nco_rgr_typ == nco_rgr_grd_2D_to_1D) && (!strcmp(dmn_nm,lat_nm_in) || !strcmp(dmn_nm,lon_nm_in))){ /* Replace orthogonal horizontal dimensions by unstructured horizontal dimension already defined */ if(!strcmp(dmn_nm,lat_nm_in)){ /* Replace lat with col */ dmn_id_out[dmn_idx]=dmn_id_col; dmn_cnt[dmn_idx]=col_nbr_out; } /* endif lat */ if(!strcmp(dmn_nm,lon_nm_in)){ /* Assume non-MRV dimensions are ordered lat/lon. Replace lat with col. Shift MRV dimensions to left after deleting lon. 
*/ dmn_id_out[dmn_idx]=NC_MIN_INT; dmn_cnt[dmn_idx]=NC_MIN_INT; dmn_nbr_out--; /* Reduce output dimension position of all subsequent input dimensions by one */ if(!trv_tbl->lst[idx_tbl].flg_mrv) dmn_in_fst=-1; } /* endif lon */ }else{ /* Dimension col_nm_in has already been defined as col_nm_out, replicate all other dimensions */ if(!strcmp(dmn_nm,col_nm_in)) rcd=nco_inq_dimid_flg(out_id,col_nm_out,dmn_id_out+dmn_idx); else rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx+dmn_in_fst); if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt+dmn_idx+dmn_in_fst); /* Check-for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt[dmn_idx+dmn_in_fst]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt[dmn_idx+dmn_in_fst],dmn_id_out+dmn_idx+dmn_in_fst); } /* !rcd */ } /* !lat && !lon */ } /* !flg_grd_out_1D */ if(flg_grd_out_2D){ if(nco_rgr_typ == nco_rgr_grd_1D_to_2D && !strcmp(dmn_nm,col_nm_in)){ /* Replace unstructured horizontal dimension by orthogonal horizontal dimensions already defined */ dmn_id_out[dmn_idx]=dmn_id_lat; dmn_id_out[dmn_idx+1]=dmn_id_lon; dmn_cnt[dmn_idx]=lat_nbr_out; dmn_cnt[dmn_idx+1]=lon_nbr_out; dmn_nbr_out++; /* Increase output dimension position of all subsequent input dimensions by one */ if(!trv_tbl->lst[idx_tbl].flg_mrv) dmn_in_fst=1; }else{ /* Dimensions lat/lon_nm_in have already been defined as lat/lon_nm_out, replicate all other dimensions */ if(!strcmp(dmn_nm,lat_nm_in)) rcd=nco_inq_dimid_flg(out_id,lat_dmn_nm_out,dmn_id_out+dmn_idx); else if(!strcmp(dmn_nm,lon_nm_in)) rcd=nco_inq_dimid_flg(out_id,lon_dmn_nm_out,dmn_id_out+dmn_idx); else rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx+dmn_in_fst); if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt+dmn_idx+dmn_in_fst); /* Check-for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < 
dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt[dmn_idx+dmn_in_fst]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt[dmn_idx+dmn_in_fst],dmn_id_out+dmn_idx+dmn_in_fst); } /* !rcd */ } /* !col */ } /* !1D_to_2D */ } /* !dmn_idx */ }else{ /* !flg_rgr */ /* Replicate non-regridded variables */ rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm); rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx); if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt+dmn_idx); /* Check-for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt[dmn_idx]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt[dmn_idx],dmn_id_out+dmn_idx); } /* !rcd */ } /* !dmn_idx */ } /* !flg_rgr */ rcd=nco_def_var(out_id,var_nm,var_typ_out,dmn_nbr_out,dmn_id_out,&var_id_out); /* Duplicate netCDF4 settings when possible */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC){ /* Deflation */ if(dmn_nbr_out > 0){ int dfl_lvl_in; /* [enm] Deflate level [0..9] */ rcd=nco_inq_var_deflate(in_id,var_id_in,&shuffle,&deflate,&dfl_lvl_in); /* Copy original deflation settings */ if(deflate || shuffle) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl_in); /* Overwrite HDF Lempel-Ziv compression level, if requested */ if(dfl_lvl == 0) deflate=(int)False; else deflate=(int)True; /* Turn-off shuffle when uncompressing otherwise chunking requests may fail */ if(dfl_lvl == 0) shuffle=NC_NOSHUFFLE; /* Shuffle never, to my knowledge, increases filesize, so shuffle by default when manually deflating */ if(dfl_lvl >= 0) shuffle=NC_SHUFFLE; if(dfl_lvl >= 0) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl); } /* !dmn_nbr_out */ } /* !NC_FORMAT_NETCDF4 */ 
(void)nco_att_cpy(in_id,out_id,var_id_in,var_id_out,PCK_ATT_CPY); if(trv.flg_rgr){ aed_mtd_crd.var_nm=var_nm; aed_mtd_crd.id=var_id_out; (void)nco_aed_prc(out_id,var_id_out,aed_mtd_crd); if(flg_cll_msr){ aed_mtd_cll_msr.var_nm=var_nm; aed_mtd_cll_msr.id=var_id_out; (void)nco_aed_prc(out_id,var_id_out,aed_mtd_cll_msr); } /* !flg_cll_msr */ } /* !flg_rgr */ } /* !rcd */ } /* !var */ } /* end idx_tbl */ /* Free pre-allocated array space */ /* col_nm_in will not otherwise be free'd if it was guessed as usual suspect */ if(col_nm_in != rgr->col_nm_in) col_nm_in=(char *)nco_free(col_nm_in); if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt) dmn_cnt=(long *)nco_free(dmn_cnt); if(dmn_ids_rec) dmn_ids_rec=(int *)nco_free(dmn_ids_rec); /* Define new metadata in regridded file */ if(flg_area_out){ rcd=nco_char_att_put(out_id,area_nm_out,"long_name","Solid angle subtended by gridcell"); rcd=nco_char_att_put(out_id,area_nm_out,"standard_name","solid_angle"); rcd=nco_char_att_put(out_id,area_nm_out,"units","steradian"); if(flg_grd_out_1D || flg_grd_out_crv) rcd=nco_char_att_put(out_id,area_nm_out,att_nm_crd,att_val_crd); att_val=(char *)nco_calloc((strlen(lat_dmn_nm_out)+strlen(lon_dmn_nm_out)+8L),sizeof(char)); (void)sprintf(att_val,"%s, %s: sum",lat_dmn_nm_out,lon_dmn_nm_out); rcd=nco_char_att_put(out_id,area_nm_out,"cell_mathods",att_val); if(att_val) att_val=(char *)nco_free(att_val); } /* !flg_area_out */ if(flg_frc_out_wrt){ rcd=nco_char_att_put(out_id,frc_nm_out,"long_name","Fraction of gridcell valid on destination grid"); if(flg_grd_out_1D || flg_grd_out_crv) rcd=nco_char_att_put(out_id,area_nm_out,att_nm_crd,att_val_crd); att_val=(char *)nco_calloc((strlen(lat_dmn_nm_out)+strlen(lon_dmn_nm_out)+8L),sizeof(char)); (void)sprintf(att_val,"%s, %s: sum",lat_dmn_nm_out,lon_dmn_nm_out); rcd=nco_char_att_put(out_id,frc_nm_out,"cell_mathods",att_val); } /* 
!flg_frc_out_wrt */ if(flg_msk_out){ rcd=nco_char_att_put(out_id,msk_nm_out,"long_name","Mask (0 = invalid destination, 1 = valid destination)"); if(flg_grd_out_1D || flg_grd_out_crv) rcd=nco_char_att_put(out_id,area_nm_out,att_nm_crd,att_val_crd); } /* !flg_msk_out */ rcd=nco_char_att_put(out_id,lat_nm_out,"long_name","Latitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,lat_nm_out,"standard_name","latitude"); rcd=nco_char_att_put(out_id,lat_nm_out,"units","degrees_north"); // 20200205: Attach "axis" attribute to single-dimensional geospatial coordinates not to two-dimensional coordinate variables per CF Conventions section 5.2 if(!flg_grd_out_crv) rcd=nco_char_att_put(out_id,lat_nm_out,"axis","Y"); double vld_min; vld_min=-90.0; att_nm=strdup("valid_min"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lat_nm_out; aed_mtd.id=lat_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&vld_min; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lat_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); double vld_max; vld_max=90.0; att_nm=strdup("valid_max"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lat_nm_out; aed_mtd.id=lat_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&vld_max; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lat_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,lat_nm_out,"bounds",lat_bnd_nm_out); if(flg_grd_out_rct) att_val=strdup("Gridcell latitude interfaces"); else att_val=strdup("Gridcell latitude vertices"); rcd=nco_char_att_put(out_id,lat_bnd_nm_out,"long_name",att_val); rcd=nco_char_att_put(out_id,lon_nm_out,"long_name","Longitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,lon_nm_out,"standard_name","longitude"); rcd=nco_char_att_put(out_id,lon_nm_out,"units","degrees_east"); // 20200205: Attach "axis" attribute to single-dimensional geospatial coordinates not to two-dimensional coordinate variables per CF Conventions section 5.2 if(!flg_grd_out_crv) 
rcd=nco_char_att_put(out_id,lon_nm_out,"axis","X"); /* UGRID Conventions define "topology" and "modulo" attributes https://github.com/ugrid-conventions/ugrid-conventions My understanding is these should only be utilized for global grids */ if(nco_rgr_typ == nco_rgr_grd_2D_to_2D){ /* fxm: change this to check whether lon_spn >= 360 or nco_grd_xtn == global */ att_nm=strdup("modulo"); double modulo=360.0; aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lon_nm_out; aed_mtd.id=lon_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&modulo; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lon_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,lon_nm_out,"topology","circular"); } /* !nco_rgr_grd_2D_to_2D */ if(lon_ctr_out[0] >= 0.0) vld_min=0.0; else vld_min=-180.0; att_nm=strdup("valid_min"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lon_nm_out; aed_mtd.id=lon_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&vld_min; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lon_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); if(lon_ctr_out[0] >= 0.0) vld_max=360.0; else vld_max=180.0; att_nm=strdup("valid_max"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lon_nm_out; aed_mtd.id=lon_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&vld_max; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lon_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,lon_nm_out,"bounds",lon_bnd_nm_out); att_nm=strdup("bounds"); att_val=lon_bnd_nm_out; aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lon_nm_out; aed_mtd.id=lon_out_id; aed_mtd.sz=strlen(att_val); aed_mtd.type=NC_CHAR; aed_mtd.val.cp=att_val; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lon_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); if(flg_grd_out_rct) att_val=strdup("Gridcell longitude interfaces"); else att_val=strdup("Gridcell longitude vertices"); rcd=nco_char_att_put(out_id,lon_bnd_nm_out,"long_name",att_val); 
if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ rcd=nco_char_att_put(out_id,slat_nm_out,"long_name","Latitude for staggered FV grid"); rcd=nco_char_att_put(out_id,slat_nm_out,"units","degrees_north"); rcd=nco_char_att_put(out_id,slat_wgt_nm_out,"long_name","Latitude weights for staggered FV grid"); rcd=nco_char_att_put(out_id,slon_nm_out,"long_name","Longitude for staggered FV grid"); rcd=nco_char_att_put(out_id,slon_nm_out,"units","degrees_east"); } /* !nco_grd_lat_fv */ if(flg_grd_out_rct) rcd=nco_char_att_put(out_id,lat_wgt_nm,"long_name","Latitude quadrature weights (normalized to sum to 2.0 on global grids)"); rcd=nco_char_att_put(out_id,NULL,"map_file",fl_in); rcd=nco_char_att_put(out_id,NULL,"input_file",rgr->fl_in); /* Annotate persistent metadata that should appear last in attribute list */ if(flg_grd_out_1D){ if(flg_area_out) rcd=nco_char_att_put(out_id,area_nm_out,att_nm_crd,att_val_crd); if(flg_frc_out_wrt) rcd=nco_char_att_put(out_id,frc_nm_out,att_nm_crd,att_val_crd); if(flg_msk_out) rcd=nco_char_att_put(out_id,msk_nm_out,att_nm_crd,att_val_crd); } /* !flg_grd_out_1D */ /* Persistent metadata */ if(att_nm_crd) att_nm_crd=(char *)nco_free(att_nm_crd); if(att_val_crd) att_val_crd=(char *)nco_free(att_val_crd); if(flg_cll_msr){ if(att_nm_cll_msr) att_nm_cll_msr=(char *)nco_free(att_nm_cll_msr); if(att_val_cll_msr) att_val_cll_msr=(char *)nco_free(att_val_cll_msr); } /* !flg_cll_msr */ if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ if(slat_nm_out) slat_nm_out=(char *)nco_free(slat_nm_out); if(slat_wgt_nm_out) slat_wgt_nm_out=(char *)nco_free(slat_wgt_nm_out); if(slon_nm_out) slon_nm_out=(char *)nco_free(slon_nm_out); } /* !nco_grd_lat_fv */ /* Turn-off default filling behavior to enhance efficiency */ nco_set_fill(out_id,NC_NOFILL,&fll_md_old); /* Begin data mode */ (void)nco_enddef(out_id); /* Write new coordinates and variables to regridded file */ if(flg_grd_out_1D){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=col_nbr_out; 
(void)nco_put_vara(out_id,lat_out_id,dmn_srt_out,dmn_cnt_tuo,lat_ctr_out,crd_typ_out); dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=col_nbr_out; (void)nco_put_vara(out_id,lon_out_id,dmn_srt_out,dmn_cnt_tuo,lon_ctr_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=col_nbr_out; dmn_cnt_tuo[1]=bnd_nbr_out; (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt_out,dmn_cnt_tuo,lat_bnd_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=col_nbr_out; dmn_cnt_tuo[1]=bnd_nbr_out; (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt_out,dmn_cnt_tuo,lon_bnd_out,crd_typ_out); if(flg_area_out){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=col_nbr_out; (void)nco_put_vara(out_id,area_out_id,dmn_srt_out,dmn_cnt_tuo,area_out,crd_typ_out); } /* !flg_area_out */ if(flg_msk_out){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=col_nbr_out; (void)nco_put_vara(out_id,msk_out_id,dmn_srt_out,dmn_cnt_tuo,msk_out,crd_typ_out); } /* !flg_msk_out */ } /* !flg_grd_out_1D */ if(flg_grd_out_crv){ dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=lat_nbr_out; dmn_cnt_tuo[1]=lon_nbr_out; (void)nco_put_vara(out_id,lat_out_id,dmn_srt_out,dmn_cnt_tuo,lat_ctr_out,crd_typ_out); (void)nco_put_vara(out_id,lon_out_id,dmn_srt_out,dmn_cnt_tuo,lon_ctr_out,crd_typ_out); if(flg_area_out){ (void)nco_put_vara(out_id,area_out_id,dmn_srt_out,dmn_cnt_tuo,area_out,crd_typ_out); } /* !flg_area_out */ if(flg_frc_out_wrt){ (void)nco_put_vara(out_id,frc_out_id,dmn_srt_out,dmn_cnt_tuo,frc_out,crd_typ_out); } /* !flg_frc_out_wrt */ if(flg_msk_out){ (void)nco_put_vara(out_id,msk_out_id,dmn_srt_out,dmn_cnt_tuo,msk_out,crd_typ_out); } /* !flg_msk_out */ dmn_srt_out[0]=dmn_srt_out[1]=dmn_srt_out[2]=0L; dmn_cnt_tuo[0]=lat_nbr_out; dmn_cnt_tuo[1]=lon_nbr_out; dmn_cnt_tuo[2]=bnd_nbr_out; /* NB: 20160803 Semantically confusing---curvilinear grids must write *_crn_out data into *_bnd_out arrays */ (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt_out,dmn_cnt_tuo,lat_crn_out,crd_typ_out); 
(void)nco_put_vara(out_id,lon_bnd_id,dmn_srt_out,dmn_cnt_tuo,lon_crn_out,crd_typ_out); } /* !flg_grd_out_crv */ if(flg_grd_out_rct){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=lat_nbr_out; (void)nco_put_vara(out_id,lat_out_id,dmn_srt_out,dmn_cnt_tuo,lat_ctr_out,crd_typ_out); dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=lon_nbr_out; (void)nco_put_vara(out_id,lon_out_id,dmn_srt_out,dmn_cnt_tuo,lon_ctr_out,crd_typ_out); if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=slat_nbr_out; (void)nco_put_vara(out_id,slat_out_id,dmn_srt_out,dmn_cnt_tuo,slat_ctr_out,crd_typ_out); (void)nco_put_vara(out_id,slat_wgt_id,dmn_srt_out,dmn_cnt_tuo,slat_wgt_out,crd_typ_out); dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=slon_nbr_out; (void)nco_put_vara(out_id,slon_out_id,dmn_srt_out,dmn_cnt_tuo,slon_ctr_out,crd_typ_out); if(slat_ctr_out) slat_ctr_out=(double *)nco_free(slat_ctr_out); if(slat_wgt_out) slat_wgt_out=(double *)nco_free(slat_wgt_out); if(slon_ctr_out) slon_ctr_out=(double *)nco_free(slon_ctr_out); } /* !nco_grd_lat_fv */ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=lat_nbr_out; (void)nco_put_vara(out_id,lat_wgt_id,dmn_srt_out,dmn_cnt_tuo,lat_wgt_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=lat_nbr_out; dmn_cnt_tuo[1]=bnd_nbr_out; (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt_out,dmn_cnt_tuo,lat_bnd_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=lon_nbr_out; dmn_cnt_tuo[1]=bnd_nbr_out; (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt_out,dmn_cnt_tuo,lon_bnd_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=lat_nbr_out; dmn_cnt_tuo[1]=lon_nbr_out; if(flg_area_out){ (void)nco_put_vara(out_id,area_out_id,dmn_srt_out,dmn_cnt_tuo,area_out,crd_typ_out); } /* !flg_area_out */ if(flg_frc_out_wrt){ (void)nco_put_vara(out_id,frc_out_id,dmn_srt_out,dmn_cnt_tuo,frc_out,crd_typ_out); } /* !flg_frc_out_wrt */ if(flg_msk_out){ (void)nco_put_vara(out_id,msk_out_id,dmn_srt_out,dmn_cnt_tuo,msk_out,crd_typ_out); } /* !flg_msk_out */ } /* 
!flg_grd_out_rct */ /* Regrid or copy variable values */ const double wgt_vld_thr=rgr->wgt_vld_thr; /* [frc] Weight threshold for valid destination value */ const nco_bool flg_rnr=rgr->flg_rnr; /* [flg] Renormalize destination values by valid area */ char *sgs_frc_nm=NULL; char *sgs_msk_nm=NULL; double *sgs_frc_in=NULL; double *sgs_frc_out=NULL; double *var_val_dbl_in=NULL; double *var_val_dbl_out=NULL; double *wgt_vld_out=NULL; double var_val_crr; int *tally=NULL; /* [nbr] Number of valid (non-missing) values */ int lvl_idx; /* [idx] Level index */ int lvl_nbr; /* [nbr] Number of levels */ int thr_idx; /* [idx] Thread index */ size_t dst_idx; size_t idx_in; /* [idx] Input grid index */ size_t idx_out; /* [idx] Output grid index */ size_t var_sz_in; /* [nbr] Number of elements in variable (will be self-multiplied) */ size_t var_sz_out; /* [nbr] Number of elements in variable (will be self-multiplied) */ size_t val_in_fst; /* [nbr] Number of elements by which current N-D slab input values are offset from origin */ size_t val_out_fst; /* [nbr] Number of elements by which current N-D slab output values are offset from origin */ /* 20190322: Prior to entering OpenMP loop, collect specified SGS information */ const double sgs_nrm=rgr->sgs_nrm; /* [frc] Sub-gridscale normalization */ if(rgr->sgs_frc_nm){ /* Normalization test: fl_in=20181217.CNTL_CNPCTC1850_OIBGC.ne30_oECv3.edison.clm2.h0.2000-12.nc /bin/cp -f ${DATA}/hdf/${fl_in} ~/elm_raw.nc ncremap -P sgs -v FSDS,TBOT,GPP -a aave -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/cmip6_180x360_scrip.20181001.nc ~/elm_raw.nc ~/elm_sgs.nc # Original SGS method ncks -A -v grid_area ${DATA}/grids/ne30np4_pentagons.091226.nc ~/elm_sgs.nc ncremap -P gsg -v FSDS,TBOT,GPP -m ${DATA}/maps/map_ne30np4_to_cmip6_180x360_aave.20181001.nc ~/elm_raw.nc ~/elm_gsg.nc # New SGS method */ if(rgr->sgs_msk_nm) sgs_msk_nm=(char *)strdup(rgr->sgs_msk_nm); sgs_frc_nm=(char *)strdup(rgr->sgs_frc_nm); var_nm=sgs_frc_nm; 
var_typ_rgr=NC_DOUBLE; /* NB: Regrid in double precision */ var_typ_out=NC_DOUBLE; /* NB: sgs_frc_out must be double precision */ var_sz_in=1L; /* Compute from scratch to be sure it matches grd_sz_in */ var_sz_out=grd_sz_out; /* Assume this holds */ char *fl_sgs=NULL; /* [sng] External sub-gridscale file name */ int sgs_id; /* [id] netCDF file ID for external sub-gridscale file */ sgs_id=in_id; if((rcd=nco_inq_varid_flg(sgs_id,var_nm,&var_id_in)) != NC_NOERR){ /* If sgs_frc_nm is not in input file then search for it in external area file */ char *sls_ptr; /* [sng] Pointer to last slash character (' ') */ sls_ptr=strrchr(var_nm,'/'); if(!sls_ptr){ (void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unable to find sgs_frc_nm = %s in current input file, and unable to identify filename (ending with slash '/') portion of that string to serve as local external file for sgs_frc input, exiting\n",nco_prg_nm_get(),fnc_nm,sgs_frc_nm); nco_exit(EXIT_FAILURE); } /* !sls_ptr */ sgs_frc_nm=(char *)strdup(sls_ptr+1L); /* Copy variable-name portion of string */ *sls_ptr='\0'; /* NULL-terminate filename */ fl_sgs=(char *)strdup(var_nm); var_nm=sgs_frc_nm; /* NB: too tricky? */ rcd=nco_open(fl_sgs,NC_NOWRITE,&sgs_id); if((rcd=nco_inq_varid_flg(sgs_id,var_nm,&var_id_in)) != NC_NOERR){ (void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unable to find sgs_frc_nm = \"%s\" in local external file %s, exiting\n",nco_prg_nm_get(),fnc_nm,sgs_frc_nm,fl_sgs); nco_exit(EXIT_FAILURE); } /* !rcd */ if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s obtaining sgs_frc = %s from file %s\n",nco_prg_nm_get(),fnc_nm,sgs_frc_nm,fl_sgs); } /* !rcd */ rcd=nco_inq_varndims(sgs_id,var_id_in,&dmn_nbr_in); dmn_nbr_max= dmn_nbr_in > dmn_nbr_out ? 
dmn_nbr_in : dmn_nbr_out; dmn_id_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* max() for both input and output grids */ dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); rcd=nco_inq_vardimid(sgs_id,var_id_in,dmn_id_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimlen(sgs_id,dmn_id_in[dmn_idx],dmn_cnt_in+dmn_idx); var_sz_in*=dmn_cnt_in[dmn_idx]; dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ if(var_sz_in != grd_sz_in){ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") requires that sgs_frc = %s be same size as spatial grid but var_sz_in = %lu != %lu = grd_sz_in\n",nco_prg_nm_get(),fnc_nm,var_nm,var_sz_in,grd_sz_in); nco_exit(EXIT_FAILURE); } /* !var_sz_in */ /* Missing value setup (NB: ELM landfrac has _FillValue and is _FillValue where masked */ has_mss_val=nco_mss_val_get_dbl(sgs_id,var_id_in,&mss_val_dbl); sgs_frc_in=(double *)nco_malloc_dbg(var_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() sgs_frc_in value buffer"); rcd=nco_get_vara(sgs_id,var_id_in,dmn_srt,dmn_cnt_in,sgs_frc_in,var_typ_rgr); /* If sgs_frc comes from external local file, close it now */ if(fl_sgs){ rcd=nco_close(sgs_id); fl_sgs=(char *)nco_free(fl_sgs); } /* !fl_sgs */ /* Initialize output */ sgs_frc_out=(double *)nco_malloc_dbg(grd_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() sgs_frc_out value buffer"); /* Initialize and regrid sgs_frc_out 20190907: sgs_frc_in (landfrac) is _FillValue (1.0e36) for ELM datasets in all masked gridcells, and is always positive definite (never zero) in all unmasked gridcells because it it a true area. ELM sgs_frc_out is always positive definite gridcell area everywhere, with no missing values and no zero values. 
20190910: MPAS-Seaice datasets have no mask, and sgs_frc_in (timeMonthly_avg_iceAreaCell) is never (ncatted-appended) _FillValue (-9.99999979021477e+33) and is usually zero because it is time-mean area-fraction of sea ice which only exists in polar regions. MPAS-Seaice sgs_frc_out is zero in all gridcells without sea-ice. Regardless of input source, following blocks guarantee that sgs_frc_out is defined everywhere, is never a missing value (sgs_frc_out is zero where sgs_frc_in may have been _FillValue), and is always safe to multiply and normalize by sgs_frc_out in main regridding loop */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) sgs_frc_out[dst_idx]=0.0; if(!has_mss_val) for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) sgs_frc_out[row_dst_adr[lnk_idx]]+=sgs_frc_in[col_src_adr[lnk_idx]]*wgt_raw[lnk_idx]; if(has_mss_val) for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) if((var_val_crr=sgs_frc_in[col_src_adr[lnk_idx]]) != mss_val_dbl) sgs_frc_out[row_dst_adr[lnk_idx]]+=var_val_crr*wgt_raw[lnk_idx]; /* Sanity check sgs_frc_out */ if(nco_dbg_lvl_get() >= nco_dbg_fl){ /* 20190326: sgs_frc expressed as a fraction must never exceed sgs_nrm CICE expresses sgs_frc (aice) in percent, i.e., sgs_nrm=100.0 Sum total value of sgs_frc (as opposed to gridcell_area) depends on grid resolution */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ /* 20190907: Approximate comparison because rounding causes frequent exceedances of sgs_nrm by epsilon ~ 1.0e-15 */ if((float)sgs_frc_out[dst_idx] > sgs_nrm) (void)fprintf(stdout,"%s: INFO %s reports sgs_frc_out[%lu] = %19.15f > %g = sgs_nrm\n",nco_prg_nm_get(),fnc_nm,dst_idx,sgs_frc_out[dst_idx],sgs_nrm); } /* !dst_idx */ } /* !dbg */ // for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ // (void)fprintf(stdout,"%s: INFO %s reports sgs_frc_out[%lu] = %19.15f\n",nco_prg_nm_get(),fnc_nm,dst_idx,sgs_frc_out[dst_idx]); // } /* !dst_idx */ if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt_in) dmn_cnt_in=(long 
*)nco_free(dmn_cnt_in); } /* !sgs_frc_nm */ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"Regridding progress: # means regridded, ~ means copied\n"); /* Using naked stdin/stdout/stderr in parallel region generates warning Copy appropriate filehandle to variable scoped as shared in parallel clause */ FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */ /* OpenMP notes: default(none): GCC9.x does not accept this (https://github.com/nco/nco/issues/114) perhaps because of fp_stdout/stderr? Intel accepts it. firstprivate(): Pointers that could be inadvertently free()'d if they lost their NULL-initialization private(): Almost everything else shared(): uggh...shared clause depends on both compiler and compiler-version 1. All const variables are default shared for gcc >= 4.9.2, 2. fnc_nm (only!) must be explicit shared for g++ 4.6.3 (travis) 3. flg_rnr,fnc_nm,wgt_vld_thr must be explicit shared for icc 13.1.3 (rhea) 4. assert() cannot be used in OpenMP blocks 5. Good discussion of "const" variables in shared() clause here http://jakascorner.com/blog/2016/07/omp-default-none-and-const.html 20200221: fxm Revisit default(none) in light of above article */ #ifdef __GNUG__ # define GCC_LIB_VERSION ( __GNUC__ * 100 + __GNUC_MINOR__ * 10 + __GNUC_PATCHLEVEL__ ) # if GCC_LIB_VERSION < 490 # define GXX_OLD_OPENMP_SHARED_TREATMENT 1 # endif /* 480 */ # if GCC_LIB_VERSION >= 900 # define GXX_WITH_OPENMP5_GPU_SUPPORT 1 # endif /* 900 */ #endif /* !__GNUC__ */ #if defined( __INTEL_COMPILER) # pragma omp parallel for default(none) firstprivate(dmn_cnt_in,dmn_cnt_out,dmn_srt,dmn_id_in,dmn_id_out,tally,var_val_dbl_in,var_val_dbl_out,wgt_vld_out) private(dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dst_idx,has_mss_val,idx,idx_in,idx_out,idx_tbl,in_id,lnk_idx,lvl_idx,lvl_nbr,mss_val_dbl,rcd,thr_idx,trv,val_in_fst,val_out_fst,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr,var_val_crr) 
shared(col_src_adr,dmn_nbr_hrz_crd,flg_frc_nrm,flg_rnr,fnc_nm,frc_out,lnk_nbr,out_id,row_dst_adr,sgs_frc_nm,sgs_frc_in,sgs_frc_out,sgs_msk_nm,wgt_raw,wgt_vld_thr) #else /* !__INTEL_COMPILER */ # ifdef GXX_OLD_OPENMP_SHARED_TREATMENT # pragma omp parallel for default(none) firstprivate(dmn_cnt_in,dmn_cnt_out,dmn_srt,dmn_id_in,dmn_id_out,tally,var_val_dbl_in,var_val_dbl_out,wgt_vld_out) private(dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dst_idx,has_mss_val,idx,idx_in,idx_out,idx_tbl,in_id,lnk_idx,lvl_idx,lvl_nbr,mss_val_dbl,rcd,thr_idx,trv,val_in_fst,val_out_fst,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr,var_val_crr) shared(col_src_adr,dmn_nbr_hrz_crd,flg_frc_nrm,fnc_nm,frc_out,lnk_nbr,out_id,row_dst_adr,sgs_frc_nm,sgs_frc_in,sgs_frc_out,sgs_msk_nm,wgt_raw) # else /* !old g++ */ # if defined(GXX_WITH_OPENMP5_GPU_SUPPORT) && 0 # pragma omp target teams distribute parallel for firstprivate(dmn_cnt_in,dmn_cnt_out,dmn_srt,dmn_id_in,dmn_id_out,tally,var_val_dbl_in,var_val_dbl_out,wgt_vld_out) private(dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dst_idx,has_mss_val,idx,idx_in,idx_out,idx_tbl,in_id,lnk_idx,lvl_idx,lvl_nbr,mss_val_dbl,rcd,thr_idx,trv,val_in_fst,val_out_fst,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr,var_val_crr) shared(col_src_adr,dmn_nbr_hrz_crd,flg_frc_nrm,frc_out,lnk_nbr,out_id,row_dst_adr,sgs_frc_nm,sgs_frc_in,sgs_frc_out,sgs_msk_nm,wgt_raw) # else # pragma omp parallel for firstprivate(dmn_cnt_in,dmn_cnt_out,dmn_srt,dmn_id_in,dmn_id_out,tally,var_val_dbl_in,var_val_dbl_out,wgt_vld_out) private(dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dst_idx,has_mss_val,idx,idx_in,idx_out,idx_tbl,in_id,lnk_idx,lvl_idx,lvl_nbr,mss_val_dbl,rcd,thr_idx,trv,val_in_fst,val_out_fst,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr,var_val_crr) shared(col_src_adr,dmn_nbr_hrz_crd,flg_frc_nrm,frc_out,lnk_nbr,out_id,row_dst_adr,sgs_frc_nm,sgs_frc_in,sgs_frc_out,sgs_msk_nm,wgt_raw) # endif /* !GCC >= 
9.0 */ # endif /* !GCC < 4.9 */ #endif /* !__INTEL_COMPILER */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; thr_idx=omp_get_thread_num(); in_id=trv_tbl->in_id_arr[thr_idx]; #ifdef _OPENMP if(nco_dbg_lvl_get() >= nco_dbg_grp && !thr_idx && !idx_tbl) (void)fprintf(fp_stdout,"%s: INFO %s reports regrid loop uses %d thread%s\n",nco_prg_nm_get(),fnc_nm,omp_get_num_threads(),(omp_get_num_threads() > 1) ? "s" : ""); if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s: INFO thread = %d, idx_tbl = %d, nm = %s\n",nco_prg_nm_get(),thr_idx,idx_tbl,trv.nm); #endif /* !_OPENMP */ if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s%s ",trv.flg_rgr ? "#" : "~",trv.nm); if(trv.flg_rgr){ /* Regrid variable */ var_nm=trv.nm; var_typ_rgr=NC_DOUBLE; /* NB: Perform regridding in double precision */ var_typ_out=trv.var_typ; /* NB: Output type in file is same as input type */ var_sz_in=1L; var_sz_out=1L; rcd=nco_inq_varid(in_id,var_nm,&var_id_in); rcd=nco_inq_varid(out_id,var_nm,&var_id_out); rcd=nco_inq_varndims(in_id,var_id_in,&dmn_nbr_in); rcd=nco_inq_varndims(out_id,var_id_out,&dmn_nbr_out); dmn_nbr_max= dmn_nbr_in > dmn_nbr_out ? 
dmn_nbr_in : dmn_nbr_out; dmn_id_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_id_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int)); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* max() for both input and output grids */ dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); rcd=nco_inq_vardimid(out_id,var_id_out,dmn_id_out); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_in+dmn_idx); var_sz_in*=dmn_cnt_in[dmn_idx]; dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ rcd=nco_inq_dimlen(out_id,dmn_id_out[dmn_idx],dmn_cnt_out+dmn_idx); if(dmn_cnt_out[dmn_idx] == 0L){ /* No records have been written, so overwrite zero output record size with input record size */ char dmn_rec_nm[NC_MAX_NAME]; /* [sng] Record dimension name */ int dmn_rec_id_in; rcd=nco_inq_dimname(out_id,dmn_id_out[dmn_idx],dmn_rec_nm); rcd=nco_inq_dimid(in_id,dmn_rec_nm,&dmn_rec_id_in); rcd=nco_inq_dimlen(in_id,dmn_rec_id_in,dmn_cnt_out+dmn_idx); } /* !dmn_cnt_out */ var_sz_out*=dmn_cnt_out[dmn_idx]; dmn_srt[dmn_idx]=0L; } /* end loop over dimensions */ /* Compute number and size of non-lat/lon or non-col dimensions (e.g., level, time, species, wavelength) Denote their convolution by level or 'lvl' for shorthand There are lvl_nbr elements for each lat/lon or col position 20151011: Until today assume lat/lon and col are most-rapidly varying dimensions 20151011: Until today lvl_nbr missed last non-spatial dimension for 1D output */ lvl_nbr=1; /* Simple prescription of lvl_nbr works when horizontal dimension(s) is/are MRV */ for(dmn_idx=0;dmn_idx<dmn_nbr_out-dmn_nbr_hrz_crd;dmn_idx++) lvl_nbr*=dmn_cnt_out[dmn_idx]; /* Missing value setup */ has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,&mss_val_dbl); /* Memory requirements of next four malloc's (i.e., exclusive of wgt_raw) add up to 
~7*sizeof(uncompressed var) for NC_FLOAT and ~3.5*sizeof(uncompressed var) for NC_DOUBLE */ var_val_dbl_in=(double *)nco_malloc_dbg(var_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() input value buffer"); var_val_dbl_out=(double *)nco_malloc_dbg(var_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() output value buffer"); if(has_mss_val) tally=(int *)nco_malloc_dbg(var_sz_out*nco_typ_lng(NC_INT),fnc_nm,"Unable to malloc() tally buffer"); if(has_mss_val && flg_rnr) wgt_vld_out=(double *)nco_malloc_dbg(var_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() output renormalization weight buffer"); /* Initialize output */ (void)memset(var_val_dbl_out,0,var_sz_out*nco_typ_lng(var_typ_rgr)); if(has_mss_val) (void)memset(tally,0,var_sz_out*nco_typ_lng(NC_INT)); if(wgt_vld_out) (void)memset(wgt_vld_out,0,var_sz_out*nco_typ_lng(var_typ_rgr)); /* Obtain input variable */ rcd=nco_get_vara(in_id,var_id_in,dmn_srt,dmn_cnt_in,var_val_dbl_in,var_typ_rgr); /* 20150914: Intensive variables require normalization, extensive do not Intensive variables (temperature, wind speed, mixing ratio) do not depend on gridcell boundaries Extensive variables (population, counts, numbers of things) depend on gridcell boundaries Extensive variables are the exception in models, yet are commonly used for sampling information, e.g., number of photons, number of overpasses Pass extensive variable list to NCO with, e.g., --xtn=TSurfStd_ct,... 
20190420: Remove languishing, unfinished intensive variable code */ clock_t tm_srt; /* [us] Microseconds at start */ clock_t tm_end; /* [us] Microseconds at end */ float tm_drn; /* [s] Seconds elapsed */ if(nco_dbg_lvl_get() >= nco_dbg_var) tm_srt=clock(); /* This first block is for "normal" variables without sub-gridscale fractions */ if(!sgs_frc_out){ /* Apply weights */ if(!has_mss_val){ if(lvl_nbr == 1){ /* Weight single-level fields without missing values */ #ifdef ENABLE_GPU # pragma omp target data map(to:col_src_adr[0:lnk_nbr],row_dst_adr[0:lnk_nbr],var_val_dbl_in[0:var_sz_in],wgt_raw[0:lnk_nbr]) map(tofrom:var_val_dbl_out[0:var_sz_out]) # pragma omp target teams distribute parallel for simd schedule(static,1) #else /* !ENABLE_GPU */ # if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 ) # pragma omp simd # endif /* !__GNUC__ */ #endif /* !ENABLE_GPU */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) var_val_dbl_out[row_dst_adr[lnk_idx]]+=var_val_dbl_in[col_src_adr[lnk_idx]]*wgt_raw[lnk_idx]; }else{ val_in_fst=0L; val_out_fst=0L; /* Weight multi-level fields without missing values */ #ifdef ENABLE_GPU # pragma omp target data map(to:col_src_adr[0:lnk_nbr],row_dst_adr[0:lnk_nbr],var_val_dbl_in[0:var_sz_in],wgt_raw[0:lnk_nbr]) map(tofrom:var_val_dbl_out[0:var_sz_out]) # pragma omp parallel for reduction(+:val_in_fst,val_out_fst) #endif /* !ENABLE_GPU */ for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ //if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(fp_stdout,"%s lvl_idx = %d val_in_fst = %li, val_out_fst = %li\n",trv.nm,lvl_idx,val_in_fst,val_out_fst); #ifdef ENABLE_GPU # pragma omp target teams distribute parallel for simd schedule(static,1) #else /* !ENABLE_GPU */ # if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 ) # pragma omp simd # endif /* !__GNUC__ */ #endif /* !ENABLE_GPU */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) var_val_dbl_out[row_dst_adr[lnk_idx]+val_out_fst]+=var_val_dbl_in[col_src_adr[lnk_idx]+val_in_fst]*wgt_raw[lnk_idx]; val_in_fst+=grd_sz_in; 
val_out_fst+=grd_sz_out; } /* !lvl_idx */ } /* lvl_nbr > 1 */ }else{ /* has_mss_val */ if(lvl_nbr == 1){ /* Weight single-level fields with missing values */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]; idx_out=row_dst_adr[lnk_idx]; if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_dbl){ var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx]; if(wgt_vld_out) wgt_vld_out[idx_out]+=wgt_raw[lnk_idx]; tally[idx_out]++; } /* !mss_val_dbl */ } /* !lnk_idx */ }else{ /* lvl_nbr > 1 */ val_in_fst=0L; val_out_fst=0L; /* Weight multi-level fields with missing values */ for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]+val_in_fst; idx_out=row_dst_adr[lnk_idx]+val_out_fst; if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_dbl){ var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx]; if(wgt_vld_out) wgt_vld_out[idx_out]+=wgt_raw[lnk_idx]; tally[idx_out]++; } /* !mss_val_dbl */ } /* !lnk_idx */ val_in_fst+=grd_sz_in; val_out_fst+=grd_sz_out; } /* !lvl_idx */ } /* lvl_nbr > 1 */ } /* !has_mss_val */ if(!has_mss_val){ /* frc_dst = frc_out = dst_frac = frac_b contains non-unity elements and normalization type is "destarea" or "dstarea" or "none" When this occurs for conservative remapping, follow "destarea" normalization procedure See SCRIP manual p. 11 and http://www.earthsystemmodeling.org/esmf_releases/public/last, specifically http://www.earthsystemmodeling.org/esmf_releases/public/last/ESMF_refdoc/node3.html#SECTION03029000000000000000 "frac_a: When a conservative regridding method is used, this contains the fraction of each source cell that participated in the regridding. When a non-conservative regridding method is used, this array is set to 0.0. frac_b: When a conservative regridding method is used, this contains the fraction of each destination cell that participated in the regridding. 
When a non-conservative regridding method is used, this array is set to 1.0 where the point participated in the regridding (i.e. was within the unmasked source grid), and 0.0 otherwise. If the first-order conservative interpolation method is specified ("-m conserve") then the destination field may need to be adjusted by the destination fraction (frac_b). This should be done if the normalization type is ``dstarea'' (sic, really "destarea") and if the destination grid extends outside the unmasked source grid. If it isn't known if the destination extends outside the source, then it doesn't hurt to apply the destination fraction. (If it doesn't extend outside, then the fraction will be 1.0 everywhere anyway.) The following code shows how to adjust an already interpolated destination field (dst_field) by the destination fraction. The variables n_b, and frac_b are from the weight file: ! Adjust destination field by fraction do i=1, n_b if (frac_b(i) .ne. 0.0) then dst_field(i)=dst_field(i)/frac_b(i) endif enddo" NB: Non-conservative interpolation methods (e.g., bilinear) should NOT apply this normalization (theoretically there is no danger in doing so because frc_out == 1 always for all gridcells that participate in bilinear remapping and frc_out == 0 otherwise) NCO's renormalization procedure below is similar to the ESMF-recommended procedure above. However, users can control NCO renormalization with, e.g., --rnr_thr=0.1, or override it completely with --rnr_thr=none. Moreover, frac_b == frc_dst is determined solely by solely by gridcell binary mask overlaps during weight generation. It is time-invariant and 2D. Missing values (e.g., AOD) can vary in time and can be 3D (or N-D) and so can wgt_vld_out. Hence NCO renormalization is more flexible. 
flg_frc_nrm (i.e., ESMF-recommended) normalization makes fields pretty for graphics, yet is non-conservative because e.g., MPAS Ocean gridcells projected onto global uniform grids would have their SSTs normalized for prettiness on coastal gridpoints, which is inherently non-conservative. 20190912: Make "ESMF renormalization" of fields without missing values (i.e., "destarea") opt-in rather than default "destarea" and frac_b = frc_dst together set flg_frc_nrm Formerly flg_frc_nrm triggered ESMF renormalization by default Now flg_frc_nrm and user-explicitly-set --rnr_thr to [0.0,1.0] must both be true to trigger it This keep conservative maps conservative by default NB: This "ESMF renormalization" normalizes by frac_b == frc_dst (not by wgt_vld_out) regardless of rnr_thr 20151018: Avoid double-normalizing by only executing fractional normalization (flg_frc_nrm) block when !has_mss_val, and valid area normalization when has_mss_val */ if(flg_frc_nrm){ /* Only renormalize when frac_b < 1.0 (because frac_b == 1.0 does nothing) */ if(flg_rnr){ /* 20190912: Only renormalize when user explicitly requests it (because renormalization is non-conservative). Prior to today, renormalization was by default, henceforth it is opt-in. 
*/ if(lvl_nbr == 1){ /* Fractionally renormalize single-level fields without missing values */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]/=frc_out[dst_idx]; }else{ /* Fractionally renormalize multi-level fields without missing values */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ if(frc_out[dst_idx] != 0.0){ for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ var_val_dbl_out[dst_idx+lvl_idx*grd_sz_out]/=frc_out[dst_idx]; } /* !lvl_idx */ } /* !frc_out */ } /* !dst_idx */ } /* lvl_nbr > 1 */ } /* !flg_rnr */ } /* !flg_frc_nrm */ } /* !has_mss_val */ if(has_mss_val){ /* NCL and ESMF treatment of weights and missing values described at https://www.ncl.ucar.edu/Applications/ESMF.shtml#WeightsAndMasking http://earthsystemmodeling.org/esmf_releases/non_public/ESMF_6_1_1/ESMF_refdoc/node5.html#SECTION05012600000000000000 NCO implements one of two procedures: "conservative" or "renormalized" The "conservative" algorithm uses all valid data from the input grid on the output grid Destination cells receive the weighted valid values of the source cells This is conservative because the global integrals of the source and destination fields are equal The "renormalized" algorithm divides the destination value by the sum of the valid weights This returns "reasonable" values, i.e., the mean of the valid input values However, renormalization is equivalent to extrapolating valid data to missing regions Hence the input and output integrals are unequal and the regridding is not conservative */ /* In fields with missing value, destination cells with no accumulated weight are missing value */ for(dst_idx=0;dst_idx<var_sz_out;dst_idx++) if(!tally[dst_idx]) var_val_dbl_out[dst_idx]=mss_val_dbl; if(flg_rnr){ // if(nco_dbg_lvl_get() >= nco_dbg_quiet) (void)fprintf(fp_stdout,"%s: DEBUG renormalization for %s uses flg_rnr block\n",nco_prg_nm_get(),var_nm); if(wgt_vld_thr == 0.0){ /* Renormalize cells with no threshold by valid accumulated weight */ 
for(dst_idx=0;dst_idx<var_sz_out;dst_idx++) if(tally[dst_idx]) var_val_dbl_out[dst_idx]/=wgt_vld_out[dst_idx]; }else{ /* Renormalize cells with threshold by valid accumulated weight if weight exceeds threshold */ for(dst_idx=0;dst_idx<var_sz_out;dst_idx++) if(wgt_vld_out[dst_idx] >= wgt_vld_thr){var_val_dbl_out[dst_idx]/=wgt_vld_out[dst_idx];}else{var_val_dbl_out[dst_idx]=mss_val_dbl;} } /* !wgt_vld_thr */ } /* !flg_rnr */ } /* !has_mss_val */ } /* !sgs_frc_out */ /* Variables with sub-gridscale fractions require "double-weighting" and normalization */ if(sgs_frc_out){ if(!strcmp(var_nm,sgs_frc_nm)){ /* Copy shared variable sgs_frc_out that was regridded before OpenMP loop 20190911: Reasons to copy sgs_frc_out into sgs_frc_nm data include speed, consistency, and well-definedness of sgs_frc_out. One reason to regrid sgs_frc_nm here is consistency with original, raw dataset: ELM landfrac is masked so regridding it here (rather than using sgs_frc_out) would produce a regridded dataset more identical to raw ELM output. The same can be said for CICE (I think). MPAS cellMask and timeMonthly_avg_iceAreaCell are not masked, and so should produce the same values as sgs_frc_out if regridded here. 
*/ memcpy(var_val_dbl_out,sgs_frc_out,grd_sz_out*nco_typ_lng(var_typ_rgr)); }else if(sgs_msk_nm && !strcmp(var_nm,sgs_msk_nm)){ /* Compute binary mask directly from shared sgs_frc_out (guaranteed to be all valid values) */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]=1.0; }else{ /* !sgs_msk_nm */ /* "Double-weight" all other sub-gridscale input values by sgs_frc_in and overlap weight, normalize by sgs_frc_out */ if(!has_mss_val){ if(lvl_nbr == 1){ /* SGS-regrid single-level fields without missing values */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) var_val_dbl_out[row_dst_adr[lnk_idx]]+=var_val_dbl_in[col_src_adr[lnk_idx]]*wgt_raw[lnk_idx]*sgs_frc_in[col_src_adr[lnk_idx]]; /* NB: MPAS-Seaice dataset sgs_frc_out is usually zero in non-polar regions */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]/=sgs_frc_out[dst_idx]; }else{ /* lvl_nbr > 1 */ /* SGS-regrid multi-level fields without missing values */ val_in_fst=0L; val_out_fst=0L; for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]; idx_out=row_dst_adr[lnk_idx]; var_val_dbl_out[idx_out+val_out_fst]+=var_val_dbl_in[idx_in+val_in_fst]*wgt_raw[lnk_idx]*sgs_frc_in[idx_in]; tally[idx_out]++; } /* !lnk_idx */ /* Normalize current level values */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx+val_out_fst]/=sgs_frc_out[dst_idx]; val_in_fst+=grd_sz_in; val_out_fst+=grd_sz_out; } /* !lvl_idx */ } /* lvl_nbr > 1 */ }else{ /* !has_mss_val */ if(lvl_nbr == 1){ /* SGS-regrid single-level fields with missing values */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]; idx_out=row_dst_adr[lnk_idx]; if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_dbl){ var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx]*sgs_frc_in[idx_in]; tally[idx_out]++; } /* !mss_val_dbl */ } /* !lnk_idx */ /* NB: 
Normalization clause is complex to support sgs_frc_out from both ELM and MPAS-Seaice */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(!tally[dst_idx]){var_val_dbl_out[dst_idx]=mss_val_dbl;}else{if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]/=sgs_frc_out[dst_idx];} }else{ /* lvl_nbr > 1 */ /* SGS-regrid multi-level fields with missing values */ val_in_fst=0L; val_out_fst=0L; for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]+val_in_fst; idx_out=row_dst_adr[lnk_idx]+val_out_fst; if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_dbl){ var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx]*sgs_frc_in[col_src_adr[lnk_idx]]; tally[idx_out]++; } /* !mss_val_dbl */ } /* !lnk_idx */ /* Normalize current level values */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ idx_out=dst_idx+val_out_fst; if(!tally[idx_out]){var_val_dbl_out[idx_out]=mss_val_dbl;}else{if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[idx_out]/=sgs_frc_out[dst_idx];} } /* dst_idx */ val_in_fst+=grd_sz_in; val_out_fst+=grd_sz_out; } /* !lvl_idx */ } /* lvl_nbr > 1 */ } /* !has_mss_val */ } /* !sgs_msk_nm */ } /* !sgs_frc_out */ if(nco_dbg_lvl_get() >= nco_dbg_var){ tm_end=clock(); tm_drn=(float)(tm_end-tm_srt)/CLOCKS_PER_SEC; (void)fprintf(fp_stdout,"%s: INFO Compute time for %s (thread %d/%d): %g s\n",nco_prg_nm_get(),trv.nm,thr_idx,omp_get_num_threads(),tm_drn); } /* !dbg */ #pragma omp critical { /* begin OpenMP critical */ // rcd=nco_put_var(out_id,var_id_out,var_val_dbl_out,var_typ_rgr); rcd=nco_put_vara(out_id,var_id_out,dmn_srt,dmn_cnt_out,var_val_dbl_out,var_typ_rgr); } /* end OpenMP critical */ if(dmn_id_in) dmn_id_out=(int *)nco_free(dmn_id_in); if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); if(tally) tally=(int *)nco_free(tally); if(var_val_dbl_out) 
var_val_dbl_out=(double *)nco_free(var_val_dbl_out); if(var_val_dbl_in) var_val_dbl_in=(double *)nco_free(var_val_dbl_in); if(wgt_vld_out) wgt_vld_out=(double *)nco_free(wgt_vld_out); }else{ /* !trv.flg_rgr */ /* Use standard NCO copy routine for variables that are not regridded */ #pragma omp critical { /* begin OpenMP critical */ (void)nco_cpy_var_val(in_id,out_id,(FILE *)NULL,(md5_sct *)NULL,trv.nm,trv_tbl); } /* end OpenMP critical */ } /* !flg_rgr */ } /* !xtr */ } /* end (OpenMP parallel for) loop over idx_tbl */ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"\n"); if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s completion report: Variables regridded = %d (%d extensive), copied unmodified = %d, omitted = %d, created = %d\n",nco_prg_nm_get(),fnc_nm,var_rgr_nbr,var_xtn_nbr,var_cpy_nbr,var_xcl_nbr,var_crt_nbr); /* Free memory allocated for grid reading/writing */ if(area_out) area_out=(double *)nco_free(area_out); if(col_src_adr) col_src_adr=(int *)nco_free(col_src_adr); if(dmn_sz_in_int) dmn_sz_in_int=(int *)nco_free(dmn_sz_in_int); if(dmn_sz_out_int) dmn_sz_out_int=(int *)nco_free(dmn_sz_out_int); if(frc_out) frc_out=(double *)nco_free(frc_out); if(lat_bnd_out) lat_bnd_out=(double *)nco_free(lat_bnd_out); if(lat_crn_out) lat_crn_out=(double *)nco_free(lat_crn_out); if(lat_ctr_out) lat_ctr_out=(double *)nco_free(lat_ctr_out); if(lat_ntf_out) lat_ntf_out=(double *)nco_free(lat_ntf_out); if(lat_wgt_out) lat_wgt_out=(double *)nco_free(lat_wgt_out); if(lon_bnd_out) lon_bnd_out=(double *)nco_free(lon_bnd_out); if(lon_crn_out) lon_crn_out=(double *)nco_free(lon_crn_out); if(lon_ctr_out) lon_ctr_out=(double *)nco_free(lon_ctr_out); if(lon_ntf_out) lon_ntf_out=(double *)nco_free(lon_ntf_out); if(msk_out) msk_out=(int *)nco_free(msk_out); if(row_dst_adr) row_dst_adr=(int *)nco_free(row_dst_adr); if(sgs_frc_nm) sgs_frc_nm=(char *)nco_free(sgs_frc_nm); if(sgs_frc_in) sgs_frc_in=(double *)nco_free(sgs_frc_in); if(sgs_frc_out) 
sgs_frc_out=(double *)nco_free(sgs_frc_out);
  if(sgs_msk_nm) sgs_msk_nm=(char *)nco_free(sgs_msk_nm);
  if(wgt_raw) wgt_raw=(double *)nco_free(wgt_raw);

  return rcd;
} /* end nco_rgr_wgt() */

void nco_bsl_zro /* Return Bessel function zeros */
(const int bsl_zro_nbr, /* I [nbr] Number of Bessel function zeros to return */
 double * const bsl_zro) /* O [frc] Bessel zeros, 1-based: bsl_zro[1..bsl_zro_nbr] filled; bsl_zro[0] initialized but never used */
{
  /* Purpose: Return Bessel function zeros
     Source: CCM code /fs/cgd/csm/models/atm/ccm3.5.8/src/ccmlsm_share/bsslzr.F
     Return bsl_zro_nbr zeros (or if bsl_zro_nbr > 50, approximate zeros), of the Bessel function j0
     First 50 zeros are given exactly, and remaining zeros are computed by
     extrapolation, and therefore are not exact
     NB: Caller must allocate bsl_zro with at least bsl_zro_nbr+1 elements (Fortran-style 1-based indexing)
     Original version: CCM1
     Standardized: J. Rosinski, June 1992
     Reviewed: J. Hack, D. Williamson, August 1992
     Reviewed: J. Hack, D. Williamson, April 1996
     Modified 19970123 by Jim Rosinski to use double precision arithmetic
     ~2000: Converted to Fortran9X by C. Zender, changed all real*16 statements to double precision (real*8)
     20150530: Converted to C99 by C. Zender */
  const char fnc_nm[]="nco_bsl_zro()"; /* [sng] Function name */
  const double pi=M_PI; // [frc] 3

  /* Zeros of Bessel functions of order 1 to 50
     Element 0 is a sentinel (-1.e36) so table indexing is 1-based like the Fortran original */
  const double bsl_zro_tbl[]={
    -1.e36, 2.4048255577, 5.5200781103, 8.6537279129, 11.7915344391,
    14.9309177086, 18.0710639679, 21.2116366299, 24.3524715308, 27.4934791320,
    30.6346064684, 33.7758202136, 36.9170983537, 40.0584257646, 43.1997917132,
    46.3411883717, 49.4826098974, 52.6240518411, 55.7655107550, 58.9069839261,
    62.0484691902, 65.1899648002, 68.3314693299, 71.4729816036, 74.6145006437,
    77.7560256304, 80.8975558711, 84.0390907769, 87.1806298436, 90.3221726372,
    93.4637187819, 96.6052679510, 99.7468198587, 102.8883742542, 106.0299309165,
    109.1714896498, 112.3130502805, 115.4546126537, 118.5961766309, 121.7377420880,
    124.8793089132, 128.0208770059, 131.1624462752, 134.3040166383, 137.4455880203,
    140.5871603528, 143.7287335737, 146.8703076258, 150.0118824570, 153.1534580192,
    156.2950342685};
  const int bsl_zro_tbl_nbr_max=50; /* [nbr] Number of exactly tabulated zeros */
  int bsl_idx; /* [idx] Counting index */

  /* Main Code */
  if(nco_dbg_lvl_get() >= nco_dbg_sbr) (void)fprintf(stdout,"%s: DEBUG Entering %s\n",nco_prg_nm_get(),fnc_nm);

  /* NB: Initialize bsl_zro[0] but (in C) never use it
     Initialization prevents uninitialized memory warnings */
  for(bsl_idx=0;bsl_idx<=bsl_zro_nbr;bsl_idx++)
    if(bsl_idx <= bsl_zro_tbl_nbr_max) bsl_zro[bsl_idx]=bsl_zro_tbl[bsl_idx];

  /* Zeros beyond the table are extrapolated by adding pi per zero
     (consecutive zeros of j0 are asymptotically pi apart) */
  if(bsl_zro_nbr > bsl_zro_tbl_nbr_max)
    for(bsl_idx=bsl_zro_tbl_nbr_max+1;bsl_idx<=bsl_zro_nbr;bsl_idx++)
      bsl_zro[bsl_idx]=bsl_zro[bsl_idx-1]+pi;

  if(nco_dbg_lvl_get() == nco_dbg_old){
    (void)fprintf(stdout,"%s: DEBUG %s reports bsl_zro_nbr = %d\n",nco_prg_nm_get(),fnc_nm,bsl_zro_nbr);
    (void)fprintf(stdout,"idx\tbsl_zro\n");
    for(bsl_idx=1;bsl_idx<=bsl_zro_nbr;bsl_idx++)
      (void)fprintf(stdout,"%d\t%g\n",bsl_idx,bsl_zro[bsl_idx]);
  } /* endif dbg */

  return;
} /* end nco_bsl_zro() */

void nco_lat_wgt_gss /* [fnc] Compute and return sine of Gaussian latitudes and their weights */
(const int lat_nbr, /* I
[nbr] Latitude number */
 const nco_bool flg_s2n, /* I [enm] Latitude grid-direction is South-to-North */
 double * const lat_sin, /* O [frc] Sine of latitudes */
 double * const wgt_Gss) /* O [frc] Gaussian weights */
{
  /* Purpose: Compute and return sine of Gaussian latitudes and their weights
     Returned arrays are ordered south-to-north (S->N), not (N->S)
     Source: CCM /fs/cgd/csm/models/atm/ccm3.5.8/src/ccmlsm_share/gauaw.F
     Calculate sine of latitudes lat_sin(lat_nbr) and weights wgt_Gss(lat_nbr) for Gaussian quadrature
     Algorithm described in Davis and Rabinowitz, Journal of Research of the NBS, V 56, Jan 1956
     Zeros of Bessel function j0, obtained from nco_bsl_zro(), are first guess for abscissae
     Original version: CCM1
     Standardized: L. Bath, Jun 1992
                   L. Buja, Feb 1996
     Reviewed: D. Williamson, J. Hack, Aug 1992
               D. Williamson, J. Hack, Feb 1996
     19970123 Modified by Jim Rosinski to use real*16 arithmetic in order to
     achieve (nearly) identical weights and latitudes on all machines.
     ~2000: Converted to Fortran9X by C. Zender, changed all real*16 statements to double precision (real*8)
     20150530: Converted to C99 by C. Zender
     20150725: Verified against tabulation at http://pomax.github.io/bezierinfo/legendre-gauss.html#n64 */

  const char fnc_nm[]="nco_lat_wgt_gss()"; /* [sng] Function name */
  const double eps_rlt=1.0e-16; // Convergence criterion (NB: Threshold was 1.0d-27 in real*16, 1.0e-15 fine for real*8, 1.0e-16 pushes double precision to the brink)
  const double pi=M_PI; // [frc] 3
  const int itr_nbr_max=20; // [nbr] Maximum number of iterations
  double c_cff; // Constant combination coefficient
  double lat_idx_dbl; // Latitude index, double precision
  double lat_nnr_idx_dbl; // Inner latitude index, double precision
  double lat_nbr_dbl; // [nbr] Number of latitudes, double precision
  double pk=double_CEWI; // Polynomial
  double pkm1; // Polynomial
  double pkm2; // Polynomial
  double pkmrk; // Polynomial
  double sp; // Current iteration latitude increment
  double xz; // Abscissa estimate
  double cos_arg; // Intermediate parameter introduced while attempting to eliminate valgrind "uninitialised value" warnings
  int itr_cnt; // Iteration counter
  int lat_idx; // [idx] Counting index (latitude)
  int lat_sym_idx; // [idx] Counting index (symmetric latitude)
  int lat_nnr_idx; // [idx] Counting index (inner latitude loop)
  int lat_nbr_rcp2; // lat_nbr/2 (number of latitudes in hemisphere)
  double *lat_sin_p1; // Sine of Gaussian latitudes double precision
  double *wgt_Gss_p1; // Gaussian weights double precision

  /* Main Code */
  if(nco_dbg_lvl_get() >= nco_dbg_sbr) (void)fprintf(stdout,"%s: DEBUG Entering %s\n",nco_prg_nm_get(),fnc_nm);

  /* Arrays with Fortran indexing (indicated by "plus one" = "_p1") keep numerical algorithm in C identical to Fortran
     Hence both work arrays have lat_nbr+1 elements and element 0 is unused */
  lat_sin_p1=(double *)nco_malloc((lat_nbr+1)*sizeof(double)); // Sine of Gaussian latitudes double precision
  wgt_Gss_p1=(double *)nco_malloc((lat_nbr+1)*sizeof(double)); // Gaussian weights double precision

  /* Use Newton iteration to find abscissae */
  c_cff=0.25*(1.0-4.0/(pi*pi));
  lat_nbr_dbl=lat_nbr;
  lat_nbr_rcp2=lat_nbr/2; // NB: Integer arithmetic
  /* Bessel j0 zeros seed the Newton iteration for each hemispheric abscissa */
  (void)nco_bsl_zro(lat_nbr_rcp2,lat_sin_p1);
  for(lat_idx=1;lat_idx<=lat_nbr_rcp2;lat_idx++){ // NB: Loop starts at 1
    // 20150713: Introduce intermediate parameter cos_arg in attempt to eliminate valgrind "uninitialised value" warnings emitted by cos() (actually __cos_sse())
    // Warnings occur with gcc-compiled code, not with clang-compiled code
    cos_arg=lat_sin_p1[lat_idx]/sqrt((lat_nbr_dbl+0.5)*(lat_nbr_dbl+0.5)+c_cff);
    xz=cos(cos_arg); /* First approximation to xz */
    itr_cnt=0;
    /* goto label_73 */
  label_73:
    pkm2=1.0;
    pkm1=xz;
    if(++itr_cnt > itr_nbr_max){
      /* Newton iteration failed to converge within itr_nbr_max steps: fatal */
      (void)fprintf(stdout,"%s: ERROR %s reports convergence only %g after %d iterations for lat_idx = %d\n",nco_prg_nm_get(),fnc_nm,fabs(sp),itr_nbr_max,lat_idx);
      nco_exit(EXIT_FAILURE);
    } /* endif */
    /* Compute Legendre polynomial via three-term recurrence:
       n*P_n(x) = (2n-1)*x*P_{n-1}(x) - (n-1)*P_{n-2}(x) */
    for(lat_nnr_idx=2;lat_nnr_idx<=lat_nbr;lat_nnr_idx++){
      lat_nnr_idx_dbl=lat_nnr_idx;
      pk=((2.0*lat_nnr_idx_dbl-1.0)*xz*pkm1-(lat_nnr_idx_dbl-1.0)*pkm2)/lat_nnr_idx_dbl;
      pkm2=pkm1;
      pkm1=pk;
    } /* end inner loop over lat_nnr */
    pkm1=pkm2;
    pkmrk=(lat_nbr_dbl*(pkm1-xz*pk))/(1.0-xz*xz);
    sp=pk/pkmrk; /* Newton step: P(x)/P'(x) */
    xz=xz-sp;
    /* NB: Easy to introduce bug here by not replacing Fortran abs() with C fabs() */
    if(fabs(sp) > eps_rlt) goto label_73;
    lat_sin_p1[lat_idx]=xz;
    wgt_Gss_p1[lat_idx]=(2.0*(1.0-xz*xz))/((lat_nbr_dbl*pkm1)*(lat_nbr_dbl*pkm1));
  } /* end outer loop over lat */
  if(lat_nbr != lat_nbr_rcp2*2){
    /* When lat_nbr is odd, compute weight at Equator */
    lat_sin_p1[lat_nbr_rcp2+1]=0.0;
    pk=2.0/(lat_nbr_dbl*lat_nbr_dbl);
    for(lat_idx=2;lat_idx<=lat_nbr;lat_idx+=2){
      lat_idx_dbl=lat_idx;
      pk=pk*lat_idx_dbl*lat_idx_dbl/((lat_idx_dbl-1.0)*(lat_idx_dbl-1.0));
    } /* end loop over lat */
    wgt_Gss_p1[lat_nbr_rcp2+1]=pk;
  } /* endif lat_nbr is odd */

  /* Complete sets of abscissas and weights, using symmetry properties
     (abscissae are antisymmetric about the Equator, weights symmetric) */
  for(lat_idx=1;lat_idx<=lat_nbr_rcp2;lat_idx++){
    lat_sym_idx=lat_nbr-lat_idx+1;
    lat_sin_p1[lat_sym_idx]=-lat_sin_p1[lat_idx];
    wgt_Gss_p1[lat_sym_idx]=wgt_Gss_p1[lat_idx];
  } /* end loop over lat */

  /* Shift by
one to remove Fortran offset in p1 arrays */ //memcpy(lat_sin,lat_sin_p1,lat_nbr*sizeof(double)); //memcpy(wgt_Gss,wgt_Gss_p1,lat_nbr*sizeof(double)); /* Reverse and shift arrays because original CCM code algorithm computes latitudes from north-to-south Shift by one to remove Fortran offset in p1 arrays */ if(flg_s2n){ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ lat_sin[lat_idx]=lat_sin_p1[lat_nbr-lat_idx]; wgt_Gss[lat_idx]=wgt_Gss_p1[lat_nbr-lat_idx]; } /* end loop over lat */ }else{ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ lat_sin[lat_idx]=lat_sin_p1[lat_idx+1]; wgt_Gss[lat_idx]=wgt_Gss_p1[lat_idx+1]; } /* end loop over lat */ } /* !flg_s2n */ if(nco_dbg_lvl_get() == nco_dbg_old){ (void)fprintf(stdout,"%s: DEBUG %s reports lat_nbr = %d\n",nco_prg_nm_get(),fnc_nm,lat_nbr); (void)fprintf(stdout,"idx\tasin\tngl_rad\tngl_dgr\tgw\n"); for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) (void)fprintf(stdout,"%d\t%g\t%g\t%g%g\n",lat_idx,lat_sin[lat_idx],asin(lat_sin[lat_idx]),180.0*asin(lat_sin[lat_idx])/pi,wgt_Gss[lat_idx]); } /* endif dbg */ if(wgt_Gss_p1) wgt_Gss_p1=(double *)nco_free(wgt_Gss_p1); if(lat_sin_p1) lat_sin_p1=(double *)nco_free(lat_sin_p1); return; } /* end nco_lat_wgt_gss() */ void nco_sph_plg_area /* [fnc] Compute area of spherical polygon */ (rgr_sct * const rgr, /* I [sct] Regridding structure */ const double * const lat_bnd, /* [dgr] Latitude boundaries of rectangular grid */ const double * const lon_bnd, /* [dgr] Longitude boundaries of rectangular grid */ const long col_nbr, /* [nbr] Number of columns in grid */ const int bnd_nbr, /* [nbr] Number of bounds in gridcell */ double * const area) /* [sr] Gridcell area */ { /* Purpose: Compute area of spherical polygon */ /* Computing triangular area accurately is hard in corner cases Spherical triangle suffer from at least as many issues as planar, which are described by "Miscalculating Area and Angles of a Needle-like Triangle" by W. 
Kahan, UC Berkeley In particular, the Law of Cosines and Heron's formula can be ill-conditioned For spherical triangles L'Huilier's Theorem is superior to Girard's Formula: http://mathworld.wolfram.com/LHuiliersTheorem.html Girard's formula depends on pi-minus-angle and angle is usually quite small in our applications so precision would be lost L'Huilier's theorem depends only on angles (a,b,c) and semi-perimeter (s) and is well-conditioned for small angles semi-perimeter = half-perimeter of triangle = 0.5*(a+b+c) Spherical Excess (SE) difference between the sum of the angles of a spherical triangle area and a planar triangle area with same interior angles (that sum to pi) SE is also the solid angle subtended by the spherical triangle and that's, well, astonishing and pretty cool Wikipedia shows a better SE formula for triangles that are ill-conditioned for L'Huilier's formula because a = b ~ 0.5c https://en.wikipedia.org/wiki/Spherical_trigonometry#Area_and_spherical_excess See also interesting discussion of L'Huilier by Charles Karney who suggests his own alternative: http://osgeo-org.1560.x6.nabble.com/Area-of-a-spherical-polygon-td3841625.html The discussion mentions Mil94 Robert D. Miller, Computing the area of a spherical polygon, Graphic Gems IV, chapter II.4, pages 132-137. 
http://books.google.com/books?id=CCqzMm_-WucC&pg=PA132&lpg=PA132&dq=miller+area+spherical+polygon+gems&source=bl&ots=mrnvZ6NJcm&sig=CMg8eaD8dzP5snMaPeCQzgoFWUk&hl=sv&ei=4G-YTKv5GsWZOI-mmZQP&sa=X&oi=book_result&ct=result&resnum=1&ved=0CBQQ6AEwAA#v=onepage&q&f=false Mil94 contains similar ideas to my method for spherical polygons (decomposing into adjacent multiple triangles from single vertex) However, his method places single vertex at pole, then adds signed areas to obtain full polygon area His method may suffer from degraded precision because of roundoff error and long side-lengths So-called "proper" spherical triangle are those for which all angles are less than pi, so a+b+c<3*pi Cartesian coordinates of (lat,lon)=(theta,phi) are (x,y,z)=(cos(theta)*cos(phi),cos(theta)*sin(phi),sin(theta)) Dot-product rule for vectors gives interior angle/arc length between two points: cos(a)=u dot v=cos(theta1)*cos(phi1)*cos(theta2)*cos(phi2)+cos(theta1)*sin(phi1)*cos(theta2)*sin(phi2)+sin(theta1)*sin(theta2) Spherical law of cosines relates interior angles/arc-lengths (a,b,c) to surface angles (A,B,C) in spherical triangle: https://en.wikipedia.org/wiki/Spherical_law_of_cosines cos(a)=cos(b)*cos(c)+sin(b)*sin(c)*cos(A) cos(b)=cos(c)*cos(a)+sin(c)*sin(a)*cos(B) cos(c)=cos(a)*cos(b)+sin(a)*sin(b)*cos(C) cos(A)=[cos(a)-cos(b)*cos(c)]/[sin(b)*sin(c)] cos(B)=[cos(b)-cos(c)*cos(a)]/[sin(c)*sin(a)] cos(C)=[cos(c)-cos(a)*cos(b)]/[sin(a)*sin(b)] Bounds information on unstructured grids will use bounds_nbr=maximum(vertice_nbr) Unused vertices are stored as either repeated points (ACME does this) or, conceiveably, as missing values Given (lat,lon) for N-points algorithm to find area of spherical polygon is: 1. Any decomposition, Girard areas: Loses precision due to mismatch between pi and small spherical excesses A. Find interior angles/arc-lengths (a,b,c,d...) using spherical law of cosines along each edge B. Apply generalized Girard formula SE_n = Sum(A_n) - (N-2) - pi 2. 
CSZ decomposition (N-2 triangles) with L'Huilier areas, Convert polygon into triangles by cycling spoke through all sides from common apex This method requires computation of N-2 (not N) triangles, though fewer sides due to optimization It works on all convex polygons (interior angles less than 180) but not, in general, concave polygons Whether it works or not on concave polygons depends upon their exact shape and the choice of apex point A. First three non-identical points form first triangle with sides A,B,C (first+second point define A, etc.) i. First vertice anchors all triangles ii. Third vertice of preceding triangle becomes second vertice of next triangle iii. Next non-identical point becomes last vertice of next triangle iv. Side C of previous triangle is side A of next triangle B. For each triangle, compute area with L'Huilier formula unless A = B ~ 0.5*C then use SAS formula 3. centroidal decomposition, N triangle version by Taylor, L'Huilier areas: Compute polygon centroid and treat this as hub from which spokes are drawn to all vertices This method requires computation of N triangles, though fewer sides due to optimization Moreover, it works on all convex polygons and on slightly concave polygons Centroid/hub has clear view of interior of most simple concave polygons 4. Any decomposition but with exact RLL grids by Zender and Agress 20160918 A. Decompose polygon into triangles via any method (e.g., method 2 or 3 above) B. Determine whether triangle is spherical or contains RLL (constant latitude) C. 
Spherical triangles use L'Huilier, RLL triangles use series expansion */ const char fnc_nm[]="nco_sph_plg_area()"; const double dgr2rdn=M_PI/180.0; int bnd_nbr_ttl; /* [nbr] Number of bounds in gridcell accounting for possibility of centroid information */ long idx; /* [idx] Counting index for unrolled grids */ short int bnd_idx; /* Shift to this method once we pass rgr into nco_sph_plg_area() */ nco_bool flg_mth_csz=False; /* [flg] Use CSZ's advancing polygon bisector method */ nco_bool flg_mth_ctr=False; /* [flg] Use centroid method to compute polygon area */ nco_edg_typ_enm edg_typ; /* [enm] Arc-type for triangle edges */ nco_ply_tri_mth_typ_enm ply_tri_mth; /* [enm] Polygon decomposition method */ if(rgr->edg_typ == nco_edg_nil) rgr->edg_typ=nco_edg_gtc; edg_typ=rgr->edg_typ; /* [enm] Arc-type for triangle edges */ ply_tri_mth=rgr->ply_tri_mth; /* [enm] Polygon decomposition method */ if(ply_tri_mth == nco_ply_tri_mth_csz) flg_mth_csz=True; if(ply_tri_mth == nco_ply_tri_mth_ctr) flg_mth_ctr=True; assert(flg_mth_ctr != flg_mth_csz); bnd_nbr_ttl=bnd_nbr; // Allocate space for one extra boundary to store centroid information if necessary if(flg_mth_ctr) bnd_nbr_ttl=bnd_nbr+1; double *lat_bnd_rdn=NULL_CEWI; /* [rdn] Latitude boundaries of rectangular destination grid */ double *lon_bnd_rdn=NULL_CEWI; /* [rdn] Longitude boundaries of rectangular destination grid */ double *lat_bnd_sin=NULL_CEWI; /* [frc] Sine of latitude boundaries of rectangular destination grid */ double *lon_bnd_sin=NULL_CEWI; /* [frc] Sine of longitude boundaries of rectangular destination grid */ double *lat_bnd_cos=NULL_CEWI; /* [frc] Cosine of latitude boundaries of rectangular destination grid */ double *lon_bnd_cos=NULL_CEWI; /* [frc] Cosine of longitude boundaries of rectangular destination grid */ /* Allocate one extra space for some arrays to store polygon centroid values for each column for ply_tri_mth=ctr */ lon_bnd_rdn=(double *)nco_malloc(col_nbr*bnd_nbr_ttl*sizeof(double)); 
lat_bnd_rdn=(double *)nco_malloc(col_nbr*bnd_nbr_ttl*sizeof(double)); lon_bnd_cos=(double *)nco_malloc(col_nbr*bnd_nbr*sizeof(double)); lat_bnd_cos=(double *)nco_malloc(col_nbr*bnd_nbr_ttl*sizeof(double)); lon_bnd_sin=(double *)nco_malloc(col_nbr*bnd_nbr*sizeof(double)); lat_bnd_sin=(double *)nco_malloc(col_nbr*bnd_nbr*sizeof(double)); memcpy(lat_bnd_rdn,lat_bnd,col_nbr*bnd_nbr*sizeof(double)); memcpy(lon_bnd_rdn,lon_bnd,col_nbr*bnd_nbr*sizeof(double)); for(idx=0;idx<col_nbr*bnd_nbr;idx++){ lon_bnd_rdn[idx]*=dgr2rdn; lat_bnd_rdn[idx]*=dgr2rdn; lon_bnd_cos[idx]=cos(lon_bnd_rdn[idx]); lat_bnd_cos[idx]=cos(lat_bnd_rdn[idx]); lon_bnd_sin[idx]=sin(lon_bnd_rdn[idx]); lat_bnd_sin[idx]=sin(lat_bnd_rdn[idx]); } /* !idx */ double area_smc_crc; /* [sr] Small-circle correction to spherical triangle area */ double area_smc; /* [sr] Gridcell area allowing for latitude-triangles */ double area_ttl; /* [sr] Total area of input polygon list assuming spherical triangles */ double area_smc_ttl; /* [sr] Total area of input polygon list allowing for latitude-triangles */ double area_smc_crc_ttl; /* [sr] Latitude-triangle correction (should be small) to total area of input polygon list */ double area_smc_crc_abs_ttl; /* [sr] Latitude-triangle absolute correction (no compensation of positive/negative contributions, should be no smaller than above) to total area of input polygon list */ double lat_ctr; /* [dgr] Latitude of polygon centroid */ double lon_ctr; /* [dgr] Longitude of polygon centroid */ double lat_ctr_rdn; /* [rdn] Latitude of polygon centroid */ double lon_ctr_rdn; /* [rdn] Longitude of polygon centroid */ double lat_ctr_cos; /* [frc] Cosine latitude of polygon centroid */ double lat_dlt; /* [rdn] Latitudinal difference */ double lon_dlt; /* [rdn] Longitudinal difference */ double ngl_a; /* [rdn] Interior angle/great circle arc a */ double ngl_b; /* [rdn] Interior angle/great circle arc b */ double ngl_c; /* [rdn] Interior angle/great circle arc c */ double ngl_ltr_a; /* 
[rdn] Interior angle/small circle arc a, canonical latitude-triangle geometry */ double ngl_ltr_b; /* [rdn] Interior angle/great circle arc b, canonical latitude-triangle geometry */ double ngl_ltr_c; /* [rdn] Interior angle/great circle arc c, canonical latitude-triangle geometry */ double prm_smi; /* [rdn] Semi-perimeter of triangle */ double sin_hlf_tht; /* [frc] Sine of half angle/great circle arc theta connecting two points */ double xcs_sph; /* [sr] Spherical excess */ int tri_nbr; /* [nbr] Number of triangles in polygon */ long bnd_vld_nbr=NC_MIN_INT; /* [idx] Number of valid (non-duplicative) vertices in each triangle */ long *a_idx; /* [idx] Point A 1-D indices for each triangle in polygon */ long *b_idx; /* [idx] Point B 1-D indices for each triangle in polygon */ long *c_idx; /* [idx] Point C 1-D indices for each triangle in polygon */ long *vrt_vld=NULL; /* [idx] Absolute 1-D indices of valid vertices */ long idx_a; /* [idx] Point A 1-D index */ long idx_b; /* [idx] Point B 1-D index */ long idx_c; /* [idx] Point C 1-D index */ nco_bool flg_sas_ndl=False; /* [flg] L'Huilier's formula will fail due to needle where one side exceeds semi-perimeter */ nco_bool flg_sas_isc=False; /* [flg] L'Huilier's formula is ill-conditioned due to flat, near-isoceles triangle */ nco_bool flg_sas_a=False; /* [flg] Use SAS triangle formula with central angle a */ nco_bool flg_sas_b=False; /* [flg] Use SAS triangle formula with central angle b */ nco_bool flg_sas_c=False; /* [flg] Use SAS triangle formula with central angle c */ nco_bool flg_ply_has_smc; /* [flg] Any triangle in polygon has small-circle edge */ nco_bool flg_tri_crr_smc; /* [flg] Current triangle has small_circle edge */ /* Initialize global accumulators */ area_ttl=0.0; area_smc_ttl=0.0; area_smc_crc_ttl=0.0; area_smc_crc_abs_ttl=0.0; for(long col_idx=0;col_idx<col_nbr;col_idx++){ /* Initialize local properties and accumulators for this cell/polygon */ flg_ply_has_smc=False; ngl_c=double_CEWI; /* Otherwise 
compiler unsure ngl_c is initialized first use */ area[col_idx]=0.0; area_smc=0.0; tri_nbr=0; if(col_idx == 0){ a_idx=(long *)nco_calloc(bnd_nbr,sizeof(long)); b_idx=(long *)nco_calloc(bnd_nbr,sizeof(long)); c_idx=(long *)nco_calloc(bnd_nbr,sizeof(long)); vrt_vld=(long *)nco_calloc(bnd_nbr,sizeof(long)); } /* !col_idx */ /* Safety re-initialization to ease debugging, not strictly necessary */ for(bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++){ vrt_vld[bnd_idx]=NC_MIN_INT; a_idx[bnd_idx]=NC_MIN_INT; b_idx[bnd_idx]=NC_MIN_INT; c_idx[bnd_idx]=NC_MIN_INT; } /* !bnd_idx */ if(flg_mth_ctr){ double lon_dff; /* [dgr] Longitude difference */ long bnd_srt_idx; /* [idx] Absolute starting index of vertices in polygon */ long bnd_idx; /* [idx] Offset of current valid vertex index from starting index */ long bnd_vld_idx; /* [idx] Absolute index of last valid vertex */ /* First vertice is always valid */ bnd_srt_idx=bnd_nbr*col_idx; bnd_vld_idx=bnd_srt_idx; vrt_vld[0]=bnd_vld_idx; lat_ctr=lat_bnd[bnd_srt_idx]; lon_ctr=lon_bnd[bnd_srt_idx]; bnd_vld_nbr=1; /* First guess for next valid index */ bnd_idx=1; /* bnd_idx labels offset from first vertex of next valid (i.e., non-duplicative) vertex */ while(bnd_idx<bnd_nbr){ /* Skip repeated points that must occur when polygon has fewer than allowed vertices */ while(lon_bnd[bnd_vld_idx] == lon_bnd[bnd_srt_idx+bnd_idx] && lat_bnd[bnd_srt_idx] == lat_bnd[bnd_srt_idx+bnd_idx]){ /* Next valid vertice must not duplicate first vertex */ bnd_idx++; /* Have we already found all valid vertices? 
*/ if(bnd_idx == bnd_nbr) break; } /* !while */ /* Jump to normalization when all valid vertices found */ if(bnd_idx == bnd_nbr) break; /* Current vertex is valid (non-duplicative) */ bnd_vld_idx=bnd_srt_idx+bnd_idx; vrt_vld[bnd_vld_nbr]=bnd_vld_idx; bnd_vld_nbr++; if(nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: DEBUG %s reports centroidal decomposition col_idx=%lu, bnd_nbr=%d, bnd_idx=%ld, bnd_vld_idx=%ld, bnd_vld_nbr=%ld\n",nco_prg_nm_get(),fnc_nm,col_idx,bnd_nbr,bnd_idx,bnd_vld_idx,bnd_vld_nbr); assert(bnd_vld_nbr <= bnd_nbr); lat_ctr+=lat_bnd[bnd_vld_idx]; lon_ctr+=lon_bnd[bnd_vld_idx]; lon_dff=lon_bnd[bnd_vld_idx]-lon_bnd[0]; if(lon_dff >= 180.0){ lon_ctr-=360.0; }else if(lon_dff <= -180.0){ lon_ctr+=360.0; } /* !lon_dff */ /* Search for next valid vertice in next iteration */ bnd_idx++; } /* !bnd_idx */ /* Compute centroid */ lat_ctr/=bnd_vld_nbr; lon_ctr/=bnd_vld_nbr; /* Centroid can become point A of bnd_nbr polygons or optimize algorithm: 1. Skip sub-dividing polygon into centroid-based triangles for bnd_vld_nbr == 3 2. Split quadrilaterals into two (non-centroid) triangles for bnd_vld_nbr == 4 3. 
Use full centroid-based triangle algorithm for bnd_vld_nbr >= 5 */ lat_ctr_rdn=lat_ctr*dgr2rdn; lon_ctr_rdn=lon_ctr*dgr2rdn; lat_ctr_cos=cos(lat_ctr_rdn); /* Place centroid values in extended arrays for easy access */ lat_bnd_rdn[(col_idx+1)*bnd_nbr_ttl-1L]=lat_ctr_rdn; lon_bnd_rdn[(col_idx+1)*bnd_nbr_ttl-1L]=lon_ctr_rdn; lat_bnd_cos[(col_idx+1)*bnd_nbr_ttl-1L]=lat_ctr_cos; /* Polygon centroid and valid vertices are now known */ assert(bnd_vld_nbr > 2); if(bnd_vld_nbr == 3){ /* Three vertices only means polygon is already decomposed into a triangle */ tri_nbr=1; a_idx[0]=vrt_vld[0]; b_idx[0]=vrt_vld[1]; c_idx[0]=vrt_vld[2]; }else if(bnd_vld_nbr == 4){ /* Bisect quadrilateral into two triangles rather than use centroid and have four triantles */ tri_nbr=2; a_idx[0]=vrt_vld[0]; b_idx[0]=vrt_vld[1]; c_idx[0]=vrt_vld[2]; a_idx[1]=vrt_vld[0]; /* NB: Order is important. This way side C of triangle[0] = side A of trangle[1] */ b_idx[1]=vrt_vld[2]; c_idx[1]=vrt_vld[3]; }else if(bnd_vld_nbr >= 5){ /* Centroid method has as many triangles as valid vertices */ tri_nbr=bnd_vld_nbr; for(int tri_idx=0;tri_idx<tri_nbr;tri_idx++){ a_idx[tri_idx]=(col_idx+1)*bnd_nbr_ttl-1L; /* A is always centroid, store values at end of arrays */ b_idx[tri_idx]=vrt_vld[tri_idx]; c_idx[tri_idx]=vrt_vld[(tri_idx+1)%tri_nbr]; } /* !tri_idx */ } /* !bnd_vld_nbr */ } /* !flg_mth_ctr */ if(flg_mth_csz){ /* A is always first vertice of all triangles */ idx_a=bnd_nbr*col_idx; /* Start search for B at next vertice */ bnd_idx=1; /* bnd_idx labels offset from point A of potential location of triangle points B and C We know that bnd_idx(A) == 0, bnd_idx(B) < bnd_nbr-1, bnd_idx(C) < bnd_nbr */ while(bnd_idx<bnd_nbr-1){ /* Only first triangle must search for B, subsequent triangles recycle previous C as current B */ if(tri_nbr == 0){ /* Skip repeated points that must occur when polygon has fewer than allowed vertices */ /* 20200115: Prior to today we never skipped polar points (same latitudes but different 
longitudes) That worked fine in practice for spherical triangles partly because triangles from CSZ decomposition (aka hub-and-spoke decomposition) are additive, even with multiple points on the same great circle, and partly due to luck (a starting vertex surrounded by points on the same geodesic would break it). Moreover, repeated polar points pose no issues for L'Huilier's (or Girard's) method which depends only on the interior angles and side lengths, not the longitudes of polar points. Small circles change that last part, and we must now eliminate repeated polar points. */ if(edg_typ == nco_edg_smc){ /* Skip repeated numerically identical points */ while(lon_bnd[idx_a] == lon_bnd[idx_a+bnd_idx] && lat_bnd[idx_a] == lat_bnd[idx_a+bnd_idx]){ /* Next vertice may not duplicate A */ bnd_idx++; /* If there is no room for C then all triangles found */ if(bnd_idx == bnd_nbr-1) break; } /* !while */ /* Skip geometrically identical (i.e., repeated polar) points */ while((fabs(lat_bnd[idx_a]) == 90.0) && (fabs(lat_bnd[idx_a+bnd_idx]) == 90.0)){ bnd_idx++; if(bnd_idx == bnd_nbr-1) break; } /* !while */ }else if(edg_typ != nco_edg_smc){ /* Spherical polygongs can use simpler, pre-20200116 algorithm to eliminate repeated points */ while(lon_bnd[idx_a] == lon_bnd[idx_a+bnd_idx] && lat_bnd[idx_a] == lat_bnd[idx_a+bnd_idx]){ /* Next vertice may not duplicate A */ bnd_idx++; /* If there is no room for C then all triangles found */ if(bnd_idx == bnd_nbr-1) break; } /* !while */ }else{ abort(); } /* !edg_typ */ /* Jump to next column when all triangles found */ if(bnd_idx == bnd_nbr-1) break; } /* !tri_nbr */ idx_b=idx_a+bnd_idx; /* Search for C at next vertice */ bnd_idx++; /* fxm */ while(lon_bnd[idx_b] == lon_bnd[idx_a+bnd_idx] && lat_bnd[idx_b] == lat_bnd[idx_a+bnd_idx]){ /* Next vertice may not duplicate B */ bnd_idx++; /* If there is no room for C then all triangles found */ if(bnd_idx == bnd_nbr) break; } /* !while */ /* Jump to next column when all triangles found */ 
if(bnd_idx == bnd_nbr) break; idx_c=idx_a+bnd_idx; /* Valid triangle, vertices are known and labeled */ a_idx[tri_nbr]=idx_a; b_idx[tri_nbr]=idx_b; c_idx[tri_nbr]=idx_c; tri_nbr++; /* Begin search for next B at current C */ bnd_idx=idx_c-idx_a; } /* !bnd_idx */ } /* !flg_mth_csz */ /* Triangles are known for requested decomposition method Compute and accumulate their area Optimized algorithm recycles previous arc c as current arc a (after first triangle) */ for(int tri_idx=0;tri_idx<tri_nbr;tri_idx++){ idx_a=a_idx[tri_idx]; idx_b=b_idx[tri_idx]; idx_c=c_idx[tri_idx]; if(nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: DEBUG %s reports triangle vertices: col_idx=%lu, tri_idx=%d, idx_a=%ld, idx_b=%ld, idx_c=%ld\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,idx_a,idx_b,idx_c); /* Compute interior angle/great circle arc a for first triangle; subsequent triangles recycle previous arc c */ if(tri_idx == 0){ /* 20150831: Test by computing ncol=0 area in conus chevrons grid, compare to MAT results ncremap -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/257x512_SCRIP.20150901.nc -m ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.20150901.nc ncremap -s ${DATA}/grids/257x512_SCRIP.20150901.nc -g ${DATA}/grids/conusx4v1np4_chevrons_scrip_c150815.nc -m ${DATA}/maps/map_fv257x512_to_conusx4v1np4_chevrons_bilin.20150901.nc ncks -O -D 5 -v FSNT --map ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.150418.nc ${DATA}/ne30/raw/famipc5_ne30_v0.3_00003.cam.h0.1979-01.nc ${DATA}/ne30/rgr/fv_FSNT.nc ncks -O -D 5 -v FSNT --rgr diagnose_area --map ${DATA}/maps/map_fv257x512_to_conusx4v1np4_chevrons_bilin.20150901.nc ${DATA}/ne30/rgr/fv_FSNT.nc ${DATA}/ne30/rgr/dogfood.nc ncks -O -D 1 --rgr infer#diagnose_area --rgr grid=${HOME}/grd.nc ${DATA}/ne30/rgr/dogfood.nc ~/foo.nc ncks -H -s %20.15e, -v area -d ncol,0 ${DATA}/ne30/rgr/dogfood.nc ncks -H -s %20.15e, -v grid_area -d grid_size,0 ${DATA}/grids/conusx4v1np4_chevrons_scrip_c150815.nc ncks -H -s %20.15e, -v grid_area -d 
grid_size,0 ${HOME}/grd.nc ncol=0 on conus chevrons file: 3.653857995295246e-05 raw GLL weight 3.653857995294305e-05 ESMF weight (area_b from map-file) 3.653857995294302e-05 matlab CSZ decomposition (N-2 triangles) computed at SNL by MAT 3.653857995294301e-05 matlab centroidal decomposition (N triangles) computed at SNL by MAT 3.653857995294258e-05 NCO CSZ _and_ centroidal decompositions (new haversine) 3.653857995289623e-05 NCO CSZ decomposition (old acos) 20191011: Tested this same polygon in ESMF and NCO weight-generator NCO maps begin with first destination gridcell, find next ESMF gridcell by searching for first col: ncks --trd -C -v col ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc | egrep "=1 " ncks -H --trd -s %20.15e -C -d n_b,0 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc 3.653857995294305e-05 ncks -H --trd -s '%20.15e, ' -C -d n_b,0 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc 3.653857995295246e-05 ESMF and NCO weight-generators produce nearly identical S results to double-precision: ncks -H --trd -s '%20.15e, ' -C -d n_s,0,1 -v S ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc 2.181999640069480e-03, 1.309571213636605e-02 ncks -H --trd -s %20.15e -C -d n_s,207436 -d n_s,209617 -v S ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc 2.181999640069454e-03, 1.309571213636510e-02 Compare first five polygon areas: ncks --trd -H -C -s '%20.15e, ' -d n_b,0,4 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc ncks --trd -H -C -s '%20.15e, ' -d n_b,0,4 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc 3.653857995294305e-05, 1.250459284052488e-04, 1.448204605591709e-04, 8.223598867312266e-05, 8.585831933875070e-05, # aave 3.653857995294258e-05, 1.250459284052470e-04, 1.448204605591675e-04, 8.223598867312247e-05, 8.585831933875186e-05, Compare total areas: 
ncwa -O -y ttl -v area.? ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc ~/foo_aave.nc ncwa -O -y ttl -v area.? ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc ~/foo_nco.nc ncks --trd -H -C -s '%20.15e, ' -v area.? ~/foo_aave.nc ncks --trd -H -C -s '%20.15e, ' -v area.? ~/foo_nco.nc aave: 1.256637061435867e+01, 1.256637061435973e+01 nco: 1.256637061435857e+01, 1.256637061435955e+01 4*pi: 1.25663706143591729538e+01 Does (tru_glb_ttl/NCO_glb_ttl)*NCO_lcl = ESMF_lcl ? (1.25663706143591729538/1.256637061435857)*3.653857995294258=3.6538579952944333 No, normalization alone does not explain differences between ESMF and NCO It does not appear that ESMF does a global normalization of areas/weights */ /* Computing great circle arcs over small arcs requires care since central angle is near 0 degrees Cosine small angles changes slowly for such angles, and leads to precision loss Use haversine formula instead of spherical law of cosines formula https://en.wikipedia.org/wiki/Great-circle_distance */ /* Interior angle/great circle arc a, spherical law of cosines formula (loses precision): cos_a=lat_bnd_cos[idx_a]*lon_bnd_cos[idx_a]*lat_bnd_cos[idx_b]*lon_bnd_cos[idx_b]+ lat_bnd_cos[idx_a]*lon_bnd_sin[idx_a]*lat_bnd_cos[idx_b]*lon_bnd_sin[idx_b]+ lat_bnd_sin[idx_a]*lat_bnd_sin[idx_b];ngl_a=acos(cos_a); */ /* Interior angle/great circle arc a, haversine formula: */ // 20160918: Use branch cut rules for longitude lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_a],lon_bnd_rdn[idx_b])); lat_dlt=fabs(lat_bnd_rdn[idx_a]-lat_bnd_rdn[idx_b]); sin_hlf_tht=sqrt(pow(sin(0.5*lat_dlt),2)+lat_bnd_cos[idx_a]*lat_bnd_cos[idx_b]*pow(sin(0.5*lon_dlt),2)); ngl_a=2.0*asin(sin_hlf_tht); }else{ /* !tri_idx == 0 */ ngl_a=ngl_c; } /* !tri_idx == 0 */ /* Interior angle/great circle arc b */ lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_b],lon_bnd_rdn[idx_c])); lat_dlt=fabs(lat_bnd_rdn[idx_b]-lat_bnd_rdn[idx_c]); 
sin_hlf_tht=sqrt(pow(sin(0.5*lat_dlt),2)+lat_bnd_cos[idx_b]*lat_bnd_cos[idx_c]*pow(sin(0.5*lon_dlt),2)); ngl_b=2.0*asin(sin_hlf_tht); /* Interior angle/great circle arc c */ lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_c],lon_bnd_rdn[idx_a])); lat_dlt=fabs(lat_bnd_rdn[idx_c]-lat_bnd_rdn[idx_a]); sin_hlf_tht=sqrt(pow(sin(0.5*lat_dlt),2)+lat_bnd_cos[idx_c]*lat_bnd_cos[idx_a]*pow(sin(0.5*lon_dlt),2)); ngl_c=2.0*asin(sin_hlf_tht); /* Semi-perimeter */ prm_smi=0.5*(ngl_a+ngl_b+ngl_c); /* L'Huilier's formula results in NaN if any side exceeds semi-perimeter This can occur in needle-shaped triangles due to rounding errors in derived arc lengths a, b, c 20200203: Problematic needles occurs a few dozen times in ne120pg2 -> cmip6 maps Problematic isoceles triangles are much rarer than problematic needles Therefore look for needle-issues first, then, if none found, look for isoceles issues Wikipedia recommends treating ill-conditioned triangles by Side-Angle-Side (SAS) formula https://en.wikipedia.org/wiki/Spherical_trigonometry Diagnose needles beforehand and call SAS routines as above to avoid NaN in L'Huilier Label problematic needle triangles by shortest side, e.g., "flg_sas_a" means (b ~ c) and a ~ 0.0 */ flg_sas_ndl=flg_sas_isc=flg_sas_a=flg_sas_b=flg_sas_c=False; if(ngl_a > prm_smi){if(ngl_b > ngl_c) flg_sas_c=True; else flg_sas_b=True;} /* a exceeds semi-perimeter */ else if(ngl_b > prm_smi){if(ngl_c > ngl_a) flg_sas_a=True; else flg_sas_c=True;} /* b exceeds semi-perimeter */ else if(ngl_c > prm_smi){if(ngl_a > ngl_b) flg_sas_b=True; else flg_sas_a=True;} /* c exceeds semi-perimeter */ if(flg_sas_a || flg_sas_b || flg_sas_c) flg_sas_ndl=True; if(!flg_sas_ndl){ /* L'Huilier's formula becomes ill-conditioned when two sides are one half the third side This occurs for flat, isoceles-shaped triangles Label problematic isoceles triangles by longest side, e.g., "flg_sas_a" means (b ~ c) ~ 0.5*a */ /* Sensitivity tests on ~20191014 showed that triangular 
ill-conditioning treatment (i.e., switching to SAS method) does not improve (and may degrade) accuracy for eps_ill_cnd > 1.0e-15 */ const double eps_ill_cnd=1.0e-15; /* [frc] Ill-conditioned tolerance for interior angle/great circle arcs in triangle */ const double eps_ill_cnd_dbl=2.0*eps_ill_cnd; /* [frc] Ill-conditioned tolerance for interior angle/great circle arcs in triangle */ if((fabs(ngl_a-ngl_b) < eps_ill_cnd) && (fabs(ngl_a-0.5*ngl_c) < eps_ill_cnd_dbl)) flg_sas_c=True; /* c is twice a and b */ else if((fabs(ngl_b-ngl_c) < eps_ill_cnd) && (fabs(ngl_b-0.5*ngl_a) < eps_ill_cnd_dbl)) flg_sas_a=True; /* a is twice b and c */ else if((fabs(ngl_c-ngl_a) < eps_ill_cnd) && (fabs(ngl_c-0.5*ngl_b) < eps_ill_cnd_dbl)) flg_sas_b=True; /* b is twice c and a */ if(flg_sas_a || flg_sas_b || flg_sas_c) flg_sas_isc=True; } /* !flg_sas_ndl */ if(flg_sas_isc || flg_sas_ndl){ /* Compute area using SAS formula */ double cos_hlf_C; /* [frc] Cosine of half of canoncal surface angle C */ //double sin_hlf_C; /* [frc] Sine of half of canoncal surface angle C */ double ngl_sfc_ltr_C; /* [rdn] Canonical surface angle/great circle arc C */ double tan_hlf_a_tan_hlf_b; /* [frc] Product of tangents of one-half of nearly equal canoncal sides */ double xcs_sph_hlf_tan; /* [frc] Tangent of one-half the spherical excess */ /* Transform sides into canonical order for formula where C is surface angle between arcs a and b */ if(flg_sas_c){ ngl_ltr_a=ngl_a; ngl_ltr_b=ngl_b; ngl_ltr_c=ngl_c; } /* !flg_sas_c */ if(flg_sas_a){ ngl_ltr_a=ngl_b; ngl_ltr_b=ngl_c; ngl_ltr_c=ngl_a; } /* !flg_sas_a */ if(flg_sas_b){ ngl_ltr_a=ngl_c; ngl_ltr_b=ngl_a; ngl_ltr_c=ngl_b; } /* !flg_sas_b */ if(flg_sas_ndl && (nco_dbg_lvl_get() >= nco_dbg_scl)) (void)fprintf(stdout,"%s: INFO %s reports col_idx = %li triangle %d is needle-shaped triangle with a side that exceeds semi-perimeter = %0.16e. Eschew L'Huilier's formula for spherical excess to avoid NaN. 
Could use SAS formula with canonical central interior arc c = %0.16e.\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,prm_smi,ngl_ltr_c); if(flg_sas_isc && (nco_dbg_lvl_get() >= nco_dbg_scl)) (void)fprintf(stdout,"%s: INFO %s reports col_idx = %li triangle %d is nearly flat isoceles-shaped triangle. Canonical arcs a and b differ by %0.16e. Eschew L'Huilier's formula for spherical excess to avoid low precision. Could use SAS formula.\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,fabs(ngl_ltr_a-ngl_ltr_b)); /* Determine canonical surface angle C To find any angle given three spherical triangle sides, Wikipedia opines: "The cosine rule may be used to give the angles A, B, and C but, to avoid ambiguities, the half-angle formulae are preferred." Half-angle formulae include two applicable variants that yield the sine or cosine of half C Then C is determined as twice the asin() or acos() function, respectively For needle-shaped triangles, RHS sin formula is ~ sin^2(0)/sin(a)*sin(b) ~ 0.0 For needle-shaped triangles, RHS cos formula is ~ sin^2(s)/sin(a)*sin(b) ~ 0.5 For flat isoceles triangles, RHS sin formula is ~ sin^2(0)/sin(a)*sin(b) ~ 0.0 For flat isoceles triangles, RHS cos formula is ~ sin(s)*sin(0)/sin(a)*sin(b) ~ 0.0 Use sin formula since both needle- and isoceles-shaped triangles have RHS ~ 0.0 where arcsin() is most precise 20200203: Half-angle sine formula gives NaNs, and half-angle cosine formula works on ne120pg2->cmip. Why? 
Adopting cosine formula because it works */ //sin_hlf_C=sqrt(sin(prm_smi-ngl_ltr_a)*sin(prm_smi-ngl_ltr_b)/(sin(ngl_ltr_a)*sin(ngl_ltr_b))); // Half-angle sine formula cos_hlf_C=sqrt(sin(prm_smi)*sin(prm_smi-ngl_ltr_c)/(sin(ngl_ltr_a)*sin(ngl_ltr_b))); // Half-angle cosine formula //ngl_sfc_ltr_C=2.0*asin(sin_hlf_C); ngl_sfc_ltr_C=2.0*acos(cos_hlf_C); /* SAS formula */ tan_hlf_a_tan_hlf_b=tan(0.5*ngl_ltr_a)*tan(0.5*ngl_ltr_b); xcs_sph_hlf_tan=tan_hlf_a_tan_hlf_b*sin(ngl_sfc_ltr_C)/(1.0+tan_hlf_a_tan_hlf_b*cos(ngl_sfc_ltr_C)); assert(fabs(xcs_sph_hlf_tan) != M_PI_2); xcs_sph=2.0*atan(xcs_sph_hlf_tan); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO SAS area formula for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16e, %0.16e, %0.16e). Spherical excess = %0.16e.\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c,xcs_sph); // Single-line version // xcs_sph=2.0*atan(tan(0.5*ngl_ltr_a)*tan(0.5*ngl_ltr_b)*sin(2.0*acos(sqrt(sin(prm_smi)*sin(prm_smi-ngl_c)/(sin(ngl_a)*sin(ngl_b)))))/(1.0+tan_hlf_a_tan_hlf_b*cos(2.0*acos(sqrt(sin(prm_smi)*sin(prm_smi-ngl_c)/(sin(ngl_a)*sin(ngl_b))))))); /* Above procedure for problematic needle-shaped and isoceles-shaped triangles degrades statistics For ne30pg2, ne120pg2 -> cmip, setting area = 0.0 _greatly_ improves area statistics (Why?) Set spherical excess to zero for problematic needle-shaped and isoceles-shaped triangles */ /* fxm: Make zeroing skinny needles/isoceles-shaped triangle-areas a command-line option? 
*/ if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO Setting SAS area = 0.0\n",nco_prg_nm_get()); xcs_sph=0.0; /* !flg_sas */ }else{ double xcs_sph_qtr_tan; /* [frc] Tangent of one-quarter the spherical excess */ xcs_sph_qtr_tan=sqrt(tan(0.5*prm_smi)*tan(0.5*(prm_smi-ngl_a))*tan(0.5*(prm_smi-ngl_b))*tan(0.5*(prm_smi-ngl_c))); assert(fabs(xcs_sph_qtr_tan) != M_PI_2); xcs_sph=4.0*atan(xcs_sph_qtr_tan); /* 20191014: Aggregate all previous area-related commands into one, gigantic, unreadable, possibly more precise command (tested and it is more obfuscated but not more precise) */ // xcs_sph=4.0*atan(sqrt(tan(0.5*0.5*(ngl_a+ngl_b+ngl_c))*tan(0.5*(0.5*(ngl_a+ngl_b+ngl_c)-ngl_a))*tan(0.5*(0.5*(ngl_a+ngl_b+ngl_c)-ngl_b))*tan(0.5*(0.5*(ngl_a+ngl_b+ngl_c)-ngl_c)))); } /* !flg_sas */ if(isnan(xcs_sph)){ const double eps_ngl_skn=1.0e-13; /* [frc] Angles skinnier than this form needles whose area ~ 0.0 */ /* Categorize reason for NaN */ (void)fprintf(stdout,"%s: WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING\nUnxpected NaN polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16e, %0.16e, %0.16e).\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c); if( /* Side exceeds semi-perimeter */ (ngl_a > prm_smi) || (ngl_b > prm_smi) || (ngl_c > prm_smi) ){ (void)fprintf(stdout,"%s: WARNING Triangle side exceeds semi-perimeter = %0.16e polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16e, %0.16e, %0.16e). 
Assigned triangle area = 0.0.\n",nco_prg_nm_get(),prm_smi,col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c); }else if( /* Are angles too skinny? Quite often on ne30pg2, ne120pg2 */ (ngl_a < eps_ngl_skn) || (ngl_b < eps_ngl_skn) || (ngl_c < eps_ngl_skn) ){ (void)fprintf(stdout,"%s: WARNING Triangle has at least one skinny angles < %g [rdn] for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16f, %0.16f, %0.16f). Assigned triangle area = 0.0.\n",nco_prg_nm_get(),eps_ngl_skn,col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c); }else if( /* Are two vertices identical to double-precision? Never on ne30pg2, ne120pg2 */ ((lat_bnd[idx_a] == lat_bnd[idx_b]) && (lon_bnd[idx_a] == lon_bnd[idx_b])) || ((lat_bnd[idx_b] == lat_bnd[idx_c]) && (lon_bnd[idx_b] == lon_bnd[idx_c])) || ((lat_bnd[idx_c] == lat_bnd[idx_a]) && (lon_bnd[idx_c] == lon_bnd[idx_a])) ){ (void)fprintf(stdout,"%s: WARNING Triangle has repeated points for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g). Assigned triangle area = 0.0.\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c]); }else{ (void)fprintf(stdout,"%s: WARNING Triangle area formula yields NaN for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16f, %0.16f, %0.16f). Are points co-linear? 
Assigned triangle area = 0.0.\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c); } /* !co-linear */ xcs_sph=0.0; } /* !NaN */ area[col_idx]+=xcs_sph; /* Accumulate spherical triangle area into reported polygon area and adjust below */ area_smc+=xcs_sph; /* Accumulate spherical triangle area into small-circle polygon area and adjust below */ area_ttl+=xcs_sph; /* Accumulate spherical triangle area into spherical polygon area */ area_smc_ttl+=xcs_sph; /* Accumulate spherical triangle area into total polygon area and adjust below */ /* 20160918 from here to end of loop is non-spherical work 20170217: Temporarily turn-off latitude circle diagnostics because Sungduk's POP case breaks them Canonical latitude-triangle geometry has point A at apex and points B and C at same latitude ncremap --dbg=1 --alg_typ=nco --grd_src=${DATA}/grids/ne30np4_pentagons.091226.nc --grd_dst=${DATA}/grids/cmip6_180x360_scrip.20181001.nc --map=${DATA}/maps/map_ne30np4_to_cmip6_180x360_nco.20190601.nc ncremap --dbg=1 -R 'edg_typ=smc' --alg_typ=nco --grd_src=${DATA}/grids/ne30np4_pentagons.091226.nc --grd_dst=${DATA}/grids/cmip6_180x360_scrip.20181001.nc --map=${DATA}/maps/map_ne30np4_to_cmip6_180x360_smc.20190601.nc */ flg_tri_crr_smc=False; if(lat_bnd_rdn[idx_a] == lat_bnd_rdn[idx_b] || lat_bnd_rdn[idx_b] == lat_bnd_rdn[idx_c] || lat_bnd_rdn[idx_c] == lat_bnd_rdn[idx_a]){ /* Set flag only if triangle is not degenerate. 
Degenerate triangles (3 points on a geodesic) have zero area */ if(xcs_sph != 0.0) flg_ply_has_smc=flg_tri_crr_smc=True; if(nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: DEBUG Found small circle triangle with vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c]); } /* endif */ if((edg_typ == nco_edg_smc) && flg_tri_crr_smc){ double ngl_plr; /* [rdn] Polar angle (co-latitude) */ long idx_ltr_a; /* [idx] Point A (apex) of canonical latitude-triangle geometry, 1-D index */ long idx_ltr_b; /* [idx] Point B (base) of canonical latitude-triangle geometry, 1-D index */ long idx_ltr_c; /* [idx] Point C (base) of canonical latitude-triangle geometry, 1-D index */ /* Rotate labels to standard position with vertex A, equi-latitude points B and C */ if(lat_bnd_rdn[idx_a] == lat_bnd_rdn[idx_b]){ idx_ltr_a=idx_c; idx_ltr_b=idx_a; idx_ltr_c=idx_b; ngl_ltr_a=ngl_c; ngl_ltr_b=ngl_a; ngl_ltr_c=ngl_b; ngl_plr=fabs(M_PI_2-lat_bnd_rdn[idx_a]); }else if(lat_bnd_rdn[idx_b] == lat_bnd_rdn[idx_c]){ idx_ltr_a=idx_a; idx_ltr_b=idx_b; idx_ltr_c=idx_c; ngl_ltr_a=ngl_a; ngl_ltr_b=ngl_b; ngl_ltr_c=ngl_c; ngl_plr=fabs(M_PI_2-lat_bnd_rdn[idx_b]); }else if(lat_bnd_rdn[idx_c] == lat_bnd_rdn[idx_a]){ idx_ltr_a=idx_b; idx_ltr_b=idx_c; idx_ltr_c=idx_a; ngl_ltr_a=ngl_b; ngl_ltr_b=ngl_c; ngl_ltr_c=ngl_a; ngl_plr=fabs(M_PI_2-lat_bnd_rdn[idx_c]); }else{ (void)fprintf(stdout,"%s: ERROR latitudes not equal in small circle section. 
Vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c]); abort(); } /* endif */ /* 20160918: Compute exact area of latitude triangle wedge */ double xpn_x; /* [frc] Expansion parameter */ lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_ltr_b],lon_bnd_rdn[idx_ltr_c])); assert(lon_dlt != 0.0); // Latitude triangles must have bases with distinct longitudes if(lon_dlt != M_PI){ /* Normal clause executed for small-circle triangles */ /* Numeric conditioning uncertain. Approaches divide-by-zero when lon_dlt << 1 */ xpn_x=lat_bnd_sin[idx_ltr_b]*(1.0-cos(lon_dlt))/sin(lon_dlt); assert(fabs(xpn_x) != M_PI_2); area_smc_crc=2.0*atan(xpn_x); /* 20170217: Sungduk's POP regrid triggers following abort(): ncremap -D 1 -i ~/pop_g16.nc -d ~/cam_f19.nc -o ~/foo.nc */ //assert(xpn_x >= 0.0); //if(lat_bnd[idx_ltr_b] > 0.0) area_smc_crc+=-lon_dlt*lat_bnd_sin[idx_ltr_b]; else area_smc_crc+=+lon_dlt*lat_bnd_sin[idx_ltr_b]; area_smc_crc+=-lon_dlt*lat_bnd_sin[idx_ltr_b]; }else{ /* 20200228: Latitude triangles may have bases with longitudes that differ by 180 degrees Consider a quadrilateral with four equidistant vertices in longitude, and that caps a pole: CSZ decomposition technique divides this into two triangles each with three co-latitudinal points and no vertex at pole Solution candidates: 1. Divide such quadrilaterals using centroid technique Just realized current implementation of centroid decomposition fails on polar caps Failure occurs because centroid latitude is +/- ~90 not mean of vertices' latitudes Must impute "pseudo-centroid" with latitude +/- 90 instead of averaging vertex latitudes Requires testing each polygon to determine if it contains pole <- Too difficult/expensive 2. 
Assume latitude triangles whose base is 180 degrees are at pole Compute area exactly using analytic formula for annular lune */ (void)fprintf(stdout,"%s: INFO longitudes differ by pi in small circle section. Vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_ltr_a],lon_bnd[idx_ltr_a],lat_bnd[idx_ltr_b],lon_bnd[idx_ltr_b],lat_bnd[idx_ltr_c],lon_bnd[idx_ltr_c]); (void)fprintf(stdout,"%s: DEBUG col_nbr=%lu, bnd_nbr=%d, col_idx=%ld, area=%g. Vertices [0..bnd_nbr-1] in format idx (lat,lon)\n",nco_prg_nm_get(),col_nbr,bnd_nbr,col_idx,xcs_sph); for(int bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++) (void)fprintf(stdout,"%2d (%g, %g)\n",bnd_idx,lat_bnd[bnd_nbr*col_idx+bnd_idx],lon_bnd[bnd_nbr*col_idx+bnd_idx]); (void)fprintf(stdout,"%s: INFO Assuming this triangle is decomposed from polar cap polygon. Treating area with analytic formula for annular lune\n",nco_prg_nm_get()); /* Compute small circle correction as difference between spherical triangle area and standard annuular lune formula Small circle correction is positive-definite for polar triangles so use fabs(sin(lat_bnd_sin)) */ area_smc_crc=lon_dlt*fabs(lat_bnd_sin[idx_ltr_b])-area_smc; } /* !lon_dlt */ // Adjust diagnostic areas by small-circle area correction area_smc+=area_smc_crc; area_smc_ttl+=area_smc_crc; area_smc_crc_ttl+=area_smc_crc; area_smc_crc_abs_ttl+=fabs(area_smc_crc); // 20200109: Adjust area reported to calling code by small-circle area correction area[col_idx]+=area_smc_crc; if(0){ /* 20160918: Approximate area of latitude triangle wedge. Use truncated power expansion of exact formula. 
*/ double xpn_x_sqr; /* [frc] Expansion parameter squared */ double xpn_sum; /* [frc] Expansion sum */ double xpn_nmr; /* [frc] Expansion term numerator */ double xpn_trm; /* [frc] Expansion term */ double xpn_dnm; /* [frc] Expansion term denominator */ const unsigned short int rdr_xpn=3; /* [nbr] Order of N in trigonometric series expansion */ unsigned short int idx_xpn; /* [idx] Index in series expansion */ xpn_x=cos(ngl_plr)*(1.0-cos(lon_dlt))/sin(lon_dlt); xpn_x_sqr=xpn_x*xpn_x; xpn_nmr=xpn_x; xpn_dnm=1.0; xpn_trm=xpn_nmr/xpn_dnm; xpn_sum+=xpn_trm; for(idx_xpn=3;idx_xpn<=rdr_xpn;idx_xpn+=2){ xpn_nmr*=xpn_x_sqr; xpn_dnm*=(idx_xpn-1)*idx_xpn; xpn_trm=xpn_nmr/xpn_dnm; xpn_sum+=xpn_trm; } /* !idx_xpn */ (void)fprintf(stdout,"%s: Small-circle area using series approximation...not implemented yet\n",nco_prg_nm_get()); } /* !0 */ if(nco_dbg_lvl_get() >= nco_dbg_scl){ (void)fprintf(stdout,"%s: INFO %s col_idx = %li triangle %d spherical area, latitude-triangle area, %% difference: %g, %g, %g%%\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,xcs_sph,xcs_sph+area_smc_crc,100.0*area_smc_crc/xcs_sph); if(fabs(area_smc_crc/xcs_sph) > 0.1){ (void)fprintf(stdout,"%s: DEBUG Non-spherical correction exceeds 10%% for current triangle with vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_ltr_a],lon_bnd[idx_ltr_a],lat_bnd[idx_ltr_b],lon_bnd[idx_ltr_b],lat_bnd[idx_ltr_c],lon_bnd[idx_ltr_c]); } /* !fabs */ } /* !dbg */ } /* !edg_typ && flg_tri_crr_smc */ } /* !tri_idx */ if(edg_typ == nco_edg_smc && flg_ply_has_smc){ /* Current gridcell contained at least one latitude-triangle */ if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO %s col_idx = %li spherical area, small circle area, %% difference: %g, %g, %g%%\n",nco_prg_nm_get(),fnc_nm,col_idx,area[col_idx],area_smc,100.0*(area_smc-area[col_idx])/area[col_idx]); } /* !edg_typ && !flg_ply_has_smc */ } /* !col_idx */ if(edg_typ == nco_edg_smc && nco_dbg_lvl_get() >= 
nco_dbg_scl) (void)fprintf(stdout,"%s: INFO %s total spherical area, small circle area, %% difference, crc_ttl, crc_abs_ttl: %g, %g, %g%%, %g, %g\n",nco_prg_nm_get(),fnc_nm,area_ttl,area_smc_ttl,100.0*(area_smc_ttl-area_ttl)/area_ttl,area_smc_crc_ttl,area_smc_crc_abs_ttl);

/* Free workspace arrays; nco_free() returns NULL so each pointer is freed and reset in one step */
if(vrt_vld) vrt_vld=(long *)nco_free(vrt_vld);
if(a_idx) a_idx=(long *)nco_free(a_idx);
if(b_idx) b_idx=(long *)nco_free(b_idx);
if(c_idx) c_idx=(long *)nco_free(c_idx);
if(lat_bnd_rdn) lat_bnd_rdn=(double *)nco_free(lat_bnd_rdn);
if(lon_bnd_rdn) lon_bnd_rdn=(double *)nco_free(lon_bnd_rdn);
if(lat_bnd_cos) lat_bnd_cos=(double *)nco_free(lat_bnd_cos);
if(lon_bnd_cos) lon_bnd_cos=(double *)nco_free(lon_bnd_cos);
if(lat_bnd_sin) lat_bnd_sin=(double *)nco_free(lat_bnd_sin);
if(lon_bnd_sin) lon_bnd_sin=(double *)nco_free(lon_bnd_sin);
} /* !nco_sph_plg_area() */

int /* O [enm] Return code */
nco_rgr_tps /* [fnc] Regrid using TempestRemap library */
(rgr_sct * const rgr) /* I/O [sct] Regridding structure */
{
  /* Purpose: Regrid fields using TempestRemap "library" (more precisely, executables)

     Routine was originally written to call Tempest executables
     However, that functionality was all placed into the ncremap shell script
     Thus this C-interface is currently unused
     TempestRemap2 has a library that may be accessed on-line

     Test Tempest library: no way to activate yet
     export DATA_TEMPEST='/data/zender/rgr';ncks -O --rgr=Y ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc */

  const char fnc_nm[]="nco_rgr_tps()";

  /* [nbr] Bytes occupied by format specifiers (two %d, one %s) in command template,
     subtracted from template length when sizing cmd_rgr below.
     NOTE(review): sizing assumes each %d expands to at most three digits (true for the
     hard-coded 180/360 defaults) -- confirm if lat_nbr_rqs/lon_nbr_rqs ever change */
  const int fmt_chr_nbr=6;

  const char *cmd_rgr_fmt;

  char *cmd_rgr;
  char fl_grd_dst[]="/tmp/foo_outRLLMesh.g"; /* Hard-coded destination mesh filename */
  char *fl_grd_dst_cdl;

  int rcd_sys;
  int lat_nbr_rqs=180;
  int lon_nbr_rqs=360;

  nco_rgr_tps_cmd nco_tps_cmd; /* [enm] TempestRemap command enum */

  char *nvr_DATA_TEMPEST; /* [sng] Directory where Tempest grids, meshes, and weights are stored */
  nvr_DATA_TEMPEST=getenv("DATA_TEMPEST");
  /* Default to /tmp when environment variable DATA_TEMPEST is unset or empty */
  rgr->drc_tps= (nvr_DATA_TEMPEST && strlen(nvr_DATA_TEMPEST) > 0L) ? (char *)strdup(nvr_DATA_TEMPEST) : (char *)strdup("/tmp");

  if(nco_dbg_lvl_get() >= nco_dbg_crr){
    (void)fprintf(stderr,"%s: INFO %s reports\n",nco_prg_nm_get(),fnc_nm);
    (void)fprintf(stderr,"drc_tps = %s, ",rgr->drc_tps ? rgr->drc_tps : "NULL");
    (void)fprintf(stderr,"\n");
  } /* endif dbg */

  /* Allow for whitespace characters in fl_grd_dst
     Assume CDL translation results in acceptable name for shell commands */
  fl_grd_dst_cdl=nm2sng_fl(fl_grd_dst);

  /* Construct and execute regridding command */
  nco_tps_cmd=nco_rgr_GenerateRLLMesh;
  cmd_rgr_fmt=nco_tps_cmd_fmt_sng(nco_tps_cmd);
  cmd_rgr=(char *)nco_malloc((strlen(cmd_rgr_fmt)+strlen(fl_grd_dst_cdl)-fmt_chr_nbr+1UL)*sizeof(char));
  if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stderr,"%s: %s reports generating %d by %d RLL mesh in %s...\n",nco_prg_nm_get(),fnc_nm,lat_nbr_rqs,lon_nbr_rqs,fl_grd_dst);
  (void)sprintf(cmd_rgr,cmd_rgr_fmt,lat_nbr_rqs,lon_nbr_rqs,fl_grd_dst_cdl);
  rcd_sys=system(cmd_rgr);
  /* NOTE(review): system() returns -1 only when the child process could not be created;
     a command that runs but exits with non-zero status is NOT detected by this check */
  if(rcd_sys == -1){
    (void)fprintf(stdout,"%s: ERROR %s unable to complete TempestRemap regridding command \"%s\"\n",nco_prg_nm_get(),fnc_nm,cmd_rgr);
    nco_exit(EXIT_FAILURE);
  } /* end if */
  if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"done\n");

  /* Clean-up memory */
  if(fl_grd_dst_cdl) fl_grd_dst_cdl=(char *)nco_free(fl_grd_dst_cdl);
  if(cmd_rgr) cmd_rgr=(char *)nco_free(cmd_rgr);

  return NCO_NOERR;
} /* end nco_rgr_tps() */

const char * /* O [sng] String describing two-dimensional grid-type */
nco_grd_2D_sng /* [fnc] Convert two-dimensional grid-type enum to string */
(const nco_grd_2D_typ_enm nco_grd_2D_typ) /* I [enm] Two-dimensional grid-type enum */
{
  /* Purpose: Convert two-dimensional grid-type enum to string
     Returned strings are static literals; caller must not modify or free() them */
  switch(nco_grd_2D_typ){
  case nco_grd_2D_unk: return "Unknown, unclassified, or unrepresentable 2D grid type (e.g., unstructured, curvilinear, POP displaced-pole)";
  case nco_grd_2D_gss: return "Gaussian latitude grid. Used by spectral transform models, e.g., CCM 1-3, CAM 1-3, ECMWF Forecast, LSM, MATCH, NCEP (R1, R2), UCICTM.";
  case nco_grd_2D_fv: return "Cap-latitude grid, aka FV-scalar grid (in Lin-Rood representation). When global (not regional) in extent and with odd number of latitudes, poles are considered at (and labeled as) centers of first and last gridcells. For example lat_ctr=-90,-89,-88,... and lat_crn=-89.5,-88.5,-87.5,... Thus pole-gridcells span half the equi-angular latitude increment of the rest of the grid. Used by CAM FV (i.e., CAM 4-6), ECMWF (ERA-I, ERA40, ERA5), GEOS-CHEM, UCICTM, UKMO.";
  case nco_grd_2D_eqa: return "Uniform/Equi-Angular latitude grid. Uniform/Equi-angle (everywhere) latitude grid. When global (not regional) in extent and with even number of latitudes, poles are at corners/edges of first and last gridcells. For example lat_ctr=-89.5,-88.5,-87.5,... and lat_crn=-90,-89,-88,.... When global, forms valid FV-staggered (aka FV-velocity, aka offset) grid (for Lin-Rood representation). Used by CIESIN/SEDAC, IGBP-DIS, TOMS AAI, WOCE.";
  default: nco_dfl_case_generic_err(); break;
  } /* end switch */

  /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */
  return (char *)NULL;
} /* end nco_grd_2D_sng() */

const char * /* O [sng] String describing latitude grid-type */
nco_grd_lat_sng /* [fnc] Convert latitude grid-type enum to string */
(const nco_grd_lat_typ_enm nco_grd_lat_typ) /* I [enm] Latitude grid-type enum */
{
  /* Purpose: Convert latitude grid-type enum to string
     Returned strings are static literals; caller must not modify or free() them */
  switch(nco_grd_lat_typ){
  case nco_grd_lat_unk: return "Unknown, unclassified, or unrepresentable latitude grid type (e.g., unstructured, curvilinear, POP3)";
  case nco_grd_lat_gss: return "Gaussian latitude grid used by global spectral models: CCM 1-3, CAM 1-3, ECMWF Forecast, LSM, MATCH, NCEP (R1, R2), UCICTM.";
  case nco_grd_lat_fv: return "Cap-latitude grid, aka FV-scalar grid (in Lin-Rood representation). When global (not regional) in extent and with odd number of latitudes, poles are considered at (and labeled as) centers of first and last gridcells. For example lat_ctr=-90,-89,-88,... and lat_crn=-89.5,-88.5,-87.5,... Thus pole-gridcells span half the equi-angular latitude increment of the rest of the grid. Used by CAM FV (i.e., CAM 4-6), ECMWF (ERA-I, ERA40, ERA5), GEOS-CHEM, UCICTM, UKMO.";
  case nco_grd_lat_eqa: return "Uniform/Equi-Angular latitude grid. Uniform/Equi-angle (everywhere) latitude grid. When global (not regional) in extent and with even number of latitudes, poles are at corners/edges of first and last gridcells. For example lat_ctr=-89.5,-88.5,-87.5,... and lat_crn=-90,-89,-88,.... When global, forms valid FV-staggered (aka FV-velocity, aka offset) grid (for Lin-Rood representation). Used by CIESIN/SEDAC, IGBP-DIS, TOMS AAI, WOCE.";
  default: nco_dfl_case_generic_err(); break;
  } /* end switch */

  /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */
  return (char *)NULL;
} /* end nco_grd_lat_sng() */

const char * /* O [sng] String describing longitude grid-type */
nco_grd_lon_sng /* [fnc] Convert longitude grid-type enum to string */
(const nco_grd_lon_typ_enm nco_grd_lon_typ) /* I [enm] Longitude grid-type enum */
{
  /* Purpose: Convert longitude grid-type enum to string
     Returned strings are static literals; caller must not modify or free() them */
  switch(nco_grd_lon_typ){
  case nco_grd_lon_unk: return "Unknown, unclassified, or unrepresentable longitude grid type (e.g., unstructured, curvilinear)";
  case nco_grd_lon_180_wst: return "Date line at west edge of first longitude cell";
  case nco_grd_lon_180_ctr: return "Date line at center of first longitude cell";
  case nco_grd_lon_Grn_wst: return "Greenwich at west edge of first longitude cell";
  case nco_grd_lon_Grn_ctr: return "Greenwich at center of first longitude cell";
  case nco_grd_lon_bb: return "Longitude grid determined by bounding box (lon_wst/lon_est) and gridcell number (lon_nbr)";
  default: nco_dfl_case_generic_err(); break;
  } /* end switch
*/ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_grd_lon_sng() */ const char * /* O [sng] String describing grid extent */ nco_grd_xtn_sng /* [fnc] Convert two-dimensional grid-extent enum to string */ (const nco_grd_xtn_enm nco_grd_xtn) /* I [enm] Grid-extent enum */ { /* Purpose: Convert grid-extent enum to string */ switch(nco_grd_xtn){ case nco_grd_xtn_nil: return "Unknown"; case nco_grd_xtn_glb: return "Global"; case nco_grd_xtn_rgn: return "Regional"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_grd_xtn_sng() */ const char * /* O [sng] String describing grid conversion */ nco_rgr_grd_sng /* [fnc] Convert grid conversion enum to string */ (const nco_rgr_typ_enm nco_rgr_typ) /* I [enm] Grid conversion enum */ { /* Purpose: Convert grid conversion enum to string */ switch(nco_rgr_typ){ case nco_rgr_grd_1D_to_1D: return "1D_to_1D"; case nco_rgr_grd_1D_to_2D: return "1D_to_2D"; case nco_rgr_grd_2D_to_1D: return "2D_to_1D"; case nco_rgr_grd_2D_to_2D: return "2D_to_2D"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_rgr_grd_sng() */ const char * /* O [sng] String describing regridding method */ nco_rgr_mth_sng /* [fnc] Convert regridding method enum to string */ (const nco_rgr_mth_typ_enm nco_rgr_mth_typ) /* I [enm] Regridding method enum */ { /* Purpose: Convert regridding method enum to string */ switch(nco_rgr_mth_typ){ case nco_rgr_mth_conservative: return "Conservative remapping"; case nco_rgr_mth_bilinear: return "Bilinear remapping"; case nco_rgr_mth_none: return "none"; case nco_rgr_mth_unknown: return "Unknown (TempestRemap or ESMF_weight_only)"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some 
compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_rgr_mth_sng() */ const char * /* O [sng] String describing mapfile generator */ nco_rgr_mpf_sng /* [fnc] Convert mapfile generator enum to string */ (const nco_rgr_mpf_typ_enm nco_rgr_mpf_typ) /* I [enm] Mapfile generator enum */ { /* Purpose: Convert mapfile generator enum to string */ switch(nco_rgr_mpf_typ){ case nco_rgr_mpf_ESMF: return "ESMF Offline Regridding Weight Generator (ERWG), either from ESMF_RegridWeightGen directly or via NCL"; case nco_rgr_mpf_SCRIP: return "SCRIP (original LANL package)"; case nco_rgr_mpf_Tempest: return "TempestRemap (GenerateOfflineMap)"; case nco_rgr_mpf_ESMF_weight_only: return "ESMF Offline Regridding Weight Generator (ERWG), either from ESMF_RegridWeightGen directly or via NCL, with --weight_only option from ERWG 7.1+"; case nco_rgr_mpf_NCO: return "netCDF Operators (NCO) Offline Regridding Weight Generator"; case nco_rgr_mpf_unknown: return "Unknown Weight Generator"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_rgr_mpf_sng() */ const char * /* O [sng] String describing regridding normalization */ nco_rgr_nrm_sng /* [fnc] Convert regridding normalization enum to string */ (const nco_rgr_nrm_typ_enm nco_rgr_nrm_typ) /* I [enm] Regridding normalization enum */ { /* Purpose: Convert regridding normalization enum to string */ switch(nco_rgr_nrm_typ){ case nco_rgr_nrm_fracarea: return "fracarea"; case nco_rgr_nrm_destarea: return "destarea"; case nco_rgr_nrm_none: return "none"; case nco_rgr_nrm_unknown: return "Unknown (possibilities include ESMF_weight_only, NCO, and TempestRemap)"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_rgr_nrm_sng() */ const 
char * /* O [sng] String containing regridding command and format */ nco_tps_cmd_fmt_sng /* [fnc] Convert TempestRemap command enum to command string */ (const nco_rgr_tps_cmd nco_tps_cmd) /* I [enm] TempestRemap command enum */ { /* Purpose: Convert TempestRemap command enum to command string and format */ switch(nco_tps_cmd){ case nco_rgr_ApplyOfflineMap: return "ApplyOfflineMap"; case nco_rgr_CalculateDiffNorms: return "CalculateDiffNorms"; case nco_rgr_GenerateCSMesh: return "GenerateCSMesh --res %d --file %s"; case nco_rgr_GenerateGLLMetaData: return "GenerateGLLMetaData"; case nco_rgr_GenerateICOMesh: return "GenerateICOMesh"; case nco_rgr_GenerateLambertConfConicMesh: return "GenerateLambertConfConicMesh"; case nco_rgr_GenerateOfflineMap: return "GenerateOfflineMap --in_mesh %s --out_mesh %s --ov_mesh %s --in_data %s --out_data %s"; case nco_rgr_GenerateOverlapMesh: return "GenerateOverlapMesh --a %s --b %s --out %s"; case nco_rgr_GenerateRLLMesh: return "GenerateRLLMesh --lat %d --lon %d --file %s"; case nco_rgr_GenerateTestData: return "GenerateTestData --mesh %s --np %d --test %d --out %s"; case nco_rgr_MeshToTxt: return "MeshToTxt"; case nco_rgr_AAA_nil: case nco_rgr_ZZZ_last: default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_tps_cmd_fmt_sng() */ const char * /* O [sng] String containing regridding command name */ nco_tps_cmd_sng /* [fnc] Convert TempestRemap command enum to command name */ (const nco_rgr_tps_cmd nco_tps_cmd) /* I [enm] TempestRemap command enum */ { /* Purpose: Convert TempestRemap command enum to command string */ switch(nco_tps_cmd){ case nco_rgr_ApplyOfflineMap: return "ApplyOfflineMap"; case nco_rgr_CalculateDiffNorms: return "CalculateDiffNorms"; case nco_rgr_GenerateCSMesh: return "GenerateCSMesh"; case nco_rgr_GenerateGLLMetaData: return "GenerateGLLMetaData"; case nco_rgr_GenerateICOMesh: return 
"GenerateICOMesh"; case nco_rgr_GenerateLambertConfConicMesh: return "GenerateLambertConfConicMesh"; case nco_rgr_GenerateOfflineMap: return "GenerateOfflineMap"; case nco_rgr_GenerateOverlapMesh: return "GenerateOverlapMesh"; case nco_rgr_GenerateRLLMesh: return "GenerateRLLMesh"; case nco_rgr_GenerateTestData: return "GenerateTestData"; case nco_rgr_MeshToTxt: return "MeshToTxt"; case nco_rgr_AAA_nil: case nco_rgr_ZZZ_last: default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_tps_cmd_sng() */ int /* O [enm] Return code */ nco_grd_mk /* [fnc] Create SCRIP-format grid file */ (rgr_sct * const rgr) /* I/O [sct] Regridding structure */ { /* Purpose: Use grid information to create SCRIP-format grid file Spherical geometry terminology: spherical cap = spherical dome = volume cut-off by plane spherical lune = digon = area bounded by two half-great circles = base of spherical wedge spherical segment = volume defined by cutting sphere with pair parallel planes spherical sector = volume subtended by lat1 spherical wedge = ungula = volume subtended by lon2-lon1 spherical zone = area of spherical segment excluding bases spherical quadrangle = area of intersection of spherical zone and lune (i.e., area of bearing = angle from true north geodesic = shortest path between points on a surface great circle = orthodrome = "straight path" = geodesic of the sphere convergency = difference (in azimuth?) 
between great circle tracks at two different positions conversion angle = angle between geodesic and rhumb line rhumb line = loxodrome = "oblique (or slanted) path" = line of constant azimuth Formulae: http://www.movable-type.co.uk/scripts/latlong.html # On-line Javascript implementation http://williams.best.vwh.net/avform.htm ACME: https://acme-svn2.ornl.gov/acme-repo/acme/mapping/grids https://acme-svn2.ornl.gov/acme-repo/acme/inputdata/cpl/gridmaps NCAR: yellowstone.ucar.edu:/glade/p/cesm/cseg/mapping/grids yellowstone.ucar.edu:/glade/p_old/cesm/cseg/mapping/grids Global RLL grids: ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr grid=${DATA}/grids/180x360_SCRIP.20150901.nc --rgr latlon=180,360 --rgr lat_typ=eqa --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='Equiangular grid 90x180' --rgr grid=${DATA}/grids/90x180_SCRIP.20150901.nc --rgr latlon=90,180 --rgr lat_typ=eqa --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc Maps for global RLL grids: ESMF_RegridWeightGen -s ${DATA}/grids/180x360_SCRIP.20150901.nc -d ${DATA}/grids/90x180_SCRIP.20150901.nc -w ${DATA}/maps/map_180x360_to_90x180.20150901.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/90x180_SCRIP.20150901.nc -d ${DATA}/grids/180x360_SCRIP.20150901.nc -w ${DATA}/maps/map_90x180_to_180x360.20150901.nc --method conserve ACME grids: ncks -O -D 1 --rgr ttl='FV-scalar grid 129x256' --rgr grid=${DATA}/grids/129x256_SCRIP.20150910.nc --rgr latlon=129,256 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='FV-scalar grid 257x512' --rgr grid=${DATA}/grids/257x512_SCRIP.20150910.nc --rgr latlon=257,512 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='FV-scalar grid 801x1600' --rgr grid=${DATA}/grids/801x1600_SCRIP.20150910.nc --rgr latlon=801,1600 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ACME maps: ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d 
${DATA}/grids/129x256_SCRIP.20150910.nc -w ${DATA}/maps/map_ne30np4_to_fv129x256_aave.20150910.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/257x512_SCRIP.20150910.nc -w ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.20150910.nc --method bilinear ESMF_RegridWeightGen -s ${DATA}/grids/ne120np4_pentagons.100310.nc -d ${DATA}/grids/257x512_SCRIP.20150910.nc -w ${DATA}/maps/map_ne120np4_to_fv257x512_aave.20150910.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/ne120np4_pentagons.100310.nc -d ${DATA}/grids/801x1600_SCRIP.20150910.nc -w ${DATA}/maps/map_ne120np4_to_fv801x1600_bilin.20150910.nc --method bilinear AMWG grids: AMWG diagnostics (until ~2016) mis-diagnose FV grids with odd numbers of latitudes as Gaussian Grids ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 96x144 for horizontal resolution 1.9x2.5 degrees' --rgr grid=${DATA}/grids/96x144_SCRIP.20160301.nc --rgr latlon=96,144 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 192x288 for horizontal resolution 0.9x1.25 degrees' --rgr grid=${DATA}/grids/192x288_SCRIP.20160301.nc --rgr latlon=192,288 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 128x256 for horizontal resolution 1.4x1.4 degrees' --rgr grid=${DATA}/grids/128x256_SCRIP.20160301.nc --rgr latlon=128,256 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 256x512 for horizontal resolution 0.7x0.7 degrees' --rgr grid=${DATA}/grids/256x512_SCRIP.20160301.nc --rgr latlon=256,512 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 800x1600 for horizontal resolution 0.225x0.225 degrees' --rgr grid=${DATA}/grids/800x1600_SCRIP.20160301.nc --rgr latlon=800,1600 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='Equiangular grid 
360x720 produced by RTM' --rgr grid=${DATA}/grids/360x720rtm_SCRIP.20160301.nc --rgr latlon=360,720 --rgr lat_typ=eqa --rgr lon_typ=180_wst ~/nco/data/in.nc ~/foo.nc AMWG maps old method (no provenance archived): ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/128x256_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv128x256_aave.20160301.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/256x512_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv256x512_bilin.20160301.nc --method bilinear ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/256x512_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv256x512_aave.20160301.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/800x1600_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv800x1600_bilin.20160301.nc --method bilinear AMWG maps with ncremap (preferred method): ncremap -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/128x256_SCRIP.20160301.nc -m ${DATA}/maps/map_ne30np4_to_fv128x256_aave.20160301.nc -w esmf -a conserve ncremap -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/256x512_SCRIP.20160301.nc -m ${DATA}/maps/map_ne30np4_to_fv256x512_bilin.20160301.nc -w esmf -a bilinear ncremap -s ${DATA}/grids/ne120np4_pentagons.100310.nc -g ${DATA}/grids/256x512_SCRIP.20160301.nc -m ${DATA}/maps/map_ne120np4_to_fv256x512_aave.20160301.nc -w esmf -a conserve ncremap -s ${DATA}/grids/ne120np4_pentagons.100310.nc -g ${DATA}/grids/800x1600_SCRIP.20160301.nc -m ${DATA}/maps/map_ne120np4_to_fv800x1600_bilin.20160301.nc -w esmf -a bilinear MPAS grids: NCO cannot yet generate MPAS grids, but given an MPAS grid it can generate appropriate maps MPAS maps: ncremap -s ${DATA}/grids/oEC60to30.SCRIP.150729.nc -g ${DATA}/grids/t62_SCRIP.20150901.nc -m ${DATA}/maps/map_oEC60to30_to_t62_aave.20160301.nc -w esmf -a conserve ncremap -s 
${DATA}/grids/oEC60to30.SCRIP.150729.nc -g ${DATA}/grids/t62_SCRIP.20150901.nc -m ${DATA}/maps/map_oEC60to30_to_t62_bilin.20160301.nc -w esmf -a bilinear Regional RLL grids: ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr grid=${DATA}/sld/rgr/grd_dst.nc --rgr latlon=100,100 --rgr snwe=30.0,70.0,-120.0,-90.0 ~/nco/data/in.nc ~/foo.nc Global RLL skeleton: ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr skl=${DATA}/sld/rgr/skl_180x360.nc --rgr grid=${DATA}/grids/180x360_SCRIP.20150901.nc --rgr latlon=180,360#lat_typ=eqa#lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc Curvilinear grids: ncks -O -D 1 --rgr ttl='Curvilinear grid 10x20. Degenerate case.' --rgr crv --rgr lon_crv=0.0 --rgr skl=${DATA}/sld/rgr/skl_crv.nc --rgr grid=${DATA}/sld/rgr/grd_crv.nc --rgr latlon=10,20 --rgr snwe=-5.0,5.0,-10.0,10.0 ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='Curvilinear grid 10x20. Curvilinearity = 1.0 lon' --rgr lon_crv=1.0 --rgr skl=${DATA}/sld/rgr/skl_crv.nc --rgr grid=${DATA}/sld/rgr/grd_crv.nc --rgr latlon=10,20 --rgr snwe=-5.0,5.0,-10.0,10.0 ~/nco/data/in.nc ~/foo.nc 1-D Latitude (no longitude) grids: ncks -O -D 1 --rgr ttl='Latitude-only zonal grid' --rgr skl=${DATA}/sld/rgr/skl_lat_10dgr_uni.nc --rgr grid=${DATA}/sld/rgr/grd_lat_10dgr_uni.nc --rgr latlon=18,1 --rgr snwe=-90,90,0,360 ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='Latitude-only zonal grid' --rgr skl=${DATA}/sld/rgr/skl_lat_05dgr_cap.nc --rgr grid=${DATA}/sld/rgr/grd_lat_05dgr_cap.nc --rgr latlon=37,1 --rgr snwe=-90,90,0,360 ~/nco/data/in.nc ~/foo.nc ncremap -i ${DATA}/sld/rgr/skl_lat_10dgr_uni.nc -d ${DATA}/sld/rgr/skl_lat_05dgr_cap.nc -m ${DATA}/maps/map_lat10uni_to_lat05cap_aave.nc -o ~/rgr/lat10to05.nc ESMF_RegridWeightGen -s ${DATA}/sld/rgr/grd_lat_10dgr_uni.nc -d ${DATA}/sld/rgr/grd_lat_05dgr_cap.nc -w ${DATA}/maps/map_lat10uni_to_lat05cap_aave.nc --method conserve */ const char fnc_nm[]="nco_grd_mk()"; /* [sng] Function name */ const double rdn2dgr=180.0/M_PI; const double 
dgr2rdn=M_PI/180.0; const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */ const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */ const int dmn_nbr_3D=3; /* [nbr] Rank of 3-D grid variables */ const int dmn_nbr_grd_max=dmn_nbr_3D; /* [nbr] Maximum rank of grid variables */ const int itr_nbr_max=20; // [nbr] Maximum number of iterations const nc_type crd_typ=NC_DOUBLE; char *fl_out_tmp=NULL_CEWI; char *fl_out; char grd_area_nm[]="grid_area"; /* 20150830: NB ESMF_RegridWeightGen --user_areas looks for variable named "grid_area" */ char dmn_sz_nm[]="grid_dims"; char grd_crn_lat_nm[]="grid_corner_lat"; char grd_crn_lon_nm[]="grid_corner_lon"; char grd_crn_nm[]="grid_corners"; char grd_ctr_lat_nm[]="grid_center_lat"; char grd_ctr_lon_nm[]="grid_center_lon"; char grd_rnk_nm[]="grid_rank"; char grd_sz_nm[]="grid_size"; char msk_nm[]="grid_imask"; double *grd_ctr_lat; /* [dgr] Latitude centers of grid */ double *grd_ctr_lon; /* [dgr] Longitude centers of grid */ double *grd_crn_lat; /* [dgr] Latitude corners of grid */ double *grd_crn_lon; /* [dgr] Longitude corners of grid */ double *area; /* [sr] Area of grid */ double *lat_bnd=NULL_CEWI; /* [dgr] Latitude boundaries of rectangular grid */ double *lat_crn=NULL; /* [dgr] Latitude corners of rectangular grid */ double *lat_ctr=NULL_CEWI; /* [dgr] Latitude centers of rectangular grid */ double *lat_ntf=NULL; /* [dgr] Latitude interfaces of rectangular grid */ double *lat_wgt=NULL; /* [dgr] Latitude weights of rectangular grid */ double *lon_bnd=NULL_CEWI; /* [dgr] Longitude boundaries of rectangular grid */ double *lon_crn=NULL; /* [dgr] Longitude corners of rectangular grid */ double *lon_ctr=NULL_CEWI; /* [dgr] Longitude centers of rectangular grid */ double *lon_ntf=NULL; /* [dgr] Longitude interfaces of rectangular grid */ double area_ttl=0.0; /* [frc] Exact sum of area */ double lat_crv; /* [dgr] Latitudinal curvilinearity */ double lon_crv; /* [dgr] Longitudinal curvilinearity */ double lat_nrt; /* 
[dgr] Latitude of northern edge of grid */ double lat_sth; /* [dgr] Latitude of southern edge of grid */ double lat_wgt_ttl=0.0; /* [frc] Actual sum of quadrature weights */ double lat_wgt_gss; /* [frc] Latitude weight estimated from interface latitudes */ double lon_est; /* [dgr] Longitude of eastern edge of grid */ double lon_wst; /* [dgr] Longitude of western edge of grid */ double lon_ncr; /* [dgr] Longitude increment */ double lat_ncr; /* [dgr] Latitude increment */ double lon_spn; /* [dgr] Longitude span */ double lat_spn; /* [dgr] Latitude span */ double *wgt_Gss=NULL; // [frc] Gaussian weights double precision int *msk=NULL; /* [flg] Mask of grid */ int *dmn_sz_int; /* [nbr] Array of dimension sizes of grid */ int dmn_ids[dmn_nbr_grd_max]; /* [id] Dimension IDs array for output variable */ int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int fl_out_fmt=NC_FORMAT_CLASSIC; /* [enm] Output file format */ int out_id; /* I [id] Output netCDF file ID */ int rcd=NC_NOERR; int area_id; /* [id] Area variable ID */ int dmn_id_grd_crn; /* [id] Grid corners dimension ID */ int dmn_id_grd_rnk; /* [id] Grid rank dimension ID */ int dmn_id_grd_sz; /* [id] Grid size dimension ID */ int dmn_sz_int_id; /* [id] Grid dimension sizes ID */ int grd_crn_lat_id; /* [id] Grid corner latitudes variable ID */ int grd_crn_lon_id; /* [id] Grid corner longitudes variable ID */ int grd_ctr_lat_id; /* [id] Grid center latitudes variable ID */ int grd_ctr_lon_id; /* [id] Grid center longitudes variable ID */ int itr_cnt; /* Iteration counter */ int msk_id; /* [id] Mask variable ID */ long dmn_srt[dmn_nbr_grd_max]; long dmn_cnt[dmn_nbr_grd_max]; long bnd_nbr; /* [nbr] Number of bounds in gridcell */ long col_nbr; /* [nbr] Number of columns in grid */ long crn_idx; /* [idx] Counting index for corners */ long grd_crn_nbr; /* [nbr] Number of corners in gridcell */ long grd_rnk_nbr; /* [nbr] Number of dimensions in grid */ long grd_sz_nbr; /* [nbr] Number of gridcells in grid */ 
long idx2; /* [idx] Counting index for unrolled grids */ long idx; /* [idx] Counting index for unrolled grids */ long lat_idx2; /* [idx] Counting index for unrolled latitude */ long lat_idx; long lat_nbr; /* [nbr] Number of latitudes in grid */ long lon_idx2; /* [idx] Counting index for unrolled longitude */ long lon_idx; long lon_nbr; /* [nbr] Number of longitudes in grid */ nco_bool FORCE_APPEND=False; /* Option A */ nco_bool FORCE_OVERWRITE=True; /* Option O */ nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_CREATE=rgr->flg_uio; /* [flg] Create (netCDF3-only) file(s) with unbuffered I/O */ nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool WRT_TMP_FL=False; /* [flg] Write output to temporary file */ nco_bool flg_grd_1D=False; nco_bool flg_grd_2D=False; nco_bool flg_grd_crv=False; nco_bool flg_s2n=True; /* I [enm] Latitude grid-direction is South-to-North */ nco_grd_2D_typ_enm grd_typ; /* [enm] Grid-type enum */ nco_grd_lat_drc_enm lat_drc; /* [enm] Latitude grid-direction enum */ nco_grd_lat_typ_enm lat_typ; /* [enm] Latitude grid-type enum */ nco_grd_lon_typ_enm lon_typ; /* [enm] Longitude grid-type enum */ size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ dfl_lvl=rgr->dfl_lvl; grd_typ=rgr->grd_typ; /* [enm] Grid type */ fl_out=rgr->fl_grd; fl_out_fmt=rgr->fl_out_fmt; lat_drc=rgr->lat_drc; /* [enm] Latitude grid direction */ lat_typ=rgr->lat_typ; /* [enm] Latitude grid type */ lon_typ=rgr->lon_typ; /* [enm] Longitude grid type */ lat_nbr=rgr->lat_nbr; /* [nbr] Number of latitudes in grid */ lon_nbr=rgr->lon_nbr; /* [nbr] Number of longitudes in grid */ lat_crv=rgr->lat_crv; /* [dgr] Latitude curvilinearity */ lon_crv=rgr->lon_crv; /* [dgr] Longitude curvilinearity */ lat_sth=rgr->lat_sth; /* [dgr] Latitude of southern edge of grid */ lon_wst=rgr->lon_wst; /* [dgr] Longitude of western edge of grid 
*/ lat_nrt=rgr->lat_nrt; /* [dgr] Latitude of northern edge of grid */ lon_est=rgr->lon_est; /* [dgr] Longitude of eastern edge of grid */ /* Use curvilinear coordinates (lat and lon are 2D arrays) if flg_crv already set or it lat_crv or lon_crv set */ if(lat_crv != 0.0 || lon_crv != 0.0 || rgr->flg_crv) flg_grd_crv=True; if(lat_drc == nco_grd_lat_drc_n2s) flg_s2n=False; /* Assume 2D grid */ flg_grd_2D=True; grd_rnk_nbr=dmn_nbr_2D; /* Assume quadrilaterals */ grd_crn_nbr=4; /* Assume rectangles */ bnd_nbr=2; col_nbr=lat_nbr*lon_nbr; grd_sz_nbr=lat_nbr*lon_nbr; /* Allocate space for output data */ area=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); dmn_sz_int=(int *)nco_malloc(grd_rnk_nbr*nco_typ_lng((nc_type)NC_INT)); msk=(int *)nco_malloc(grd_sz_nbr*nco_typ_lng((nc_type)NC_INT)); lat_bnd=(double *)nco_malloc(lat_nbr*bnd_nbr*nco_typ_lng(crd_typ)); lat_crn=(double *)nco_malloc(lat_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lat_ctr=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ)); lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lon_bnd=(double *)nco_malloc(lon_nbr*bnd_nbr*nco_typ_lng(crd_typ)); lon_crn=(double *)nco_malloc(lon_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_ctr=(double *)nco_malloc(lon_nbr*nco_typ_lng(crd_typ)); lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ)); wgt_Gss=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); grd_ctr_lat=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); grd_ctr_lon=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); grd_crn_lat=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ)); grd_crn_lon=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ)); /* Define variable values */ int lon_psn=int_CEWI; /* [idx] Ordinal position of longitude in rectangular grid dimension-size array */ int lat_psn=int_CEWI; /* [idx] Ordinal position of latitude in rectangular grid dimension-size array */ if(grd_rnk_nbr == 
dmn_nbr_2D){ lon_psn=0; /* SCRIP introduced [lon,lat] convention because more natural for Fortran */ lat_psn=1; } /* !flg_grd_in_2D */ dmn_sz_int[lon_psn]=lon_nbr; dmn_sz_int[lat_psn]=lat_nbr; for(idx=0;idx<grd_sz_nbr;idx++) msk[idx]=1; /* Compute rectangular arrays NB: Much is a more-generic rewrite of map/map_grd.F90:map_grd_mk() */ /* 20150827: Old rule: Longitude grid was entirely specified by one of four longitude map tokens: Grn_ctr,Grn_wst,180_ctr,180_wst New rule: User may specify bounds (lon_wst,lon_est,lat_sth,lat_nrt) independently of grid token Such bounds ALWAYS refer bounding box interface edges, NEVER to centers of first last gridcells Bounds and number of gridcells completely determine uniform grid so former longitude-type tokens have no effect when bounds specified (so letting grid-type tokens affect grid would over-determine grid and lead to errors) Hence, grid-type tokens may be used as short-hand to specify grids but may not be required to exist later (because regional grids would not have specified them) Grid grid-type tokens lon_bb/lat_bb imply bounding box was originally used to specify bounds 1x1 degree global grid with first longitude centered at Greenwich: --lon_nbr=360 --lon_typ Grn_ctr --lon_nbr=360 --lon_wst=-0.5 --lon_est=359.5 1x1 degree global grid with Greenwich at west edge of first longitude: --lon_nbr=360 --lon_typ Grn_wst --lon_nbr=360 --lon_wst=0.0 --lon_est=360.0 1x1 degree regional grid, total size 9x9 degrees, Greenwich at center of middle gridcell: --lon_nbr=9 --lon_wst=-4.5 --lon_est=4.5 1x1 degree regional grid, total size 10x10 degrees, Greenwich at east/west edges of middle two gridcells --lon_nbr=10 --lon_wst=-5.0 --lon_est=5.0 */ /* Were east/west longitude bounds set explicitly or implicitly? 
NB: This is redundant since it was done in nco_rgr_ini(), yet better safe than sorry */ if(lon_wst != NC_MAX_DOUBLE || lon_est != NC_MAX_DOUBLE) lon_typ=rgr->lon_typ=nco_grd_lon_bb; if(lon_wst == NC_MAX_DOUBLE){ /* Precomputed longitude grids begin with longitude 0.0 or -180.0 degrees */ switch(lon_typ){ case nco_grd_lon_bb: case nco_grd_lon_Grn_ctr: case nco_grd_lon_Grn_wst: lon_wst=0.0; break; case nco_grd_lon_180_ctr: case nco_grd_lon_180_wst: lon_wst=-180.0; break; default: nco_dfl_case_generic_err(); break; } /* !lon_typ */ } /* !lon */ if(lon_est == NC_MAX_DOUBLE){ /* Precomputed longitude grids end with longitude 360.0 or 180.0 degrees */ switch(lon_typ){ case nco_grd_lon_bb: case nco_grd_lon_Grn_ctr: case nco_grd_lon_Grn_wst: lon_est=360.0; break; case nco_grd_lon_180_ctr: case nco_grd_lon_180_wst: lon_est=180.0; break; default: nco_dfl_case_generic_err(); break; } /* !lon_typ */ } /* !lon */ /* Determine longitude increment from span of pre-centered bounding box (centering will not change span) */ lon_spn=lon_est-lon_wst; lon_ncr=lon_spn/lon_nbr; /* Centering: If user did not set explicit longitude bounds then... */ if(lon_typ != nco_grd_lon_bb) /* map_lon_ctr_typ determines whether lon_wst refers to cell center or Western edge */ if((lon_typ == nco_grd_lon_Grn_ctr) || (lon_typ == nco_grd_lon_180_ctr)) lon_wst=lon_wst-(lon_ncr/2.0); /* Re-derive lon_est from lon_wst and lon_nbr (more fundamental properties) */ lon_est=lon_wst+lon_ncr*lon_nbr; /* lon_wst and lon_est have been set and will not change */ assert(lon_wst < lon_est); lon_ntf[0L]=lon_wst; lon_ntf[lon_nbr]=lon_est; for(lon_idx=1L;lon_idx<lon_nbr;lon_idx++) lon_ntf[lon_idx]=lon_ntf[0L]+lon_idx*lon_ncr; /* Ensure rounding errors do not produce unphysical grid */ lon_ntf[lon_nbr]=lon_ntf[0L]+lon_spn; /* Finished with longitude, now tackle latitude */ /* Were south/north latitude bounds set explicitly or implicitly? 
*/ // if(lat_sth != NC_MAX_DOUBLE || lat_nrt != NC_MAX_DOUBLE) lon_typ=rgr->lat_typ=nco_grd_lat_bb; if(lat_sth == NC_MAX_DOUBLE) lat_sth=-90.0; if(lat_nrt == NC_MAX_DOUBLE) lat_nrt=90.0; /* Determine latitude increment from span of pre-centered bounding box (centering will not change span) */ lat_spn=lat_nrt-lat_sth; lat_ncr=lat_spn/lat_nbr; const long lat_nbr_hlf=lat_nbr/2L; // [nbr] Half number of latitudes (e.g., lat_nbr_hlf=32 for lat_nbr=64 and 65) double *lat_sin=NULL; // [frc] Sine of Gaussian latitudes double precision /* Create S->N grid. If user requested N->S, flip grid at end */ // if(flg_s2n) lat_ntf[0L]=lat_sth; else lat_ntf[0L]=lat_nrt; lat_ntf[0L]=lat_sth; switch(lat_typ){ case nco_grd_lat_fv: lat_ncr=lat_spn/(lat_nbr-1L); lat_ntf[1L]=lat_ntf[0L]+0.5*lat_ncr; for(lat_idx=2L;lat_idx<lat_nbr;lat_idx++) lat_ntf[lat_idx]=lat_ntf[1L]+(lat_idx-1L)*lat_ncr; break; case nco_grd_lat_eqa: lat_ncr=lat_spn/lat_nbr; for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++) lat_ntf[lat_idx]=lat_ntf[0L]+lat_idx*lat_ncr; break; case nco_grd_lat_gss: lat_sin=(double *)nco_malloc(lat_nbr*sizeof(double)); (void)nco_lat_wgt_gss(lat_nbr,True,lat_sin,wgt_Gss); for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_ctr[lat_idx]=rdn2dgr*asin(lat_sin[lat_idx]); /* First guess for lat_ntf is midway between Gaussian abscissae */ for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++) lat_ntf[lat_idx]=0.5*(lat_ctr[lat_idx-1L]+lat_ctr[lat_idx]); /* Iterate guess until area between interfaces matches Gaussian weight (compute for one hemisphere, make other symmetric) */ for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++){ double fofx_at_x0; /* [frc] Function to iterate evaluated at current guess */ double dfdx_at_x0; /* [frc] Derivative of equation evaluated at current guess */ const double eps_rlt_cnv=1.0e-15; // Convergence criterion (1.0e-16 pushes double precision to the brink) itr_cnt=0; lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L])); fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss; 
while(fabs(fofx_at_x0) > eps_rlt_cnv){ /* Newton-Raphson iteration: Let x=lat_ntf[lat_idx], y0=lat_ntf[lat_idx-1L], gw = Gaussian weight (exact solution) f(x)=sin(dgr2rdn*x)-sin(dgr2rdn*y0)-gw=0 # s2n grid f(x)=sin(dgr2rdn*y0)-sin(dgr2rdn*x)-gw=0 # n2s grid dfdx(x)= dgr2rdn*cos(dgr2rdn*x) # s2n grid dfdx(x)=-dgr2rdn*cos(dgr2rdn*x) # n2s grid x_better=x0-f(x0)/f'(x0) */ dfdx_at_x0=dgr2rdn*cos(dgr2rdn*lat_ntf[lat_idx]); /* 20190613: n2s latitudes are constructed s2n and flipped to n2s later Hence next line is commented-out in construction mode but used in infer mode */ // if(!flg_s2n) dfdx_at_x0=-dfdx_at_x0; lat_ntf[lat_idx]+=fofx_at_x0/dfdx_at_x0; /* NB: not sure why this is minus not plus but it works :) */ lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L])); fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss; if(++itr_cnt > itr_nbr_max){ (void)fprintf(stdout,"%s: ERROR %s reports convergence only %g after %d iterations for lat_idx = %ld\n",nco_prg_nm_get(),fnc_nm,fabs(fofx_at_x0),itr_nbr_max,lat_idx); nco_exit(EXIT_FAILURE); } /* endif */ } /* !while */ } /* !lat_idx */ /* Use Gaussian grid symmetry to obtain same interfaces in both hemispheres (avoids cumulative rounding errors) */ if(lat_nbr%2){ /* lat_nbr is odd */ for(lat_idx=1L;lat_idx<=lat_nbr_hlf+1L;lat_idx++) lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx+1L]; }else{ /* lat_nbr is even */ for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++) lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx]; } /* !flg_lat_evn */ break; default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ /* Ensure rounding errors do not produce unphysical grid */ lat_ntf[lat_nbr]=lat_nrt; if(nco_dbg_lvl_get() > nco_dbg_old){ (void)fprintf(stderr,"%s: DEBUG %s Gaussian abscissae/interfaces for lat_nbr=%ld\n",nco_prg_nm_get(),fnc_nm,lat_nbr); (void)fprintf(stderr,"idx\tlat_ctr\tlat_ntf\tntf_p1\n"); for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){ 
(void)fprintf(stderr,"%ld\t%20.15f\t%20.15f\t%20.15f\n",lat_idx,lat_ctr[lat_idx],lat_ntf[lat_idx],lat_ntf[lat_idx+1L]); } /* !lat_idx */ } /* !dbg */ /* Always define longitude centers midway between interfaces */ for(lon_idx=0L;lon_idx<=lon_nbr-1L;lon_idx++) lon_ctr[lon_idx]=0.5*(lon_ntf[lon_idx]+lon_ntf[lon_idx+1L]); /* Many grids have center latitude equally spaced between interfaces */ if(lat_typ != nco_grd_lat_fv && lat_typ != nco_grd_lat_gss){ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_ctr[lat_idx]=0.5*(lat_ntf[lat_idx]+lat_ntf[lat_idx+1L]); } /* !lat_typ */ /* Cap grids excepted---they place centers of first/last gridcells at poles */ if(lat_typ == nco_grd_lat_fv){ lat_ctr[0L]=lat_ntf[0L]; for(lat_idx=1L;lat_idx<lat_nbr-1L;lat_idx++) lat_ctr[lat_idx]=0.5*(lat_ntf[lat_idx]+lat_ntf[lat_idx+1L]); lat_ctr[lat_nbr-1L]=lat_ntf[lat_nbr]; } /* !cap */ /* Gaussian grid centerpoints are defined by solutions to Legendre polynomials */ if(lat_typ == nco_grd_lat_gss){ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_ctr[lat_idx]=rdn2dgr*asin(lat_sin[lat_idx]); } /* !Gaussian */ for(idx=0L;idx<lon_nbr;idx++){ lon_bnd[2*idx]=lon_ntf[idx]; lon_bnd[2*idx+1L]=lon_ntf[idx+1L]; } /* !idx */ for(idx=0L;idx<lat_nbr;idx++){ lat_bnd[2*idx]=lat_ntf[idx]; lat_bnd[2*idx+1L]=lat_ntf[idx+1L]; } /* !idx */ if(nco_dbg_lvl_get() >= nco_dbg_crr){ for(idx=0L;idx<lat_nbr;idx++){ (void)fprintf(stdout,"lat[%li] = %g, vertices = ",idx,lat_ctr[idx]); for(int bnd_idx=0L;bnd_idx<bnd_nbr;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lat_bnd[bnd_nbr*idx+bnd_idx],bnd_idx == bnd_nbr-1 ? 
"]\n" : ", "); } /* end loop over lat */ } /* endif dbg */ /* Use centers and boundaries to diagnose latitude weights */ switch(lat_typ){ case nco_grd_lat_eqa: case nco_grd_lat_fv: for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=fabs(sin(dgr2rdn*lat_ntf[lat_idx+1L])-sin(dgr2rdn*lat_ntf[lat_idx])); break; case nco_grd_lat_gss: for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=wgt_Gss[lat_idx]; break; default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ /* Fuzzy test of latitude weight normalization 20180903 Tolerance threshold of eps_rlt_max=1.0e-14 is too strict for Gaussian grids somewhere lat_nbr >~ 150 20180904 Tolerance threshold of eps_rlt_max=1.0e-12 allows Gaussian grids like ECMWF O1280 Newton-Raphson method of interface determination may need improvement to fix that Tolerance threshold of 1.0e-14 works for all relevant E3SM Uniform and Cap grids */ //const double eps_rlt_max=1.0e-14; /* [frc] Round-off error tolerance: Used 1.0e-14 until 20180904 */ const double eps_rlt_max=1.0e-12; /* [frc] Round-off error tolerance: Used 1.0e-12 since 20180904 */ double lat_wgt_ttl_xpc; /* [frc] Expected sum of latitude weights */ lat_wgt_ttl=0.0; for(idx=0L;idx<lat_nbr;idx++) lat_wgt_ttl+=lat_wgt[idx]; lat_wgt_ttl_xpc=fabs(sin(dgr2rdn*lat_bnd[2*(lat_nbr-1)+1L])-sin(dgr2rdn*lat_bnd[0L])); if(grd_typ != nco_grd_2D_unk && 1.0-lat_wgt_ttl/lat_wgt_ttl_xpc > eps_rlt_max){ (void)fprintf(stdout,"%s: ERROR %s reports grid normalization does not meet precision tolerance eps_rlt_max = %20.15f\nlat_wgt_ttl = %20.15f, lat_wgt_ttl_xpc = %20.15f, lat_wgt_frc = %20.15f, eps_rlt = %20.15f\n",nco_prg_nm_get(),fnc_nm,eps_rlt_max,lat_wgt_ttl,lat_wgt_ttl_xpc,lat_wgt_ttl/lat_wgt_ttl_xpc,1.0-lat_wgt_ttl/lat_wgt_ttl_xpc); nco_exit(EXIT_FAILURE); } /* !imprecise */ /* 20180831 Code above assumes grids run S->N User can request N->S grids with --rgr lat_drc=n2s If so, flip grid before unrolling into output arrays */ if(!flg_s2n){ double *lat_ctr_tmp=NULL_CEWI; /* 
[dgr] Temporary Latitude centers of rectangular grid */ double *lat_wgt_tmp=NULL; /* [dgr] Temporary Latitude weights of rectangular grid */ double *lat_ntf_tmp=NULL; /* [dgr] Temporary Latitude interfaces of rectangular grid */ lat_ctr_tmp=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lat_ntf_tmp=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ)); lat_wgt_tmp=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); long tmp_idx; /* [idx] Temporary index for swapping values */ for(idx=0L;idx<lat_nbr;idx++){ lat_ctr_tmp[idx]=lat_ctr[idx]; lat_wgt_tmp[idx]=lat_wgt[idx]; } /* !idx */ for(idx=0L;idx<lat_nbr;idx++){ tmp_idx=lat_nbr-idx-1L; lat_ctr[idx]=lat_ctr_tmp[tmp_idx]; lat_wgt[idx]=lat_wgt_tmp[tmp_idx]; } /* !idx */ for(idx=0L;idx<lat_nbr+1L;idx++){ lat_ntf_tmp[idx]=lat_ntf[idx]; } /* !idx */ for(idx=0L;idx<lat_nbr+1L;idx++){ tmp_idx=lat_nbr+1L-idx-1L; /* NB: Subtle index difference */ lat_ntf[idx]=lat_ntf_tmp[tmp_idx]; } /* !idx */ for(idx=0L;idx<lat_nbr;idx++){ lat_bnd[2*idx]=lat_ntf[idx]; lat_bnd[2*idx+1L]=lat_ntf[idx+1L]; } /* !idx */ if(lat_ctr_tmp) lat_ctr_tmp=(double *)nco_free(lat_ctr_tmp); if(lat_ntf_tmp) lat_ntf_tmp=(double *)nco_free(lat_ntf_tmp); if(lat_wgt_tmp) lat_wgt_tmp=(double *)nco_free(lat_wgt_tmp); } /* !flg_s2n */ assert(grd_crn_nbr == 4); for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++){ idx=grd_crn_nbr*lon_idx; lon_crn[idx]=lon_ntf[lon_idx]; lon_crn[idx+1L]=lon_ntf[lon_idx+1L]; lon_crn[idx+2L]=lon_ntf[lon_idx+1L]; lon_crn[idx+3L]=lon_ntf[lon_idx]; } /* !lon_idx */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){ idx=grd_crn_nbr*lat_idx; lat_crn[idx]=lat_ntf[lat_idx]; lat_crn[idx+1L]=lat_ntf[lat_idx]; lat_crn[idx+2L]=lat_ntf[lat_idx+1L]; lat_crn[idx+3L]=lat_ntf[lat_idx+1L]; } /* !lat_idx */ /* Stuff rectangular arrays into unrolled arrays */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){ for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++){ idx=lat_idx*lon_nbr+lon_idx; grd_ctr_lat[idx]=lat_ctr[lat_idx]; grd_ctr_lon[idx]=lon_ctr[lon_idx]; 
for(crn_idx=0L;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; lat_idx2=lat_idx*grd_crn_nbr+crn_idx; lon_idx2=lon_idx*grd_crn_nbr+crn_idx; grd_crn_lat[idx2]=lat_crn[lat_idx2]; grd_crn_lon[idx2]=lon_crn[lon_idx2]; } /* !crn */ } /* !lon */ } /* !lat */ if(flg_grd_crv){ /* Impose curvilinearity by adding lon_crv offset to each row relative to previous row, and lat_crv offset to each column relative to previous column */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){ for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++){ idx=lat_idx*lon_nbr+lon_idx; grd_ctr_lat[idx]+=lon_idx*lat_crv; grd_ctr_lon[idx]+=lat_idx*lon_crv; for(crn_idx=0L;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; lat_idx2=lat_idx*grd_crn_nbr+crn_idx; lon_idx2=lon_idx*grd_crn_nbr+crn_idx; grd_crn_lat[idx2]=lat_crn[lat_idx2]; grd_crn_lon[idx2]=lon_crn[lon_idx2]; if(crn_idx == 0L || crn_idx == 1L){ grd_crn_lat[idx2]+=lat_idx*lat_crv; /* LL, LR */ grd_crn_lon[idx2]+=lat_idx*lon_crv; /* LL, LR */ }else if(crn_idx == 2L || crn_idx == 3L){ grd_crn_lat[idx2]+=(lat_idx+1L)*lat_crv; /* UL, UR */ grd_crn_lon[idx2]+=(lat_idx+1L)*lon_crv; /* UL, UR */ } /* !crn */ } /* !crn */ } /* !lon */ } /* !lat */ } /* !flg_grd_crv */ /* 20190613: Convert CW quadrilaterals to CCW quadrilaterals so TempestRemap accepts grids Default construction/inferral method orders corners CCW and CW for s2n and n2s grids, respectively */ if(!flg_s2n){ nco_bool flg_ccw; /* [flg] Gridcell is CCW */ const int rcr_lvl=1; /* [nbr] Recursion level (1 is top level, 2 and greater are recursed */ const int idx_ccw=0; /* [idx] Index of starting vertice for CCW check (Point A = tail side AB) */ for(idx=0L;idx<grd_sz_nbr;idx++){ idx2=grd_crn_nbr*idx; flg_ccw=nco_ccw_chk(grd_crn_lat+idx2,grd_crn_lon+idx2,grd_crn_nbr,idx_ccw,rcr_lvl); if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_vec) (void)fprintf(stderr,"%s: DEBUG %s reports nco_ccw_chk() tried to change idx = %lu from CW to CCW\n",nco_prg_nm_get(),fnc_nm,idx); } /* !idx */ } /* !flg_s2n */ 
if(nco_dbg_lvl_get() >= nco_dbg_std){ long int idx_crn_ll; long int idx_crn_lr; long int idx_crn_ur; long int idx_crn_ul; long idx_dbg; idx_dbg=rgr->idx_dbg; idx_crn_ll=grd_crn_nbr*idx_dbg+0L; idx_crn_lr=grd_crn_nbr*idx_dbg+1L; idx_crn_ur=grd_crn_nbr*idx_dbg+2L; idx_crn_ul=grd_crn_nbr*idx_dbg+3L; (void)fprintf(stderr,"%s: INFO %s idx_dbg = %li, Center [lat,lon]=[%g,%g]; Corners LL [%g,%g] LR [%g,%g] UR [%g,%g] UL [%g,%g]\n",nco_prg_nm_get(),fnc_nm,idx_dbg,grd_ctr_lat[idx_dbg],grd_ctr_lon[idx_dbg],grd_crn_lat[idx_crn_ll],grd_crn_lon[idx_crn_ll],grd_crn_lat[idx_crn_lr],grd_crn_lon[idx_crn_lr],grd_crn_lat[idx_crn_ur],grd_crn_lon[idx_crn_ur],grd_crn_lat[idx_crn_ul],grd_crn_lon[idx_crn_ul]); } /* !dbg */ if(flg_grd_crv){ /* Area of arbitrary curvilinear grids requires spherical trigonometry */ nco_sph_plg_area(rgr,grd_crn_lat,grd_crn_lon,grd_sz_nbr,grd_crn_nbr,area); }else{ /* Area of rectangular spherical zones from elementary calculus results 20150906: Half-angle formulae for better conditioning improve area normalization for 801x1600 by 2.0e-15 area[lat_idx*lon_nbr+lon_idx]=dgr2rdn*(lon_bnd[2*lon_idx+1L]-lon_bnd[2*lon_idx])*2.0*(sin(0.5*dgr2rdn*lat_bnd[2*lat_idx+1L])*cos(0.5*dgr2rdn*lat_bnd[2*lat_idx+1L])-sin(0.5*dgr2rdn*lat_bnd[2*lat_idx])*cos(0.5*dgr2rdn*lat_bnd[2*lat_idx])); Gain not worth the extra complexity */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++) /* fabs() ensures positive area in n2s grids */ area[lat_idx*lon_nbr+lon_idx]=fabs(dgr2rdn*(lon_bnd[2*lon_idx+1L]-lon_bnd[2*lon_idx])*(sin(dgr2rdn*lat_bnd[2*lat_idx+1L])-sin(dgr2rdn*lat_bnd[2*lat_idx]))); } /* !flg_grd_2D */ if(nco_dbg_lvl_get() >= nco_dbg_sbr){ lat_wgt_ttl=0.0; area_ttl=0.0; if(flg_grd_2D){ (void)fprintf(stderr,"%s: INFO %s reports destination rectangular latitude grid:\n",nco_prg_nm_get(),fnc_nm); for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_wgt_ttl+=lat_wgt[lat_idx]; } /* !flg_grd_2D */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) 
for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++) area_ttl+=area[lat_idx*lon_nbr+lon_idx]; (void)fprintf(stdout,"lat_wgt_ttl = %20.15f, frc_lat_wgt = %20.15f, area_ttl = %20.15f, frc_area = %20.15f\n",lat_wgt_ttl,lat_wgt_ttl/2.0,area_ttl,area_ttl/(4.0*M_PI)); assert(area_ttl > 0.0); assert(area_ttl <= 4.0*M_PI); } /* endif dbg */ /* Open grid file */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); /* Define dimensions */ rcd=nco_def_dim(out_id,grd_crn_nm,grd_crn_nbr,&dmn_id_grd_crn); rcd=nco_def_dim(out_id,grd_sz_nm,grd_sz_nbr,&dmn_id_grd_sz); rcd=nco_def_dim(out_id,grd_rnk_nm,grd_rnk_nbr,&dmn_id_grd_rnk); int shuffle; /* [flg] Turn-on shuffle filter */ int deflate; /* [flg] Turn-on deflate filter */ deflate=(int)True; shuffle=NC_SHUFFLE; /* Define variables */ (void)nco_def_var(out_id,dmn_sz_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_rnk,&dmn_sz_int_id); /* NB: Too small to deflate */ (void)nco_def_var(out_id,grd_area_nm,(nc_type)crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&area_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,msk_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_sz,&msk_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,grd_ctr_lat_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,grd_ctr_lon_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lon_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_grd_sz; dmn_ids[1]=dmn_id_grd_crn; (void)nco_def_var(out_id,grd_crn_lat_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lat_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_grd_sz; dmn_ids[1]=dmn_id_grd_crn; 
(void)nco_def_var(out_id,grd_crn_lon_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lon_id,shuffle,deflate,dfl_lvl); /* Define global and "units" attributes */ char *att_val; rcd=nco_char_att_put(out_id,NULL,"title",rgr->grd_ttl); rcd=nco_char_att_put(out_id,NULL,"Conventions","SCRIP"); const char usr_cpp[]=TKN2SNG(USER); /* [sng] Hostname from C pre-processor */ rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp); rcd=nco_char_att_put(out_id,NULL,"grid_generator","NCO"); (void)nco_hst_att_cat(out_id,rgr->cmd_ln); (void)nco_vrs_att_cat(out_id); rcd=nco_char_att_put(out_id,NULL,"latitude_grid_type",nco_grd_lat_sng(lat_typ)); rcd=nco_char_att_put(out_id,NULL,"longitude_grid_type",nco_grd_lon_sng(lon_typ)); rcd=nco_char_att_put(out_id,grd_area_nm,"long_name","Solid Angle Subtended on Source Grid"); rcd=nco_char_att_put(out_id,grd_area_nm,"standard_name","solid_angle"); rcd=nco_char_att_put(out_id,grd_area_nm,"units","steradian"); rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"long_name","Latitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"standard_name","latitude"); if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"bounds",grd_crn_lat_nm); rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"long_name","Longitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"standard_name","longitude"); if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees_east"); else rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"bounds",grd_crn_lon_nm); rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"long_name","Latitude of Grid Cell 
Vertices"); rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"standard_name","latitude"); if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"long_name","Longitude of Grid Cell Vertices"); rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"standard_name","longitude"); if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees_east"); else rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ rcd=nco_char_att_put(out_id,msk_nm,"long_name","Binary Integer Mask for Grid"); rcd=nco_char_att_put(out_id,msk_nm,"units","none"); /* Begin data mode */ (void)nco_enddef(out_id); /* Write variables */ dmn_srt[0]=0L; dmn_cnt[0]=grd_rnk_nbr; rcd=nco_put_vara(out_id,dmn_sz_int_id,dmn_srt,dmn_cnt,dmn_sz_int,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,msk_id,dmn_srt,dmn_cnt,msk,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,grd_ctr_lat_id,dmn_srt,dmn_cnt,grd_ctr_lat,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,grd_ctr_lon_id,dmn_srt,dmn_cnt,grd_ctr_lon,crd_typ); dmn_srt[0]=0L; dmn_srt[1]=0L; dmn_cnt[0]=grd_sz_nbr; dmn_cnt[1]=grd_crn_nbr; rcd=nco_put_vara(out_id,grd_crn_lat_id,dmn_srt,dmn_cnt,grd_crn_lat,crd_typ); dmn_srt[0]=0L; dmn_srt[1]=0L; dmn_cnt[0]=grd_sz_nbr; dmn_cnt[1]=grd_crn_nbr; rcd=nco_put_vara(out_id,grd_crn_lon_id,dmn_srt,dmn_cnt,grd_crn_lon,crd_typ); /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); fl_out=rgr->fl_skl; if(fl_out){ /* Write skeleton data file on requested grid Skeleton file can then be 
populated with data for testing */ char *area_nm; char *bnd_nm; // char *bnd_tm_nm; char *col_nm_out; char *lat_nm_out; /* [sng] Name of output dimension for latitude */ char *lat_wgt_nm; char *lon_nm_out; /* [sng] Name of variable to recognize as longitude */ char *lat_bnd_nm; /* [sng] Name of latitude boundary variable */ char *lon_bnd_nm; /* [sng] Name of longitude boundary variable */ // int area_id; /* [id] Variable ID for area */ int dmn_id_bnd; /* [id] Dimension ID */ //int dmn_id_bnd_tm; /* [id] Dimension ID */ int dmn_id_col; /* [id] Dimension ID */ int dmn_id_lat; /* [id] Dimension ID */ int dmn_id_lon; /* [id] Dimension ID */ int lat_bnd_id; /* [id] Variable ID for lat_bnds/lat_vertices */ int lat_id; /* [id] Variable ID for latitude */ int lat_wgt_id; /* [id] Variable ID for latitude weight */ int lon_bnd_id; /* [id] Variable ID for lon_bnds/lon_vertices */ int lon_id; /* [id] Variable ID for longitude */ /* Use explicitly specified output names, if any, otherwise use input names (either explicitly specified or discovered by fuzzing) */ if(rgr->lat_nm_out) lat_nm_out=rgr->lat_nm_out; else lat_nm_out=(char *)strdup("lat"); if(rgr->lon_nm_out) lon_nm_out=rgr->lon_nm_out; else lon_nm_out=(char *)strdup("lon"); if(rgr->col_nm_out) col_nm_out=rgr->col_nm_out; else col_nm_out=(char *)strdup("ncol"); /* Name output dimensions */ area_nm=rgr->area_nm; bnd_nm=rgr->bnd_nm; //bnd_tm_nm=rgr->bnd_tm_nm; lat_bnd_nm=rgr->lat_bnd_nm; lat_wgt_nm=rgr->lat_wgt_nm; lon_bnd_nm=rgr->lon_bnd_nm; /* Use names discovered by fuzzing */ if(flg_grd_1D){ bnd_nm=rgr->vrt_nm; lat_bnd_nm=rgr->lat_vrt_nm; lon_bnd_nm=rgr->lon_vrt_nm; } /* !flg_grd_1D */ if(flg_grd_2D){ bnd_nm=rgr->bnd_nm; lat_bnd_nm=rgr->lat_bnd_nm; lon_bnd_nm=rgr->lon_bnd_nm; } /* !flg_grd_2D */ /* Open grid file */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); /* Define dimensions */ if(flg_grd_crv){ 
rcd=nco_def_dim(out_id,bnd_nm,grd_crn_nbr,&dmn_id_bnd); }else{ rcd=nco_def_dim(out_id,bnd_nm,bnd_nbr,&dmn_id_bnd); } /* !flg_grd_crv */ if(flg_grd_1D){ rcd=nco_def_dim(out_id,col_nm_out,col_nbr,&dmn_id_col); } /* !flg_grd_1D */ if(flg_grd_2D){ rcd=nco_def_dim(out_id,lat_nm_out,lat_nbr,&dmn_id_lat); rcd=nco_def_dim(out_id,lon_nm_out,lon_nbr,&dmn_id_lon); } /* !flg_grd_2D */ /* Define new coordinates and variables in regridded file */ if(flg_grd_1D){ (void)nco_def_var(out_id,lat_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_col,&lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,lon_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_col,&lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_col; dmn_ids[1]=dmn_id_bnd; (void)nco_def_var(out_id,lat_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_col; dmn_ids[1]=dmn_id_bnd; (void)nco_def_var(out_id,lon_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_1D,&dmn_id_col,&area_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl); } /* !flg_grd_1D */ if(flg_grd_crv){ dmn_ids[0]=dmn_id_lat; dmn_ids[1]=dmn_id_lon; (void)nco_def_var(out_id,lat_nm_out,crd_typ,dmn_nbr_2D,dmn_ids,&lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,lon_nm_out,crd_typ,dmn_nbr_2D,dmn_ids,&lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_2D,dmn_ids,&area_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_lat; dmn_ids[1]=dmn_id_lon; dmn_ids[2]=dmn_id_bnd; 
(void)nco_def_var(out_id,lat_bnd_nm,crd_typ,dmn_nbr_3D,dmn_ids,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,lon_bnd_nm,crd_typ,dmn_nbr_3D,dmn_ids,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); }else if(flg_grd_2D){ (void)nco_def_var(out_id,lat_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_lat,&lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,lon_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_lon,&lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_lat; dmn_ids[1]=dmn_id_bnd; (void)nco_def_var(out_id,lat_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_lon; dmn_ids[1]=dmn_id_bnd; (void)nco_def_var(out_id,lon_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,lat_wgt_nm,crd_typ,dmn_nbr_1D,&dmn_id_lat,&lat_wgt_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_wgt_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_lat; dmn_ids[1]=dmn_id_lon; (void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_2D,dmn_ids,&area_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl); } /* !flg_grd_2D */ /* Define attributes */ rcd=nco_char_att_put(out_id,NULL,"title",rgr->grd_ttl); rcd=nco_char_att_put(out_id,NULL,"Conventions","CF-1.6"); rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp); (void)nco_hst_att_cat(out_id,rgr->cmd_ln); (void)nco_vrs_att_cat(out_id); rcd=nco_char_att_put(out_id,NULL,"latitude_grid_type",nco_grd_lat_sng(lat_typ)); rcd=nco_char_att_put(out_id,NULL,"longitude_grid_type",nco_grd_lon_sng(lon_typ)); rcd=nco_char_att_put(out_id,area_nm,"long_name","Solid angle subtended by gridcell"); 
rcd=nco_char_att_put(out_id,area_nm,"standard_name","solid_angle"); rcd=nco_char_att_put(out_id,area_nm,"units","steradian"); rcd=nco_char_att_put(out_id,lat_nm_out,"long_name","Latitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,lat_nm_out,"standard_name","latitude"); rcd=nco_char_att_put(out_id,lat_nm_out,"units","degrees_north"); rcd=nco_char_att_put(out_id,lat_nm_out,"axis","Y"); rcd=nco_char_att_put(out_id,lat_nm_out,"bounds",lat_bnd_nm); if(flg_grd_2D) att_val=strdup("Gridcell latitude interfaces"); else att_val=strdup("Gridcell latitude vertices"); rcd=nco_char_att_put(out_id,lat_bnd_nm,"long_name",att_val); if(flg_grd_2D) rcd=nco_char_att_put(out_id,lat_wgt_nm,"long_name","Latitude quadrature weights (normalized to sum to 2.0 on global grids)"); rcd=nco_char_att_put(out_id,lon_nm_out,"long_name","Longitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,lon_nm_out,"standard_name","longitude"); rcd=nco_char_att_put(out_id,lon_nm_out,"units","degrees_east"); rcd=nco_char_att_put(out_id,lon_nm_out,"axis","X"); rcd=nco_char_att_put(out_id,lon_nm_out,"bounds",lon_bnd_nm); if(flg_grd_2D) att_val=strdup("Gridcell longitude interfaces"); else att_val=strdup("Gridcell longitude vertices"); rcd=nco_char_att_put(out_id,lon_bnd_nm,"long_name",att_val); /* Begin data mode */ (void)nco_enddef(out_id); /* Write new coordinates and variables to regridded file */ if(flg_grd_1D){ dmn_srt[0]=0L; dmn_cnt[0]=col_nbr; (void)nco_put_vara(out_id,lat_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=col_nbr; (void)nco_put_vara(out_id,lon_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=col_nbr; dmn_cnt[1]=bnd_nbr; (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=col_nbr; dmn_cnt[1]=bnd_nbr; (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=col_nbr; (void)nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); } /* 
!flg_grd_1D */ if(flg_grd_crv){ dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=lon_nbr; (void)nco_put_vara(out_id,lat_id,dmn_srt,dmn_cnt,grd_ctr_lat,crd_typ); (void)nco_put_vara(out_id,lon_id,dmn_srt,dmn_cnt,grd_ctr_lon,crd_typ); (void)nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); dmn_srt[0]=dmn_srt[1]=0L;dmn_srt[2]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=lon_nbr; dmn_cnt[2]=grd_crn_nbr; (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt,dmn_cnt,grd_crn_lat,crd_typ); (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt,dmn_cnt,grd_crn_lon,crd_typ); }else if(flg_grd_2D){ dmn_srt[0]=0L; dmn_cnt[0]=lat_nbr; (void)nco_put_vara(out_id,lat_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=lon_nbr; (void)nco_put_vara(out_id,lon_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=lat_nbr; (void)nco_put_vara(out_id,lat_wgt_id,dmn_srt,dmn_cnt,lat_wgt,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=bnd_nbr; (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lon_nbr; dmn_cnt[1]=bnd_nbr; (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=lon_nbr; (void)nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); } /* !flg_grd_2D */ /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); } /* !fl_out */ /* Free memory associated with input file */ if(dmn_sz_int) dmn_sz_int=(int *)nco_free(dmn_sz_int); if(msk) msk=(int *)nco_free(msk); if(area) area=(double *)nco_free(area); if(grd_ctr_lat) grd_ctr_lat=(double *)nco_free(grd_ctr_lat); if(grd_ctr_lon) grd_ctr_lon=(double *)nco_free(grd_ctr_lon); if(grd_crn_lat) grd_crn_lat=(double *)nco_free(grd_crn_lat); if(grd_crn_lon) grd_crn_lon=(double *)nco_free(grd_crn_lon); if(lat_bnd) lat_bnd=(double *)nco_free(lat_bnd); if(lat_crn) lat_crn=(double *)nco_free(lat_crn); if(lat_ctr) lat_ctr=(double 
*)nco_free(lat_ctr); if(lat_ntf) lat_ntf=(double *)nco_free(lat_ntf); if(lat_sin) lat_sin=(double *)nco_free(lat_sin); if(lat_wgt) lat_wgt=(double *)nco_free(lat_wgt); if(lon_bnd) lon_bnd=(double *)nco_free(lon_bnd); if(lon_crn) lon_crn=(double *)nco_free(lon_crn); if(lon_ctr) lon_ctr=(double *)nco_free(lon_ctr); if(lon_ntf) lon_ntf=(double *)nco_free(lon_ntf); if(wgt_Gss) wgt_Gss=(double *)nco_free(wgt_Gss); return rcd; } /* !nco_grd_mk() */ int /* O [enm] Return code */ nco_grd_nfr /* [fnc] Infer SCRIP-format grid file from input data file */ (rgr_sct * const rgr) /* I/O [sct] Regridding structure */ { /* Purpose: Use grid information and guesswork to create SCRIP-format grid file from input data file Test curvilinear grids: ncks -O -D 1 --rgr infer --rgr grid=${DATA}/sld/rgr/grd_airs.nc ${DATA}/sld/raw/AIRS.2014.10.01.202.L2.TSurfStd.Regrid010.1DLatLon.nc ~/foo.nc ncks -O -D 1 --rgr infer --rgr grid=${DATA}/sld/rgr/grd_airs.nc ${DATA}/sld/raw/AIRS.2014.10.01.202.L2.TSurfStd.Regrid010.1DLatLon.hole.nc ~/foo.nc */ const char fnc_nm[]="nco_grd_nfr()"; /* [sng] Function name */ const double rdn2dgr=180.0/M_PI; const double dgr2rdn=M_PI/180.0; const int dmn_nbr_0D=0; /* [nbr] Rank of 0-D grid variables */ const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */ const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */ const int dmn_nbr_grd_max=4; /* [nbr] Maximum rank of grid variables (msk_[src/dst] could be rank 4) */ const int itr_nbr_max=20; // [nbr] Maximum number of iterations const int idx_ccw=0; /* [idx] Index of starting vertice for CCW check (Point A = tail side AB) */ const int rcr_lvl=1; /* [nbr] Recursion level (1 is top level, 2 and greater are recursed */ const nc_type crd_typ=NC_DOUBLE; char *area_nm_in=NULL; char *fl_in; char *fl_out; char *fl_out_tmp=NULL_CEWI; char *fl_pth_lcl=NULL; char *msk_nm_in=NULL; char dmn_nm[NC_MAX_NAME]; /* [sng] Dimension name */ /* SCRIP-format grid names are non-negotiable and thus fixed not dynamic */ char 
area_nm[]="grid_area"; /* 20150830: NB ESMF_RegridWeightGen --user_areas looks for variable named "grid_area" */ char dmn_sz_nm[]="grid_dims"; char grd_crn_lat_nm[]="grid_corner_lat"; char grd_crn_lon_nm[]="grid_corner_lon"; char grd_crn_nm[]="grid_corners"; char grd_ctr_lat_nm[]="grid_center_lat"; char grd_ctr_lon_nm[]="grid_center_lon"; char grd_rnk_nm[]="grid_rank"; char grd_sz_nm[]="grid_size"; char msk_nm[]="grid_imask"; char unt_sng[]="units"; /* netCDF-standard units attribute name */ double *grd_ctr_lat; /* [dgr] Latitude centers of grid */ double *grd_ctr_lon; /* [dgr] Longitude centers of grid */ double *grd_crn_lat; /* [dgr] Latitude corners of grid */ double *grd_crn_lon; /* [dgr] Longitude corners of grid */ double *area; /* [sr] Area of grid */ double *lat_bnd=NULL_CEWI; /* [dgr] Latitude boundaries of rectangular grid */ double *lat_crn=NULL; /* [dgr] Latitude corners of rectangular grid */ double *lat_ctr=NULL_CEWI; /* [dgr] Latitude centers of rectangular grid */ double *lat_ntf=NULL; /* [dgr] Latitude interfaces of rectangular grid */ double *lat_wgt=NULL; /* [dgr] Latitude weights of rectangular grid */ double *lon_bnd=NULL_CEWI; /* [dgr] Longitude boundaries of rectangular grid */ double *lon_crn=NULL; /* [dgr] Longitude corners of rectangular grid */ double *lon_ctr=NULL_CEWI; /* [dgr] Longitude centers of rectangular grid */ double *lon_ntf=NULL; /* [dgr] Longitude interfaces of rectangular grid */ double area_ttl=0.0; /* [frc] Exact sum of area */ //double lat_nrt; /* [dgr] Latitude of northern edge of grid */ double lat_sth; /* [dgr] Latitude of southern edge of grid */ double lat_wgt_ttl=0.0; /* [frc] Actual sum of quadrature weights */ double lat_wgt_gss; /* [frc] Latitude weight estimated from interface latitudes */ // double lon_est; /* [dgr] Longitude of eastern edge of grid */ double lon_wst; /* [dgr] Longitude of western edge of grid */ double lon_ncr; /* [dgr] Longitude increment */ double lat_ncr; /* [dgr] Latitude increment */ 
double lon_spn; /* [dgr] Longitude span */ double lat_spn; /* [dgr] Latitude span */ double mss_val_area_dbl; double mss_val_ctr_dbl; double mss_val_msk_dbl; int *msk=NULL; /* [flg] Mask of grid */ int *dmn_sz_int; /* [nbr] Array of dimension sizes of grid */ int dmn_ids[dmn_nbr_grd_max]; /* [id] Dimension IDs array for output variable */ int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int dmn_idx; /* [idx] Dimension index */ int fl_out_fmt=NC_FORMAT_CLASSIC; /* [enm] Output file format */ int in_id; /* I [id] Input netCDF file ID */ int md_open; /* [enm] Mode flag for nc_open() call */ int out_id; /* I [id] Output netCDF file ID */ int rcd=NC_NOERR; int area_id=NC_MIN_INT; /* [id] Area variable ID */ int dmn_id_grd_crn; /* [id] Grid corners dimension ID */ int dmn_id_grd_rnk; /* [id] Grid rank dimension ID */ int dmn_id_grd_sz; /* [id] Grid size dimension ID */ int dmn_sz_int_id; /* [id] Grid dimension sizes ID */ int grd_crn_lat_id; /* [id] Grid corner latitudes variable ID */ int grd_crn_lon_id; /* [id] Grid corner longitudes variable ID */ int grd_ctr_lat_id; /* [id] Grid center latitudes variable ID */ int grd_ctr_lon_id; /* [id] Grid center longitudes variable ID */ int itr_cnt; /* Iteration counter */ int lat_rnk; /* [nbr] Rank of latitude coordinate */ int lon_rnk; /* [nbr] Rank of longitude coordinate */ int lat_ctr_id=NC_MIN_INT; /* [id] Latitude centers of rectangular grid variable ID */ int lon_ctr_id=NC_MIN_INT; /* [id] Longitude centers of rectangular grid variable ID */ int lat_bnd_id=NC_MIN_INT; /* [id] Latitude centers of rectangular grid variable ID */ int lon_bnd_id=NC_MIN_INT; /* [id] Longitude centers of rectangular grid variable ID */ int msk_id=NC_MIN_INT; /* [id] Mask variable ID */ int msk_rnk_nbr; /* [id] Mask rank */ int mss_val_int_out=NC_MIN_INT; /* [nbr] Value that can be non-erroneously pointed to */ int val_two=2; /* [nbr] Value that can be non-erroneously pointed to */ int val_zero=0; /* [nbr] Value that can be 
non-erroneously pointed to */ int var_id; /* [id] Current variable ID */ long dmn_srt[dmn_nbr_grd_max]; long dmn_cnt[dmn_nbr_grd_max]; long bnd_idx; long bnd_nbr=NC_MIN_INT; /* [nbr] Number of bounds in gridcell */ long col_idx; long col_nbr; /* [nbr] Number of columns in grid */ long crn_idx; /* [idx] Counting index for corners */ long dmn_sz; /* [nbr] Size of current dimension */ long grd_crn_nbr; /* [nbr] Number of corners in gridcell */ long grd_rnk_nbr=int_CEWI; /* [nbr] Number of dimensions in grid */ long grd_sz_nbr; /* [nbr] Number of gridcells in grid */ long idx2; /* [idx] Counting index for unrolled grids */ long idx; /* [idx] Counting index for unrolled grids */ long idx_crn; long idx_ctr; long lat_idx2; /* [idx] Counting index for unrolled latitude */ long lat_idx; long lat_nbr; /* [nbr] Number of latitudes in grid */ long lon_idx2; /* [idx] Counting index for unrolled longitude */ long lon_idx; long lon_nbr; /* [nbr] Number of longitudes in grid */ long int idx_crn_ll; long int idx_crn_lr; long int idx_crn_ur; long int idx_crn_ul; nco_bool FL_RTR_RMT_LCN; nco_bool FORCE_APPEND=False; /* Option A */ nco_bool FORCE_OVERWRITE=True; /* Option O */ nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_CREATE=rgr->flg_uio; /* [flg] Create (netCDF3-only) file(s) with unbuffered I/O */ nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ nco_bool WRT_TMP_FL=False; /* [flg] Write output to temporary file */ nco_bool flg_1D_psd_rct_bnd=False; /* [flg] Unstructured input grid with pseudo-rectangular bounds */ nco_bool flg_ccw; /* [flg] Gridcell is CCW */ nco_bool flg_grd_1D=False; nco_bool flg_grd_2D=False; nco_bool flg_grd_crv=False; nco_bool flg_s2n=True; /* [enm] Latitude grid-direction is South-to-North */ nco_bool 
flg_wrt_crn=True; nco_bool flg_crn_grd_lat_lon=False; /* [flg] Curvilinear corner array ordered non-canonically as grd_nbr,lat_nbr,lon_nbr */ nco_bool use_mss_val_area=False; nco_bool has_mss_val_area=False; nco_bool has_mss_val_bnd=False; nco_bool has_mss_val_ctr=False; nco_bool has_mss_val_msk=False; nco_grd_2D_typ_enm grd_typ; /* [enm] Grid-type enum */ nco_grd_lat_typ_enm lat_typ; /* [enm] Latitude grid-type enum */ nco_grd_lon_typ_enm lon_typ; /* [enm] Longitude grid-type enum */ nco_grd_xtn_enm nco_grd_xtn=nco_grd_xtn_nil; /* [enm] Grid-extent enum */ nc_type msk_typ; ptr_unn msk_unn; size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ /* Algorithm: Read grid information from input data file (aka *_in) Close input file Once grid dimensions known, allocate output grid arrays (aka *_out) Open output file (aka grid-file) Use guesswork and standard algorithms to fill-in output arrays */ /* Duplicate (because nco_fl_mk_lcl() free()'s fl_in) */ fl_in=(char *)strdup(rgr->fl_in); /* Make sure file is on local system and is readable or die trying */ fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); /* Open file using appropriate buffer size hints and verbosity */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id); char *bnd_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as bounds */ char *col_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as column */ char *lat_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as latitude */ char *lon_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as longitude */ char *lat_nm_in=NULL_CEWI; /* [sng] Name of variable to recognize as latitude */ char *lon_nm_in=NULL_CEWI; /* [sng] Name of variable to recognize as longitude */ char *lat_bnd_nm=NULL_CEWI; /* [sng] Name of latitude boundary variable */ char *lon_bnd_nm=NULL_CEWI; /* [sng] Name of longitude 
boundary variable */ int dmn_id_bnd=NC_MIN_INT; /* [id] Dimension ID for spatial bounds */ int dmn_id_col=NC_MIN_INT; /* [id] Dimension ID for unstructured grids */ int dmn_id_lat=NC_MIN_INT; /* [id] Dimension ID for latitude */ int dmn_id_lon=NC_MIN_INT; /* [id] Dimension ID for longitude */ /* Begin CF-coordinates block */ cf_crd_sct *cf=NULL; char *rgr_var; /* [sng] Variable for special regridding treatment */ nco_bool flg_cf=False; /* [flg] Follow CF Coordinates convention to find and infer grid */ rgr_var=rgr->var_nm; if(rgr_var){ /* Infer grid from special variable Intended to be variable that has both horizontal dimensions and "coordinates" attribute, e.g., ncks --cdl -m ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc | grep coordinates 4LFTX_221_SPDY_S113:coordinates = "gridlat_221 gridlon_221" ; Usage: ncks -O -D 3 --rgr infer --rgr_var=4LFTX_221_SPDY_S113 --rgr grid=~/grd_narr.nc ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc ~/foo.nc */ char crd_sng[]="coordinates"; /* CF-standard coordinates attribute name */ cf=(cf_crd_sct *)nco_malloc(sizeof(cf_crd_sct)); cf->crd=False; /* [flg] CF coordinates information is complete */ cf->crd_id[0]=NC_MIN_INT; /* [id] Coordinate ID, first */ cf->crd_id[1]=NC_MIN_INT; /* [id] Coordinate ID, second */ cf->crd_nm[0]=NULL; /* [sng] Coordinate name, first */ cf->crd_nm[1]=NULL; /* [sng] Coordinate name, second */ cf->crd_sng=NULL; /* [sng] Coordinates attribute value */ cf->dmn_id[0]=NC_MIN_INT; /* [id] Dimension ID, first */ cf->dmn_id[1]=NC_MIN_INT; /* [id] Dimension ID, second */ cf->dmn_nm[0]=NULL; /* [sng] Dimension name, first */ cf->dmn_nm[1]=NULL; /* [sng] Dimension name, second */ cf->unt_sng[0]=NULL; /* [sng] Units string, first coordinate */ cf->unt_sng[1]=NULL; /* [sng] Units string, second coordinate */ cf->var_id=NC_MIN_INT; /* [id] Coordinate variable ID */ cf->var_nm=NULL; /* [sng] Coordinates variable name */ cf->var_type=NC_NAT; /* [enm] Coordinates variable type */ 
if((rcd=nco_inq_varid_flg(in_id,rgr_var,&cf->var_id)) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports special \"coordinates\" variable %s not found. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var); goto skp_cf; } /* !rcd */ cf->crd_sng=nco_char_att_get(in_id,cf->var_id,crd_sng); if(cf->crd_sng){ cf->crd=True; }else{ /* !rcd && att_typ */ (void)fprintf(stderr,"%s: WARNING %s reports coordinates variable %s does not have character-valued \"coordinates\" attribute. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var); goto skp_cf; } /* !rcd && att_typ */ /* Valid coordinates attribute requires two coordinate names separated by space character */ char *crd_nm[NCO_MAX_CRD_PER_VAR]; /* [sng] Coordinate name start position */ char *crd_dpl; /* [sng] Modifiable duplicate of coordinates string */ char *spc_ptr; /* [sng] Pointer to space character (' ') */ int crd_nbr=0; /* [nbr] Number of names in coordinates attribute */ int crd_spt=0; /* [nbr] Number of "spatial-like" (that include "degree" in units) coordinates */ int crd_idx=0; /* [idx] Counter for coordinate names */ for(crd_idx=0;crd_idx<NCO_MAX_CRD_PER_VAR;crd_idx++) crd_nm[crd_idx]=NULL; crd_dpl=(char *)strdup(cf->crd_sng); /* Search for spaces starting from end of string */ while((spc_ptr=strrchr(crd_dpl,' '))){ crd_nm[crd_nbr]=spc_ptr+1L; crd_nbr++; /* NUL-terminate so next search ends here */ *spc_ptr='\0'; } /* !sbs_ptr */ /* Final coordinate name begins where coordinate string starts */ crd_nm[crd_nbr]=crd_dpl; /* Change crd_nbr from 0-based index to actual coordinate number */ crd_nbr++; if(crd_nbr < 2){ (void)fprintf(stderr,"%s: WARNING %s found only %d coordinate(s) in \"coordinates\" attribute \"%s\", at least two are required. 
Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,crd_nbr,cf->crd_sng); goto skp_cf; } /* !crd_nbr */ /* If more than two coordinate names are present, choose first two (searching backwards from end) with "degree" in units attributes, otherwise just choose first two */ crd_idx=crd_spt=0; while(crd_spt < 2 && crd_idx < crd_nbr){ cf->crd_nm[crd_spt]=crd_nm[crd_idx]; if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[crd_spt],&cf->crd_id[crd_spt])) == NC_NOERR){ cf->unt_sng[crd_spt]=nco_char_att_get(in_id,cf->crd_id[crd_spt],unt_sng); if(cf->unt_sng[crd_spt]){ if(strcasestr(cf->unt_sng[crd_spt],"degree")){ /* Increment count of spatial-like coordinates... */ crd_spt++; }else{ /* ...or free() memory allocated during search */ cf->unt_sng[crd_spt]=(char *)nco_free(cf->unt_sng[crd_spt]); } /* !strcasestr() */ crd_idx++; } /* !rcd && att_typ */ } /* !rcd */ } /* !crd_spt */ /* If while()-loop above was successful, our search is over Otherwise, use first two coordinate names regardless of units, and print more diagnostics */ if(crd_spt < 2){ cf->crd_nm[0]=crd_nm[0]; cf->crd_nm[1]=crd_nm[1]; if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[0],&cf->crd_id[0])) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s not found. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0]); goto skp_cf; } /* !rcd */ if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[1],&cf->crd_id[1])) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s not found. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1]); goto skp_cf; } /* !rcd */ cf->unt_sng[0]=nco_char_att_get(in_id,cf->crd_id[0],unt_sng); if(cf->unt_sng[0]){ if(!strcasestr(cf->unt_sng[0],"degree")) (void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s has weird units attribute = %s. 
May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->unt_sng[0]); } /* !rcd && att_typ */ cf->unt_sng[1]=nco_char_att_get(in_id,cf->crd_id[1],unt_sng); if(cf->unt_sng[1]){ if(!strcasestr(cf->unt_sng[1],"degree")) (void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1],cf->unt_sng[1]); } /* !rcd && att_typ */ } /* !crd_spt */ int crd_rnk; /* [nbr] Coordinate rank */ rcd=nco_inq_varndims(in_id,cf->crd_id[0],&crd_rnk); if(crd_rnk != 2){ (void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s has %i dimension(s). Skipping CF coordinates method.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],crd_rnk); goto skp_cf; } /* !crd_rnk */ rcd=nco_inq_vardimid(in_id,cf->crd_id[0],cf->dmn_id); cf->dmn_nm[0]=(char *)nco_malloc(NC_MAX_NAME*sizeof(NC_CHAR)); cf->dmn_nm[1]=(char *)nco_malloc(NC_MAX_NAME*sizeof(NC_CHAR)); rcd=nco_inq_dimname(in_id,cf->dmn_id[0],cf->dmn_nm[0]); rcd=nco_inq_dimname(in_id,cf->dmn_id[1],cf->dmn_nm[1]); /* "coordinates" convention does not guarantee lat, lon are specified in that order Use "units" values, if any, to determine order In absence of "units", assume order is lat, lon */ nco_bool crd0_is_lat=False; /* [flg] First coordinate is latitude */ nco_bool crd0_is_lon=False; /* [flg] First coordinate is longitude */ nco_bool crd1_is_lat=False; /* [flg] Second coordinate is latitude */ nco_bool crd1_is_lon=False; /* [flg] Second coordinate is longitude */ if(cf->unt_sng[0]){ if(!strcasecmp(cf->unt_sng[0],"degrees_north") || !strcasecmp(cf->unt_sng[0],"degree_north") || !strcasecmp(cf->unt_sng[0],"degree_N") || !strcasecmp(cf->unt_sng[0],"degrees_N") || !strcasecmp(cf->unt_sng[0],"degreeN") || !strcasecmp(cf->unt_sng[0],"degreesN")) crd0_is_lat=True; if(!strcasecmp(cf->unt_sng[0],"degrees_east") || 
!strcasecmp(cf->unt_sng[0],"degree_east") || !strcasecmp(cf->unt_sng[0],"degree_E") || !strcasecmp(cf->unt_sng[0],"degrees_E") || !strcasecmp(cf->unt_sng[0],"degreeE") || !strcasecmp(cf->unt_sng[0],"degreesE")) crd0_is_lon=True; } /* endif */ if(cf->unt_sng[1]){ if(!strcasecmp(cf->unt_sng[1],"degrees_north") || !strcasecmp(cf->unt_sng[1],"degree_north") || !strcasecmp(cf->unt_sng[1],"degree_N") || !strcasecmp(cf->unt_sng[1],"degrees_N") || !strcasecmp(cf->unt_sng[1],"degreeN") || !strcasecmp(cf->unt_sng[1],"degreesN")) crd1_is_lat=True; if(!strcasecmp(cf->unt_sng[1],"degrees_east") || !strcasecmp(cf->unt_sng[1],"degree_east") || !strcasecmp(cf->unt_sng[1],"degree_E") || !strcasecmp(cf->unt_sng[1],"degrees_E") || !strcasecmp(cf->unt_sng[1],"degreeE") || !strcasecmp(cf->unt_sng[1],"degreesE")) crd1_is_lon=True; } /* endif */ assert((crd0_is_lat && crd1_is_lon) || (crd0_is_lon && crd1_is_lat)); int idx_lat; int idx_lon; if(crd0_is_lat && crd1_is_lon){ idx_lat=0; idx_lon=1; }else{ idx_lat=1; idx_lon=0; } /* endif */ /* Dimensions and coordinates have been vetted. Store as primary lookup names. Dimensions are always returned in order [LRV,MRV]=[0,1] LRV is along-track direction, and MRV is across-track (at least in NASA data) Internally we label LRV as "lat" and MRV as "lon" so that code looks similar for curvilinear and rectangular grids */ dmn_id_lat=cf->dmn_id[0]; dmn_id_lon=cf->dmn_id[1]; /* Subtlety: lat_nm_in is coordinate (variable+dimension) name when specified from command-line (as in nco_grd_nfr()), dimension name when found through CF-method (as in nco_rgr_wgt()). This confusing distinction could be avoided by passing command-line dimension names through-to nco_rgr_wgt(). However, that route would require complex priorities for what to do when passing command-line coordinate names not dimension names and visa-versa. 
*/ //lat_nm_in=strdup(cf->dmn_nm[0]); //lon_nm_in=strdup(cf->dmn_nm[1]); lat_nm_in=strdup(cf->crd_nm[idx_lat]); lon_nm_in=strdup(cf->crd_nm[idx_lon]); /* Next four lines unnecessary in nco_rgr_wgt() which only needs dimension names (it reads input coordinates from map- not data-file) */ lat_ctr_id=cf->crd_id[idx_lat]; lon_ctr_id=cf->crd_id[idx_lon]; lat_dmn_nm=strdup(cf->dmn_nm[0]); lon_dmn_nm=strdup(cf->dmn_nm[1]); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s \"coordinates\" attribute \"%s\" points to coordinates %s and %s. Latitude coordinate \"%s\" has LRV (along-track) and MRV (across-track) dimensions \"%s\" and \"%s\", respectively.\n",nco_prg_nm_get(),fnc_nm,rgr_var,cf->crd_sng,cf->crd_nm[0],cf->crd_nm[1],cf->crd_nm[idx_lat],cf->dmn_nm[0],cf->dmn_nm[1]); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s Coordinates %s and %s \"units\" values are \"%s\" and \"%s\", respectively.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->crd_nm[1],cf->unt_sng[0] ? cf->unt_sng[0] : "(non-existent)",cf->unt_sng[1] ? 
cf->unt_sng[1] : "(non-existent)"); /* Clean-up CF coordinates memory */ if(crd_dpl) crd_dpl=(char *)nco_free(crd_dpl); if(cf->crd_sng) cf->crd_sng=(char *)nco_free(cf->crd_sng); if(cf->dmn_nm[0]) cf->dmn_nm[0]=(char *)nco_free(cf->dmn_nm[0]); if(cf->dmn_nm[1]) cf->dmn_nm[1]=(char *)nco_free(cf->dmn_nm[1]); if(cf->unt_sng[0]) cf->unt_sng[0]=(char *)nco_free(cf->unt_sng[0]); if(cf->unt_sng[1]) cf->unt_sng[1]=(char *)nco_free(cf->unt_sng[1]); // if(foo) foo=(char *)nco_free(foo); } /* !rgr_var */ /* goto skp_cf */ skp_cf: /* free() any abandoned cf structure now */ if(!flg_cf) if(cf) cf=(cf_crd_sct *)nco_free(cf); rcd=NC_NOERR; /* End CF-coordinates block */ /* Locate fields that must be present in input file Required variables are usually latitude and longitude Currently these variables must be in root group This fails for, e.g., OMI L2 which has coordinates /GEOLOCATION_DATA/[Latitude,Longitude] fxm: Generalize with traversal table so usual suspect coordinates may be in any group */ if(lat_ctr_id == NC_MIN_INT){ if(rgr->lat_nm_in && (rcd=nco_inq_varid_flg(in_id,rgr->lat_nm_in,&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup(rgr->lat_nm_in); else if((rcd=nco_inq_varid_flg(in_id,"latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("latitude"); else if((rcd=nco_inq_varid_flg(in_id,"Latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("Latitude"); /* AMSR, HIRDLS, TRMM */ else if((rcd=nco_inq_varid_flg(in_id,"lat",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("lat"); /* CAM */ else if((rcd=nco_inq_varid_flg(in_id,"lat_d",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("lat_d"); /* EAM dynamics grid */ else if((rcd=nco_inq_varid_flg(in_id,"Lat",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("Lat"); else if((rcd=nco_inq_varid_flg(in_id,"XLAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("XLAT"); /* WRF */ else if((rcd=nco_inq_varid_flg(in_id,"XLAT_M",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("XLAT_M"); /* Unknown */ else if((rcd=nco_inq_varid_flg(in_id,"LAT",&lat_ctr_id)) == 
NC_NOERR) lat_nm_in=strdup("LAT"); /* MAR/RACMO */ else if((rcd=nco_inq_varid_flg(in_id,"TLAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("TLAT"); /* CICE, POP */ else if((rcd=nco_inq_varid_flg(in_id,"ULAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("ULAT"); /* CICE, POP */ else if((rcd=nco_inq_varid_flg(in_id,"latCell",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("latCell"); /* MPAS-O/I */ else if((rcd=nco_inq_varid_flg(in_id,"nav_lat",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("nav_lat"); /* NEMO */ else if((rcd=nco_inq_varid_flg(in_id,"global_latitude0",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("global_latitude0"); /* Oxford */ else if((rcd=nco_inq_varid_flg(in_id,"latitude0",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("latitude0"); /* Oxford NB: Must search for global_* first */ else if((rcd=nco_inq_varid_flg(in_id,"CO_Latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("CO_Latitude"); /* MLS */ else if((rcd=nco_inq_varid_flg(in_id,"S1_Latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("S1_Latitude"); /* GPM */ else if((rcd=nco_inq_varid_flg(in_id,"yc",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("yc"); /* RTM */ else if((rcd=nco_inq_varid_flg(in_id,"gridlat_0",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("gridlat_0"); /* NWS HRRR */ } /* !lat_ctr_id */ if(lon_ctr_id == NC_MIN_INT){ if(rgr->lon_nm_in && (rcd=nco_inq_varid_flg(in_id,rgr->lon_nm_in,&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup(rgr->lon_nm_in); else if((rcd=nco_inq_varid_flg(in_id,"longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("longitude"); else if((rcd=nco_inq_varid_flg(in_id,"Longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("Longitude"); /* AMSR, TRMM */ else if((rcd=nco_inq_varid_flg(in_id,"lon",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("lon"); /* CAM */ else if((rcd=nco_inq_varid_flg(in_id,"lon_d",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("lon"); /* EAM dynamics grid */ else if((rcd=nco_inq_varid_flg(in_id,"Lon",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("Lon"); else 
if((rcd=nco_inq_varid_flg(in_id,"XLONG",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("XLONG"); /* WRF */ else if((rcd=nco_inq_varid_flg(in_id,"XLONG_M",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("XLONG_M"); /* Unknown */ else if((rcd=nco_inq_varid_flg(in_id,"LON",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("LON"); /* MAR/RACMO */ else if((rcd=nco_inq_varid_flg(in_id,"TLON",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("TLON"); /* CICE */ else if((rcd=nco_inq_varid_flg(in_id,"TLONG",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("TLONG"); /* POP */ else if((rcd=nco_inq_varid_flg(in_id,"ULON",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("ULON"); /* CICE */ else if((rcd=nco_inq_varid_flg(in_id,"ULONG",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("ULONG"); /* POP */ else if((rcd=nco_inq_varid_flg(in_id,"lonCell",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("lonCell"); /* MPAS-O/I */ else if((rcd=nco_inq_varid_flg(in_id,"nav_lon",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("nav_lon"); /* NEMO */ else if((rcd=nco_inq_varid_flg(in_id,"global_longitude0",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("global_longitude0"); /* Oxford NB: Must search for global_* first */ else if((rcd=nco_inq_varid_flg(in_id,"longitude0",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("longitude0"); /* Oxford */ else if((rcd=nco_inq_varid_flg(in_id,"CO_Longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("CO_Longitude"); /* MLS */ else if((rcd=nco_inq_varid_flg(in_id,"S1_Longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("S1_Longitude"); /* GPM */ else if((rcd=nco_inq_varid_flg(in_id,"xc",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("xc"); /* RTM */ else if((rcd=nco_inq_varid_flg(in_id,"gridlon_0",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("gridlon_0"); /* NWS HRRR */ } /* !lon_ctr_id */ if(!lat_nm_in || !lon_nm_in){ (void)fprintf(stdout,"%s: ERROR %s unable to identify latitude and/or longitude variable.\nHINT: Potential causes and workarounds for this include: 1. 
Coordinate variables must be in the root directory (not in a group). If this might be the problem, try to \"flatten\" the input file before regridding it (see http://nco.sf.net/nco.html#flatten). 2. Horizontal dimensions with \"unusual\" names are hard to identify unless the user designates them somehow. ncremap will search for horizontal dimensions named in the \"coordinates\" attribute in a template variable specified with the \"-V rgr_var\" option. 3. NCO will also search its own internal database for likely names of horizontal coordinate variables (lat, latitude, LAT, XLAT, etc.). Contact the NCO project to have your idiosyncratic coordinate names added to the internal database.\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !lat_nm_in */ /* Rank of coordinates determines whether grid is curvilinear */ rcd+=nco_inq_varndims(in_id,lat_ctr_id,&lat_rnk); rcd+=nco_inq_varndims(in_id,lon_ctr_id,&lon_rnk); /* If lat_ctr and lon_ctr share same and only dimension then grid is unstructured */ if(lat_rnk*lon_rnk == 1){ rcd+=nco_inq_vardimid(in_id,lat_ctr_id,&dmn_id_lat); rcd+=nco_inq_vardimid(in_id,lon_ctr_id,&dmn_id_lon); if(dmn_id_lat == dmn_id_lon){ dmn_id_col=dmn_id_lat; dmn_id_lat=NC_MIN_INT; dmn_id_lon=NC_MIN_INT; rcd+=nco_inq_dimname(in_id,dmn_id_col,dmn_nm); col_dmn_nm=(char *)strdup(dmn_nm); flg_grd_1D=True; } /* !unstructured */ } /* lat_rnk == lon_rnk == 1 */ if(lat_rnk*lon_rnk == 1 && dmn_id_lat != NC_MIN_INT && dmn_id_lon != NC_MIN_INT){ flg_grd_crv=False; flg_grd_2D=True; } /* !lat_rnk */ if(lat_rnk == dmn_nbr_2D || lon_rnk == dmn_nbr_2D){ flg_grd_crv=True; flg_grd_2D=False; } /* !lat_rnk */ if(lat_rnk > dmn_nbr_2D || lon_rnk > dmn_nbr_2D){ (void)fprintf(stdout,"%s: ERROR %s reports an identified grid variable (%s with rank %d and/or %s with rank %d) has rank greater than two---grid variables currently must have rank 1 or 2.\nHINT: If grid variables do not vary in time, then temporally average them (with, e.g., ncwa -a time in.nc out.nc) prior to 
inferring grid\n",nco_prg_nm_get(),fnc_nm,lat_nm_in,lat_rnk,lon_nm_in,lon_rnk); nco_exit(EXIT_FAILURE); } /* !3D */ if(lat_rnk*lon_rnk != 1 && lat_rnk*lon_rnk != 4) assert(False); /* Scrutinize coordinates for their dimensions NB: Unstructure already known */ if(flg_grd_2D){ rcd+=nco_inq_dimname(in_id,dmn_id_lat,dmn_nm); lat_dmn_nm=(char *)strdup(dmn_nm); rcd+=nco_inq_dimname(in_id,dmn_id_lon,dmn_nm); lon_dmn_nm=(char *)strdup(dmn_nm); } /* !flg_grd_2D */ if(flg_grd_crv){ rcd+=nco_inq_vardimid(in_id,lat_ctr_id,dmn_ids); /* fxm: use cf struct and match with units name, if any? normally curvilinear grid dimensions are just pixel dimensions that are not aligned north-south or east-west */ dmn_id_lat=dmn_ids[0]; dmn_id_lon=dmn_ids[1]; rcd+=nco_inq_dimname(in_id,dmn_id_lat,dmn_nm); lat_dmn_nm=(char *)strdup(dmn_nm); rcd+=nco_inq_dimname(in_id,dmn_id_lon,dmn_nm); lon_dmn_nm=(char *)strdup(dmn_nm); } /* !flg_grd_crv */ if(!(lat_dmn_nm && lon_dmn_nm) && !col_dmn_nm){ (void)fprintf(stdout,"%s: ERROR %s unable to identify latitude and/or longitude dimension and/or column dimension.\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !col_dmn_nm !lat_dmn_nm !lon_dmn_nm */ /* Locate spatial dimensions that may be present NB: bounds dimensions may present a special problem CAM-FV and CAM-SE use nbnd for temporal bounds and have no spatial bounds dimension CAM3 uses tbnd for temporal bounds and has no spatial bounds dimension CICE and POP use d2 for temporal bounds, and CICE uses nvertices for spatial bounds while POP uses nothing Hence search for nvertices before nbnd to ensure spatial bound is found first */ if((rcd=nco_inq_dimid_flg(in_id,"nv",&dmn_id_bnd)) == NC_NOERR) bnd_dmn_nm=strdup("nv"); /* fxm */ else if((rcd=nco_inq_dimid_flg(in_id,"nvertices",&dmn_id_bnd)) == NC_NOERR) bnd_dmn_nm=strdup("nvertices"); /* CICE */ /* Use dimension IDs to get dimension sizes and grid size */ if(flg_grd_1D){ rcd+=nco_inq_dimlen(in_id,dmn_id_col,&col_nbr); lat_nbr=lon_nbr=col_nbr; 
}else{ rcd+=nco_inq_dimlen(in_id,dmn_id_lat,&lat_nbr); rcd+=nco_inq_dimlen(in_id,dmn_id_lon,&lon_nbr); col_nbr=NC_MIN_INT; } /* !flg_grd_1D */ if(dmn_id_bnd != NC_MIN_INT) rcd+=nco_inq_dimlen(in_id,dmn_id_bnd,&grd_crn_nbr); if(dmn_id_bnd != NC_MIN_INT) rcd+=nco_inq_dimlen(in_id,dmn_id_bnd,&bnd_nbr); if(flg_grd_1D){ /* Unstructured grid (e.g., CAM-SE) */ grd_rnk_nbr=dmn_nbr_1D; grd_typ=nco_grd_2D_unk; lat_typ=nco_grd_lat_unk; lon_typ=nco_grd_lon_unk; /* 1D grids without their own boundaries are at the mercy of the weight generator */ if(dmn_id_bnd == NC_MIN_INT){ (void)fprintf(stdout,"%s: WARNING %s reports an unstructured grid without spatial boundary information. NCO can copy but not infer spatial boundaries from unstructured grids. Thus NCO will not write spatial bounds to the gridfile inferred from this input file. Instead, the weight generator that ingests this gridfile must generate weights for gridcells with unknown spatial extent. This is feasible for grids and mappings where weights masquerade as areas and are determined by underlying grid and interpolation type (e.g., bilinear remapping of spectral element grid). Unfortunately, the ESMF_RegridWeightGen (ERWG) program requires cell interfaces in both grid files, so ERWG will break on this gridfile. Other weight generators such as TempestRemap may be more successful with this SCRIP file.\n",nco_prg_nm_get(),fnc_nm); (void)fprintf(stdout,"%s: HINT Re-run the regridder, this time adding the \"-s src_grd\" option to specify the source grid file in SCRIP format. That SCRIP file will have the spatial bounds information required by the ESMF_RegridWeightGen (ERWG) program, so that the regridder will circumvent inferring the underlying grid through its black but fragile magic.\n",nco_prg_nm_get()); flg_wrt_crn=False; /* Input could actually be from grid with no polygonal definition, e.g., CAM-SE Corner number is non-deterministic since, e.g., CAM-SE dual grid can be fit to quadrilaterals, pentagons, chevrons, etc. 
Bounds will not be diagnosed so safe to set grd_crn_nbr to harmless (though weird) value like 4 However, ERWG requires presence of valid corner dimension "grid_corners" and arrays in input SCRIP file So ERWG will break when reading this SCRIP file regardless of whether it contains arrays (with bogus values) By default do not write grid corner values */ grd_crn_nbr=4; } /* !dmn_id_bnd */ if(bnd_nbr == 2){ /* Unstructured grids with bounds information (e.g., OCO2) may use a pseudo-rectangular convention of archiving latitude and longitude bounds as 2xN (rather than 4XN) arrays even though cell have four corners. "convention" is that two latitudes and two longitudes can specify rectangular boundary cell In this case, bnd_nbr=grd_crn_nbr=2=sizeof(nv)=sizeof(nvertices) currently Set number of corners to rectangular and leave bnd_nbr as is */ grd_crn_nbr=4; flg_1D_psd_rct_bnd=True; } /* !bnd_nbr */ }else if(flg_grd_2D){ /* !flg_grd_1D */ /* Assume 2D grid of uninitialized type */ grd_rnk_nbr=dmn_nbr_2D; grd_typ=nco_grd_2D_nil; lat_typ=nco_grd_lat_nil; lon_typ=nco_grd_lon_nil; /* Assume rectangular grids that do not specify otherwise use quadrilaterals */ if(dmn_id_bnd == NC_MIN_INT) grd_crn_nbr=4; /* Sometimes we infer from a 2D grid, like those produced by nco_grd_mk(), that has bounds with nv=2 This signals rectangular gridcell bounds are interfaces not vertices (to save half the space) These rectangles really have four corners so we change grd_crn_nbr (not bnd_nbr) accordingly */ if(grd_crn_nbr == 2) grd_crn_nbr=4; /* Convention is to archive only two bounds for rectangular grids (since sides are identical) Non-quadrilateral rectangular grids are untested */ if(grd_crn_nbr == 4) bnd_nbr=2; }else if(flg_grd_crv){ /* !flg_grd_2D */ /* Assume curvilinear grid (e.g., WRF) */ flg_grd_2D=False; grd_rnk_nbr=dmn_nbr_2D; grd_typ=nco_grd_2D_unk; lat_typ=nco_grd_lat_unk; lon_typ=nco_grd_lon_unk; /* Assume curvilinear grids that do not specify otherwise use quadrilaterals */ 
if(dmn_id_bnd == NC_MIN_INT) grd_crn_nbr=4; /* Assume quadrilaterals are, well, quadrilaterals (e.g., rhomboids) not necessarily rectangles Non-quadrilateral curvilinear grids are untested */ if(grd_crn_nbr == 4) bnd_nbr=4; else assert(False); } /* !flg_grd_crv */ /* Allocate space for output data */ if(flg_grd_1D) grd_sz_nbr=col_nbr; else grd_sz_nbr=lat_nbr*lon_nbr; dmn_sz_int=(int *)nco_malloc(grd_rnk_nbr*nco_typ_lng((nc_type)NC_INT)); area=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); msk=(int *)nco_malloc(grd_sz_nbr*nco_typ_lng((nc_type)NC_INT)); if(flg_grd_1D){ if(bnd_nbr != NC_MIN_INT) lat_bnd=(double *)nco_malloc(grd_sz_nbr*bnd_nbr*nco_typ_lng(crd_typ)); lat_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lat_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ)); lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); if(bnd_nbr != NC_MIN_INT) lon_bnd=(double *)nco_malloc(grd_sz_nbr*bnd_nbr*nco_typ_lng(crd_typ)); lon_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ)); }else if(flg_grd_2D){ /* !flg_grd_1D */ lat_bnd=(double *)nco_malloc(lat_nbr*bnd_nbr*nco_typ_lng(crd_typ)); lat_crn=(double *)nco_malloc(lat_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lat_ctr=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ)); lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lon_bnd=(double *)nco_malloc(lon_nbr*bnd_nbr*nco_typ_lng(crd_typ)); lon_crn=(double *)nco_malloc(lon_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_ctr=(double *)nco_malloc(lon_nbr*nco_typ_lng(crd_typ)); lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ)); }else if(flg_grd_crv){ /* !flg_grd_2D */ lat_bnd=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); 
lat_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lat_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ)); lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lon_bnd=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ)); } /* !flg_grd_crv */ grd_ctr_lat=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); grd_ctr_lon=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); grd_crn_lat=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ)); grd_crn_lon=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ)); /* Locate fields that may be present in input file */ if((rcd=nco_inq_varid_flg(in_id,"lat_bnds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("lat_bnds"); else if((rcd=nco_inq_varid_flg(in_id,"latt_bounds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("latt_bounds"); else if((rcd=nco_inq_varid_flg(in_id,"latu_bounds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("latu_bounds"); else if((rcd=nco_inq_varid_flg(in_id,"lat_ntf",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("lat_ntf"); else if((rcd=nco_inq_varid_flg(in_id,"lat_vertices",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("lat_vertices"); else if((rcd=nco_inq_varid_flg(in_id,"latitude_bnds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("latitude_bnds"); /* OCO2 */ else if((rcd=nco_inq_varid_flg(in_id,"LatitudeCornerpoints",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("LatitudeCornerpoints"); /* OMI */ if((rcd=nco_inq_varid_flg(in_id,"lon_bnds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lon_bnds"); else if((rcd=nco_inq_varid_flg(in_id,"lont_bounds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lont_bounds"); else if((rcd=nco_inq_varid_flg(in_id,"lonu_bounds",&lon_bnd_id)) == 
NC_NOERR) lon_bnd_nm=strdup("lonu_bounds"); else if((rcd=nco_inq_varid_flg(in_id,"lon_ntf",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lon_ntf"); else if((rcd=nco_inq_varid_flg(in_id,"lon_vertices",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lon_vertices"); else if((rcd=nco_inq_varid_flg(in_id,"longitude_bnds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("longitude_bnds"); /* OCO2 */ else if((rcd=nco_inq_varid_flg(in_id,"LongitudeCornerpoints",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("LongitudeCornerpoints"); /* OMI */ if((rcd=nco_inq_varid_flg(in_id,"area",&area_id)) == NC_NOERR) area_nm_in=strdup("area"); else if((rcd=nco_inq_varid_flg(in_id,"Area",&area_id)) == NC_NOERR) area_nm_in=strdup("Area"); else if((rcd=nco_inq_varid_flg(in_id,"areaCell",&area_id)) == NC_NOERR) area_nm_in=strdup("areaCell"); /* MPAS-O/I */ else if((rcd=nco_inq_varid_flg(in_id,"grid_area",&area_id)) == NC_NOERR) area_nm_in=strdup("grid_area"); else if((rcd=nco_inq_varid_flg(in_id,"area_d",&area_id)) == NC_NOERR) area_nm_in=strdup("area_d"); /* EAM dynamics grid */ else if((rcd=nco_inq_varid_flg(in_id,"area_p",&area_id)) == NC_NOERR) area_nm_in=strdup("area_p"); /* EAM physics grid */ // else if((rcd=nco_inq_varid_flg(in_id,"aice",&area_id)) == NC_NOERR) area_nm_in=strdup("aice"); /* CICE time-dependent ice area (3D), not total gridcell area */ else if((rcd=nco_inq_varid_flg(in_id,"tarea",&area_id)) == NC_NOERR) area_nm_in=strdup("tarea"); /* CICE time-invariant state-variable gridcell area (2D) */ else if((rcd=nco_inq_varid_flg(in_id,"uarea",&area_id)) == NC_NOERR) area_nm_in=strdup("uarea"); /* CICE time-invariant dynamics variables (2D) */ msk_nm_in=rgr->msk_var; if(msk_nm_in){ if(!strcasecmp(msk_nm_in,"none")){ /* 20170814: Some variables named "*mask*" are, e.g., quality control masks not regridding masks per se */ msk_nm_in=(char *)nco_free(msk_nm_in); }else{ /* User-supplied name overrides database */ rcd=nco_inq_varid(in_id,msk_nm_in,&msk_id); } /* !msk_nm_in */ }else{ /* 
Otherwise search database */ if((rcd=nco_inq_varid_flg(in_id,"mask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("mask"); else if((rcd=nco_inq_varid_flg(in_id,"Mask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("Mask"); else if((rcd=nco_inq_varid_flg(in_id,"grid_imask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("grid_imask"); else if((rcd=nco_inq_varid_flg(in_id,"landmask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("landmask"); /* ALM/CLM */ else if((rcd=nco_inq_varid_flg(in_id,"tmask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("tmask"); /* CICE */ } /* !msk_nm_in */ /* Mask field requires special handling for non-conformant models */ if(msk_id != NC_MIN_INT){ /* 20151201: All models tested define mask as NC_INT except CICE which uses NC_FLOAT 20160111: Few observations tested define mask. Exceptions include AMSR and GHRSST. AMSR uses NC_SHORT to store bitmasks. Bitmask is 1 for missing data, and up to 128 for various quality levels of valid data. Hence, almost better to ignore AMSR mask variable. GHRSST uses NC_BYTE for its 3D "mask" bit-mask of surface-type values 1,2,4,8,16. 
*/ rcd=nco_inq_varndims(in_id,msk_id,&msk_rnk_nbr); if(msk_rnk_nbr != grd_rnk_nbr && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports input mask variable \"%s\" is rank %d while grid is rank %ld so will use first timestep/layer to determine output mask\n",nco_prg_nm_get(),fnc_nm,msk_nm_in,msk_rnk_nbr,grd_rnk_nbr); rcd=nco_inq_vartype(in_id,msk_id,&msk_typ); msk_unn.vp=(void *)nco_malloc(grd_sz_nbr*nco_typ_lng(msk_typ)); } /* !msk */ /* All grids: Some real-world datasets violate convention that coordinates ought never have missing values CICE lists missing value for lat/lon_ctr arrays (TLAT, TLONG) and re-uses that for bounds arrays (latt_bounds, lont_bounds) that do not bother to have their own missing value attributes Without counter-example, assume has_mss_val_bnd=has_mss_val_ctr and mss_val_bnd_dbl=mss_val_ctr_dbl */ has_mss_val_bnd=has_mss_val_ctr=nco_mss_val_get_dbl(in_id,lat_ctr_id,&mss_val_ctr_dbl); if(flg_grd_1D){ /* Obtain fields that must be present in unstructured input file */ dmn_srt[0]=0L; dmn_cnt[0]=col_nbr; rcd=nco_get_vara(in_id,lat_ctr_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ); rcd=nco_get_vara(in_id,lon_ctr_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ); /* Obtain fields that may be present in unstructured input file */ if(area_id != NC_MIN_INT) rcd=nco_get_vara(in_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); if(msk_id != NC_MIN_INT){ if(msk_rnk_nbr > grd_rnk_nbr){ /* Retrieve mask elements only from first horizontal grid, e.g., first timestep, first layer... 
*/ for(dmn_idx=0;dmn_idx<msk_rnk_nbr-grd_rnk_nbr;dmn_idx++){ dmn_srt[dmn_idx]=0L; dmn_cnt[dmn_idx]=1L; } /* !dmn_idx */ dmn_srt[dmn_idx]=0L; dmn_cnt[dmn_idx]=col_nbr; } /* !msk_rnk_nbr */ rcd=nco_get_vara(in_id,msk_id,dmn_srt,dmn_cnt,msk_unn.vp,msk_typ); } /* !msk_id */ dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=col_nbr; if(flg_1D_psd_rct_bnd){ dmn_cnt[1]=bnd_nbr; if(lat_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ); if(lon_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ); }else{ dmn_cnt[1]=grd_crn_nbr; if(lat_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_crn,crd_typ); if(lon_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_crn,crd_typ); } /* !flg_1D_psd_rct_bnd */ } /* !flg_grd_1D */ if(flg_grd_crv){ /* Obtain fields that must be present in curvilinear input file */ dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=lon_nbr; rcd=nco_get_vara(in_id,lat_ctr_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ); rcd=nco_get_vara(in_id,lon_ctr_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ); /* 20150923: Also input, if present in curvilinear file, corners, area, and mask area and mask are same size as lat and lon */ if(area_id != NC_MIN_INT) rcd=nco_get_vara(in_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); if(msk_id != NC_MIN_INT){ if(msk_rnk_nbr > grd_rnk_nbr){ /* Retrieve mask elements only from first horizontal grid, e.g., first timestep, first layer... 
*/ for(dmn_idx=0;dmn_idx<msk_rnk_nbr-grd_rnk_nbr;dmn_idx++){ dmn_srt[dmn_idx]=0L; dmn_cnt[dmn_idx]=1L; } /* !dmn_idx */ dmn_srt[dmn_idx]=dmn_srt[dmn_idx+1]=0L; dmn_cnt[dmn_idx]=lat_nbr; dmn_cnt[dmn_idx+1]=lon_nbr; } /* !msk_rnk_nbr */ rcd=nco_get_vara(in_id,msk_id,dmn_srt,dmn_cnt,msk_unn.vp,msk_typ); } /* !msk_id */ /* Corners are on curvilinear corner grid Rectangular boundaries (i.e., lat_bnd=[lat_nbr,2]) DNE for curvilinear grids Read-in *_crn arrays in curvilinear grids, and *_bnd arrays for rectilinear grids Rank-ordering of corner arrays is usually lat_nbr,lon_nbr,grd_crn_nbr as produced/expected by SCRIP However some datasets, e.g., OMI DOMINO use grd_crn_nbr,lat_nbr,lon_nbr Sigh... */ dmn_srt[0]=dmn_srt[1]=dmn_srt[2]=0L; if(lat_bnd_id != NC_MIN_INT && lon_bnd_id != NC_MIN_INT){ rcd=nco_inq_vardimid(in_id,lat_bnd_id,dmn_ids); if((dmn_ids[0] == dmn_id_lat && dmn_ids[1] == dmn_id_lon) || (dmn_ids[0] == dmn_id_lon && dmn_ids[1] == dmn_id_lat)){ dmn_id_bnd=dmn_ids[2]; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=lon_nbr; dmn_cnt[2]=grd_crn_nbr; }else if((dmn_ids[1] == dmn_id_lat && dmn_ids[2] == dmn_id_lon) || (dmn_ids[1] == dmn_id_lon && dmn_ids[2] == dmn_id_lat)){ dmn_id_bnd=dmn_ids[0]; dmn_cnt[0]=grd_crn_nbr; dmn_cnt[1]=lat_nbr; dmn_cnt[2]=lon_nbr; flg_crn_grd_lat_lon=True; }else{ (void)fprintf(stdout,"%s: WARNING %s confused by dimension-ordering of latitude bounds variable \"%s. 
Will ignore this bounds variable and attempt to extrapolate vertices from centers internally...\n",nco_prg_nm_get(),fnc_nm,lat_nm_in); lat_bnd_id=NC_MIN_INT; lon_bnd_id=NC_MIN_INT; } /* !dmn_ids */ rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_crn,crd_typ); rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_crn,crd_typ); if(flg_crn_grd_lat_lon){ /* Permute corner arrays from non-canonical (grd_nbr,lat_nbr,lon_nbr) to canonical (lat_nbr,lon_nbr,grd_nbr) order */ double *lat_crn_tmp=NULL; double *lon_crn_tmp=NULL; lat_crn_tmp=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_crn_tmp=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); memcpy(lat_crn_tmp,lat_crn,grd_sz_nbr*grd_crn_nbr*sizeof(double)); memcpy(lon_crn_tmp,lon_crn,grd_sz_nbr*grd_crn_nbr*sizeof(double)); for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){ for(idx=0;idx<grd_sz_nbr;idx++){ lat_idx=idx/lon_nbr; lon_idx=idx%lon_nbr; /* NB: Variables differ (lat vs. lon) but indexes are identical in next two lines */ lat_crn[lat_idx*lon_nbr*grd_crn_nbr+lon_idx*grd_crn_nbr+crn_idx]=lat_crn_tmp[crn_idx*grd_sz_nbr+idx]; lon_crn[lat_idx*lon_nbr*grd_crn_nbr+lon_idx*grd_crn_nbr+crn_idx]=lon_crn_tmp[crn_idx*grd_sz_nbr+idx]; } /* !idx */ } /* !crn_idx */ if(lat_crn_tmp) lat_crn_tmp=(double *)nco_free(lat_crn_tmp); if(lon_crn_tmp) lon_crn_tmp=(double *)nco_free(lon_crn_tmp); /* In this code branch, thought to be executed only for OMI DOMINO grids, re-compute grid center arrays (known to contain missing values) as centroids of supplied grid corners */ for(idx=0;idx<grd_sz_nbr;idx++){ lat_idx=idx/lon_nbr; lon_idx=idx%lon_nbr; lat_ctr[idx]=0.25*(lat_crn[idx*grd_crn_nbr+0L]+lat_crn[idx*grd_crn_nbr+1L]+lat_crn[idx*grd_crn_nbr+2L]+lat_crn[idx*grd_crn_nbr+3L]); lon_ctr[idx]=nco_lon_crn_avg_brnch(lon_crn[idx*grd_crn_nbr+0L],lon_crn[idx*grd_crn_nbr+1L],lon_crn[idx*grd_crn_nbr+2L],lon_crn[idx*grd_crn_nbr+3L]); } /* !idx */ } /* !flg_crd_grd_lat_lon */ } /* !lat_bnd_id */ } /* !flg_grd_crv 
*/ if(flg_grd_2D){ int lon_psn_in=1L; /* [idx] Ordinal position of longitude dimension in rectangular grid variables like area */ int lat_psn_in=0L; /* [idx] Ordinal position of latitude dimension in rectangular grid variables like area */ int tpl_id=NC_MIN_INT; /* [id] ID of template field */ /* Obtain fields that must be present in input file */ dmn_srt[0L]=0L; dmn_cnt[0L]=lat_nbr; rcd=nco_get_vara(in_id,lat_ctr_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ); dmn_srt[0L]=0L; dmn_cnt[0L]=lon_nbr; rcd=nco_get_vara(in_id,lon_ctr_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ); if(lat_ctr[1L] < lat_ctr[0L]) flg_s2n=False; /* Use fields that may be present in input file to override, if necessary, default lon/lat order area and mask are both suitable templates for determining input lat/lon ordering NB: Algorithm assumes area is same rank as grid, and falls-back to mask if that has same rank as grid */ if(area_id != NC_MIN_INT) tpl_id=area_id; else if(msk_id != NC_MIN_INT && msk_rnk_nbr == grd_rnk_nbr) tpl_id=msk_id; if(tpl_id != NC_MIN_INT){ int tpl_rnk_nbr; var_id=tpl_id; /* NB: Template variable rank may exceed two with --msk_[src/dst] (e.g., SST(time,lat,lon)) */ rcd=nco_inq_varndims(in_id,var_id,&tpl_rnk_nbr); rcd=nco_inq_vardimid(in_id,var_id,dmn_ids); /* fxm: Optimize discovery of lat/lon ordering */ for(dmn_idx=0;dmn_idx<grd_rnk_nbr;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_ids[dmn_idx],dmn_nm); rcd+=nco_inq_dimlen(in_id,dmn_ids[dmn_idx],&dmn_sz); if(!strcmp(dmn_nm,lat_dmn_nm)){ assert(dmn_sz == lat_nbr); assert(dmn_idx == 0); lat_psn_in=dmn_idx; } /* !lat */ if(!strcmp(dmn_nm,lon_dmn_nm)){ assert(dmn_sz == lon_nbr); assert(dmn_idx == 1); lon_psn_in=dmn_idx; } /* !lon */ } /* !dmn_idx */ } /* !tpl */ /* Obtain fields that may be present in input file */ if(area_id != NC_MIN_INT){ var_id=area_id; rcd=nco_inq_vardimid(in_id,var_id,dmn_ids); dmn_srt[lat_psn_in]=0L; dmn_cnt[lat_psn_in]=lat_nbr; dmn_srt[lon_psn_in]=0L; dmn_cnt[lon_psn_in]=lon_nbr; 
rcd=nco_get_vara(in_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); } /* !area */ if(msk_id != NC_MIN_INT){ var_id=msk_id; rcd=nco_inq_vardimid(in_id,var_id,dmn_ids); dmn_srt[lat_psn_in]=0L; dmn_cnt[lat_psn_in]=lat_nbr; dmn_srt[lon_psn_in]=0L; dmn_cnt[lon_psn_in]=lon_nbr; if(msk_rnk_nbr != grd_rnk_nbr){ /* Retrieve mask elements only from first horizontal grid, e.g., first timestep, first layer... */ for(dmn_idx=0;dmn_idx<msk_rnk_nbr-grd_rnk_nbr;dmn_idx++){ dmn_srt[dmn_idx]=0L; dmn_cnt[dmn_idx]=1L; } /* !dmn_idx */ dmn_srt[dmn_idx]=dmn_srt[dmn_idx+1]=0L; dmn_cnt[dmn_idx+lat_psn_in]=lat_nbr; dmn_cnt[dmn_idx+lon_psn_in]=lon_nbr; } /* !msk_rnk_nbr */ rcd=nco_get_vara(in_id,msk_id,dmn_srt,dmn_cnt,msk_unn.vp,msk_typ); } /* !msk */ /* Rectangular boundaries are often on "abbreviated" bounds grid (two bounds per center) Read-in *_crn arrays for 1D and curvilinear grids, and *_bnd arrays for rectilinear grids */ dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=bnd_nbr; if(lat_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lon_nbr; dmn_cnt[1]=bnd_nbr; if(lon_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ); } /* !flg_grd_2D */ /* Additional information that may be required for any input grid */ if(area_id != NC_MIN_INT) has_mss_val_area=nco_mss_val_get_dbl(in_id,area_id,&mss_val_area_dbl); if(msk_id != NC_MIN_INT) has_mss_val_msk=nco_mss_val_get_dbl(in_id,msk_id,&mss_val_msk_dbl); /* 20160115: AMSR coordinates are packed as NC_SHORT with scale_value=0.01f. What to do? Is it worth unpacking everything? */ int flg_pck; /* [flg] Variable is packed on disk */ rcd=nco_inq_var_packing(in_id,lat_ctr_id,&flg_pck); if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports lat_ctr variable \"%s\" is packed so results unpredictable. 
HINT: If grid-generation causes problems, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,lat_nm_in); rcd=nco_inq_var_packing(in_id,lon_ctr_id,&flg_pck); if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports lon_ctr variable \"%s\" is packed so results unpredictable. HINT: If grid-generation causes problems, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,lon_nm_in); /* Close input netCDF file */ nco_close(in_id); /* Remove local copy of file */ if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in); /* Above this line, fl_in and in_id refer to input file to be regridded Below this line, fl_out and out_id refer to grid-file to be output */ dfl_lvl=rgr->dfl_lvl; fl_out=rgr->fl_grd; fl_out_fmt=rgr->fl_out_fmt; if(!fl_out){ (void)fprintf(stdout,"%s: ERROR %s filename for inferred SCRIP grid-file is uninitialized, supply it with \"ncks --rgr grid=filename.nc\" or \"ncremap -R '--rgr grid=filename.nc'\"\n",nco_prg_nm_get(),fnc_nm); (void)fprintf(stdout,"%s: HINT ncremap supplies an automatically generated default name for any output SCRIP grid-file. Users of the standalone regridder (ncks) must explicitly specify a name for the inferred SCRIP grid-file.\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* !fl_out */ /* Define output variable values */ int lon_psn; /* [idx] Ordinal position of longitude dimension in rectangular grid dimension-size array */ int lat_psn; /* [idx] Ordinal position of latitude dimension in rectangular grid dimension-size array */ if(grd_rnk_nbr == dmn_nbr_1D){ dmn_sz_int[0]=col_nbr; }else if(grd_rnk_nbr == dmn_nbr_2D){ /* !dmn_nbr_1D */ /* SCRIP introduced [lon,lat] convention because more natural for Fortran NB: This [lon,lat] convention applies ONLY to grid_dims variable Write all other SCRIP variables as [lat,lon] Nonsensical? 
Yes, but backwards compatibility is priceless */ lon_psn=0; lat_psn=1; dmn_sz_int[lon_psn]=lon_nbr; dmn_sz_int[lat_psn]=lat_nbr; } /* !dmn_nbr_2D */ if(flg_grd_crv){ /* For curvilinear grids first, if necessary, infer corner boundaries Then perform sanity check using same code on inferred and copied grids */ if(False && has_mss_val_bnd && grd_crn_nbr == 4 && !strcmp(lat_bnd_nm,"latt_bounds") && !strcmp(lon_bnd_nm,"lont_bounds") && lat_bnd_id != NC_MIN_INT && lon_bnd_id != NC_MIN_INT){ /* Only CESM CICE is known to fit these constraints Cell center locations are (misleadingly) reported in a regular, rectangular, regional grid Cell corners/boundaries are regular only in SH, curvilinear in NH, i.e., displaced or tripole grid Grid is from southernmost Antarctic Ocean latitude and longitude near 79S,320E to North Pole Nominal centers do not agree with true centers computed from corners CICE may run in decomposed/unstructured mode, each column writes separately to output buffer? This could explain missing coordinates in non-ocean gridcells However, land points are completely masked (grid centers and corners are missing) Oversight? Why not write coordinates for land-masked cells? 
Regridder needs corners so we fill-in missing boundaries with derived grid Gave up on inferring 20170521 once tri-pole grid complexity became apparent */ const long idx_dbg=rgr->idx_dbg; double lat_ctr_drv; /* [dgr] Latitude center, derived */ double lon_ctr_drv; /* [dgr] Longitude center, derived */ double lat_crn_drv; /* [dgr] Latitude corner, derived */ double lon_crn_drv; /* [dgr] Longitude corner, derived */ long idx_ctr_sth; /* [idx] Index of southern neighbor */ long idx_ctr_nrt; /* [idx] Index of northern neighbor */ long idx_crn_sth; /* [idx] Index of southern neighbor */ long idx_crn_nrt; /* [idx] Index of northern neighbor */ long lon_idx_crr; /* [idx] Current longitude index */ long lon_vld_frs; /* [idx] First valid longitude in latitude row */ long *lon_vld_prv=NULL; /* [idx] Previous valid longitude in latitude row */ long *lon_vld_nxt=NULL; /* [idx] Next valid longitude in latitude row */ lon_vld_prv=(long *)nco_malloc(lon_nbr*sizeof(long)); lon_vld_nxt=(long *)nco_malloc(lon_nbr*sizeof(long)); /* First valid gridcell sets west and south bounds of entire grid */ for(idx_ctr=0;idx_ctr<grd_sz_nbr;idx_ctr++){ if(lat_ctr[idx_ctr] != mss_val_ctr_dbl) break; } /* !grd_sz_nbr */ assert(idx_ctr != grd_sz_nbr); idx_crn=idx_ctr*grd_crn_nbr; lat_sth=lat_crn[idx_crn]; lat_ncr=lat_crn[idx_crn+3]-lat_crn[idx_crn]; /* ul-ll */ lon_wst=lon_crn[idx_crn]; lon_ncr=lon_crn[idx_crn+1]-lon_crn[idx_crn]; /* lr-ll */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s will assume grid is regional CICE in curvilinear format with masked land. 
Will diagnose missing cell boundaries and centers from present boundaries and centers in grid of size lat_nbr=%ld, lon_nbr=%ld.\n",nco_prg_nm_get(),fnc_nm,lat_nbr,lon_nbr); for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ idx_ctr=lat_idx*lon_nbr; /* Find first valid longitude at this latitude */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++) if(lat_ctr[idx_ctr+lon_idx] != mss_val_ctr_dbl) break; lon_vld_frs=lon_idx; /* 20170519: Verified all tri-pole grid latitudes have at least one valid point */ if(lon_vld_frs == -1L) abort(); for(lon_idx_crr=0;lon_idx_crr<lon_nbr;lon_idx++){ /* Find previous and next valid longitude for all longitudes at this latitude Cells can be their own previous/next valid longitude */ lon_vld_prv[lon_idx_crr]=-1L; lon_vld_nxt[lon_idx_crr]=-1L; /* Start from current longitude and move left (west)... */ for(lon_idx=lon_idx_crr;lon_idx>=0;lon_idx--) if(lat_ctr[idx_ctr+lon_idx] != mss_val_ctr_dbl) break; if(lon_idx >= 0) lon_vld_prv[lon_idx_crr]=lon_idx; /* Start from current longitude and move right (east)... 
*/ for(lon_idx=lon_idx_crr;lon_idx<lon_nbr;lon_idx++) if(lat_ctr[idx_ctr+lon_idx] != mss_val_ctr_dbl) break; if(lon_idx < lon_nbr) lon_vld_nxt[lon_idx_crr]=lon_idx; /* Wrap west if previous valid cell not found */ lon_vld_prv[lon_idx_crr]=lon_vld_prv[lon_nbr-1L]; /* Wrap east if next valid cell not found */ lon_vld_nxt[lon_idx_crr]=lon_vld_nxt[0]; } /* !lon_idx_crr */ /* Derive centers and corners for each missing point */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ idx_ctr=lat_idx*lon_nbr+lon_idx; idx_crn=idx_ctr*grd_crn_nbr; if(lat_ctr[idx_ctr] != mss_val_ctr_dbl){ lat_sth=lat_crn[idx_crn]; lat_ncr=lat_crn[idx_crn+3]-lat_crn[idx_crn]; /* ul-ll */ lat_ctr_drv=lat_sth+0.5*lat_ncr; lat_crn_drv=lat_sth; lon_wst=lon_crn[idx_crn]; lon_ncr=lon_crn[idx_crn+1]-lon_crn[idx_crn]; /* lr-ll */ lon_ctr_drv=lon_wst+lon_ncr*(lon_idx+0.5); if(nco_dbg_lvl_get() >= nco_dbg_std && idx_ctr == idx_dbg) (void)fprintf(stdout,"%s: DEBUG %s idx=%ld lat_idx=%ld, lon_idx=%ld, lat_sth=%g, lat_ncr=%g, lon_wst=%g, lon_ncr=%g\n",nco_prg_nm_get(),fnc_nm,idx_ctr,lat_idx,lon_idx,lat_sth,lat_ncr,lon_wst,lon_ncr); } /* !idx_ctr */ if(lat_ctr[idx_ctr] == mss_val_ctr_dbl){ if(lat_idx != 0L){ /* Not bottom row */ idx_ctr_sth=idx_ctr-lon_nbr; if(lat_ctr[idx_ctr_sth] != mss_val_ctr_dbl){ /* Copy southern corners from northern corners of southern neighbor */ idx_crn_sth=idx_ctr_sth*grd_crn_nbr; lat_crn[idx_crn+0L]=lat_crn[idx_crn_sth+3L]; lat_crn[idx_crn+1L]=lat_crn[idx_crn_sth+2L]; lon_crn[idx_crn+0L]=lon_crn[idx_crn_sth+3L]; lon_crn[idx_crn+1L]=lon_crn[idx_crn_sth+2L]; } /* !mss_val */ } /* !lat_idx */ if(lat_idx != lat_nbr-1L){ /* Not top row */ idx_ctr_nrt=idx_ctr+lon_nbr; if(lat_ctr[idx_ctr_nrt] != mss_val_ctr_dbl){ /* Copy northern corners from southern corners of northern neighbor */ idx_crn_nrt=idx_ctr_nrt*grd_crn_nbr; lat_crn[idx_crn+2L]=lat_crn[idx_crn_nrt+1L]; lat_crn[idx_crn+3L]=lat_crn[idx_crn_nrt+0L]; lon_crn[idx_crn+2L]=lon_crn[idx_crn_nrt+1L]; lon_crn[idx_crn+3L]=lon_crn[idx_crn_nrt+0L]; } 
/* !mss_val */ } /* !lat_idx */ /* Got to here before giving up Idea was to interpolate missing cell corners between previous and next valid cell */ /* Algorithm assumes lon_wst never changes (too simple for displaced/tri_pole) */ lon_ctr_drv=lon_wst+lon_ncr*(lon_idx+0.5); lon_crn_drv=lon_wst+lon_ncr*lon_idx; if(lon_ctr_drv >= 360.0) lon_ctr_drv-=360.0; lat_ctr[idx_ctr]=lat_ctr_drv; lon_ctr[idx_ctr]=lon_ctr_drv; lat_crn[idx_crn+0L]=lat_crn[idx_crn+1L]=lat_crn_drv; lat_crn[idx_crn+2L]=lat_crn[idx_crn+3L]=lat_crn_drv+lat_ncr; lon_crn[idx_crn+0L]=lon_crn[idx_crn+3L]=lon_crn_drv; lon_crn[idx_crn+1L]=lon_crn[idx_crn+2L]=lon_crn_drv+lon_ncr; /* Branch-cut rule */ if(lon_crn_drv+lon_ncr >= 360.0){ lon_crn[idx_crn+0L]=lon_crn[idx_crn+3L]=lon_crn_drv-360.0; lon_crn[idx_crn+1L]=lon_crn[idx_crn+2L]=lon_crn_drv+lon_ncr-360.0; } /* !brnch */ } /* !mss_val */ } /* !lon_idx */ } /* !lat_idx */ if(lon_vld_nxt) lon_vld_nxt=(long *)nco_free(lon_vld_nxt); if(lon_vld_prv) lon_vld_prv=(long *)nco_free(lon_vld_prv); } /* !CICE */ if(lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT){ /* Interfaces (ntf) and boundaries (bnd) for curvilinear grids are ill-defined since sides need not follow latitudes nor meridians Simplest representation that contains equivalent information to interfaces/boundaries is grid corners array Diagnose grid corners from midpoints Most curvilinear data (e.g., WRF) is dimensioned lat x lon unlike SCRIP which uses lon x lat Hence we keep lat_ctr, lon_ctr, lat_crn, lon_crn with same order (likely lat x lon) as data file from which we infer grid Always use input order to write skeleton file Change that order, if necessary, to write SCRIP grid file In the interior of a curvilinear grid, nine points contribute to the four corners of a quadrilateral surrounding each center point These are the three points above the point, the three points at the same latitude, and the three points beneath the point In other words, a nine-point stencil is required to define the four 
corners inferred around each gridcell center It is cleanest to use this stencil only once for all cells in the "real"-grid, including those on the edges, not the interior For this to work cleanly we define an enlarged "fake"-grid where we pre-copy the values that lead to the desired extrapolation on "real"-grid edges Inspired by array-based solutions to integration of PDEs on meshes in Juri Toomre's class NB: implementation is not robust to missing value points in interior of grid. Hopefully grids have no missing values in coordinate variables, although they may have missing values in non-grid fields (e.g., mask, temperature) */ double *lat_ctr_fk; /* [dgr] Latitude grid with extrapolated boundaries necessary for 9-point template to find four grid corners for each real center */ double *lon_ctr_fk; /* [dgr] Longitude grid with extrapolated boundaries necessary for 9-point template to find four grid corners for each real center */ lat_ctr_fk=(double *)nco_malloc((lat_nbr+2)*(lon_nbr+2)*sizeof(double)); lon_ctr_fk=(double *)nco_malloc((lat_nbr+2)*(lon_nbr+2)*sizeof(double)); long int idx_rl; /* [idx] Index into real unrolled array */ long int idx_fk; /* [idx] Index into fake unrolled array */ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ /* lat idx on real grid */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ /* lon idx on real grid */ idx_rl=lat_idx*lon_nbr+lon_idx; idx_fk=(lat_idx+1)*(lon_nbr+2)+lon_idx+1; /* Copy real grid to interior of fake grid */ lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]; lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]; } /* !lon */ } /* !lat */ /* Formulae to extrapolate sides and corners of fake grid are written as a starting lat/lon plus or minus adjustment Adjustment is positive-definite if grid monotonically increases in latitude and longitude from LL to UR 20160111: Use macros/functions to determine longitude adjustments that are always less than 180 This ensures all longitudes contributing to extrapolated longitude are from same branch cut */ /* Bottom row */ 
lat_idx=0; /* lat idx of extrapolated point on fake grid */ for(lon_idx=1;lon_idx<lon_nbr+1;lon_idx++){ /* lon idx of extrapolated point on fake grid */ idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on bottom row of fake grid */ idx_rl=lat_idx*lon_nbr+lon_idx-1; /* 1D-offset of neighboring point on bottom row of real grid */ lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]-(lat_ctr[idx_rl+lon_nbr]-lat_ctr[idx_rl]); lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]-nco_lon_dff_brnch_dgr(lon_ctr[idx_rl+lon_nbr],lon_ctr[idx_rl]); } /* !lon */ /* Top row */ lat_idx=lat_nbr+1; /* lat idx of extrapolated point on fake grid */ for(lon_idx=1;lon_idx<lon_nbr+1;lon_idx++){ /* lon idx of extrapolated point on fake grid */ idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on top row of fake grid */ idx_rl=(lat_nbr-1)*lon_nbr+lon_idx-1; /* 1D-offset of neighboring point on top row of real grid */ lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]+(lat_ctr[idx_rl]-lat_ctr[idx_rl-lon_nbr]); lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]+nco_lon_dff_brnch_dgr(lon_ctr[idx_rl],lon_ctr[idx_rl-lon_nbr]); } /* !lon */ /* Left side */ lon_idx=0; /* lon idx of extrapolated point on fake grid */ for(lat_idx=1;lat_idx<lat_nbr+1;lat_idx++){ /* lat idx of extrapolated point on fake grid */ idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on left side of fake grid */ idx_rl=(lat_idx-1)*lon_nbr+lon_idx; /* 1D-offset of neighboring point on left side of real grid */ lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]-(lat_ctr[idx_rl+1]-lat_ctr[idx_rl]); lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]-nco_lon_dff_brnch_dgr(lon_ctr[idx_rl+1],lon_ctr[idx_rl]); } /* !lat */ /* Right side */ lon_idx=lon_nbr+1; /* lon idx of extrapolated point on fake grid */ for(lat_idx=1;lat_idx<lat_nbr+1;lat_idx++){ /* lat idx of extrapolated point on fake grid */ idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on right side of fake grid */ idx_rl=(lat_idx-1)*lon_nbr+lon_idx-2; /* 1D-offset of neighboring 
point on right side of real grid */ lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]+(lat_ctr[idx_rl]-lat_ctr[idx_rl-1]); lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]+nco_lon_dff_brnch_dgr(lon_ctr[idx_rl],lon_ctr[idx_rl-1]); } /* !lat */ /* LL */ lat_ctr_fk[0]=lat_ctr_fk[lon_nbr+2]-(lat_ctr_fk[2*(lon_nbr+2)]-lat_ctr_fk[lon_nbr+2]); lon_ctr_fk[0]=lon_ctr_fk[1]-nco_lon_dff_brnch_dgr(lon_ctr_fk[2],lon_ctr_fk[1]); /* LR */ lat_ctr_fk[lon_nbr+1]=lat_ctr_fk[2*(lon_nbr+2)-1]-(lat_ctr_fk[3*(lon_nbr+2)-1]-lat_ctr_fk[2*(lon_nbr+2)-1]); lon_ctr_fk[lon_nbr+1]=lon_ctr_fk[lon_nbr]+nco_lon_dff_brnch_dgr(lon_ctr_fk[lon_nbr],lon_ctr_fk[lon_nbr-1]); /* UR */ lat_ctr_fk[(lat_nbr+2)*(lon_nbr+2)-1]=lat_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-1]+(lat_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-1]-lat_ctr_fk[lat_nbr*(lon_nbr+2)-1]); lon_ctr_fk[(lat_nbr+2)*(lon_nbr+2)-1]=lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-2]+nco_lon_dff_brnch_dgr(lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-2],lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-3]); /* UL */ lat_ctr_fk[(lat_nbr+1)*(lon_nbr+2)]=lat_ctr_fk[lat_nbr*(lon_nbr+2)]+(lat_ctr_fk[lat_nbr*(lon_nbr+2)]-lat_ctr_fk[(lat_nbr-1)*(lon_nbr+2)]); lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)]=lon_ctr_fk[lat_nbr*(lon_nbr+2)+1]-nco_lon_dff_brnch_dgr(lon_ctr_fk[lat_nbr*(lon_nbr+2)+2],lon_ctr_fk[lat_nbr*(lon_nbr+2)+1]); if(nco_dbg_lvl_get() >= nco_dbg_std){ long idx_dbg; idx_dbg=rgr->idx_dbg; (void)fprintf(stderr,"%s: INFO %s idx_dbg = %li, Fake Center [lat,lon]=[%g,%g]\n",nco_prg_nm_get(),fnc_nm,idx_dbg,lat_ctr_fk[idx_dbg],lon_ctr_fk[idx_dbg]); } /* !dbg */ long int lat_idx_fk; /* [idx] Index into fake (extrapolated) latitude array */ long int lon_idx_fk; /* [idx] Index into fake (extrapolated) longitude array */ long int idx_fk_crn_ll_ctr_ll; long int idx_fk_crn_ll_ctr_lr; long int idx_fk_crn_ll_ctr_ur; long int idx_fk_crn_ll_ctr_ul; long int idx_fk_crn_lr_ctr_ll; long int idx_fk_crn_lr_ctr_lr; long int idx_fk_crn_lr_ctr_ur; long int idx_fk_crn_lr_ctr_ul; long int idx_fk_crn_ur_ctr_ll; long int idx_fk_crn_ur_ctr_lr; long int 
idx_fk_crn_ur_ctr_ur; long int idx_fk_crn_ur_ctr_ul; long int idx_fk_crn_ul_ctr_ll; long int idx_fk_crn_ul_ctr_lr; long int idx_fk_crn_ul_ctr_ur; long int idx_fk_crn_ul_ctr_ul; double *crn_lat; double *crn_lon; crn_lat=(double *)nco_malloc(grd_crn_nbr*sizeof(double)); crn_lon=(double *)nco_malloc(grd_crn_nbr*sizeof(double)); size_t wrn_nbr_max=20; size_t wrn_nbr=0; for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ /* 9-point template valid at all interior (non-edge) points in real grid, and at all points (including edges) in fake grid Read variables idx_crn_ll_ctr_ul as "index of upper left gridcell center that contributes to lower-left gridcell corner" Algorithms execute in counter-clockwise (CCW) direction: lower-left, lower-right, upper-right, upper-left lat_idx and lon_idx are true indices and are used to write into grd_crn_lat/lon arrays lat_idx_fk and lon_idx_fk are indices into fake arrays with extrapolated boundaries and are used to read data from fake arrays */ lon_idx_fk=lon_idx+1; lat_idx_fk=lat_idx+1; idx_rl=lat_idx*lon_nbr+lon_idx; idx_fk=lat_idx_fk*(lon_nbr+2)+lon_idx_fk; /* Determine index into fake array (valid everywhere it is applied) Comments after each equation are formula for real index (valid only at interior gridcells) */ idx_fk_crn_ll_ctr_ll=idx_fk-(lon_nbr+2)-1; // (lat_idx-1)*lon_nbr+lon_idx-1 idx_fk_crn_ll_ctr_lr=idx_fk-(lon_nbr+2); // (lat_idx-1)*lon_nbr+lon_idx idx_fk_crn_ll_ctr_ur=idx_fk; // lat_idx*lon_nbr+lon_idx idx_fk_crn_ll_ctr_ul=idx_fk-1; // lat_idx*lon_nbr+lon_idx-1; idx_fk_crn_lr_ctr_ll=idx_fk-(lon_nbr+2); // (lat_idx-1)*lon_nbr+lon_idx idx_fk_crn_lr_ctr_lr=idx_fk-(lon_nbr+2)+1; // (lat_idx-1)*lon_nbr+lon_idx+1 idx_fk_crn_lr_ctr_ur=idx_fk+1; // lat_idx*lon_nbr+lon_idx+1 idx_fk_crn_lr_ctr_ul=idx_fk; // lat_idx*lon_nbr+lon_idx; idx_fk_crn_ur_ctr_ll=idx_fk; // lat_idx*lon_nbr+lon_idx idx_fk_crn_ur_ctr_lr=idx_fk+1; // lat_idx*lon_nbr+lon_idx+1 idx_fk_crn_ur_ctr_ur=idx_fk+(lon_nbr+2)+1; // 
(lat_idx+1)*lon_nbr+lon_idx+1 idx_fk_crn_ur_ctr_ul=idx_fk+(lon_nbr+2); // (lat_idx+1)*lon_nbr+lon_idx; idx_fk_crn_ul_ctr_ll=idx_fk-1; // lat_idx*lon_nbr+lon_idx-1 idx_fk_crn_ul_ctr_lr=idx_fk; // lat_idx*lon_nbr+lon_idx idx_fk_crn_ul_ctr_ur=idx_fk+(lon_nbr+2); // (lat_idx+1)*lon_nbr+lon_idx idx_fk_crn_ul_ctr_ul=idx_fk+(lon_nbr+2)-1; // (lat_idx+1)*lon_nbr+lon_idx-1; /* 20160111: Algorithm requires that all longitudes in template be on same "branch cut" If, say, LL longitude is 179.0 and LR longitude is -179.0 then their sum and average are zero, not 180.0 or -180.0 as desired Routines labeled "*_brnch" in the following ensure that branch-cut rules are followed */ idx_crn_ll=grd_crn_nbr*idx_rl+0; lat_crn[idx_crn_ll]=0.25*(lat_ctr_fk[idx_fk_crn_ll_ctr_ll]+lat_ctr_fk[idx_fk_crn_ll_ctr_lr]+lat_ctr_fk[idx_fk_crn_ll_ctr_ur]+lat_ctr_fk[idx_fk_crn_ll_ctr_ul]); lon_crn[idx_crn_ll]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_ll_ctr_ll],lon_ctr_fk[idx_fk_crn_ll_ctr_lr],lon_ctr_fk[idx_fk_crn_ll_ctr_ur],lon_ctr_fk[idx_fk_crn_ll_ctr_ul]); idx_crn_lr=grd_crn_nbr*idx_rl+1; lat_crn[idx_crn_lr]=0.25*(lat_ctr_fk[idx_fk_crn_lr_ctr_ll]+lat_ctr_fk[idx_fk_crn_lr_ctr_lr]+lat_ctr_fk[idx_fk_crn_lr_ctr_ur]+lat_ctr_fk[idx_fk_crn_lr_ctr_ul]); lon_crn[idx_crn_lr]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_lr_ctr_ll],lon_ctr_fk[idx_fk_crn_lr_ctr_lr],lon_ctr_fk[idx_fk_crn_lr_ctr_ur],lon_ctr_fk[idx_fk_crn_lr_ctr_ul]); idx_crn_ur=grd_crn_nbr*idx_rl+2; lat_crn[idx_crn_ur]=0.25*(lat_ctr_fk[idx_fk_crn_ur_ctr_ll]+lat_ctr_fk[idx_fk_crn_ur_ctr_lr]+lat_ctr_fk[idx_fk_crn_ur_ctr_ur]+lat_ctr_fk[idx_fk_crn_ur_ctr_ul]); lon_crn[idx_crn_ur]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_ur_ctr_ll],lon_ctr_fk[idx_fk_crn_ur_ctr_lr],lon_ctr_fk[idx_fk_crn_ur_ctr_ur],lon_ctr_fk[idx_fk_crn_ur_ctr_ul]); idx_crn_ul=grd_crn_nbr*idx_rl+3; lat_crn[idx_crn_ul]=0.25*(lat_ctr_fk[idx_fk_crn_ul_ctr_ll]+lat_ctr_fk[idx_fk_crn_ul_ctr_lr]+lat_ctr_fk[idx_fk_crn_ul_ctr_ur]+lat_ctr_fk[idx_fk_crn_ul_ctr_ul]); 
lon_crn[idx_crn_ul]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_ul_ctr_ll],lon_ctr_fk[idx_fk_crn_ul_ctr_lr],lon_ctr_fk[idx_fk_crn_ul_ctr_ur],lon_ctr_fk[idx_fk_crn_ul_ctr_ul]); crn_lat[0]=lat_crn[idx_crn_ll]; crn_lat[1]=lat_crn[idx_crn_lr]; crn_lat[2]=lat_crn[idx_crn_ur]; crn_lat[3]=lat_crn[idx_crn_ul]; crn_lon[0]=lon_crn[idx_crn_ll]; crn_lon[1]=lon_crn[idx_crn_lr]; crn_lon[2]=lon_crn[idx_crn_ur]; crn_lon[3]=lon_crn[idx_crn_ul]; flg_ccw=nco_ccw_chk(crn_lat,crn_lon,grd_crn_nbr,idx_ccw,rcr_lvl); if(!flg_ccw && wrn_nbr < wrn_nbr_max){ (void)fprintf(stdout,"%s: %s WARNING reports non-CCW gridcell at idx=%li, (lat,lon)_idx=(%li,%li), (lat,lon) = (%g, %g)\n",nco_prg_nm_get(),fnc_nm,idx_rl,lat_idx,lon_idx,lat_ctr[lat_idx],lon_ctr[lon_idx]); wrn_nbr++; if(wrn_nbr == wrn_nbr_max) (void)fprintf(stdout,"%s: %s INFO Number of non-CCW errors reached maximum = %li, not printing anymore\n",nco_prg_nm_get(),fnc_nm,wrn_nbr_max); } /* endif */ lat_crn[idx_crn_ll]=crn_lat[0]; lat_crn[idx_crn_lr]=crn_lat[1]; lat_crn[idx_crn_ur]=crn_lat[2]; lat_crn[idx_crn_ul]=crn_lat[3]; lon_crn[idx_crn_ll]=crn_lon[0]; lon_crn[idx_crn_lr]=crn_lon[1]; lon_crn[idx_crn_ur]=crn_lon[2]; lon_crn[idx_crn_ul]=crn_lon[3]; } /* !lon */ } /* !lat */ if(lat_ctr_fk) lat_ctr_fk=(double *)nco_free(lat_ctr_fk); if(lon_ctr_fk) lon_ctr_fk=(double *)nco_free(lon_ctr_fk); if(crn_lon) crn_lon=(double *)nco_free(crn_lon); if(crn_lat) crn_lat=(double *)nco_free(crn_lat); } /* !(lat_bnd_id && lon_bnd_id) */ } /* !flg_grd_crv */ if(flg_1D_psd_rct_bnd){ double lon_brnch_min; double lon_brnch_max; double lon_dff; assert(grd_crn_nbr == 4); /* Make boundaries that were provided as pseudo-rectangular branch-cut-compliant */ for(col_idx=0;col_idx<col_nbr;col_idx++){ lon_brnch_min=(lon_bnd[2*col_idx] <= lon_bnd[2*col_idx+1]) ? lon_bnd[2*col_idx] : lon_bnd[2*col_idx+1]; lon_brnch_max=(lon_bnd[2*col_idx] >= lon_bnd[2*col_idx+1]) ? 
lon_bnd[2*col_idx] : lon_bnd[2*col_idx+1]; lon_dff=lon_brnch_max-lon_brnch_min; if(lon_dff >= 180.0){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: INFO %s reports 1D pseudo-rectangular bounds branch-cut straddle at col_idx=%ld lon_brnch_max, lon_brnch_min, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,col_idx,lon_brnch_max,lon_brnch_min,lon_dff); lon_brnch_max-=360.0; }else if(lon_dff <= -180.0){ lon_brnch_max+=360.0; } /* !lon_dff */ /* Extra condition to convert CW bounds to CCW bounds (necessary for OCO2) */ if(lon_brnch_min <= lon_brnch_max){ lon_bnd[2*col_idx]=lon_brnch_min; lon_bnd[2*col_idx+1]=lon_brnch_max; }else{ lon_bnd[2*col_idx]=lon_brnch_max; lon_bnd[2*col_idx+1]=lon_brnch_min; } /* end else */ } /* !col_idx */ /* Convert boundaries that were provided as pseudo-rectangular to corners */ for(col_idx=0;col_idx<col_nbr;col_idx++){ idx=grd_crn_nbr*col_idx; /* fxm: OCO2 provides boundaries in CW not CCW orientation */ lon_crn[idx]=lon_bnd[2*col_idx]; /* LL */ lon_crn[idx+1]=lon_bnd[2*col_idx+1]; /* LR */ lon_crn[idx+2]=lon_bnd[2*col_idx+1]; /* UR */ lon_crn[idx+3]=lon_bnd[2*col_idx]; /* UL */ lat_crn[idx]=lat_bnd[2*col_idx]; /* LL */ lat_crn[idx+1]=lat_bnd[2*col_idx]; /* LR */ lat_crn[idx+2]=lat_bnd[2*col_idx+1]; /* UR */ lat_crn[idx+3]=lat_bnd[2*col_idx+1]; /* UL */ /* fxm: OCO2 provides boundaries in CW not CCW orientation */ } /* !col_idx */ } /* flg_1D_psd_rct_bnd */ if(flg_grd_crv || flg_1D_psd_rct_bnd){ /* As of 20160308, use same sanity check for 1D pseudo-rectangular grids as for curvilinear grids Pseudo-rectangular grids rely on user-produced boundaries that may be psychotic (CW, non-branch-cut) Starting 20151205, use same sanity check for both inferred and copied curvilinear grids 20151129: Curvilinear extrapolation technique above yields corners outside [-90.0,90.0], [-180.0,360.0] Also, it may assume input is ascending swath and fail for descending swaths Complications not fully addressed: Swaths may (verify this) turn from 
ascending to descending, or visa-versa, when satellite crosses latitude extrema Swaths may cross the date-line (and back!) */ /* Determine numeric bounds of input coordinate system */ double lon_min_min; double lon_max_max; nco_bool NCO_LON_0_TO_360=True; if(has_mss_val_ctr){ for(idx=0;idx<grd_sz_nbr;idx++) if(lon_ctr[idx] != mss_val_ctr_dbl && lon_ctr[idx] < 0.0) break; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(lon_ctr[idx] < 0.0) break; } /* !has_mss_val_ctr */ if(idx != grd_sz_nbr) NCO_LON_0_TO_360=False; if(NCO_LON_0_TO_360){ lon_min_min=0.0; lon_max_max=360.0; }else{ lon_min_min=-180.0; lon_max_max=180.0; } /* !NCO_LON_0_TO_360 */ /* Correct for extrapolation outside boundaries */ for(idx=0;idx<grd_sz_nbr*grd_crn_nbr;idx++){ idx_ctr=idx/grd_crn_nbr; if(has_mss_val_ctr) if(lat_ctr[idx_ctr] == mss_val_ctr_dbl) continue; if(lat_crn[idx] < -90.0 || lat_crn[idx] > 90.0 || lon_crn[idx] < lon_min_min || lon_crn[idx] > lon_max_max){ idx_crn_ll=grd_crn_nbr*idx_ctr+0; idx_crn_lr=grd_crn_nbr*idx_ctr+1; idx_crn_ur=grd_crn_nbr*idx_ctr+2; idx_crn_ul=grd_crn_nbr*idx_ctr+3; if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stderr,"%s: INFO %s reports %s corner outside canonical bounds at idx = %li, Center [lat,lon]=[%g,%g]; Corners LL [%g,%g] LR [%g,%g] UR [%g,%g] UL [%g,%g]\n",nco_prg_nm_get(),fnc_nm,(lat_bnd_id == NC_MIN_INT) ? 
"inferred" : "copied",idx_ctr,lat_ctr[idx_ctr],lon_ctr[idx_ctr],lat_crn[idx_crn_ll],lon_crn[idx_crn_ll],lat_crn[idx_crn_lr],lon_crn[idx_crn_lr],lat_crn[idx_crn_ur],lon_crn[idx_crn_ur],lat_crn[idx_crn_ul],lon_crn[idx_crn_ul]); /* Restrict grid to real latitudes and to the 360-degree range detected from input cell-centers */ if(lat_crn[idx] < -90.0) lat_crn[idx]=-90.0; if(lat_crn[idx] > 90.0) lat_crn[idx]=90.0; if(lon_crn[idx] < lon_min_min) lon_crn[idx]+=360.0; if(lon_crn[idx] > lon_max_max) lon_crn[idx]-=360.0; } /* !sanity */ } /* !idx */ /* Vertices (for valid points) are now within 360 degrees (either [0,360] or [-180,180]) implied by input coordinate system Curvilinear inferred grid are, by construction, branch-cut compliant fxm: Curvilinear and 1D pseudo-rectangular grids prescribed by (i.e., read-in from) input may not be branch-cut compliant */ if(nco_dbg_lvl_get() >= nco_dbg_std){ long idx_dbg; idx_dbg=rgr->idx_dbg; idx_crn_ll=grd_crn_nbr*idx_dbg+0; idx_crn_lr=grd_crn_nbr*idx_dbg+1; idx_crn_ur=grd_crn_nbr*idx_dbg+2; idx_crn_ul=grd_crn_nbr*idx_dbg+3; (void)fprintf(stderr,"%s: INFO %s idx_dbg = %li, Center [lat,lon]=[%g,%g]; Corners LL [%g,%g] LR [%g,%g] UR [%g,%g] UL [%g,%g]\n",nco_prg_nm_get(),fnc_nm,idx_dbg,lat_ctr[idx_dbg],lon_ctr[idx_dbg],lat_crn[idx_crn_ll],lon_crn[idx_crn_ll],lat_crn[idx_crn_lr],lon_crn[idx_crn_lr],lat_crn[idx_crn_ur],lon_crn[idx_crn_ur],lat_crn[idx_crn_ul],lon_crn[idx_crn_ul]); } /* !dbg */ } /* !flg_grd_crv || flg_1D_psd_rct_bnd */ if(flg_grd_crv){ /* Copy centers into empty output array */ for(idx=0;idx<grd_sz_nbr;idx++){ grd_ctr_lat[idx]=lat_ctr[idx]; grd_ctr_lon[idx]=lon_ctr[idx]; } /* !idx */ /* Copy inferred or copied (from input) sanity-checked corners into empty output array */ for(idx=0;idx<grd_sz_nbr*grd_crn_nbr;idx++){ grd_crn_lat[idx]=lat_crn[idx]; grd_crn_lon[idx]=lon_crn[idx]; } /* !idx */ } /* !flg_grd_crv */ /* 20150512 Many 2D datasets have bad bounds Primary example is Gaussian grids archived by CESM models that use 
midpoint rule rather than iterate to compute interfaces from quadrature points Such files have correct gw arrays and incorrect cell bounds flg_dgn_bnd allows nco_grd_nfr() to override faulty boundaries in file with correct bounds */ const nco_bool flg_dgn_bnd=rgr->flg_dgn_bnd; /* [flg] Diagnose rather than copy inferred bounds */ const long lat_nbr_hlf=lat_nbr/2L; // [nbr] Half number of latitudes (e.g., lat_nbr_hlf=32 for lat_nbr=64 and 65) if(flg_grd_2D){ if(flg_dgn_bnd || (lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT)){ if(nco_dbg_lvl_get() >= nco_dbg_std && flg_dgn_bnd) (void)fprintf(stdout,"%s: INFO %s will diagnose cell boundaries from cell centers...\n",nco_prg_nm_get(),fnc_nm); /* Derive interfaces (ntf) and bounds (bnd) from midpoints approximation applied to center data NB: Simplistically derived interfaces (ntf) only valid on some rectangular grids (not on Gaussian grids) These inferred-from-midpoint interfaces/bounds are overwritten in next block once lat grid is known */ if(flg_s2n) lat_ntf[0L]=lat_ctr[0L]-0.5*(lat_ctr[1L]-lat_ctr[0L]); else lat_ntf[0L]=lat_ctr[0L]+0.5*(lat_ctr[0L]-lat_ctr[1L]); if(lat_ntf[0L] < -90.0) lat_ntf[0L]=-90.0; /* NB: lat_ntf[0] can be same as lat_ctr[0] for cap grid */ if(lat_ntf[0L] > 90.0) lat_ntf[0L]=90.0; for(lat_idx=0L;lat_idx<lat_nbr-1L;lat_idx++) lat_ntf[lat_idx+1L]=0.5*(lat_ctr[lat_idx]+lat_ctr[lat_idx+1L]); if(flg_s2n) lat_ntf[lat_nbr]=lat_ctr[lat_nbr-1L]+0.5*(lat_ctr[lat_nbr-1L]-lat_ctr[lat_nbr-2L]); else lat_ntf[lat_nbr]=lat_ctr[lat_nbr-1L]-0.5*(lat_ctr[lat_nbr-2L]-lat_ctr[lat_nbr-1L]); if(lat_ntf[lat_nbr] > 90.0) lat_ntf[lat_nbr]=90.0; /* NB: lat_ntf[lat_nbr] can be same as lat_ctr[lat_nbr-1] for cap grid */ if(lat_ntf[lat_nbr] < -90.0) lat_ntf[lat_nbr]=-90.0; /* NB: lat_ntf[lat_nbr] can be same as lat_ctr[lat_nbr-1] for cap grid */ if(flg_s2n) lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]); /* fabs() ensures positive-definite span for N->S grids */ lon_ntf[0L]=lon_ctr[0L]-0.5*(lon_ctr[1L]-lon_ctr[0L]); 
for(lon_idx=0;lon_idx<lon_nbr-1L;lon_idx++) lon_ntf[lon_idx+1L]=0.5*(lon_ctr[lon_idx]+lon_ctr[lon_idx+1L]); lon_ntf[lon_nbr]=lon_ctr[lon_nbr-1L]+0.5*(lon_ctr[lon_nbr-1L]-lon_ctr[lon_nbr-2L]); lon_spn=lon_ntf[lon_nbr]-lon_ntf[0L]; for(idx=0;idx<lon_nbr;idx++){ lon_bnd[2L*idx]=lon_ntf[idx]; lon_bnd[2L*idx+1L]=lon_ntf[idx+1L]; } /* !idx */ for(idx=0;idx<lat_nbr;idx++){ lat_bnd[2L*idx]=lat_ntf[idx]; lat_bnd[2L*idx+1L]=lat_ntf[idx+1L]; } /* !idx */ }else{ /* !(lat_bnd_id && lon_bnd_id) */ /* Derive interfaces (ntf) from bounds (bnd) data on disk */ for(idx=0;idx<lon_nbr;idx++) lon_ntf[idx]=lon_bnd[2L*idx]; lon_ntf[lon_nbr]=lon_bnd[2L*lon_nbr-1L]; for(idx=0;idx<lat_nbr;idx++) lat_ntf[idx]=lat_bnd[2L*idx]; lat_ntf[lat_nbr]=lat_bnd[2L*lat_nbr-1L]; lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]); /* fabs() ensures positive-definite span for N->S grids */ lon_spn=lon_ntf[lon_nbr]-lon_ntf[0L]; } /* !(lat_bnd_id && lon_bnd_id) */ } /* !flg_grd_2D */ if(flg_grd_2D){ /* Diagnose type of two-dimensional input grid by testing second latitude center against formulae */ double lat_ctr_tst_eqa; double lat_ctr_tst_fv; if(flg_s2n) lat_ctr_tst_eqa=lat_ntf[0L]+lat_spn*1.5/lat_nbr; else lat_ctr_tst_eqa=lat_ntf[0L]-lat_spn*1.5/lat_nbr; if(flg_s2n) lat_ctr_tst_fv=lat_ntf[0L]+lat_spn/(lat_nbr-1L); else lat_ctr_tst_fv=lat_ntf[0L]-lat_spn/(lat_nbr-1L); double lat_ctr_tst_gss; /* In diagnosing grids, agreement with input to single-precision is "good enough for government work" Hence some comparisons cast from double to float before comparison 20150526: T42 grid from SCRIP and related maps are only accurate to ~eight digits 20150611: map_ne120np4_to_fv801x1600_bilin.150418.nc has yc_b[1600]=-89.775000006 not expected exact value lat_ctr[1]=-89.775000000000006 20170521: T62 grid from NCEP-NCAR Reanalysis 1 worse than single precision, has yc_[192]=-86.6531 not expected exact value lat_ctr[1]=-86.6531671712612, relative difference is 7.86021e-07 20191008: T62 grid from NCEP-NCAR Reanalysis 2 worse than 
single precision, has yc_[92]=-86.6531 not expected exact value lat_ctr[1]=-86.6531671712612, relative difference is 7.86021e-07 */ if(nco_dbg_lvl_get() >= nco_dbg_scl && !flg_s2n) (void)fprintf(stderr,"%s: INFO %s reports that grid inferral has detected a 2D grid that runs from north-to-south, not south-to-north. Support for creating/inferring 2D N-to-S grids was added in NCO 4.7.7 (September, 2018) and should work fine.\nHINT: If present command fails, report problem to developers and then re-try inferring grid after reversing input dataset's latitude coordinate (with, e.g., ncpdq -a time,-lat,lon in.nc out.nc)\n",nco_prg_nm_get(),fnc_nm); if((float)lat_ctr[1L] == (float)lat_ctr_tst_eqa) lat_typ=nco_grd_lat_eqa; if((float)lat_ctr[1L] == (float)lat_ctr_tst_fv) lat_typ=nco_grd_lat_fv; double *lat_sin=NULL_CEWI; // [frc] Sine of Gaussian latitudes double precision double *wgt_Gss=NULL; // [frc] Gaussian weights double precision if(lat_typ == nco_grd_lat_nil){ /* Check for Gaussian grid */ lat_sin=(double *)nco_malloc(lat_nbr*sizeof(double)); wgt_Gss=(double *)nco_malloc(lat_nbr*sizeof(double)); (void)nco_lat_wgt_gss(lat_nbr,flg_s2n,lat_sin,wgt_Gss); lat_ctr_tst_gss=rdn2dgr*asin(lat_sin[1L]); /* Gaussian weights on output grid will be double-precision accurate Grid itself is kept as user-specified so area diagnosed by ESMF_RegridWeightGen may be slightly inconsistent with weights */ const double eps_rlt_cnv_gss=1.0e-6; // Convergence criterion (1.0e-7 fails for NCEP NCAR Reanalysis 1!) 
if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: DEBUG %s reports lat_ctr[1]=%g, lat_ctr_tst_gss=%g, fabs(1.0-fabs(lat_ctr[1]/lat_ctr_tst_gss))=%g\n",nco_prg_nm_get(),fnc_nm,lat_ctr[1],lat_ctr_tst_gss,fabs(1.0-fabs(lat_ctr[1]/lat_ctr_tst_gss))); if(fabs(1.0-fabs(lat_ctr[1]/lat_ctr_tst_gss)) < eps_rlt_cnv_gss) lat_typ=nco_grd_lat_gss; } /* !Gaussian */ if(lat_typ == nco_grd_lat_nil){ /* If still of unknown type, this 2D grid may be weird This occurs, e.g., with POP3 destination grid Change gridtype from nil (which means not-yet-set) to unknown (which means none of the others matched) */ lat_typ=nco_grd_lat_unk; } /* !nil */ /* Currently grd_lat_typ and grd_2D_typ are equivalent, though that may be relaxed in future */ if(lat_typ == nco_grd_lat_unk) grd_typ=nco_grd_2D_unk; else if(lat_typ == nco_grd_lat_gss) grd_typ=nco_grd_2D_gss; else if(lat_typ == nco_grd_lat_fv) grd_typ=nco_grd_2D_fv; else if(lat_typ == nco_grd_lat_eqa) grd_typ=nco_grd_2D_eqa; else assert(False); /* Diagnose latitude interfaces from gridcell centers (if boundaries not provided) */ if(flg_dgn_bnd || (lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT)){ //if(flg_s2n) lat_nrt=lat_ntf[lat_nbr]; else lat_nrt=lat_ntf[0L]; lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]); switch(lat_typ){ case nco_grd_lat_fv: lat_ncr=lat_spn/(lat_nbr-1L); if(flg_s2n) lat_ntf[1L]=lat_ntf[0L]+0.5*lat_ncr; else lat_ntf[1L]=lat_ntf[0L]-0.5*lat_ncr; for(lat_idx=2;lat_idx<lat_nbr;lat_idx++) if(flg_s2n) lat_ntf[lat_idx]=lat_ntf[1L]+(lat_idx-1L)*lat_ncr; else lat_ntf[lat_idx]=lat_ntf[1L]-(lat_idx-1L)*lat_ncr; break; case nco_grd_lat_eqa: lat_ncr=lat_spn/lat_nbr; for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++) if(flg_s2n) lat_ntf[lat_idx]=lat_ntf[0L]+lat_idx*lat_ncr; else lat_ntf[lat_idx]=lat_ntf[0L]-lat_idx*lat_ncr; break; case nco_grd_lat_gss: for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_ctr[lat_idx]=rdn2dgr*asin(lat_sin[lat_idx]); /* First guess for lat_ntf is midway between Gaussian abscissae */ 
for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++) lat_ntf[lat_idx]=0.5*(lat_ctr[lat_idx-1L]+lat_ctr[lat_idx]); /* Iterate guess until area between interfaces matches Gaussian weight */ for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++){ double fofx_at_x0; /* [frc] Function to iterate evaluated at current guess */ double dfdx_at_x0; /* [frc] Derivative of equation evaluated at current guess */ // 20190531: Wuyin Lin reports this convergence criterion fails on ECMWF F640 grid // Probably because latitude iterations assume s2n grid and ECMWF is n2s // Possibly also because latitude coordinates are stored in single precision // Implement precision-dependent convergence criterion, e.g., 1.0e-15 and 1.0e-7 for double- and single-precision, respectively? const double eps_rlt_cnv=1.0e-15; // Convergence criterion (1.0e-16 pushes double precision to the brink) itr_cnt=0; lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L])); fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss; while(fabs(fofx_at_x0) > eps_rlt_cnv){ /* Newton-Raphson iteration: Let x=lat_ntf[lat_idx], y0=lat_ntf[lat_idx-1], gw = Gaussian weight (exact solution) f(x)=sin(dgr2rdn*x)-sin(dgr2rdn*y0)-gw=0 # s2n grid f(x)=sin(dgr2rdn*y0)-sin(dgr2rdn*x)-gw=0 # n2s grid dfdx(x)= dgr2rdn*cos(dgr2rdn*x) # s2n grid dfdx(x)=-dgr2rdn*cos(dgr2rdn*x) # n2s grid x_better=x0-f(x0)/f'(x0) */ dfdx_at_x0=dgr2rdn*cos(dgr2rdn*lat_ntf[lat_idx]); if(!flg_s2n) dfdx_at_x0=-dfdx_at_x0; lat_ntf[lat_idx]+=fofx_at_x0/dfdx_at_x0; /* NB: not sure why this is minus not plus but it works :) */ lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L])); fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss; if(++itr_cnt > itr_nbr_max){ (void)fprintf(stdout,"%s: ERROR %s reports convergence only %g after %d iterations for lat_idx = %ld\n",nco_prg_nm_get(),fnc_nm,fabs(fofx_at_x0),itr_nbr_max,lat_idx); nco_exit(EXIT_FAILURE); } /* endif */ } /* !while */ } /* !lat_idx */ /* Use Gaussian grid symmetry to obtain same interfaces in 
both hemispheres (avoids cumulative rounding errors) */ if(lat_nbr%2){ /* lat_nbr is odd */ for(lat_idx=1L;lat_idx<=lat_nbr_hlf+1L;lat_idx++) lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx+1L]; }else{ /* lat_nbr is even */ for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++) lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx]; } /* !flg_lat_evn */ if(lat_sin) lat_sin=(double *)nco_free(lat_sin); break; case nco_grd_lat_unk: /* No generic formula exists so use interfaces already read or diagnosed as midpoints between centers */ break; default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ if(lat_typ == nco_grd_lat_gss){ /* 20170510: First approximation above to exterior interfaces for Gaussian grid are ~ +/-89 degrees Loops below recompute interior interfaces only Southern- and northern-most interfaces must be explicitly assigned Inferral test for Gaussian grid _assumes_ global grid Hence WLOG can assign [-90.0, 90.0] to Gaussian grid exterior boundaries */ if(flg_s2n) lat_ntf[0L]=-90.0; else lat_ntf[0L]=90.0; if(flg_s2n) lat_ntf[lat_nbr]=90.0; else lat_ntf[lat_nbr]=-90.0; } /* !nco_grd_lat_gss */ /* Now that final latitude interfaces are known for all grid-types, assign to boundaries, overwriting provisional values stored there earlier */ for(idx=0;idx<lat_nbr;idx++){ lat_bnd[2L*idx]=lat_ntf[idx]; lat_bnd[2L*idx+1L]=lat_ntf[idx+1L]; } /* !idx */ } /* !(lat_bnd_id && lon_bnd_id) */ /* Use centers and boundaries to diagnose latitude weights */ switch(lat_typ){ case nco_grd_lat_eqa: case nco_grd_lat_fv: for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=fabs(sin(dgr2rdn*lat_ntf[lat_idx+1L])-sin(dgr2rdn*lat_ntf[lat_idx])); break; case nco_grd_lat_gss: for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=wgt_Gss[lat_idx]; break; case nco_grd_lat_unk: for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=fabs(sin(dgr2rdn*lat_ntf[lat_idx+1L])-sin(dgr2rdn*lat_ntf[lat_idx])); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: 
WARNING %s reports unknown input latitude grid-type. Guessing that weights for grid of rectangles is OK.\n",nco_prg_nm_get(),fnc_nm); break; default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ /* Diagnose type of longitude grid by testing second longitude center against formulae */ lon_spn=lon_ntf[lon_nbr]-lon_ntf[0L]; lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]); if((float)lon_spn == 360.0f && (float)lat_spn == 180.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn; if(lon_typ == nco_grd_lon_nil){ if( (float)lon_ctr[0L] == 0.0f && (float)lon_ctr[1L] == (float)(lon_ctr[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_Grn_ctr; else if((float)lon_ctr[0L] == -180.0f && (float)lon_ctr[1L] == (float)(lon_ctr[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_180_ctr; else if((float)lon_ntf[0L] == 0.0f && (float)lon_ntf[1L] == (float)(lon_ntf[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_Grn_wst; else if((float)lon_ntf[0L] == -180.0f && (float)lon_ntf[1L] == (float)(lon_ntf[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_180_wst; else if((float)lon_ctr[1L] == (float)(lon_ctr[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_bb; else lon_typ=nco_grd_lon_unk; } /* !lon_typ */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input 2D grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_2D_sng(grd_typ)); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input latitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lat_sng(lat_typ)); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input longitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lon_sng(lon_typ)); } /* !flg_grd_2D */ if(flg_grd_2D){ if(nco_dbg_lvl_get() >= nco_dbg_crr){ for(idx=0;idx<lat_nbr;idx++){ (void)fprintf(stdout,"lat[%li] = %g, vertices = ",idx,lat_ctr[idx]); for(bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lat_bnd[bnd_nbr*idx+bnd_idx],bnd_idx == bnd_nbr-1 ? 
"]\n" : ", "); } /* end loop over lat */ for(idx=0;idx<lon_nbr;idx++){ (void)fprintf(stdout,"lon[%li] = %g, vertices = ",idx,lon_ctr[idx]); for(bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lon_bnd[bnd_nbr*idx+bnd_idx],bnd_idx == bnd_nbr-1 ? "]\n" : ", "); } /* end loop over lon */ } /* endif dbg */ /* Fuzzy test of latitude weight normalization */ //const double eps_rlt_max=1.0e-14; /* [frc] Round-off error tolerance: Used 1.0e-14 until 20180904 */ const double eps_rlt_max=1.0e-12; /* [frc] Round-off error tolerance: Used 1.0e-12 since 20180904 */ double lat_wgt_ttl_xpc; /* [frc] Expected sum of latitude weights */ lat_wgt_ttl=0.0; for(idx=0;idx<lat_nbr;idx++) lat_wgt_ttl+=lat_wgt[idx]; lat_wgt_ttl_xpc=fabs(sin(dgr2rdn*lat_bnd[2*(lat_nbr-1)+1L])-sin(dgr2rdn*lat_bnd[0L])); if(grd_typ != nco_grd_2D_unk && 1.0-lat_wgt_ttl/lat_wgt_ttl_xpc > eps_rlt_max){ (void)fprintf(stdout,"%s: ERROR %s reports grid normalization does not meet precision tolerance eps_rlt_max = %20.15f\nlat_wgt_ttl = %20.15f, lat_wgt_ttl_xpc = %20.15f, lat_wgt_frc = %20.15f, eps_rlt = %20.15f\n",nco_prg_nm_get(),fnc_nm,eps_rlt_max,lat_wgt_ttl,lat_wgt_ttl_xpc,lat_wgt_ttl/lat_wgt_ttl_xpc,1.0-lat_wgt_ttl/lat_wgt_ttl_xpc); nco_exit(EXIT_FAILURE); } /* !imprecise */ } /* !flg_grd_2D */ if(flg_grd_2D){ assert(grd_crn_nbr == 4); if(flg_dgn_bnd || (lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT)){ /* If interfaces were diagnosed from centers, copy corners from interfaces */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ idx=grd_crn_nbr*lon_idx; lon_crn[idx]=lon_ntf[lon_idx]; /* LL */ lon_crn[idx+1L]=lon_ntf[lon_idx+1L]; /* LR */ lon_crn[idx+2L]=lon_ntf[lon_idx+1L]; /* UR */ lon_crn[idx+3L]=lon_ntf[lon_idx]; /* UL */ } /* !lon_idx */ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ idx=grd_crn_nbr*lat_idx; lat_crn[idx]=lat_ntf[lat_idx]; /* LL */ lat_crn[idx+1L]=lat_ntf[lat_idx]; /* LR */ lat_crn[idx+2L]=lat_ntf[lat_idx+1L]; /* UR */ lat_crn[idx+3L]=lat_ntf[lat_idx+1L]; 
/* UL */ } /* !lat_idx */ }else{ /* !lat_bnd_id */ /* If boundaries were provided in input dataset, copy corners from boundaries */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ idx=grd_crn_nbr*lon_idx; lon_crn[idx]=lon_bnd[2*lon_idx]; /* LL */ lon_crn[idx+1L]=lon_bnd[2*lon_idx+1L]; /* LR */ lon_crn[idx+2L]=lon_bnd[2*lon_idx+1L]; /* UR */ lon_crn[idx+3L]=lon_bnd[2*lon_idx]; /* UL */ } /* !lon_idx */ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ idx=grd_crn_nbr*lat_idx; lat_crn[idx]=lat_bnd[2*lat_idx]; /* LL */ lat_crn[idx+1L]=lat_bnd[2*lat_idx]; /* LR */ lat_crn[idx+2L]=lat_bnd[2*lat_idx+1L]; /* UR */ lat_crn[idx+3L]=lat_bnd[2*lat_idx+1L]; /* UL */ } /* !lat_idx */ } /* !lat_bnd_id */ } /* !flg_grd_2D */ /* lat/lon_crn will not change anymore so stuff rectangular arrays into unrolled arrays */ if(flg_grd_1D){ for(idx=0;idx<grd_sz_nbr;idx++){ grd_ctr_lat[idx]=lat_ctr[idx]; grd_ctr_lon[idx]=lon_ctr[idx]; if(flg_wrt_crn){ for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; grd_crn_lat[idx2]=lat_crn[idx2]; grd_crn_lon[idx2]=lon_crn[idx2]; } /* !crn */ }else{ /* !flg_wrt_crn */ /* Defaults for ERWG when corners are unknown */ for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; grd_crn_lat[idx2]=0.0; grd_crn_lon[idx2]=0.0; } /* !crn */ } /* !flg_wrt_crn */ } /* !col */ } /* !flg_grd_1D */ if(flg_grd_2D){ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ idx=lat_idx*lon_nbr+lon_idx; grd_ctr_lat[idx]=lat_ctr[lat_idx]; grd_ctr_lon[idx]=lon_ctr[lon_idx]; for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; lat_idx2=lat_idx*grd_crn_nbr+crn_idx; lon_idx2=lon_idx*grd_crn_nbr+crn_idx; grd_crn_lat[idx2]=lat_crn[lat_idx2]; grd_crn_lon[idx2]=lon_crn[lon_idx2]; } /* !crn */ } /* !lon */ } /* !lat */ /* 20190613: Convert CW quadrilaterals to CCW quadrilaterals so TempestRemap accepts grids Default construction/inferral method orders corners CCW and CW for s2n and n2s grids, respectively 
*/ if(!flg_s2n){ for(idx=0L;idx<grd_sz_nbr;idx++){ idx2=grd_crn_nbr*idx; flg_ccw=nco_ccw_chk(grd_crn_lat+idx2,grd_crn_lon+idx2,grd_crn_nbr,idx_ccw,rcr_lvl); } /* !idx */ } /* !flg_s2n */ } /* !flg_grd_2D */ /* Find span of all grids */ double lat_max; /* [dgr] Maximum latitude */ double lat_min; /* [dgr] Minimum latitude */ double lon_max; /* [dgr] Maximum longitude */ double lon_min; /* [dgr] Minimum longitude */ idx_ctr=0; if(has_mss_val_ctr){ /* Find first non-missing value center and thus corners */ for(idx_ctr=0;idx_ctr<grd_sz_nbr;idx_ctr++){ if(grd_ctr_lat[idx_ctr] != mss_val_ctr_dbl) break; } /* !grd_sz_nbr */ assert(idx_ctr != grd_sz_nbr); } /* !has_mss_val_ctr */ if(flg_wrt_crn){ /* Grids with corner boundaries supplied or inferred */ lon_max=grd_crn_lon[idx_ctr*grd_crn_nbr]; lat_max=grd_crn_lat[idx_ctr*grd_crn_nbr]; lon_min=grd_crn_lon[idx_ctr*grd_crn_nbr]; lat_min=grd_crn_lat[idx_ctr*grd_crn_nbr]; for(idx=1;idx<grd_sz_nbr*grd_crn_nbr;idx++){ idx_ctr=idx/grd_crn_nbr; if(has_mss_val_ctr) if(grd_ctr_lat[idx_ctr] == mss_val_ctr_dbl) continue; lat_max=(grd_crn_lat[idx] > lat_max) ? grd_crn_lat[idx] : lat_max; lon_max=(grd_crn_lon[idx] > lon_max) ? grd_crn_lon[idx] : lon_max; lat_min=(grd_crn_lat[idx] < lat_min) ? grd_crn_lat[idx] : lat_min; lon_min=(grd_crn_lon[idx] < lon_min) ? grd_crn_lon[idx] : lon_min; } /* !idx */ }else{ /* !flg_wrt_crn */ /* 20170424: Diagnose grid-extent when corners were not provided or inferred This is usually (always?) for 1d unstructured grids with only centers provided */ lon_max=grd_ctr_lon[idx_ctr]; lat_max=grd_ctr_lat[idx_ctr]; lon_min=grd_ctr_lon[idx_ctr]; lat_min=grd_ctr_lat[idx_ctr]; for(idx_ctr=1;idx_ctr<grd_sz_nbr;idx_ctr++){ if(has_mss_val_ctr) if(grd_ctr_lat[idx_ctr] == mss_val_ctr_dbl) continue; lat_max=(grd_ctr_lat[idx_ctr] > lat_max) ? grd_ctr_lat[idx_ctr] : lat_max; lon_max=(grd_ctr_lon[idx_ctr] > lon_max) ? grd_ctr_lon[idx_ctr] : lon_max; lat_min=(grd_ctr_lat[idx_ctr] < lat_min) ? 
grd_ctr_lat[idx_ctr] : lat_min; lon_min=(grd_ctr_lon[idx_ctr] < lon_min) ? grd_ctr_lon[idx_ctr] : lon_min; } /* !idx_ctr */ } /* flg_wrt_crn */ lat_spn=lat_max-lat_min; lon_spn=lon_max-lon_min; /* Use strict rules for rectangular grids, looser for spans that are inferred, or center-to-center not corner-to-corner */ if(flg_grd_2D){ if((float)lon_spn == 360.0f && (float)lat_spn == 180.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn; }else{ /* !flg_grd_2D */ if((float)lon_spn >= 340.0f && (float)lat_spn >= 170.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn; } /* flg_wrt_crn */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s reports grid resolution %li x %li, spans %g x %g degrees: [%g <= lat <= %g], [%g <= lon <= %g]\n",nco_prg_nm_get(),fnc_nm,lat_nbr,lon_nbr,lat_spn,lon_spn,lat_min,lat_max,lon_min,lon_max); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input grid-extent: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_xtn_sng(nco_grd_xtn)); /* Write ERWG hints if filenames provided and grid is regional */ char *fl_hnt=NULL; char *fl_hnt_dst=NULL; char *fl_hnt_src=NULL; if(rgr->fl_hnt_dst) fl_hnt=fl_hnt_dst=rgr->fl_hnt_dst; if(rgr->fl_hnt_src) fl_hnt=fl_hnt_src=rgr->fl_hnt_src; if(nco_grd_xtn == nco_grd_xtn_rgn && fl_hnt){ const char *fl_mode="w"; FILE *fp_hnt; /* [fl] Hint file (for ERWG switches) file handle */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s writing ERWG weight-generation regional hint to file %s\n",nco_prg_nm_get(),fnc_nm,fl_hnt); /* Open output file */ if((fp_hnt=fopen(fl_hnt,fl_mode)) == NULL){ (void)fprintf(stderr,"%s: ERROR unable to open hint output file %s\n",nco_prg_nm_get(),fl_hnt); nco_exit(EXIT_FAILURE); } /* end if */ if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: Opened hint file %s\n",nco_prg_nm_get(),fl_hnt); if(fl_hnt_src) (void)fprintf(fp_hnt,"--src_regional"); if(fl_hnt_dst) 
(void)fprintf(fp_hnt,"--dst_regional"); rcd=fclose(fp_hnt); if(rcd != 0){ (void)fprintf(stderr,"%s: ERROR unable to close hint output file %s\n",nco_prg_nm_get(),fl_hnt); nco_exit(EXIT_FAILURE); } /* end if */ if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: Closed hint file %s\n",nco_prg_nm_get(),fl_hnt); } /* !nco_grd_xtn */ /* Diagnose area if necessary 20170510: ALM/CLM "area" is _FillValue=1.0e36f over ocean and total gridcell area in km2 (not multiplied by landfrac) elsewhere Writing this ALM/CLM "area" variable to gridfile, then using with ERWG --user_areas could be disastrous (depending on mask array and interpolation type) On the other hand CAM "area" variable is exactly what we want for gridfile Input areas are considered "untrustworthy" iff they have _and use_ missing value attribute Re-diagnose areas considered untrustworthy so output area array does not contain missing values */ if(flg_wrt_crn && has_mss_val_area){ const double mss_val_dbl=mss_val_area_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(area[idx] == mss_val_dbl) break; if(idx < grd_sz_nbr) use_mss_val_area=True; if(nco_dbg_lvl_get() >= nco_dbg_fl && use_mss_val_area) (void)fprintf(stdout,"%s: INFO %s reports input area field %s is considered untrustworthy because it uses missing values, will diagnose area from cell boundaries instead...\n",nco_prg_nm_get(),fnc_nm,area_nm_in); } /* !has_mss_val_area */ /* 20170511: There remain a handful of cases when input area should be diagnosed not copied These include using ncremap in SGS mode when inferred grids must use sensible area units Otherwise an inferred grid with area [km2] from ALM/CLM might be combined with area [sr] from NCO This would bias ERWG --user_areas produced values by ~10^10 Setting flg_dgn_area ensures inferred area uses [sr] */ const nco_bool flg_dgn_area=rgr->flg_dgn_area; /* [flg] Diagnose rather than copy inferred area */ if(flg_wrt_crn && /* If bounds are available to compute area and ... 
*/ (area_id == NC_MIN_INT || /* Area is not in input file ... */ use_mss_val_area || /* Area is untrustworthy */ flg_dgn_area)){ /* User/application explicitly requests diagnostic area */ /* Not absolutely necessary to diagnose area because ERWG will diagnose and output area itself _unless_ --user_areas option is given */ if(nco_dbg_lvl_get() >= nco_dbg_std && flg_dgn_area) (void)fprintf(stdout,"%s: INFO %s reports diagnosing area from cell boundaries...\n",nco_prg_nm_get(),fnc_nm); if(flg_grd_crv || flg_grd_1D){ /* Area of arbitrary unstructured or curvilinear grids requires spherical trigonometry */ nco_sph_plg_area(rgr,grd_crn_lat,grd_crn_lon,grd_sz_nbr,grd_crn_nbr,area); }else if(flg_grd_2D){ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr;lon_idx++) area[lat_idx*lon_nbr+lon_idx]=fabs(dgr2rdn*(lon_bnd[2*lon_idx+1L]-lon_bnd[2*lon_idx])*(sin(dgr2rdn*lat_bnd[2*lat_idx+1L])-sin(dgr2rdn*lat_bnd[2*lat_idx]))); /* fabs() ensures positive area in n2s grids */ } /* !flg_grd_2D */ } /* !area_id */ /* ERWG will fail unless grid file has mask variable Use nul-mask (all points included) whenever input mask variable not supplied/detected Define nul-mask true everywhere and overwrite with false below Input mask can be any type and output mask will always be NC_INT */ for(idx=0;idx<grd_sz_nbr;idx++) msk[idx]=1; if(msk_id != NC_MIN_INT){ /* Change missing-value-masked points to 0 integer mask for SCRIP grids (SCRIP has no missing value convention) Input mask can be any type and output mask will always be NC_INT Applications: ALM/CLM mask (landmask) is NC_FLOAT and defines but does not use NC_FLOAT missing value CICE mask (tmask/umask) is NC_FLOAT and defines and uses NC_FLOAT missing value AMSR mask is NC_SHORT and has no missing value GHRSST mask is NC_BYTE and is a multi-valued surface-type flag with missing value == -1b */ if(msk_typ != NC_INT){ if(nco_dbg_lvl_get() == nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s mask variable \"%s\" has odd type = 
%s. Re-run with higher debugging level for more information.\n",nco_prg_nm_get(),fnc_nm,msk_nm,nco_typ_sng(msk_typ)); if(nco_dbg_lvl_get() > nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s mask variable \"%s\" has odd type = %s. Regridding weight generators require a mask variable of type NC_INT to specify points to include/exclude as sources/destinations. Points where the mask variable is zero will be excluded (ignored) in regridding, all other points will be included. When inferring gridfiles, NCO assumes the first variable with a \"mask\"-like name (\"mask\", \"Mask\", \"grid_imask\", \"landmask\", or \"tmask\"), or the variable designated by the \"--msk_[src/dst]=msk_nm\" option, is this mask. However the variable \"%s\" in this file is not type NC_INT and so may not be intended as a regridding mask, hence this pleasant informational warning. To prevent NCO from interpreting \"%s\" as a regridding mask, specify \"--msk_src=none\" and/or \"--msk_dst=none\", as appropriate. To utilize some other variable as the mask variable, specify \"--msk_src=msk_nm\" and/or \"--msk_dst=msk_nm\", as appropriate. Mask treatment is subtle, and NCO tries to \"do the right thing\". 
Whether it does is often easiest to discern by visual inspection of the regridded results.\n",nco_prg_nm_get(),fnc_nm,msk_nm,nco_typ_sng(msk_typ),msk_nm,msk_nm); } /* msk_typ */ switch(msk_typ){ case NC_FLOAT: if(has_mss_val_msk){ const float mss_val_flt=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.fp[idx] == mss_val_flt || msk_unn.fp[idx] == 0.0f) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.fp[idx] == 0.0f) msk[idx]=0; } /* !mss_val */ break; case NC_DOUBLE: if(has_mss_val_msk){ const double mss_val_dbl=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.dp[idx] == mss_val_dbl || msk_unn.dp[idx] == 0.0) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.dp[idx] == 0.0) msk[idx]=0; } /* !mss_val */ break; case NC_INT: if(has_mss_val_msk){ const int mss_val_int=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.ip[idx] == mss_val_int || msk_unn.ip[idx] == 0) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.ip[idx] == 0) msk[idx]=0; } /* !mss_val */ break; case NC_SHORT: /* http://stackoverflow.com/questions/208433/how-do-i-write-a-short-literal-in-c */ if(has_mss_val_msk){ const short mss_val_sht=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.sp[idx] == mss_val_sht || msk_unn.sp[idx] == ((short)0)) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.sp[idx] == ((short)0)) msk[idx]=0; /* 20160111: AMSR kludge fxm */ // for(idx=0;idx<grd_sz_nbr;idx++) if(msk[idx] == 1) msk[idx]=0; } /* !mss_val */ break; case NC_BYTE: if(has_mss_val_msk){ const nco_byte mss_val_byt=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.bp[idx] == mss_val_byt || msk_unn.bp[idx] == ((nco_byte)0)) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.bp[idx] == ((nco_byte)0)) msk[idx]=0; /* 20170811: GHRSST kludge? 
*/ } /* !mss_val */ break; default: (void)fprintf(stderr,"%s: ERROR %s mask variable \"%s\" has unsupported type = %s\n",nco_prg_nm_get(),fnc_nm,msk_nm,nco_typ_sng(msk_typ)); nco_dfl_case_generic_err(); return NCO_ERR; break; } /* !msk_typ */ if(msk_unn.vp) msk_unn.vp=(void *)nco_free(msk_unn.vp); } /* !msk_id */ if(nco_dbg_lvl_get() >= nco_dbg_sbr){ lat_wgt_ttl=0.0; area_ttl=0.0; if(flg_grd_2D){ (void)fprintf(stderr,"%s: INFO %s reports destination rectangular latitude grid:\n",nco_prg_nm_get(),fnc_nm); for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt_ttl+=lat_wgt[lat_idx]; } /* !flg_grd_2D */ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr;lon_idx++) area_ttl+=area[lat_idx*lon_nbr+lon_idx]; (void)fprintf(stdout,"lat_wgt_ttl = %20.15f, frc_lat_wgt = %20.15f, area_ttl = %20.15f, frc_area = %20.15f\n",lat_wgt_ttl,lat_wgt_ttl/2.0,area_ttl,area_ttl/(4.0*M_PI)); assert(area_ttl > 0.0); assert(area_ttl <= 4.0*M_PI); } /* endif dbg */ /* Open grid file */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); /* Define dimensions */ /* 20151230 ERWG appears to require presence of corner arrays in grid file even when they are not used (e.g., bilinear) But ERWG will break when corner values are bad. Default is do not write bad corner values. Uncomment next line to write bad corner values. 
*/ /* flg_wrt_crn=True; */ if(flg_wrt_crn) rcd=nco_def_dim(out_id,grd_crn_nm,grd_crn_nbr,&dmn_id_grd_crn); rcd=nco_def_dim(out_id,grd_sz_nm,grd_sz_nbr,&dmn_id_grd_sz); rcd=nco_def_dim(out_id,grd_rnk_nm,grd_rnk_nbr,&dmn_id_grd_rnk); int shuffle; /* [flg] Turn-on shuffle filter */ int deflate; /* [flg] Turn-on deflate filter */ deflate=(int)True; shuffle=NC_SHUFFLE; /* Define variables */ (void)nco_def_var(out_id,dmn_sz_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_rnk,&dmn_sz_int_id); /* NB: Too small to deflate */ (void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&area_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,msk_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_sz,&msk_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,grd_ctr_lat_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,grd_ctr_lon_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lon_id,shuffle,deflate,dfl_lvl); if(flg_wrt_crn){ dmn_ids[0]=dmn_id_grd_sz; dmn_ids[1]=dmn_id_grd_crn; (void)nco_def_var(out_id,grd_crn_lat_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,grd_crn_lon_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lon_id,shuffle,deflate,dfl_lvl); } /* !flg_wrt_crn */ /* Define attributes */ aed_sct aed_mtd; char *att_nm; char *att_val; if(strstr(rgr->grd_ttl,"None given")){ const char att_fmt[]="NCO inferred this grid from input file %s"; att_val=(char *)nco_malloc((strlen(att_fmt)+strlen(rgr->fl_in)+1L)*sizeof(char)); sprintf(att_val,att_fmt,rgr->fl_in); }else{ att_val=strdup(rgr->grd_ttl); } /* !grd_ttl */ 
rcd=nco_char_att_put(out_id,NULL,"title",att_val); rcd=nco_char_att_put(out_id,NULL,"Conventions","SCRIP"); const char usr_cpp[]=TKN2SNG(USER); /* [sng] Hostname from C pre-processor */ rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp); rcd=nco_char_att_put(out_id,NULL,"grid_generator","NCO"); (void)nco_hst_att_cat(out_id,rgr->cmd_ln); (void)nco_vrs_att_cat(out_id); rcd=nco_char_att_put(out_id,NULL,"latitude_grid_type",nco_grd_lat_sng(lat_typ)); rcd=nco_char_att_put(out_id,NULL,"longitude_grid_type",nco_grd_lon_sng(lon_typ)); rcd=nco_char_att_put(out_id,area_nm,"long_name","Solid Angle Subtended on Source Grid"); rcd=nco_char_att_put(out_id,area_nm,"standard_name","solid_angle"); rcd=nco_char_att_put(out_id,area_nm,"units","steradian"); rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"long_name","Latitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"standard_name","latitude"); if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"long_name","Longitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"standard_name","longitude"); if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees_east"); else rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ if(flg_wrt_crn){ rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"long_name","Latitude of Grid Cell Vertices"); if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"long_name","Longitude of Grid Cell Vertices"); if(rgr->flg_cf_units) 
rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees_east"); else rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"bounds",grd_crn_lat_nm); rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"bounds",grd_crn_lon_nm); } /* !flg_wrt_crn */ rcd=nco_char_att_put(out_id,msk_nm,"long_name","Binary Integer Mask for Grid"); rcd=nco_char_att_put(out_id,msk_nm,"units","none"); /* Begin data mode */ (void)nco_enddef(out_id); /* Write variables */ dmn_srt[0]=0L; dmn_cnt[0]=grd_rnk_nbr; rcd=nco_put_vara(out_id,dmn_sz_int_id,dmn_srt,dmn_cnt,dmn_sz_int,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,msk_id,dmn_srt,dmn_cnt,msk,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,grd_ctr_lat_id,dmn_srt,dmn_cnt,grd_ctr_lat,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,grd_ctr_lon_id,dmn_srt,dmn_cnt,grd_ctr_lon,crd_typ); if(flg_wrt_crn){ dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=grd_sz_nbr; dmn_cnt[1]=grd_crn_nbr; rcd=nco_put_vara(out_id,grd_crn_lat_id,dmn_srt,dmn_cnt,grd_crn_lat,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=grd_sz_nbr; dmn_cnt[1]=grd_crn_nbr; rcd=nco_put_vara(out_id,grd_crn_lon_id,dmn_srt,dmn_cnt,grd_crn_lon,crd_typ); } /* !flg_wrt_crn */ /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); fl_out=rgr->fl_ugrid; if(fl_out){ /* Test UGRID: Documentation: https://github.com/ugrid-conventions/ugrid-conventions Procedure: Create 1x1 skeleton file, infer UGRID and SCRIP grids from it ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr skl=${HOME}/skl_180x360.nc --rgr scrip=${HOME}/grd_180x360_SCRIP.nc --rgr latlon=180,360#lat_typ=eqa#lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr infer 
--rgr ugrid=${HOME}/grd_ugrid.nc --rgr scrip=${HOME}/grd_scrip.nc ~/skl_180x360.nc ~/foo.nc ncks --cdl -v mesh_node_y ~/grd_ugrid.nc ncks --cdl -v mesh_face_nodes,mesh_face_x,mesh_face_y -d nFaces,0 ~/grd_ugrid.nc ncks --cdl -v mesh_edge_nodes,mesh_edge_x,mesh_edge_y -d nEdges,0 ~/grd_ugrid.nc ncks --cdl -v grid_center_lat,grid_corner_lat -d grid_size,0,,360 -d grid_corners,0,3 ~/grd_scrip.nc ncks --cdl -m -M ~/grd_ugrid.nc */ char *dgx_nm=NULL_CEWI; /* [sng] Name of edge_coordinates x variable */ char *dgy_nm=NULL_CEWI; /* [sng] Name of edge_coordinates y variable */ char *dg_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as edges */ char *dg_nd_nm=NULL_CEWI; /* [sng] Name of edge_node_connectivity variable */ char *fcx_nm=NULL_CEWI; /* [sng] Name of face_coordinates x variable */ char *fcy_nm=NULL_CEWI; /* [sng] Name of face_coordinates y variable */ char *fc_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as faces */ char *fc_nd_nm=NULL_CEWI; /* [sng] Name of face_node_connectivity variable */ char *msh_nm=NULL_CEWI; /* [sng] Name of mesh topology variable */ char *nd_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as nodes */ char *ndx_nm=NULL_CEWI; /* [sng] Name of node_coordinates x variable */ char *ndy_nm=NULL_CEWI; /* [sng] Name of node_coordinates y variable */ char *npe_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as nodes-per-edge */ char *npf_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as nodes-per-face */ double *dgx=NULL_CEWI; /* [dgr] Characteristic longitude of edges */ double *dgy=NULL_CEWI; /* [dgr] Characteristic latitude of edges */ double *fcx=NULL_CEWI; /* [dgr] Characteristic longitude of faces */ double *fcy=NULL_CEWI; /* [dgr] Characteristic latitude of faces */ double *ndx=NULL_CEWI; /* [dgr] Longitude of nodes */ double *ndy=NULL_CEWI; /* [dgr] Latitude of nodes */ int *dg_nd; /* [idx] edge_node_connectivity variable */ int *fc_nd; /* [idx] face_node_connectivity variable */ int 
dg_nd_id=NC_MIN_INT; /* [id] edge_node_connectivity variable ID */ int dgx_id=NC_MIN_INT; /* [id] Characteristic longitude of edges variable ID */ int dgy_id=NC_MIN_INT; /* [id] Characteristic latitude of edges variable ID */ int dmn_id_dg=NC_MIN_INT; /* [id] Dimension ID for edges */ int dmn_id_fc=NC_MIN_INT; /* [id] Dimension ID for faces */ int dmn_id_nd=NC_MIN_INT; /* [id] Dimension ID for nodes */ int dmn_id_npe=NC_MIN_INT; /* [id] Dimension ID for nodes-per-edge */ int dmn_id_npf=NC_MIN_INT; /* [id] Dimension ID for nodes-per-face */ int fc_nd_id=NC_MIN_INT; /* [id] face_node_connectivity variable ID */ int fcx_id=NC_MIN_INT; /* [id] Characteristic longitude of faces variable ID */ int fcy_id=NC_MIN_INT; /* [id] Characteristic latitude of faces variable ID */ int msh_id=NC_MIN_INT; /* [id] Mesh topology variable ID */ int msh_val=42; /* [id] Mesh topology variable value from Monty Python */ int ndx_id=NC_MIN_INT; /* [id] Longitude of mesh nodes variable ID */ int ndy_id=NC_MIN_INT; /* [id] Latitude of mesh nodes variable ID */ const long fc_nbr=grd_sz_nbr; /* [nbr] Number of faces in mesh */ const long npe_nbr=2; /* [nbr] Number of nodes per edge */ const long npf_nbr=grd_crn_nbr; /* [nbr] Number of nodes per face */ long dg_idx; /* [idx] Counting index for edges */ long dg_nbr=(long)NC_MIN_INT64; /* [nbr] Number of edges in mesh */ long fc_idx; /* [idx] Counting index for faces */ long nd_idx; /* [idx] Counting index for nodes */ long nd_nbr=(long)NC_MIN_INT64; /* [nbr] Number of nodes in mesh */ long srt_idx=0; /* [idx] start_index (C/Fortran) for edge_nodes, face_nodes */ if(!dgx_nm) dgx_nm=(char *)strdup("mesh_edge_x"); if(!dgy_nm) dgy_nm=(char *)strdup("mesh_edge_y"); if(!dg_dmn_nm) dg_dmn_nm=(char *)strdup("nEdges"); if(!fcx_nm) fcx_nm=(char *)strdup("mesh_face_x"); if(!fcy_nm) fcy_nm=(char *)strdup("mesh_face_y"); if(!fc_dmn_nm) fc_dmn_nm=(char *)strdup("nFaces"); if(!dg_nd_nm) dg_nd_nm=(char *)strdup("mesh_edge_nodes"); if(!fc_nd_nm) fc_nd_nm=(char 
*)strdup("mesh_face_nodes"); if(!msh_nm) msh_nm=(char *)strdup("mesh"); if(!nd_dmn_nm) nd_dmn_nm=(char *)strdup("nNodes"); if(!ndx_nm) ndx_nm=(char *)strdup("mesh_node_x"); if(!ndy_nm) ndy_nm=(char *)strdup("mesh_node_y"); if(!npe_dmn_nm) npe_dmn_nm=(char *)strdup("two"); if(!npf_dmn_nm) npf_dmn_nm=(char *)strdup("maxNodesPerFace"); if(flg_grd_1D){ (void)fprintf(stdout,"%s: ERROR %s UGRID output does not yet support 1D grids\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); }else if(flg_grd_2D){ /* Assume 2D grids are global and comprised of quadrilaterals */ switch(lat_typ){ case nco_grd_lat_fv: /* Currently all 2D grids are converted to the same UGRID representation fxm: Cap grids (e.g., FV) should eventually be written with a real cap, rather than as the "polar teeth" representation currently used. Polar teeth convention allows cap grid to be represented as rectangular on disk However, cap grids are better suited to non-rectangular UGRID meshes */ case nco_grd_lat_eqa: case nco_grd_lat_gss: /* Numbers of unique edges and nodes counted from South Pole (SP) to North Pole (NP) */ dg_nbr=lon_nbr*2+ /* SP: cells_per_lat*unique_edges_per_cell */ (lat_nbr-2)*lon_nbr*2+ /* Mid: lats*cells_per_lat*unique_edges_per_cell */ lon_nbr*1; /* NP: cells_per_lat*unique_edges_per_cell */ nd_nbr=1+lon_nbr*1+ /* SP: SP+cells_per_lat*unique_nodes_per_cell */ (lat_nbr-2)*lon_nbr*1+ /* Mid: lats*cells_per_lat*unique_nodes_per_cell */ 1; /* NP: NP */ break; case nco_grd_lat_unk: case nco_grd_lat_nil: default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ }else if(flg_grd_crv){ (void)fprintf(stdout,"%s: ERROR %s UGRID output does not yet support curvilinear grids\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !flg_grd */ dg_nd=(int *)nco_malloc(dg_nbr*npe_nbr*nco_typ_lng(NC_INT)); dgx=(double *)nco_malloc(dg_nbr*nco_typ_lng(crd_typ)); dgy=(double *)nco_malloc(dg_nbr*nco_typ_lng(crd_typ)); fc_nd=(int *)nco_malloc(fc_nbr*npf_nbr*nco_typ_lng(NC_INT)); fcx=(double 
*)nco_malloc(fc_nbr*nco_typ_lng(crd_typ)); fcy=(double *)nco_malloc(fc_nbr*nco_typ_lng(crd_typ)); ndx=(double *)nco_malloc(nd_nbr*nco_typ_lng(crd_typ)); ndy=(double *)nco_malloc(nd_nbr*nco_typ_lng(crd_typ)); const long int idx_fst_crn_ll=0; const long int idx_fst_crn_lr=1; const long int idx_fst_crn_ur=2; const long int idx_fst_crn_ul=3; /* Node Ordering: Each interior face requires one new node Node 0 at SP New latitude row moves next node North Add nodes to run West->East */ /* SP */ ndx[0]=lon_crn[0]; /* Longitude degenerate at SP, NP, keep same longitude as corner array */ ndy[0]=lat_crn[0]; /* Mid */ for(nd_idx=1;nd_idx<nd_nbr-1L;nd_idx++){ fc_idx=nd_idx-1L; lat_idx=fc_idx/lon_nbr; lon_idx=fc_idx%lon_nbr; ndx[nd_idx]=lon_crn[lon_idx*grd_crn_nbr+idx_fst_crn_ul]; ndy[nd_idx]=lat_crn[lat_idx*grd_crn_nbr+idx_fst_crn_ul]; } /* !nd_idx */ /* NP */ ndx[nd_nbr-1L]=lon_crn[(lon_nbr-1)*grd_crn_nbr+idx_fst_crn_ul]; ndy[nd_nbr-1L]=lat_crn[(lat_nbr-1)*grd_crn_nbr+idx_fst_crn_ul]; /* Edge Ordering: epf_nbr is number of distinct edges-per-face (incremental, for interior cells) Each additional interior rectangular gridcell requires two new edges: Edge 0 runs South->North for all cells Edge 1 runs West->East for all cells NP row requires only one new edge per face */ /* SP */ const int epf_nbr=2; /* [nbr] Number of distinct edges-per-face (incremental, for interior cells) */ for(fc_idx=0;fc_idx<lon_nbr;fc_idx++){ dg_idx=fc_idx*epf_nbr; /* Edge 0 */ dg_nd[(dg_idx+0L)*npe_nbr+0L]=srt_idx; dg_nd[(dg_idx+0L)*npe_nbr+1L]=srt_idx+fc_idx+1L; /* Edge 1 */ dg_nd[(dg_idx+1L)*npe_nbr+0L]=srt_idx+fc_idx+1L; dg_nd[(dg_idx+1L)*npe_nbr+1L]=srt_idx+fc_idx+2L; } /* !fc_idx */ /* Mid */ for(fc_idx=lon_nbr;fc_idx<(lat_nbr-1L)*lon_nbr;fc_idx++){ dg_idx=fc_idx*epf_nbr; /* Edge 0 */ dg_nd[(dg_idx+0L)*npe_nbr+0L]=srt_idx+fc_idx-lon_nbr+1L; dg_nd[(dg_idx+0L)*npe_nbr+1L]=srt_idx+fc_idx+1L; /* Edge 1 */ dg_nd[(dg_idx+1L)*npe_nbr+0L]=srt_idx+fc_idx+1L; dg_nd[(dg_idx+1L)*npe_nbr+1L]=srt_idx+fc_idx+2L; } 
/* !fc_idx */ /* NP */ for(fc_idx=fc_nbr-lon_nbr;fc_idx<fc_nbr;fc_idx++){ /* Only one new edge per face in last row, easiest to count backwards from last edge */ dg_idx=dg_nbr-(fc_nbr-fc_idx); /* NP faces require only only one new edge, Edge 0 */ dg_nd[(dg_idx+0L)*npe_nbr+0L]=srt_idx+fc_idx-lon_nbr+1L; dg_nd[(dg_idx+0L)*npe_nbr+1L]=srt_idx+nd_nbr-1L; } /* !fc_idx */ /* SP */ for(fc_idx=0;fc_idx<lon_nbr;fc_idx++){ fc_nd[fc_idx*npf_nbr+0L]=srt_idx+0L; fc_nd[fc_idx*npf_nbr+1L]=srt_idx+fc_idx+2L; /* NB: CCW */ fc_nd[fc_idx*npf_nbr+2L]=srt_idx+fc_idx+1L; /* NB: CCW */ fc_nd[fc_idx*npf_nbr+3L]=mss_val_int_out; } /* !fc_idx */ /* Mid */ for(fc_idx=lon_nbr;fc_idx<fc_nbr-lon_nbr;fc_idx++){ fc_nd[fc_idx*npf_nbr+idx_fst_crn_ll]=srt_idx+fc_idx-lon_nbr+1L; fc_nd[fc_idx*npf_nbr+idx_fst_crn_lr]=srt_idx+fc_idx-lon_nbr+2L; fc_nd[fc_idx*npf_nbr+idx_fst_crn_ur]=srt_idx+fc_idx+2L; fc_nd[fc_idx*npf_nbr+idx_fst_crn_ul]=srt_idx+fc_idx+1L; } /* !fc_idx */ /* NP */ for(fc_idx=fc_nbr-lon_nbr;fc_idx<fc_nbr;fc_idx++){ fc_nd[fc_idx*npf_nbr+0L]=srt_idx+nd_nbr-(fc_nbr-fc_idx)-2L; fc_nd[fc_idx*npf_nbr+1L]=srt_idx+nd_nbr-(fc_nbr-fc_idx)-1L; fc_nd[fc_idx*npf_nbr+2L]=srt_idx+nd_nbr-1L; fc_nd[fc_idx*npf_nbr+3L]=mss_val_int_out; } /* !fc_idx */ /* Characteristic coordinates */ for(dg_idx=0;dg_idx<dg_nbr-1L;dg_idx++){ idx=dg_idx*npe_nbr; dgx[dg_idx]=0.5*(ndx[dg_nd[idx+0L]]+ndx[dg_nd[idx+1L]]); dgy[dg_idx]=0.5*(ndy[dg_nd[idx+0L]]+ndy[dg_nd[idx+1L]]); } /* !dg_idx */ /* Degenerate longitude at SP, NP, causes weird characterisic longitude unless special care taken */ for(fc_idx=0;fc_idx<fc_nbr-1L;fc_idx++){ idx=fc_idx*npf_nbr; if(fc_idx < lon_nbr){ fcx[fc_idx]=0.5*(ndx[fc_nd[idx+1L]]+ndx[fc_nd[idx+2L]]); }else if(fc_idx >= fc_nbr-lon_nbr-1){ fcx[fc_idx]=0.5*(ndx[fc_nd[idx+0L]]+ndx[fc_nd[idx+1L]]); }else if(fc_nd[idx+3L] != mss_val_int_out){ /* fxm for fcx use nco_lon_crn_avg_brnch() and 3-node version too */ fcx[fc_idx]=0.25*(ndx[fc_nd[idx+0L]]+ndx[fc_nd[idx+1L]]+ndx[fc_nd[idx+2L]]+ndx[fc_nd[idx+3L]]); 
}else{ abort(); } /* !fc_idx */ if(fc_nd[idx+3L] != mss_val_int_out) fcy[fc_idx]=0.25*(ndy[fc_nd[idx+0L]]+ndy[fc_nd[idx+1L]]+ndy[fc_nd[idx+2L]]+ndy[fc_nd[idx+3L]]); else fcy[fc_idx]=0.33*(ndy[fc_nd[idx+0L]]+ndy[fc_nd[idx+1L]]+ndy[fc_nd[idx+2L]]); } /* !fc_idx */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); rcd=nco_def_dim(out_id,dg_dmn_nm,dg_nbr,&dmn_id_dg); rcd=nco_def_dim(out_id,fc_dmn_nm,fc_nbr,&dmn_id_fc); rcd=nco_def_dim(out_id,nd_dmn_nm,nd_nbr,&dmn_id_nd); rcd=nco_def_dim(out_id,npe_dmn_nm,npe_nbr,&dmn_id_npe); rcd=nco_def_dim(out_id,npf_dmn_nm,npf_nbr,&dmn_id_npf); dmn_ids[0]=dmn_id_dg; dmn_ids[1]=dmn_id_npe; rcd=nco_def_var(out_id,dg_nd_nm,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids,&dg_nd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,dg_nd_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_fc; dmn_ids[1]=dmn_id_npf; rcd=nco_def_var(out_id,fc_nd_nm,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids,&fc_nd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,fc_nd_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,msh_nm,(nc_type)NC_INT,dmn_nbr_0D,(int *)NULL,&msh_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msh_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,ndx_nm,crd_typ,dmn_nbr_1D,&dmn_id_nd,&ndx_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ndx_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,ndy_nm,crd_typ,dmn_nbr_1D,&dmn_id_nd,&ndy_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ndy_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,dgx_nm,crd_typ,dmn_nbr_1D,&dmn_id_dg,&dgx_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,dgx_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,dgy_nm,crd_typ,dmn_nbr_1D,&dmn_id_dg,&dgy_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,dgy_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,fcx_nm,crd_typ,dmn_nbr_1D,&dmn_id_fc,&fcx_id); if(dfl_lvl > 0) 
(void)nco_def_var_deflate(out_id,fcx_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,fcy_nm,crd_typ,dmn_nbr_1D,&dmn_id_fc,&fcy_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,fcy_id,shuffle,deflate,dfl_lvl); if(strstr(rgr->grd_ttl,"None given")){ const char att_fmt[]="NCO constructed this UGRID grid from scratch"; att_val=(char *)nco_malloc((strlen(att_fmt)+strlen(rgr->fl_in)+1L)*sizeof(char)); sprintf(att_val,att_fmt); }else{ att_val=strdup(rgr->grd_ttl); } /* !grd_ttl */ rcd=nco_char_att_put(out_id,NULL,"title",att_val); rcd=nco_char_att_put(out_id,NULL,"Conventions","CF-1.6, UGRID-1.0"); rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp); rcd=nco_char_att_put(out_id,NULL,"grid_generator","NCO"); (void)nco_hst_att_cat(out_id,rgr->cmd_ln); (void)nco_vrs_att_cat(out_id); rcd=nco_char_att_put(out_id,msh_nm,"cf_role","mesh_topology"); rcd=nco_char_att_put(out_id,msh_nm,"standard_name","mesh_topology"); rcd=nco_char_att_put(out_id,msh_nm,"long_name","Topology data"); att_nm=strdup("topology_dimension"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=msh_nm; aed_mtd.id=msh_id; aed_mtd.sz=1; aed_mtd.type=NC_INT; aed_mtd.val.ip=&val_two; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,msh_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); aed_mtd.sz=strlen(ndx_nm)+strlen(ndy_nm)+1L; att_val=(char *)nco_malloc((aed_mtd.sz+1L)*nco_typ_lng(NC_CHAR)); (void)sprintf(att_val,"%s %s",ndx_nm,ndy_nm); rcd=nco_char_att_put(out_id,msh_nm,"node_coordinates",att_val); rcd=nco_char_att_put(out_id,msh_nm,"face_node_connectivity",fc_nd_nm); aed_mtd.sz=strlen(fcx_nm)+strlen(fcy_nm)+1L; att_val=(char *)nco_malloc((aed_mtd.sz+1L)*nco_typ_lng(NC_CHAR)); (void)sprintf(att_val,"%s %s",fcx_nm,fcy_nm); rcd=nco_char_att_put(out_id,msh_nm,"face_coordinates",att_val); rcd=nco_char_att_put(out_id,msh_nm,"face_dimension",fc_dmn_nm); rcd=nco_char_att_put(out_id,msh_nm,"edge_node_connectivity",dg_nd_nm); aed_mtd.sz=strlen(dgx_nm)+strlen(dgy_nm)+1L; att_val=(char 
*)nco_malloc((aed_mtd.sz+1L)*nco_typ_lng(NC_CHAR)); (void)sprintf(att_val,"%s %s",dgx_nm,dgy_nm); rcd=nco_char_att_put(out_id,msh_nm,"edge_coordinates",att_val); rcd=nco_char_att_put(out_id,msh_nm,"edge_dimension",dg_dmn_nm); rcd=nco_char_att_put(out_id,ndx_nm,"standard_name","longitude"); rcd=nco_char_att_put(out_id,ndx_nm,"long_name","Longitude of mesh nodes"); rcd=nco_char_att_put(out_id,ndx_nm,"units","degrees_east"); rcd=nco_char_att_put(out_id,ndy_nm,"standard_name","latitude"); rcd=nco_char_att_put(out_id,ndy_nm,"long_name","Latitude of mesh nodes"); rcd=nco_char_att_put(out_id,ndy_nm,"units","degrees_north"); rcd=nco_char_att_put(out_id,dg_nd_nm,"cf_role","edge_node_connectivity"); rcd=nco_char_att_put(out_id,dg_nd_nm,"long_name","Maps every edge to the two nodes that it connects"); att_nm=strdup("start_index"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=dg_nd_nm; aed_mtd.id=dg_nd_id; aed_mtd.sz=1; aed_mtd.type=NC_INT; aed_mtd.val.ip=&val_zero; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,dg_nd_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,fc_nd_nm,"cf_role","face_node_connectivity"); rcd=nco_char_att_put(out_id,fc_nd_nm,"long_name","Maps every face to its corner nodes"); att_nm=strdup("start_index"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=fc_nd_nm; aed_mtd.id=fc_nd_id; aed_mtd.sz=1; aed_mtd.type=NC_INT; aed_mtd.val.ip=&val_zero; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,fc_nd_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); att_nm=strdup("_FillValue"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=fc_nd_nm; aed_mtd.id=fc_nd_id; aed_mtd.sz=1; aed_mtd.type=NC_INT; aed_mtd.val.ip=&mss_val_int_out; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,fc_nd_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,dgx_nm,"standard_name","longitude"); rcd=nco_char_att_put(out_id,dgx_nm,"long_name","Characteristic longitude of 2D mesh face"); 
rcd=nco_char_att_put(out_id,dgx_nm,"units","degrees_east"); rcd=nco_char_att_put(out_id,dgy_nm,"standard_name","latitude"); rcd=nco_char_att_put(out_id,dgy_nm,"long_name","Characteristic latitude of 2D mesh face"); rcd=nco_char_att_put(out_id,dgy_nm,"units","degrees_north"); rcd=nco_char_att_put(out_id,fcx_nm,"standard_name","longitude"); rcd=nco_char_att_put(out_id,fcx_nm,"long_name","Characteristic longitude of 2D mesh edge"); rcd=nco_char_att_put(out_id,fcx_nm,"units","degrees_east"); rcd=nco_char_att_put(out_id,fcy_nm,"standard_name","latitude"); rcd=nco_char_att_put(out_id,fcy_nm,"long_name","Characteristic latitude of 2D mesh edge"); rcd=nco_char_att_put(out_id,fcy_nm,"units","degrees_north"); /* Begin data mode */ (void)nco_enddef(out_id); (void)nco_put_vara(out_id,msh_id,dmn_srt,dmn_cnt,&msh_val,(nc_type)NC_INT); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=dg_nbr; dmn_cnt[1]=epf_nbr; (void)nco_put_vara(out_id,dg_nd_id,dmn_srt,dmn_cnt,dg_nd,(nc_type)NC_INT); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=fc_nbr; dmn_cnt[1]=npf_nbr; (void)nco_put_vara(out_id,fc_nd_id,dmn_srt,dmn_cnt,fc_nd,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=nd_nbr; (void)nco_put_vara(out_id,ndx_id,dmn_srt,dmn_cnt,ndx,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=nd_nbr; (void)nco_put_vara(out_id,ndy_id,dmn_srt,dmn_cnt,ndy,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=dg_nbr; (void)nco_put_vara(out_id,dgx_id,dmn_srt,dmn_cnt,dgx,crd_typ); (void)nco_put_vara(out_id,dgy_id,dmn_srt,dmn_cnt,dgy,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=fc_nbr; (void)nco_put_vara(out_id,fcx_id,dmn_srt,dmn_cnt,fcx,crd_typ); (void)nco_put_vara(out_id,fcy_id,dmn_srt,dmn_cnt,fcy,crd_typ); /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); /* Free memory associated with output file */ if(dgx) dgx=(double *)nco_free(dgx); if(dgy) dgy=(double *)nco_free(dgy); if(dg_nd) dg_nd=(int *)nco_free(dg_nd); if(fcx) fcx=(double *)nco_free(fcx); if(fcy) fcy=(double *)nco_free(fcy); if(fc_nd) 
fc_nd=(int *)nco_free(fc_nd); if(ndx) ndx=(double *)nco_free(ndx); if(ndy) ndy=(double *)nco_free(ndy); /* Free strings */ if(dgx_nm) dgx_nm=(char *)nco_free(dgx_nm); if(dgy_nm) dgy_nm=(char *)nco_free(dgy_nm); if(dg_dmn_nm) dg_dmn_nm=(char *)nco_free(dg_dmn_nm); if(dg_nd_nm) dg_nd_nm=(char *)nco_free(dg_nd_nm); if(fcx_nm) fcx_nm=(char *)nco_free(fcx_nm); if(fcy_nm) fcy_nm=(char *)nco_free(fcy_nm); if(fc_dmn_nm) fc_dmn_nm=(char *)nco_free(fc_dmn_nm); if(fc_nd_nm) fc_nd_nm=(char *)nco_free(fc_nd_nm); if(msh_nm) msh_nm=(char *)nco_free(msh_nm); if(nd_dmn_nm) nd_dmn_nm=(char *)nco_free(nd_dmn_nm); if(ndx_nm) ndx_nm=(char *)nco_free(ndx_nm); if(ndy_nm) ndy_nm=(char *)nco_free(ndy_nm); if(npe_dmn_nm) npe_dmn_nm=(char *)nco_free(npe_dmn_nm); if(npf_dmn_nm) npf_dmn_nm=(char *)nco_free(npf_dmn_nm); } /* !fl_ugrid */ /* Free memory associated with input file */ if(dmn_sz_int) dmn_sz_int=(int *)nco_free(dmn_sz_int); if(msk) msk=(int *)nco_free(msk); if(area) area=(double *)nco_free(area); if(grd_ctr_lat) grd_ctr_lat=(double *)nco_free(grd_ctr_lat); if(grd_ctr_lon) grd_ctr_lon=(double *)nco_free(grd_ctr_lon); if(grd_crn_lat) grd_crn_lat=(double *)nco_free(grd_crn_lat); if(grd_crn_lon) grd_crn_lon=(double *)nco_free(grd_crn_lon); if(lat_bnd) lat_bnd=(double *)nco_free(lat_bnd); if(lat_crn) lat_crn=(double *)nco_free(lat_crn); if(lat_ctr) lat_ctr=(double *)nco_free(lat_ctr); if(lat_ntf) lat_ntf=(double *)nco_free(lat_ntf); if(lat_wgt) lat_wgt=(double *)nco_free(lat_wgt); if(lon_bnd) lon_bnd=(double *)nco_free(lon_bnd); if(lon_crn) lon_crn=(double *)nco_free(lon_crn); if(lon_ctr) lon_ctr=(double *)nco_free(lon_ctr); if(lon_ntf) lon_ntf=(double *)nco_free(lon_ntf); /* Free strings */ if(col_dmn_nm) col_dmn_nm=(char *)nco_free(col_dmn_nm); if(lat_dmn_nm) lat_dmn_nm=(char *)nco_free(lat_dmn_nm); if(lon_dmn_nm) lon_dmn_nm=(char *)nco_free(lon_dmn_nm); if(bnd_dmn_nm) bnd_dmn_nm=(char *)nco_free(bnd_dmn_nm); if(lat_nm_in) lat_nm_in=(char *)nco_free(lat_nm_in); if(lon_nm_in) 
lon_nm_in=(char *)nco_free(lon_nm_in); if(lat_bnd_nm) lat_bnd_nm=(char *)nco_free(lat_bnd_nm); if(lon_bnd_nm) lon_bnd_nm=(char *)nco_free(lon_bnd_nm); if(area_nm_in) area_nm_in=(char *)nco_free(area_nm_in); if(msk_nm_in) msk_nm_in=(char *)nco_free(msk_nm_in); return rcd; } /* !nco_grd_nfr() */ double /* O [dgr] Longitude difference (lon_r-lon_l) */ nco_lon_dff_brnch_dgr /* [fnc] Subtract longitudes with branch-cut rules */ (double lon_r, /* I [dgr] Longitude on right of gridcell (subtractor) */ double lon_l) /* I [dgr] Longitude on left of gridcell (subtractee) */ { /* Purpose: Return difference of two longitudes in degrees Assume longitudes are within 180 degrees of eachother Default orientation is monotonically increasing longitude from left to right */ const char fnc_nm[]="nco_lon_dff_brnch_dgr()"; const double lon_dff=lon_r-lon_l; /* [dgr] Longitude difference (lon_r-lon_l) */ if(lon_dff >= 180.0){ (void)fprintf(stdout,"%s: WARNING %s reports lon_r, lon_l, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_r,lon_l,lon_dff); return lon_dff-360.0; }else if(lon_dff <= -180.0){ return lon_dff+360.0; } /* !lon_dff */ return lon_dff; } /* !nco_lon_dff_brnch_dgr() */ double /* O [rdn] Longitude difference (lon_r-lon_l) */ nco_lon_dff_brnch_rdn /* [fnc] Subtract longitudes with branch-cut rules */ (double lon_r, /* I [rdn] Longitude on right of gridcell (subtractor) */ double lon_l) /* I [rdn] Longitude on left of gridcell (subtractee) */ { /* Purpose: Return difference of two longitudes in radians Assume longitudes are within pi radians of eachother Default orientation is monotonically increasing longitude from left to right */ const char fnc_nm[]="nco_lon_dff_brnch_rdn()"; const double lon_dff=lon_r-lon_l; /* [rdn] Longitude difference (lon_r-lon_l) */ //nco_bool dbg_prn=False; /* [flg] Print warning when longitude difference is suspicious */ /* longitudes on different branch cuts are expected when computing polygon area, so warn only if requested with high 
debugging level */ if(lon_dff >= M_PI){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports lon_r, lon_l, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_r,lon_l,lon_dff); return lon_dff-M_PI-M_PI; }else if(lon_dff <= -M_PI){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports lon_r, lon_l, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_r,lon_l,lon_dff); return lon_dff+M_PI+M_PI; } /* !lon_dff */ return lon_dff; } /* !nco_lon_dff_brnch_rdn() */ double /* O [dgr] Longitude average */ nco_lon_crn_avg_brnch /* [fnc] Average quadrilateral longitude with branch-cut rules */ (double lon_ll, /* I [dgr] Longitude at lower left of gridcell */ double lon_lr, /* I [dgr] Longitude at lower right of gridcell */ double lon_ur, /* I [dgr] Longitude at upper right of gridcell */ double lon_ul) /* I [dgr] Longitude at upper left of gridcell */ { /* Purpose: Return average of four corner longitudes of quadrilateral Assume longitudes are within 180 degrees of eachother Default orientation is monotonically increasing longitude from left to right WLOG, adjust all longitudes to be on same branch as lon_ll */ const char fnc_nm[]="nco_lon_crn_avg_brnch()"; double lon_dff; /* [dgr] Longitude difference */ lon_dff=lon_lr-lon_ll; if(lon_dff >= 180.0){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: INFO %s reports lon_lr, lon_ll, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_lr,lon_ll,lon_dff); lon_lr-=360.0; }else if(lon_dff <= -180.0){ lon_lr+=360.0; } /* !lon_dff */ lon_dff=lon_ur-lon_ll; if(lon_dff >= 180.0){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: INFO %s reports lon_ur, lon_ll, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_ur,lon_ll,lon_dff); lon_ur-=360.0; }else if(lon_dff <= -180.0){ lon_ur+=360.0; } /* !lon_dff */ lon_dff=lon_ul-lon_ll; if(lon_dff >= 180.0){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: INFO %s reports lon_ul, lon_ll, lon_dff = %g, 
%g, %g\n",nco_prg_nm_get(),fnc_nm,lon_ul,lon_ll,lon_dff); lon_ul-=360.0; }else if(lon_dff <= -180.0){ lon_ul+=360.0; } /* !lon_dff */ return 0.25*(lon_ll+lon_lr+lon_ur+lon_ul); } /* !nco_lon_crn_avg_brnch() */ double /* O [dgr] Longitude average */ nco_lon_ply_avg_brnch_dgr /* [fnc] Average polygon longitude with branch-cut rules */ (double *lon_crn, /* I [dgr] Longitude of gridcell corners */ long lon_nbr) /* I [nbr] Number of vertices in polygon */ { /* Purpose: Return average longitude of polygon vertices, i.e., centroid longitude Assume longitudes are within 180 degrees of one another Default orientation is monotonically increasing longitude from left to right WLOG, adjust all longitudes to be on same branch as lon_ll */ // const char fnc_nm[]="nco_lon_ply_avg_brnch()"; double lon_dff; /* [dgr] Longitude difference */ double lon_avg; /* [dgr] Longitude average */ int lon_idx; /* [idx] Polygon vertex index */ assert(lon_nbr != 0); lon_avg=lon_crn[0]; for(lon_idx=1;lon_idx<lon_nbr;lon_idx++){ lon_avg+=lon_crn[lon_idx]; lon_dff=lon_crn[lon_idx]-lon_crn[0]; if(lon_dff >= 180.0){ lon_avg-=360.0; }else if(lon_dff <= -180.0){ lon_avg+=360.0; } /* !lon_dff */ } /* !lon_idx */ return lon_avg/lon_nbr; } /* !nco_lon_ply_avg_brnch() */ nco_bool /* O [flg] Input corners were CCW */ nco_ccw_chk /* [fnc] Convert quadrilateral gridcell corners to CCW orientation */ (double * const crn_lat, /* [dgr] Latitude corners of gridcell */ double * const crn_lon, /* [dgr] Latitude corners of gridcell */ const int crn_nbr, /* [nbr] Number of corners per gridcell */ int idx_ccw, /* [idx] Index of starting vertice for CCW check (Point A = tail side AB) */ const int rcr_lvl) /* [nbr] Recursion level */ { /* Purpose: Determine whether corner vertices are oriented CCW If not, alter order so they are returned in CCW order Function can call itself, and rcr_lvl indicates recursion level: rcr_lvl=1: Called by host code, i.e., nco_grd_nfr() rcr_lvl=2: Called by itself, i.e., nco_ccw_chk() 
Assumptions: Quadrilateral vertices are already corrected to obey branch-cut rules, i.e., all vertices are on "same side" of dateline or Greenwich as appropriate Algorithm: Start crn_idx=0, i.e., quadrilateral LL corner Vector A runs from crn_idx=0 to crn_idx=1, i.e., quadrilateral LL->LR Vector B runs from crn_idx=1 to crn_idx=2, i.e., quadrilateral LR->UR Compute cross-product A x B = C C is normal to plane containining A and B Dot-product of C with radial vector to head A = tail B is positive if A and B are CCW if(ABC is CCW){ if(CDA is CCW) Done else Copy D:=A (make CDA degenerate, triangularize quadrilateral) endif }else(ABC is not CCW){ Assume entire quadrilateral is CW Take mirror image of quadrilateral by switching B with D If(new ABC is CCW){ If(CDA is CCW) Done else Copy D:=A (make CDA degenerate, triangularize quadrilateral) endif }else{ Fail (return False, meaning point should be masked) } All cases return True (i.e., CCW) from rcr_lvl=1 except last Last case returns False, and calling code should mask such an aberrant point */ const char fnc_nm[]="nco_ccw_chk()"; /* MSVC compiler chokes unless array size is compile-time constant */ const int CRN_NBR_MSVC=4; double sin_lat[CRN_NBR_MSVC]; double sin_lon[CRN_NBR_MSVC]; double cos_lat[CRN_NBR_MSVC]; double cos_lon[CRN_NBR_MSVC]; double A_tail_x,A_tail_y,A_tail_z; double A_head_x,A_head_y,A_head_z; double A_x,A_y,A_z; double B_tail_x,B_tail_y,B_tail_z; double B_head_x,B_head_y,B_head_z; double B_x,B_y,B_z; double C_x,C_y,C_z; double R_x,R_y,R_z; double lat_rdn; double lon_rdn; double dot_prd; int crn_idx; /* [idx] Corner idx */ int A_tail_idx,A_head_idx; int B_tail_idx,B_head_idx; nco_bool flg_ccw; /* [flg] Input is CCW */ assert(crn_nbr == CRN_NBR_MSVC); for(crn_idx=0;crn_idx<crn_nbr;crn_idx++){ lat_rdn=crn_lat[crn_idx]*M_PI/180.0; lon_rdn=crn_lon[crn_idx]*M_PI/180.0; sin_lat[crn_idx]=sin(lat_rdn); cos_lat[crn_idx]=cos(lat_rdn); sin_lon[crn_idx]=sin(lon_rdn); cos_lon[crn_idx]=cos(lon_rdn); } /* !crn_idx */ 
/* Calls from host code (i.e., nco_grd_nfr()) start at lower-left of quadrilateral ABCD = Point A = vertex 0 Calls from self can start from quadrilateral Point A or C To check triangle CDA, start at upper-right of quadrilateral ABCD = Point C = vertex 2 */ A_tail_idx=idx_ccw; A_head_idx=B_tail_idx=(A_tail_idx+1)%crn_nbr; B_head_idx=(B_tail_idx+1)%crn_nbr; A_tail_x=cos_lat[A_tail_idx]*cos_lon[A_tail_idx]; A_tail_y=cos_lat[A_tail_idx]*sin_lon[A_tail_idx]; A_tail_z=sin_lat[A_tail_idx]; A_head_x=B_tail_x=R_x=cos_lat[A_head_idx]*cos_lon[A_head_idx]; A_head_y=B_tail_y=R_y=cos_lat[A_head_idx]*sin_lon[A_head_idx]; A_head_z=B_tail_z=R_z=sin_lat[A_head_idx]; B_head_x=cos_lat[B_head_idx]*cos_lon[B_head_idx]; B_head_y=cos_lat[B_head_idx]*sin_lon[B_head_idx]; B_head_z=sin_lat[B_head_idx]; A_x=A_head_x-A_tail_x; A_y=A_head_y-A_tail_y; A_z=A_head_z-A_tail_z; B_x=B_head_x-B_tail_x; B_y=B_head_y-B_tail_y; B_z=B_head_z-B_tail_z; /* Cross-Product C = A x B */ C_x=A_y*B_z-B_y*A_z; C_y=-A_x*B_z+B_x*A_z; C_z=A_x*B_y-B_x*A_y; /* Dot-Product R dot C */ dot_prd=C_x*R_x+C_y*R_y+C_z*R_z; if(dot_prd > 0.0) flg_ccw=True; else flg_ccw=False; if(flg_ccw && crn_nbr == 4 && rcr_lvl == 1){ /* Original ABC is CCW, now check CDA */ idx_ccw=2; flg_ccw=nco_ccw_chk(crn_lat,crn_lon,crn_nbr,idx_ccw,rcr_lvl+1); if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports triangle ABC is and CDA is not CCW in quadrilateral gridcell with LL (lat,lon) = (%g, %g), dot_prd = %g. 
Setting D:=A to triangularize quadrilateral.\n",nco_prg_nm_get(),fnc_nm,*crn_lat+0,*crn_lon+0,dot_prd); /* Triangularize quadrilateral D:=A */ crn_lat[3]=crn_lat[0]; crn_lon[3]=crn_lon[0]; return True; }else if(!flg_ccw && crn_nbr == 4 && rcr_lvl == 1){ /* Original ABC is not CCW 20160124: Simplistic fix: reverse gridpoint order This only works for quadrilaterals without degenerate points */ double crn_tmp; if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: INFO %s reports triangle ABC is non-CCW in quadrilateral gridcell with LL (lat,lon) = (%g, %g), dot_prd = %g. Mirror-imaging...\n",nco_prg_nm_get(),fnc_nm,*crn_lat+0,*crn_lon+0,dot_prd); crn_tmp=crn_lat[1]; crn_lat[1]=crn_lat[3]; crn_lat[3]=crn_tmp; crn_tmp=crn_lon[1]; crn_lon[1]=crn_lon[3]; crn_lon[3]=crn_tmp; /* Check new triangle ABC */ idx_ccw=0; flg_ccw=nco_ccw_chk(crn_lat,crn_lon,crn_nbr,idx_ccw,rcr_lvl+1); if(flg_ccw){ /* Inverted ABC is CCW, now check CDA */ idx_ccw=2; flg_ccw=nco_ccw_chk(crn_lat,crn_lon,crn_nbr,idx_ccw,rcr_lvl+1); if(flg_ccw){ return True; }else{ if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: INFO %s reports triangle ABC is CCW after inversion, but triangle CDA is not at quadrilateral gridcell with LL (lat,lon) = (%g, %g), dot_prd = %g. Setting D:=A to triangularize quadrilateral.\n",nco_prg_nm_get(),fnc_nm,*crn_lat+0,*crn_lon+0,dot_prd); /* Triangularize quadrilateral D:=A */ crn_lat[3]=crn_lat[0]; crn_lon[3]=crn_lon[0]; return True; } /* flg_ccw */ }else{ /* Original and Inverted ABC are not CCW */ if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports triangle ABC remains non-CCW after first inversion\n",nco_prg_nm_get(),fnc_nm); return False; } /* !flg_ccw */ } /* flg_ccw */ return flg_ccw; } /* !nco_ccw_chk() */
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LambdaMangleContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Lex/ModuleLoader.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/LocInfoType.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/OwningPtr.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/MC/MCParser/MCAsmParser.h" #include <deque> #include <string> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class 
ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class AttributeList; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnumConstantDecl; class Expr; class ExtVectorType; class ExternalSemaSource; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; class MultiLevelTemplateArgumentList; class NamedDecl; class NonNullAttr; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class 
StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TargetAttributesSema; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; namespace sema { class AccessedEntity; class BlockScopeInfo; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class TemplateDeductionInfo; } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) LLVM_DELETED_FUNCTION; void operator=(const Sema &) LLVM_DELETED_FUNCTION; mutable const TargetAttributesSema* TheTargetAttributesSema; ///\brief Source of additional semantic information. ExternalSemaSource *ExternalSource; ///\brief Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); static bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { // We are about to link these. It is now safe to compute the linkage of // the new decl. 
If the new decl has external linkage, we will // link it with the hidden decl (which also has external linkage) and // it will keep having external linkage. If it has internal linkage, we // will not link it. Since it has no previous decls, it will remain // with internal linkage. return !Old->isHidden() || New->hasExternalLinkage(); } public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// \brief Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// \brief Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// \brief Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; /// PackContext - Manages the stack for \#pragma pack. An alignment /// of 0 indicates default alignment. void *PackContext; // Really a "PragmaPackStack*" bool MSStructPragmaOn; // True when \#pragma ms_struct on /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// \brief Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// ExprNeedsCleanups - True if the current evaluation context /// requires cleanups to be run at its conclusion. 
bool ExprNeedsCleanups; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs; /// \brief Stack containing information about each of the nested /// function, block, and method scopes that are currently active. /// /// This array is never empty. Clients should ignore the first /// element, which is used to cache a single FunctionScopeInfo /// that's used to parse every top-level function. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. OwningPtr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType; /// \brief Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. OwningPtr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// \brief A mapping from external names to the most recent /// locally-scoped extern "C" declaration with that name. 
/// /// This map contains external declarations introduced in local /// scopes, e.g., /// /// \code /// extern "C" void f() { /// void foo(int, int); /// } /// \endcode /// /// Here, the name "foo" will be associated with the declaration of /// "foo" within f. This name is not visible outside of /// "f". However, we still find it in two cases: /// /// - If we are declaring another global or extern "C" entity with /// the name "foo", we can find "foo" as a previous declaration, /// so that the types of this external declaration can be checked /// for compatibility. /// /// - If we would implicitly declare "foo" (e.g., due to a call to /// "foo" in C when no prototype or definition is visible), then /// we find this declaration of "foo" and complain that it is /// not visible. llvm::DenseMap<DeclarationName, NamedDecl *> LocallyScopedExternCDecls; /// \brief Look for a locally scoped extern "C" declaration by the given name. llvm::DenseMap<DeclarationName, NamedDecl *>::iterator findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// \brief All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// \brief The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// \brief All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. 
DelegatingCtorDeclsType DelegatingCtorDecls; /// \brief All the destructors seen during a class definition that had their /// exception spec computation delayed because it depended on an unparsed /// exception spec. SmallVector<CXXDestructorDecl*, 2> DelayedDestructorExceptionSpecs; /// \brief All the overriding destructors seen during a class definition /// (there could be multiple due to nested classes) that had their exception /// spec checks delayed, plus the overridden destructor. SmallVector<std::pair<const CXXDestructorDecl*, const CXXDestructorDecl*>, 2> DelayedDestructorExceptionSpecChecks; /// \brief All the members seen during a class definition which were both /// explicitly defaulted and had explicitly-specified exception /// specifications, along with the function type containing their /// user-specified exception specification. Those exception specifications /// were overridden with the default specifications, but we still need to /// check whether they are compatible with the default specification, and /// we can't do that until the nesting set of class definitions is complete. SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2> DelayedDefaultedMemberExceptionSpecs; /// \brief Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, const FunctionDecl *FD); LateTemplateParserCB *LateTemplateParser; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, void *P) { LateTemplateParser = LTP; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. 
class DelayedDiagnostics { /// \brief The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(0) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != 0; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = 0; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == NULL); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. 
class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = 0; } ~ContextRAII() { pop(); } }; /// \brief RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated); } ~SynthesizedFunctionScope() { S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::DenseMap<IdentifierInfo*,WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// \brief Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. 
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;

IdentifierResolver IdResolver;

/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;

/// \brief The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;

/// \brief The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;

/// \brief The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;

/// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;

/// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;

/// \brief Caches identifiers/selectors for NSFoundation APIs.
OwningPtr<NSAPI> NSAPIObj;

/// \brief The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;

/// \brief Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;

/// \brief The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];

/// \brief The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;

/// \brief Pointer to NSString type (NSString *).
QualType NSStringPointer;

/// \brief The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;

/// \brief The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;

/// \brief The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;

/// \brief The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;

/// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;

/// \brief id<NSCopying> type.
QualType QIDNSCopying;

/// \brief will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;

/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;

/// \brief Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum ExpressionEvaluationContext {
  /// \brief The current expression and its subexpressions occur within an
  /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
  /// \c sizeof, where the type of the expression may be significant but
  /// no code will be generated to evaluate the value of the expression at
  /// run time.
  Unevaluated,

  /// \brief The current context is "potentially evaluated" in C++11 terms,
  /// but the expression is evaluated at compile-time (like the values of
  /// cases in a switch statement).
  ConstantEvaluated,

  /// \brief The current expression is potentially evaluated at run time,
  /// which means that code may be generated to evaluate the value of the
  /// expression at run time.
  PotentiallyEvaluated,

  /// \brief The current expression is potentially evaluated, but any
  /// declarations referenced inside that expression are only used if
  /// in fact the current expression is used.
  ///
  /// This value is used when parsing default function arguments, for which
  /// we would like to provide diagnostics (e.g., passing non-POD arguments
  /// through varargs) but do not want to mark declarations as "referenced"
  /// until the default argument is used.
  PotentiallyEvaluatedIfUsed
};

/// \brief Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
  /// \brief The expression evaluation context.
  ExpressionEvaluationContext Context;

  /// \brief Whether the enclosing context needed a cleanup.
  bool ParentNeedsCleanups;

  /// \brief Whether we are in a decltype expression.
  bool IsDecltype;

  /// \brief The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;

  llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;

  /// \brief The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;

  /// \brief The declaration that provides context for the lambda expression
  /// if the normal declaration context does not suffice, e.g., in a
  /// default function argument.
  Decl *LambdaContextDecl;

  /// \brief The context information used to mangle lambda expressions
  /// within this context.
  ///
  /// This mangling information is allocated lazily, since most contexts
  /// do not have lambda expressions.
  IntrusiveRefCntPtr<LambdaMangleContext> LambdaMangle;

  /// \brief If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

  /// \brief If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    bool ParentNeedsCleanups,
                                    Decl *LambdaContextDecl,
                                    bool IsDecltype)
    : Context(Context), ParentNeedsCleanups(ParentNeedsCleanups),
      IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
      LambdaContextDecl(LambdaContextDecl), LambdaMangle() { }

  /// \brief Retrieve the mangling context for lambdas.
  LambdaMangleContext &getLambdaMangleContext() {
    assert(LambdaContextDecl && "Need to have a lambda context declaration");
    // Allocated lazily on first use; see LambdaMangle above.
    if (!LambdaMangle)
      LambdaMangle = new LambdaMangleContext;
    return *LambdaMangle;
  }
};

/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
public:
  enum Kind {
    NoMemberOrDeleted,
    Ambiguous,
    Success
  };

private:
  llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

public:
  SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID)
    : FastFoldingSetNode(ID)
  {}

  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};

/// \brief A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache;

/// \brief The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;

llvm::BumpPtrAllocator BumpAlloc;

/// \brief The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;

typedef llvm::DenseMap<ParmVarDecl *, SmallVector<ParmVarDecl *, 1> >
  UnparsedDefaultArgInstantiationsMap;

/// \brief A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed;

/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
    llvm::SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;

/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;

/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::DenseMap<Selector, SourceLocation> ReferencedSelectors;

/// Kinds of C++ special members.
enum CXXSpecialMember {
  CXXDefaultConstructor,
  CXXCopyConstructor,
  CXXMoveConstructor,
  CXXCopyAssignment,
  CXXMoveAssignment,
  CXXDestructor,
  CXXInvalid
};

typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;

/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

void ReadMethodPool(Selector Sel);

/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);

/// \brief Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);

/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
  FPContractStateRAII(Sema& S)
    : S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
  ~FPContractStateRAII() {
    S.FPFeatures.fp_contract = OldFPContractState;
  }
private:
  Sema& S;
  bool OldFPContractState : 1;
};

typedef llvm::MCAsmParserSemaCallback::InlineAsmIdentifierInfo
  InlineAsmIdentifierInfo;

public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
     TranslationUnitKind TUKind = TU_Complete,
     CodeCompleteConsumer *CompletionConsumer = 0);
~Sema();

/// \brief Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();

// Simple accessors for the objects Sema was constructed around.
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions     &getFPOptions() { return FPFeatures; }

DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
const TargetAttributesSema &getTargetAttributesSema() const;
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }

///\brief Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);

void PrintStats() const;

/// \brief Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
  Sema &SemaRef;
  unsigned DiagID;

public:
  SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
    : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

  ~SemaDiagnosticBuilder() {
    // If we aren't active, there is nothing to do.
    if (!isActive()) return;

    // Otherwise, we need to emit the diagnostic. First flush the underlying
    // DiagnosticBuilder data, and clear the diagnostic builder itself so it
    // won't emit the diagnostic in its own destructor.
    //
    // This seems wasteful, in that as written the DiagnosticBuilder dtor will
    // do its own needless checks to see if the diagnostic needs to be
    // emitted. However, because we take care to ensure that the builder
    // objects never escape, a sufficiently smart compiler will be able to
    // eliminate that code.
    FlushCounts();
    Clear();

    // Dispatch to Sema to emit the diagnostic.
    SemaRef.EmitCurrentDiagnostic(DiagID);
  }
};

/// \brief Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(DB, *this, DiagID);
}

/// \brief Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

/// \brief Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

bool findMacroSpelling(SourceLocation &loc, StringRef name);

/// \brief Get a string to suggest for zero-initialization of a type.
std::string getFixItZeroInitializerForType(QualType T) const;
std::string getFixItZeroLiteralForType(QualType T) const;

// Pass-through "ownership" helpers retained for source compatibility.
ExprResult Owned(Expr* E) { return E; }
ExprResult Owned(ExprResult R) { return R; }
StmtResult Owned(Stmt* S) { return S; }

void ActOnEndOfTranslationUnit();

void CheckDelegatingCtorCycles();

Scope *getScopeForContext(DeclContext *Ctx);

void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
void PushLambdaScope(CXXRecordDecl *Lambda, CXXMethodDecl *CallOperator);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                             RecordDecl *RD,
                             sema::CapturedRegionScopeInfo::CapturedRegionKind K);
void PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP =0,
                          const Decl *D = 0, const BlockExpr *blkExpr = 0);

sema::FunctionScopeInfo *getCurFunction() const {
  return FunctionScopes.back();
}

void PushCompoundScope();
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;

bool hasAnyUnrecoverableErrorsInThisFunction() const;

/// \brief Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();

/// \brief Retrieve the current lambda expression, if any.
sema::LambdaScopeInfo *getCurLambda();

/// \brief Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();

/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVector<Decl*,2> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

void ActOnComment(SourceRange Comment);

//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//

QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                            const DeclSpec *DS = 0);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                            const DeclSpec *DS = 0);
QualType BuildPointerType(QualType T,
                          SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
                            SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                        Expr *ArraySize, unsigned Quals,
                        SourceRange Brackets, DeclarationName Entity);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                            SourceLocation AttrLoc);

/// \brief Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
                           llvm::MutableArrayRef<QualType> ParamTypes,
                           SourceLocation Loc, DeclarationName Entity,
                           const FunctionProtoType::ExtProtoInfo &EPI);

QualType BuildMemberPointerType(QualType T, QualType Class,
                                SourceLocation Loc,
                                DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
                               SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);

TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
                                               TypeSourceInfo *ReturnTypeInfo);

/// \brief Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = 0);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
                                              const FunctionProtoType *FPT);
bool CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
    const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc,
    bool *MissingExceptionSpecification = 0,
    bool *MissingEmptyExceptionSpecification = 0,
    bool AllowNoexceptAllMatchWithNoSpec = false,
    bool IsOperatorNew = false);
bool CheckExceptionSpecSubset(
    const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
    const FunctionProtoType *Superset, SourceLocation SuperLoc,
    const FunctionProtoType *Subset, SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID,
    const FunctionProtoType *Target, SourceLocation TargetLoc,
    const FunctionProtoType *Source, SourceLocation SourceLoc);

TypeResult ActOnTypeName(Scope *S, Declarator &D);

/// \brief The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);

/// \brief Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
  bool Suppressed;  // When true, diagnose() is expected to emit nothing.

  TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { }

  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() {}
};

// getPrintable overloads: normalize heterogeneous arguments into forms the
// diagnostic streaming operators accept.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}

// BoundTypeDiagnoser1/2/3: bind a diagnostic ID plus 1-3 extra arguments
// for later emission from diagnose().  DiagID == 0 means "suppressed".
template<typename T1>
class BoundTypeDiagnoser1 : public TypeDiagnoser {
  unsigned DiagID;
  const T1 &Arg1;

public:
  BoundTypeDiagnoser1(unsigned DiagID, const T1 &Arg1)
    : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1) { }
  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
    if (Suppressed) return;
    S.Diag(Loc, DiagID) << getPrintable(Arg1) << T;
  }

  virtual ~BoundTypeDiagnoser1() { }
};

template<typename T1, typename T2>
class BoundTypeDiagnoser2 : public TypeDiagnoser {
  unsigned DiagID;
  const T1 &Arg1;
  const T2 &Arg2;

public:
  BoundTypeDiagnoser2(unsigned DiagID, const T1 &Arg1,
                      const T2 &Arg2)
    : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1),
      Arg2(Arg2) { }

  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
    if (Suppressed) return;
    S.Diag(Loc, DiagID) << getPrintable(Arg1) << getPrintable(Arg2) << T;
  }

  virtual ~BoundTypeDiagnoser2() { }
};

template<typename T1, typename T2, typename T3>
class BoundTypeDiagnoser3 : public TypeDiagnoser {
  unsigned DiagID;
  const T1 &Arg1;
  const T2 &Arg2;
  const T3 &Arg3;

public:
  BoundTypeDiagnoser3(unsigned DiagID, const T1 &Arg1,
                      const T2 &Arg2, const T3 &Arg3)
    : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1),
      Arg2(Arg2), Arg3(Arg3) { }

  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
    if (Suppressed) return;
    S.Diag(Loc, DiagID)
      << getPrintable(Arg1) << getPrintable(Arg2) << getPrintable(Arg3) << T;
  }

  virtual ~BoundTypeDiagnoser3() { }
};

bool RequireCompleteType(SourceLocation Loc, QualType T,
                         TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         unsigned DiagID);

template<typename T1>
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         unsigned DiagID, const T1 &Arg1) {
  BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1);
  return RequireCompleteType(Loc, T, Diagnoser);
}

template<typename T1, typename T2>
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         unsigned DiagID, const T1 &Arg1, const T2 &Arg2) {
  BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2);
  return RequireCompleteType(Loc, T, Diagnoser);
}

template<typename T1, typename T2, typename T3>
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         unsigned DiagID, const T1 &Arg1, const T2 &Arg2,
                         const T3 &Arg3) {
  BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2, Arg3);
  return RequireCompleteType(Loc, T, Diagnoser);
}

bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);

template<typename T1>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1) {
  BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1);
  return RequireCompleteExprType(E, Diagnoser);
}

template<typename T1, typename T2>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1,
                             const T2 &Arg2) {
  BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2);
  return RequireCompleteExprType(E, Diagnoser);
}

template<typename T1, typename T2, typename T3>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1,
                             const T2 &Arg2, const T3 &Arg3) {
  BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2, Arg3);
  return RequireCompleteExprType(E, Diagnoser);
}

bool RequireLiteralType(SourceLocation Loc, QualType T,
                        TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

template<typename T1>
bool RequireLiteralType(SourceLocation Loc, QualType T,
                        unsigned DiagID, const T1 &Arg1) {
  BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1);
  return RequireLiteralType(Loc, T, Diagnoser);
}

template<typename T1, typename T2>
bool RequireLiteralType(SourceLocation Loc, QualType T,
                        unsigned DiagID, const T1 &Arg1, const T2 &Arg2) {
  BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2);
  return RequireLiteralType(Loc, T, Diagnoser);
}

template<typename T1, typename T2, typename T3>
bool RequireLiteralType(SourceLocation Loc, QualType T,
                        unsigned DiagID, const T1 &Arg1, const T2 &Arg2,
                        const T3 &Arg3) {
  BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2, Arg3);
  return RequireLiteralType(Loc, T, Diagnoser);
}

QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                           const CXXScopeSpec &SS, QualType T);

QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
QualType BuildDecltypeType(Expr *E, SourceLocation Loc);
QualType BuildUnaryTransformType(QualType BaseType,
                                 UnaryTransformType::UTTKind UKind,
                                 SourceLocation Loc);

//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//

/// List of decls defined in a function prototype. This contains EnumConstants
/// that incorrectly end up in translation unit scope because there is no
/// function to pin them on. ActOnFunctionDeclarator reads this list and patches
/// them into the FunctionDecl.
std::vector<NamedDecl*> DeclsInPrototypeScope;

/// Nonzero if we are currently parsing a function declarator. This is a counter
/// as opposed to a boolean so we can deal with nested function declarators
/// such as:
///     void f(void (*g)(), ...)
unsigned InFunctionDeclarator;

DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = 0);

void DiagnoseUseOfUnimplementedSelectors();

bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;

ParsedType getTypeName(IdentifierInfo &II, SourceLocation NameLoc,
                       Scope *S, CXXScopeSpec *SS = 0,
                       bool isClassName = false,
                       bool HasTrailingDot = false,
                       ParsedType ObjectType = ParsedType(),
                       bool IsCtorOrDtorName = false,
                       bool WantNontrivialTypeSourceInfo = false,
                       IdentifierInfo **CorrectedII = 0);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
bool DiagnoseUnknownTypeName(IdentifierInfo *&II,
                             SourceLocation IILoc,
                             Scope *S,
                             CXXScopeSpec *SS,
                             ParsedType &SuggestedType);

/// \brief Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
  NC_Unknown,
  NC_Error,
  NC_Keyword,
  NC_Type,
  NC_Expression,
  NC_NestedNameSpecifier,
  NC_TypeTemplate,
  NC_FunctionTemplate
};

/// \brief Discriminated union carrying the result of ClassifyName(); only
/// the member selected by Kind is meaningful.
class NameClassification {
  NameClassificationKind Kind;
  ExprResult Expr;
  TemplateName Template;
  ParsedType Type;
  const IdentifierInfo *Keyword;

  explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

public:
  NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}

  NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

  NameClassification(const IdentifierInfo *Keyword)
    : Kind(NC_Keyword), Keyword(Keyword) { }

  static NameClassification Error() {
    return NameClassification(NC_Error);
  }

  static NameClassification Unknown() {
    return NameClassification(NC_Unknown);
  }

  static NameClassification NestedNameSpecifier() {
    return NameClassification(NC_NestedNameSpecifier);
  }

  static NameClassification TypeTemplate(TemplateName Name) {
    NameClassification Result(NC_TypeTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification FunctionTemplate(TemplateName Name) {
    NameClassification Result(NC_FunctionTemplate);
    Result.Template = Name;
    return Result;
  }

  NameClassificationKind getKind() const { return Kind; }

  ParsedType getType() const {
    assert(Kind == NC_Type);
    return Type;
  }

  ExprResult getExpression() const {
    assert(Kind == NC_Expression);
    return Expr;
  }

  TemplateName getTemplateName() const {
    assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate);
    return Template;
  }

  TemplateNameKind getTemplateNameKind() const {
    assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate);
    return Kind == NC_TypeTemplate? TNK_Type_template : TNK_Function_template;
  }
};

/// \brief Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S,
                                CXXScopeSpec &SS,
                                IdentifierInfo *&Name,
                                SourceLocation NameLoc,
                                const Token &NextToken,
                                bool IsAddressOfOperand,
                                CorrectionCandidateCallback *CCC = 0);

Decl *ActOnDeclarator(Scope *S, Declarator &D);

NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                            MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND,
                                      const LookupResult &Previous,
                                      Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                  DeclarationName Name,
                                  SourceLocation Loc);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R);
void CheckShadow(Scope *S, VarDecl *D);
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                  TypeSourceInfo *TInfo,
                                  LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
                                LookupResult &Previous, bool &Redeclaration);
NamedDecl* ActOnVariableDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckCompleteVariableDeclaration(VarDecl *var);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
void ActOnStartFunctionDeclarator();
void ActOnEndFunctionDeclarator();

NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);

void checkVoidParamDecl(ParmVarDecl *Param);

bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);

void DiagnoseHiddenVirtualMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
                              FunctionDecl *NewFD, LookupResult &Previous,
                              bool IsExplicitSpecialization);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
                                        SourceLocation Loc,
                                        QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                            SourceLocation NameLoc, IdentifierInfo *Name,
                            QualType T, TypeSourceInfo *TSInfo,
                            StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
                               SourceLocation EqualLoc,
                               Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
                                       SourceLocation EqualLoc,
                                       SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                             SourceLocation EqualLoc);

void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
                          bool TypeMayContainAuto);
void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto);
void ActOnInitializerError(Decl *Dcl);
void ActOnCXXForRangeDecl(Decl *D);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
                                       Decl **Group,
                                       unsigned NumDecls);
DeclGroupPtrTy BuildDeclaratorGroup(Decl **Group, unsigned NumDecls,
                                    bool TypeMayContainAuto = true);

/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(Decl **Group, unsigned NumDecls);

void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
                                     SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(FunctionDecl *FD);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
  return D && isa<ObjCMethodDecl>(D);
}

/// \brief Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);

void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);

/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);

/// \brief Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ParmVarDecl * const *Begin,
                              ParmVarDecl * const *End);

/// \brief Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin,
                                            ParmVarDecl * const *End,
                                            QualType ReturnTy,
                                            NamedDecl *D);

void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
                            SourceLocation AsmLoc,
                            SourceLocation RParenLoc);

/// \brief Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S,
                            AttributeList *AttrList,
                            SourceLocation SemiLoc);

/// \brief The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
                             ModuleIdPath Path);

/// \brief Create an implicit import of the given module at the given
/// source location.
///
/// This routine is typically used for error recovery, when the entity found
/// by name lookup is actually hidden within a module that we know about but
/// the user has forgotten to import.
void createImplicitModuleImport(SourceLocation Loc, Module *Mod);

/// \brief Retrieve a suitable printing policy.
PrintingPolicy getPrintingPolicy() const {
  return getPrintingPolicy(Context, PP);
}

/// \brief Retrieve a suitable printing policy.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                        const Preprocessor &PP);

/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);

Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
                                 DeclSpec &DS);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
                                 DeclSpec &DS,
                                 MultiTemplateParamsArg TemplateParams,
                                 bool IsExplicitInstantiation = false);

Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
                                  AccessSpecifier AS,
                                  RecordDecl *Record);

Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                     RecordDecl *Record);

bool isAcceptableTagRedeclaration(const TagDecl *Previous,
                                  TagTypeKind NewTag, bool isDefinition,
                                  SourceLocation NewTagLoc,
                                  const IdentifierInfo &Name);

enum TagUseKind {
  TUK_Reference,   // Reference to a tag:  'struct foo *X;'
  TUK_Declaration, // Fwd decl of a tag:   'struct foo;'
  TUK_Definition,  // Definition of a tag: 'struct foo { int X; } Y;'
  TUK_Friend       // Friend declaration:  'friend struct foo;'
};

Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
               SourceLocation KWLoc, CXXScopeSpec &SS,
               IdentifierInfo *Name, SourceLocation NameLoc,
               AttributeList *Attr, AccessSpecifier AS,
               SourceLocation ModulePrivateLoc,
               MultiTemplateParamsArg TemplateParameterLists,
               bool &OwnedDecl, bool &IsDependent,
               SourceLocation ScopedEnumKWLoc,
               bool ScopedEnumUsesClassTag, TypeResult UnderlyingType);

Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
                              unsigned TagSpec, SourceLocation TagLoc,
                              CXXScopeSpec &SS,
                              IdentifierInfo *Name, SourceLocation NameLoc,
                              AttributeList *Attr,
                              MultiTemplateParamsArg TempParamLists);

TypeResult ActOnDependentTag(Scope *S,
                             unsigned TagSpec,
                             TagUseKind TUK,
                             const CXXScopeSpec &SS,
                             IdentifierInfo *Name,
                             SourceLocation TagLoc,
                             SourceLocation NameLoc);

void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
               IdentifierInfo *ClassName,
               SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
                 Declarator &D, Expr *BitfieldWidth);

FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation
DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, AttributeList *MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = 0); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). 
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceLocation RBraceLoc); void ActOnObjCContainerFinishDefinition(); /// \brief Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, const EnumDecl *Prev); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, AttributeList *Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc, SourceLocation RBraceLoc, Decl *EnumDecl, Decl **Elements, unsigned NumElements, Scope *S, AttributeList *Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. 
If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// \brief Make the given externally-produced declaration visible at the /// top level scope. /// /// \param D The externally-produced declaration to push. /// /// \param Name The name of the externally-produced declaration. void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param ExplicitInstantiationOrSpecialization When true, we are checking /// whether the declaration is in scope for the purposes of explicit template /// instantiation or specialization. The default is false. bool isDeclInScope(NamedDecl *&D, DeclContext *Ctx, Scope *S = 0, bool ExplicitInstantiationOrSpecialization = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Attribute merging methods. 
Return true if a new attribute was added. AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool Override, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, StringRef Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); /// \brief Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// \brief Don't merge availability attributes at all. AMK_None, /// \brief Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// \brief Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. 
AMK_Override }; void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, Decl *Old, Scope *S); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &OldDecls, bool OldDeclsWereHidden); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool OldIsHidden); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl); /// \brief Checks availability of the function depending on the current /// function context. Inside an unavailable function, unavailability is ignored. /// /// \returns true if \p FD is unavailable and current context is inside /// an available function, false otherwise. 
bool isFunctionConsideredUnavailable(FunctionDecl *FD); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionArgTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = 0); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsNoReturnConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl 
*NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg ///< Value of a non-type template parameter. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); /// \brief Abstract base class used to diagnose problems that occur while /// trying to convert an expression to integral or enumeration type. class ICEConvertDiagnoser { public: bool Suppress; bool SuppressConversion; ICEConvertDiagnoser(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) { } /// \brief Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual DiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the expression has incomplete class type. virtual DiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the only matching conversion function /// is explicit. virtual DiagnosticBuilder diagnoseExplicitConv(Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// \brief Emits a note for the explicit conversion function. 
virtual DiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when there are multiple possible conversion /// functions. virtual DiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a note for one of the candidate conversions. virtual DiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual DiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ICEConvertDiagnoser() {} }; ExprResult ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *FromE, ICEConvertDiagnoser &Diagnoser, bool AllowScopedEnumerations); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this is a typesafe union. 
typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = false); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, TemplateArgumentListInfo *ExplicitTemplateArgs = 0); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, Expr **Args, unsigned NumArgs, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false); void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false); void AddConversionCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet& CandidateSet); void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, 
OverloadCandidateSet &CandidateSet); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, Expr **Args, unsigned NumArgs, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys, Expr **Args, unsigned NumArgs, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, Expr **Args, unsigned NumArgs, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, bool Operator, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType()); // Emit as a series of 'note's all template and non-templates // identified by the expression Expr void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = 0); FunctionDecl *ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair* Found = 0); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool 
Complain = false, const SourceRange& OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; // An enum to represent whether something is dealing with a call to begin() // or a call to end() in a range-based for loop. enum BeginEndFunction { BEF_begin, BEF_end }; ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc, SourceLocation RangeLoc, VarDecl *Decl, BeginEndFunction BEF, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, Expr **Args, unsigned NumArgs, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, Expr **Args, unsigned NumArgs, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *input); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, Expr **Args, unsigned NumArgs, 
SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, Expr **Args, unsigned NumArgs, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ParmVarDecl **Param, ParmVarDecl **ParamEnd, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. 
In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// @brief Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up the name of an Objective-C protocol. 
LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// \brief Look up any declaration with any name. LookupAnyName }; /// \brief Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// \brief The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// \brief The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists. ForRedeclaration }; /// \brief The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// \brief The lookup resulted in an error. LOLR_Error, /// \brief The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// \brief The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// \brief The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template }; SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); private: bool CppLookupName(LookupResult &R, Scope *S); // \brief The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// \brief Whether we have already loaded known namespaces from an external /// source. bool LoadedExternalKnownNamespaces; public: /// \brief Look up a name, looking for a single declaration. 
Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRawAndTemplate); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, bool Operator, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool 
IncludeGlobalScope = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true); TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext = 0, bool EnteringContext = false, const ObjCObjectPointerType *OPT = 0); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool ExplicitInstantiationOrSpecialization); bool DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. 
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD, bool NonInheritable = true, bool Inheritable = true); void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL, bool NonInheritable = true, bool Inheritable = true, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const AttributeList *AttrList); void checkUnusedDeclAttributes(Declarator &D); bool CheckRegparmAttr(const AttributeList &attr, unsigned &value); bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC, const FunctionDecl *FD = 0); bool CheckNoReturnAttr(const AttributeList &attr); void CheckAlignasUnderalignment(Decl *D); /// \brief Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs, SourceRange Range); void WarnUndefinedMethod(SourceLocation ImpLoc, ObjCMethodDecl *method, bool &IncompleteImpl, unsigned DiagID); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); bool isPropertyReadonly(ObjCPropertyDecl *PropertyDecl, ObjCInterfaceDecl *IDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap; /// CheckProtocolMethodDefs - This routine checks unimplemented /// methods declared in protocol, and those referenced by it. 
void CheckProtocolMethodDefs(SourceLocation ImpLoc,
                             ObjCProtocolDecl *PDecl,
                             bool& IncompleteImpl,
                             const SelectorSet &InsMap,
                             const SelectorSet &ClsMap,
                             ObjCContainerDecl *CDecl);

/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                              ObjCIvarDecl **Fields, unsigned nIvars,
                              SourceLocation Loc);

/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
                               ObjCContainerDecl* IDecl,
                               bool IncompleteImpl = false);

/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
                                     ObjCContainerDecl *CDecl,
                                     const SelectorSet &InsMap);

/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl,
                                  ObjCInterfaceDecl *IDecl);
void DefaultSynthesizeProperties(Scope *S, Decl *D);

/// CollectImmediateProperties - This routine collects all properties in
/// the class and its conforming protocols; but not those in its super class.
void CollectImmediateProperties(ObjCContainerDecl *CDecl,
            llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& PropMap,
            llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& SuperPropMap);

/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                    ObjCMethodDecl *Method, ObjCIvarDecl *IV);

/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
                       SourceLocation AtLoc,
                       SourceLocation LParenLoc,
                       FieldDeclarator &FD,
                       Selector GetterSel,
                       Selector SetterSel,
                       const bool isAssign,
                       const bool isReadWrite,
                       const unsigned Attributes,
                       const unsigned AttributesAsWritten,
                       bool *isOverridingProperty,
                       TypeSourceInfo *T,
                       tok::ObjCKeywordKind MethodImplKind);

/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
                                     ObjCContainerDecl *CDecl,
                                     SourceLocation AtLoc,
                                     SourceLocation LParenLoc,
                                     FieldDeclarator &FD,
                                     Selector GetterSel,
                                     Selector SetterSel,
                                     const bool isAssign,
                                     const bool isReadWrite,
                                     const unsigned Attributes,
                                     const unsigned AttributesAsWritten,
                                     TypeSourceInfo *T,
                                     tok::ObjCKeywordKind MethodImplKind,
                                     DeclContext *lexicalDC = 0);

/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                     ObjCContainerDecl* IDecl);

void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

enum MethodMatchStrategy {
  MMS_loose,
  MMS_strict
};

/// MatchTwoMethodDeclarations - Checks if two methods' types match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                const ObjCMethodDecl *PrevMethod,
                                MethodMatchStrategy strategy = MMS_strict);

/// MatchAllMethodDeclarations - Check methods declared in interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                const SelectorSet &ClsMap,
                                SelectorSet &InsMapSeen,
                                SelectorSet &ClsMapSeen,
                                ObjCImplDecl* IMPDecl,
                                ObjCContainerDecl* IDecl,
                                bool &IncompleteImpl,
                                bool ImmediateClass,
                                bool WarnCategoryMethodImpl=false);

/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

/// \brief Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                         bool receiverIdOrClass,
                                         bool warn, bool instance);

public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false, bool warn=true) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, warn, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false, bool warn=true) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, warn, /*instance*/false); } /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg(Sema &actions) : E(0) { } // FIXME: The const_cast here is ugly. RValue references would make this // much nicer (or we could duplicate a bunch of the move semantics // emulation code from Ownership.h). FullExprArg(const FullExprArg& Other) : E(Other.E) {} ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? 
Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg(ActOnFinishFullExpr(Arg, CC).release()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.release()); } StmtResult ActOnExprStmt(ExprResult Arg); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, MultiStmtArg Elts, bool isStmtExpr); /// \brief A RAII object to enter scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S): S(S) { S.ActOnStartOfCompoundStmt(); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal, SourceLocation DotDotDotLoc, Expr *RHSVal, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); StmtResult ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal, Decl *CondVar, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond, Decl *CondVar); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, 
FullExprArg Cond, Decl *CondVar, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, FullExprArg Second, Decl *SecondVar, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *BeginEndDecl, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, sema::CapturedRegionScopeInfo::CapturedRegionKind Kind); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(bool IsInstantiation = false); RecordDecl 
*CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc); const VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, bool AllowFunctionParameters); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, SourceLocation RParenLoc); NamedDecl *LookupInlineAsmIdentifier(StringRef &LineBuf, SourceLocation Loc, InlineAsmIdentifierInfo &Info); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, SourceLocation EndLoc); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, 
Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, MultiStmtArg Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); StmtResult ActOnSEHFinallyBlock(SourceLocation Loc, Stmt *Block); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// \brief If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. 
void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void EmitDeprecationWarning(NamedDecl *D, StringRef Message, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass, const ObjCPropertyDecl *ObjCProperty); void HandleDelayedDeprecationCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); bool makeUnavailableInSystemHeader(SourceLocation loc, StringRef message); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D); bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass=0); void NoteDeletedFunction(FunctionDecl *FD); std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, Expr **Args, unsigned NumArgs); void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = 0, bool IsDecltype = false); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, bool IsDecltype = false); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for 
marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E); void MarkMemberReferenced(MemberExpr *E); void UpdateMarkingForLValueToRValue(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// \brief Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. 
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType); /// \brief Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// \brief Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// \brief Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = 0); /// \brief Figure out if an expression could be turned into a call. bool isExprCallable(const Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// \brief Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); // Primary Expressions. 
SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = 0); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = 0, ArrayRef<Expr *> Args = ArrayRef<Expr *>()); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = 0); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = 0, NamedDecl *FoundD = 0); ExprResult BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, Expr *baseObjectExpr = 0, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand); ExprResult 
BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = 0); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr*> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = 0); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = 0); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = 0); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(const Token *StringToks, unsigned NumStringToks, Scope *UDLScope = 0); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, MultiTypeArg ArgTypes, MultiExprArg ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, TypeSourceInfo **Types, Expr **Exprs, unsigned NumAssocs); // Binary/Unary Operators. 'Tok' is the token for the operator. 
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, const SourceRange &ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. 
when the class // defines a custom operator->). struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; bool HasTrailingLParen; }; ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = 0); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); ExprResult LookupMemberExpr(LookupResult &R, ExprResult &Base, bool &IsArrow, SourceLocation OpLoc, CXXScopeSpec &SS, Decl *ObjCImpDecl, bool HasTemplateArgs); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl, bool HasTrailingLParen); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr **Args, unsigned NumArgs, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = 0, bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
                                 SourceLocation LParenLoc,
                                 Expr **Args, unsigned NumArgs,
                                 SourceLocation RParenLoc,
                                 Expr *Config = 0,
                                 bool IsExecConfig = false);

ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                   MultiExprArg ExecConfig,
                                   SourceLocation GGGLoc);

ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
                         Declarator &D, ParsedType &Ty,
                         SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
                               TypeSourceInfo *Ty,
                               SourceLocation RParenLoc, Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);

/// \brief Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                              SourceLocation RParenLoc, Expr *E,
                              TypeSourceInfo *TInfo);

ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
                                ParsedType Ty,
                                SourceLocation RParenLoc,
                                Expr *InitExpr);

ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                    TypeSourceInfo *TInfo,
                                    SourceLocation RParenLoc,
                                    Expr *LiteralExpr);

ExprResult ActOnInitList(SourceLocation LBraceLoc,
                         MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);

ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                      SourceLocation Loc,
                                      bool GNUSyntax,
                                      ExprResult Init);

ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
                      tok::TokenKind Kind,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
                      BinaryOperatorKind Opc,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc,
                              BinaryOperatorKind Opc,
                              Expr *LHSExpr, Expr *RHSExpr);

/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                              SourceLocation ColonLoc,
                              Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                          LabelDecl *TheDecl);

// GNU statement expressions "({...})": the parser brackets the embedded
// compound statement with these callbacks.
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();

// __builtin_offsetof(type, identifier(.identifier|[expr])*)
// One parsed designator component of a __builtin_offsetof expression:
// either a member access (.ident) or an array index ([expr]).
struct OffsetOfComponent {
  SourceLocation LocStart, LocEnd;
  bool isBrackets;  // true if [expr], false if .ident
  union {
    IdentifierInfo *IdentInfo;
    Expr *E;
  } U;
};

/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
                                TypeSourceInfo *TInfo,
                                OffsetOfComponent *CompPtr,
                                unsigned NumComponents,
                                SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
                                SourceLocation BuiltinLoc,
                                SourceLocation TypeLoc,
                                ParsedType ParsedArgTy,
                                OffsetOfComponent *CompPtr,
                                unsigned NumComponents,
                                SourceLocation RParenLoc);

// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
                           Expr *CondExpr, Expr *LHSExpr,
                           Expr *RHSExpr, SourceLocation RPLoc);

// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
                      SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
                          TypeSourceInfo *TInfo, SourceLocation RPLoc);

// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);

bool CheckCaseExpression(Expr *E);

/// \brief Describes the result of an "if-exists" condition check.
enum IfExistsResult {
  /// \brief The symbol exists.
  IER_Exists,

  /// \brief The symbol does not exist.
  IER_DoesNotExist,

  /// \brief The name is a dependent name, so the results will differ
  /// from one instantiation to the next.
  IER_Dependent,

  /// \brief An error occurred.
  IER_Error
};

IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
                             const DeclarationNameInfo &TargetNameInfo);

IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
                             bool IsIfExists, CXXScopeSpec &SS,
                             UnqualifiedId &Name);

StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
                                      bool IsIfExists,
                                      NestedNameSpecifierLoc QualifierLoc,
                                      DeclarationNameInfo NameInfo,
                                      Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
                                      bool IsIfExists,
                                      CXXScopeSpec &SS, UnqualifiedId &Name,
                                      Stmt *Nested);

//===------------------------- "Block" Extension ------------------------===//

/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);

/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
                         Scope *CurScope);

/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);

/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed.  ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
                              Scope *CurScope);

//===---------------------------- OpenCL Features -----------------------===//

/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, AttributeList *AttrList); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); CXXRecordDecl *getStdBadAlloc() const; /// \brief Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// \brief Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// \brief Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. 
bool isInitListConstructor(const CXXConstructorDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, AttributeList *AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool isTypeName, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, const CXXScopeSpec &SS, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, AttributeList *AttrList, bool IsInstantiation, bool IsTypeName, SourceLocation TypenameLoc); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, bool HasUsingKeyword, SourceLocation UsingLoc, CXXScopeSpec &SS, UnqualifiedId &Name, AttributeList *AttrList, bool IsTypeName, SourceLocation TypenameLoc); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, AttributeList *AttrList, TypeResult Type); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. 
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
                      CXXConstructorDecl *Constructor, MultiExprArg Exprs,
                      bool HadMultipleCandidates, bool IsListInitialization,
                      bool RequiresZeroInit, unsigned ConstructKind,
                      SourceRange ParenRange);

// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
                      CXXConstructorDecl *Constructor, bool Elidable,
                      MultiExprArg Exprs, bool HadMultipleCandidates,
                      bool IsListInitialization, bool RequiresZeroInit,
                      unsigned ConstructKind, SourceRange ParenRange);

/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
                                  FunctionDecl *FD,
                                  ParmVarDecl *Param);

/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

/// \brief Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
  // Pointer to allow copying
  Sema *Self;
  // We order exception specifications thus:
  // noexcept is the most restrictive, but is only used in C++11.
  // throw() comes next.
  // Then a throw(collected exceptions)
  // Finally no specification, which is expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  SmallVector<QualType, 4> Exceptions;

  // Reset the collected dynamic-exception set (used when widening to a
  // less restrictive specification makes the collected list irrelevant).
  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }

public:
  // Starts at the most restrictive specification the dialect allows:
  // noexcept in C++11, throw() otherwise.
  explicit ImplicitExceptionSpecification(Sema &Self)
    : Self(&Self), ComputedEST(EST_BasicNoexcept) {
    if (!Self.getLangOpts().CPlusPlus11)
      ComputedEST = EST_DynamicNone;
  }

  /// \brief Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(ComputedEST != EST_ComputedNoexcept &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// \brief The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// \brief The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// \brief Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// \brief Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E);

  /// \brief Overwrite an EPI's exception specification with this
  /// computed exception specification.
  void getEPI(FunctionProtoType::ExtProtoInfo &EPI) const {
    EPI.ExceptionSpecType = getExceptionSpecType();
    if (EPI.ExceptionSpecType == EST_Dynamic) {
      EPI.NumExceptions = size();
      EPI.Exceptions = data();
    } else if (EPI.ExceptionSpecType == EST_None) {
      /// C++11 [except.spec]p14:
      ///   The exception-specification is noexcept(false) if the set of
      ///   potential exceptions of the special member function contains "any"
      EPI.ExceptionSpecType = EST_ComputedNoexcept;
      EPI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
                                                   tok::kw_false).take();
    }
  }

  // Convenience overload that builds a fresh ExtProtoInfo carrying the
  // computed specification.
  FunctionProtoType::ExtProtoInfo getEPI() const {
    FunctionProtoType::ExtProtoInfo EPI;
    getEPI(EPI);
    return EPI;
  }
};

/// \brief Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// default constructor of a class will have, and whether the parameter /// will be const. ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defautled /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD); /// \brief Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// \brief Check the given exception-specification and update the /// extended prototype information with the results. 
void checkExceptionSpecification(ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExtProtoInfo &EPI); /// \brief Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); /// \brief Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// \brief Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl, CXXDestructorDecl *Destructor); /// \brief Declare all inheriting constructors for the given class. /// /// \param ClassDecl The class declaration into which the inheriting /// constructors will be added. 
void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl); /// \brief Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. 
/// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// \brief Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// \brief Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// \brief Whether this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// \brief Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. 
ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). 
ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); //// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// \brief Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// \brief When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// \brief RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// \brief Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers. /// along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// \brief Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. void CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false); /// \brief Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. 
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); ExprResult CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *E, bool IsThrownVarInScope); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); /// ActOnCXXNew - Parsed a C++ 'new' expression. 
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Expr *ArraySize, SourceRange DirectInitRange, Expr *Initializer, bool TypeMayContainAuto = true); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, bool UseGlobal, QualType AllocType, bool IsArray, Expr **PlaceArgs, unsigned NumPlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete); bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range, DeclarationName Name, Expr** Args, unsigned NumArgs, DeclContext *Ctx, bool AllowMissing, FunctionDecl *&Operator, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, QualType Argument, bool addMallocAttr = false); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, bool ConvertToBoolean); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// ActOnUnaryTypeTrait - Parsed one of the unary type trait support /// pseudo-functions. 
ExprResult ActOnUnaryTypeTrait(UnaryTypeTrait OTT, SourceLocation KWLoc, ParsedType Ty, SourceLocation RParen); ExprResult BuildUnaryTypeTrait(UnaryTypeTrait OTT, SourceLocation KWLoc, TypeSourceInfo *T, SourceLocation RParen); /// ActOnBinaryTypeTrait - Parsed one of the bianry type trait support /// pseudo-functions. ExprResult ActOnBinaryTypeTrait(BinaryTypeTrait OTT, SourceLocation KWLoc, ParsedType LhsTy, ParsedType RhsTy, SourceLocation RParen); ExprResult BuildBinaryTypeTrait(BinaryTypeTrait BTT, SourceLocation KWLoc, TypeSourceInfo *LhsT, TypeSourceInfo *RhsT, SourceLocation RParen); /// \brief Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the bianry type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. 
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult DiagnoseDtorReference(SourceLocation NameLoc, Expr *MemExpr); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType, bool HasTrailingLParen); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName, bool HasTrailingLParen); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS, bool HasTrailingLParen); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); ExprResult ActOnFinishFullExpr(Expr *Expr) { return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc() : SourceLocation()); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue = false, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. 
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); bool isUnknownSpecialization(const CXXScopeSpec &SS); /// \brief The parser has parsed a global nested-name-specifier '::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(Scope *S, SourceLocation CCLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation IdLoc, IdentifierInfo &II, ParsedType ObjectType); bool BuildCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, QualType ObjectType, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup); /// \brief The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param Identifier The identifier preceding the '::'. /// /// \param IdentifierLoc The location of the identifier. /// /// \param CCLoc The location of the '::'. /// /// \param ObjectType The type of the object, if we're parsing /// nested-name-specifier in a member access expression. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, ParsedType ObjectType, bool EnteringContext, CXXScopeSpec &SS); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation ColonLoc, ParsedType ObjectType, bool EnteringContext); /// \brief The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. 
bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// \brief Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// \brief Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. 
/// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// \brief Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent); /// \brief Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params); /// \brief Introduce the scope for a lambda expression. sema::LambdaScopeInfo *enterLambdaScope(CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// \brief Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief Introduce the lambda parameters into scope. void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope); /// \brief Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. 
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope, bool IsInstantiation = false); /// \brief Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// \brief Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, Expr **Strings, unsigned NumStrings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. 
Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *" or "NSString *" depending on the type of /// ValueType, which is allowed to be a built-in numeric type or /// "char *" or "const char *". ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, ObjCDictionaryElement *Elements, unsigned NumElements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, 
SourceLocation ExternLoc, SourceLocation LangLoc, StringRef Lang, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = 0); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, AttributeList *Attrs = 0); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, Expr **Args, unsigned NumArgs, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool 
SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = ArrayRef<CXXCtorInitializer *>()); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// \brief The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// \brief The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// \brief The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// \brief Load any externally-stored vtable uses. void LoadExternalVTableUses(); typedef LazyVector<CXXRecordDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDynamicClasses, 2, 2> DynamicClassesType; /// \brief A list of all of the dynamic classes in this translation /// unit. DynamicClassesType DynamicClasses; /// \brief Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// \brief Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. 
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD); /// \brief Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); void CheckCompletedCXXClass(CXXRecordDecl *Record); void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); void ActOnFinishCXXMemberDecls(); void ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnReenterDeclaratorTemplateScope(Scope *S, DeclaratorDecl *D); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, bool Flag = true); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType 
CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD, const FunctionProtoType *T); void CheckDelayedExplicitlyDefaultedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases, unsigned NumBases); void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases, unsigned NumBases); bool IsDerivedFrom(QualType Derived, QualType Base); bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool BasePathInvolvesVirtualBase(const CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = 0, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(Decl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList 
&TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// \brief When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractArrayType }; bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template<typename T1> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const T1 &Arg1) { BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1); return RequireNonAbstractType(Loc, T, Diagnoser); } template<typename T1, typename T2> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const T1 &Arg1, const T2 &Arg2) { BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2); return RequireNonAbstractType(Loc, T, Diagnoser); } template<typename T1, typename T2, typename T3> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const T1 &Arg1, const T2 &Arg2, const T3 &Arg3) { BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2, Arg3); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, AbstractDiagSelID SelID = AbstractNone); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); bool 
hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); Decl *ActOnTypeParameter(Scope *S, bool Typename, bool Ellipsis, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); Decl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, Decl **Params, unsigned NumParams, SourceLocation RAngleLoc); /// \brief The context in which we are checking a template parameter /// list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC); TemplateParameterList * MatchTemplateParametersToScopeSpecifier(SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateParameterList **ParamLists, unsigned NumParamLists, bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid); DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false); /// \brief Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template); DeclResult ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, AttributeList *Attr, MultiTemplateParamsArg TemplateParameterLists); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); Decl *ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization(FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult 
ActOnExplicitInstantiation(Scope *S,
                             SourceLocation ExternLoc,
                             SourceLocation TemplateLoc,
                             unsigned TagSpec,
                             SourceLocation KWLoc,
                             const CXXScopeSpec &SS,
                             TemplateTy Template,
                             SourceLocation TemplateNameLoc,
                             SourceLocation LAngleLoc,
                             ASTTemplateArgsPtr TemplateArgs,
                             SourceLocation RAngleLoc,
                             AttributeList *Attr);

  DeclResult
  ActOnExplicitInstantiation(Scope *S,
                             SourceLocation ExternLoc,
                             SourceLocation TemplateLoc,
                             unsigned TagSpec,
                             SourceLocation KWLoc,
                             CXXScopeSpec &SS,
                             IdentifierInfo *Name,
                             SourceLocation NameLoc,
                             AttributeList *Attr);

  DeclResult ActOnExplicitInstantiation(Scope *S,
                                        SourceLocation ExternLoc,
                                        SourceLocation TemplateLoc,
                                        Declarator &D);

  TemplateArgumentLoc
  SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
                                          SourceLocation TemplateLoc,
                                          SourceLocation RAngleLoc,
                                          Decl *Param,
                                          SmallVectorImpl<TemplateArgument> &Converted);

  /// \brief Specifies the context in which a particular template
  /// argument is being checked.
  enum CheckTemplateArgumentKind {
    /// \brief The template argument was specified in the code or was
    /// instantiated with some deduced template arguments.
    CTAK_Specified,

    /// \brief The template argument was deduced via template argument
    /// deduction.
    CTAK_Deduced,

    /// \brief The template argument was deduced from an array bound
    /// via template argument deduction.
    CTAK_DeducedFromArrayBound
  };

  bool CheckTemplateArgument(NamedDecl *Param,
                             const TemplateArgumentLoc &Arg,
                             NamedDecl *Template,
                             SourceLocation TemplateLoc,
                             SourceLocation RAngleLoc,
                             unsigned ArgumentPackIndex,
                             SmallVectorImpl<TemplateArgument> &Converted,
                             CheckTemplateArgumentKind CTAK = CTAK_Specified);

  /// \brief Check that the given template arguments can be provided to
  /// the given template, converting the arguments along the way.
  ///
  /// \param Template The template to which the template arguments are being
  /// provided.
  ///
  /// \param TemplateLoc The location of the template name in the source.
  ///
  /// \param TemplateArgs The list of template arguments.
If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// /// \param ExpansionIntoFixedList If non-NULL, will be set true to indicate /// when the template arguments contain a pack expansion that is being /// expanded into a fixed parameter list. /// /// \returns True if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool *ExpansionIntoFixedList = 0); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, const TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateArgument(TemplateTemplateParmDecl *Param, const TemplateArgumentLoc &Arg, unsigned ArgumentPackIndex); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// \brief Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// \brief We are matching the template parameter lists of two templates /// that might be redeclarations. 
/// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// \brief We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// \brief We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// \brief Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// \brief Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. 
/// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// \brief The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. 
enum UnexpandedParameterPackContext {
    /// \brief An arbitrary expression.
    UPPC_Expression = 0,

    /// \brief The base type of a class type.
    UPPC_BaseType,

    /// \brief The type of an arbitrary declaration.
    UPPC_DeclarationType,

    /// \brief The type of a data member.
    UPPC_DataMemberType,

    /// \brief The size of a bit-field.
    UPPC_BitFieldWidth,

    /// \brief The expression in a static assertion.
    UPPC_StaticAssertExpression,

    /// \brief The fixed underlying type of an enumeration.
    UPPC_FixedUnderlyingType,

    /// \brief The enumerator value.
    UPPC_EnumeratorValue,

    /// \brief A using declaration.
    UPPC_UsingDeclaration,

    /// \brief A friend declaration.
    UPPC_FriendDeclaration,

    /// \brief A declaration qualifier.
    UPPC_DeclarationQualifier,

    /// \brief An initializer.
    UPPC_Initializer,

    /// \brief A default argument.
    UPPC_DefaultArgument,

    /// \brief The type of a non-type template parameter.
    UPPC_NonTypeTemplateParameterType,

    /// \brief The type of an exception.
    UPPC_ExceptionType,

    /// \brief Partial specialization.
    UPPC_PartialSpecialization,

    /// \brief Microsoft __if_exists.
    UPPC_IfExists,

    /// \brief Microsoft __if_not_exists.
    UPPC_IfNotExists,

    /// \brief Lambda expression.
    UPPC_Lambda,

    /// \brief Block expression.
    UPPC_Block
  };

  /// \brief Diagnose unexpanded parameter packs.
  ///
  /// \param Loc The location at which we should emit the diagnostic.
  ///
  /// \param UPPC The context in which we are diagnosing unexpanded
  /// parameter packs.
  ///
  /// \param Unexpanded the set of unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
                                        UnexpandedParameterPackContext UPPC,
                                        ArrayRef<UnexpandedParameterPack> Unexpanded);

  /// \brief If the given type contains an unexpanded parameter pack,
  /// diagnose the error.
  ///
  /// \param Loc The source location where a diagnostic should be emitted.
  ///
  /// \param T The type that is being checked for unexpanded parameter
  /// packs.
/// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// \brief If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// \brief If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// \brief If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// \brief If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// \brief If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. 
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param SS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(CXXScopeSpec &SS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. 
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// \brief Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. 
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. 
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// \brief Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// \brief Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. 
enum TemplateDeductionResult {
  /// \brief Template argument deduction was successful.
  TDK_Success = 0,

  /// \brief The declaration was invalid; do nothing.
  TDK_Invalid,

  /// \brief Template argument deduction exceeded the maximum template
  /// instantiation depth (which has already been diagnosed).
  TDK_InstantiationDepth,

  /// \brief Template argument deduction did not deduce a value
  /// for every template parameter.
  TDK_Incomplete,

  /// \brief Template argument deduction produced inconsistent
  /// deduced values for the given template parameter.
  TDK_Inconsistent,

  /// \brief Template argument deduction failed due to inconsistent
  /// cv-qualifiers on a template parameter type that would
  /// otherwise be deduced, e.g., we tried to deduce T in "const T"
  /// but were given a non-const "X".
  TDK_Underqualified,

  /// \brief Substitution of the deduced template argument values
  /// resulted in an error.
  TDK_SubstitutionFailure,

  /// \brief A non-dependent component of the parameter did not match the
  /// corresponding component of the argument.
  TDK_NonDeducedMismatch,

  /// \brief When performing template argument deduction for a function
  /// template, there were too many call arguments.
  TDK_TooManyArguments,

  /// \brief When performing template argument deduction for a function
  /// template, there were too few call arguments.
  TDK_TooFewArguments,

  /// \brief The explicitly-specified template arguments were not valid
  /// template arguments for the given template.
  TDK_InvalidExplicitArguments,

  /// \brief The arguments included an overloaded function name that could
  /// not be resolved to a suitable function.
  TDK_FailedOverloadResolution,

  /// \brief Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// brief A function argument from which we performed template argument // deduction for a call. struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) { } QualType OriginalParamType; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = 0); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, 
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); /// \brief Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, TypeSourceInfo *&Result); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = 0, bool RelativeToPrimary = false, const FunctionDecl *Pattern = 0); /// \brief A template instantiation that is currently in progress. 
struct ActiveTemplateInstantiation { /// \brief The kind of template instantiation we are performing enum InstantiationKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template, and /// TemplateArgs/NumTemplateArguments provides the template /// arguments as specified. /// FIXME: Use a TemplateArgumentList DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a ClassTemplatePartialSpecializationDecl or /// a FunctionTemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation } Kind; /// \brief The point of instantiation within the source code. 
SourceLocation PointOfInstantiation; /// \brief The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// \brief The entity that is being instantiated. Decl *Entity; /// \brief The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; /// \brief The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// \brief The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// \brief The source range that covers the construct that cause /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; ActiveTemplateInstantiation() : Kind(TemplateInstantiation), Template(0), Entity(0), TemplateArgs(0), NumTemplateArgs(0), DeductionInfo(0) {} /// \brief Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. 
bool isInstantiationRecord() const; friend bool operator==(const ActiveTemplateInstantiation &X, const ActiveTemplateInstantiation &Y) { if (X.Kind != Y.Kind) return false; if (X.Entity != Y.Entity) return false; switch (X.Kind) { case TemplateInstantiation: case ExceptionSpecInstantiation: return true; case PriorTemplateArgumentSubstitution: case DefaultTemplateArgumentChecking: if (X.Template != Y.Template) return false; // Fall through case DefaultTemplateArgumentInstantiation: case ExplicitTemplateArgumentSubstitution: case DeducedTemplateArgumentSubstitution: case DefaultFunctionArgumentInstantiation: return X.TemplateArgs == Y.TemplateArgs; } llvm_unreachable("Invalid InstantiationKind!"); } friend bool operator!=(const ActiveTemplateInstantiation &X, const ActiveTemplateInstantiation &Y) { return !(X == Y); } }; /// \brief List of active template instantiations. /// /// This vector is treated as a stack. As one template instantiation /// requires another template instantiation, additional /// instantiations are pushed onto the stack up to a /// user-configurable limit LangOptions::InstantiationDepth. SmallVector<ActiveTemplateInstantiation, 16> ActiveTemplateInstantiations; /// \brief Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// \brief The number of ActiveTemplateInstantiation entries in /// \c ActiveTemplateInstantiations that are not actual instantiations and, /// therefore, should not be counted as part of the instantiation depth. unsigned NonInstantiationEntries; /// \brief The last template from which a template instantiation /// error or warning was produced. /// /// This value is used to suppress printing of redundant template /// instantiation backtraces when there are multiple errors in the /// same instantiation. 
/// FIXME: Does this belong in Sema? It's tough
/// to implement it anywhere else.
If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// \brief Note that we are instantiating a class template, /// function template, or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// \brief Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, ActiveTemplateInstantiation::InstantiationKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are substituting prior template arguments into a /// non-type or template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// \brief Determines whether we have exceeded the maximum /// recursive template instantiations. 
operator bool() const { return Invalid; } private: Sema &SemaRef; bool Invalid; bool SavedInNonInstantiationSFINAEContext; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate(const InstantiatingTemplate&) LLVM_DELETED_FUNCTION; InstantiatingTemplate& operator=(const InstantiatingTemplate&) LLVM_DELETED_FUNCTION; }; void PrintInstantiationStack(); /// \brief Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// \brief Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().Context == Sema::Unevaluated; } /// \brief RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction.` class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; 
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; } /// \brief Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// \brief The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// \brief The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::DenseMap<IdentifierInfo *, TypoCorrection> UnqualifiedTyposCorrectedMap; /// \brief A cache containing the results of typo correction for unqualified /// name lookup. /// /// The string is the string that we corrected to (which may be empty, if /// there was no correction), while the boolean will be true when the /// string represents a keyword. UnqualifiedTyposCorrectedMap UnqualifiedTyposCorrected; /// \brief Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; /// \brief An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation), /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// \brief The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// \brief The queue of implicit template instantiations that are required /// and must be performed within the current local scope. 
/// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, unsigned ThisTypeQuals); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ParmVarDecl **Params, unsigned NumParams, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams = 0); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param NumExprs The number of expressions in \p Exprs. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. 
/// /// \returns true if an error occurred, false otherwise. bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = 0, LocalInstantiationScope *OuterMostScope = 0); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, 
ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false); void InstantiateStaticDataMemberDefinition( SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
//===--- Objective-C declarations, properties, methods, and messages ------===//

enum ObjCContainerKind {
  OCK_None = -1,
  OCK_Interface = 0,
  OCK_Protocol,
  OCK_Category,
  OCK_ClassExtension,
  OCK_Implementation,
  OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;

Decl *ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
                               IdentifierInfo *ClassName,
                               SourceLocation ClassLoc,
                               IdentifierInfo *SuperName,
                               SourceLocation SuperLoc,
                               Decl * const *ProtoRefs,
                               unsigned NumProtoRefs,
                               const SourceLocation *ProtoLocs,
                               SourceLocation EndProtoLoc,
                               AttributeList *AttrList);

Decl *ActOnCompatibilityAlias(
                  SourceLocation AtCompatibilityAliasLoc,
                  IdentifierInfo *AliasName,  SourceLocation AliasLocation,
                  IdentifierInfo *ClassName, SourceLocation ClassLocation);

bool CheckForwardProtocolDeclarationForCircularDependency(
  IdentifierInfo *PName,
  SourceLocation &PLoc, SourceLocation PrevLoc,
  const ObjCList<ObjCProtocolDecl> &PList);

Decl *ActOnStartProtocolInterface(
                  SourceLocation AtProtoInterfaceLoc,
                  IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc,
                  Decl * const *ProtoRefNames, unsigned NumProtoRefs,
                  const SourceLocation *ProtoLocs,
                  SourceLocation EndProtoLoc,
                  AttributeList *AttrList);

Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
                                  IdentifierInfo *ClassName,
                                  SourceLocation ClassLoc,
                                  IdentifierInfo *CategoryName,
                                  SourceLocation CategoryLoc,
                                  Decl * const *ProtoRefs,
                                  unsigned NumProtoRefs,
                                  const SourceLocation *ProtoLocs,
                                  SourceLocation EndProtoLoc);

Decl *ActOnStartClassImplementation(
                  SourceLocation AtClassImplLoc,
                  IdentifierInfo *ClassName, SourceLocation ClassLoc,
                  IdentifierInfo *SuperClassname,
                  SourceLocation SuperClassLoc);

Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                       IdentifierInfo *ClassName,
                                       SourceLocation ClassLoc,
                                       IdentifierInfo *CatName,
                                       SourceLocation CatLoc);

DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                             ArrayRef<Decl *> Decls);

DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
                                            IdentifierInfo **IdentList,
                                            SourceLocation *IdentLocs,
                                            unsigned NumElts);

DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
                                        const IdentifierLocPair *IdentList,
                                        unsigned NumElts,
                                        AttributeList *attrList);

void FindProtocolDeclaration(bool WarnOnDeclarations,
                             const IdentifierLocPair *ProtocolId,
                             unsigned NumProtocols,
                             SmallVectorImpl<Decl *> &Protocols);

/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
                                 SourceLocation Loc,
                                 unsigned &Attributes,
                                 bool propertyInPrimaryClass);

/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
/// \param CD The semantic container for the property
/// \param redeclaredProperty Declaration for property if redeclared
///        in class extension.
/// \param lexicalDC Container for redeclaredProperty.
void ProcessPropertyDecl(ObjCPropertyDecl *property,
                         ObjCContainerDecl *CD,
                         ObjCPropertyDecl *redeclaredProperty = 0,
                         ObjCContainerDecl *lexicalDC = 0);

void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
                              ObjCPropertyDecl *SuperProperty,
                              const IdentifierInfo *Name);

void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
                                      ObjCInterfaceDecl *ID);

void MatchOneProtocolPropertiesInClass(Decl *CDecl,
                                       ObjCProtocolDecl *PDecl);

Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
                 Decl **allMethods = 0, unsigned allNum = 0,
                 Decl **allProperties = 0, unsigned pNum = 0,
                 DeclGroupPtrTy *allTUVars = 0, unsigned tuvNum = 0);

Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
                    SourceLocation LParenLoc,
                    FieldDeclarator &FD, ObjCDeclSpec &ODS,
                    Selector GetterSel, Selector SetterSel,
                    bool *OverridingProperty,
                    tok::ObjCKeywordKind MethodImplKind,
                    DeclContext *lexicalDC = 0);

Decl *ActOnPropertyImplDecl(Scope *S,
                            SourceLocation AtLoc,
                            SourceLocation PropertyLoc,
                            bool ImplKind,
                            IdentifierInfo *PropertyId,
                            IdentifierInfo *PropertyIvar,
                            SourceLocation PropertyIvarLoc);

enum ObjCSpecialMethodKind {
  OSMK_None,
  OSMK_Alloc,
  OSMK_New,
  OSMK_Copy,
  OSMK_RetainingInit,
  OSMK_NonRetainingInit
};

struct ObjCArgInfo {
  IdentifierInfo *Name;
  SourceLocation NameLoc;
  // The Type is null if no type was specified, and the DeclSpec is invalid
  // in this case.
  ParsedType Type;
  ObjCDeclSpec DeclSpec;

  /// ArgAttrs - Attribute list for this argument.
  AttributeList *ArgAttrs;
};

Decl *ActOnMethodDeclaration(
  Scope *S,
  SourceLocation BeginLoc, // location of the + or -.
  SourceLocation EndLoc,   // location of the ; or {.
  tok::TokenKind MethodType,
  ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
  ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
  // optional arguments. The number of types/arguments is obtained
  // from the Sel.getNumArgs().
  ObjCArgInfo *ArgInfo,
  DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
  AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind,
  bool isVariadic, bool MethodDefinition);

ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
                                            const ObjCObjectPointerType *OPT,
                                            bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
                                         bool IsInstance);

bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);

ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
                          Expr *BaseExpr,
                          SourceLocation OpLoc,
                          DeclarationName MemberName,
                          SourceLocation MemberLoc,
                          SourceLocation SuperLoc, QualType SuperType,
                          bool Super);

ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
                          IdentifierInfo &propertyName,
                          SourceLocation receiverNameLoc,
                          SourceLocation propertyNameLoc);

ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);

/// \brief Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
  /// \brief The message is sent to 'super'.
  ObjCSuperMessage,
  /// \brief The message is an instance message.
  ObjCInstanceMessage,
  /// \brief The message is a class message, and the identifier is a type
  /// name.
  ObjCClassMessage
};

ObjCMessageKind getObjCMessageKind(Scope *S,
                                   IdentifierInfo *Name,
                                   SourceLocation NameLoc,
                                   bool IsSuper,
                                   bool HasTrailingDot,
                                   ParsedType &ReceiverType);

ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
                             Selector Sel,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc,
                             MultiExprArg Args);

ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
                             QualType ReceiverType,
                             SourceLocation SuperLoc,
                             Selector Sel,
                             ObjCMethodDecl *Method,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc,
                             MultiExprArg Args,
                             bool isImplicit = false);

ExprResult BuildClassMessageImplicit(QualType ReceiverType,
                                     bool isSuperReceiver,
                                     SourceLocation Loc,
                                     Selector Sel,
                                     ObjCMethodDecl *Method,
                                     MultiExprArg Args);

ExprResult ActOnClassMessage(Scope *S,
                             ParsedType Receiver,
                             Selector Sel,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc,
                             MultiExprArg Args);

ExprResult BuildInstanceMessage(Expr *Receiver,
                                QualType ReceiverType,
                                SourceLocation SuperLoc,
                                Selector Sel,
                                ObjCMethodDecl *Method,
                                SourceLocation LBracLoc,
                                ArrayRef<SourceLocation> SelectorLocs,
                                SourceLocation RBracLoc,
                                MultiExprArg Args,
                                bool isImplicit = false);

ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
                                        QualType ReceiverType,
                                        SourceLocation Loc,
                                        Selector Sel,
                                        ObjCMethodDecl *Method,
                                        MultiExprArg Args);

ExprResult ActOnInstanceMessage(Scope *S,
                                Expr *Receiver,
                                Selector Sel,
                                SourceLocation LBracLoc,
                                ArrayRef<SourceLocation> SelectorLocs,
                                SourceLocation RBracLoc,
                                MultiExprArg Args);

ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
                                ObjCBridgeCastKind Kind,
                                SourceLocation BridgeKeywordLoc,
                                TypeSourceInfo *TSInfo,
                                Expr *SubExpr);

ExprResult ActOnObjCBridgedCast(Scope *S,
                                SourceLocation LParenLoc,
                                ObjCBridgeCastKind Kind,
                                SourceLocation BridgeKeywordLoc,
                                ParsedType Type,
                                SourceLocation RParenLoc,
                                Expr *SubExpr);

bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);

/// \brief Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
                             const ObjCMethodDecl *Overridden);

/// \brief Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
  RTC_Compatible,
  RTC_Incompatible,
  RTC_Unknown
};

void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
                              ObjCInterfaceDecl *CurrentClass,
                              ResultTypeCompatibilityKind RTC);

enum PragmaOptionsAlignKind {
  POAK_Native,  // #pragma options align=native
  POAK_Natural, // #pragma options align=natural
  POAK_Packed,  // #pragma options align=packed
  POAK_Power,   // #pragma options align=power
  POAK_Mac68k,  // #pragma options align=mac68k
  POAK_Reset    // #pragma options align=reset
};

/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
                             SourceLocation PragmaLoc);

enum PragmaPackKind {
  PPK_Default, // #pragma pack([n])
  PPK_Show,    // #pragma pack(show), only supported by MSVC.
  PPK_Push,    // #pragma pack(push, [identifier], [n])
  PPK_Pop      // #pragma pack(pop, [identifier], [n])
};

enum PragmaMSStructKind {
  PMSST_OFF, // #pragma ms_struct off
  PMSST_ON   // #pragma ms_struct on
};

/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(PragmaPackKind Kind,
                     IdentifierInfo *Name,
                     Expr *Alignment,
                     SourceLocation PragmaLoc,
                     SourceLocation LParenLoc,
                     SourceLocation RParenLoc);

/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);

/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
                       Scope *curScope,
                       SourceLocation PragmaLoc);

/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
                           SourceLocation PragmaLoc);

NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                               SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
                       SourceLocation PragmaLoc,
                       SourceLocation WeakNameLoc);

/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
                                IdentifierInfo* AliasName,
                                SourceLocation PragmaLoc,
                                SourceLocation WeakNameLoc,
                                SourceLocation AliasNameLoc);

/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
                          IdentifierInfo* AliasName,
                          SourceLocation PragmaLoc,
                          SourceLocation WeakNameLoc,
                          SourceLocation AliasNameLoc);

/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT
void ActOnPragmaFPContract(tok::OnOffSwitch OOS);

/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);

/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);

/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();

/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
                                 SourceLocation Loc);

/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);

/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();

/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);

/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
                    unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
                    unsigned SpellingListIndex, bool IsPackExpansion);

// OpenMP directives and clauses.

/// \brief Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
                                     SourceLocation Loc,
                                     Scope *CurScope,
                                     ArrayRef<DeclarationNameInfo> IdList);
/// \brief Build a new OpenMPThreadPrivateDecl and check its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(
                                     SourceLocation Loc,
                                     ArrayRef<DeclRefExpr *> VarList);

/// \brief The kind of conversion being performed.
enum CheckedConversionKind {
  /// \brief An implicit conversion.
  CCK_ImplicitConversion,
  /// \brief A C-style cast.
  CCK_CStyleCast,
  /// \brief A functional-style cast.
  CCK_FunctionalCast,
  /// \brief A cast other than a C-style cast.
  CCK_OtherCast
};

/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                             ExprValueKind VK = VK_RValue,
                             const CXXCastPath *BasePath = 0,
                             CheckedConversionKind CCK
                                = CCK_ImplicitConversion);

/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);

// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);

// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E);

// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E);

// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);

// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);

// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
  VariadicFunction,
  VariadicBlock,
  VariadicMethod,
  VariadicConstructor,
  VariadicDoesNotApply
};

VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                     const FunctionProtoType *Proto,
                                     Expr *Fn);

// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
  VAK_Valid,
  VAK_ValidInCXX11,
  VAK_Invalid
};

// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);

/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc,
                            FunctionDecl *FDecl,
                            const FunctionProtoType *Proto,
                            unsigned FirstProtoArg,
                            Expr **Args, unsigned NumArgs,
                            SmallVector<Expr *, 8> &AllArgs,
                            VariadicCallType CallType = VariadicDoesNotApply,
                            bool AllowExplicit = false,
                            bool IsListInitialization = false);

// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                            FunctionDecl *FDecl);

/// Checks to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic and returning NULL if not.
bool variadicArgumentPODCheck(const Expr *E, VariadicCallType CT);

// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                    bool IsCompAssign = false);

/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
  /// Compatible - the types are compatible according to the standard.
  Compatible,

  /// PointerToInt - The assignment converts a pointer to an int, which we
  /// accept as an extension.
  PointerToInt,

  /// IntToPointer - The assignment converts an int to a pointer, which we
  /// accept as an extension.
  IntToPointer,

  /// FunctionVoidPointer - The assignment is between a function pointer and
  /// void*, which the standard doesn't allow, but we accept as an extension.
  FunctionVoidPointer,

  /// IncompatiblePointer - The assignment is between two pointers types that
  /// are not compatible, but we accept them as an extension.
  IncompatiblePointer,

  /// IncompatiblePointerSign - The assignment is between two pointers types
  /// which point to integers which have a different sign, but are otherwise
  /// identical. This is a subset of the above, but broken out because it's by
  /// far the most common case of incompatible pointers.
  IncompatiblePointerSign,

  /// CompatiblePointerDiscardsQualifiers - The assignment discards
  /// c/v/r qualifiers, which we accept as an extension.
  CompatiblePointerDiscardsQualifiers,

  /// IncompatiblePointerDiscardsQualifiers - The assignment
  /// discards qualifiers that we don't permit to be discarded,
  /// like address spaces.
  IncompatiblePointerDiscardsQualifiers,

  /// IncompatibleNestedPointerQualifiers - The assignment is between two
  /// nested pointer types, and the qualifiers other than the first two
  /// levels differ e.g. char ** -> const char **, but we accept them as an
  /// extension.
  IncompatibleNestedPointerQualifiers,

  /// IncompatibleVectors - The assignment is between two vector types that
  /// have the same size, which we accept as an extension.
  IncompatibleVectors,

  /// IntToBlockPointer - The assignment converts an int to a block
  /// pointer. We disallow this.
  IntToBlockPointer,

  /// IncompatibleBlockPointer - The assignment is between two block
  /// pointers types that are not compatible.
  IncompatibleBlockPointer,

  /// IncompatibleObjCQualifiedId - The assignment is between a qualified
  /// id type and something else (that is incompatible with it). For example,
  /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
  IncompatibleObjCQualifiedId,

  /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
  /// object with __weak qualifier.
  IncompatibleObjCWeakRef,

  /// Incompatible - We reject this conversion outright, it is invalid to
  /// represent it in the AST.
  Incompatible
};

/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
                              SourceLocation Loc,
                              QualType DstType, QualType SrcType,
                              Expr *SrcExpr, AssignmentAction Action,
                              bool *Complained = 0);

/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                            Expr *SrcExpr);

/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                             QualType LHSType,
                                             QualType RHSType);

/// Check assignment constraints and prepare for a conversion of the
/// RHS to the LHS type.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                             ExprResult &RHS,
                                             CastKind &Kind);

// CheckSingleAssignmentConstraints - Currently used by
// CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking,
// this routine performs the default function/array conversions.
AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType,
                                                   ExprResult &RHS,
                                                   bool Diagnose = true);

// \brief If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
                                                           ExprResult &RHS);

bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);

bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);

ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     AssignmentAction Action,
                                     bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     AssignmentAction Action,
                                     bool AllowExplicit,
                                     ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     const ImplicitConversionSequence& ICS,
                                     AssignmentAction Action,
                                     CheckedConversionKind CCK
                                        = CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     const StandardConversionSequence& SCS,
                                     AssignmentAction Action,
                                     CheckedConversionKind CCK);

/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).

/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
                         ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
  ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
  SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
  bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
  bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
  QualType* CompLHSTy = 0);
QualType CheckSubtractionOperands( // C99 6.5.6
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
  QualType* CompLHSTy = 0);
QualType CheckShiftOperands( // C99 6.5.7
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
  bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc,
  bool isRelational);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
  bool IsCompAssign = false);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc);

// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
  Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);

ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
                                   UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
                                       BinaryOperatorKind Opcode,
                                       Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);

QualType CheckConditionalOperands( // C99 6.5.15
  ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
  ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
  ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
  ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);

QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
                                  bool *NonStandardCompositeType = 0);
// Convenience overload: unwraps the ExprResults, delegates to the
// Expr*& overload above, then re-wraps the (possibly rewritten) operands.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool *NonStandardCompositeType = 0) {
  Expr *E1Tmp = E1.take(), *E2Tmp = E2.take();
  QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp,
                                                NonStandardCompositeType);
  E1 = Owned(E1Tmp);
  E2 = Owned(E2Tmp);
  return Composite;
}

QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation QuestionLoc);

bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
                                SourceLocation QuestionLoc);

/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
                             SourceLocation Loc, bool IsCompAssign);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc, bool isRelational);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc);

/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);

// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
  /// Ref_Incompatible - The two types are incompatible, so direct
  /// reference binding is not possible.
  Ref_Incompatible = 0,
  /// Ref_Related - The two types are reference-related, which means
  /// that their unqualified forms (T1 and T2) are either the same
  /// or T1 is a base class of T2.
  Ref_Related,
  /// Ref_Compatible_With_Added_Qualification - The two types are
  /// reference-compatible with added qualification, meaning that
  /// they are reference-compatible and the qualifiers on T1 (cv1)
  /// are greater than the qualifiers on T2 (cv2).
  Ref_Compatible_With_Added_Qualification,
  /// Ref_Compatible - The two types are reference-compatible and
  /// have equivalent qualifiers (cv1 == cv2).
  Ref_Compatible
};

ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
                                                    QualType T1, QualType T2,
                                                    bool &DerivedToBase,
                                                    bool &ObjCConversion,
                                                    bool &ObjCLifetimeConversion);

ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                               Expr *CastExpr, CastKind &CastKind,
                               ExprValueKind &VK, CXXCastPath &Path);

/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
                              Expr *result, QualType &paramType);

// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                     CastKind &Kind);

// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                              CastKind &Kind);

ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
                                      SourceLocation LParenLoc,
                                      Expr *CastExpr,
                                      SourceLocation RParenLoc);

enum ARCConversionResult { ACR_okay, ACR_unbridged };

/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds.
ARCConversionResult CheckObjCARCConversion(SourceRange castRange,
                                           QualType castType, Expr *&op,
                                           CheckedConversionKind CCK);

Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);

bool CheckObjCARCUnavailableWeakConversion(QualType castType,
                                           QualType ExprType);

/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);

/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);

/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);

/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType,
                               Expr **Args, unsigned NumArgs, Selector Sel,
                               ArrayRef<SourceLocation> SelectorLocs,
                               ObjCMethodDecl *Method, bool isClassMessage,
                               bool isSuperMessage,
                               SourceLocation lbrac, SourceLocation rbrac,
                               QualType &ReturnType, ExprValueKind &VK);

/// \brief Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(QualType ReceiverType,
                                  ObjCMethodDecl *Method,
                                  bool isClassMessage, bool isSuperMessage);

/// \brief If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);

/// \brief Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);

/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc);

ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc,
                                 Expr *SubExpr);

/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);

/// \brief Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);

/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr);

/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                        unsigned NewWidth, bool NewSign,
                                        SourceLocation Loc, unsigned DiagID);

/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);

/// \brief Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
  bool Suppress;

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};

/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           VerifyICEDiagnoser &Diagnoser,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           unsigned DiagID,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           llvm::APSInt *Result=0);

/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, Expr *BitWidth, bool *ZeroWidth = 0); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice }; CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D); bool CheckCUDATarget(CUDAFunctionTarget CallerTarget, CUDAFunctionTarget CalleeTarget); bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee) { return CheckCUDATarget(IdentifyCUDATarget(Caller), IdentifyCUDATarget(Callee)); } /// \name Code completion //@{ /// \brief Describes the context in which code completion occurs. enum ParserCompletionContext { /// \brief Code completion occurs at top-level or namespace context. PCC_Namespace, /// \brief Code completion occurs within a class, struct, or union. PCC_Class, /// \brief Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// \brief Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// \brief Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// \brief Code completion occurs following one or more template /// headers. PCC_Template, /// \brief Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// \brief Code completion occurs within an expression. PCC_Expression, /// \brief Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// \brief Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// \brief Code completion occurs within the condition of an if, /// while, switch, or for statement. 
PCC_Condition, /// \brief Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// \brief Code completion occurs where only a type is permitted. PCC_Type, /// \brief Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// \brief Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool IsArrow); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteCase(Scope *S); void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteReturn(Scope *S); void CodeCompleteAfterIf(Scope *S); void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer(Decl *Constructor, CXXCtorInitializer** Initializers, unsigned NumInitializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void 
CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, IdentifierInfo **SelIdents, unsigned NumSelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, IdentifierInfo **SelIdents, unsigned NumSelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, IdentifierInfo **SelIdents, unsigned NumSelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = 0); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, IdentifierInfo **SelIdents, unsigned NumSelIdents); void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols, unsigned NumProtocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, bool IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType 
ReturnType, IdentifierInfo **SelIdents, unsigned NumSelIdents); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteNaturalLanguage(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=0, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. 
struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, Expr **Args, unsigned NumArgs); bool CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, ArrayRef<const Expr *> Args, unsigned NumProtoArgs, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(CallExpr *TheCall); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinObjectSize(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); enum StringLiteralCheckType { SLCT_NotALiteral, SLCT_UncheckedLiteral, SLCT_CheckedLiteral }; StringLiteralCheckType checkFormatStringExpr(const Expr *E, ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, bool inFunctionCall = true); void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr, ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, bool inFunctionCall, VariadicCallType CallType); bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range); void CheckNonNullArguments(const NonNullAttr *NonNull, const Expr * const *ExprArgs, SourceLocation CallSiteLoc); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnStackAddr(Expr *RetValExp, QualType lhsType, SourceLocation 
ReturnLoc); void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS); void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// \brief Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); public: /// \brief Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// \brief A map from magic value to type information. OwningPtr<llvm::DenseMap<TypeTagMagicValue, TypeTagData> > TypeTagForDatatypeMagicValues; /// \brief Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const Expr * const *ExprArgs); /// \brief The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTWriter; public: /// \brief Retrieve the parser's current scope. 
/// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } IdentifierInfo *getSuperIdentifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } AvailabilityResult getCurContextAvailability() const; const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } }; /// \brief RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; public: EnterExpressionEvaluationContext(Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = 0, bool IsDecltype = false) : Actions(Actions) { Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, IsDecltype); } EnterExpressionEvaluationContext(Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, bool IsDecltype = false) : Actions(Actions) { Actions.PushExpressionEvaluationContext(NewContext, Sema::ReuseLambdaContextDecl, IsDecltype); } ~EnterExpressionEvaluationContext() { Actions.PopExpressionEvaluationContext(); } }; } // end namespace clang #endif
/* ========================= file: GB_unop__identity_int64_fp64.c ========================= */
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): because this file is generator output, only comments are added
// here; any code change belongs in the Generator/ template instead.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB (_unop_apply__identity_int64_fp64)
// op(A') function: GB (_unop_tran__identity_int64_fp64)

// C type:   int64_t
// A type:   double
// cast:     int64_t cij = GB_cast_to_int64_t ((double) (aij))
// unaryop:  cij = aij

#define GB_ATYPE \
    double

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: identity, so z is just x
#define GB_OP(z, x) \
    z = x ;

// casting: double -> int64_t, via the GraphBLAS typecast rule
#define GB_CAST(z, aij) \
    int64_t z = GB_cast_to_int64_t ((double) (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int64_t z = GB_cast_to_int64_t ((double) (aij)) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Typecasts each entry of Ax (double) into Cx (int64_t).  Cx and Ax may be
// aliased, which is safe here because entry p of Cx depends only on entry p
// of Ax.  Returns GrB_NO_VALUE when the kernel is compiled out via GB_DISABLE.

GrB_Info GB (_unop_apply__identity_int64_fp64)
(
    int64_t *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // Ab == NULL: A is not bitmap (see parameter comment above), so all
        // anz entries are present and can be converted unconditionally.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            int64_t z = GB_cast_to_int64_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // Ab [p] == 0 means entry p is not present; leave Cx [p] alone
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            int64_t z = GB_cast_to_int64_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The whole transpose kernel body is textually included from
// GB_unop_transpose.c, which expands in terms of the GB_* macros above.

GrB_Info GB (_unop_tran__identity_int64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ========================= file: grib_bits_fast_big_endian_omp.c ========================= */
/*
 * Copyright 2005-2014 ECMWF.
 *
 * This software is licensed under the terms of the Apache Licence Version 2.0
 * which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
 *
 * In applying this licence, ECMWF does not waive the privileges and immunities granted to it by
 * virtue of its status as an intergovernmental organisation nor does it submit to any jurisdiction.
 */

/***************************************************************************
 *   Enrico Fucile  -  19.06.2007                                          *
 *                                                                         *
 ***************************************************************************/

/*
 * Packed bit-stream decode/encode, big-endian word order, OpenMP-parallel
 * decode paths.
 *
 * NOTE(review): max_nbits, VALUE(), MASKVALUE() and GRIB_SUCCESS are not
 * defined here, so this file is presumably #included into a larger
 * translation unit that provides them -- confirm against the including file.
 *
 * NOTE(review): the (long*)p casts assume the byte buffer is suitably
 * aligned for long access and laid out big-endian (per the filename);
 * confirm on all target platforms (strict-aliasing / alignment hazard).
 */

/*
 * Decode `size` unsigned integers of `nbits` bits each, starting at bit
 * offset *bitp of buffer p, into val[].  Advances *bitp by size*nbits.
 *
 * OpenMP scheme: inited and pp are firstprivate, so each thread, on the
 * first iteration it executes, recomputes its own word pointer (pp) and
 * in-word bit offset (startBit) from the absolute position *bitp + i*nbits,
 * then walks forward sequentially through its contiguous chunk
 * (schedule(static) makes each thread's iterations contiguous).
 *
 * Fast branch: when nbits divides the word size AND the stream is
 * field-aligned, no value ever straddles a word boundary.
 * Slow branch: a value may straddle two words; the leftmost bits are taken
 * from the current word and the remainder from the next.
 */
int grib_decode_long_array(const unsigned char* p, long *bitp, long nbits,size_t size,long* val)
{
   long i=0;
   long countOfLeftmostBits=0,leftmostBits=0;
   long startBit,startByte;   /* NOTE(review): startByte is never used -- presumably a leftover; verify before removing */
   long remainingBits = nbits;
   long *pp=(long*)p;
   int inited=0;
   unsigned long uval=0;      /* NOTE(review): uval is never used in this function */

   if ( (max_nbits%nbits == 0) && (*bitp%nbits == 0) ) {
#pragma omp parallel for schedule(static) firstprivate(inited,pp) private(startBit,countOfLeftmostBits,remainingBits,leftmostBits)
      for (i=0;i<size;i++) {
         if (!inited) {
            /* first iteration of this thread: locate its absolute position */
            startBit=*bitp+i*nbits;
            remainingBits = nbits;
            if (startBit >= max_nbits) {
               pp+=startBit/max_nbits;
               startBit %= max_nbits;
            }
            inited=1;
         }
         if (startBit == max_nbits) {
            /* exactly at a word boundary: move to the next word */
            startBit = 0;
            pp++;
         }
         val[i]=VALUE(*pp,startBit,remainingBits);
         startBit+=remainingBits;
         remainingBits=nbits;
      }
   } else {
#pragma omp parallel for schedule(static) firstprivate(inited,pp) private(startBit,countOfLeftmostBits,remainingBits,leftmostBits)
      for (i=0;i<size;i++) {
         if (!inited) {
            startBit=*bitp+i*nbits;
            remainingBits = nbits;
            if (startBit >= max_nbits) {
               pp+=startBit/max_nbits;
               startBit %= max_nbits;
            }
            inited=1;
         }
         countOfLeftmostBits = startBit + remainingBits;
         if (countOfLeftmostBits > max_nbits) {
            /* value straddles a word boundary: split into left + right parts */
            countOfLeftmostBits = max_nbits - startBit;
            remainingBits -= countOfLeftmostBits;
            leftmostBits=(VALUE(*(pp++),startBit,countOfLeftmostBits)) << remainingBits;
            startBit = 0;
         } else
            leftmostBits = 0;
         val[i]=leftmostBits+(VALUE(*pp,startBit,remainingBits));
         startBit+=remainingBits;
         remainingBits=nbits;
      }
   }

   *bitp+=size*nbits;
   return GRIB_SUCCESS;
}

/*
 * Same bit-extraction scheme as grib_decode_long_array, but each decoded
 * integer is mapped to a double via val[i] = raw*fact + bias, where
 * fact = s*d and bias = reference_value*d (GRIB simple packing).
 */
int grib_decode_double_array(const unsigned char* p, long *bitp, long nbits,double reference_value,double s,double d,size_t size,double* val)
{
   long i=0;
   long countOfLeftmostBits=0,leftmostBits=0;
   long startBit,startByte;   /* NOTE(review): startByte unused here too */
   long remainingBits = nbits;
   long *pp=(long*)p;
   int inited=0;
   unsigned long uval=0;      /* NOTE(review): uval unused here too */
   double fact=s*d;
   double bias=reference_value*d;

   if ( (max_nbits%nbits == 0) && (*bitp%nbits == 0) ) {
#pragma omp parallel for schedule(static) firstprivate(inited,pp) private(startBit,countOfLeftmostBits,remainingBits,leftmostBits)
      for (i=0;i<size;i++) {
         if (!inited) {
            startBit=*bitp+i*nbits;
            remainingBits = nbits;
            if (startBit >= max_nbits) {
               pp+=startBit/max_nbits;
               startBit %= max_nbits;
            }
            inited=1;
         }
         if (startBit == max_nbits) {
            startBit = 0;
            pp++;
         }
         val[i]=VALUE(*pp,startBit,remainingBits);
         /* unscale: raw -> physical value */
         val[i]= val[i] * fact + bias ;
         startBit+=remainingBits;
         remainingBits=nbits;
      }
   } else {
#pragma omp parallel for schedule(static) firstprivate(inited,pp) private(startBit,countOfLeftmostBits,remainingBits,leftmostBits)
      for (i=0;i<size;i++) {
         if (!inited) {
            startBit=*bitp+i*nbits;
            remainingBits = nbits;
            if (startBit >= max_nbits) {
               pp+=startBit/max_nbits;
               startBit %= max_nbits;
            }
            inited=1;
         }
         countOfLeftmostBits = startBit + remainingBits;
         if (countOfLeftmostBits > max_nbits) {
            countOfLeftmostBits = max_nbits - startBit;
            remainingBits -= countOfLeftmostBits;
            leftmostBits=(VALUE(*(pp++),startBit,countOfLeftmostBits)) << remainingBits;
            startBit = 0;
         } else
            leftmostBits = 0;
         val[i]=leftmostBits+(VALUE(*pp,startBit,remainingBits));
         val[i]= val[i] * fact + bias ;
         startBit+=remainingBits;
         remainingBits=nbits;
      }
   }

   *bitp+=size*nbits;
   return GRIB_SUCCESS;
}

/*
 * Complex-packing variant: d is an array of per-pair scale factors, applied
 * as d[i/2] so consecutive (presumably real/imaginary) pairs share a factor.
 * Mapping is val[i] = (raw*s + reference_value) * d[i/2].
 */
int grib_decode_double_array_complex(const unsigned char* p, long *bitp, long nbits,double reference_value,double s,double* d,size_t size,double* val)
{
   long i=0;
   long countOfLeftmostBits=0,leftmostBits=0;
   long startBit;
   long remainingBits = nbits;
   long *pp=(long*)p;
   int inited=0;
   unsigned long uval=0;      /* NOTE(review): uval unused here too */

   if ( (max_nbits%nbits == 0) && (*bitp%nbits == 0) ) {
#pragma omp parallel for schedule(static) firstprivate(inited,pp) private(startBit,countOfLeftmostBits,remainingBits,leftmostBits)
      for (i=0;i<size;i++) {
         if (!inited) {
            startBit=*bitp+i*nbits;
            remainingBits = nbits;
            if (startBit >= max_nbits) {
               pp+=startBit/max_nbits;
               startBit %= max_nbits;
            }
            inited=1;
         }
         if (startBit == max_nbits) {
            startBit = 0;
            pp++;
         }
         val[i]=VALUE(*pp,startBit,remainingBits);
         val[i]= ((( (val[i]) * s)+reference_value)*d[i/2]);
         startBit+=remainingBits;
         remainingBits=nbits;
      }
   } else {
#pragma omp parallel for schedule(static) firstprivate(inited,pp) private(startBit,countOfLeftmostBits,remainingBits,leftmostBits)
      for (i=0;i<size;i++) {
         if (!inited) {
            startBit=*bitp+i*nbits;
            remainingBits = nbits;
            if (startBit >= max_nbits) {
               pp+=startBit/max_nbits;
               startBit %= max_nbits;
            }
            inited=1;
         }
         countOfLeftmostBits = startBit + remainingBits;
         if (countOfLeftmostBits > max_nbits) {
            countOfLeftmostBits = max_nbits - startBit;
            remainingBits -= countOfLeftmostBits;
            /* same split as above, but with pp advanced explicitly instead of *(pp++) */
            leftmostBits=(VALUE(*pp,startBit,countOfLeftmostBits)) << remainingBits;
            startBit = 0;
            pp++;
         } else
            leftmostBits = 0;
         val[i]=leftmostBits+(VALUE(*pp,startBit,remainingBits));
         val[i]= ((( (val[i]) * s)+reference_value)*d[i/2]);
         startBit+=remainingBits;
         remainingBits=nbits;
      }
   }

   *bitp+=size*nbits;
   return GRIB_SUCCESS;
}

/*
 * Encode n_vals doubles into the bit stream at *bitp, nbits per value.
 * Each value is quantized as uval = round((v*d - reference_value)*divisor)
 * (the +0.5 implements round-half-up for non-negative results).
 * Sequential: the encoder read-modify-writes shared words, so it is not
 * parallelized like the decoders.  Advances *bitp by n_vals*nbits.
 */
int grib_encode_double_array(size_t n_vals,const double* val,long nbits,double reference_value,double d,double divisor,unsigned char* p,long *bitp)
{
   long* destination = (long*)p;
   double* v=(double*)val;
   long countOfLeftmostBits=0,startBit=0,remainingBits=0,rightmostBits=0;
   unsigned long uval=0;
   size_t i=0;

   startBit=*bitp;
   remainingBits = nbits;
   if (startBit >= max_nbits) {
      destination += startBit / max_nbits;
      startBit %= max_nbits;
   }

   if ( (max_nbits%nbits == 0) && (*bitp%nbits == 0) ) {
      for(i=0;i< n_vals;i++){
         uval = (unsigned long)(((((*v)*d)-reference_value)*divisor)+0.5);
         if (startBit == max_nbits) {
            startBit = 0;
            destination++;
         }
         /* keep only the low `remainingBits` of uval, then merge them into
            the destination word at startBit (mask out the old field first);
            note `-` binds tighter than `<<`, so the shift count is
            max_nbits - (remainingBits + startBit) */
         rightmostBits = VALUE(uval,max_nbits-remainingBits,remainingBits);
         *destination = ((*destination) & ~MASKVALUE(startBit,remainingBits)) + (rightmostBits << max_nbits-(remainingBits+startBit));
         startBit+=remainingBits;
         remainingBits=nbits;
         v++;
      }
   } else {
      for(i=0;i< n_vals;i++){
         countOfLeftmostBits = startBit + remainingBits;
         uval = (unsigned long)(((((*v)*d)-reference_value)*divisor)+0.5);
         if (countOfLeftmostBits > max_nbits) {
            /* value straddles a word boundary: write the left part into the
               tail of the current word, then continue in the next word */
            countOfLeftmostBits = max_nbits - startBit;
            startBit = max_nbits - remainingBits;
            remainingBits -= countOfLeftmostBits;
            *destination = (((*destination) >> countOfLeftmostBits) << countOfLeftmostBits) + (VALUE(uval,startBit,countOfLeftmostBits));
            startBit = 0;
            destination++;
         }
         rightmostBits = VALUE(uval,max_nbits-remainingBits,remainingBits);
         *destination = ((*destination) & ~MASKVALUE(startBit,remainingBits)) + (rightmostBits << max_nbits-(remainingBits+startBit));
         startBit+=remainingBits;
         remainingBits=nbits;
         v++;
      }
   }

   *bitp+=n_vals*nbits;
   return GRIB_SUCCESS;
}

/*
 * Complex-packing encoder: like grib_encode_double_array, with the per-pair
 * factor scal[i/2] folded into the quantization.
 * NOTE(review): returns literal 0 rather than GRIB_SUCCESS as the others do;
 * presumably equivalent, but confirm before relying on it.
 */
int grib_encode_double_array_complex(size_t n_vals,double* val,long nbits,double reference_value, double* scal,double d,double divisor,unsigned char* p,long *bitp)
{
   long* destination = (long*)p;
   double* v=val;
   long countOfLeftmostBits=0,startBit=0,remainingBits=0,rightmostBits=0;
   unsigned long uval=0;
   size_t i=0;

   startBit=*bitp;
   remainingBits = nbits;
   if (startBit >= max_nbits) {
      destination += startBit / max_nbits;
      startBit %= max_nbits;
   }

   if ( (max_nbits%nbits == 0) && (*bitp%nbits == 0) ) {
      for(i=0;i< n_vals;i++) {
         uval = (unsigned long)(((((*v)*d*scal[i/2])-reference_value)*divisor)+0.5);
         if (startBit == max_nbits) {
            startBit = 0;
            destination++;
         }
         rightmostBits = VALUE(uval,max_nbits-remainingBits,remainingBits);
         *destination = ((*destination) & ~MASKVALUE(startBit,remainingBits)) + (rightmostBits << max_nbits-(remainingBits+startBit));
         startBit+=remainingBits;
         remainingBits=nbits;
         v++;
      }
   } else {
      for(i=0;i< n_vals;i++) {
         countOfLeftmostBits = startBit + remainingBits;
         uval = (unsigned long)(((((*v)*d*scal[i/2])-reference_value)*divisor)+0.5);
         if (countOfLeftmostBits > max_nbits) {
            countOfLeftmostBits = max_nbits - startBit;
            startBit = max_nbits - remainingBits;
            remainingBits -= countOfLeftmostBits;
            *destination = (((*destination) >> countOfLeftmostBits) << countOfLeftmostBits) + (VALUE(uval,startBit,countOfLeftmostBits));
            startBit = 0;
            destination++;
         }
         rightmostBits = VALUE(uval,max_nbits-remainingBits,remainingBits);
         *destination = ((*destination) & ~MASKVALUE(startBit,remainingBits)) + (rightmostBits << max_nbits-(remainingBits+startBit));
         startBit+=remainingBits;
         remainingBits=nbits;
         v++;
      }
   }

   *bitp+=n_vals*nbits;
   return 0;
}
/* ========================= file: arraybench.c ========================= */
/**************************************************************************** * * * OpenMP MicroBenchmark Suite - Version 3.1 * * * * produced by * * * * Mark Bull, Fiona Reid and Nix Mc Donnell * * * * at * * * * Edinburgh Parallel Computing Centre * * * * email: markb@epcc.ed.ac.uk or fiona@epcc.ed.ac.uk * * * * * * This version copyright (c) The University of Edinburgh, 2015. * * * * * * Licensed under the Apache License, Version 2.0 (the "License"); * * you may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * * * ****************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> #include "common.h" #include "arraybench.h" double btest[IDA]; double atest[IDA]; #pragma omp threadprivate (btest) int main(int argc, char **argv) { init(argc, argv); /* GENERATE REFERENCE TIME */ reference("reference time 1", &refer); char testName[32]; /* TEST PRIVATE */ sprintf(testName, "PRIVATE %d", IDA); benchmark(testName, &testprivnew); /* TEST FIRSTPRIVATE */ sprintf(testName, "FIRSTPRIVATE %d", IDA); benchmark(testName, &testfirstprivnew); #ifdef OMPVER2 /* TEST COPYPRIVATE */ sprintf(testName, "COPYPRIVATE %d", IDA); benchmark(testName, &testcopyprivnew); #endif /* TEST THREADPRIVATE - COPYIN */ sprintf(testName, "COPYIN %d", IDA); benchmark(testName, &testthrprivnew); finalise(); return EXIT_SUCCESS; } void refer() { int j; double a[1]; for (j = 0; j < innerreps; j++) { array_delay(delaylength, a); } } void testfirstprivnew() { int j; for (j = 0; j < innerreps; 
j++) { #pragma omp parallel firstprivate(atest) { array_delay(delaylength, atest); } } } void testprivnew() { int j; for (j = 0; j < innerreps; j++) { #pragma omp parallel private(atest) { array_delay(delaylength, atest); } } } #ifdef OMPVER2 void testcopyprivnew() { int j; for (j=0; j<innerreps; j++) { #pragma omp parallel private(atest) { #pragma omp single copyprivate(atest) { array_delay(delaylength, atest); } } } } #endif void testthrprivnew() { int j; for (j = 0; j < innerreps; j++) { #pragma omp parallel copyin(btest) { array_delay(delaylength, btest); } } }
/* ========================= file: setup.h ========================= */
//////////////////////////////////////////////////////////////////////////////////
//                                                                              //
//  trueke                                                                      //
//  A multi-GPU implementation of the exchange Monte Carlo method.              //
//                                                                              //
//////////////////////////////////////////////////////////////////////////////////
//                                                                              //
//  Copyright © 2015 Cristobal A. Navarro, Wei Huang.                           //
//                                                                              //
//  This file is part of trueke.                                                //
//  trueke is free software: you can redistribute it and/or modify              //
//  it under the terms of the GNU General Public License as published by        //
//  the Free Software Foundation, either version 3 of the License, or           //
//  (at your option) any later version.                                         //
//                                                                              //
//  trueke is distributed in the hope that it will be useful,                   //
//  but WITHOUT ANY WARRANTY; without even the implied warranty of              //
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the               //
//  GNU General Public License for more details.                                //
//                                                                              //
//  You should have received a copy of the GNU General Public License           //
//  along with trueke.  If not, see <http://www.gnu.org/licenses/>.             //
//                                                                              //
//////////////////////////////////////////////////////////////////////////////////

#ifndef _SETUP_H_
#define _SETUP_H_

/* function declarations */
/* BUGFIX: init() and adapt_init() were declared with `int argv` and
   `char argv` respectively, conflicting with their definitions below
   (`char **argv`) — a hard compile error in C. */
void pickgpus(setup_t *s);
void init(setup_t *s, int argc, char **argv);
void adapt_init(setup_t *s, int argc, char **argv);
void printparams(setup_t *s);
void getparams(setup_t *s, int argc, char **argv);
void newseed(int *seed);
void malloc_arrays(setup_t *s);
void adapt_malloc_arrays(setup_t *s);
void reset(setup_t *s);
void adjustparams(setup_t *s);

/* Full initialization for adaptive-temperature runs: parses the command
   line, seeds the host PCG generator, selects GPUs, builds the CUDA launch
   geometry, and allocates all per-GPU replica pools. */
void adapt_init(setup_t *s, int argc, char **argv)
{
    printf("adapt_init....{\n"); fflush(stdout);
    /* get parameters */
    getparams(s, argc, argv);
    /* adjust some parameters related to memory pool and active replicas */
    adjustparams(s);
#ifdef MEASURE
    /* folders for output */
    s->obsfolder = "data";
    s->plotfolder = "plots";
    make_output_folders(s->obsfolder, s->plotfolder);
#endif
    /* parameter seed, or a device-entropy seed when the user passed 0 */
    if (s->seed != 0) {
        gpu_pcg32_srandom_r(&s->hpcgs, &s->hpcgi, s->seed, 1);
    }
    else {
        gpu_pcg32_srandom_r(&s->hpcgs, &s->hpcgi, devseed(), 1);
    }
    s->seed = gpu_pcg32_random_r(&s->hpcgs, &s->hpcgi);
    /* pick the GPUs */
    pickgpus(s);
    /* one OpenMP thread drives each GPU */
    omp_set_num_threads(s->ngpus);
    /* launch geometry for the lattice kernels (checkerboard: BY halved) */
    s->mcblock = dim3(BX, BY / 2, BZ);
    s->mcgrid  = dim3((s->L + BX - 1) / BX, (s->L + BY - 1) / (2 * BY), (s->L + BZ - 1) / BZ);
    s->lblock  = dim3(BLOCKSIZE1D, 1, 1);
    s->lgrid   = dim3((s->N + BLOCKSIZE1D - 1) / BLOCKSIZE1D, 1, 1);
    /* launch geometry for the PRNG (one state per 4 spins) */
    s->prng_block = dim3(BLOCKSIZE1D, 1, 1);
    s->prng_grid  = dim3(((s->N / 4) + BLOCKSIZE1D - 1) / BLOCKSIZE1D, 1, 1);
    /* allocate main arrays */
    adapt_malloc_arrays(s);
    /* create and reset timers */
    sdkCreateTimer(&(s->timer));
    sdkCreateTimer(&(s->gtimer));
    sdkCreateTimer(&(s->ktimer));
    sdkResetTimer(&(s->timer));
    sdkResetTimer(&(s->gtimer));
    sdkResetTimer(&(s->ktimer));
    /* print parameters */
    printparams(s);
    fflush(stdout);
}

/* Allocates the per-GPU replica pools (lattices, PRNG states, streams,
   per-temperature counters) plus the host-side temperature/energy arrays,
   then seeds the PRNG states on-device and initializes the fragmented
   replica/temperature index maps. */
void adapt_malloc_arrays(setup_t *s)
{
    /* multi-gpu adaptation arrays: one slot per GPU */
    s->mdlat    = (int***)malloc(sizeof(int**) * s->ngpus);
    s->aex      = (float**)malloc(sizeof(float*) * s->ngpus);
    s->aavex    = (float**)malloc(sizeof(float*) * s->ngpus);
    s->aexE     = (float**)malloc(sizeof(float*) * s->ngpus);
    s->arstream = (cudaStream_t**)malloc(sizeof(cudaStream_t*) * s->ngpus);
    s->apcga    = (uint64_t***)malloc(sizeof(uint64_t**) * s->ngpus);
    s->apcgb    = (uint64_t***)malloc(sizeof(uint64_t**) * s->ngpus);
    s->dH       = (int**)malloc(sizeof(int*) * s->ngpus);
    s->dE       = (float**)malloc(sizeof(float*) * s->ngpus);
    s->arts     = (findex_t**)malloc(sizeof(findex_t*) * s->ngpus);
    s->atrs     = (findex_t**)malloc(sizeof(findex_t*) * s->ngpus);
    s->aT       = (float**)malloc(sizeof(float*) * s->ngpus);
    /* T is a sorted temp array; sized Ra (pool) so adaptation can grow R */
    s->T = (float*)malloc(sizeof(float) * s->Ra);
    /* host values for each replica */
    s->E = (float*)malloc(sizeof(float) * s->Ra);
    /* memory for H array */
    s->hH = (int*)malloc(sizeof(int) * s->N);
    /* multi-GPU setup: each OpenMP thread allocates its own GPU's pool */
#pragma omp parallel
    {
        int tid, nt, r;
        /* set threads (binds this thread to its GPU) */
        adapt_threadset(s, &tid, &nt, &r);
        /* allocate the replica pool for each GPU */
        s->mdlat[tid] = (int**)malloc(sizeof(int*) * s->rpool[tid]);
        /* ex / avex are per-temperature counter arrays */
        s->aex[tid]   = (float*)malloc(sizeof(float) * s->rpool[tid]);
        s->aavex[tid] = (float*)malloc(sizeof(float) * s->rpool[tid]);
        /* exchange energies */
        s->aexE[tid] = (float*)malloc(sizeof(float) * s->rpool[tid]);
        /* CUDA streams, one per replica */
        s->arstream[tid] = (cudaStream_t*)malloc(sizeof(cudaStream_t) * s->rpool[tid]);
        /* PRNG states volume, one state per thread */
        s->apcga[tid] = (uint64_t**)malloc(sizeof(uint64_t*) * s->rpool[tid]);
        s->apcgb[tid] = (uint64_t**)malloc(sizeof(uint64_t*) * s->rpool[tid]);
        /* fragmented indices: replicas temperature-sorted and vice versa */
        s->arts[tid] = (findex_t*)malloc(sizeof(findex_t) * s->rpool[tid]);
        s->atrs[tid] = (findex_t*)malloc(sizeof(findex_t) * s->rpool[tid]);
        /* fragmented temperatures sorted */
        s->aT[tid] = (float*)malloc(sizeof(float) * s->rpool[tid]);
        /* device magnetic field and energy-reduction buffers, per GPU */
        checkCudaErrors(cudaMalloc(&(s->dH[tid]), sizeof(int) * s->N));
        checkCudaErrors(cudaMalloc(&(s->dE[tid]), sizeof(float) * s->rpool[tid]));
        /* device data and PRNG seeding for each replica in this GPU's pool */
        for (int k = 0; k < s->rpool[tid]; ++k) {
            checkCudaErrors(cudaMalloc(&(s->mdlat[tid][k]), sizeof(int) * s->N));
            checkCudaErrors(cudaMalloc(&(s->apcga[tid][k]), (s->N / 4) * sizeof(uint64_t)));
            checkCudaErrors(cudaMalloc(&(s->apcgb[tid][k]), (s->N / 4) * sizeof(uint64_t)));
            checkCudaErrors(cudaStreamCreateWithFlags(&(s->arstream[tid][k]), cudaStreamNonBlocking));
            /* offset-and-sequence seeding: each replica gets a disjoint
               seed offset and a unique stream id */
            kernel_gpupcg_setup<<<s->prng_grid, s->prng_block, 0, s->arstream[tid][k]>>>(
                s->apcga[tid][k], s->apcgb[tid][k], s->N / 4,
                s->seed + (unsigned long long)(s->N / 4 * (s->rpool[tid] * tid + k)),
                (s->rpool[tid] * tid + k));
            cudaCheckErrors("kernel: prng reset");
        }
    }
    /* host memory setup for each replica: temperatures in increasing order */
    for (int i = 0; i < s->R; i++) {
        s->T[i] = s->TR - (s->R - 1 - i) * s->dT;
    }
    int count = 0;
    for (int k = 0; k < s->ngpus; ++k) {
        for (int j = 0; j < s->gpur[k]; ++j) {
            s->arts[k][j] = s->atrs[k][j] = (findex_t){k, j};
            s->aT[k][j] = s->TR - (float)(s->R - 1 - count) * s->dT;
            s->aex[k][j] = 0;
            ++count;
        }
    }
}

/* Derives the remaining run parameters from the parsed command line:
   lattice volume, GPU-aligned replica count, pool sizes per GPU. */
void adjustparams(setup_t *s)
{
    /* total number of spins per replica */
    s->N = (s->L) * (s->L) * (s->L);
    /* shared memory steps */
    s->cs = BLOCK_STEPS;
    /* keep original parameter R */
    s->Ro = s->R;
    /* adjust R to a multiple of ngpus; R' = ceil(R/ngpus) * ngpus */
    s->R = (int)ceil((float)s->R / (float)s->ngpus) * s->ngpus;
    /* compute Ra to be the final size Ra = R + TL */
    s->Ra = s->R + (s->atrials * s->ains);
    /* set replica pools for each GPU */
    s->gpur  = (int*)malloc(sizeof(int) * s->ngpus);
    s->rpool = (int*)malloc(sizeof(int) * s->ngpus);
    /* measure zone: auto-derive from pts and L when not given (-1) */
    if (s->mzone == -1) {
        s->mzone = (int)((double)s->pts / log2(2.0 + sqrtf((double)s->pts) / (double)s->L));
    }
    /* last adaptation insert */
    s->fam = 0;
    /* record original seed */
    s->oseed = s->seed;
    for (int i = 0; i < s->ngpus; ++i) {
        /* active replicas per gpu */
        s->gpur[i] = s->R / s->ngpus;
        /* replica pool per gpu, remainder spread over the first GPUs */
        s->rpool[i] = s->Ra / s->ngpus;
        if (i < (s->Ra % s->ngpus)) {
            s->rpool[i] += 1;
        }
    }
}

/* Lightweight (re-)initialization: rebuilds kernel launch geometry and
   reallocates the main arrays.  Assumes getparams/adjustparams and GPU
   selection already ran (e.g. via adapt_init); argc/argv are kept for
   interface compatibility with the declaration. */
void init(setup_t *s, int argc, char **argv)
{
    (void)argc;
    (void)argv;
    /* build the space of computation for the lattices */
    s->mcblock = dim3(BX, BY / 2, BZ);
    s->mcgrid  = dim3((s->L + BX - 1) / BX, (s->L + BY - 1) / (2 * BY), (s->L + BZ - 1) / BZ);
    s->lblock  = dim3(BLOCKSIZE1D, 1, 1);
    s->lgrid   = dim3((s->N + BLOCKSIZE1D - 1) / BLOCKSIZE1D, 1, 1);
    /* build the space of computation for random numbers */
    s->prng_block = dim3(BLOCKSIZE1D, 1, 1);
    s->prng_grid  = dim3(((s->N / 4) + BLOCKSIZE1D - 1) / BLOCKSIZE1D, 1, 1);
    /* allocate main arrays */
    malloc_arrays(s);
#ifdef MEASURE
    /* reset table of observables per realization */
    reset_realization_statistics(s, s->R);
#endif
}

/* Allocates host and device arrays for the fixed-R (non-adaptive) path:
   R replica lattices, streams, PRNG states, per-replica observables, and
   per-GPU reduction buffers; then seeds device PRNGs and the temperature
   ladder. */
void malloc_arrays(setup_t *s)
{
    /* allocate the main arrays */
    s->hlat = (int**)malloc(sizeof(int*) * s->R);
    s->dlat = (int**)malloc(sizeof(int*) * s->R);
    /* T is a sorted temp array */
    s->T = (float*)malloc(sizeof(float) * s->R);
    /* ex / avex are per-temperature counter arrays */
    s->ex   = (float*)malloc(sizeof(float) * s->R);
    s->avex = (float*)malloc(sizeof(float) * s->R);
    /* index arrays */
    s->rts = (int*)malloc(sizeof(int) * s->R);
    s->trs = (int*)malloc(sizeof(int) * s->R);
    /* host values for each replica */
    s->E   = (float*)malloc(sizeof(float) * s->R);
    s->exE = (float*)malloc(sizeof(float) * s->R);
    s->M   = (int*)malloc(sizeof(int) * s->R);
    s->F1  = (float3*)malloc(sizeof(float3) * s->R);
    s->F2  = (float3*)malloc(sizeof(float3) * s->R);
    /* CUDA streams */
    s->rstream = (cudaStream_t*)malloc(sizeof(cudaStream_t) * s->R);
    /* PRNG states volume, one state per thread */
    s->pcga = (uint64_t**)malloc(sizeof(uint64_t*) * s->R);
    s->pcgb = (uint64_t**)malloc(sizeof(uint64_t*) * s->R);
    /* observables table */
    s->obstable = (obset_t*)malloc(sizeof(obset_t) * s->R);
    /* memory for H array */
    s->hH = (int*)malloc(sizeof(int) * s->N);
    /* a copy of the magnetic field 'dH' on each GPU */
    s->dH = (int**)malloc(sizeof(int*) * s->ngpus);
    /* device values for GPUs */
    s->dE  = (float**)malloc(sizeof(float*) * s->ngpus);
    s->dM  = (int**)malloc(sizeof(int*) * s->ngpus);
    s->dF1 = (float3**)malloc(sizeof(float3*) * s->ngpus);
    s->dF2 = (float3**)malloc(sizeof(float3*) * s->ngpus);
    /* multi-GPU setup */
#pragma omp parallel
    {
        int tid, nt, r, k;
        /* set threads */
        threadset(s, &tid, &nt, &r);
        /* malloc the data for 'r' replicas on each GPU */
        for (int j = 0; j < r; ++j) {
            k = tid * r + j;
            checkCudaErrors(cudaMalloc(&(s->dlat[k]), sizeof(int) * s->N));
            checkCudaErrors(cudaMalloc(&(s->pcga[k]), (s->N / 4) * sizeof(uint64_t)));
            checkCudaErrors(cudaMalloc(&(s->pcgb[k]), (s->N / 4) * sizeof(uint64_t)));
            checkCudaErrors(cudaStreamCreateWithFlags(&(s->rstream[k]), cudaStreamNonBlocking));
            kernel_gpupcg_setup<<<s->prng_grid, s->prng_block, 0, s->rstream[k]>>>(
                s->pcga[k], s->pcgb[k], s->N / 4, s->seed + s->N / 4 * k, k);
            cudaCheckErrors("kernel: prng reset");
        }
        /* malloc device magnetic field -- multi-GPU */
        checkCudaErrors(cudaMalloc(&(s->dH[tid]), sizeof(int) * s->N));
        /* malloc device energy reductions -- multi-GPU */
        checkCudaErrors(cudaMalloc(&(s->dE[tid]), sizeof(float) * r));
        checkCudaErrors(cudaMalloc(&(s->dM[tid]), sizeof(int) * r));
        checkCudaErrors(cudaMalloc(&(s->dF1[tid]), sizeof(float3) * r));
        checkCudaErrors(cudaMalloc(&(s->dF2[tid]), sizeof(float3) * r));
        /* NOTE: P2P memory access is not working properly; for the moment
           standard device-host-device transfers are used instead. */
    }
    /* host memory setup for each replica */
    for (int i = 0; i < s->R; i++) {
        /* replica allocation */
        s->hlat[i] = (int*)malloc(sizeof(int) * s->N);
        /* array of temperatures in increasing order */
        s->T[i] = s->TR - (s->R - 1 - i) * s->dT;
        /* exchange counters initialization */
        s->ex[i] = 0;
        /* initialize index arrays */
        s->rts[i] = s->trs[i] = i;
    }
    /* flatten the per-GPU temperatures into the sorted T array */
    int count = 0;
    for (int i = 0; i < s->ngpus; ++i) {
        for (int j = 0; j < s->gpur[i]; ++j) {
            s->T[count++] = s->aT[i][j];
        }
    }
    printarray<float>(s->T, s->R, "T");
    printf("\n");
}

/* Queries all GPUs through NVML, sorts them by utilization, and keeps the
   'ngpus' idlest ones in s->gpus. */
void pickgpus(setup_t *s)
{
    /* structs for handling GPU queries error codes */
    nvmlReturn_t r;
    /* BUGFIX: 'u' was read after the loop without initialization; it is now
       zeroed so devcount == 0 cannot read an indeterminate value. */
    unsigned int devcount, i, u = 0;
    /* struct with GPU information */
    gpu_t *gpus;
    char version[80];
    /* init nvml library for GPU queries */
    r = nvmlInit();
    nvml_check(r, "nvmlInit");
    /* nvml: get driver version */
    r = nvmlSystemGetDriverVersion(version, 80);
    nvml_check(r, "nvmlSystemGetDriverVersion");
    printf("\n\tDriver version: %s \n", version);
    /* get number of devices */
    r = nvmlDeviceGetCount(&devcount);
    nvml_check(r, "nvmlDeviceGetCount");
    printf("\tMAXGPUS = %u\n", devcount);
    /* malloc one gpu_t struct for each device */
    gpus = (gpu_t*)malloc(sizeof(gpu_t) * devcount);
    /* return error if n > devcount (cast avoids signed/unsigned mismatch) */
    if ((unsigned int)s->ngpus > devcount) {
        fprintf(stderr, "pt error: [g = %i] > [MAXGPUS = %u]. (try g <= MAXGPUS)\n", s->ngpus, devcount);
        exit(1);
    }
    /* get the information of each GPU */
    printf("\tListing devices:\n");
    for (i = 0; i < devcount; i++) {
        nvmlDevice_t dev;
        char name[64];
        nvmlUtilization_t util;
        r = nvmlDeviceGetHandleByIndex(i, &dev);
        nvml_check(r, "nvmlDeviceGetHandleByIndex");
        r = nvmlDeviceGetName(dev, name, sizeof(name) / sizeof(name[0]));
        nvml_check(r, "nvmlDeviceGetName");
        printf("\t\tGPU%d. %s", i, name);
        r = nvmlDeviceGetUtilizationRates(dev, &util);
        u = nvml_check(r, "nvmlDeviceGetUtilizationRates");
        if (u) {
            printf(" -> util = %i%%\n", util.gpu);
            gpus[i].i = i;
            gpus[i].u = util.gpu;
            gpus[i].m = util.memory;
        }
        else {
            gpus[i].i = i;
        }
    }
    /* when utilization data is available, sort idlest-first.
       BUGFIX: qsort used sizeof(gpu) — 'gpu' is not a declared type here;
       the element type is gpu_t. */
    if (u) {
        qsort(gpus, devcount, sizeof(gpu_t), compgpu);
    }
    /* malloc info for 'n' GPUs */
    s->gpus = (gpu_t*)malloc(sizeof(gpu_t) * s->ngpus);
    printf("\tchosen GPU(s) = {");
    for (i = 0; i < (unsigned int)s->ngpus; i++) {
        s->gpus[i] = gpus[i];
        printf(" GPU%i", s->gpus[i].i);
    }
    printf(" }\n");
    /* shutdown the nvml library */
    r = nvmlShutdown();
    nvml_check(r, "nvmlShutdown");
    /* free the auxiliary gpu_t array */
    free(gpus);
}

/* Prints the run parameters and the kernel launch geometry. */
void printparams(setup_t *s)
{
    printf("\tparameters:{\n");
    printf("\t\tL:                              %i\n", s->L);
    printf("\t\tvolume:                         %i\n", s->N);
    printf("\t\t[TR,dT]:                        [%f, %f]\n", s->TR, s->dT);
    printf("\t\t[atrials, ains, apts, ams]:     [%i, %i, %i, %i]\n", s->atrials, s->ains, s->apts, s->ams);
    printf("\t\tmag_field h:                    %f\n", s->h);
    printf("\t\treplicas:                       %i\n", s->R);
    printf("\t\tptsteps:                        %i\n", s->pts);
    printf("\t\tmzone:                          %i\n", s->mzone);
    printf("\t\tdrop_steps:                     %i\n", s->ds);
    printf("\t\tmcsteps:                        %i\n", s->ms);
    printf("\t\tmeasure:                        %i\n", s->fs);
    printf("\t\tperiod:                         %i\n", s->period);
    printf("\t\tnblocks:                        %i\n", s->blocks);
    printf("\t\trealizations:                   %i\n", s->realizations);
    printf("\t\tseed:                           %lu\n", s->seed);
    printf("\t\tmicrosteps:                     %i\n", s->cs);
    printf("\t\tNGPUS:                          %i\n\t}\n", s->ngpus);
    /* print space of computation */
    printf("\tsoc{\n\t\tmcgrid is %i x %i x %i    mcblock %i x %i x %i\n\t\tlgrid is %i x %i x %i    lblock %i x %i x %i \n\t}\n",
           s->mcgrid.x, s->mcgrid.y, s->mcgrid.z, s->mcblock.x, s->mcblock.y, s->mcblock.z,
           s->lgrid.x, s->lgrid.y, s->lgrid.z, s->lblock.x, s->lblock.y, s->lblock.z);
}

/* Parses the command line into the setup struct.  Expects exactly 28
   arguments in the -l/-t/-a/-h/-s/-br/-z/-g flag layout shown in usage. */
void getparams(setup_t *s, int argc, char **argv)
{
    /* if the number of arguments is not correct, stop the program */
    if (argc != 28) {
        printf("run as:\n./bin/trueke -l <L> <R> -t <T> <dT> -a <tri> <ins> <pts> <ms> -h <h> -s <pts> <mz> <eq> <ms> <meas> <per> -br <b> <r> -z <seed> -g <x>\n");
        exit(1);
    }
    else {
        for (int i = 0; i < argc; i++) {
            /* lattice size and number of replicas */
            if (strcmp(argv[i], "-l") == 0) {
                s->L = atoi(argv[i + 1]);
                s->R = atoi(argv[i + 2]);
            }
            /* get TR and dT */
            else if (strcmp(argv[i], "-t") == 0) {
                s->TR = atof(argv[i + 1]);
                s->dT = atof(argv[i + 2]);
            }
            /* the magnetic field constant */
            else if (strcmp(argv[i], "-h") == 0) {
                s->h = atof(argv[i + 1]);
            }
            /* ptsteps, measure zone, drop steps, mc steps, final steps, period */
            else if (strcmp(argv[i], "-s") == 0) {
                s->pts    = atof(argv[i + 1]);
                s->mzone  = atoi(argv[i + 2]);
                s->ds     = atof(argv[i + 3]);
                s->ms     = atof(argv[i + 4]);
                s->fs     = atof(argv[i + 5]);
                s->period = atof(argv[i + 6]);
            }
            /* number of measure blocks and realizations */
            else if (strcmp(argv[i], "-br") == 0) {
                s->blocks       = atof(argv[i + 1]);
                s->realizations = atof(argv[i + 2]);
            }
            /* adaptative dt parameters */
            else if (strcmp(argv[i], "-a") == 0) {
                s->atrials = atoi(argv[i + 1]);
                s->ains    = atoi(argv[i + 2]);
                s->apts    = atoi(argv[i + 3]);
                s->ams     = atoi(argv[i + 4]);
            }
            /* number of gpus */
            else if (strcmp(argv[i], "-g") == 0) {
                s->ngpus = atoi(argv[i + 1]);
            }
            /* seed, (pass 0 for /dev/urandom) */
            else if (strcmp(argv[i], "-z") == 0) {
                s->seed = atoi(argv[i + 1]);
            }
        }
    }
    /* NOTE(review): this only warns and does not exit — confirm whether a
       non-multiple-of-32 L is actually tolerated downstream. */
    if ((s->L % 32) != 0)
        fprintf(stderr, "lattice dimensional size must be multiples of 32\n");
}
#endif
/* ===================== file: pooling_hcl_arm.h ===================== */
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include "pooling_param.h"

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "module/module.h"
#include "operator/op.h"
#include "utility/float.h"
#include "utility/sys_port.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"

#include <assert.h>
#include <math.h>
#include <stddef.h>

#include <arm_neon.h>

/* Specialized pooling kernel selectors (kernel-size / stride shapes). */
#define POOL_GENERIC 0
#define POOL_K2S2    1
#define POOL_K3S2    2
#define POOL_K3S1    3

/* Common signature for all the specialized pooling kernels below. */
typedef void (*pooling_kernel_t)(const void* input, void* output, int inc, int inh, int inw,
                                 int outh, int outw, int, int, int, int, int, int, int pad_h1,
                                 int pad_w1, int);

/*
 * 2x2 average pooling, stride 2, NCHW float32, NEON-vectorized.
 * Processes 4 output columns per iteration (8 input columns per row pair);
 * remaining columns and the pad_w1/pad_h1 tails are handled scalar, with
 * the divisor reduced to the actual window size (0.5 for a 2x1/1x2 edge
 * window, the bare value for the 1x1 corner).
 * NOTE(review): k_h..is_caffe parameters are unused here; they exist to
 * match the shared pooling_kernel_t signature.
 */
static void avg_2x2s2(const float* input, float* output, int inc, int inh, int inw, int outh,
                      int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0,
                      int pad_h1, int pad_w1, int is_caffe)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;
    /* shrink the vectorized region so the right/bottom pad columns are
       handled by the scalar tail code */
    if (pad_w1 > 0)
    {
        outw--;
    }
    if (pad_h1 > 0)
    {
        outh--;
    }
    int block_w = outw >> 2;            /* number of 4-output NEON blocks per row */
    int remain_w = inw - outw * 2;      /* input columns left after the row walk */
    for (int c = 0; c < inc; c++)
    {
        const float* line0 = input + c * in_hw;
        const float* line1 = line0 + inw;
        float* out_ptr = output + c * out_hw;
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < block_w; j++)
            {
                /* vertical add of the two rows, then pairwise add across
                   columns gives four 2x2 window sums at once */
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p10 = vld1q_f32(line1);
                float32x4_t sum0 = vaddq_f32(p00, p10);
                float32x4_t p01 = vld1q_f32(line0 + 4);
                float32x4_t p11 = vld1q_f32(line1 + 4);
                float32x4_t sum1 = vaddq_f32(p01, p11);
#ifdef __aarch64__
                sum0 = vpaddq_f32(sum0, sum1);
#else
                float32x2_t sum0_1 = vpadd_f32(vget_low_f32(sum0), vget_high_f32(sum0));
                float32x2_t sum0_2 = vpadd_f32(vget_low_f32(sum1), vget_high_f32(sum1));
                sum0 = vcombine_f32(sum0_1, sum0_2);
#endif
                sum0 = vmulq_n_f32(sum0, 0.25f);
                vst1q_f32(out_ptr, sum0);
                line0 += 8;
                line1 += 8;
                out_ptr += 4;
            }
            /* scalar tail for the remaining full 2x2 windows in this row */
            for (int j = block_w * 4; j < outw; j++)
            {
                float32x2_t p1 = vld1_f32(line0);
                float32x2_t p2 = vld1_f32(line1);
                float32x2_t sum = vadd_f32(p1, p2);
                *out_ptr = (sum[0] + sum[1]) * 0.25f;
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            /* right-edge 1x2 window (only 2 valid inputs -> divide by 2) */
            if (pad_w1)
            {
                *out_ptr = (line0[0] + line1[0]) * 0.5f;
                out_ptr++;
            }
            line0 += remain_w + inw;
            line1 += remain_w + inw;
        }
        /* bottom-edge row: 2x1 windows from the single remaining line */
        if (pad_h1)
        {
            for (int j = 0; j < block_w; j++)
            {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p01 = vld1q_f32(line0 + 4);
#ifdef __aarch64__
                p00 = vpaddq_f32(p00, p01);
#else
                float32x2_t sum0_1 = vpadd_f32(vget_low_f32(p00), vget_high_f32(p00));
                float32x2_t sum0_2 = vpadd_f32(vget_low_f32(p01), vget_high_f32(p01));
                p00 = vcombine_f32(sum0_1, sum0_2);
#endif
                p00 = vmulq_n_f32(p00, 0.5f);
                vst1q_f32(out_ptr, p00);
                line0 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4; j < outw; j++)
            {
                float32x2_t p1 = vld1_f32(line0);
                *out_ptr = (p1[0] + p1[1]) * 0.5f;
                out_ptr++;
                line0 += 2;
            }
            /* bottom-right 1x1 corner: single input passes through */
            if (pad_w1)
            {
                *out_ptr = line0[0];
                out_ptr++;
            }
        }
    }
}

/*
 * 2x2 max pooling, stride 2, NCHW float32, NEON-vectorized.
 * Same traversal as avg_2x2s2 but with max reductions; edge windows take
 * the max over only the valid inputs.
 * NOTE(review): k_h..is_caffe parameters are unused here; they exist to
 * match the shared pooling_kernel_t signature.
 */
static void max_2x2s2(const float* input, float* output, int inc, int inh, int inw, int outh,
                      int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0,
                      int pad_h1, int pad_w1, int is_caffe)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;
    /* shrink the vectorized region so the pad columns/rows are handled in
       the tail code */
    if (pad_w1 > 0)
    {
        outw--;
    }
    if (pad_h1 > 0)
    {
        outh--;
    }
    int block_w = outw >> 2;            /* number of 4-output NEON blocks per row */
    int remain_w = inw - outw * 2;      /* input columns left after the row walk */
    for (int c = 0; c < inc; c++)
    {
        const float* line0 = input + c * in_hw;
        const float* line1 = line0 + inw;
        float* out_ptr = output + c * out_hw;
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < block_w; j++)
            {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p10 = vld1q_f32(line1);
                float32x4_t p01 = vld1q_f32(line0 + 4);
                float32x4_t p11 = vld1q_f32(line1 + 4);
#ifdef __aarch64__
                float32x4_t max0 = vmaxq_f32(p00, p10);
                float32x4_t max1 = vmaxq_f32(p01, p11);
                /* pairwise max collapses columns: four 2x2 maxima at once */
                float32x4_t _max = vpmaxq_f32(max0, max1);
#else
                float32x2_t max0_1 = vpmax_f32(vget_low_f32(p00), vget_low_f32(p10));
                float32x2_t max0_2 = vpmax_f32(vget_high_f32(p00), vget_high_f32(p10));
                max0_1 = vpmax_f32(max0_1, max0_2);
                float32x2_t max1_1 = vpmax_f32(vget_low_f32(p01), vget_low_f32(p11));
                float32x2_t max1_2 = vpmax_f32(vget_high_f32(p01), vget_high_f32(p11));
                max1_1 = vpmax_f32(max1_1, max1_2);
                float32x4_t _max = vcombine_f32(max0_1, max1_1);
#endif
                vst1q_f32(out_ptr, _max);
                line0 += 8;
                line1 += 8;
                out_ptr += 4;
            }
            /* scalar tail for the remaining full 2x2 windows in this row */
            for (int j = block_w * 4; j < outw; j++)
            {
                float32x2_t p1 = vld1_f32(line0);
                float32x2_t p2 = vld1_f32(line1);
                float32x2_t _max = vmax_f32(p1, p2);
                *out_ptr = fmax(_max[0], _max[1]);
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            /* right-edge 1x2 window */
            if (pad_w1 > 0)
            {
                *out_ptr = fmax(line0[0], line1[0]);
                out_ptr++;
            }
            line0 += remain_w + inw;
            line1 += remain_w + inw;
        }
        /* bottom-edge row: 2x1 windows from the single remaining line */
        if (pad_h1 > 0)
        {
            for (int j = 0; j < block_w; j++)
            {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p01 = vld1q_f32(line0 + 4);
#ifdef __aarch64__
                p00 = vpmaxq_f32(p00, p01);
#else
                float32x2_t max0_1 = vpmax_f32(vget_low_f32(p00), vget_high_f32(p00));
                float32x2_t max0_2 = vpmax_f32(vget_low_f32(p01), vget_high_f32(p01));
                p00 = vcombine_f32(max0_1, max0_2);
#endif
                vst1q_f32(out_ptr, p00);
                line0 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4; j < outw; j++)
            {
                float32x2_t p1 = vld1_f32(line0);
                *out_ptr = fmax(p1[0], p1[1]);
                out_ptr++;
                line0 += 2;
            }
            /* bottom-right 1x1 corner */
            if (pad_w1 > 0)
            {
                *out_ptr = line0[0];
                out_ptr++;
            }
        }
    }
}
static void avg_3x3s2(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe) { int in_hw = inw * inh; int out_hw = outh * outw; if (pad_w1 > 0) { outw--; } if (pad_h1 > 0) { outh--; } int block_w = outw >> 2; int remain_w = inw - outw * 2; for (int c = 0; c < inc; c++) { const float* line0 = input + c * in_hw; const float* line1 = line0 + inw; const float* line2 = line1 + inw; float* out_ptr = output + c * out_hw; for (int i = 0; i < outh; i++) { float32x4x2_t p00 = vld2q_f32(line0); float32x4x2_t p10 = vld2q_f32(line1); float32x4x2_t p20 = vld2q_f32(line2); for (int j = 0; j < block_w; j++) { float32x4x2_t p00_new = vld2q_f32(line0 + 8); float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]); float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1); sum0 = vaddq_f32(sum0, p01); float32x4x2_t p10_new = vld2q_f32(line1 + 8); float32x4_t sum1 = vaddq_f32(p10.val[0], p10.val[1]); float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1); sum1 = vaddq_f32(sum1, p11); float32x4x2_t p20_new = vld2q_f32(line2 + 8); float32x4_t sum2 = vaddq_f32(p20.val[0], p20.val[1]); float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1); sum2 = vaddq_f32(sum2, p21); sum0 = vaddq_f32(vaddq_f32(sum0, sum1), sum2); sum0 = vmulq_n_f32(sum0, 0.11111111f); vst1q_f32(out_ptr, sum0); p00 = p00_new; p10 = p10_new; p20 = p20_new; line0 += 8; line1 += 8; line2 += 8; out_ptr += 4; } for (int j = block_w * 4; j < outw; j++) { *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2] + line2[0] + line2[1] + line2[2]) * 0.11111111f; out_ptr++; line0 += 2; line1 += 2; line2 += 2; } if (pad_w1 == 1) { *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.16666667f; out_ptr++; } line0 += remain_w + inw; line1 += remain_w + inw; line2 += remain_w + inw; } if (pad_h1 == 1) { float32x4x2_t p00 = vld2q_f32(line0); float32x4x2_t p10 = 
vld2q_f32(line1); for (int j = 0; j < block_w; j++) { float32x4x2_t p00_new = vld2q_f32(line0 + 8); float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]); float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1); sum0 = vaddq_f32(sum0, p01); float32x4x2_t p10_new = vld2q_f32(line1 + 8); float32x4_t sum1 = vaddq_f32(p10.val[0], p10.val[1]); float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1); sum1 = vaddq_f32(sum1, p11); sum0 = vaddq_f32(sum0, sum1); sum0 = vmulq_n_f32(sum0, 0.16666667f); vst1q_f32(out_ptr, sum0); p00 = p00_new; p10 = p10_new; line0 += 8; line1 += 8; out_ptr += 4; } for (int j = block_w * 4; j < outw; j++) { *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2]) * 0.16666667f; out_ptr++; line0 += 2; line1 += 2; } if (pad_w1 == 1) { *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.25f; out_ptr++; } else if (pad_w1 == 2) { *out_ptr = (line0[0] + line1[0]) * 0.5f; out_ptr++; } } else if (pad_h1 == 2) { float32x4x2_t p00 = vld2q_f32(line0); for (int j = 0; j < block_w; j++) { float32x4x2_t p00_new = vld2q_f32(line0 + 8); float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]); float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1); sum0 = vaddq_f32(sum0, p01); sum0 = vmulq_n_f32(sum0, 0.3333333f); vst1q_f32(out_ptr, sum0); p00 = p00_new; line0 += 8; out_ptr += 4; } for (int j = block_w * 4; j < outw; j++) { *out_ptr = (line0[0] + line0[1] + line0[2]) * 0.3333333f; out_ptr++; line0 += 2; } if (pad_w1 == 1) { *out_ptr = (line0[0] + line0[1]) * 0.5f; out_ptr++; } else if (pad_w1 == 2) { *out_ptr = line0[0]; out_ptr++; } } } } static void max_3x3s2(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe) { int in_hw = inw * inh; int out_hw = outh * outw; if (pad_w1 > 0) { outw--; } if (pad_h1 > 0) { outh--; } int block_w = outw >> 2; int remain_w = inw - outw * 2; for (int c = 0; c < inc; c++) { 
const float* line0 = input + c * in_hw; const float* line1 = line0 + inw; const float* line2 = line1 + inw; float* out_ptr = output + c * out_hw; for (int i = 0; i < outh; i++) { float32x4x2_t p00 = vld2q_f32(line0); float32x4x2_t p10 = vld2q_f32(line1); float32x4x2_t p20 = vld2q_f32(line2); for (int j = 0; j < block_w; j++) { /* p00 = [1,2,3,4,5,6,7,8] p00.val[0]=[1,3,5,7] max0 = [2,4,6,8] p00_new = [9,10,11,12,13,14,15,16] p01 = [3,5,7,9] max0=max(max0,p01)=[3,5,7,9] */ float32x4x2_t p00_new = vld2q_f32(line0 + 8); float32x4_t max0 = vmaxq_f32(p00.val[0], p00.val[1]); float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1); max0 = vmaxq_f32(max0, p01); float32x4x2_t p10_new = vld2q_f32(line1 + 8); float32x4_t max1 = vmaxq_f32(p10.val[0], p10.val[1]); float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1); max1 = vmaxq_f32(max1, p11); float32x4x2_t p20_new = vld2q_f32(line2 + 8); float32x4_t max2 = vmaxq_f32(p20.val[0], p20.val[1]); float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1); max2 = vmaxq_f32(max2, p21); max0 = vmaxq_f32(vmaxq_f32(max0, max1), max2); vst1q_f32(out_ptr, max0); p00 = p00_new; p10 = p10_new; p20 = p20_new; line0 += 8; line1 += 8; line2 += 8; out_ptr += 4; } for (int j = block_w * 4; j < outw; j++) { float max0 = fmax(fmax(line0[0], line0[1]), line0[2]); float max1 = fmax(fmax(line1[0], line1[1]), line1[2]); float max2 = fmax(fmax(line2[0], line2[1]), line2[2]); *out_ptr = fmax(fmax(max0, max1), max2); out_ptr++; line0 += 2; line1 += 2; line2 += 2; } if (pad_w1 == 1) { float max0 = fmax(fmax(line0[0], line0[1]), fmax(line1[0], line1[1])); *out_ptr = fmax(fmax(line2[0], line2[1]), max0); out_ptr++; } line0 += remain_w + inw; line1 += remain_w + inw; line2 += remain_w + inw; } if (pad_h1 == 1) { float32x4x2_t p00 = vld2q_f32(line0); float32x4x2_t p10 = vld2q_f32(line1); for (int j = 0; j < block_w; j++) { float32x4x2_t p00_new = vld2q_f32(line0 + 8); float32x4_t max0 = vmaxq_f32(p00.val[0], p00.val[1]); float32x4_t p01 = 
vextq_f32(p00.val[0], p00_new.val[0], 1); max0 = vmaxq_f32(max0, p01); float32x4x2_t p10_new = vld2q_f32(line1 + 8); float32x4_t max1 = vmaxq_f32(p10.val[0], p10.val[1]); float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1); max1 = vmaxq_f32(max1, p11); vst1q_f32(out_ptr, vmaxq_f32(max0, max1)); p00 = p00_new; p10 = p10_new; line0 += 8; line1 += 8; out_ptr += 4; } for (int j = block_w * 4; j < outw; j++) { float max0 = fmax(fmax(line0[0], line0[1]), line0[2]); float max1 = fmax(fmax(line1[0], line1[1]), line1[2]); *out_ptr = fmax(max0, max1); out_ptr++; line0 += 2; line1 += 2; } if (pad_w1 == 1) { *out_ptr = fmax(fmax(line0[0], line0[1]), fmax(line1[0], line1[1])); out_ptr++; } } } } static void avg_2x2s2_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe) { int in_hw = inw * inh; int out_hw = outh * outw; if (inw % 2 == 0) outw--; if (inh % 2 == 0) outh--; int block_w = (outw - 1) >> 2; int remain_w = inw - outw * 2 + 1; for (int c = 0; c < inc; c++) { const float* line00 = input + c * in_hw; float* out_ptr = output + c * out_hw; // h begin if (is_caffe == 0) *out_ptr = line00[0]; else *out_ptr = line00[0] * 0.25f; out_ptr++; line00++; for (int j = 0; j < block_w; j++) { float32x4_t p00 = vld1q_f32(line00); float32x4_t p01 = vld1q_f32(line00 + 4); #ifdef __aarch64__ float32x4_t sum0 = vpaddq_f32(p00, p01); #else float32x2_t sum0_1 = vpadd_f32(vget_low_f32(p00), vget_high_f32(p00)); float32x2_t sum0_2 = vpadd_f32(vget_low_f32(p01), vget_high_f32(p01)); float32x4_t sum0 = vcombine_f32(sum0_1, sum0_2); #endif if (is_caffe == 0) sum0 = vmulq_n_f32(sum0, 0.5f); else sum0 = vmulq_n_f32(sum0, 0.25f); vst1q_f32(out_ptr, sum0); line00 += 8; out_ptr += 4; } for (int j = block_w * 4 + 1; j < outw; j++) { if (is_caffe == 0) *out_ptr = (line00[0] + line00[1]) * 0.5f; else *out_ptr = (line00[0] + line00[1]) * 0.25f; out_ptr++; line00 += 2; } 
// NOTE(review): this span begins inside the tail of an average-pooling kernel
// (2x2, stride 2, pad 1) whose opening lines sit above this chunk; the code
// below is its "w end" / "h center" / "h end" handling.  Throughout this file,
// is_caffe==1 means edge windows are divided by the FULL kernel area (1/4 or
// 1/9) rather than by the number of in-bounds taps.
        // w end: when inw is even the padded last column covers one input column
        if (inw % 2 == 0)
        {
            if (is_caffe == 0)
                *out_ptr = line00[0];
            else
                *out_ptr = line00[0] * 0.25f;
            out_ptr++;
        }
        line00 += remain_w;
        // h center
        const float* line0 = line00;
        const float* line1 = line0 + inw;
        for (int i = 1; i < outh; i++)
        {
            // w begin: padded first column averages a single input column pair
            if (is_caffe == 0)
                *out_ptr = (line0[0] + line1[0]) * 0.5f;
            else
                *out_ptr = (line0[0] + line1[0]) * 0.25f;
            out_ptr++;
            line0++;
            line1++;
            // w center: 4 outputs per iteration, 8 input columns per row
            for (int j = 0; j < block_w; j++)
            {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p10 = vld1q_f32(line1);
                float32x4_t sum0 = vaddq_f32(p00, p10);
                float32x4_t p01 = vld1q_f32(line0 + 4);
                float32x4_t p11 = vld1q_f32(line1 + 4);
                float32x4_t sum1 = vaddq_f32(p01, p11);
#ifdef __aarch64__
                float32x4_t _sum = vpaddq_f32(sum0, sum1);
#else
                float32x2_t sum0_1 = vpadd_f32(vget_low_f32(sum0), vget_high_f32(sum0));
                float32x2_t sum0_2 = vpadd_f32(vget_low_f32(sum1), vget_high_f32(sum1));
                float32x4_t _sum = vcombine_f32(sum0_1, sum0_2);
#endif
                _sum = vmulq_n_f32(_sum, 0.25f);
                vst1q_f32(out_ptr, _sum);
                out_ptr += 4;
                line0 += 8;
                line1 += 8;
            }
            // scalar tail of the row
            for (int j = block_w * 4 + 1; j < outw; j++)
            {
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.25f;
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            // w end
            if (inw % 2 == 0)
            {
                if (is_caffe == 0)
                    *out_ptr = (line0[0] + line1[0]) * 0.5f;
                else
                    *out_ptr = (line0[0] + line1[0]) * 0.25f;
                out_ptr++;
            }
            line0 += remain_w + inw;
            line1 += remain_w + inw;
        }
        // h end: when inh is even the padded last row covers one input row
        if (inh % 2 == 0)
        {
            if (is_caffe == 0)
                *out_ptr = line0[0];
            else
                *out_ptr = line0[0] * 0.25f;
            out_ptr++;
            line0++;
            for (int j = 0; j < block_w; j++)
            {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p01 = vld1q_f32(line0 + 4);
#ifdef __aarch64__
                float32x4_t _sum = vpaddq_f32(p00, p01);
#else
                float32x2_t sum0_1 = vpadd_f32(vget_low_f32(p00), vget_high_f32(p00));
                float32x2_t sum0_2 = vpadd_f32(vget_low_f32(p01), vget_high_f32(p01));
                float32x4_t _sum = vcombine_f32(sum0_1, sum0_2);
#endif
                if (is_caffe == 0)
                    _sum = vmulq_n_f32(_sum, 0.5f);
                else
                    _sum = vmulq_n_f32(_sum, 0.25f);
                vst1q_f32(out_ptr, _sum);
                out_ptr += 4;
                line0 += 8;
            }
            for (int j = block_w * 4 + 1; j < outw; j++)
            {
                if (is_caffe == 0)
                    *out_ptr = (line0[0] + line0[1]) * 0.5f;
                else
                    *out_ptr = (line0[0] + line0[1]) * 0.25f;
                out_ptr++;
                line0 += 2;
            }
            if (inw % 2 == 0)
            {
                if (is_caffe == 0)
                    *out_ptr = line0[0];
                else
                    *out_ptr = line0[0] * 0.25f;
            }
        }
    }
}

// 2x2 stride-2 max pooling with 1-pixel padding, NEON-accelerated.
// k_h..pad_w1 only exist to match the shared pooling_kernel_t signature;
// this specialization derives everything from inh/inw.  The pad row/column
// is handled by replicating the single in-bounds element.
static void max_2x2s2_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw,
                         int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1,
                         int is_caffe)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;
    // outw/outh are decremented so the main loops only cover windows that are
    // fully inside the input; the even-size leftovers are handled explicitly.
    if (inw % 2 == 0)
        outw--;
    if (inh % 2 == 0)
        outh--;
    int block_w = (outw - 1) >> 2;      // 4-output vector iterations per row
    int remain_w = inw - outw * 2 + 1;  // input columns left after a row sweep
    for (int c = 0; c < inc; c++)
    {
        const float* line00 = input + c * in_hw;
        float* out_ptr = output + c * out_hw;
        // h begin: the padded top row reduces to a 1x2 window over line00
        *out_ptr = line00[0];
        out_ptr++;
        line00++;
        for (int j = 0; j < block_w; j++)
        {
            float32x4_t p00 = vld1q_f32(line00);
            float32x4_t p01 = vld1q_f32(line00 + 4);
#ifdef __aarch64__
            float32x4_t _max = vpmaxq_f32(p00, p01);
#else
            float32x2_t max0_1 = vpmax_f32(vget_low_f32(p00), vget_high_f32(p00));
            float32x2_t max0_2 = vpmax_f32(vget_low_f32(p01), vget_high_f32(p01));
            float32x4_t _max = vcombine_f32(max0_1, max0_2);
#endif
            vst1q_f32(out_ptr, _max);
            out_ptr += 4;
            line00 += 8;
        }
        for (int j = block_w * 4 + 1; j < outw; j++)
        {
            *out_ptr = fmax(line00[0], line00[1]);
            out_ptr++;
            line00 += 2;
        }
        if (inw % 2 == 0)
        {
            *out_ptr = line00[0];
            out_ptr++;
        }
        line00 += remain_w;
        // h center
        const float* line0 = line00;
        const float* line1 = line0 + inw;
        for (int i = 1; i < outh; i++)
        {
            // w begin
            *out_ptr = fmax(line0[0], line1[0]);
            out_ptr++;
            line0++;
            line1++;
            // w center
            for (int j = 0; j < block_w; j++)
            {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p10 = vld1q_f32(line1);
                float32x4_t p01 = vld1q_f32(line0 + 4);
                float32x4_t p11 = vld1q_f32(line1 + 4);
#ifdef __aarch64__
                float32x4_t max0 = vmaxq_f32(p00, p10);
                float32x4_t max1 = vmaxq_f32(p01, p11);
                float32x4_t _max = vpmaxq_f32(max0, max1);
#else
                float32x2_t max0_1 = vpmax_f32(vget_low_f32(p00), vget_low_f32(p10));
                float32x2_t max0_2 = vpmax_f32(vget_high_f32(p00), vget_high_f32(p10));
                max0_1 = vpmax_f32(max0_1, max0_2);
                float32x2_t max1_1 = vpmax_f32(vget_low_f32(p01), vget_low_f32(p11));
                float32x2_t max1_2 = vpmax_f32(vget_high_f32(p01), vget_high_f32(p11));
                max1_1 = vpmax_f32(max1_1, max1_2);
                float32x4_t _max = vcombine_f32(max0_1, max1_1);
#endif
                vst1q_f32(out_ptr, _max);
                out_ptr += 4;
                line0 += 8;
                line1 += 8;
            }
            for (int j = block_w * 4 + 1; j < outw; j++)
            {
                float32x2_t p1 = vld1_f32(line0);
                float32x2_t p2 = vld1_f32(line1);
                float32x2_t _max = vmax_f32(p1, p2);
                *out_ptr = fmax(_max[0], _max[1]);
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            // w end
            if (inw % 2 == 0)
            {
                *out_ptr = fmax(line0[0], line1[0]);
                out_ptr++;
            }
            line0 += remain_w + inw;
            line1 += remain_w + inw;
        }
        // h end
        if (inh % 2 == 0)
        {
            *out_ptr = line0[0];
            out_ptr++;
            line0++;
            for (int j = 0; j < block_w; j++)
            {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p01 = vld1q_f32(line0 + 4);
#ifdef __aarch64__
                float32x4_t _max = vpmaxq_f32(p00, p01);
#else
                float32x2_t max0_1 = vpmax_f32(vget_low_f32(p00), vget_high_f32(p00));
                float32x2_t max0_2 = vpmax_f32(vget_low_f32(p01), vget_high_f32(p01));
                float32x4_t _max = vcombine_f32(max0_1, max0_2);
#endif
                vst1q_f32(out_ptr, _max);
                out_ptr += 4;
                line0 += 8;
            }
            for (int j = block_w * 4 + 1; j < outw; j++)
            {
                *out_ptr = fmax(line0[0], line0[1]);
                out_ptr++;
                line0 += 2;
            }
            if (inw % 2 == 0)
            {
                *out_ptr = line0[0];
            }
        }
    }
}

// 3x3 stride-2 max pooling with 1-pixel padding.  Deinterleaving loads
// (vld2q_f32) split even/odd columns; vextq_f32 supplies the third tap of
// each window from the next vector.
static void max_3x3s2_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw,
                         int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1,
                         int is_caffe)
{
    // TLOG_ERR("max_3x3s2_p1\n");
    int in_hw = inw * inh;
    int out_hw = outh * outw;
    if (is_caffe == 1 || inw % 2 == 1)
        outw--;
    if (is_caffe == 1 || inh % 2 == 1)
        outh--;
    int block_w = (outw - 1) >> 2;
    int remain_w = inw - outw * 2 + 1;
    for (int c = 0; c < inc; c++)
    {
        const float* line1 = input + c * in_hw;
        const float* line2 = line1 + inw;
        float* out_ptr = output + c * out_hw;
        // h begin --------------------------------------- (top pad row: 2x2 window)
        *out_ptr = fmax(fmax(line1[0], line1[1]), fmax(line2[0], line2[1]));
        out_ptr++;
        line1 += 1;
        line2 += 1;
        float32x4x2_t p10 = vld2q_f32(line1);
        float32x4x2_t p20 = vld2q_f32(line2);
        for (int j = 0; j < block_w; j++)
        {
            float32x4x2_t p10_new = vld2q_f32(line1 + 8);
            float32x4_t max1 = vmaxq_f32(p10.val[0], p10.val[1]);
            float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
            max1 = vmaxq_f32(max1, p11);
            float32x4x2_t p20_new = vld2q_f32(line2 + 8);
            float32x4_t max2 = vmaxq_f32(p20.val[0], p20.val[1]);
            float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1);
            max2 = vmaxq_f32(max2, p21);
            max1 = vmaxq_f32(max1, max2);
            vst1q_f32(out_ptr, max1);
            p10 = p10_new;
            p20 = p20_new;
            line1 += 8;
            line2 += 8;
            out_ptr += 4;
        }
        for (int j = block_w * 4 + 1; j < outw; j++)
        {
            float max1 = fmax(fmax(line1[0], line1[1]), line1[2]);
            float max2 = fmax(fmax(line2[0], line2[1]), line2[2]);
            *out_ptr = fmax(max1, max2);
            out_ptr++;
            line1 += 2;
            line2 += 2;
        }
        if (inw % 2 == 1)
        {
            *out_ptr = fmax(fmax(line1[0], line1[1]), fmax(line2[0], line2[1]));
            out_ptr++;
        }
        else if (is_caffe == 1 && inw % 2 == 0)
        {
            *out_ptr = fmax(line1[0], line2[0]);
            out_ptr++;
        }
        line1 += remain_w;
        line2 += remain_w;
        // h center ---------------------------------------
        const float* line0 = line1;
        line1 = line2;
        line2 = line1 + inw;
        for (int i = 1; i < outh; i++)
        {
            // left (pad column: 3x2 window)
            float max0 = fmax(fmax(line1[0], line1[1]), fmax(line2[0], line2[1]));
            *out_ptr = fmax(fmax(line0[0], line0[1]), max0);
            out_ptr++;
            line0 += 1;
            line1 += 1;
            line2 += 1;
            // mid
            float32x4x2_t p00 = vld2q_f32(line0);
            float32x4x2_t p10 = vld2q_f32(line1);
            float32x4x2_t p20 = vld2q_f32(line2);
            for (int j = 0; j < block_w; j++)
            {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t max0 = vmaxq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                max0 = vmaxq_f32(max0, p01);
                float32x4x2_t p10_new = vld2q_f32(line1 + 8);
                float32x4_t max1 = vmaxq_f32(p10.val[0], p10.val[1]);
                float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
                max1 = vmaxq_f32(max1, p11);
                float32x4x2_t p20_new = vld2q_f32(line2 + 8);
                float32x4_t max2 = vmaxq_f32(p20.val[0], p20.val[1]);
                float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1);
                max2 = vmaxq_f32(max2, p21);
                max0 = vmaxq_f32(vmaxq_f32(max0, max1), max2);
                vst1q_f32(out_ptr, max0);
                p00 = p00_new;
                p10 = p10_new;
                p20 = p20_new;
                line0 += 8;
                line1 += 8;
                line2 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4 + 1; j < outw; j++)
            {
                float max0 = fmax(fmax(line0[0], line0[1]), line0[2]);
                float max1 = fmax(fmax(line1[0], line1[1]), line1[2]);
                float max2 = fmax(fmax(line2[0], line2[1]), line2[2]);
                *out_ptr = fmax(fmax(max0, max1), max2);
                out_ptr++;
                line0 += 2;
                line1 += 2;
                line2 += 2;
            }
            if (inw % 2 == 1)
            {
                max0 = fmax(fmax(line1[0], line1[1]), fmax(line2[0], line2[1]));
                *out_ptr = fmax(fmax(line0[0], line0[1]), max0);
                out_ptr++;
            }
            else if (inw % 2 == 0 && is_caffe == 1)
            {
                *out_ptr = fmax(fmax(line0[0], line1[0]), line2[0]);
                out_ptr++;
            }
            line0 += inw + remain_w;
            line1 += inw + remain_w;
            line2 += inw + remain_w;
        }
        // h end ------------------------------------------
        if (inh % 2 == 1)
        {
            *out_ptr = fmax(fmax(line1[0], line1[1]), fmax(line0[0], line0[1]));
            out_ptr++;
            line0 += 1;
            line1 += 1;
            float32x4x2_t p00 = vld2q_f32(line0);
            float32x4x2_t p10 = vld2q_f32(line1);
            for (int j = 0; j < block_w; j++)
            {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t max0 = vmaxq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                max0 = vmaxq_f32(max0, p01);
                float32x4x2_t p10_new = vld2q_f32(line1 + 8);
                float32x4_t max1 = vmaxq_f32(p10.val[0], p10.val[1]);
                float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
                max1 = vmaxq_f32(max1, p11);
                max0 = vmaxq_f32(max0, max1);
                vst1q_f32(out_ptr, max0);
                p00 = p00_new;
                p10 = p10_new;
                line0 += 8;
                line1 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4 + 1; j < outw; j++)
            {
                float max0 = fmax(fmax(line0[0], line0[1]), line0[2]);
                float max1 = fmax(fmax(line1[0], line1[1]), line1[2]);
                *out_ptr = fmax(max0, max1);
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            if (inw % 2 == 1)
            {
                *out_ptr = fmax(fmax(line1[0], line1[1]), fmax(line0[0], line0[1]));
                out_ptr++;
            }
            else if (inw % 2 == 0 && is_caffe == 1)
            {
                *out_ptr = fmax(line0[0], line1[0]);
                out_ptr++;
            }
        }
        else if (inh % 2 == 0 && is_caffe == 1)
        {
            // caffe-style extra bottom row: only one input row contributes
            *out_ptr = fmax(line0[0], line0[1]);
            out_ptr++;
            line0 += 1;
            float32x4x2_t p00 = vld2q_f32(line0);
            for (int j = 0; j < block_w; j++)
            {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t max0 = vmaxq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                max0 = vmaxq_f32(max0, p01);
                vst1q_f32(out_ptr, max0);
                p00 = p00_new;
                line0 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4 + 1; j < outw; j++)
            {
                *out_ptr = fmax(fmax(line0[0], line0[1]), line0[2]);
                out_ptr++;
                line0 += 2;
            }
            if (inw % 2 == 1)
            {
                *out_ptr = fmax(line0[0], line0[1]);
                out_ptr++;
            }
            else if (inw % 2 == 0)
            {
                *out_ptr = line0[0];
            }
        }
    }
}

// 3x3 stride-2 average pooling with 1-pixel padding.  Scale factors:
// 1/9 (0.11111111f) for full windows, 1/6 (0.16666667f) for 3x2 edge
// windows, 1/4 for 2x2 corners -- except is_caffe==1 always uses 1/9.
static void avg_3x3s2_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw,
                         int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1,
                         int is_caffe)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;
    if (is_caffe == 1 || inw % 2 == 1)
        outw--;
    if (is_caffe == 1 || inh % 2 == 1)
        outh--;
    int block_w = (outw - 1) >> 2;
    int remain_w = inw - outw * 2 + 1;
    for (int c = 0; c < inc; c++)
    {
        const float* line1 = input + c * in_hw;
        const float* line2 = line1 + inw;
        float* out_ptr = output + c * out_hw;
        // h begin ---------------------------------------
        if (is_caffe == 0)
            *out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 0.25f;
        else
            *out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f;
        out_ptr++;
        line1 += 1;
        line2 += 1;
        float32x4x2_t p10 = vld2q_f32(line1);
        float32x4x2_t p20 = vld2q_f32(line2);
        for (int j = 0; j < block_w; j++)
        {
            float32x4x2_t p10_new = vld2q_f32(line1 + 8);
            float32x4_t sum1 = vaddq_f32(p10.val[0], p10.val[1]);
            float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
            sum1 = vaddq_f32(sum1, p11);
            float32x4x2_t p20_new = vld2q_f32(line2 + 8);
            float32x4_t sum2 = vaddq_f32(p20.val[0], p20.val[1]);
            float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1);
            sum2 = vaddq_f32(sum2, p21);
            sum1 = vaddq_f32(sum1, sum2);
            if (is_caffe == 0)
                sum1 = vmulq_n_f32(sum1, 0.16666667f);
            else
                sum1 = vmulq_n_f32(sum1, 0.11111111f);
            vst1q_f32(out_ptr, sum1);
            p10 = p10_new;
            p20 = p20_new;
            line1 += 8;
            line2 += 8;
            out_ptr += 4;
        }
        for (int j = block_w * 4 + 1; j < outw; j++)
        {
            if (is_caffe == 0)
                *out_ptr = (line1[0] + line1[1] + line1[2] + line2[0] + line2[1] + line2[2]) * 0.16666667f;
            else
                *out_ptr = (line1[0] + line1[1] + line1[2] + line2[0] + line2[1] + line2[2]) * 0.11111111f;
            out_ptr++;
            line1 += 2;
            line2 += 2;
        }
        if (inw % 2 == 1)
        {
            if (is_caffe == 0)
                *out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 0.25f;
            else
                *out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f;
            out_ptr++;
        }
        else if (inw % 2 == 0 && is_caffe == 1)
        {
            *out_ptr = (line1[0] + line2[0]) * 0.16666667f;
            out_ptr++;
        }
        line1 += remain_w;
        line2 += remain_w;
        // h center ---------------------------------------
        const float* line0 = line1;
        line1 = line2;
        line2 = line1 + inw;
        for (int i = 1; i < outh; i++)
        {
            // left
            if (is_caffe == 0)
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.16666667f;
            else
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f;
            out_ptr++;
            line0 += 1;
            line1 += 1;
            line2 += 1;
            // mid
            float32x4x2_t p00 = vld2q_f32(line0);
            float32x4x2_t p10 = vld2q_f32(line1);
            float32x4x2_t p20 = vld2q_f32(line2);
            for (int j = 0; j < block_w; j++)
            {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                sum0 = vaddq_f32(sum0, p01);
                float32x4x2_t p10_new = vld2q_f32(line1 + 8);
                float32x4_t sum1 = vaddq_f32(p10.val[0], p10.val[1]);
                float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
                sum1 = vaddq_f32(sum1, p11);
                float32x4x2_t p20_new = vld2q_f32(line2 + 8);
                float32x4_t sum2 = vaddq_f32(p20.val[0], p20.val[1]);
                float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1);
                sum2 = vaddq_f32(sum2, p21);
                sum0 = vaddq_f32(vaddq_f32(sum0, sum1), sum2);
                sum0 = vmulq_n_f32(sum0, 0.11111111f);
                vst1q_f32(out_ptr, sum0);
                p00 = p00_new;
                p10 = p10_new;
                p20 = p20_new;
                line0 += 8;
                line1 += 8;
                line2 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4 + 1; j < outw; j++)
            {
                *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2] + line2[0] + line2[1] + line2[2]) * 0.11111111f;
                out_ptr++;
                line0 += 2;
                line1 += 2;
                line2 += 2;
            }
            // end
            if (inw % 2 == 1)
            {
                if (is_caffe == 0)
                    *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.16666667f;
                else
                    *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f;
                out_ptr++;
            }
            else if (inw % 2 == 0 && is_caffe == 1)
            {
                *out_ptr = (line0[0] + line1[0] + line2[0]) * 0.16666667f;
                out_ptr++;
            }
            line0 += remain_w + inw;
            line1 += remain_w + inw;
            line2 += remain_w + inw;
        }
        // h end-------------------------------
        if (inh % 2 == 1)
        {
            if (is_caffe == 0)
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.25f;
            else
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.11111111f;
            out_ptr++;
            line0 += 1;
            line1 += 1;
            float32x4x2_t p00 = vld2q_f32(line0);
            float32x4x2_t p10 = vld2q_f32(line1);
            for (int j = 0; j < block_w; j++)
            {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                sum0 = vaddq_f32(sum0, p01);
                float32x4x2_t p10_new = vld2q_f32(line1 + 8);
                float32x4_t sum1 = vaddq_f32(p10.val[0], p10.val[1]);
                float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
                sum1 = vaddq_f32(sum1, p11);
                sum0 = vaddq_f32(sum0, sum1);
                if (is_caffe == 0)
                    sum0 = vmulq_n_f32(sum0, 0.16666667f);
                else
                    sum0 = vmulq_n_f32(sum0, 0.11111111f);
                vst1q_f32(out_ptr, sum0);
                p00 = p00_new;
                p10 = p10_new;
                line0 += 8;
                line1 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4 + 1; j < outw; j++)
            {
                if (is_caffe == 0)
                    *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2]) * 0.16666667f;
                else
                    *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2]) * 0.11111111f;
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            if (inw % 2 == 1)
            {
                if (is_caffe == 0)
                    *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.25f;
                else
                    *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.11111111f;
                out_ptr++;
            }
            else if (inw % 2 == 0 && is_caffe == 1)
            {
                *out_ptr = (line0[0] + line1[0]) * 0.16666667f;
                out_ptr++;
            }
        }
        // NOTE(review): this branch tests inw, while the equivalent h-end
        // branch in max_3x3s2_p1 tests inh -- possibly a typo here; confirm
        // against the reference implementation.
        else if (inw % 2 == 0 && is_caffe == 1)
        {
            *out_ptr = (line0[0] + line0[1]) * 0.16666667f;
            out_ptr++;
            line0 += 1;
            float32x4x2_t p00 = vld2q_f32(line0);
            for (int j = 0; j < block_w; j++)
            {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                sum0 = vaddq_f32(sum0, p01);
                sum0 = vmulq_n_f32(sum0, 0.16666667f);
                vst1q_f32(out_ptr, sum0);
                p00 = p00_new;
                line0 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4 + 1; j < outw; j++)
            {
                *out_ptr = (line0[0] + line0[1] + line0[2]) * 0.16666667f;
                out_ptr++;
                line0 += 2;
            }
            if (inw % 2 == 1)
            {
                *out_ptr = (line0[0] + line0[1]) * 0.16666667f;
                out_ptr++;
            }
            else if (inw % 2 == 0)
            {
                *out_ptr = line0[0] * 0.25f;
                out_ptr++;
            }
        }
    }
}

// 3x3 stride-1 max pooling with 1-pixel padding (output size == input size;
// out_ptr is offset by in_hw per channel).  Scalar implementation.
static void max_3x3s1_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw,
                         int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1,
                         int is_caffe)
{
    // TLOG_ERR("max_3x3s1_p1\n");
    int in_hw = inw * inh;
    int mid_w = inw - 2;
    int mid_h = inh - 2;
    for (int c = 0; c < inc; c++)
    {
        const float* line1 = input + c * in_hw;
        const float* line2 = line1 + inw;
        float* out_ptr = output + c * in_hw;
        // h begin left----[line1+=0]-----------------------------------
        *out_ptr = fmax(fmax(line1[0], line1[1]), fmax(line2[0], line2[1]));
        out_ptr++;
        // h begin center----[line1+=1]----------------------------------
        for (int j = 0; j < mid_w; j++)
        {
            float max1 = fmax(fmax(line1[0], line1[1]), line1[2]);
            float max2 = fmax(fmax(line2[0], line2[1]), line2[2]);
            *out_ptr = fmax(max2, max1);
            out_ptr++;
            line1 += 1;
            line2 += 1;
        }
        // h begin right----[line1+=2]-----------------------------------
        *out_ptr = fmax(fmax(line1[0], line1[1]), fmax(line2[0], line2[1]));
        out_ptr++;
        line1 += 2;
        line2 += 2;
        // h center ---------------------------------------
        const float* line0 = input + c * in_hw;
        for (int i = 0; i < mid_h; i++)
        {
            // left
            float max0 = fmax(fmax(line1[0], line1[1]), fmax(line2[0], line2[1]));
            *out_ptr = fmax(fmax(line0[0], line0[1]), max0);
            out_ptr++;
            // mid
            for (int j = 0; j < mid_w; j++)
            {
                float max0 = fmax(fmax(line0[0], line0[1]), line0[2]);
                float max1 = fmax(fmax(line1[0], line1[1]), line1[2]);
                float max2 = fmax(fmax(line2[0], line2[1]), line2[2]);
                *out_ptr = fmax(fmax(max0, max1), max2);
                out_ptr++;
                line0 += 1;
                line1 += 1;
                line2 += 1;
            }
            // right
            max0 = fmax(fmax(line1[0], line1[1]), fmax(line2[0], line2[1]));
            *out_ptr = fmax(fmax(line0[0], line0[1]), max0);
            out_ptr++;
            line0 += 2;
            line1 += 2;
            line2 += 2;
        }
        // h end ------------------------------------------
        *out_ptr = fmax(fmax(line1[0], line1[1]), fmax(line0[0], line0[1]));
        out_ptr++;
        for (int j = 0; j < mid_w; j++)
        {
            float max0 = fmax(fmax(line0[0], line0[1]), line0[2]);
            float max1 = fmax(fmax(line1[0], line1[1]), line1[2]);
            *out_ptr = fmax(max0, max1);
            out_ptr++;
            line0 += 1;
            line1 += 1;
        }
        *out_ptr = fmax(fmax(line1[0], line1[1]), fmax(line0[0], line0[1]));
    }
}

// 3x3 stride-1 average pooling with 1-pixel padding (output size == input
// size).  Same is_caffe scaling convention as avg_3x3s2_p1.
static void avg_3x3s1_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw,
                         int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1,
                         int is_caffe)
{
    // TLOG_ERR("avg_3x3s1_p1\n");
    int in_hw = inw * inh;
    int mid_w = inw - 2;
    int mid_h = inh - 2;
    for (int c = 0; c < inc; c++)
    {
        const float* line1 = input + c * in_hw;
        const float* line2 = line1 + inw;
        float* out_ptr = output + c * in_hw;
        // h begin left----[line1+=0]-----------------------------------
        if (is_caffe == 0)
            *out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 0.25f;
        else
            *out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f;
        out_ptr++;
        // h begin center----[line1+=1]----------------------------------
        for (int j = 0; j < mid_w; j++)
        {
            if (is_caffe == 0)
                *out_ptr = (line1[0] + line1[1] + line1[2] + line2[0] + line2[1] + line2[2]) * 0.16666667f;
            else
                *out_ptr = (line1[0] + line1[1] + line1[2] + line2[0] + line2[1] + line2[2]) * 0.11111111f;
            out_ptr++;
            line1 += 1;
            line2 += 1;
        }
        // h begin right----[line1+=2]-----------------------------------
        if (is_caffe == 0)
            *out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 0.25f;
        else
            *out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f;
        out_ptr++;
        line1 += 2;
        line2 += 2;
        // h center ---------------------------------------
        const float* line0 = input + c * in_hw;
        for (int i = 0; i < mid_h; i++)
        {
            // left
            if (is_caffe == 0)
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.16666667f;
            else
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f;
            out_ptr++;
            // mid
            for (int j = 0; j < mid_w; j++)
            {
                *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2] + line2[0] + line2[1] + line2[2]) * 0.11111111f;
                out_ptr++;
                line0 += 1;
                line1 += 1;
                line2 += 1;
            }
            // right
            if (is_caffe == 0)
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.16666667f;
            else
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f;
            out_ptr++;
            line0 += 2;
            line1 += 2;
            line2 += 2;
        }
        // h end ------------------------------------------
        if (is_caffe == 0)
            *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.25f;
        else
            *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.11111111f;
        out_ptr++;
        for (int j = 0; j < mid_w; j++)
        {
            if (is_caffe == 0)
                *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2]) * 0.16666667f;
            else
                *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2]) * 0.11111111f;
            out_ptr++;
            line0 += 1;
            line1 += 1;
        }
        if (is_caffe == 0)
            *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.25f;
        else
            *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.11111111f;
    }
}

// Global average pooling: one output value per channel.  The vector loop
// consumes 8 floats per iteration; `tail` is the first index it leaves over.
static void avg_global(const float* input, float* output, int inc, int inh, int inw, int outh, int outw,
                       int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1,
                       int is_caffe)
{
    int in_hw = inw * inh;
    int block = in_hw >> 3;
    int tail = in_hw & ~7;
    for (int c = 0; c < inc; c++)
    {
        const float* line0 = input + c * in_hw;
        float* out_ptr = output + c;
        float sum = 0.f;
        for (int j = 0; j < block; j++)
        {
            float32x4_t p00 = vld1q_f32(line0);
            float32x4_t p01 = vld1q_f32(line0 + 4);
            p00 = vaddq_f32(p00, p01);
            // p00=vpaddq_f32(p00,p00);
            // sum+=(p00[0]+p00[1]);
            sum += (p00[0] + p00[1] + p00[2] + p00[3]);
            line0 += 8;
        }
        for (int j = tail; j < in_hw; j++)
        {
            sum += line0[0];
            line0++;
        }
        *out_ptr = sum / in_hw;
    }
}

// Global max pooling: one output value per channel.
// NOTE(review): `res` is seeded with an unconditional 4-float load from the
// start of the channel -- confirm callers guarantee in_hw >= 4.
static void max_global(const float* input, float* output, int inc, int inh, int inw, int outh, int outw,
                       int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1,
                       int is_caffe)
{
    int in_hw = inw * inh;
    int block = in_hw >> 3;
    int tail = in_hw & ~7;
    for (int c = 0; c < inc; c++)
    {
        const float* line0 = input + c * in_hw;
        float* out_ptr = output + c;
        float32x4_t p00 = vld1q_f32(line0);
        float32x4_t res = p00;
        for (int j = 0; j < block; j++)
        {
            float32x4_t p00 = vld1q_f32(line0);
            float32x4_t p01 = vld1q_f32(line0 + 4);
            float32x4_t max0 = vmaxq_f32(p00, p01);
            res = vmaxq_f32(res, max0);
            line0 += 8;
        }
        float max_ = fmax(fmax(res[0], res[1]), fmax(res[2], res[3]));
        for (int j = tail; j < in_hw; j++)
        {
            max_ = fmax(max_, line0[0]);
            line0++;
        }
        *out_ptr = max_;
    }
}

// Select the specialized pooling kernel for this layer and cache it in
// param->funct.  Returns 0 on success, -1 when no specialization matches.
int pooling_kernel_perf_prerun(struct tensor* input, struct tensor* out, struct pool_param* param)
{
    int pool_size = POOL_GENERIC;
    /* global pooling */
    if (param->global)
    {
        if (param->pool_method == POOL_AVG)
            param->funct = ( pooling_kernel_t )avg_global;
        else if (param->pool_method == POOL_MAX)
            param->funct = ( pooling_kernel_t )max_global;
        assert(param->funct != NULL);
        return 0;
    }
    /* general pooling: classify by kernel/stride combination */
    if (param->stride_h == 2 && param->stride_w == 2)
    {
        if (param->kernel_h == 2 && param->kernel_w == 2)
            pool_size = POOL_K2S2;
        else if (param->kernel_h == 3 && param->kernel_w == 3)
            pool_size = POOL_K3S2;
    }
    else if (param->stride_h == 1 && param->stride_w == 1)
    {
        if (param->kernel_h == 3 && param->kernel_w == 3)
            pool_size = POOL_K3S1;
    }
    /* general max pooling, k2s2, k2k2p1, k3s1p1, k3s2, k3s2p1 */
    if (param->pool_method == POOL_MAX)
    {
        // only symmetric padding (h == w on both sides) is specialized
        if ((param->pad_h0 == param->pad_w0) && (param->pad_h1 == param->pad_w1))
        {
            if (param->pad_h0 == 0)
            {
                if (pool_size == POOL_K2S2)
                    param->funct = ( pooling_kernel_t )max_2x2s2;
                else if (pool_size == POOL_K3S2)
                    param->funct = ( pooling_kernel_t )max_3x3s2;
            }
            else if (param->pad_h0 == 1)
            {
                if (pool_size == POOL_K2S2)
                    param->funct = ( pooling_kernel_t )max_2x2s2_p1;
                else if (pool_size == POOL_K3S2)
                    param->funct = ( pooling_kernel_t )max_3x3s2_p1;
                else if (pool_size == POOL_K3S1)
                    param->funct = ( pooling_kernel_t )max_3x3s1_p1;
            }
        }
        if (param->funct != NULL)
            return 0;
        else
        {
            TLOG_ERR("perf general max pooling func not be find\n");
            return -1;
        }
    }
    /* general avg pooling, k2s2, k2s2p1, k3s2, k3s2p1 */
    if (param->pool_method == POOL_AVG)
    {
        if ((param->pad_h0 == param->pad_w0) && (param->pad_h1 == param->pad_w1))
        {
            if (param->pad_h0 == 0 && param->pad_h1 == 0)
            {
                if (pool_size == POOL_K2S2)
                    param->funct = ( pooling_kernel_t )avg_2x2s2;
                else if (pool_size == POOL_K3S2)
                    param->funct = ( pooling_kernel_t )avg_3x3s2;
            }
            else if (param->pad_h0 == 1 && param->pad_h1 == 1)
            {
                if (pool_size == POOL_K2S2)
                    param->funct = ( pooling_kernel_t )avg_2x2s2_p1;
                else if (pool_size == POOL_K3S2)
                    param->funct = ( pooling_kernel_t )avg_3x3s2_p1;
                else if (pool_size == POOL_K3S1)
                    param->funct = ( pooling_kernel_t )avg_3x3s1_p1;
            }
            else if (param->pad_h0 == 0 && param->pad_h1 == 1)
            {
                if (pool_size == POOL_K3S2)
                    param->funct = ( pooling_kernel_t ) avg_3x3s2;
            }
        }
        if (param->funct != NULL)
            return 0;
        else
        {
            TLOG_ERR("perf general avg pooling func not be find\n");
            return -1;
        }
    }
    TLOG_ERR("perf pooling func not be find\n");
    return -1;
}

// Run the kernel selected by prerun on an NCHW tensor, parallelizing over
// channels with OpenMP (num_thread threads).  Always returns 0.
int pooling_kernel_perf_run(struct tensor* input, struct tensor* output, struct pool_param* param, int num_thread)
{
    // TLOG_ERR("perf pooling_kernel_run\n");
    int is_caffe = param->caffe_flavor;
    pooling_kernel_t kernel = (pooling_kernel_t)(param->funct);
    int batch = input->dims[0];
    int c = input->dims[1];
    int in_h = input->dims[2];
    int in_w = input->dims[3];
    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int img_size = c * in_h * in_w;
    int feature_size = c * out_h * out_w;
    for (int n = 0; n < batch; n++)
    {
        void* input_frame = input->data + n * img_size * input->elem_size;
        void* output_frame = output->data + n * feature_size * output->elem_size;
#pragma omp parallel for num_threads(num_thread)
        for (int ch = 0; ch < c; ch++)
        {
            // each iteration pools one channel independently (inc == 1)
            void* cur_input = input_frame + ch * in_h * in_w * input->elem_size;
            void* cur_output = output_frame + ch * out_h * out_w * output->elem_size;
            kernel(cur_input, cur_output, 1, in_h, in_w, out_h, out_w, param->kernel_h, param->kernel_w,
                   param->stride_h, param->stride_w, param->pad_h0, param->pad_w0, param->pad_h1, param->pad_w1,
                   is_caffe);
        }
    }
    return 0;
}
anim.c
#include "anim/anim.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "img/rgbaimg.h"
#include "img/pngio.h"
#include "util/misc.h"

/* Renders one frame to "<out_dir>/<zero-padded n>.png"; writes the digit
 * count of n_frames into *len_n_frames as a side effect. */
void _save_frame(
		const rgba_image *frame,
		size_t n, size_t n_frames,
		const char *out_dir,
		size_t *len_n_frames);

/* Render n_frames frames in parallel via create_frame(progress, arg), save
 * each as a PNG, then shell out to ffmpeg to assemble img/out.mp4.
 * smooth remaps linear time [0,1) to eased progress; NULL selects linear. */
void animate(
		create_frame_f create_frame,
		const void *arg,
		time_smoothing_f smooth,
		size_t n_frames,
		const char *out_dir)
{
	int cur_frame;
	double progress;
	rgba_image *output;
	char *cmd;
	size_t len_n_frames;

	if (smooth == NULL) {
		smooth = &anim_time_linear;
	}

	/* NOTE(review): len_n_frames is shared and written (unsynchronized) by
	 * every iteration through _save_frame -- a data race, and it stays
	 * uninitialized when n_frames == 0 yet is used below; confirm intent.
	 * Also cur_frame (int) is compared against n_frames (size_t). */
#pragma omp parallel for\
	num_threads(4)\
	private(output, progress)\
	shared(arg, create_frame, smooth, n_frames, out_dir)
	for (cur_frame = 0; cur_frame < n_frames; cur_frame++) {
#pragma omp critical
		{
			fprintf(stderr, "Creating frame %d\n", cur_frame);
		}
		progress = smooth((double) cur_frame / n_frames);
		output = create_frame(progress, arg);
		_save_frame(output, cur_frame, n_frames, out_dir, &len_n_frames);
		rgbaimg_destroy(output);
	}
	fprintf(stderr, "Done creating frames\n");

	/* builds e.g. "ffmpeg ... -i img/out/%03d.png ..." from the digit count */
	sprintf_alloc(&cmd, "ffmpeg -y -framerate 60 -i img/out/%%0%zud.png "
			"-c:v libx264 -pix_fmt yuv420p img/out.mp4", len_n_frames);
	system(cmd);
	free(cmd);
}

void _save_frame(
		const rgba_image *frame,
		size_t n, size_t n_frames,
		const char *out_dir,
		size_t *len_n_frames)
{
	char *path;
	char *pattern;

	/* digits needed to print n_frames -> zero-pad width for filenames */
	*len_n_frames = snprintf(NULL, 0, "%zu", n_frames);
	/* pattern becomes e.g. "%s/%03zu.png" */
	sprintf_alloc(&pattern, "%%s/%%0%zuzu.png", *len_n_frames);
	sprintf_alloc(&path, pattern, out_dir, n);
	png_save_to_file(frame, path);
	free(path);
	free(pattern);
}

/* Identity easing. */
double anim_time_linear(double t)
{
	return t;
}

/* Logistic curve; note this maps [0,1] to [0.5, ~0.731], not [0,1]. */
double anim_time_sigmoid(double t)
{
	return 1.0 / (1 + exp(-t));
}

/* Hermite smoothstep, clamped to [0,1]. */
double anim_time_smoothstep(double t)
{
	if (t < 0) {
		return 0;
	} else if (t > 1) {
		return 1;
	}
	return t*t * (3 - 2*t);
}

/* Perlin's 5th-order smootherstep, clamped to [0,1]. */
double anim_time_smootherstep(double t)
{
	if (t < 0) {
		return 0;
	} else if (t > 1) {
		return 1;
	}
	return t*t*t * (t * (6*t - 15) + 10);
}

/* 7th-order step polynomial, clamped to [0,1]. */
double anim_time_smootheststep(double t)
{
	double t3 = t*t*t;
	double t4 = t * t3;
	if (t < 0) {
		return 0;
	} else if (t > 1) {
		return 1;
	}
	return -20 * t4*t3 + 70 * t3*t3 - 84 * t4*t + 35 * t4;
}
sum.c
#include <stdio.h>
#include <stdlib.h>

/*
 * OpenMP reduction benchmark: fills a large vector with 0..n-1 in parallel,
 * then sums it with a parallel SIMD reduction.  The result is discarded
 * (the printf is intentionally left commented out for timing runs).
 */
int main() {
    int n = 40000000;
    //int n = 400000;
    double* vector = malloc(n * sizeof(double));
    /* ~320 MB request: fail cleanly instead of dereferencing NULL */
    if (vector == NULL) {
        fprintf(stderr, "failed to allocate %d doubles\n", n);
        return EXIT_FAILURE;
    }
    double sum = 0;
    #pragma omp parallel for simd
    for(int x = 0; x < n; ++x)
        vector[x] = x;
    #pragma omp parallel for simd reduction(+: sum) schedule(static, 100000)
    for(int y = 0; y < n; ++y)
        sum += vector[y];
    //printf("%lf", sum);
    free(vector);
    vector = NULL;
    return 0;
}
ccode_c.c
// -----------------------------------------------------------------------
// Copyright 2019 Centrum Wiskunde & Informatica, Amsterdam
// Author: Daniel M. Pelt
// Contact: D.M.Pelt@cwi.nl
// Website: http://dmpelt.github.io/foam_ct_phantom/
// License: MIT
// This file is part of foam_ct_phantom, a Python package for generating
// foam-like phantoms for CT.
// -----------------------------------------------------------------------

#define _USE_MATH_DEFINES
#include <math.h>
#include <omp.h>
#include <stdlib.h>

#ifdef _MSC_VER
#define DECLDIR __declspec(dllexport)
#else
#define DECLDIR
#endif

#ifndef __MTWISTER_H
#define __MTWISTER_H

#define STATE_VECTOR_LENGTH 624
#define STATE_VECTOR_M 397 /* changes to STATE_VECTOR_LENGTH also require changes to this */

/* Mersenne-Twister state; the implementation lives in a separate TU. */
typedef struct tagMTRand {
  unsigned long mt[STATE_VECTOR_LENGTH];
  int index;
} MTRand;

MTRand seedRand(unsigned long seed);
unsigned long genRandLong(MTRand* rand);
float genRand(MTRand* rand);
void m_seedRand(MTRand* rand, unsigned long seed);

#endif /* #ifndef __MTWISTER_H */

// OpenMP set number of threads
DECLDIR void set_threads(const unsigned int nthrd){
    omp_set_num_threads(nthrd);
}

// Single process-wide RNG state used by all exported functions below.
// NOTE(review): it is read/written without synchronization; confirm the
// Python caller only invokes the RNG-using entry points serially.
MTRand randgen;

DECLDIR void setseed(const unsigned int seed){
    m_seedRand(&randgen, seed);
}

// Draw ntrials candidate sphere centers uniformly inside the unit cylinder
// (|x,y| <= 1, |z| <= zrange) by rejection sampling; ds[i] gets the (negative)
// distance from the candidate to the cylinder wall.
DECLDIR void drawnewpositions(float * const pos3, float * const ds, const unsigned int ntrials, const float zrange){
    for(unsigned int i=0; i<ntrials; i++){
        float x,y,z;
        while(1){
            x = 2*genRand(&randgen)-1;
            y = 2*genRand(&randgen)-1;
            if(x*x + y*y<=1){
                z = zrange*(2*genRand(&randgen)-1);
                break;
            }
        }
        pos3[3*i] = x;
        pos3[3*i+1] = y;
        pos3[3*i+2] = z;
        ds[i] = sqrtf(x*x + y*y)-1;
    }
}

// After the newest sphere (spheres[(nspheres-1)*5 ..]) is placed, refresh the
// candidate positions it invalidated: candidates inside it are re-drawn until
// they clear every existing sphere; others get their clearance ds tightened.
// Writes indices of changed candidates into updated[] and returns their count.
// spheres is laid out as 5 floats per sphere: x, y, z, radius, value.
DECLDIR unsigned int newsphere(float * const pos3, float * const ds, const float * const spheres, const unsigned int ntrials, const unsigned int nspheres, const float zrange, unsigned int * const updated){
    const float nx = spheres[(nspheres-1)*5];
    const float ny = spheres[(nspheres-1)*5+1];
    const float nz = spheres[(nspheres-1)*5+2];
    const float nsz = spheres[(nspheres-1)*5+3];
    unsigned int nupdated=0;
    for(unsigned int i=0; i<ntrials; i++){
        float x = pos3[3*i];
        float y = pos3[3*i+1];
        float z = pos3[3*i+2];
        // signed distance from the candidate to the new sphere's surface
        float dsv = sqrtf((nx-x)*(nx-x) + (ny-y)*(ny-y) + (nz-z)*(nz-z)) - nsz;
        if(dsv<0){
            // candidate fell inside the new sphere: resample until it clears
            // the cylinder wall and every existing sphere
            while(dsv<0){
                x = 2*genRand(&randgen)-1;
                y = 2*genRand(&randgen)-1;
                dsv = 1 - sqrtf(x*x + y*y);
                if(dsv<0){
                    continue;
                }
                z = zrange*(2*genRand(&randgen)-1);
                int j;
                // minimize clearance over all spheres; the critical section
                // serializes the min-update on shared dsv
#pragma omp parallel for shared(dsv) firstprivate(x, y, z, nspheres) private(j)
                for(j=0; j<nspheres; j++){
                    const float dsn = sqrtf((spheres[5*j]-x)*(spheres[5*j]-x) + (spheres[5*j+1]-y)*(spheres[5*j+1]-y) + (spheres[5*j+2]-z)*(spheres[5*j+2]-z)) - spheres[5*j+3];
                    if(dsn<dsv){
#pragma omp critical
                        if(dsn<dsv) dsv = dsn;
                    }
                }
            }
            pos3[3*i] = x;
            pos3[3*i+1] = y;
            pos3[3*i+2] = z;
            ds[i] = -dsv;
            updated[nupdated] = i;
            nupdated++;
        }else{
            if(dsv<-ds[i]){
                ds[i] = -dsv;
                updated[nupdated] = i;
                nupdated++;
            }
        }
    }
    // printf("%d\n",nupdated);
    return nupdated;
}

// Rasterize slice iz of the phantom into vol (n[0] x n[1] per slice) with
// supersampling^3 subsamples per voxel; voxels inside a sphere accumulate
// that sphere's value, background voxels accumulate 1.
// NOTE(review): the per-thread malloc results are not checked for NULL.
DECLDIR void genvol(const float * const spheres, const unsigned int nspheres, float * const vol, const unsigned int * const n, const float voxsize, const float * const c, const unsigned int supersampling, const unsigned int iz){
    const unsigned int ninslc = n[0]*n[1];
    int i;
#pragma omp parallel for private(i)
    for(i=0; i<ninslc; i++){
        vol[iz*ninslc+i] = 0;
    }
    for(unsigned int sz=0; sz<supersampling; sz++){
#pragma omp parallel
        {
            // per-thread scratch: candidate sphere list + cached z-distances
            unsigned int * const temp = (unsigned int *) malloc(nspheres*sizeof(unsigned int));
            long double * const dzs = (long double *) malloc(nspheres*sizeof(long double));
            long double * const s2s = (long double *) malloc(nspheres*sizeof(long double));
            for(unsigned int j=0; j<nspheres; j++){
                s2s[j] = spheres[5*j+3]*spheres[5*j+3];
            }
            long double z = c[2] + (iz+0.5)*voxsize - n[2]*voxsize/2 + sz*voxsize/supersampling - voxsize/2 + voxsize/(2*supersampling);
            // prune to spheres whose z-extent intersects this subsample plane
            unsigned int ntocheck=0;
            for(unsigned int j=0; j<nspheres; j++){
                dzs[j] = (z-spheres[5*j+2])*(z-spheres[5*j+2]);
                if(dzs[j]<s2s[j]){
                    temp[ntocheck] = j;
                    ntocheck++;
                }
            }
#pragma omp for schedule(dynamic) private(i)
            for(i=0; i<ninslc; i++){
                const unsigned int iy = i/n[0];
                const unsigned int ix = i%n[0];
                for(unsigned int sx=0; sx<supersampling; sx++){
                    const long double x = c[0] + (ix+0.5)*voxsize - n[0]*voxsize/2 + sx*voxsize/supersampling - voxsize/2 + voxsize/(2*supersampling);
                    for(unsigned int sy=0; sy<supersampling; sy++){
                        const long double y = c[1] + (iy+0.5)*voxsize - n[1]*voxsize/2 + sy*voxsize/supersampling - voxsize/2 + voxsize/(2*supersampling);
                        // outside the unit cylinder: contributes nothing
                        if(sqrtl(x*x+y*y)>1){
                            continue;
                        }
                        unsigned char found=0;
                        long double s2,dx,dy,dz;
                        for(unsigned int q=0; q<ntocheck; q++){
                            const unsigned int j = temp[q];
                            dx = (x-spheres[5*j])*(x-spheres[5*j]);
                            if(dx+dzs[j] >= s2s[j]) continue;
                            dy = (y-spheres[5*j+1])*(y-spheres[5*j+1]);
                            if(dx+dy+dzs[j]<s2s[j]){
                                vol[iz*ninslc+i]+=spheres[5*j+4];
                                found=1;
                                break;
                            }
                        }
                        if(found==0){
                            vol[iz*ninslc+i]+=1;
                        }
                    }
                }
            }
            free(dzs);
            free(temp);
            free(s2s);
        }
    }
    // normalize by the number of subsamples per voxel
#pragma omp parallel for private(i)
    for(i=0; i<ninslc; i++){
        vol[iz*ninslc+i]/=supersampling*supersampling*supersampling;
    }
}

// Parallel-beam projection at the given angle: start from the analytic
// path length through the unit cylinder, then subtract each sphere's
// attenuation deficit via per-thread accumulators merged at the end.
// NOTE(review): calloc result is not checked for NULL.
DECLDIR void genparproj(const float * const spheres, const unsigned int nspheres, float * const proj, const unsigned int * const n, const float pixsize, const float * const c, const float angle, const float * const rotc){
    const unsigned int ntotal = n[0]*n[1];
    const long double ca = cosl(angle);
    const long double sa = sinl(angle);
    int i;
#pragma omp parallel for private(i)
    for(i=0; i<ntotal; i++){
        const unsigned int ix = i % n[0];
        const long double x = c[0] + (ix+0.5)*pixsize - n[0]*pixsize/2;
        const long double px = rotc[0] * ca + rotc[1] * sa;
        const long double dx = (x-px)*(x-px);
        if(dx>=1){
            proj[i]=0;
        }else{
            // chord length through the unit cylinder
            proj[i] = 2*sqrtl(1 - dx);
        }
    }
    const unsigned int nthr = omp_get_max_threads();
    // one detector-sized accumulator per thread to avoid write contention
    long double * const temp = (long double *) calloc(nthr*ntotal,sizeof(long double));
#pragma omp parallel
    {
        const unsigned int tidx = ntotal*omp_get_thread_num();
#pragma omp for schedule(dynamic) private(i)
        for(i=0; i<nspheres; i++){
            const long double s2 = spheres[5*i+3]*spheres[5*i+3];
            const long double py = spheres[5*i+2];
            const long double px = (rotc[0] + spheres[5*i]) * ca + (rotc[1] + spheres[5*i+1]) * sa;
            const long double pyi = (py - c[1])/pixsize + 0.5*(n[1]-1);
            const long double pxi = (px - c[0])/pixsize + 0.5*(n[0]-1);
            const unsigned int sz = spheres[5*i+3]/pixsize + 1;
            // clamp the sphere's pixel bounding box to the detector
            int l = pxi-sz;
            int r = pxi+sz;
            int u = pyi-sz;
            int d = pyi+sz;
            if(l<0) l=0;
            if(r>=n[0]) r=n[0]-1;
            if(u<0) u=0;
            if(d>=n[1]) d=n[1]-1;
            for(unsigned int j=u; j<=d;j++){
                const long double y = c[1] + (j+0.5)*pixsize - n[1]*pixsize/2;
                const long double dy = (y-spheres[5*i+2])*(y-spheres[5*i+2]);
                if(dy >= s2) continue;
                for(unsigned int k=l; k<=r; k++){
                    const long double x = c[0] + (k+0.5)*pixsize - n[0]*pixsize/2;
                    const long double dx = (x-px)*(x-px);
                    if(dx+dy<s2){
                        // chord through the sphere, weighted by density deficit
                        temp[tidx+j*n[0]+k] -= 2*(1-spheres[5*i+4])*sqrtl(s2 - dx - dy);
                    }
                }
            }
        }
    }
    // merge the per-thread accumulators into proj
#pragma omp parallel for private(i)
    for(i=0; i<ntotal; i++){
        long double tmpf = 0;
        for(unsigned int j=0;j<nthr;j++){
            tmpf += temp[j*ntotal + i];
        }
        proj[i] += tmpf;
    }
    free(temp);
}

// Orthographic depth-buffer style rendering of the spheres (value >= cutoff):
// each pixel keeps the contribution of the front-most surface point (smallest
// t), with optional z-limit and octant cutout rejection; rejected front faces
// fall back to the back face with a 0.75 shading factor.
// NOTE(review): calloc/malloc results are not checked for NULL.
DECLDIR void gen3dproj(const float * const spheres, const unsigned int nspheres, float * const proj, const unsigned int * const n, const float pixsize, const float maxz, const float * const rot, const unsigned int cutout, const float cutoff){
    const unsigned int ntotal = n[0]*n[1];
    int i;
    const unsigned int nthr = omp_get_max_threads();
    // per-thread value and depth buffers, merged after the parallel region
    long double * const temp = (long double *) calloc(nthr*ntotal,sizeof(long double));
    long double * const mint = (long double *) malloc(nthr*ntotal*sizeof(long double));
#pragma omp parallel for private(i)
    for(i=0;i<nthr*ntotal;i++){
        mint[i] = INFINITY;
    }
#pragma omp parallel
    {
        const unsigned int tidx = ntotal*omp_get_thread_num();
#pragma omp for schedule(dynamic) private(i)
        for(i=0; i<nspheres; i++){
            if(spheres[5*i+4]<cutoff) continue;
            const long double s2 = spheres[5*i+3]*spheres[5*i+3];
            const long double px = spheres[5*i];
            const long double pz = spheres[5*i+1];
            const long double py = spheres[5*i+2];
            const long double pyi = py/pixsize + 0.5*(n[1]-1);
            const long double pxi = px/pixsize + 0.5*(n[0]-1);
            const unsigned int sz = spheres[5*i+3]/pixsize + 1;
            int l = pxi-sz;
            int r = pxi+sz;
            int u = pyi-sz;
            int d = pyi+sz;
            if(l<0) l=0;
            if(r>=n[0]) r=n[0]-1;
            if(u<0) u=0;
            if(d>=n[1]) d=n[1]-1;
            for(unsigned int j=u; j<=d;j++){
                const long double y = (j+0.5)*pixsize - n[1]*pixsize/2;
                const long double dy = (y-spheres[5*i+2])*(y-spheres[5*i+2]);
                if(dy >= s2) continue;
                for(unsigned int k=l; k<=r; k++){
                    const long double x = (k+0.5)*pixsize - n[0]*pixsize/2;
                    const long double dx = (x-px)*(x-px);
                    if(dx+dy<s2){
                        // front intersection of the view ray with the sphere
                        long double t = pz - sqrtl(s2 - dx - dy);
                        long double rx = rot[0]*x + rot[1]*t + rot[2]*y;
                        long double ry = rot[3]*x + rot[4]*t + rot[5]*y;
                        long double rz = rot[6]*x + rot[7]*t + rot[8]*y;
                        long double fac = 1;
                        char reject = fabsl(rz)>maxz;
                        if(!reject && cutout>0){
                            reject = rx<0 && ry<0 && rz<0;
                        }
                        if (reject){
                            // front face clipped: try the back face, dimmed
                            fac = 0.75;
                            t = pz + sqrtl(s2 - dx - dy);
                            rx = rot[0]*x + rot[1]*t + rot[2]*y;
                            ry = rot[3]*x + rot[4]*t + rot[5]*y;
                            rz = rot[6]*x + rot[7]*t + rot[8]*y;
                            reject = fabsl(rz)>maxz;
                            if(!reject && cutout>0){
                                reject = rx<0 && ry<0 && rz<0;
                            }
                            if (reject){
                                continue;
                            }
                        }
                        if(t < mint[tidx+j*n[0]+k]){
                            mint[tidx+j*n[0]+k] = t;
                            temp[tidx+j*n[0]+k] = fac*sqrtl(s2 - dx - dy)/spheres[5*i+3];
                        }
                    }
                }
            }
        }
    }
    // merge: per pixel, keep the value belonging to the smallest depth
#pragma omp parallel for private(i)
    for(i=0; i<ntotal; i++){
        long double tmpf = temp[i];
        long double cmint = mint[i];
        for(unsigned int j=1;j<nthr;j++){
            if(mint[j*ntotal + i]<cmint){
                cmint = mint[j*ntotal + i];
                tmpf = temp[j*ntotal + i];
            }
        }
        proj[i] = tmpf;
    }
    free(temp);
    free(mint);
}

// Cone-beam projection (source-object distance sod, source-detector distance
// sdd, detector z-offset zoff).  The body continues past this chunk.
DECLDIR void genconeproj(const float * const spheres, const unsigned int nspheres, float * const proj, const unsigned int * const n, const float pixsize, const float zoff, const float sod, const float sdd){
    const unsigned int ntotal = n[0]*n[1];
    int i;
#pragma omp parallel for schedule(dynamic) private(i)
    for(i=0; i<ntotal; i++){
        long double tmp=0;
        const unsigned int ix = i % n[0];
        const
unsigned int iy = i / n[0]; const long double x = (ix+0.5)*pixsize - n[0]*pixsize/2; const long double y = (iy+0.5)*pixsize - n[1]*pixsize/2; // https://math.stackexchange.com/questions/2613781/line-cylinder-intersection // long double bxl, bxr, byl, byr, bzl, bzr; const long double x0 = -sod; const long double k = x/sdd; const long double l = y/sdd; const long double df = 1 - (x0*x0 - 1)*k*k; if(df>0){ const long double t1 = -(sqrtl(df)+x0)/(k*k+1); const long double t2 = (sqrtl(df)-x0)/(k*k+1); const long double dx = t2 - t1; const long double dy = k*dx; const long double dz = l*dx; tmp = sqrtl(dx*dx+dy*dy+dz*dz); }else{ proj[i]=0; continue; } const long double sz = sqrtl(1+k*k+l*l); const long double tx = 1/sz; const long double ty = k/sz; const long double tz = l/sz; // https://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line for(unsigned int j=0; j<nspheres; j++){ const long double sz = spheres[5*j+2]-zoff; const long double sx = spheres[5*j]; const long double sy = spheres[5*j+1]; const long double s2 = spheres[5*j+3]*spheres[5*j+3]; const long double psz = (x0-sx)*tx - sy*ty - sz*tz; const long double pdx = x0-sx - psz*tx; const long double pdy = -sy - psz*ty; const long double pdz = -sz - psz*tz; const long double dist = pdx*pdx+pdy*pdy+pdz*pdz; if(dist<s2){ tmp -= 2*(1-spheres[5*j+4])*sqrtl(s2 - dist); } } proj[i] = tmp; } } DECLDIR void average2d(const float * const invol, float * const outvol, const unsigned int nx, const unsigned int ny, const unsigned int ss){ const unsigned int npix = nx*ny; int i; #pragma omp parallel for private(i) for(i=0; i<npix; i++){ const unsigned int ix = i % nx; const unsigned int iy = i / nx; long double tmp = 0; for(unsigned int y2=ss*iy;y2<ss*(iy+1);y2++){ for(unsigned int x2=ss*ix;x2<ss*(ix+1);x2++){ tmp += invol[y2*ss*nx + x2]; } } outvol[i] = tmp/(ss*ss); } } DECLDIR unsigned int gettouching(const float * const spheres, const unsigned int nspheres, const unsigned int i, const float cutoff, unsigned int * const 
indices){ const unsigned int nthr = omp_get_max_threads(); unsigned int * const js = (unsigned int *) malloc(nthr*nspheres*sizeof(unsigned int)); unsigned int * const njs = (unsigned int *) malloc(nthr*sizeof(unsigned int)); for(unsigned int j=0;j<nthr;j++) njs[j]=0; const float x = spheres[5*i]; const float y = spheres[5*i+1]; const float z = spheres[5*i+2]; const float s = spheres[5*i+3]; #pragma omp parallel { unsigned int tid = omp_get_thread_num(); unsigned int nj = 0; int j; #pragma omp for private(j) for(j=0; j<nspheres; j++){ if(i==j) continue; const float dst = sqrtf((spheres[5*j]-x)*(spheres[5*j]-x)+(spheres[5*j+1]-y)*(spheres[5*j+1]-y)+(spheres[5*j+2]-z)*(spheres[5*j+2]-z)) - spheres[5*j+3] - s; if(dst<cutoff){ js[tid*nspheres + nj] = j; nj++; } } njs[tid] = nj; } unsigned int nindices = 0; for(unsigned int j=0; j<nthr; j++){ for(unsigned int k=0; k<njs[j]; k++){ indices[nindices] = js[j*nspheres + k]; nindices++; } } free(js); free(njs); return nindices; } // Adapted from https://www.johndcook.com/blog/cpp_random_number_generation/ unsigned int poissonsmall(const float lambda){ float p = 1.0; const float L = expf(-lambda); unsigned int k = 0; do { k++; p *= genRand(&randgen); } while (p > L); return k - 1; } unsigned int poissonlarge(const float lambda){ // "Rejection method PA" from "The Computer Generation of Poisson Random Variables" by A. C. Atkinson // Journal of the Royal Statistical Society Series C (Applied Statistics) Vol. 28, No. 1. (1979) // The article is on pages 29-35. The algorithm given here is on page 32. 
float c = 0.767 - 3.36/lambda; float beta = M_PI/sqrtf(3.0*lambda); float alpha = beta*lambda; float k = logf(c) - lambda - logf(beta); for(;;) { float u = genRand(&randgen); float x = (alpha - logf((1.0 - u)/u))/beta; int n = (int) floor(x + 0.5); if (n < 0) continue; float v = genRand(&randgen); float y = alpha - beta*x; float temp = 1.0 + expf(y); float lhs = y + logf(v/(temp*temp)); float rhs = k + n*logf(lambda) - lgammaf(n+1); if (lhs <= rhs) return n; } } float poisson(const float lambda){ return (lambda < 30.0) ? poissonsmall(lambda) : poissonlarge(lambda); } DECLDIR void applypoisson(float * const proj, const unsigned int npix, const float flux, const float factor){ int i; #pragma omp parallel for private(i) for(i=0; i<npix; i++){ float tmp = poisson(flux*expf(-proj[i]*factor)); if(tmp<=0) tmp = 1; proj[i] = -logf(tmp/flux)/factor; } } /* An implementation of the MT19937 Algorithm for the Mersenne Twister * by Evan Sultanik. Based upon the pseudocode in: M. Matsumoto and * T. Nishimura, "Mersenne Twister: A 623-dimensionally * equidistributed uniform pseudorandom number generator," ACM * Transactions on Modeling and Computer Simulation Vol. 8, No. 1, * January pp.3-30 1998. * * http://www.sultanik.com/Mersenne_twister */ #define UPPER_MASK 0x80000000 #define LOWER_MASK 0x7fffffff #define TEMPERING_MASK_B 0x9d2c5680 #define TEMPERING_MASK_C 0xefc60000 inline void m_seedRand(MTRand* rand, unsigned long seed) { /* set initial seeds to mt[STATE_VECTOR_LENGTH] using the generator * from Line 25 of Table 1 in: Donald Knuth, "The Art of Computer * Programming," Vol. 2 (2nd Ed.) pp.102. */ rand->mt[0] = seed & 0xffffffff; for(rand->index=1; rand->index<STATE_VECTOR_LENGTH; rand->index++) { rand->mt[rand->index] = (6069 * rand->mt[rand->index-1]) & 0xffffffff; } } /** * Creates a new random number generator from a given seed. 
*/ MTRand seedRand(unsigned long seed) { MTRand rand; m_seedRand(&rand, seed); return rand; } /** * Generates a pseudo-randomly generated long. */ unsigned long genRandLong(MTRand* rand) { unsigned long y; static unsigned long mag[2] = {0x0, 0x9908b0df}; /* mag[x] = x * 0x9908b0df for x = 0,1 */ if(rand->index >= STATE_VECTOR_LENGTH || rand->index < 0) { /* generate STATE_VECTOR_LENGTH words at a time */ int kk; if(rand->index >= STATE_VECTOR_LENGTH+1 || rand->index < 0) { m_seedRand(rand, 4357); } for(kk=0; kk<STATE_VECTOR_LENGTH-STATE_VECTOR_M; kk++) { y = (rand->mt[kk] & UPPER_MASK) | (rand->mt[kk+1] & LOWER_MASK); rand->mt[kk] = rand->mt[kk+STATE_VECTOR_M] ^ (y >> 1) ^ mag[y & 0x1]; } for(; kk<STATE_VECTOR_LENGTH-1; kk++) { y = (rand->mt[kk] & UPPER_MASK) | (rand->mt[kk+1] & LOWER_MASK); rand->mt[kk] = rand->mt[kk+(STATE_VECTOR_M-STATE_VECTOR_LENGTH)] ^ (y >> 1) ^ mag[y & 0x1]; } y = (rand->mt[STATE_VECTOR_LENGTH-1] & UPPER_MASK) | (rand->mt[0] & LOWER_MASK); rand->mt[STATE_VECTOR_LENGTH-1] = rand->mt[STATE_VECTOR_M-1] ^ (y >> 1) ^ mag[y & 0x1]; rand->index = 0; } y = rand->mt[rand->index++]; y ^= (y >> 11); y ^= (y << 7) & TEMPERING_MASK_B; y ^= (y << 15) & TEMPERING_MASK_C; y ^= (y >> 18); return y; } /** * Generates a pseudo-randomly generated double in the range [0..1]. */ float genRand(MTRand* rand) { return((float)genRandLong(rand) / (unsigned long)0xffffffff); }
cheby.gold.h
#include "common/common.hpp"

// One Chebyshev smoother step for a 27-point Laplacian-like stencil:
//   out = Ac + c1*(Ac - Ap) + c2*Dinv*(RHS - A*Ac)
// where A*Ac is the variable-coefficient stencil applied to the current
// iterate Ac, Ap is the previous iterate, and Dinv the inverse diagonal.
// Stencil weights: 0.03 for the 8 corners, 0.1 for the 12 edges, 0.46 for
// the 6 faces, -4.26 for the center, all scaled by h2inv.
// Only interior points (1..N-2 per axis) are updated; boundaries are left as-is.
// NOTE(review): this is a bit-exact gold reference -- keep the floating-point
// evaluation order unchanged.
void cheby_step (double *out_def, double* Ac_def, double* Ap_def, double* RHS_def, double* Dinv_def, double h2inv, double c1, double c2, int N) {
  // Reinterpret the flat buffers as N x N x N arrays (C99-style VLA casts,
  // accepted here as a GCC/Clang extension in C++).
  double (*Ap)[N][N] = (double (*)[N][N])Ap_def;
  double (*Ac)[N][N] = (double (*)[N][N])Ac_def;
  double (*out)[N][N] = (double (*)[N][N])out_def;
  double (*RHS)[N][N] = (double (*)[N][N])RHS_def;
  double (*Dinv)[N][N] = (double (*)[N][N])Dinv_def;
#pragma omp parallel for
  for (int k = 1; k < N-1; k++) {
    for (int j = 1; j < N-1; j++) {
#pragma GCC ivdep
      for (int i = 1; i < N-1; i++) {
        // MA = (A*Ac) at (k,j,i): center minus the weighted neighbor sum.
        double MA = Ac[k][j][i] -
          h2inv * (0.03 * (Ac[k-1][j-1][i-1] + Ac[k-1][j-1][i+1] +
                           Ac[k-1][j+1][i-1] + Ac[k-1][j+1][i+1] +
                           Ac[k+1][j-1][i-1] + Ac[k+1][j-1][i+1] +
                           Ac[k+1][j+1][i-1] + Ac[k+1][j+1][i+1]) +
                   0.1 * (Ac[k-1][j-1][i] + Ac[k-1][j][i-1] +
                          Ac[k-1][j][i+1] + Ac[k-1][j+1][i] +
                          Ac[k][j-1][i-1] + Ac[k][j-1][i+1] +
                          Ac[k][j+1][i-1] + Ac[k][j+1][i+1] +
                          Ac[k+1][j-1][i] + Ac[k+1][j][i-1] +
                          Ac[k+1][j][i+1] + Ac[k+1][j+1][i]) +
                   0.46 * (Ac[k-1][j][i] + Ac[k][j-1][i] +
                           Ac[k][j][i-1] + Ac[k][j][i+1] +
                           Ac[k][j+1][i] + Ac[k+1][j][i]) -
                   4.26 * Ac[k][j][i]);
        // Chebyshev update with momentum c1 and preconditioned residual c2.
        out[k][j][i] = Ac[k][j][i] + c1 * (Ac[k][j][i] - Ap[k][j][i]) +
                       c2 * Dinv[k][j][i] * (RHS[k][j][i] - MA);
      }
    }
  }
}

// Gold reference: six chained Chebyshev steps, ping-ponging through three
// zero-initialized temporaries so each step sees the previous two iterates.
// The initial Ap for step 1 is the caller-provided Ap.
// NOTE(review): delete[] assumes getZero3DArray allocates with new[] --
// defined in common/common.hpp, outside this view.
extern "C" void cheby_gold (double* out, double *Ac, double* Ap, double* RHS, double* Dinv, double h2inv, double c1, double c2, int N) {
  double* temp1 = getZero3DArray<double>(N, N, N);
  double* temp2 = getZero3DArray<double>(N, N, N);
  double* temp3 = getZero3DArray<double>(N, N, N);
  cheby_step(temp1, Ac, Ap, RHS, Dinv, h2inv, c1, c2, N);
  cheby_step(temp2, temp1, Ac, RHS, Dinv, h2inv, c1, c2, N);
  cheby_step(temp3, temp2, temp1, RHS, Dinv, h2inv, c1, c2, N);
  cheby_step(temp1, temp3, temp2, RHS, Dinv, h2inv, c1, c2, N);
  cheby_step(temp2, temp1, temp3, RHS, Dinv, h2inv, c1, c2, N);
  cheby_step(out, temp2, temp1, RHS, Dinv, h2inv, c1, c2, N);
  delete[] temp1;
  delete[] temp2;
  delete[] temp3;
}
GB_binop__bclr_int8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): instantiation of the generic GB_binop kernels for the BCLR
// (bit-clear) operator on int8_t. The GB_* macros below parameterize the
// template files pulled in via #include; their exact expansions are part of
// the generated contract -- do not hand-edit.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd): GB_AaddB__bclr_int8
// A.*B function (eWiseMult): GB_AemultB__bclr_int8
// A*D function (colscale): (none)
// D*A function (rowscale): (node)
// NOTE(review): "(node)" above mirrors the disabled code below; it looks like
// a generator quirk for "(none)" -- confirm against the code generator.
// C+=B function (dense accum): GB_Cdense_accumB__bclr_int8
// C+=b function (dense accum): GB_Cdense_accumb__bclr_int8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bclr_int8
// C=scalar+B GB_bind1st__bclr_int8
// C=scalar+B' GB_bind1st_tran__bclr_int8
// C=A+scalar GB_bind2nd__bclr_int8
// C=A'+scalar GB_bind2nd_tran__bclr_int8

// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = GB_BITCLR (aij, bij, int8_t, 8)

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int8_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = GB_BITCLR (x, y, int8_t, 8) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BCLR || GxB_NO_INT8 || GxB_NO_BCLR_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (Disabled for BCLR: not one of the supported accumulators, hence "(none)".)
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__bclr_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__bclr_int8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__bclr_int8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the block above already returned
    // (harmless generator artifact).
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// NOTE(review): "(node)" is almost certainly the generator's placeholder for
// "(none)" as used elsewhere in this file; code is disabled, so harmless.
GrB_Info (node)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__bclr_int8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__bclr_int8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__bclr_int8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int8_t bij = Bx [p] ;
        Cx [p] = GB_BITCLR (x, bij, int8_t, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__bclr_int8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int8_t aij = Ax [p] ;
        Cx [p] = GB_BITCLR (aij, y, int8_t, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int8_t aij = Ax [pA] ; \
    Cx [pC] = GB_BITCLR (x, aij, int8_t, 8) ; \
}

GrB_Info GB_bind1st_tran__bclr_int8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template expansion
    #undef GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int8_t aij = Ax [pA] ; \
    Cx [pC] = GB_BITCLR (aij, y, int8_t, 8) ; \
}

GrB_Info GB_bind2nd_tran__bclr_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pfem_2_monolithic_slip_scheme.h
/* ============================================================================== KratosFluidDynamicsApplication A library based on: Kratos A General Purpose Software for Multi-Physics Finite Element Analysis Version 1.0 (Released on march 05, 2007). Copyright 2007 Pooyan Dadvand, Riccardo Rossi, Janosch Stascheit, Felix Nagel pooyan@cimne.upc.edu rrossi@cimne.upc.edu janosch.stascheit@rub.de nagel@sd.rub.de - CIMNE (International Center for Numerical Methods in Engineering), Gran Capita' s/n, 08034 Barcelona, Spain - Ruhr-University Bochum, Institute for Structural Mechanics, Germany Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following condition: Distribution of this code for any commercial purpose is permissible ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
============================================================================== */ #if !defined(KRATOS_PFEM2_MONOLITHIC_SLIP_SCHEME ) #define KRATOS_PFEM2_MONOLITHIC_SLIP_SCHEME /* System includes */ /* External includes */ #include "boost/smart_ptr.hpp" /* Project includes */ #include "includes/define.h" #include "includes/model_part.h" #include "solving_strategies/schemes/scheme.h" #include "includes/variables.h" #include "includes/deprecated_variables.h" #include "containers/array_1d.h" #include "utilities/openmp_utils.h" #include "utilities/coordinate_transformation_utilities.h" #include "processes/process.h" namespace Kratos { /**@name Kratos Globals */ /*@{ */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ /**@name Enum's */ /*@{ */ /*@} */ /**@name Functions */ /*@{ */ /*@} */ /**@name Kratos Classes */ /*@{ */ template<class TSparseSpace, class TDenseSpace //= DenseSpace<double> > class PFEM2MonolithicSlipScheme : public Scheme<TSparseSpace, TDenseSpace> { public: /**@name Type Definitions */ /*@{ */ //typedef boost::shared_ptr< ResidualBasedPredictorCorrectorBossakScheme<TSparseSpace,TDenseSpace> > Pointer; KRATOS_CLASS_POINTER_DEFINITION(PFEM2MonolithicSlipScheme); typedef Scheme<TSparseSpace, TDenseSpace> BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename Element::DofsVectorType DofsVectorType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef Element::GeometryType GeometryType; /*@} */ /**@name Life Cycle */ /*@{ */ /** Constructor without a turbulence model */ PFEM2MonolithicSlipScheme(unsigned int DomainSize) : Scheme<TSparseSpace, TDenseSpace>(), mRotationTool(DomainSize,DomainSize+1,SLIP) { } /** Destructor. 
*/ virtual ~PFEM2MonolithicSlipScheme() {} /*@} */ /**@name Operators */ /*@{ */ /** Performing the update of the solution. */ //*************************************************************************** virtual void Update(ModelPart& r_model_part, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dv, TSystemVectorType& b) override { KRATOS_TRY; mRotationTool.RotateVelocities(r_model_part); BasicUpdateOperations(r_model_part, rDofSet, A, Dv, b); mRotationTool.RecoverVelocities(r_model_part); KRATOS_CATCH("") } //*************************************************************************** virtual void BasicUpdateOperations(ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dv, TSystemVectorType& b) { KRATOS_TRY int NumThreads = OpenMPUtils::GetNumThreads(); OpenMPUtils::PartitionVector DofSetPartition; OpenMPUtils::DivideInPartitions(rDofSet.size(), NumThreads, DofSetPartition); //update of velocity (by DOF) #pragma omp parallel { int k = OpenMPUtils::ThisThread(); typename DofsArrayType::iterator DofSetBegin = rDofSet.begin() + DofSetPartition[k]; typename DofsArrayType::iterator DofSetEnd = rDofSet.begin() + DofSetPartition[k + 1]; for (typename DofsArrayType::iterator itDof = DofSetBegin; itDof != DofSetEnd; itDof++) { if (itDof->IsFree()) { itDof->GetSolutionStepValue() += TSparseSpace::GetValue(Dv, itDof->EquationId()); } } } KRATOS_CATCH("") } //*************************************************************************** /** this function is designed to be called in the builder and solver to introduce the selected time integration scheme. It "asks" the matrix needed to the element and performs the operations needed to introduce the seected time integration scheme. 
this function calculates at the same time the contribution to the LHS and to the RHS of the system */
void CalculateSystemContributions(Element& rCurrentElement,
                                  LocalSystemMatrixType& LHS_Contribution,
                                  LocalSystemVectorType& RHS_Contribution,
                                  Element::EquationIdVectorType& EquationId,
                                  const ProcessInfo& CurrentProcessInfo) override
{
    KRATOS_TRY

    //basic operations for the element considered: build the local system and
    //fetch the global equation ids of the element dofs
    rCurrentElement.CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo);

    rCurrentElement.EquationIdVector(EquationId, CurrentProcessInfo);

    // If there is a slip condition, apply it on a rotated system of coordinates
    // (rotation must happen before the slip constraint is imposed)
    mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentElement.GetGeometry());
    mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentElement.GetGeometry());

    KRATOS_CATCH("")
}

/// RHS-only variant of the above (same rotation/slip treatment, no LHS assembly).
void CalculateRHSContribution(Element& rCurrentElement,
                              LocalSystemVectorType& RHS_Contribution,
                              Element::EquationIdVectorType& EquationId,
                              const ProcessInfo& CurrentProcessInfo) override
{
    //basic operations for the element considered
    rCurrentElement.CalculateRightHandSide(RHS_Contribution, CurrentProcessInfo);

    rCurrentElement.EquationIdVector(EquationId, CurrentProcessInfo);

    // If there is a slip condition, apply it on a rotated system of coordinates
    mRotationTool.Rotate(RHS_Contribution,rCurrentElement.GetGeometry());
    mRotationTool.ApplySlipCondition(RHS_Contribution,rCurrentElement.GetGeometry());
}

/** functions totally analogous to the precedent but applied to the "condition" objects */
virtual void CalculateSystemContributions(Condition& rCurrentCondition,
                                          LocalSystemMatrixType& LHS_Contribution,
                                          LocalSystemVectorType& RHS_Contribution,
                                          Element::EquationIdVectorType& EquationId,
                                          const ProcessInfo& CurrentProcessInfo) override
{
    KRATOS_TRY

    rCurrentCondition.CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo);

    rCurrentCondition.EquationIdVector(EquationId, CurrentProcessInfo);

    // Rotate contributions (to match coordinates for slip conditions)
    // NOTE(review): unlike the element overload above, the rotation/slip step is
    // commented out for conditions — confirm this asymmetry is intentional.
    //mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentCondition->GetGeometry());
    //mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentCondition->GetGeometry());

    KRATOS_CATCH("")
}

/// RHS-only contribution for conditions; here the rotation/slip step IS applied.
virtual void CalculateRHSContribution(Condition& rCurrentCondition,
                                      LocalSystemVectorType& RHS_Contribution,
                                      Element::EquationIdVectorType& EquationId,
                                      const ProcessInfo& rCurrentProcessInfo) override
{
    KRATOS_TRY;

    //basic operations for the condition considered
    rCurrentCondition.CalculateRightHandSide(RHS_Contribution,rCurrentProcessInfo);

    rCurrentCondition.EquationIdVector(EquationId,rCurrentProcessInfo);

    // Rotate contributions (to match coordinates for slip conditions)
    mRotationTool.Rotate(RHS_Contribution,rCurrentCondition.GetGeometry());
    mRotationTool.ApplySlipCondition(RHS_Contribution,rCurrentCondition.GetGeometry());

    KRATOS_CATCH("");
}

//*************************************************************************************
//*************************************************************************************

/// Validates the time step before solving: a zero DELTA_TIME would make the
/// time-integration coefficients meaningless, so it is rejected up front.
virtual void InitializeSolutionStep(ModelPart& r_model_part,
                                    TSystemMatrixType& A,
                                    TSystemVectorType& Dx,
                                    TSystemVectorType& b) override
{
    const ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();

    Scheme<TSparseSpace, TDenseSpace>::InitializeSolutionStep(r_model_part, A, Dx, b);

    double DeltaTime = CurrentProcessInfo[DELTA_TIME];

    if (DeltaTime == 0)
        KRATOS_THROW_ERROR(std::logic_error, "detected delta_time = 0 ... check if the time step is created correctly for the current model part", "");
}

//*************************************************************************************
//*************************************************************************************

/// No per-iteration work is needed by this scheme; kept as an empty hook.
virtual void InitializeNonLinIteration(ModelPart& r_model_part,
                                       TSystemMatrixType& A,
                                       TSystemVectorType& Dx,
                                       TSystemVectorType& b) override
{
    KRATOS_TRY
    KRATOS_CATCH("")
}

/// Empty hook; the OSS-projection computation below was disabled deliberately
/// (kept as reference for a possible re-enable).
virtual void FinalizeNonLinIteration(ModelPart &rModelPart,
                                     TSystemMatrixType &A,
                                     TSystemVectorType &Dx,
                                     TSystemVectorType &b) override
{
    /*
    ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();

    //if orthogonal subscales are computed
    if (CurrentProcessInfo[OSS_SWITCH] == 1.0)
    {
        if (rModelPart.GetCommunicator().MyPID() == 0)
            std::cout << "Computing OSS projections" << std::endl;

        for (typename ModelPart::NodesContainerType::iterator ind = rModelPart.NodesBegin(); ind != rModelPart.NodesEnd(); ind++)
        {
            noalias(ind->FastGetSolutionStepValue(ADVPROJ)) = ZeroVector(3);
            ind->FastGetSolutionStepValue(DIVPROJ) = 0.0;
            ind->FastGetSolutionStepValue(NODAL_AREA) = 0.0;
        }//end of loop over nodes

        //loop on nodes to compute ADVPROJ CONVPROJ NODALAREA
        array_1d<double, 3 > output;

        for (typename ModelPart::ElementsContainerType::iterator elem = rModelPart.ElementsBegin(); elem != rModelPart.ElementsEnd(); elem++)
        {
            elem->Calculate(ADVPROJ, output, CurrentProcessInfo);
        }

        rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA);
        rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ);
        rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ);

        // Correction for periodic conditions
        this->PeriodicConditionProjectionCorrection(rModelPart);

        for (typename ModelPart::NodesContainerType::iterator ind = rModelPart.NodesBegin(); ind != rModelPart.NodesEnd(); ind++)
        {
            if (ind->FastGetSolutionStepValue(NODAL_AREA) == 0.0)
            {
                ind->FastGetSolutionStepValue(NODAL_AREA) = 1.0;
                //KRATOS_WATCH("*********ATTENTION: NODAL AREA IS ZERRROOOO************");
            }
            const double Area = ind->FastGetSolutionStepValue(NODAL_AREA);
            ind->FastGetSolutionStepValue(ADVPROJ) /= Area;
            ind->FastGetSolutionStepValue(DIVPROJ) /= Area;
        }
    }
    */
}

/// Delegates to the base Scheme (which calls FinalizeSolutionStep on elements
/// and conditions). The disabled code computed nodal REACTION from element RHS.
void FinalizeSolutionStep(ModelPart &rModelPart,
                          TSystemMatrixType &A,
                          TSystemVectorType &Dx,
                          TSystemVectorType &b) override
{
    /*
    Element::EquationIdVectorType EquationId;
    LocalSystemVectorType RHS_Contribution;
    LocalSystemMatrixType LHS_Contribution;
    ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();

    for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); ++itNode)
    {
        itNode->FastGetSolutionStepValue(REACTION_X,0) = 0.0;
        itNode->FastGetSolutionStepValue(REACTION_Y,0) = 0.0;
        itNode->FastGetSolutionStepValue(REACTION_Z,0) = 0.0;
    }

    for (ModelPart::ElementsContainerType::ptr_iterator itElem = rModelPart.Elements().ptr_begin(); itElem != rModelPart.Elements().ptr_end(); ++itElem)
    {
        //KRATOS_WATCH(LHS_Contribution);
        //basic operations for the element considered
        (*itElem)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo);
        (*itElem)->EquationIdVector(EquationId, CurrentProcessInfo);

        GeometryType& rGeom = (*itElem)->GetGeometry();
        unsigned int NumNodes = rGeom.PointsNumber();
        unsigned int Dimension = rGeom.WorkingSpaceDimension();

        unsigned int index = 0;
        for (unsigned int i = 0; i < NumNodes; i++)
        {
            rGeom[i].FastGetSolutionStepValue(REACTION_X,0) -= RHS_Contribution[index++];
            rGeom[i].FastGetSolutionStepValue(REACTION_Y,0) -= RHS_Contribution[index++];
            if (Dimension == 3) rGeom[i].FastGetSolutionStepValue(REACTION_Z,0) -= RHS_Contribution[index++];
            index++; // skip pressure dof
        }
    }

    rModelPart.GetCommunicator().AssembleCurrentData(REACTION);
    */

    // Base scheme calls FinalizeSolutionStep method of elements and conditions
    Scheme<TSparseSpace, TDenseSpace>::FinalizeSolutionStep(rModelPart, A, Dx, b);
}

//************************************************************************************************
//************************************************************************************************

/*@} */
/**@name Operations */
/*@{ */

/*@} */
/**@name Access */
/*@{ */

/*@} */
/**@name Inquiry */
/*@{ */

/*@} */
/**@name Friends */
/*@{ */

/*@} */

protected:
/**@name Protected static Member Variables */
/*@{ */

/*@} */
/**@name Protected member Variables */
/*@{ */

/*@} */
/**@name Protected Operators*/
/*@{ */

/*@} */
/**@name Protected Access */
/*@{ */

/*@} */
/**@name Protected Inquiry */
/*@{ */

/*@} */
/**@name Protected LifeCycle */
/*@{ */

/*@} */

private:
/**@name Static Member Variables */
/*@{ */

/*@} */
/**@name Member Variables */
/*@{ */

// Utility used by the contribution methods above to rotate local systems into
// a normal/tangential frame on slip-flagged nodes and impose the slip constraint.
CoordinateTransformationUtils<LocalSystemMatrixType,LocalSystemVectorType,double> mRotationTool;

/*@} */
/**@name Private Operators*/
/*@{ */

/*@} */
/**@name Private Operations*/
/*@{ */

/*@} */
/**@name Private Access */
/*@{ */

/*@} */
/**@name Private Inquiry */
/*@{ */

/*@} */
/**@name Un accessible methods */
/*@{ */

/*@} */

}; /* Class Scheme */

/*@} */

/**@name Type Definitions */
/*@{ */

/*@} */

} /* namespace Kratos.*/

#endif /* KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_BOSSAK_SCHEME defined */
solve.c
#include <stdlib.h> #include <stdio.h> #include <assert.h> #include <omp.h> #include "shared_consts.h" void partial_choose(double **matrix, int rows, int column, int sub_matrix_size) { int i, max_i, start = rows - sub_matrix_size; double max_v; #ifndef FALSE_SHARING __assume_aligned(matrix, ALIGNMENT_SIZE); #endif #ifdef NO_DEFAULT_VECTORIZATION #pragma novector #endif for (i = start, max_v = matrix[start][start]; i < rows; i++) if (matrix[i][start] > max_v) { max_v = matrix[i][start]; max_i = i; } if (matrix[start][start] < max_v) { double *tmp = matrix[max_i]; matrix[max_i] = matrix[start]; matrix[start] = tmp; } } double* solve_with_partial_choose(double **matrix, int rows, int columns) { int i, j, k; double c, *R = ALLOC_MEMORY(sizeof(double)*rows); char nieskonczenie_wiele = 0; if (R == NULL) return NULL; #ifndef FALSE_SHARING __assume_aligned(R, ALIGNMENT_SIZE); __assume_aligned(matrix, ALIGNMENT_SIZE); #endif //postępowanie proste for (i = 0; i < rows; i++) { partial_choose(matrix, rows, columns, rows - i); if (matrix[i][i] != 0) { for (j = i + 1; j < rows; j++) { #ifdef MANUAL_VECTORIZATION #pragma ivdep #endif for (k = i, c = matrix[j][i] / matrix[i][i]; k < columns; k++) { matrix[j][k] -= c*matrix[i][k]; } } } } //sprawdzam czy istnieją rozwiązania for (i = rows - 1; i >= 0; i--) if (matrix[i][i] == 0 && matrix[i][columns - 1] != 0) return NULL; //brak rozwiązań else if (matrix[i][i] == 0 && matrix[i][columns - 1] == 0) nieskonczenie_wiele = 1; if (nieskonczenie_wiele) { R[0] = INFINITY; return R; } //Postępowanie odwrtone for (i = rows - 1; i >= 0; i--) { for (R[i] = matrix[i][columns - 1], j = i + 1; j < columns - 1; j++) R[i] -= matrix[i][j] * R[j]; assert(matrix[i][i] != 0); R[i] /= matrix[i][i]; } return R; } void partial_choose_parallel(double **matrix, int rows, int columns, int sub_matrix_size) { int i, j, start = rows - sub_matrix_size, max_i = start, priv_max_i = start; double max_v = matrix[start][start], priv_max_v = matrix[start][start]; if 
(start >= rows) return; #ifndef FALSE_SHARING __assume_aligned(matrix, ALIGNMENT_SIZE); #endif #pragma omp parallel default(none) shared(matrix, rows, columns, sub_matrix_size, max_v, max_i, start) firstprivate(priv_max_i, priv_max_v) private(i) { #pragma omp for schedule(static) #ifdef NO_DEFAULT_VECTORIZATION #pragma novector #endif for (i = start; i < rows; i++) { if (priv_max_v < matrix[i][start]) { priv_max_i = i; priv_max_v = matrix[i][start]; } } #pragma omp flush (max_v) { if (priv_max_v > max_v) { #pragma omp critical { if (priv_max_v > max_v) { max_v = priv_max_v; max_i = priv_max_i; } } } } } if (matrix[start][start] < matrix[max_i][start]) { //przesuwam wiersz z maximum na górę if (matrix[start] != matrix[max_i]) { double *tmp = matrix[start]; matrix[start] = matrix[max_i]; matrix[max_i] = tmp; } } } double* solve_with_partial_choose_parallel(double **matrix, int rows, int columns) { int i, j, k; double *R = ALLOC_MEMORY(sizeof(double)*rows); char infinitely_many = 0, illegal = 0; if (R == NULL) return NULL; #ifndef FALSE_SHARING __assume_aligned(R, ALIGNMENT_SIZE); __assume_aligned(matrix, ALIGNMENT_SIZE); #endif //postępowanie proste #pragma omp parallel for schedule(static) num_threads(1) default(none) private(i, j, k) shared(matrix, rows, columns) for (i = 0; i < rows; i++) { partial_choose(matrix, rows, columns, rows - i); if (matrix[i][i] != 0) { #pragma omp parallel for schedule(static) default(none) private(j, k) shared(matrix, rows, columns, i) for (j = i + 1; j < rows; j++) { double c = matrix[j][i] / matrix[i][i]; #ifdef MANUAL_VECTORIZATION #pragma ivdep #endif for (k = i; k < columns; k++) { matrix[j][k] -= c*matrix[i][k]; } } } } //sprawdzam czy istnieją rozwiązania #pragma omp parallel for schedule(static) private(i) #ifdef NO_DEFAULT_VECTORIZATION #pragma novector #endif for (i = rows - 1; i >= 0; i--) if (matrix[i][i] == 0 && matrix[i][columns - 1] != 0) illegal = 1; //brak rozwiązań else if (matrix[i][i] == 0 && matrix[i][columns - 1] 
== 0) infinitely_many = 1; if (illegal) return NULL; if (infinitely_many) { R[0] = INFINITY; return R; } //Postępowanie odwrtone for (i = rows - 1; i >= 0; i--) { double sum = 0; R[i] = matrix[i][columns - 1]; #pragma omp parallel for schedule(static) private(j) reduction(+:sum) #ifdef NO_DEFAULT_VECTORIZATION #pragma novector #endif for (j = i + 1; j < columns - 1; j++) { sum += matrix[i][j] * R[j]; } R[i] -= sum; assert(matrix[i][i]); R[i] /= matrix[i][i]; } return R; }
parallel_for.h
/* * Copyright (c) 2021, Horance Liu and the respective contributors * All rights reserved. * * Use of this source code is governed by a Apache 2.0 license that can be found * in the LICENSE file. */ #pragma once #include <cassert> #include <cstdio> #include <limits> #include <string> #include <type_traits> #include <utility> #include <vector> #include "mnn/infra/config.h" #include "mnn/infra/aligned_allocator.h" #include "mnn/infra/mnn_error.h" #ifdef MNN_USE_TBB #ifndef NOMINMAX #define NOMINMAX // tbb includes windows.h in tbb/machine/windows_api.h #endif #include <tbb/task_group.h> #include <tbb/tbb.h> #endif #if !defined(MNN_USE_OMP) && !defined(MNN_SINGLE_THREAD) #include <future> // NOLINT #include <thread> // NOLINT #endif #if defined(MNN_USE_GCD) && !defined(MNN_SINGLE_THREAD) #include <dispatch/dispatch.h> #endif namespace mnn { #ifdef MNN_USE_TBB static tbb::task_scheduler_init tbbScheduler( tbb::task_scheduler_init::automatic); // tbb::task_scheduler_init::deferred); typedef tbb::BlockedRange<size_t> BlockedRange; template <typename Func> void parallel_for(size_t begin, size_t end, const Func &f, size_t grainsize) { assert(end >= begin); tbb::parallel_for( BlockedRange(begin, end, end - begin > grainsize ? 
grainsize : 1), f); } template <typename Func> void xparallel_for(size_t begin, size_t end, const Func &f) { f(BlockedRange(begin, end, 100)); } #else struct BlockedRange { typedef size_t const_iterator; BlockedRange(size_t begin, size_t end) : begin_(begin), end_(end) {} BlockedRange(int begin, int end) : begin_(begin), end_(end) {} const_iterator begin() const { return begin_; } const_iterator end() const { return end_; } private: size_t begin_; size_t end_; }; template <typename Func> void xparallel_for(size_t begin, size_t end, const Func &f) { BlockedRange r(begin, end); f(r); } #if defined(MNN_USE_OMP) template <typename Func> void parallel_for(size_t begin, size_t end, const Func &f, size_t /*grainsize*/) { assert(end >= begin); // unsigned index isn't allowed in OpenMP 2.0 #pragma omp parallel for for (int i = static_cast<int>(begin); i < static_cast<int>(end); ++i) f(BlockedRange(i, i + 1)); } #elif defined(MNN_USE_GCD) template <typename Func> void parallel_for(size_t begin, size_t end, const Func &f, size_t grainsize) { assert(end >= begin); size_t count = end - begin; size_t blockSize = grainsize; if (count < blockSize || blockSize == 0) { blockSize = 1; } size_t blockCount = (count + blockSize - 1) / blockSize; assert(blockCount > 0); dispatch_apply(blockCount, dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0), ^(size_t block) { size_t blockStart = block * blockSize; size_t blockEnd = blockStart + blockSize; if (blockEnd > end) { blockEnd = end; } assert(blockStart < blockEnd); f(BlockedRange(blockStart, blockEnd)); }); } #elif defined(MNN_SINGLE_THREAD) template <typename Func> void parallel_for(size_t begin, size_t end, const Func &f, size_t /*grainsize*/) { xparallel_for(begin, end, f); } #else template <typename Func> void parallel_for(size_t begin, size_t end, const Func &f, size_t /*grainsize*/) { assert(end >= begin); size_t nthreads = std::thread::hardware_concurrency(); size_t blockSize = (end - begin) / nthreads; if (blockSize * nthreads < end 
- begin) blockSize++; std::vector<std::future<void> > futures; size_t blockBegin = begin; size_t blockEnd = blockBegin + blockSize; if (blockEnd > end) blockEnd = end; for (size_t i = 0; i < nthreads; i++) { futures.push_back( std::move(std::async(std::launch::async, [blockBegin, blockEnd, &f] { f(BlockedRange(blockBegin, blockEnd)); }))); blockBegin += blockSize; blockEnd = blockBegin + blockSize; if (blockBegin >= end) break; if (blockEnd > end) blockEnd = end; } for (auto &future : futures) future.wait(); } #endif #endif // MNN_USE_TBB template <typename T, typename U> bool value_representation(U const &value) { return static_cast<U>(static_cast<T>(value)) == value; } template <typename T, typename Func> inline void for_( bool parallelize, size_t begin, T end, Func f, size_t grainsize = 100) { static_assert(std::is_integral<T>::value, "end must be integral type"); parallelize = parallelize && value_representation<size_t>(end); parallelize ? parallel_for(begin, end, f, grainsize) : xparallel_for(begin, end, f); } template <typename T, typename Func> inline void for_i(bool parallelize, T size, Func f, size_t grainsize = 100u) { #ifdef MNN_SINGLE_THREAD for (size_t i = 0; i < size; ++i) { f(i); } #else // #ifdef MNN_SINGLE_THREAD for_(parallelize, 0u, size, [&](const BlockedRange &r) { #ifdef MNN_USE_OMP #pragma omp parallel for for (int i = static_cast<int>(r.begin()); i < static_cast<int>(r.end()); i++) { f(i); } #else for (size_t i = r.begin(); i < r.end(); i++) { f(i); } #endif }, grainsize); #endif // #ifdef MNN_SINGLE_THREAD } template <typename T, typename Func> inline void for_i(T size, Func f, size_t grainsize = 100) { for_i(true, size, f, grainsize); } } // namespace mnn
dertransp_parallel.c
/* Inputs: prhs[0] = dA/dx corresponding to a matrix A of size (nrow x ncol) * prhs[1] = nrow of matrix A * Outputs: plhs[0] = dA'/dx corresponding to transposed matrix A' * Method: This is just a row permutation of dA/dx * * by SeHyoun Ahn, Aug 2016 */ #include <omp.h> #include "mex.h" #include <stdlib.h> #include <time.h> void insertsort(mwIndex *irs, double *prs, mwSize n) { mwIndex i,j; mwIndex swapind; double swapval; for (i=1; i<n; ++i) { swapind = irs[i]; swapval = prs[i]; for (j=i; j>=0;--j) { if (j==0) { irs[j] = swapind; prs[j] = swapval; } else if (swapind<irs[j-1]) { irs[j] = irs[j-1]; prs[j] = prs[j-1]; } else { irs[j] = swapind; prs[j] = swapval; break; } } } }; void quicksort(mwIndex* irs, double* prs, mwSize n) { mwIndex front, back, pivot; mwIndex swapind; double swapval; pivot = rand()%n; front = rand()%n; back = rand()%n; if (irs[front]>irs[back]) { if (irs[pivot]>irs[front]) { pivot = irs[front]; irs[front] = irs[0]; irs[0] = pivot; } else if (irs[pivot]>irs[back]) { front = irs[pivot]; irs[pivot] = irs[0]; irs[0] = front; pivot = front; } else { pivot = irs[back]; irs[back] = irs[0]; irs[0] = pivot; } } else { if (irs[pivot]>irs[back]) { pivot = irs[back]; irs[back] = irs[0]; irs[0] = pivot; } else if (irs[pivot]>irs[front]) { back = irs[pivot]; irs[pivot] = irs[0]; irs[0] = back; pivot = back; } else { pivot = irs[front]; irs[front] = irs[0]; irs[0] = pivot; } } front = 1; back = n-1; while (front < back) { if (irs[front] < pivot) { ++front; } else if (irs[back] > pivot) { --back; } else { swapind = irs[back]; swapval = prs[back]; irs[back] = irs[front]; prs[back] = prs[front]; irs[front]= swapind; prs[front]= swapval; ++front; } } if (irs[front]<pivot) { swapind = irs[front]; swapval = prs[front]; irs[front] = irs[0]; prs[front] = prs[0]; irs[0] = swapind; prs[0] = swapval; if (front > 17) quicksort(&irs[0],&prs[0],front); else if (front > 1) insertsort(&irs[0],&prs[0],front); if ((n-1-front) > 17) 
quicksort(&irs[front+1],&prs[front+1],n-1-front); else if ((n-1-front) > 1) insertsort(&irs[front+1],&prs[front+1],n-1-front); } else { swapind = irs[front-1]; swapval = prs[front-1]; irs[front-1] = irs[0]; prs[front-1] = prs[0]; irs[0] = swapind; prs[0] = swapval; if (front-1 > 17) quicksort(&irs[0],&prs[0],front-1); else if (front-1 > 1) insertsort(&irs[0],&prs[0],front-1); if (n-front > 17) quicksort(&irs[front],&prs[front],n-front); else if (n-front > 1) insertsort(&irs[front],&prs[front],n-front); } }; void mexFunction(int nlhs, mxArray *plhs[],int nrhs,const mxArray *prhs[]) { srand(time(NULL)); mwSize nrow, ncol, nderiv,nnz; /* Read In Sparse Matrix */ mwIndex *irsA, *jcsA; double *prA; nderiv = mxGetN(prhs[0]); ncol = mxGetM(prhs[0]); irsA = mxGetIr(prhs[0]); jcsA = mxGetJc(prhs[0]); prA = mxGetPr(prhs[0]); nnz = jcsA[nderiv]; /* Read in the number of rows in the Matrix A */ nrow = mxGetScalar(prhs[1]); ncol = ncol/nrow; /* Prepare Output Matrix */ mwIndex *lirs, *ljcs; double *lpr; plhs[0] = mxCreateSparse(nrow*ncol,nderiv,nnz,mxREAL); lirs = mxGetIr(plhs[0]); ljcs = mxGetJc(plhs[0]); lpr = mxGetPr(plhs[0]); // lirs = mxMalloc( nnz * sizeof(*lirs)); // ljcs = mxMalloc( (nderiv+1) * sizeof(*ljcs)); // lpr = mxMalloc( nnz * sizeof(*lpr)); mwIndex i,j,tmp; ljcs[0]=0; #pragma omp parallel for default(shared) private(i,j) num_threads(2) for (i=0; i<nderiv; ++i) { /* Compute the new row Index */ for (j = jcsA[i]; j<jcsA[i+1]; ++j){ lirs[j] = (irsA[j]%nrow)*ncol + irsA[j]/nrow; lpr[j] = prA[j]; } ljcs[i+1] = jcsA[i+1]; /* Sort to ensure sorted CSC format */ if ( (ljcs[i+1] - ljcs[i]) > 17) quicksort(&lirs[ljcs[i]], &lpr[ljcs[i]], ljcs[i+1]-ljcs[i]); else if ( (ljcs[i+1] - ljcs[i]) > 1) insertsort(&lirs[ljcs[i]], &lpr[ljcs[i]], ljcs[i+1]-ljcs[i]); } // plhs[0] = mxCreateSparse(nrow*ncol,nderiv,nnz,mxREAL); // if (nnz>0) { // mxSetIr(plhs[0],lirs); // mxSetJc(plhs[0],ljcs); // mxSetPr(plhs[0],lpr); // } }
DRB020-privatemissing-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* tmp should be put as private to avoid race condition Data race pair: tmp@65 vs. tmp@66 */ #include <stdlib.h> int main(int argc, char* argv[]) { int i; int tmp; int len=100; if (argc>1) len = atoi(argv[1]); int a[len]; #pragma omp parallel for for (i=0;i<len;i++) a[i]=i; #pragma omp parallel for private(tmp) for (i=0;i<len;i++) { tmp =a[i]+i; a[i] = tmp; } for (i=0;i<len;i++) printf("%d\n", a[i]); return 0; }
C_pp.c
// this is autogenerated file, do not edit it. #include "ficus/ficus.h" struct _fx_N14K_form__ktyp_t_data_t; static void _fx_free_N14K_form__ktyp_t(struct _fx_N14K_form__ktyp_t_data_t** dst); struct _fx_N14C_form__ctyp_t_data_t; static void _fx_free_N14C_form__ctyp_t(struct _fx_N14C_form__ctyp_t_data_t** dst); struct _fx_N14C_form__cexp_t_data_t; static void _fx_free_N14C_form__cexp_t(struct _fx_N14C_form__cexp_t_data_t** dst); struct _fx_N15C_form__cstmt_t_data_t; static void _fx_free_N15C_form__cstmt_t(struct _fx_N15C_form__cstmt_t_data_t** dst); typedef struct _fx_LS_data_t { int_ rc; struct _fx_LS_data_t* tl; fx_str_t hd; } _fx_LS_data_t, *_fx_LS; typedef struct _fx_N17Options__optval_t { int tag; union { bool OptBool; int_ OptInt; fx_str_t OptString; } u; } _fx_N17Options__optval_t; typedef struct _fx_T2SN17Options__optval_t { fx_str_t t0; struct _fx_N17Options__optval_t t1; } _fx_T2SN17Options__optval_t; typedef struct _fx_LT2SN17Options__optval_t_data_t { int_ rc; struct _fx_LT2SN17Options__optval_t_data_t* tl; struct _fx_T2SN17Options__optval_t hd; } _fx_LT2SN17Options__optval_t_data_t, *_fx_LT2SN17Options__optval_t; typedef struct _fx_R18Options__options_t { struct _fx_LS_data_t* app_args; fx_str_t app_filename; bool arch64; bool force_rebuild; fx_str_t build_dir; fx_str_t build_rootdir; fx_str_t cflags; fx_str_t clibs; bool compile_by_cpp; fx_str_t filename; bool gen_c; struct _fx_LS_data_t* include_path; bool debug; struct _fx_LT2SN17Options__optval_t_data_t* defines; int_ optim_iters; int_ inline_thresh; bool enable_openmp; bool relax; bool use_preamble; bool make_app; int_ optimize_level; fx_str_t output_name; bool print_ast0; bool print_ast; bool print_k0; bool print_k; bool print_tokens; bool run_app; bool verbose; bool W_unused; } _fx_R18Options__options_t; typedef struct _fx_Ta2i { int_ t0; int_ t1; } _fx_Ta2i; typedef struct _fx_T2Ta2iS { struct _fx_Ta2i t0; fx_str_t t1; } _fx_T2Ta2iS; typedef struct _fx_R9Ast__id_t { int_ m; int_ i; int_ j; } 
_fx_R9Ast__id_t; typedef struct _fx_R10Ast__loc_t { int_ m_idx; int_ line0; int_ col0; int_ line1; int_ col1; } _fx_R10Ast__loc_t; typedef struct _fx_T2Bi { bool t0; int_ t1; } _fx_T2Bi; typedef struct _fx_N12Ast__scope_t { int tag; union { int_ ScBlock; struct _fx_T2Bi ScLoop; int_ ScFold; int_ ScArrMap; int_ ScMap; int_ ScTry; struct _fx_R9Ast__id_t ScFun; struct _fx_R9Ast__id_t ScClass; struct _fx_R9Ast__id_t ScInterface; int_ ScModule; } u; } _fx_N12Ast__scope_t; typedef struct _fx_LN12Ast__scope_t_data_t { int_ rc; struct _fx_LN12Ast__scope_t_data_t* tl; struct _fx_N12Ast__scope_t hd; } _fx_LN12Ast__scope_t_data_t, *_fx_LN12Ast__scope_t; typedef struct _fx_LR9Ast__id_t_data_t { int_ rc; struct _fx_LR9Ast__id_t_data_t* tl; struct _fx_R9Ast__id_t hd; } _fx_LR9Ast__id_t_data_t, *_fx_LR9Ast__id_t; typedef struct _fx_T2R10Ast__loc_tS { struct _fx_R10Ast__loc_t t0; fx_str_t t1; } _fx_T2R10Ast__loc_tS; typedef struct _fx_N13PP__ppstyle_t { int tag; } _fx_N13PP__ppstyle_t; typedef struct _fx_T3iiC { int_ t0; int_ t1; char_ t2; } _fx_T3iiC; typedef struct _fx_T2iN13PP__ppstyle_t { int_ t0; struct _fx_N13PP__ppstyle_t t1; } _fx_T2iN13PP__ppstyle_t; typedef struct _fx_N11PP__pptok_t { int tag; union { fx_str_t PPString; struct _fx_T3iiC PPBreak; struct _fx_T2iN13PP__ppstyle_t PPBegin; } u; } _fx_N11PP__pptok_t; typedef struct _fx_T2N11PP__pptok_ti { struct _fx_N11PP__pptok_t t0; int_ t1; } _fx_T2N11PP__pptok_ti; typedef struct _fx_R11PP__state_t { int_ space; int_ left; int_ right; int_ top; int_ bottom; int_ lefttotal; int_ righttotal; fx_arr_t q; fx_arr_t stack; fx_arr_t pp_stack; int_ pp_top; bool emptystack; } _fx_R11PP__state_t; typedef struct _fx_FPv1S { int (*fp)(fx_str_t*, void*); fx_fcv_t* fcv; } _fx_FPv1S; typedef struct _fx_FPLS0 { int (*fp)(struct _fx_LS_data_t**, void*); fx_fcv_t* fcv; } _fx_FPLS0; typedef struct _fx_rR11PP__state_t_data_t { int_ rc; struct _fx_R11PP__state_t data; } _fx_rR11PP__state_t_data_t, *_fx_rR11PP__state_t; typedef struct 
_fx_R5PP__t { int_ margin; int_ default_indent; struct _fx_FPv1S print_f; struct _fx_FPLS0 get_f; struct _fx_rR11PP__state_t_data_t* r; } _fx_R5PP__t; typedef struct _fx_T2il { int_ t0; int64_t t1; } _fx_T2il; typedef struct _fx_T2iq { int_ t0; uint64_t t1; } _fx_T2iq; typedef struct _fx_T2id { int_ t0; double t1; } _fx_T2id; typedef struct _fx_N14K_form__klit_t { int tag; union { int64_t KLitInt; struct _fx_T2il KLitSInt; struct _fx_T2iq KLitUInt; struct _fx_T2id KLitFloat; fx_str_t KLitString; char_ KLitChar; bool KLitBool; struct _fx_N14K_form__ktyp_t_data_t* KLitNil; } u; } _fx_N14K_form__klit_t; typedef struct _fx_LN14K_form__ktyp_t_data_t { int_ rc; struct _fx_LN14K_form__ktyp_t_data_t* tl; struct _fx_N14K_form__ktyp_t_data_t* hd; } _fx_LN14K_form__ktyp_t_data_t, *_fx_LN14K_form__ktyp_t; typedef struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t { struct _fx_LN14K_form__ktyp_t_data_t* t0; struct _fx_N14K_form__ktyp_t_data_t* t1; } _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t; typedef struct _fx_T2R9Ast__id_tN14K_form__ktyp_t { struct _fx_R9Ast__id_t t0; struct _fx_N14K_form__ktyp_t_data_t* t1; } _fx_T2R9Ast__id_tN14K_form__ktyp_t; typedef struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* tl; struct _fx_T2R9Ast__id_tN14K_form__ktyp_t hd; } _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t, *_fx_LT2R9Ast__id_tN14K_form__ktyp_t; typedef struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t { struct _fx_R9Ast__id_t t0; struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* t1; } _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t; typedef struct _fx_T2iN14K_form__ktyp_t { int_ t0; struct _fx_N14K_form__ktyp_t_data_t* t1; } _fx_T2iN14K_form__ktyp_t; typedef struct _fx_N14K_form__ktyp_t_data_t { int_ rc; int tag; union { int_ KTypSInt; int_ KTypUInt; int_ KTypFloat; struct _fx_N14K_form__ktyp_t_data_t* KTypRawPointer; struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t KTypFun; struct _fx_LN14K_form__ktyp_t_data_t* 
KTypTuple; struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t KTypRecord; struct _fx_R9Ast__id_t KTypName; struct _fx_T2iN14K_form__ktyp_t KTypArray; struct _fx_N14K_form__ktyp_t_data_t* KTypVector; struct _fx_N14K_form__ktyp_t_data_t* KTypList; struct _fx_N14K_form__ktyp_t_data_t* KTypRef; } u; } _fx_N14K_form__ktyp_t_data_t, *_fx_N14K_form__ktyp_t; typedef struct _fx_T2R9Ast__id_tN14C_form__ctyp_t { struct _fx_R9Ast__id_t t0; struct _fx_N14C_form__ctyp_t_data_t* t1; } _fx_T2R9Ast__id_tN14C_form__ctyp_t; typedef struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* tl; struct _fx_T2R9Ast__id_tN14C_form__ctyp_t hd; } _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t, *_fx_LT2R9Ast__id_tN14C_form__ctyp_t; typedef struct _fx_R23C_form__cdefinterface_t { struct _fx_R9Ast__id_t ci_name; fx_str_t ci_cname; struct _fx_R9Ast__id_t ci_id; struct _fx_R9Ast__id_t ci_vtbl; struct _fx_R9Ast__id_t ci_base; struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* ci_all_methods; struct _fx_LN12Ast__scope_t_data_t* ci_scope; struct _fx_R10Ast__loc_t ci_loc; } _fx_R23C_form__cdefinterface_t; typedef struct _fx_rR23C_form__cdefinterface_t_data_t { int_ rc; struct _fx_R23C_form__cdefinterface_t data; } _fx_rR23C_form__cdefinterface_t_data_t, *_fx_rR23C_form__cdefinterface_t; typedef struct _fx_Nt6option1N14C_form__ctyp_t { int tag; union { struct _fx_N14C_form__ctyp_t_data_t* Some; } u; } _fx_Nt6option1N14C_form__ctyp_t; typedef struct _fx_Nt6option1N14C_form__cexp_t { int tag; union { struct _fx_N14C_form__cexp_t_data_t* Some; } u; } _fx_Nt6option1N14C_form__cexp_t; typedef struct _fx_Nt6option1R9Ast__id_t { int tag; union { struct _fx_R9Ast__id_t Some; } u; } _fx_Nt6option1R9Ast__id_t; typedef struct _fx_T2R9Ast__id_ti { struct _fx_R9Ast__id_t t0; int_ t1; } _fx_T2R9Ast__id_ti; typedef struct _fx_R16Ast__val_flags_t { bool val_flag_arg; bool val_flag_mutable; bool val_flag_temp; bool val_flag_tempref; bool val_flag_private; bool 
val_flag_subarray; bool val_flag_instance; struct _fx_T2R9Ast__id_ti val_flag_method; int_ val_flag_ctor; struct _fx_LN12Ast__scope_t_data_t* val_flag_global; } _fx_R16Ast__val_flags_t; typedef struct _fx_N12Ast__cmpop_t { int tag; } _fx_N12Ast__cmpop_t; typedef struct _fx_N17Ast__fun_constr_t { int tag; union { int_ CtorVariant; struct _fx_R9Ast__id_t CtorFP; struct _fx_R9Ast__id_t CtorExn; } u; } _fx_N17Ast__fun_constr_t; typedef struct _fx_R16Ast__fun_flags_t { int_ fun_flag_pure; bool fun_flag_ccode; bool fun_flag_have_keywords; bool fun_flag_inline; bool fun_flag_nothrow; bool fun_flag_really_nothrow; bool fun_flag_private; struct _fx_N17Ast__fun_constr_t fun_flag_ctor; struct _fx_R9Ast__id_t fun_flag_method_of; bool fun_flag_uses_fv; bool fun_flag_recursive; bool fun_flag_instance; } _fx_R16Ast__fun_flags_t; typedef struct _fx_Ta2R9Ast__id_t { struct _fx_R9Ast__id_t t0; struct _fx_R9Ast__id_t t1; } _fx_Ta2R9Ast__id_t; typedef struct _fx_T2R9Ast__id_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_R10Ast__loc_t t1; } _fx_T2R9Ast__id_tR10Ast__loc_t; typedef struct _fx_T2SR10Ast__loc_t { fx_str_t t0; struct _fx_R10Ast__loc_t t1; } _fx_T2SR10Ast__loc_t; typedef struct _fx_N17C_form__cbinary_t { int tag; union { struct _fx_N12Ast__cmpop_t COpCmp; } u; } _fx_N17C_form__cbinary_t; typedef struct _fx_N16C_form__cunary_t { int tag; } _fx_N16C_form__cunary_t; typedef struct _fx_N19C_form__ctyp_attr_t { int tag; } _fx_N19C_form__ctyp_attr_t; typedef struct _fx_N19C_form__carg_attr_t { int tag; } _fx_N19C_form__carg_attr_t; typedef struct _fx_R17C_form__ctprops_t { bool ctp_scalar; bool ctp_complex; bool ctp_ptr; bool ctp_pass_by_ref; struct _fx_LR9Ast__id_t_data_t* ctp_make; struct _fx_Ta2R9Ast__id_t ctp_free; struct _fx_Ta2R9Ast__id_t ctp_copy; } _fx_R17C_form__ctprops_t; typedef struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t { struct _fx_Nt6option1R9Ast__id_t t0; struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* t1; } 
_fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t; typedef struct _fx_LN14C_form__ctyp_t_data_t { int_ rc; struct _fx_LN14C_form__ctyp_t_data_t* tl; struct _fx_N14C_form__ctyp_t_data_t* hd; } _fx_LN14C_form__ctyp_t_data_t, *_fx_LN14C_form__ctyp_t; typedef struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t { struct _fx_LN14C_form__ctyp_t_data_t* t0; struct _fx_N14C_form__ctyp_t_data_t* t1; } _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t; typedef struct _fx_LN19C_form__ctyp_attr_t_data_t { int_ rc; struct _fx_LN19C_form__ctyp_attr_t_data_t* tl; struct _fx_N19C_form__ctyp_attr_t hd; } _fx_LN19C_form__ctyp_attr_t_data_t, *_fx_LN19C_form__ctyp_attr_t; typedef struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t { struct _fx_LN19C_form__ctyp_attr_t_data_t* t0; struct _fx_N14C_form__ctyp_t_data_t* t1; } _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t; typedef struct _fx_T2iN14C_form__ctyp_t { int_ t0; struct _fx_N14C_form__ctyp_t_data_t* t1; } _fx_T2iN14C_form__ctyp_t; typedef struct _fx_N14C_form__ctyp_t_data_t { int_ rc; int tag; union { int_ CTypSInt; int_ CTypUInt; int_ CTypFloat; struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t CTypStruct; struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t CTypUnion; struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t CTypFunRawPtr; struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t CTypRawPtr; struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t CTypRawArray; struct _fx_T2iN14C_form__ctyp_t CTypArray; struct _fx_N14C_form__ctyp_t_data_t* CTypVector; struct _fx_R9Ast__id_t CTypName; } u; } _fx_N14C_form__ctyp_t_data_t, *_fx_N14C_form__ctyp_t; typedef struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14C_form__ctyp_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t1; } _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t; 
/*
 * NOTE(review): machine-generated code — this region is C emitted by the Ficus
 * compiler's C backend (it declares the compiler's own C_form IR types), not the
 * DDS coder the file header describes. Do not hand-edit field order or layout:
 * the generated _fx_free_*/_fx_copy_*/_fx_make_* helpers elsewhere in the file
 * depend on these exact layouts.
 *
 * Naming scheme (inferred from usage in this file — TODO confirm against the
 * Ficus code generator):
 *   _fx_T<k>...   k-element tuple (fields t0..t{k-1})
 *   _fx_L...      ref-counted singly linked list node (rc, tl, hd)
 *   _fx_N...      tagged variant: int tag + union u of case payloads
 *   _fx_rR...     ref-counted box around a record (rc + data)
 *   _fx_Nt6option1...  option type (Some carried in u.Some)
 */

/* (literal, (ctype, loc)) — payload of the CExpLit case below. */
typedef struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t {
  struct _fx_N14K_form__klit_t t0;
  struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t1;
} _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t;

/* (binop, lhs, rhs, (ctype, loc)) — payload of CExpBinary. */
typedef struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t {
  struct _fx_N17C_form__cbinary_t t0;
  struct _fx_N14C_form__cexp_t_data_t* t1;
  struct _fx_N14C_form__cexp_t_data_t* t2;
  struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t3;
} _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t;

/* (unop, operand, (ctype, loc)) — payload of CExpUnary. */
typedef struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t {
  struct _fx_N16C_form__cunary_t t0;
  struct _fx_N14C_form__cexp_t_data_t* t1;
  struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t2;
} _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t;

/* (expr, member-id, (ctype, loc)) — shared payload of CExpMem and CExpArrow. */
typedef struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t {
  struct _fx_N14C_form__cexp_t_data_t* t0;
  struct _fx_R9Ast__id_t t1;
  struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t2;
} _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t;

/* (expr, target-ctype, loc) — payload of CExpCast. */
typedef struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t {
  struct _fx_N14C_form__cexp_t_data_t* t0;
  struct _fx_N14C_form__ctyp_t_data_t* t1;
  struct _fx_R10Ast__loc_t t2;
} _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t;

/* (cond, then-expr, else-expr, (ctype, loc)) — payload of CExpTernary. */
typedef struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t {
  struct _fx_N14C_form__cexp_t_data_t* t0;
  struct _fx_N14C_form__cexp_t_data_t* t1;
  struct _fx_N14C_form__cexp_t_data_t* t2;
  struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t3;
} _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t;

/* Ref-counted list node of C expressions. */
typedef struct _fx_LN14C_form__cexp_t_data_t {
  int_ rc;
  struct _fx_LN14C_form__cexp_t_data_t* tl;
  struct _fx_N14C_form__cexp_t_data_t* hd;
} _fx_LN14C_form__cexp_t_data_t, *_fx_LN14C_form__cexp_t;

/* (callee, args, (ctype, loc)) — payload of CExpCall. */
typedef struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t {
  struct _fx_N14C_form__cexp_t_data_t* t0;
  struct _fx_LN14C_form__cexp_t_data_t* t1;
  struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t2;
} _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t;

/* (initializer elements, (ctype, loc)) — payload of CExpInit. */
typedef struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t {
  struct _fx_LN14C_form__cexp_t_data_t* t0;
  struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t1;
} _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t;

/* C expression IR node: ref-counted tagged variant over all expression forms. */
typedef struct _fx_N14C_form__cexp_t_data_t {
  int_ rc;
  int tag;
  union {
    struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t CExpIdent;
    struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t CExpLit;
    struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpBinary;
    struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpUnary;
    struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t CExpMem;
    struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t CExpArrow;
    struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t CExpCast;
    struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpTernary;
    struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpCall;
    struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpInit;
    struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t CExpTyp;
    struct _fx_T2SR10Ast__loc_t CExpCCode;
  } u;
} _fx_N14C_form__cexp_t_data_t, *_fx_N14C_form__cexp_t;

/* (optional expr, loc) — payload of CStmtReturn. */
typedef struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t {
  struct _fx_Nt6option1N14C_form__cexp_t t0;
  struct _fx_R10Ast__loc_t t1;
} _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t;

/* Ref-counted list node of C statements. */
typedef struct _fx_LN15C_form__cstmt_t_data_t {
  int_ rc;
  struct _fx_LN15C_form__cstmt_t_data_t* tl;
  struct _fx_N15C_form__cstmt_t_data_t* hd;
} _fx_LN15C_form__cstmt_t_data_t, *_fx_LN15C_form__cstmt_t;

/* (statement list, loc) — payload of CStmtBlock. */
typedef struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t {
  struct _fx_LN15C_form__cstmt_t_data_t* t0;
  struct _fx_R10Ast__loc_t t1;
} _fx_T2LN15C_form__cstmt_tR10Ast__loc_t;

/* (id, statement) — payload of CStmtSync. */
typedef struct _fx_T2R9Ast__id_tN15C_form__cstmt_t {
  struct _fx_R9Ast__id_t t0;
  struct _fx_N15C_form__cstmt_t_data_t* t1;
} _fx_T2R9Ast__id_tN15C_form__cstmt_t;

/* (cond, then-stmt, else-stmt, loc) — payload of CStmtIf. */
typedef struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t {
  struct _fx_N14C_form__cexp_t_data_t* t0;
  struct _fx_N15C_form__cstmt_t_data_t* t1;
  struct _fx_N15C_form__cstmt_t_data_t* t2;
  struct _fx_R10Ast__loc_t t3;
} _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t;

/* 6-tuple payload of CStmtFor: (opt decl type, init exprs, opt cond, update
   exprs, body, loc) — inferred from C for-loop shape; TODO confirm. */
typedef struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t {
  struct _fx_Nt6option1N14C_form__ctyp_t t0;
  struct _fx_LN14C_form__cexp_t_data_t* t1;
  struct _fx_Nt6option1N14C_form__cexp_t t2;
  struct _fx_LN14C_form__cexp_t_data_t* t3;
  struct _fx_N15C_form__cstmt_t_data_t* t4;
  struct _fx_R10Ast__loc_t t5;
} _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t;

/* (cond, body, loc) — payload of CStmtWhile. */
typedef struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t {
  struct _fx_N14C_form__cexp_t_data_t* t0;
  struct _fx_N15C_form__cstmt_t_data_t* t1;
  struct _fx_R10Ast__loc_t t2;
} _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t;

/* (body, cond, loc) — payload of CStmtDoWhile. */
typedef struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t {
  struct _fx_N15C_form__cstmt_t_data_t* t0;
  struct _fx_N14C_form__cexp_t_data_t* t1;
  struct _fx_R10Ast__loc_t t2;
} _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t;

/* One switch case: (case-label exprs, case-body stmts). */
typedef struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t {
  struct _fx_LN14C_form__cexp_t_data_t* t0;
  struct _fx_LN15C_form__cstmt_t_data_t* t1;
} _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t;

/* Ref-counted list node of switch cases. */
typedef struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t {
  int_ rc;
  struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl;
  struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t hd;
} _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t, *_fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t;

/* (switch expr, case list, loc) — payload of CStmtSwitch. */
typedef struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t {
  struct _fx_N14C_form__cexp_t_data_t* t0;
  struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* t1;
  struct _fx_R10Ast__loc_t t2;
} _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t;

/* (ctype, name, optional initializer, loc) — payload of CDefVal. */
typedef struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t {
  struct _fx_N14C_form__ctyp_t_data_t* t0;
  struct _fx_R9Ast__id_t t1;
  struct _fx_Nt6option1N14C_form__cexp_t t2;
  struct _fx_R10Ast__loc_t t3;
} _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t;

/* Ref-counted list node of argument attributes. */
typedef struct _fx_LN19C_form__carg_attr_t_data_t {
  int_ rc;
  struct _fx_LN19C_form__carg_attr_t_data_t* tl;
  struct _fx_N19C_form__carg_attr_t hd;
} _fx_LN19C_form__carg_attr_t_data_t, *_fx_LN19C_form__carg_attr_t;

/* One function argument: (name, ctype, attributes). */
typedef struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t {
  struct _fx_R9Ast__id_t t0;
  struct _fx_N14C_form__ctyp_t_data_t* t1;
  struct _fx_LN19C_form__carg_attr_t_data_t* t2;
} _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t;

/* Ref-counted list node of function arguments. */
typedef struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t {
  int_ rc;
  struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* tl;
  struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t hd;
} _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t, *_fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t;

/* C function definition record (name, mangled C name, args, return type,
   body, flags, scope chain, source location). */
typedef struct _fx_R17C_form__cdeffun_t {
  struct _fx_R9Ast__id_t cf_name;
  fx_str_t cf_cname;
  struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* cf_args;
  struct _fx_N14C_form__ctyp_t_data_t* cf_rt;
  struct _fx_LN15C_form__cstmt_t_data_t* cf_body;
  struct _fx_R16Ast__fun_flags_t cf_flags;
  struct _fx_LN12Ast__scope_t_data_t* cf_scope;
  struct _fx_R10Ast__loc_t cf_loc;
} _fx_R17C_form__cdeffun_t;

/* Ref-counted box around a function definition (shared via CDefFun/CFun). */
typedef struct _fx_rR17C_form__cdeffun_t_data_t {
  int_ rc;
  struct _fx_R17C_form__cdeffun_t data;
} _fx_rR17C_form__cdeffun_t_data_t, *_fx_rR17C_form__cdeffun_t;

/* C type definition record. */
typedef struct _fx_R17C_form__cdeftyp_t {
  struct _fx_R9Ast__id_t ct_name;
  struct _fx_N14C_form__ctyp_t_data_t* ct_typ;
  fx_str_t ct_cname;
  struct _fx_R17C_form__ctprops_t ct_props;
  int_ ct_data_start;
  struct _fx_R9Ast__id_t ct_enum;
  struct _fx_LR9Ast__id_t_data_t* ct_ifaces;
  struct _fx_R9Ast__id_t ct_ifaces_id;
  struct _fx_LN12Ast__scope_t_data_t* ct_scope;
  struct _fx_R10Ast__loc_t ct_loc;
} _fx_R17C_form__cdeftyp_t;

/* Ref-counted box around a type definition. */
typedef struct _fx_rR17C_form__cdeftyp_t_data_t {
  int_ rc;
  struct _fx_R17C_form__cdeftyp_t data;
} _fx_rR17C_form__cdeftyp_t_data_t, *_fx_rR17C_form__cdeftyp_t;

/* One enum member: (name, optional value expression). */
typedef struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t {
  struct _fx_R9Ast__id_t t0;
  struct _fx_Nt6option1N14C_form__cexp_t t1;
} _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t;

/* Ref-counted list node of enum members. */
typedef struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t {
  int_ rc;
  struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* tl;
  struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t hd;
} _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t, *_fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t;

/* C enum definition record. */
typedef struct _fx_R18C_form__cdefenum_t {
  struct _fx_R9Ast__id_t cenum_name;
  struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* cenum_members;
  fx_str_t cenum_cname;
  struct _fx_LN12Ast__scope_t_data_t* cenum_scope;
  struct _fx_R10Ast__loc_t cenum_loc;
} _fx_R18C_form__cdefenum_t;

/* Ref-counted box around an enum definition. */
typedef struct _fx_rR18C_form__cdefenum_t_data_t {
  int_ rc;
  struct _fx_R18C_form__cdefenum_t data;
} _fx_rR18C_form__cdefenum_t_data_t, *_fx_rR18C_form__cdefenum_t;

/* C preprocessor-macro definition record. */
typedef struct _fx_R19C_form__cdefmacro_t {
  struct _fx_R9Ast__id_t cm_name;
  fx_str_t cm_cname;
  struct _fx_LR9Ast__id_t_data_t* cm_args;
  struct _fx_LN15C_form__cstmt_t_data_t* cm_body;
  struct _fx_LN12Ast__scope_t_data_t* cm_scope;
  struct _fx_R10Ast__loc_t cm_loc;
} _fx_R19C_form__cdefmacro_t;
typedef struct _fx_rR19C_form__cdefmacro_t_data_t { int_ rc; struct _fx_R19C_form__cdefmacro_t data; } _fx_rR19C_form__cdefmacro_t_data_t, *_fx_rR19C_form__cdefmacro_t; typedef struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_LN15C_form__cstmt_t_data_t* t1; } _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t; typedef struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t { int_ rc; struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl; struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t hd; } _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t, *_fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t; typedef struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t { struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* t0; struct _fx_LN15C_form__cstmt_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_N15C_form__cstmt_t_data_t { int_ rc; int tag; union { struct _fx_R10Ast__loc_t CStmtNop; struct _fx_T2SR10Ast__loc_t CComment; struct _fx_N14C_form__cexp_t_data_t* CExp; struct _fx_R10Ast__loc_t CStmtBreak; struct _fx_R10Ast__loc_t CStmtContinue; struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t CStmtReturn; struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t CStmtBlock; struct _fx_T2R9Ast__id_tN15C_form__cstmt_t CStmtSync; struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t CStmtIf; struct _fx_T2R9Ast__id_tR10Ast__loc_t CStmtGoto; struct _fx_T2R9Ast__id_tR10Ast__loc_t CStmtLabel; struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t CStmtFor; struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t CStmtWhile; struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t CStmtDoWhile; struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t CStmtSwitch; struct 
_fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t CDefVal; struct _fx_rR17C_form__cdeffun_t_data_t* CDefFun; struct _fx_rR17C_form__cdeftyp_t_data_t* CDefTyp; struct _fx_T2R9Ast__id_tR10Ast__loc_t CDefForwardSym; struct _fx_T2R9Ast__id_tR10Ast__loc_t CDefForwardTyp; struct _fx_rR18C_form__cdefenum_t_data_t* CDefEnum; struct _fx_rR23C_form__cdefinterface_t_data_t* CDefInterface; struct _fx_rR19C_form__cdefmacro_t_data_t* CMacroDef; struct _fx_T2R9Ast__id_tR10Ast__loc_t CMacroUndef; struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t CMacroIf; struct _fx_T2SR10Ast__loc_t CMacroInclude; struct _fx_T2SR10Ast__loc_t CMacroPragma; } u; } _fx_N15C_form__cstmt_t_data_t, *_fx_N15C_form__cstmt_t; typedef struct _fx_R17C_form__cdefval_t { struct _fx_R9Ast__id_t cv_name; struct _fx_N14C_form__ctyp_t_data_t* cv_typ; fx_str_t cv_cname; struct _fx_R16Ast__val_flags_t cv_flags; struct _fx_R10Ast__loc_t cv_loc; } _fx_R17C_form__cdefval_t; typedef struct _fx_R19C_form__cdeflabel_t { struct _fx_R9Ast__id_t cl_name; fx_str_t cl_cname; struct _fx_R10Ast__loc_t cl_loc; } _fx_R19C_form__cdeflabel_t; typedef struct _fx_R17C_form__cdefexn_t { struct _fx_R9Ast__id_t cexn_name; fx_str_t cexn_cname; fx_str_t cexn_base_cname; struct _fx_N14C_form__ctyp_t_data_t* cexn_typ; bool cexn_std; struct _fx_R9Ast__id_t cexn_tag; struct _fx_R9Ast__id_t cexn_data; struct _fx_R9Ast__id_t cexn_info; struct _fx_R9Ast__id_t cexn_make; struct _fx_LN12Ast__scope_t_data_t* cexn_scope; struct _fx_R10Ast__loc_t cexn_loc; } _fx_R17C_form__cdefexn_t; typedef struct _fx_rR17C_form__cdefexn_t_data_t { int_ rc; struct _fx_R17C_form__cdefexn_t data; } _fx_rR17C_form__cdefexn_t_data_t, *_fx_rR17C_form__cdefexn_t; typedef struct _fx_N15C_form__cinfo_t { int tag; union { struct _fx_R17C_form__cdefval_t CVal; struct _fx_rR17C_form__cdeffun_t_data_t* CFun; struct _fx_rR17C_form__cdeftyp_t_data_t* CTyp; struct _fx_rR17C_form__cdefexn_t_data_t* CExn; struct 
_fx_rR23C_form__cdefinterface_t_data_t* CInterface; struct _fx_rR18C_form__cdefenum_t_data_t* CEnum; struct _fx_R19C_form__cdeflabel_t CLabel; struct _fx_rR19C_form__cdefmacro_t_data_t* CMacro; } u; } _fx_N15C_form__cinfo_t; typedef struct _fx_N13C_pp__assoc_t { int tag; } _fx_N13C_pp__assoc_t; typedef struct _fx_T3SiN13C_pp__assoc_t { fx_str_t t0; int_ t1; struct _fx_N13C_pp__assoc_t t2; } _fx_T3SiN13C_pp__assoc_t; typedef struct { int_ rc; int_ data; } _fx_E4Exit_data_t; typedef struct { int_ rc; fx_str_t data; } _fx_E4Fail_data_t; typedef struct { int_ rc; struct _fx_T2Ta2iS data; } _fx_E22LexerUtils__LexerError_data_t; typedef struct { int_ rc; struct _fx_T2R10Ast__loc_tS data; } _fx_E17Ast__CompileError_data_t; typedef struct { int_ rc; struct _fx_T2R10Ast__loc_tS data; } _fx_E18Parser__ParseError_data_t; static void _fx_free_LS(struct _fx_LS_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LS, fx_free_str); } static int _fx_cons_LS(fx_str_t* hd, struct _fx_LS_data_t* tl, bool addref_tl, struct _fx_LS_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LS, fx_copy_str); } static void _fx_free_N17Options__optval_t(struct _fx_N17Options__optval_t* dst) { switch (dst->tag) { case 3: fx_free_str(&dst->u.OptString); break; default: ; } dst->tag = 0; } static void _fx_copy_N17Options__optval_t(struct _fx_N17Options__optval_t* src, struct _fx_N17Options__optval_t* dst) { dst->tag = src->tag; switch (src->tag) { case 3: fx_copy_str(&src->u.OptString, &dst->u.OptString); break; default: dst->u = src->u; } } static void _fx_free_T2SN17Options__optval_t(struct _fx_T2SN17Options__optval_t* dst) { fx_free_str(&dst->t0); _fx_free_N17Options__optval_t(&dst->t1); } static void _fx_copy_T2SN17Options__optval_t(struct _fx_T2SN17Options__optval_t* src, struct _fx_T2SN17Options__optval_t* dst) { fx_copy_str(&src->t0, &dst->t0); _fx_copy_N17Options__optval_t(&src->t1, &dst->t1); } static void _fx_make_T2SN17Options__optval_t( fx_str_t* t0, struct _fx_N17Options__optval_t* t1, struct 
_fx_T2SN17Options__optval_t* fx_result) { fx_copy_str(t0, &fx_result->t0); _fx_copy_N17Options__optval_t(t1, &fx_result->t1); } static void _fx_free_LT2SN17Options__optval_t(struct _fx_LT2SN17Options__optval_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2SN17Options__optval_t, _fx_free_T2SN17Options__optval_t); } static int _fx_cons_LT2SN17Options__optval_t( struct _fx_T2SN17Options__optval_t* hd, struct _fx_LT2SN17Options__optval_t_data_t* tl, bool addref_tl, struct _fx_LT2SN17Options__optval_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2SN17Options__optval_t, _fx_copy_T2SN17Options__optval_t); } static void _fx_free_R18Options__options_t(struct _fx_R18Options__options_t* dst) { _fx_free_LS(&dst->app_args); fx_free_str(&dst->app_filename); fx_free_str(&dst->build_dir); fx_free_str(&dst->build_rootdir); fx_free_str(&dst->cflags); fx_free_str(&dst->clibs); fx_free_str(&dst->filename); _fx_free_LS(&dst->include_path); _fx_free_LT2SN17Options__optval_t(&dst->defines); fx_free_str(&dst->output_name); } static void _fx_copy_R18Options__options_t(struct _fx_R18Options__options_t* src, struct _fx_R18Options__options_t* dst) { FX_COPY_PTR(src->app_args, &dst->app_args); fx_copy_str(&src->app_filename, &dst->app_filename); dst->arch64 = src->arch64; dst->force_rebuild = src->force_rebuild; fx_copy_str(&src->build_dir, &dst->build_dir); fx_copy_str(&src->build_rootdir, &dst->build_rootdir); fx_copy_str(&src->cflags, &dst->cflags); fx_copy_str(&src->clibs, &dst->clibs); dst->compile_by_cpp = src->compile_by_cpp; fx_copy_str(&src->filename, &dst->filename); dst->gen_c = src->gen_c; FX_COPY_PTR(src->include_path, &dst->include_path); dst->debug = src->debug; FX_COPY_PTR(src->defines, &dst->defines); dst->optim_iters = src->optim_iters; dst->inline_thresh = src->inline_thresh; dst->enable_openmp = src->enable_openmp; dst->relax = src->relax; dst->use_preamble = src->use_preamble; dst->make_app = src->make_app; dst->optimize_level = src->optimize_level; 
fx_copy_str(&src->output_name, &dst->output_name); dst->print_ast0 = src->print_ast0; dst->print_ast = src->print_ast; dst->print_k0 = src->print_k0; dst->print_k = src->print_k; dst->print_tokens = src->print_tokens; dst->run_app = src->run_app; dst->verbose = src->verbose; dst->W_unused = src->W_unused; } static void _fx_make_R18Options__options_t( struct _fx_LS_data_t* r_app_args, fx_str_t* r_app_filename, bool r_arch64, bool r_force_rebuild, fx_str_t* r_build_dir, fx_str_t* r_build_rootdir, fx_str_t* r_cflags, fx_str_t* r_clibs, bool r_compile_by_cpp, fx_str_t* r_filename, bool r_gen_c, struct _fx_LS_data_t* r_include_path, bool r_debug, struct _fx_LT2SN17Options__optval_t_data_t* r_defines, int_ r_optim_iters, int_ r_inline_thresh, bool r_enable_openmp, bool r_relax, bool r_use_preamble, bool r_make_app, int_ r_optimize_level, fx_str_t* r_output_name, bool r_print_ast0, bool r_print_ast, bool r_print_k0, bool r_print_k, bool r_print_tokens, bool r_run_app, bool r_verbose, bool r_W_unused, struct _fx_R18Options__options_t* fx_result) { FX_COPY_PTR(r_app_args, &fx_result->app_args); fx_copy_str(r_app_filename, &fx_result->app_filename); fx_result->arch64 = r_arch64; fx_result->force_rebuild = r_force_rebuild; fx_copy_str(r_build_dir, &fx_result->build_dir); fx_copy_str(r_build_rootdir, &fx_result->build_rootdir); fx_copy_str(r_cflags, &fx_result->cflags); fx_copy_str(r_clibs, &fx_result->clibs); fx_result->compile_by_cpp = r_compile_by_cpp; fx_copy_str(r_filename, &fx_result->filename); fx_result->gen_c = r_gen_c; FX_COPY_PTR(r_include_path, &fx_result->include_path); fx_result->debug = r_debug; FX_COPY_PTR(r_defines, &fx_result->defines); fx_result->optim_iters = r_optim_iters; fx_result->inline_thresh = r_inline_thresh; fx_result->enable_openmp = r_enable_openmp; fx_result->relax = r_relax; fx_result->use_preamble = r_use_preamble; fx_result->make_app = r_make_app; fx_result->optimize_level = r_optimize_level; fx_copy_str(r_output_name, 
&fx_result->output_name); fx_result->print_ast0 = r_print_ast0; fx_result->print_ast = r_print_ast; fx_result->print_k0 = r_print_k0; fx_result->print_k = r_print_k; fx_result->print_tokens = r_print_tokens; fx_result->run_app = r_run_app; fx_result->verbose = r_verbose; fx_result->W_unused = r_W_unused; } static void _fx_free_T2Ta2iS(struct _fx_T2Ta2iS* dst) { fx_free_str(&dst->t1); } static void _fx_copy_T2Ta2iS(struct _fx_T2Ta2iS* src, struct _fx_T2Ta2iS* dst) { dst->t0 = src->t0; fx_copy_str(&src->t1, &dst->t1); } static void _fx_make_T2Ta2iS(struct _fx_Ta2i* t0, fx_str_t* t1, struct _fx_T2Ta2iS* fx_result) { fx_result->t0 = *t0; fx_copy_str(t1, &fx_result->t1); } static int _fx_cons_LN12Ast__scope_t( struct _fx_N12Ast__scope_t* hd, struct _fx_LN12Ast__scope_t_data_t* tl, bool addref_tl, struct _fx_LN12Ast__scope_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN12Ast__scope_t, FX_COPY_SIMPLE_BY_PTR); } static int _fx_cons_LR9Ast__id_t( struct _fx_R9Ast__id_t* hd, struct _fx_LR9Ast__id_t_data_t* tl, bool addref_tl, struct _fx_LR9Ast__id_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LR9Ast__id_t, FX_COPY_SIMPLE_BY_PTR); } static void _fx_free_T2R10Ast__loc_tS(struct _fx_T2R10Ast__loc_tS* dst) { fx_free_str(&dst->t1); } static void _fx_copy_T2R10Ast__loc_tS(struct _fx_T2R10Ast__loc_tS* src, struct _fx_T2R10Ast__loc_tS* dst) { dst->t0 = src->t0; fx_copy_str(&src->t1, &dst->t1); } static void _fx_make_T2R10Ast__loc_tS(struct _fx_R10Ast__loc_t* t0, fx_str_t* t1, struct _fx_T2R10Ast__loc_tS* fx_result) { fx_result->t0 = *t0; fx_copy_str(t1, &fx_result->t1); } static void _fx_free_N11PP__pptok_t(struct _fx_N11PP__pptok_t* dst) { switch (dst->tag) { case 1: fx_free_str(&dst->u.PPString); break; default: ; } dst->tag = 0; } static void _fx_copy_N11PP__pptok_t(struct _fx_N11PP__pptok_t* src, struct _fx_N11PP__pptok_t* dst) { dst->tag = src->tag; switch (src->tag) { case 1: fx_copy_str(&src->u.PPString, &dst->u.PPString); break; default: dst->u = src->u; } } static void 
_fx_free_T2N11PP__pptok_ti(struct _fx_T2N11PP__pptok_ti* dst) { _fx_free_N11PP__pptok_t(&dst->t0); } static void _fx_copy_T2N11PP__pptok_ti(struct _fx_T2N11PP__pptok_ti* src, struct _fx_T2N11PP__pptok_ti* dst) { _fx_copy_N11PP__pptok_t(&src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2N11PP__pptok_ti(struct _fx_N11PP__pptok_t* t0, int_ t1, struct _fx_T2N11PP__pptok_ti* fx_result) { _fx_copy_N11PP__pptok_t(t0, &fx_result->t0); fx_result->t1 = t1; } static void _fx_free_R11PP__state_t(struct _fx_R11PP__state_t* dst) { fx_free_arr(&dst->q); fx_free_arr(&dst->stack); fx_free_arr(&dst->pp_stack); } static void _fx_copy_R11PP__state_t(struct _fx_R11PP__state_t* src, struct _fx_R11PP__state_t* dst) { dst->space = src->space; dst->left = src->left; dst->right = src->right; dst->top = src->top; dst->bottom = src->bottom; dst->lefttotal = src->lefttotal; dst->righttotal = src->righttotal; fx_copy_arr(&src->q, &dst->q); fx_copy_arr(&src->stack, &dst->stack); fx_copy_arr(&src->pp_stack, &dst->pp_stack); dst->pp_top = src->pp_top; dst->emptystack = src->emptystack; } static void _fx_make_R11PP__state_t( int_ r_space, int_ r_left, int_ r_right, int_ r_top, int_ r_bottom, int_ r_lefttotal, int_ r_righttotal, fx_arr_t* r_q, fx_arr_t* r_stack, fx_arr_t* r_pp_stack, int_ r_pp_top, bool r_emptystack, struct _fx_R11PP__state_t* fx_result) { fx_result->space = r_space; fx_result->left = r_left; fx_result->right = r_right; fx_result->top = r_top; fx_result->bottom = r_bottom; fx_result->lefttotal = r_lefttotal; fx_result->righttotal = r_righttotal; fx_copy_arr(r_q, &fx_result->q); fx_copy_arr(r_stack, &fx_result->stack); fx_copy_arr(r_pp_stack, &fx_result->pp_stack); fx_result->pp_top = r_pp_top; fx_result->emptystack = r_emptystack; } static void _fx_free_rR11PP__state_t(struct _fx_rR11PP__state_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR11PP__state_t, _fx_free_R11PP__state_t); } static int _fx_make_rR11PP__state_t(struct _fx_R11PP__state_t* arg, struct 
_fx_rR11PP__state_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR11PP__state_t, _fx_copy_R11PP__state_t); } static void _fx_free_R5PP__t(struct _fx_R5PP__t* dst) { fx_free_fp(&dst->print_f); fx_free_fp(&dst->get_f); _fx_free_rR11PP__state_t(&dst->r); } static void _fx_copy_R5PP__t(struct _fx_R5PP__t* src, struct _fx_R5PP__t* dst) { dst->margin = src->margin; dst->default_indent = src->default_indent; FX_COPY_FP(&src->print_f, &dst->print_f); FX_COPY_FP(&src->get_f, &dst->get_f); FX_COPY_PTR(src->r, &dst->r); } static void _fx_make_R5PP__t( int_ r_margin, int_ r_default_indent, struct _fx_FPv1S* r_print_f, struct _fx_FPLS0* r_get_f, struct _fx_rR11PP__state_t_data_t* r_r, struct _fx_R5PP__t* fx_result) { fx_result->margin = r_margin; fx_result->default_indent = r_default_indent; FX_COPY_FP(r_print_f, &fx_result->print_f); FX_COPY_FP(r_get_f, &fx_result->get_f); FX_COPY_PTR(r_r, &fx_result->r); } static void _fx_free_N14K_form__klit_t(struct _fx_N14K_form__klit_t* dst) { switch (dst->tag) { case 5: fx_free_str(&dst->u.KLitString); break; case 8: _fx_free_N14K_form__ktyp_t(&dst->u.KLitNil); break; default: ; } dst->tag = 0; } static void _fx_copy_N14K_form__klit_t(struct _fx_N14K_form__klit_t* src, struct _fx_N14K_form__klit_t* dst) { dst->tag = src->tag; switch (src->tag) { case 5: fx_copy_str(&src->u.KLitString, &dst->u.KLitString); break; case 8: FX_COPY_PTR(src->u.KLitNil, &dst->u.KLitNil); break; default: dst->u = src->u; } } static void _fx_free_LN14K_form__ktyp_t(struct _fx_LN14K_form__ktyp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN14K_form__ktyp_t, _fx_free_N14K_form__ktyp_t); } static int _fx_cons_LN14K_form__ktyp_t( struct _fx_N14K_form__ktyp_t_data_t* hd, struct _fx_LN14K_form__ktyp_t_data_t* tl, bool addref_tl, struct _fx_LN14K_form__ktyp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN14K_form__ktyp_t, FX_COPY_PTR); } static void _fx_free_T2LN14K_form__ktyp_tN14K_form__ktyp_t(struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* dst) { 
_fx_free_LN14K_form__ktyp_t(&dst->t0); _fx_free_N14K_form__ktyp_t(&dst->t1); } static void _fx_copy_T2LN14K_form__ktyp_tN14K_form__ktyp_t( struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* src, struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2LN14K_form__ktyp_tN14K_form__ktyp_t( struct _fx_LN14K_form__ktyp_t_data_t* t0, struct _fx_N14K_form__ktyp_t_data_t* t1, struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T2R9Ast__id_tN14K_form__ktyp_t(struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* dst) { _fx_free_N14K_form__ktyp_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tN14K_form__ktyp_t( struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* src, struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tN14K_form__ktyp_t( struct _fx_R9Ast__id_t* t0, struct _fx_N14K_form__ktyp_t_data_t* t1, struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN14K_form__ktyp_t, _fx_free_T2R9Ast__id_tN14K_form__ktyp_t); } static int _fx_cons_LT2R9Ast__id_tN14K_form__ktyp_t( struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* hd, struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* tl, bool addref_tl, struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN14K_form__ktyp_t, _fx_copy_T2R9Ast__id_tN14K_form__ktyp_t); } static void _fx_free_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t(struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* dst) { _fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t( struct 
_fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* src, struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t( struct _fx_R9Ast__id_t* t0, struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* t1, struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T2iN14K_form__ktyp_t(struct _fx_T2iN14K_form__ktyp_t* dst) { _fx_free_N14K_form__ktyp_t(&dst->t1); } static void _fx_copy_T2iN14K_form__ktyp_t(struct _fx_T2iN14K_form__ktyp_t* src, struct _fx_T2iN14K_form__ktyp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2iN14K_form__ktyp_t( int_ t0, struct _fx_N14K_form__ktyp_t_data_t* t1, struct _fx_T2iN14K_form__ktyp_t* fx_result) { fx_result->t0 = t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_N14K_form__ktyp_t(struct _fx_N14K_form__ktyp_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 11: _fx_free_N14K_form__ktyp_t(&(*dst)->u.KTypRawPointer); break; case 12: _fx_free_T2LN14K_form__ktyp_tN14K_form__ktyp_t(&(*dst)->u.KTypFun); break; case 13: _fx_free_LN14K_form__ktyp_t(&(*dst)->u.KTypTuple); break; case 14: _fx_free_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t(&(*dst)->u.KTypRecord); break; case 16: _fx_free_T2iN14K_form__ktyp_t(&(*dst)->u.KTypArray); break; case 17: _fx_free_N14K_form__ktyp_t(&(*dst)->u.KTypVector); break; case 18: _fx_free_N14K_form__ktyp_t(&(*dst)->u.KTypList); break; case 19: _fx_free_N14K_form__ktyp_t(&(*dst)->u.KTypRef); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_T2R9Ast__id_tN14C_form__ctyp_t(struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tN14C_form__ctyp_t( struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* src, struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* dst) 
{ dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tN14C_form__ctyp_t( struct _fx_R9Ast__id_t* t0, struct _fx_N14C_form__ctyp_t_data_t* t1, struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2R9Ast__id_tN14C_form__ctyp_t(struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN14C_form__ctyp_t, _fx_free_T2R9Ast__id_tN14C_form__ctyp_t); } static int _fx_cons_LT2R9Ast__id_tN14C_form__ctyp_t( struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* hd, struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* tl, bool addref_tl, struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN14C_form__ctyp_t, _fx_copy_T2R9Ast__id_tN14C_form__ctyp_t); } static void _fx_free_R23C_form__cdefinterface_t(struct _fx_R23C_form__cdefinterface_t* dst) { fx_free_str(&dst->ci_cname); _fx_free_LT2R9Ast__id_tN14C_form__ctyp_t(&dst->ci_all_methods); fx_free_list_simple(&dst->ci_scope); } static void _fx_copy_R23C_form__cdefinterface_t( struct _fx_R23C_form__cdefinterface_t* src, struct _fx_R23C_form__cdefinterface_t* dst) { dst->ci_name = src->ci_name; fx_copy_str(&src->ci_cname, &dst->ci_cname); dst->ci_id = src->ci_id; dst->ci_vtbl = src->ci_vtbl; dst->ci_base = src->ci_base; FX_COPY_PTR(src->ci_all_methods, &dst->ci_all_methods); FX_COPY_PTR(src->ci_scope, &dst->ci_scope); dst->ci_loc = src->ci_loc; } static void _fx_make_R23C_form__cdefinterface_t( struct _fx_R9Ast__id_t* r_ci_name, fx_str_t* r_ci_cname, struct _fx_R9Ast__id_t* r_ci_id, struct _fx_R9Ast__id_t* r_ci_vtbl, struct _fx_R9Ast__id_t* r_ci_base, struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* r_ci_all_methods, struct _fx_LN12Ast__scope_t_data_t* r_ci_scope, struct _fx_R10Ast__loc_t* r_ci_loc, struct _fx_R23C_form__cdefinterface_t* fx_result) { fx_result->ci_name = *r_ci_name; fx_copy_str(r_ci_cname, &fx_result->ci_cname); fx_result->ci_id = 
*r_ci_id; fx_result->ci_vtbl = *r_ci_vtbl; fx_result->ci_base = *r_ci_base; FX_COPY_PTR(r_ci_all_methods, &fx_result->ci_all_methods); FX_COPY_PTR(r_ci_scope, &fx_result->ci_scope); fx_result->ci_loc = *r_ci_loc; } static void _fx_free_rR23C_form__cdefinterface_t(struct _fx_rR23C_form__cdefinterface_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR23C_form__cdefinterface_t, _fx_free_R23C_form__cdefinterface_t); } static int _fx_make_rR23C_form__cdefinterface_t( struct _fx_R23C_form__cdefinterface_t* arg, struct _fx_rR23C_form__cdefinterface_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR23C_form__cdefinterface_t, _fx_copy_R23C_form__cdefinterface_t); } static void _fx_free_Nt6option1N14C_form__ctyp_t(struct _fx_Nt6option1N14C_form__ctyp_t* dst) { switch (dst->tag) { case 2: _fx_free_N14C_form__ctyp_t(&dst->u.Some); break; default: ; } dst->tag = 0; } static void _fx_copy_Nt6option1N14C_form__ctyp_t( struct _fx_Nt6option1N14C_form__ctyp_t* src, struct _fx_Nt6option1N14C_form__ctyp_t* dst) { dst->tag = src->tag; switch (src->tag) { case 2: FX_COPY_PTR(src->u.Some, &dst->u.Some); break; default: dst->u = src->u; } } static void _fx_free_Nt6option1N14C_form__cexp_t(struct _fx_Nt6option1N14C_form__cexp_t* dst) { switch (dst->tag) { case 2: _fx_free_N14C_form__cexp_t(&dst->u.Some); break; default: ; } dst->tag = 0; } static void _fx_copy_Nt6option1N14C_form__cexp_t( struct _fx_Nt6option1N14C_form__cexp_t* src, struct _fx_Nt6option1N14C_form__cexp_t* dst) { dst->tag = src->tag; switch (src->tag) { case 2: FX_COPY_PTR(src->u.Some, &dst->u.Some); break; default: dst->u = src->u; } } static void _fx_free_R16Ast__val_flags_t(struct _fx_R16Ast__val_flags_t* dst) { fx_free_list_simple(&dst->val_flag_global); } static void _fx_copy_R16Ast__val_flags_t(struct _fx_R16Ast__val_flags_t* src, struct _fx_R16Ast__val_flags_t* dst) { dst->val_flag_arg = src->val_flag_arg; dst->val_flag_mutable = src->val_flag_mutable; dst->val_flag_temp = src->val_flag_temp; dst->val_flag_tempref = 
src->val_flag_tempref; dst->val_flag_private = src->val_flag_private; dst->val_flag_subarray = src->val_flag_subarray; dst->val_flag_instance = src->val_flag_instance; dst->val_flag_method = src->val_flag_method; dst->val_flag_ctor = src->val_flag_ctor; FX_COPY_PTR(src->val_flag_global, &dst->val_flag_global); } static void _fx_make_R16Ast__val_flags_t( bool r_val_flag_arg, bool r_val_flag_mutable, bool r_val_flag_temp, bool r_val_flag_tempref, bool r_val_flag_private, bool r_val_flag_subarray, bool r_val_flag_instance, struct _fx_T2R9Ast__id_ti* r_val_flag_method, int_ r_val_flag_ctor, struct _fx_LN12Ast__scope_t_data_t* r_val_flag_global, struct _fx_R16Ast__val_flags_t* fx_result) { fx_result->val_flag_arg = r_val_flag_arg; fx_result->val_flag_mutable = r_val_flag_mutable; fx_result->val_flag_temp = r_val_flag_temp; fx_result->val_flag_tempref = r_val_flag_tempref; fx_result->val_flag_private = r_val_flag_private; fx_result->val_flag_subarray = r_val_flag_subarray; fx_result->val_flag_instance = r_val_flag_instance; fx_result->val_flag_method = *r_val_flag_method; fx_result->val_flag_ctor = r_val_flag_ctor; FX_COPY_PTR(r_val_flag_global, &fx_result->val_flag_global); } static void _fx_free_T2SR10Ast__loc_t(struct _fx_T2SR10Ast__loc_t* dst) { fx_free_str(&dst->t0); } static void _fx_copy_T2SR10Ast__loc_t(struct _fx_T2SR10Ast__loc_t* src, struct _fx_T2SR10Ast__loc_t* dst) { fx_copy_str(&src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2SR10Ast__loc_t(fx_str_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2SR10Ast__loc_t* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_R17C_form__ctprops_t(struct _fx_R17C_form__ctprops_t* dst) { fx_free_list_simple(&dst->ctp_make); } static void _fx_copy_R17C_form__ctprops_t(struct _fx_R17C_form__ctprops_t* src, struct _fx_R17C_form__ctprops_t* dst) { dst->ctp_scalar = src->ctp_scalar; dst->ctp_complex = src->ctp_complex; dst->ctp_ptr = src->ctp_ptr; dst->ctp_pass_by_ref = 
src->ctp_pass_by_ref; FX_COPY_PTR(src->ctp_make, &dst->ctp_make); dst->ctp_free = src->ctp_free; dst->ctp_copy = src->ctp_copy; } static void _fx_make_R17C_form__ctprops_t( bool r_ctp_scalar, bool r_ctp_complex, bool r_ctp_ptr, bool r_ctp_pass_by_ref, struct _fx_LR9Ast__id_t_data_t* r_ctp_make, struct _fx_Ta2R9Ast__id_t* r_ctp_free, struct _fx_Ta2R9Ast__id_t* r_ctp_copy, struct _fx_R17C_form__ctprops_t* fx_result) { fx_result->ctp_scalar = r_ctp_scalar; fx_result->ctp_complex = r_ctp_complex; fx_result->ctp_ptr = r_ctp_ptr; fx_result->ctp_pass_by_ref = r_ctp_pass_by_ref; FX_COPY_PTR(r_ctp_make, &fx_result->ctp_make); fx_result->ctp_free = *r_ctp_free; fx_result->ctp_copy = *r_ctp_copy; } static void _fx_free_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t( struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* dst) { _fx_free_LT2R9Ast__id_tN14C_form__ctyp_t(&dst->t1); } static void _fx_copy_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t( struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* src, struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t( struct _fx_Nt6option1R9Ast__id_t* t0, struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* t1, struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LN14C_form__ctyp_t(struct _fx_LN14C_form__ctyp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN14C_form__ctyp_t, _fx_free_N14C_form__ctyp_t); } static int _fx_cons_LN14C_form__ctyp_t( struct _fx_N14C_form__ctyp_t_data_t* hd, struct _fx_LN14C_form__ctyp_t_data_t* tl, bool addref_tl, struct _fx_LN14C_form__ctyp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN14C_form__ctyp_t, FX_COPY_PTR); } static void _fx_free_T2LN14C_form__ctyp_tN14C_form__ctyp_t(struct 
_fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* dst) { _fx_free_LN14C_form__ctyp_t(&dst->t0); _fx_free_N14C_form__ctyp_t(&dst->t1); } static void _fx_copy_T2LN14C_form__ctyp_tN14C_form__ctyp_t( struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* src, struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2LN14C_form__ctyp_tN14C_form__ctyp_t( struct _fx_LN14C_form__ctyp_t_data_t* t0, struct _fx_N14C_form__ctyp_t_data_t* t1, struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static int _fx_cons_LN19C_form__ctyp_attr_t( struct _fx_N19C_form__ctyp_attr_t* hd, struct _fx_LN19C_form__ctyp_attr_t_data_t* tl, bool addref_tl, struct _fx_LN19C_form__ctyp_attr_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN19C_form__ctyp_attr_t, FX_COPY_SIMPLE_BY_PTR); } static void _fx_free_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t(struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* dst) { fx_free_list_simple(&dst->t0); _fx_free_N14C_form__ctyp_t(&dst->t1); } static void _fx_copy_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t( struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* src, struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t( struct _fx_LN19C_form__ctyp_attr_t_data_t* t0, struct _fx_N14C_form__ctyp_t_data_t* t1, struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T2iN14C_form__ctyp_t(struct _fx_T2iN14C_form__ctyp_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->t1); } static void _fx_copy_T2iN14C_form__ctyp_t(struct _fx_T2iN14C_form__ctyp_t* src, struct _fx_T2iN14C_form__ctyp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2iN14C_form__ctyp_t( int_ 
/* NOTE(review): machine-generated code (Ficus compiler C backend) — do not hand-edit;
   regenerate from the Ficus source instead. */
/* Tail of _fx_make_T2iN14C_form__ctyp_t (signature begins on the previous physical
   line): constructs an (int, ctyp) tuple, sharing the ctyp node via FX_COPY_PTR
   (presumably an addref-and-assign macro — TODO confirm against the runtime). */
t0, struct _fx_N14C_form__ctyp_t_data_t* t1, struct _fx_T2iN14C_form__ctyp_t* fx_result) {
  fx_result->t0 = t0;
  FX_COPY_PTR(t1, &fx_result->t1);
}

/* Releases one reference to a C_form ctyp_t variant node.  When this was the last
   reference (the `FX_DECREF(...) == 1` test — FX_DECREF apparently yields the prior
   count; verify against the runtime), frees the payload of the active variant (tags
   13..19) and then the node itself.  Recurses for CTypVector (case 19).  Variants
   with no heap payload hit the empty default case.  Always nulls *dst afterwards so
   the caller's pointer cannot dangle. */
static void _fx_free_N14C_form__ctyp_t(struct _fx_N14C_form__ctyp_t_data_t** dst) {
  if (*dst && FX_DECREF((*dst)->rc) == 1) {
    switch ((*dst)->tag) {
    case 13:
      _fx_free_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t(&(*dst)->u.CTypStruct);
      break;
    case 14: /* CTypUnion carries the same payload shape as CTypStruct */
      _fx_free_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t(&(*dst)->u.CTypUnion);
      break;
    case 15:
      _fx_free_T2LN14C_form__ctyp_tN14C_form__ctyp_t(&(*dst)->u.CTypFunRawPtr);
      break;
    case 16:
      _fx_free_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t(&(*dst)->u.CTypRawPtr);
      break;
    case 17: /* CTypRawArray shares the (attrs, ctyp) payload shape with CTypRawPtr */
      _fx_free_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t(&(*dst)->u.CTypRawArray);
      break;
    case 18:
      _fx_free_T2iN14C_form__ctyp_t(&(*dst)->u.CTypArray);
      break;
    case 19: /* recursive: element type of a vector is itself a ctyp */
      _fx_free_N14C_form__ctyp_t(&(*dst)->u.CTypVector);
      break;
    default: ; /* value-only variants: nothing heap-allocated to release */
    }
    fx_free(*dst);
  }
  *dst = 0;
}

/* (ctyp, loc) tuple: only the ctyp member (t0) is heap-managed; loc is by value. */
static void _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* dst) {
  _fx_free_N14C_form__ctyp_t(&dst->t0);
}

static void _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(
  struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* src,
  struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* dst) {
  FX_COPY_PTR(src->t0, &dst->t0);
  dst->t1 = src->t1;
}

static void _fx_make_T2N14C_form__ctyp_tR10Ast__loc_t(
  struct _fx_N14C_form__ctyp_t_data_t* t0,
  struct _fx_R10Ast__loc_t* t1,
  struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* fx_result) {
  FX_COPY_PTR(t0, &fx_result->t0);
  fx_result->t1 = *t1;
}

/* (id, (ctyp, loc)) tuple: id (t0) is a plain value; t1 owns the ctyp reference. */
static void _fx_free_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(
  struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) {
  _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t1);
}

/* Head of the matching copy helper; its body continues on the next physical line. */
static void _fx_copy_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(
  struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* src,
  struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) {
  dst->t0 = src->t0;
_fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_R9Ast__id_t* t0, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t1, struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14K_form__klit_t(&dst->t0); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t1); } static void _fx_copy_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_copy_N14K_form__klit_t(&src->t0, &dst->t0); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14K_form__klit_t* t0, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t1, struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { _fx_copy_N14K_form__klit_t(t0, &fx_result->t0); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t1); _fx_free_N14C_form__cexp_t(&dst->t2); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t3); } static void _fx_copy_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, 
&dst->t1); FX_COPY_PTR(src->t2, &dst->t2); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t3, &dst->t3); } static void _fx_make_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N17C_form__cbinary_t* t0, struct _fx_N14C_form__cexp_t_data_t* t1, struct _fx_N14C_form__cexp_t_data_t* t2, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t3, struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t3, &fx_result->t3); } static void _fx_free_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t1); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N16C_form__cunary_t* t0, struct _fx_N14C_form__cexp_t_data_t* t1, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t2, struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); 
_fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_R9Ast__id_t* t1, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t2, struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_N14C_form__ctyp_t(&dst->t1); } static void _fx_copy_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_N14C_form__ctyp_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_N14C_form__cexp_t(&dst->t1); 
_fx_free_N14C_form__cexp_t(&dst->t2); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t3); } static void _fx_copy_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t3, &dst->t3); } static void _fx_make_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_N14C_form__cexp_t_data_t* t1, struct _fx_N14C_form__cexp_t_data_t* t2, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t3, struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t3, &fx_result->t3); } static void _fx_free_LN14C_form__cexp_t(struct _fx_LN14C_form__cexp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN14C_form__cexp_t, _fx_free_N14C_form__cexp_t); } static int _fx_cons_LN14C_form__cexp_t( struct _fx_N14C_form__cexp_t_data_t* hd, struct _fx_LN14C_form__cexp_t_data_t* tl, bool addref_tl, struct _fx_LN14C_form__cexp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN14C_form__cexp_t, FX_COPY_PTR); } static void _fx_free_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_LN14C_form__cexp_t(&dst->t1); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct 
_fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_LN14C_form__cexp_t_data_t* t1, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t2, struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_LN14C_form__cexp_t(&dst->t0); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t1); } static void _fx_copy_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_LN14C_form__cexp_t_data_t* t0, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t1, struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_N14C_form__cexp_t(struct _fx_N14C_form__cexp_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 1: _fx_free_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpIdent); break; case 2: _fx_free_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpLit); break; case 3: 
/* NOTE(review): machine-generated code (Ficus compiler C backend) — do not hand-edit. */
/* Continuation of the _fx_free_N14C_form__cexp_t destructor switch (opened on the
   previous physical line): releases the heap payload of the active cexp variant. */
_fx_free_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
  &(*dst)->u.CExpBinary);
  break;
case 4:
  _fx_free_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpUnary);
  break;
case 5:
  _fx_free_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpMem);
  break;
case 6: /* CExpArrow shares the (exp, id, (ctyp, loc)) payload shape with CExpMem */
  _fx_free_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpArrow);
  break;
case 7:
  _fx_free_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpCast);
  break;
case 8:
  _fx_free_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpTernary);
  break;
case 9:
  _fx_free_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpCall);
  break;
case 10:
  _fx_free_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpInit);
  break;
case 11:
  _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpTyp);
  break;
case 12:
  _fx_free_T2SR10Ast__loc_t(&(*dst)->u.CExpCCode);
  break;
default: ; /* value-only variants: nothing heap-allocated to release */
}
/* free the node itself, then null the caller's pointer so it cannot dangle */
fx_free(*dst);
}
*dst = 0;
}

/* (cexp option, loc) tuple: only the option member (t0) may own a heap reference. */
static void _fx_free_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t(struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* dst) {
  _fx_free_Nt6option1N14C_form__cexp_t(&dst->t0);
}

static void _fx_copy_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t(
  struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* src,
  struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* dst) {
  _fx_copy_Nt6option1N14C_form__cexp_t(&src->t0, &dst->t0);
  dst->t1 = src->t1;
}

static void _fx_make_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t(
  struct _fx_Nt6option1N14C_form__cexp_t* t0,
  struct _fx_R10Ast__loc_t* t1,
  struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* fx_result) {
  _fx_copy_Nt6option1N14C_form__cexp_t(t0, &fx_result->t0);
  fx_result->t1 = *t1;
}

/* Refcounted list of cstmt nodes; element destructor is passed to the macro. */
static void _fx_free_LN15C_form__cstmt_t(struct _fx_LN15C_form__cstmt_t_data_t** dst) {
  FX_FREE_LIST_IMPL(_fx_LN15C_form__cstmt_t, _fx_free_N15C_form__cstmt_t);
}

/* The matching cons helper's signature continues on the next physical line. */
static
int _fx_cons_LN15C_form__cstmt_t( struct _fx_N15C_form__cstmt_t_data_t* hd, struct _fx_LN15C_form__cstmt_t_data_t* tl, bool addref_tl, struct _fx_LN15C_form__cstmt_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN15C_form__cstmt_t, FX_COPY_PTR); } static void _fx_free_T2LN15C_form__cstmt_tR10Ast__loc_t(struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_LN15C_form__cstmt_t(&dst->t0); } static void _fx_copy_T2LN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2LN15C_form__cstmt_tR10Ast__loc_t( struct _fx_LN15C_form__cstmt_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T2R9Ast__id_tN15C_form__cstmt_t(struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* dst) { _fx_free_N15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tN15C_form__cstmt_t( struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* src, struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tN15C_form__cstmt_t( struct _fx_R9Ast__id_t* t0, struct _fx_N15C_form__cstmt_t_data_t* t1, struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_N15C_form__cstmt_t(&dst->t1); _fx_free_N15C_form__cstmt_t(&dst->t2); } static void _fx_copy_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* src, struct 
_fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); dst->t3 = src->t3; } static void _fx_make_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_N15C_form__cstmt_t_data_t* t1, struct _fx_N15C_form__cstmt_t_data_t* t2, struct _fx_R10Ast__loc_t* t3, struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); fx_result->t3 = *t3; } static void _fx_free_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_Nt6option1N14C_form__ctyp_t(&dst->t0); _fx_free_LN14C_form__cexp_t(&dst->t1); _fx_free_Nt6option1N14C_form__cexp_t(&dst->t2); _fx_free_LN14C_form__cexp_t(&dst->t3); _fx_free_N15C_form__cstmt_t(&dst->t4); } static void _fx_copy_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_copy_Nt6option1N14C_form__ctyp_t(&src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_Nt6option1N14C_form__cexp_t(&src->t2, &dst->t2); FX_COPY_PTR(src->t3, &dst->t3); FX_COPY_PTR(src->t4, &dst->t4); dst->t5 = src->t5; } static void _fx_make_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct 
_fx_Nt6option1N14C_form__ctyp_t* t0, struct _fx_LN14C_form__cexp_t_data_t* t1, struct _fx_Nt6option1N14C_form__cexp_t* t2, struct _fx_LN14C_form__cexp_t_data_t* t3, struct _fx_N15C_form__cstmt_t_data_t* t4, struct _fx_R10Ast__loc_t* t5, struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* fx_result) { _fx_copy_Nt6option1N14C_form__ctyp_t(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_Nt6option1N14C_form__cexp_t(t2, &fx_result->t2); FX_COPY_PTR(t3, &fx_result->t3); FX_COPY_PTR(t4, &fx_result->t4); fx_result->t5 = *t5; } static void _fx_free_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_N15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_N15C_form__cstmt_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t( struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* dst) { _fx_free_N15C_form__cstmt_t(&dst->t0); _fx_free_N14C_form__cexp_t(&dst->t1); } static void _fx_copy_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t( struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* src, struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = 
src->t2; } static void _fx_make_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t( struct _fx_N15C_form__cstmt_t_data_t* t0, struct _fx_N14C_form__cexp_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T2LN14C_form__cexp_tLN15C_form__cstmt_t(struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* dst) { _fx_free_LN14C_form__cexp_t(&dst->t0); _fx_free_LN15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T2LN14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* src, struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2LN14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_LN14C_form__cexp_t_data_t* t0, struct _fx_LN15C_form__cstmt_t_data_t* t1, struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2LN14C_form__cexp_tLN15C_form__cstmt_t(struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t, _fx_free_T2LN14C_form__cexp_tLN15C_form__cstmt_t); } static int _fx_cons_LT2LN14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* hd, struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl, bool addref_tl, struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t, _fx_copy_T2LN14C_form__cexp_tLN15C_form__cstmt_t); } static void _fx_free_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); 
_fx_free_LT2LN14C_form__cexp_tLN15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t( struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->t0); _fx_free_Nt6option1N14C_form__cexp_t(&dst->t2); } static void _fx_copy_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t( struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* src, struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; _fx_copy_Nt6option1N14C_form__cexp_t(&src->t2, &dst->t2); dst->t3 = src->t3; } static void _fx_make_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t( struct _fx_N14C_form__ctyp_t_data_t* t0, struct _fx_R9Ast__id_t* t1, struct _fx_Nt6option1N14C_form__cexp_t* t2, struct _fx_R10Ast__loc_t* t3, struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; _fx_copy_Nt6option1N14C_form__cexp_t(t2, &fx_result->t2); fx_result->t3 = *t3; } static int 
_fx_cons_LN19C_form__carg_attr_t( struct _fx_N19C_form__carg_attr_t* hd, struct _fx_LN19C_form__carg_attr_t_data_t* tl, bool addref_tl, struct _fx_LN19C_form__carg_attr_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN19C_form__carg_attr_t, FX_COPY_SIMPLE_BY_PTR); } static void _fx_free_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t( struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->t1); fx_free_list_simple(&dst->t2); } static void _fx_copy_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t( struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* src, struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); } static void _fx_make_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t( struct _fx_R9Ast__id_t* t0, struct _fx_N14C_form__ctyp_t_data_t* t1, struct _fx_LN19C_form__carg_attr_t_data_t* t2, struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); } static void _fx_free_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t( struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t, _fx_free_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t); } static int _fx_cons_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t( struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* hd, struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* tl, bool addref_tl, struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t, _fx_copy_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t); } static void _fx_free_R17C_form__cdeffun_t(struct 
_fx_R17C_form__cdeffun_t* dst) { fx_free_str(&dst->cf_cname); _fx_free_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(&dst->cf_args); _fx_free_N14C_form__ctyp_t(&dst->cf_rt); _fx_free_LN15C_form__cstmt_t(&dst->cf_body); fx_free_list_simple(&dst->cf_scope); } static void _fx_copy_R17C_form__cdeffun_t(struct _fx_R17C_form__cdeffun_t* src, struct _fx_R17C_form__cdeffun_t* dst) { dst->cf_name = src->cf_name; fx_copy_str(&src->cf_cname, &dst->cf_cname); FX_COPY_PTR(src->cf_args, &dst->cf_args); FX_COPY_PTR(src->cf_rt, &dst->cf_rt); FX_COPY_PTR(src->cf_body, &dst->cf_body); dst->cf_flags = src->cf_flags; FX_COPY_PTR(src->cf_scope, &dst->cf_scope); dst->cf_loc = src->cf_loc; } static void _fx_make_R17C_form__cdeffun_t( struct _fx_R9Ast__id_t* r_cf_name, fx_str_t* r_cf_cname, struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* r_cf_args, struct _fx_N14C_form__ctyp_t_data_t* r_cf_rt, struct _fx_LN15C_form__cstmt_t_data_t* r_cf_body, struct _fx_R16Ast__fun_flags_t* r_cf_flags, struct _fx_LN12Ast__scope_t_data_t* r_cf_scope, struct _fx_R10Ast__loc_t* r_cf_loc, struct _fx_R17C_form__cdeffun_t* fx_result) { fx_result->cf_name = *r_cf_name; fx_copy_str(r_cf_cname, &fx_result->cf_cname); FX_COPY_PTR(r_cf_args, &fx_result->cf_args); FX_COPY_PTR(r_cf_rt, &fx_result->cf_rt); FX_COPY_PTR(r_cf_body, &fx_result->cf_body); fx_result->cf_flags = *r_cf_flags; FX_COPY_PTR(r_cf_scope, &fx_result->cf_scope); fx_result->cf_loc = *r_cf_loc; } static void _fx_free_rR17C_form__cdeffun_t(struct _fx_rR17C_form__cdeffun_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR17C_form__cdeffun_t, _fx_free_R17C_form__cdeffun_t); } static int _fx_make_rR17C_form__cdeffun_t( struct _fx_R17C_form__cdeffun_t* arg, struct _fx_rR17C_form__cdeffun_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR17C_form__cdeffun_t, _fx_copy_R17C_form__cdeffun_t); } static void _fx_free_R17C_form__cdeftyp_t(struct _fx_R17C_form__cdeftyp_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->ct_typ); 
fx_free_str(&dst->ct_cname); _fx_free_R17C_form__ctprops_t(&dst->ct_props); fx_free_list_simple(&dst->ct_ifaces); fx_free_list_simple(&dst->ct_scope); } static void _fx_copy_R17C_form__cdeftyp_t(struct _fx_R17C_form__cdeftyp_t* src, struct _fx_R17C_form__cdeftyp_t* dst) { dst->ct_name = src->ct_name; FX_COPY_PTR(src->ct_typ, &dst->ct_typ); fx_copy_str(&src->ct_cname, &dst->ct_cname); _fx_copy_R17C_form__ctprops_t(&src->ct_props, &dst->ct_props); dst->ct_data_start = src->ct_data_start; dst->ct_enum = src->ct_enum; FX_COPY_PTR(src->ct_ifaces, &dst->ct_ifaces); dst->ct_ifaces_id = src->ct_ifaces_id; FX_COPY_PTR(src->ct_scope, &dst->ct_scope); dst->ct_loc = src->ct_loc; } static void _fx_make_R17C_form__cdeftyp_t( struct _fx_R9Ast__id_t* r_ct_name, struct _fx_N14C_form__ctyp_t_data_t* r_ct_typ, fx_str_t* r_ct_cname, struct _fx_R17C_form__ctprops_t* r_ct_props, int_ r_ct_data_start, struct _fx_R9Ast__id_t* r_ct_enum, struct _fx_LR9Ast__id_t_data_t* r_ct_ifaces, struct _fx_R9Ast__id_t* r_ct_ifaces_id, struct _fx_LN12Ast__scope_t_data_t* r_ct_scope, struct _fx_R10Ast__loc_t* r_ct_loc, struct _fx_R17C_form__cdeftyp_t* fx_result) { fx_result->ct_name = *r_ct_name; FX_COPY_PTR(r_ct_typ, &fx_result->ct_typ); fx_copy_str(r_ct_cname, &fx_result->ct_cname); _fx_copy_R17C_form__ctprops_t(r_ct_props, &fx_result->ct_props); fx_result->ct_data_start = r_ct_data_start; fx_result->ct_enum = *r_ct_enum; FX_COPY_PTR(r_ct_ifaces, &fx_result->ct_ifaces); fx_result->ct_ifaces_id = *r_ct_ifaces_id; FX_COPY_PTR(r_ct_scope, &fx_result->ct_scope); fx_result->ct_loc = *r_ct_loc; } static void _fx_free_rR17C_form__cdeftyp_t(struct _fx_rR17C_form__cdeftyp_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR17C_form__cdeftyp_t, _fx_free_R17C_form__cdeftyp_t); } static int _fx_make_rR17C_form__cdeftyp_t( struct _fx_R17C_form__cdeftyp_t* arg, struct _fx_rR17C_form__cdeftyp_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR17C_form__cdeftyp_t, _fx_copy_R17C_form__cdeftyp_t); } static void 
_fx_free_T2R9Ast__id_tNt6option1N14C_form__cexp_t(struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* dst) { _fx_free_Nt6option1N14C_form__cexp_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tNt6option1N14C_form__cexp_t( struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* src, struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* dst) { dst->t0 = src->t0; _fx_copy_Nt6option1N14C_form__cexp_t(&src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tNt6option1N14C_form__cexp_t( struct _fx_R9Ast__id_t* t0, struct _fx_Nt6option1N14C_form__cexp_t* t1, struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* fx_result) { fx_result->t0 = *t0; _fx_copy_Nt6option1N14C_form__cexp_t(t1, &fx_result->t1); } static void _fx_free_LT2R9Ast__id_tNt6option1N14C_form__cexp_t( struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t, _fx_free_T2R9Ast__id_tNt6option1N14C_form__cexp_t); } static int _fx_cons_LT2R9Ast__id_tNt6option1N14C_form__cexp_t( struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* hd, struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* tl, bool addref_tl, struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t, _fx_copy_T2R9Ast__id_tNt6option1N14C_form__cexp_t); } static void _fx_free_R18C_form__cdefenum_t(struct _fx_R18C_form__cdefenum_t* dst) { _fx_free_LT2R9Ast__id_tNt6option1N14C_form__cexp_t(&dst->cenum_members); fx_free_str(&dst->cenum_cname); fx_free_list_simple(&dst->cenum_scope); } static void _fx_copy_R18C_form__cdefenum_t(struct _fx_R18C_form__cdefenum_t* src, struct _fx_R18C_form__cdefenum_t* dst) { dst->cenum_name = src->cenum_name; FX_COPY_PTR(src->cenum_members, &dst->cenum_members); fx_copy_str(&src->cenum_cname, &dst->cenum_cname); FX_COPY_PTR(src->cenum_scope, &dst->cenum_scope); dst->cenum_loc = src->cenum_loc; } static void _fx_make_R18C_form__cdefenum_t( struct _fx_R9Ast__id_t* 
r_cenum_name, struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* r_cenum_members, fx_str_t* r_cenum_cname, struct _fx_LN12Ast__scope_t_data_t* r_cenum_scope, struct _fx_R10Ast__loc_t* r_cenum_loc, struct _fx_R18C_form__cdefenum_t* fx_result) { fx_result->cenum_name = *r_cenum_name; FX_COPY_PTR(r_cenum_members, &fx_result->cenum_members); fx_copy_str(r_cenum_cname, &fx_result->cenum_cname); FX_COPY_PTR(r_cenum_scope, &fx_result->cenum_scope); fx_result->cenum_loc = *r_cenum_loc; } static void _fx_free_rR18C_form__cdefenum_t(struct _fx_rR18C_form__cdefenum_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR18C_form__cdefenum_t, _fx_free_R18C_form__cdefenum_t); } static int _fx_make_rR18C_form__cdefenum_t( struct _fx_R18C_form__cdefenum_t* arg, struct _fx_rR18C_form__cdefenum_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR18C_form__cdefenum_t, _fx_copy_R18C_form__cdefenum_t); } static void _fx_free_R19C_form__cdefmacro_t(struct _fx_R19C_form__cdefmacro_t* dst) { fx_free_str(&dst->cm_cname); fx_free_list_simple(&dst->cm_args); _fx_free_LN15C_form__cstmt_t(&dst->cm_body); fx_free_list_simple(&dst->cm_scope); } static void _fx_copy_R19C_form__cdefmacro_t(struct _fx_R19C_form__cdefmacro_t* src, struct _fx_R19C_form__cdefmacro_t* dst) { dst->cm_name = src->cm_name; fx_copy_str(&src->cm_cname, &dst->cm_cname); FX_COPY_PTR(src->cm_args, &dst->cm_args); FX_COPY_PTR(src->cm_body, &dst->cm_body); FX_COPY_PTR(src->cm_scope, &dst->cm_scope); dst->cm_loc = src->cm_loc; } static void _fx_make_R19C_form__cdefmacro_t( struct _fx_R9Ast__id_t* r_cm_name, fx_str_t* r_cm_cname, struct _fx_LR9Ast__id_t_data_t* r_cm_args, struct _fx_LN15C_form__cstmt_t_data_t* r_cm_body, struct _fx_LN12Ast__scope_t_data_t* r_cm_scope, struct _fx_R10Ast__loc_t* r_cm_loc, struct _fx_R19C_form__cdefmacro_t* fx_result) { fx_result->cm_name = *r_cm_name; fx_copy_str(r_cm_cname, &fx_result->cm_cname); FX_COPY_PTR(r_cm_args, &fx_result->cm_args); FX_COPY_PTR(r_cm_body, &fx_result->cm_body); 
FX_COPY_PTR(r_cm_scope, &fx_result->cm_scope); fx_result->cm_loc = *r_cm_loc; } static void _fx_free_rR19C_form__cdefmacro_t(struct _fx_rR19C_form__cdefmacro_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR19C_form__cdefmacro_t, _fx_free_R19C_form__cdefmacro_t); } static int _fx_make_rR19C_form__cdefmacro_t( struct _fx_R19C_form__cdefmacro_t* arg, struct _fx_rR19C_form__cdefmacro_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR19C_form__cdefmacro_t, _fx_copy_R19C_form__cdefmacro_t); } static void _fx_free_T2N14C_form__cexp_tLN15C_form__cstmt_t(struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_LN15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T2N14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* src, struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2N14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_LN15C_form__cstmt_t_data_t* t1, struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2N14C_form__cexp_tLN15C_form__cstmt_t(struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t, _fx_free_T2N14C_form__cexp_tLN15C_form__cstmt_t); } static int _fx_cons_LT2N14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* hd, struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl, bool addref_tl, struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t, _fx_copy_T2N14C_form__cexp_tLN15C_form__cstmt_t); } static void _fx_free_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t( struct 
_fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_LT2N14C_form__cexp_tLN15C_form__cstmt_t(&dst->t0); _fx_free_LN15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* t0, struct _fx_LN15C_form__cstmt_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_N15C_form__cstmt_t(struct _fx_N15C_form__cstmt_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 2: _fx_free_T2SR10Ast__loc_t(&(*dst)->u.CComment); break; case 3: _fx_free_N14C_form__cexp_t(&(*dst)->u.CExp); break; case 6: _fx_free_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t(&(*dst)->u.CStmtReturn); break; case 7: _fx_free_T2LN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtBlock); break; case 8: _fx_free_T2R9Ast__id_tN15C_form__cstmt_t(&(*dst)->u.CStmtSync); break; case 9: _fx_free_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtIf); break; case 12: _fx_free_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( &(*dst)->u.CStmtFor); break; case 13: _fx_free_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtWhile); break; case 14: _fx_free_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t(&(*dst)->u.CStmtDoWhile); break; case 15: 
_fx_free_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtSwitch); break; case 16: _fx_free_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t(&(*dst)->u.CDefVal); break; case 17: _fx_free_rR17C_form__cdeffun_t(&(*dst)->u.CDefFun); break; case 18: _fx_free_rR17C_form__cdeftyp_t(&(*dst)->u.CDefTyp); break; case 21: _fx_free_rR18C_form__cdefenum_t(&(*dst)->u.CDefEnum); break; case 22: _fx_free_rR23C_form__cdefinterface_t(&(*dst)->u.CDefInterface); break; case 23: _fx_free_rR19C_form__cdefmacro_t(&(*dst)->u.CMacroDef); break; case 25: _fx_free_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CMacroIf); break; case 26: _fx_free_T2SR10Ast__loc_t(&(*dst)->u.CMacroInclude); break; case 27: _fx_free_T2SR10Ast__loc_t(&(*dst)->u.CMacroPragma); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_R17C_form__cdefval_t(struct _fx_R17C_form__cdefval_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->cv_typ); fx_free_str(&dst->cv_cname); _fx_free_R16Ast__val_flags_t(&dst->cv_flags); } static void _fx_copy_R17C_form__cdefval_t(struct _fx_R17C_form__cdefval_t* src, struct _fx_R17C_form__cdefval_t* dst) { dst->cv_name = src->cv_name; FX_COPY_PTR(src->cv_typ, &dst->cv_typ); fx_copy_str(&src->cv_cname, &dst->cv_cname); _fx_copy_R16Ast__val_flags_t(&src->cv_flags, &dst->cv_flags); dst->cv_loc = src->cv_loc; } static void _fx_make_R17C_form__cdefval_t( struct _fx_R9Ast__id_t* r_cv_name, struct _fx_N14C_form__ctyp_t_data_t* r_cv_typ, fx_str_t* r_cv_cname, struct _fx_R16Ast__val_flags_t* r_cv_flags, struct _fx_R10Ast__loc_t* r_cv_loc, struct _fx_R17C_form__cdefval_t* fx_result) { fx_result->cv_name = *r_cv_name; FX_COPY_PTR(r_cv_typ, &fx_result->cv_typ); fx_copy_str(r_cv_cname, &fx_result->cv_cname); _fx_copy_R16Ast__val_flags_t(r_cv_flags, &fx_result->cv_flags); fx_result->cv_loc = *r_cv_loc; } static void _fx_free_R19C_form__cdeflabel_t(struct _fx_R19C_form__cdeflabel_t* dst) { 
fx_free_str(&dst->cl_cname); } static void _fx_copy_R19C_form__cdeflabel_t(struct _fx_R19C_form__cdeflabel_t* src, struct _fx_R19C_form__cdeflabel_t* dst) { dst->cl_name = src->cl_name; fx_copy_str(&src->cl_cname, &dst->cl_cname); dst->cl_loc = src->cl_loc; } static void _fx_make_R19C_form__cdeflabel_t( struct _fx_R9Ast__id_t* r_cl_name, fx_str_t* r_cl_cname, struct _fx_R10Ast__loc_t* r_cl_loc, struct _fx_R19C_form__cdeflabel_t* fx_result) { fx_result->cl_name = *r_cl_name; fx_copy_str(r_cl_cname, &fx_result->cl_cname); fx_result->cl_loc = *r_cl_loc; } static void _fx_free_R17C_form__cdefexn_t(struct _fx_R17C_form__cdefexn_t* dst) { fx_free_str(&dst->cexn_cname); fx_free_str(&dst->cexn_base_cname); _fx_free_N14C_form__ctyp_t(&dst->cexn_typ); fx_free_list_simple(&dst->cexn_scope); } static void _fx_copy_R17C_form__cdefexn_t(struct _fx_R17C_form__cdefexn_t* src, struct _fx_R17C_form__cdefexn_t* dst) { dst->cexn_name = src->cexn_name; fx_copy_str(&src->cexn_cname, &dst->cexn_cname); fx_copy_str(&src->cexn_base_cname, &dst->cexn_base_cname); FX_COPY_PTR(src->cexn_typ, &dst->cexn_typ); dst->cexn_std = src->cexn_std; dst->cexn_tag = src->cexn_tag; dst->cexn_data = src->cexn_data; dst->cexn_info = src->cexn_info; dst->cexn_make = src->cexn_make; FX_COPY_PTR(src->cexn_scope, &dst->cexn_scope); dst->cexn_loc = src->cexn_loc; } static void _fx_make_R17C_form__cdefexn_t( struct _fx_R9Ast__id_t* r_cexn_name, fx_str_t* r_cexn_cname, fx_str_t* r_cexn_base_cname, struct _fx_N14C_form__ctyp_t_data_t* r_cexn_typ, bool r_cexn_std, struct _fx_R9Ast__id_t* r_cexn_tag, struct _fx_R9Ast__id_t* r_cexn_data, struct _fx_R9Ast__id_t* r_cexn_info, struct _fx_R9Ast__id_t* r_cexn_make, struct _fx_LN12Ast__scope_t_data_t* r_cexn_scope, struct _fx_R10Ast__loc_t* r_cexn_loc, struct _fx_R17C_form__cdefexn_t* fx_result) { fx_result->cexn_name = *r_cexn_name; fx_copy_str(r_cexn_cname, &fx_result->cexn_cname); fx_copy_str(r_cexn_base_cname, &fx_result->cexn_base_cname); FX_COPY_PTR(r_cexn_typ, 
&fx_result->cexn_typ); fx_result->cexn_std = r_cexn_std; fx_result->cexn_tag = *r_cexn_tag; fx_result->cexn_data = *r_cexn_data; fx_result->cexn_info = *r_cexn_info; fx_result->cexn_make = *r_cexn_make; FX_COPY_PTR(r_cexn_scope, &fx_result->cexn_scope); fx_result->cexn_loc = *r_cexn_loc; } static void _fx_free_rR17C_form__cdefexn_t(struct _fx_rR17C_form__cdefexn_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR17C_form__cdefexn_t, _fx_free_R17C_form__cdefexn_t); } static int _fx_make_rR17C_form__cdefexn_t( struct _fx_R17C_form__cdefexn_t* arg, struct _fx_rR17C_form__cdefexn_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR17C_form__cdefexn_t, _fx_copy_R17C_form__cdefexn_t); } static void _fx_free_N15C_form__cinfo_t(struct _fx_N15C_form__cinfo_t* dst) { switch (dst->tag) { case 2: _fx_free_R17C_form__cdefval_t(&dst->u.CVal); break; case 3: _fx_free_rR17C_form__cdeffun_t(&dst->u.CFun); break; case 4: _fx_free_rR17C_form__cdeftyp_t(&dst->u.CTyp); break; case 5: _fx_free_rR17C_form__cdefexn_t(&dst->u.CExn); break; case 6: _fx_free_rR23C_form__cdefinterface_t(&dst->u.CInterface); break; case 7: _fx_free_rR18C_form__cdefenum_t(&dst->u.CEnum); break; case 8: _fx_free_R19C_form__cdeflabel_t(&dst->u.CLabel); break; case 9: _fx_free_rR19C_form__cdefmacro_t(&dst->u.CMacro); break; default: ; } dst->tag = 0; } static void _fx_copy_N15C_form__cinfo_t(struct _fx_N15C_form__cinfo_t* src, struct _fx_N15C_form__cinfo_t* dst) { dst->tag = src->tag; switch (src->tag) { case 2: _fx_copy_R17C_form__cdefval_t(&src->u.CVal, &dst->u.CVal); break; case 3: FX_COPY_PTR(src->u.CFun, &dst->u.CFun); break; case 4: FX_COPY_PTR(src->u.CTyp, &dst->u.CTyp); break; case 5: FX_COPY_PTR(src->u.CExn, &dst->u.CExn); break; case 6: FX_COPY_PTR(src->u.CInterface, &dst->u.CInterface); break; case 7: FX_COPY_PTR(src->u.CEnum, &dst->u.CEnum); break; case 8: _fx_copy_R19C_form__cdeflabel_t(&src->u.CLabel, &dst->u.CLabel); break; case 9: FX_COPY_PTR(src->u.CMacro, &dst->u.CMacro); break; default: dst->u = src->u; } } 
static void _fx_free_T3SiN13C_pp__assoc_t(struct _fx_T3SiN13C_pp__assoc_t* dst) { fx_free_str(&dst->t0); } static void _fx_copy_T3SiN13C_pp__assoc_t(struct _fx_T3SiN13C_pp__assoc_t* src, struct _fx_T3SiN13C_pp__assoc_t* dst) { fx_copy_str(&src->t0, &dst->t0); dst->t1 = src->t1; dst->t2 = src->t2; } static void _fx_make_T3SiN13C_pp__assoc_t( fx_str_t* t0, int_ t1, struct _fx_N13C_pp__assoc_t* t2, struct _fx_T3SiN13C_pp__assoc_t* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_result->t1 = t1; fx_result->t2 = *t2; } _fx_Nt6option1R9Ast__id_t _fx_g10C_pp__None = { 1 }; _fx_N19C_form__ctyp_attr_t _fx_g15C_pp__CTypConst = { 1 }; _fx_N19C_form__ctyp_attr_t _fx_g18C_pp__CTypVolatile = { 2 }; _fx_N19C_form__ctyp_attr_t _fx_g16C_pp__CTypStatic = { 3 }; _fx_N13C_pp__assoc_t _fx_g15C_pp__AssocLeft = { 1 }; _fx_N13C_pp__assoc_t _fx_g16C_pp__AssocRight = { 2 }; FX_EXTERN_C int _fx_M3AstFM6__eq__B2RM4id_tRM4id_t( struct _fx_R9Ast__id_t* a, struct _fx_R9Ast__id_t* b, bool* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M6C_formFM7idc2strS2R9Ast__id_tR10Ast__loc_t( struct _fx_R9Ast__id_t* n, struct _fx_R10Ast__loc_t* loc, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM3strv2RM1tS(struct _fx_R5PP__t* pp, fx_str_t* s, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM5beginv1RM1t(struct _fx_R5PP__t* pp, void* fx_fv); FX_EXTERN_C int _fx_M6C_formFM9ctyp2str_S2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14C_form__ctyp_t_data_t* t, struct _fx_R10Ast__loc_t* loc, fx_str_t* fx_result, void* fx_fv); static int _fx_M4C_ppFM9pr_id_optv3Nt6option1R9Ast__id_tR10Ast__loc_tR5PP__t( struct _fx_Nt6option1R9Ast__id_t* id_opt_0, struct _fx_R10Ast__loc_t* loc_0, struct _fx_R5PP__t* pp_0, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM11compile_errE2RM5loc_tS( struct _fx_R10Ast__loc_t* loc, fx_str_t* msg, fx_exn_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM5spacev1RM1t(struct _fx_R5PP__t* pp, void* fx_fv); static int _fx_M4C_ppFM10pr_id_opt_v4BNt6option1R9Ast__id_tR10Ast__loc_tR5PP__t( bool 
add_space_0, struct _fx_Nt6option1R9Ast__id_t* id_opt_0, struct _fx_R10Ast__loc_t* loc_0, struct _fx_R5PP__t* pp_0, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM3cutv1RM1t(struct _fx_R5PP__t* pp, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM3endv1RM1t(struct _fx_R5PP__t* pp, void* fx_fv); static int _fx_M4C_ppFM9pr_structv7SNt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_tSNt6option1R9Ast__id_tR10Ast__loc_tR5PP__t( fx_str_t* prefix_0, struct _fx_Nt6option1R9Ast__id_t* n_opt_0, struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* elems_0, fx_str_t* suffix_0, struct _fx_Nt6option1R9Ast__id_t* id_opt_0, struct _fx_R10Ast__loc_t* loc_0, struct _fx_R5PP__t* pp_0, void* fx_fv); FX_EXTERN_C int _fx_M6C_formFM6cinfo_N15C_form__cinfo_t2R9Ast__id_tR10Ast__loc_t( struct _fx_R9Ast__id_t* i, struct _fx_R10Ast__loc_t* loc, struct _fx_N15C_form__cinfo_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM7newlinev1RM1t(struct _fx_R5PP__t* pp, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM8newlineuv1RM1t(struct _fx_R5PP__t* pp, void* fx_fv); FX_EXTERN_C int _fx_F3ordi1C(char_ c, int_* fx_result, void* fx_fv); FX_EXTERN_C int _fx_F6stringS1i(int_ a, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M6StringFM5splitLS3SCB( fx_str_t* s, char_ c, bool allow_empty, struct _fx_LS_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M6StringFM7escapedS2SB(fx_str_t* s, bool quotes, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C bool _fx_M6StringFM8endswithB2SC(fx_str_t* s, char_ suffix, void* fx_fv); FX_EXTERN_C int _fx_M6K_formFM8klit2strS3N14K_form__klit_tBR10Ast__loc_t( struct _fx_N14K_form__klit_t* lit, bool cmode, struct _fx_R10Ast__loc_t* loc, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M4C_ppFM8pp_elistv2R5PP__tLN14C_form__cexp_t( struct _fx_R5PP__t* pp_0, struct _fx_LN14C_form__cexp_t_data_t* el_0, void* fx_fv); FX_EXTERN_C int _fx_M6StringFM5stripS1S(fx_str_t* s, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM6beginvv1RM1t(struct _fx_R5PP__t* pp, void* fx_fv); 
FX_EXTERN_C int _fx_M2PPFM6break0v1RM1t(struct _fx_R5PP__t* pp, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM6beginvv2RM1ti(struct _fx_R5PP__t* pp, int_ indent, void* fx_fv); FX_EXTERN_C int _fx_M4C_ppFM9pp_cstmt_v2R5PP__tN15C_form__cstmt_t( struct _fx_R5PP__t* pp_0, struct _fx_N15C_form__cstmt_t_data_t* s_0, void* fx_fv); FX_EXTERN_C_VAL(struct _fx_R18Options__options_t _fx_g12Options__opt) FX_EXTERN_C_VAL(struct _fx_R9Ast__id_t _fx_g9Ast__noid) FX_EXTERN_C int _fx_M3AstFM2ppS1RM4id_t(struct _fx_R9Ast__id_t* i, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M6StringFM7replaceS3SSS( fx_str_t* s, fx_str_t* substr, fx_str_t* new_substr, fx_str_t* fx_result, void* fx_fv); static int _fx_M4C_ppFM16print_cascade_ifv5SN14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR5PP__t( fx_str_t* prefix_0, struct _fx_N14C_form__cexp_t_data_t* e_0, struct _fx_N15C_form__cstmt_t_data_t* s1_0, struct _fx_N15C_form__cstmt_t_data_t* s2_0, struct _fx_R5PP__t* pp_0, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM6breakuv1RM1t(struct _fx_R5PP__t* pp, void* fx_fv); FX_EXTERN_C int _fx_F7__mul__S2Ci(char_ c, int_ n, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M6C_formFM13get_idc_cnameS2R9Ast__id_tR10Ast__loc_t( struct _fx_R9Ast__id_t* i, struct _fx_R10Ast__loc_t* loc, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM21pprint_to_string_listRM1t2ii( int_ margin, int_ default_indent, struct _fx_R5PP__t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M2PPFM5flushv1RM1t(struct _fx_R5PP__t* pp, void* fx_fv); FX_EXTERN_C int _fx_F12join_embraceS4SSSLS( fx_str_t* begin, fx_str_t* end, fx_str_t* sep, struct _fx_LS_data_t* strs, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C void _fx_M4C_ppFM4SomeNt6option1R9Ast__id_t1R9Ast__id_t( struct _fx_R9Ast__id_t* arg0, struct _fx_Nt6option1R9Ast__id_t* fx_result) { fx_result->tag = 2; fx_result->u.Some = *arg0; } FX_EXTERN_C int _fx_M4C_ppFM6__ne__B2R9Ast__id_tR9Ast__id_t( struct _fx_R9Ast__id_t* a_0, struct _fx_R9Ast__id_t* b_0, bool* 
fx_result, void* fx_fv) { int fx_status = 0; bool v_0; FX_CALL(_fx_M3AstFM6__eq__B2RM4id_tRM4id_t(a_0, b_0, &v_0, 0), _fx_cleanup); *fx_result = !v_0; _fx_cleanup: ; return fx_status; } FX_EXTERN_C int_ _fx_M4C_ppFM6lengthi1LN15C_form__cstmt_t(struct _fx_LN15C_form__cstmt_t_data_t* l, void* fx_fv) { return fx_list_length(l); } FX_EXTERN_C int_ _fx_M4C_ppFM6lengthi1LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t( struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* l, void* fx_fv) { return fx_list_length(l); } FX_EXTERN_C int_ _fx_M4C_ppFM6lengthi1LS(struct _fx_LS_data_t* l, void* fx_fv) { return fx_list_length(l); } FX_EXTERN_C int_ _fx_M4C_ppFM6lengthi1LN14C_form__ctyp_t(struct _fx_LN14C_form__ctyp_t_data_t* l, void* fx_fv) { return fx_list_length(l); } FX_EXTERN_C int _fx_M4C_ppFM6stringS1S(fx_str_t* a_0, fx_str_t* fx_result, void* fx_fv) { int fx_status = 0; fx_copy_str(a_0, fx_result); return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM8length1_i1LN15C_form__cstmt_t( struct _fx_LN15C_form__cstmt_t_data_t* l_0, int_* fx_result, void* fx_fv) { int fx_status = 0; *fx_result = _fx_M4C_ppFM6lengthi1LN15C_form__cstmt_t(l_0, 0); return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM8length1_i1LS(struct _fx_LS_data_t* l_0, int_* fx_result, void* fx_fv) { int fx_status = 0; *fx_result = _fx_M4C_ppFM6lengthi1LS(l_0, 0); return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM8length1_i1LN14C_form__ctyp_t( struct _fx_LN14C_form__ctyp_t_data_t* l_0, int_* fx_result, void* fx_fv) { int fx_status = 0; *fx_result = _fx_M4C_ppFM6lengthi1LN14C_form__ctyp_t(l_0, 0); return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM3memB2LN19C_form__ctyp_attr_tN19C_form__ctyp_attr_t( struct _fx_LN19C_form__ctyp_attr_t_data_t* l_0, struct _fx_N19C_form__ctyp_attr_t* a_0, bool* fx_result, void* fx_fv) { int fx_status = 0; bool __fold_result___0 = false; _fx_LN19C_form__ctyp_attr_t lst_0 = l_0; for (; lst_0; lst_0 = lst_0->tl) { _fx_N19C_form__ctyp_attr_t* b_0 = &lst_0->hd; if (a_0->tag == 
b_0->tag) { __fold_result___0 = true; FX_BREAK(_fx_catch_0); } _fx_catch_0: ; FX_CHECK_BREAK(); FX_CHECK_EXN(_fx_cleanup); } *fx_result = __fold_result___0; _fx_cleanup: ; return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM10binop2str_T3SiN13C_pp__assoc_t1N17C_form__cbinary_t( struct _fx_N17C_form__cbinary_t* bop_0, struct _fx_T3SiN13C_pp__assoc_t* fx_result, void* fx_fv) { int fx_status = 0; int tag_0 = bop_0->tag; if (tag_0 == 14) { fx_str_t slit_0 = FX_MAKE_STR(""); _fx_make_T3SiN13C_pp__assoc_t(&slit_0, 1400, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } if (tag_0 == 3) { fx_str_t slit_1 = FX_MAKE_STR("*"); _fx_make_T3SiN13C_pp__assoc_t(&slit_1, 1200, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } if (tag_0 == 4) { fx_str_t slit_2 = FX_MAKE_STR("/"); _fx_make_T3SiN13C_pp__assoc_t(&slit_2, 1200, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } if (tag_0 == 5) { fx_str_t slit_3 = FX_MAKE_STR("%"); _fx_make_T3SiN13C_pp__assoc_t(&slit_3, 1200, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } if (tag_0 == 1) { fx_str_t slit_4 = FX_MAKE_STR("+"); _fx_make_T3SiN13C_pp__assoc_t(&slit_4, 1100, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } if (tag_0 == 2) { fx_str_t slit_5 = FX_MAKE_STR("-"); _fx_make_T3SiN13C_pp__assoc_t(&slit_5, 1100, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } if (tag_0 == 6) { fx_str_t slit_6 = FX_MAKE_STR("<<"); _fx_make_T3SiN13C_pp__assoc_t(&slit_6, 1000, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } if (tag_0 == 7) { fx_str_t slit_7 = FX_MAKE_STR(">>"); _fx_make_T3SiN13C_pp__assoc_t(&slit_7, 1000, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } if (tag_0 == 13) { if (bop_0->u.COpCmp.tag == 3) { fx_str_t slit_8 = FX_MAKE_STR("<"); _fx_make_T3SiN13C_pp__assoc_t(&slit_8, 900, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } } if (tag_0 == 13) { if (bop_0->u.COpCmp.tag == 4) { fx_str_t slit_9 = FX_MAKE_STR("<="); 
_fx_make_T3SiN13C_pp__assoc_t(&slit_9, 900, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } } if (tag_0 == 13) { if (bop_0->u.COpCmp.tag == 6) { fx_str_t slit_10 = FX_MAKE_STR(">"); _fx_make_T3SiN13C_pp__assoc_t(&slit_10, 900, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } } if (tag_0 == 13) { if (bop_0->u.COpCmp.tag == 5) { fx_str_t slit_11 = FX_MAKE_STR(">="); _fx_make_T3SiN13C_pp__assoc_t(&slit_11, 900, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } } if (tag_0 == 13) { if (bop_0->u.COpCmp.tag == 1) { fx_str_t slit_12 = FX_MAKE_STR("=="); _fx_make_T3SiN13C_pp__assoc_t(&slit_12, 800, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } } if (tag_0 == 13) { if (bop_0->u.COpCmp.tag == 2) { fx_str_t slit_13 = FX_MAKE_STR("!="); _fx_make_T3SiN13C_pp__assoc_t(&slit_13, 800, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } } if (tag_0 == 8) { fx_str_t slit_14 = FX_MAKE_STR("&"); _fx_make_T3SiN13C_pp__assoc_t(&slit_14, 700, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } if (tag_0 == 10) { fx_str_t slit_15 = FX_MAKE_STR("^"); _fx_make_T3SiN13C_pp__assoc_t(&slit_15, 600, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } if (tag_0 == 9) { fx_str_t slit_16 = FX_MAKE_STR("|"); _fx_make_T3SiN13C_pp__assoc_t(&slit_16, 500, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } if (tag_0 == 11) { fx_str_t slit_17 = FX_MAKE_STR("&&"); _fx_make_T3SiN13C_pp__assoc_t(&slit_17, 400, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } if (tag_0 == 12) { fx_str_t slit_18 = FX_MAKE_STR("||"); _fx_make_T3SiN13C_pp__assoc_t(&slit_18, 300, &_fx_g15C_pp__AssocLeft, fx_result); goto _fx_endmatch_0; } if (tag_0 == 15) { fx_str_t slit_19 = FX_MAKE_STR("="); _fx_make_T3SiN13C_pp__assoc_t(&slit_19, 100, &_fx_g16C_pp__AssocRight, fx_result); goto _fx_endmatch_0; } if (tag_0 == 16) { fx_str_t slit_20 = FX_MAKE_STR("+="); _fx_make_T3SiN13C_pp__assoc_t(&slit_20, 100, &_fx_g16C_pp__AssocRight, fx_result); 
goto _fx_endmatch_0; } if (tag_0 == 17) { fx_str_t slit_21 = FX_MAKE_STR("-="); _fx_make_T3SiN13C_pp__assoc_t(&slit_21, 100, &_fx_g16C_pp__AssocRight, fx_result); goto _fx_endmatch_0; } if (tag_0 == 18) { fx_str_t slit_22 = FX_MAKE_STR("*="); _fx_make_T3SiN13C_pp__assoc_t(&slit_22, 100, &_fx_g16C_pp__AssocRight, fx_result); goto _fx_endmatch_0; } if (tag_0 == 19) { fx_str_t slit_23 = FX_MAKE_STR("/="); _fx_make_T3SiN13C_pp__assoc_t(&slit_23, 100, &_fx_g16C_pp__AssocRight, fx_result); goto _fx_endmatch_0; } if (tag_0 == 20) { fx_str_t slit_24 = FX_MAKE_STR("%="); _fx_make_T3SiN13C_pp__assoc_t(&slit_24, 100, &_fx_g16C_pp__AssocRight, fx_result); goto _fx_endmatch_0; } if (tag_0 == 21) { fx_str_t slit_25 = FX_MAKE_STR("<<="); _fx_make_T3SiN13C_pp__assoc_t(&slit_25, 100, &_fx_g16C_pp__AssocRight, fx_result); goto _fx_endmatch_0; } if (tag_0 == 22) { fx_str_t slit_26 = FX_MAKE_STR(">>="); _fx_make_T3SiN13C_pp__assoc_t(&slit_26, 100, &_fx_g16C_pp__AssocRight, fx_result); goto _fx_endmatch_0; } if (tag_0 == 23) { fx_str_t slit_27 = FX_MAKE_STR("&="); _fx_make_T3SiN13C_pp__assoc_t(&slit_27, 100, &_fx_g16C_pp__AssocRight, fx_result); goto _fx_endmatch_0; } if (tag_0 == 24) { fx_str_t slit_28 = FX_MAKE_STR("|="); _fx_make_T3SiN13C_pp__assoc_t(&slit_28, 100, &_fx_g16C_pp__AssocRight, fx_result); goto _fx_endmatch_0; } if (tag_0 == 25) { fx_str_t slit_29 = FX_MAKE_STR("^="); _fx_make_T3SiN13C_pp__assoc_t(&slit_29, 100, &_fx_g16C_pp__AssocRight, fx_result); goto _fx_endmatch_0; } FX_FAST_THROW(FX_EXN_NoMatchError, _fx_cleanup); _fx_endmatch_0: ; _fx_cleanup: ; return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM9unop2str_T3SiN13C_pp__assoc_t1N16C_form__cunary_t( struct _fx_N16C_form__cunary_t* uop_0, struct _fx_T3SiN13C_pp__assoc_t* fx_result, void* fx_fv) { int fx_status = 0; int tag_0 = uop_0->tag; if (tag_0 == 1) { fx_str_t slit_0 = FX_MAKE_STR("+"); _fx_make_T3SiN13C_pp__assoc_t(&slit_0, 1300, &_fx_g16C_pp__AssocRight, fx_result); } else if (tag_0 == 2) { fx_str_t slit_1 = 
FX_MAKE_STR("-"); _fx_make_T3SiN13C_pp__assoc_t(&slit_1, 1300, &_fx_g16C_pp__AssocRight, fx_result); } else if (tag_0 == 3) { fx_str_t slit_2 = FX_MAKE_STR("~"); _fx_make_T3SiN13C_pp__assoc_t(&slit_2, 1300, &_fx_g16C_pp__AssocRight, fx_result); } else if (tag_0 == 4) { fx_str_t slit_3 = FX_MAKE_STR("!"); _fx_make_T3SiN13C_pp__assoc_t(&slit_3, 1300, &_fx_g16C_pp__AssocRight, fx_result); } else if (tag_0 == 5) { fx_str_t slit_4 = FX_MAKE_STR("*"); _fx_make_T3SiN13C_pp__assoc_t(&slit_4, 1300, &_fx_g16C_pp__AssocRight, fx_result); } else if (tag_0 == 6) { fx_str_t slit_5 = FX_MAKE_STR("&"); _fx_make_T3SiN13C_pp__assoc_t(&slit_5, 1300, &_fx_g16C_pp__AssocRight, fx_result); } else if (tag_0 == 7) { fx_str_t slit_6 = FX_MAKE_STR("++"); _fx_make_T3SiN13C_pp__assoc_t(&slit_6, 1300, &_fx_g16C_pp__AssocRight, fx_result); } else if (tag_0 == 8) { fx_str_t slit_7 = FX_MAKE_STR("--"); _fx_make_T3SiN13C_pp__assoc_t(&slit_7, 1300, &_fx_g16C_pp__AssocRight, fx_result); } else if (tag_0 == 9) { fx_str_t slit_8 = FX_MAKE_STR("++"); _fx_make_T3SiN13C_pp__assoc_t(&slit_8, 1400, &_fx_g15C_pp__AssocLeft, fx_result); } else if (tag_0 == 10) { fx_str_t slit_9 = FX_MAKE_STR("--"); _fx_make_T3SiN13C_pp__assoc_t(&slit_9, 1400, &_fx_g15C_pp__AssocLeft, fx_result); } else { FX_FAST_THROW(FX_EXN_NoMatchError, _fx_cleanup); } _fx_cleanup: ; return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t( struct _fx_R5PP__t* pp_0, struct _fx_R9Ast__id_t* n_0, struct _fx_R10Ast__loc_t* loc_0, void* fx_fv) { fx_str_t v_0 = {0}; int fx_status = 0; FX_CALL(_fx_M6C_formFM7idc2strS2R9Ast__id_tR10Ast__loc_t(n_0, loc_0, &v_0, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_0, 0), _fx_cleanup); _fx_cleanup: ; FX_FREE_STR(&v_0); return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t( struct _fx_R5PP__t* pp_0, fx_str_t* prefix0_0, fx_str_t* suffix0_0, struct _fx_N14C_form__ctyp_t_data_t* t_0, struct 
_fx_Nt6option1R9Ast__id_t* id_opt_0, bool fwd_mode_0, struct _fx_R10Ast__loc_t* loc_0, void* fx_fv) { int fx_status = 0; FX_CALL(fx_check_stack(), _fx_cleanup); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_cleanup); int tag_0 = FX_REC_VARIANT_TAG(t_0); bool res_0; if (tag_0 == 1) { res_0 = true; } else if (tag_0 == 2) { res_0 = true; } else if (tag_0 == 3) { res_0 = true; } else if (tag_0 == 4) { res_0 = true; } else if (tag_0 == 5) { res_0 = true; } else if (tag_0 == 6) { res_0 = true; } else if (tag_0 == 11) { res_0 = true; } else if (tag_0 == 9) { res_0 = true; } else if (tag_0 == 8) { res_0 = true; } else if (tag_0 == 12) { res_0 = true; } else if (tag_0 == 10) { res_0 = true; } else if (tag_0 == 18) { res_0 = true; } else if (tag_0 == 19) { res_0 = true; } else { res_0 = false; } FX_CHECK_EXN(_fx_cleanup); if (res_0) { fx_str_t v_0 = {0}; FX_CALL(_fx_M6C_formFM9ctyp2str_S2N14C_form__ctyp_tR10Ast__loc_t(t_0, loc_0, &v_0, 0), _fx_catch_0); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_0, 0), _fx_catch_0); FX_CALL(_fx_M4C_ppFM9pr_id_optv3Nt6option1R9Ast__id_tR10Ast__loc_tR5PP__t(id_opt_0, loc_0, pp_0, 0), _fx_catch_0); _fx_catch_0: ; FX_FREE_STR(&v_0); goto _fx_endmatch_3; } if (tag_0 == 7) { fx_str_t slit_0 = FX_MAKE_STR("void"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_0, 0), _fx_catch_2); if (id_opt_0->tag == 2) { fx_str_t v_1 = {0}; fx_str_t v_2 = {0}; fx_str_t v_3 = {0}; fx_exn_t v_4 = {0}; FX_CALL(_fx_M6C_formFM7idc2strS2R9Ast__id_tR10Ast__loc_t(&id_opt_0->u.Some, loc_0, &v_1, 0), _fx_catch_1); FX_CALL(_fx_M4C_ppFM6stringS1S(&v_1, &v_2, 0), _fx_catch_1); fx_str_t slit_1 = FX_MAKE_STR("c_pp.ml: void cannot be used with id \'"); fx_str_t slit_2 = FX_MAKE_STR("\'"); { const fx_str_t strs_0[] = { slit_1, v_2, slit_2 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 3, &v_3), _fx_catch_1); } FX_CALL(_fx_M3AstFM11compile_errE2RM5loc_tS(loc_0, &v_3, &v_4, 0), _fx_catch_1); FX_THROW(&v_4, false, _fx_catch_1); _fx_catch_1: ; fx_free_exn(&v_4); FX_FREE_STR(&v_3); FX_FREE_STR(&v_2); 
FX_FREE_STR(&v_1); } FX_CHECK_EXN(_fx_catch_2); _fx_catch_2: ; goto _fx_endmatch_3; } if (tag_0 == 15) { _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* vcase_0 = &t_0->u.CTypFunRawPtr; _fx_LN14C_form__ctyp_t args_0 = vcase_0->t0; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_7); fx_str_t slit_3 = FX_MAKE_STR(""); fx_str_t slit_4 = FX_MAKE_STR(""); FX_CALL( _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t(pp_0, &slit_3, &slit_4, vcase_0->t1, &_fx_g10C_pp__None, true, loc_0, 0), _fx_catch_7); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_7); fx_str_t slit_5 = FX_MAKE_STR("(*"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_5, 0), _fx_catch_7); FX_CALL(_fx_M4C_ppFM10pr_id_opt_v4BNt6option1R9Ast__id_tR10Ast__loc_tR5PP__t(false, id_opt_0, loc_0, pp_0, 0), _fx_catch_7); fx_str_t slit_6 = FX_MAKE_STR(")("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_6, 0), _fx_catch_7); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_7); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_7); if (args_0 == 0) { fx_str_t slit_7 = FX_MAKE_STR("void"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_7, 0), _fx_catch_3); _fx_catch_3: ; goto _fx_endmatch_0; } if (args_0 != 0) { if (args_0->tl == 0) { fx_str_t slit_8 = FX_MAKE_STR(""); fx_str_t slit_9 = FX_MAKE_STR(""); FX_CALL( _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t(pp_0, &slit_8, &slit_9, args_0->hd, &_fx_g10C_pp__None, true, loc_0, 0), _fx_catch_4); _fx_catch_4: ; goto _fx_endmatch_0; } } _fx_LN14C_form__ctyp_t args_1 = 0; int_ nargs_0; FX_CALL(_fx_M4C_ppFM8length1_i1LN14C_form__ctyp_t(args_0, &nargs_0, 0), _fx_catch_6); int_ i_0 = 0; FX_COPY_PTR(args_0, &args_1); _fx_LN14C_form__ctyp_t lst_0 = args_1; for (; lst_0; lst_0 = lst_0->tl, i_0 += 1) { fx_str_t v_5 = {0}; _fx_N14C_form__ctyp_t ti_0 = lst_0->hd; bool last_0 = i_0 == nargs_0 - 1; if (last_0) { fx_str_t slit_10 = FX_MAKE_STR(""); fx_copy_str(&slit_10, &v_5); } else { fx_str_t slit_11 = FX_MAKE_STR(","); 
fx_copy_str(&slit_11, &v_5); } fx_str_t slit_12 = FX_MAKE_STR(""); FX_CALL( _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t(pp_0, &slit_12, &v_5, ti_0, &_fx_g10C_pp__None, true, loc_0, 0), _fx_catch_5); if (!last_0) { FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_5); } _fx_catch_5: ; FX_FREE_STR(&v_5); FX_CHECK_EXN(_fx_catch_6); } _fx_catch_6: ; if (args_1) { _fx_free_LN14C_form__ctyp_t(&args_1); } _fx_endmatch_0: ; FX_CHECK_EXN(_fx_catch_7); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_7); fx_str_t slit_13 = FX_MAKE_STR(")"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_13, 0), _fx_catch_7); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_7); _fx_catch_7: ; goto _fx_endmatch_3; } if (tag_0 == 13) { fx_str_t v_6 = {0}; _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* vcase_1 = &t_0->u.CTypStruct; fx_str_t slit_14 = FX_MAKE_STR("struct"); { const fx_str_t strs_1[] = { *prefix0_0, slit_14 }; FX_CALL(fx_strjoin(0, 0, 0, strs_1, 2, &v_6), _fx_catch_8); } fx_str_t slit_15 = FX_MAKE_STR(""); FX_CALL( _fx_M4C_ppFM9pr_structv7SNt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_tSNt6option1R9Ast__id_tR10Ast__loc_tR5PP__t( &v_6, &vcase_1->t0, vcase_1->t1, &slit_15, id_opt_0, loc_0, pp_0, 0), _fx_catch_8); _fx_catch_8: ; FX_FREE_STR(&v_6); goto _fx_endmatch_3; } if (tag_0 == 16) { _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* vcase_2 = &t_0->u.CTypRawPtr; if (vcase_2->t0 == 0) { _fx_N14C_form__ctyp_t v_7 = vcase_2->t1; if (FX_REC_VARIANT_TAG(v_7) == 13) { fx_str_t suffix_0 = {0}; fx_str_t v_8 = {0}; _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* vcase_3 = &v_7->u.CTypStruct; _fx_Nt6option1R9Ast__id_t* n_opt_0 = &vcase_3->t0; if (n_opt_0->tag == 2) { fx_str_t v_9 = {0}; FX_CALL(_fx_M6C_formFM7idc2strS2R9Ast__id_tR10Ast__loc_t(&n_opt_0->u.Some, loc_0, &v_9, 0), _fx_catch_9); fx_str_t slit_16 = FX_MAKE_STR(", *"); { const fx_str_t strs_2[] = { v_9, slit_16 }; FX_CALL(fx_strjoin(0, 0, 0, strs_2, 2, &suffix_0), 
_fx_catch_9); } _fx_catch_9: ; FX_FREE_STR(&v_9); } else { fx_str_t slit_17 = FX_MAKE_STR("*"); fx_copy_str(&slit_17, &suffix_0); } FX_CHECK_EXN(_fx_catch_10); fx_str_t slit_18 = FX_MAKE_STR("struct"); { const fx_str_t strs_3[] = { *prefix0_0, slit_18 }; FX_CALL(fx_strjoin(0, 0, 0, strs_3, 2, &v_8), _fx_catch_10); } FX_CALL( _fx_M4C_ppFM9pr_structv7SNt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_tSNt6option1R9Ast__id_tR10Ast__loc_tR5PP__t( &v_8, n_opt_0, vcase_3->t1, &suffix_0, id_opt_0, loc_0, pp_0, 0), _fx_catch_10); _fx_catch_10: ; FX_FREE_STR(&v_8); FX_FREE_STR(&suffix_0); goto _fx_endmatch_3; } } } if (tag_0 == 14) { fx_str_t v_10 = {0}; _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* vcase_4 = &t_0->u.CTypUnion; fx_str_t slit_19 = FX_MAKE_STR("union"); { const fx_str_t strs_4[] = { *prefix0_0, slit_19 }; FX_CALL(fx_strjoin(0, 0, 0, strs_4, 2, &v_10), _fx_catch_11); } fx_str_t slit_20 = FX_MAKE_STR(""); FX_CALL( _fx_M4C_ppFM9pr_structv7SNt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_tSNt6option1R9Ast__id_tR10Ast__loc_tR5PP__t( &v_10, &vcase_4->t0, vcase_4->t1, &slit_20, id_opt_0, loc_0, pp_0, 0), _fx_catch_11); _fx_catch_11: ; FX_FREE_STR(&v_10); goto _fx_endmatch_3; } if (tag_0 == 16) { _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* vcase_5 = &t_0->u.CTypRawPtr; _fx_LN19C_form__ctyp_attr_t attrs_0 = vcase_5->t0; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_12); bool v_11; FX_CALL(_fx_M4C_ppFM3memB2LN19C_form__ctyp_attr_tN19C_form__ctyp_attr_t(attrs_0, &_fx_g16C_pp__CTypStatic, &v_11, 0), _fx_catch_12); if (v_11) { fx_str_t slit_21 = FX_MAKE_STR("static "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_21, 0), _fx_catch_12); } bool v_12; FX_CALL(_fx_M4C_ppFM3memB2LN19C_form__ctyp_attr_tN19C_form__ctyp_attr_t(attrs_0, &_fx_g18C_pp__CTypVolatile, &v_12, 0), _fx_catch_12); if (v_12) { fx_str_t slit_22 = FX_MAKE_STR("volatile "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_22, 0), _fx_catch_12); } fx_str_t slit_23 = FX_MAKE_STR(""); fx_str_t 
slit_24 = FX_MAKE_STR(""); FX_CALL( _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t(pp_0, &slit_23, &slit_24, vcase_5->t1, &_fx_g10C_pp__None, fwd_mode_0, loc_0, 0), _fx_catch_12); fx_str_t slit_25 = FX_MAKE_STR("*"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_25, 0), _fx_catch_12); FX_CALL(_fx_M4C_ppFM9pr_id_optv3Nt6option1R9Ast__id_tR10Ast__loc_tR5PP__t(id_opt_0, loc_0, pp_0, 0), _fx_catch_12); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_12); _fx_catch_12: ; goto _fx_endmatch_3; } if (tag_0 == 17) { _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* vcase_6 = &t_0->u.CTypRawArray; _fx_LN19C_form__ctyp_attr_t attrs_1 = vcase_6->t0; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_13); bool v_13; FX_CALL(_fx_M4C_ppFM3memB2LN19C_form__ctyp_attr_tN19C_form__ctyp_attr_t(attrs_1, &_fx_g16C_pp__CTypStatic, &v_13, 0), _fx_catch_13); if (v_13) { fx_str_t slit_26 = FX_MAKE_STR("static "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_26, 0), _fx_catch_13); } bool v_14; FX_CALL(_fx_M4C_ppFM3memB2LN19C_form__ctyp_attr_tN19C_form__ctyp_attr_t(attrs_1, &_fx_g18C_pp__CTypVolatile, &v_14, 0), _fx_catch_13); if (v_14) { fx_str_t slit_27 = FX_MAKE_STR("volatile "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_27, 0), _fx_catch_13); } bool v_15; FX_CALL(_fx_M4C_ppFM3memB2LN19C_form__ctyp_attr_tN19C_form__ctyp_attr_t(attrs_1, &_fx_g15C_pp__CTypConst, &v_15, 0), _fx_catch_13); if (v_15) { fx_str_t slit_28 = FX_MAKE_STR("const "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_28, 0), _fx_catch_13); } fx_str_t slit_29 = FX_MAKE_STR(""); fx_str_t slit_30 = FX_MAKE_STR(""); FX_CALL( _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t(pp_0, &slit_29, &slit_30, vcase_6->t1, &_fx_g10C_pp__None, fwd_mode_0, loc_0, 0), _fx_catch_13); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_13); FX_CALL(_fx_M4C_ppFM10pr_id_opt_v4BNt6option1R9Ast__id_tR10Ast__loc_tR5PP__t(false, id_opt_0, loc_0, pp_0, 0), _fx_catch_13); fx_str_t slit_31 
= FX_MAKE_STR("[]"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_31, 0), _fx_catch_13); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_13); _fx_catch_13: ; goto _fx_endmatch_3; } if (tag_0 == 20) { _fx_R9Ast__id_t* n_0 = &t_0->u.CTypName; if (fwd_mode_0 == false) { FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, n_0, loc_0, 0), _fx_catch_14); _fx_catch_14: ; goto _fx_endmatch_2; } if (fwd_mode_0 == true) { if (n_0->m == 0) { FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, n_0, loc_0, 0), _fx_catch_15); _fx_catch_15: ; goto _fx_endmatch_2; } } _fx_N15C_form__cinfo_t v_16 = {0}; _fx_R17C_form__cdeftyp_t v_17 = {0}; _fx_R17C_form__cdeftyp_t v_18 = {0}; FX_CALL(_fx_M6C_formFM6cinfo_N15C_form__cinfo_t2R9Ast__id_tR10Ast__loc_t(n_0, loc_0, &v_16, 0), _fx_catch_20); int tag_1 = v_16.tag; if (tag_1 == 4) { _fx_copy_R17C_form__cdeftyp_t(&v_16.u.CTyp->data, &v_17); _fx_N14C_form__ctyp_t v_19 = v_17.ct_typ; if (FX_REC_VARIANT_TAG(v_19) == 16) { _fx_N14C_form__ctyp_t v_20 = v_19->u.CTypRawPtr.t1; if (FX_REC_VARIANT_TAG(v_20) == 13) { _fx_Nt6option1R9Ast__id_t* v_21 = &v_20->u.CTypStruct.t0; if (v_21->tag == 2) { fx_str_t slit_32 = FX_MAKE_STR("struct "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_32, 0), _fx_catch_16); FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, &v_21->u.Some, loc_0, 0), _fx_catch_16); fx_str_t slit_33 = FX_MAKE_STR("*"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_33, 0), _fx_catch_16); _fx_catch_16: ; goto _fx_endmatch_1; } } } } if (tag_1 == 4) { _fx_copy_R17C_form__cdeftyp_t(&v_16.u.CTyp->data, &v_18); _fx_N14C_form__ctyp_t v_22 = v_18.ct_typ; if (FX_REC_VARIANT_TAG(v_22) == 13) { _fx_Nt6option1R9Ast__id_t* v_23 = &v_22->u.CTypStruct.t0; if (v_23->tag == 2) { fx_str_t slit_34 = FX_MAKE_STR("struct "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_34, 0), _fx_catch_17); FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, &v_23->u.Some, loc_0, 0), _fx_catch_17); _fx_catch_17: ; goto 
_fx_endmatch_1; } } } if (tag_1 == 6) { _fx_R23C_form__cdefinterface_t v_24 = {0}; fx_str_t v_25 = {0}; fx_str_t v_26 = {0}; _fx_copy_R23C_form__cdefinterface_t(&v_16.u.CInterface->data, &v_24); FX_CALL(_fx_M4C_ppFM6stringS1S(&v_24.ci_cname, &v_25, 0), _fx_catch_18); fx_str_t slit_35 = FX_MAKE_STR("struct "); { const fx_str_t strs_5[] = { slit_35, v_25 }; FX_CALL(fx_strjoin(0, 0, 0, strs_5, 2, &v_26), _fx_catch_18); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_26, 0), _fx_catch_18); _fx_catch_18: ; FX_FREE_STR(&v_26); FX_FREE_STR(&v_25); _fx_free_R23C_form__cdefinterface_t(&v_24); goto _fx_endmatch_1; } if (FX_STR_LENGTH(*prefix0_0) != 0) { FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, prefix0_0, 0), _fx_catch_19); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_19); } FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, n_0, loc_0, 0), _fx_catch_19); _fx_catch_19: ; _fx_endmatch_1: ; FX_CHECK_EXN(_fx_catch_20); _fx_catch_20: ; _fx_free_R17C_form__cdeftyp_t(&v_18); _fx_free_R17C_form__cdeftyp_t(&v_17); _fx_free_N15C_form__cinfo_t(&v_16); _fx_endmatch_2: ; FX_CHECK_EXN(_fx_catch_21); FX_CALL(_fx_M4C_ppFM9pr_id_optv3Nt6option1R9Ast__id_tR10Ast__loc_tR5PP__t(id_opt_0, loc_0, pp_0, 0), _fx_catch_21); _fx_catch_21: ; goto _fx_endmatch_3; } if (tag_0 == 21) { fx_str_t slit_36 = FX_MAKE_STR("/*<label>*/"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_36, 0), _fx_catch_22); FX_CALL(_fx_M4C_ppFM9pr_id_optv3Nt6option1R9Ast__id_tR10Ast__loc_tR5PP__t(id_opt_0, loc_0, pp_0, 0), _fx_catch_22); _fx_catch_22: ; goto _fx_endmatch_3; } if (tag_0 == 22) { fx_str_t slit_37 = FX_MAKE_STR("void"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_37, 0), _fx_catch_23); FX_CALL(_fx_M4C_ppFM9pr_id_optv3Nt6option1R9Ast__id_tR10Ast__loc_tR5PP__t(id_opt_0, loc_0, pp_0, 0), _fx_catch_23); _fx_catch_23: ; goto _fx_endmatch_3; } FX_FAST_THROW(FX_EXN_NoMatchError, _fx_cleanup); _fx_endmatch_3: ; FX_CHECK_EXN(_fx_cleanup); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, suffix0_0, 0), _fx_cleanup); 
FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_cleanup); _fx_cleanup: ; return fx_status; } static int _fx_M4C_ppFM10pr_id_opt_v4BNt6option1R9Ast__id_tR10Ast__loc_tR5PP__t( bool add_space_0, struct _fx_Nt6option1R9Ast__id_t* id_opt_0, struct _fx_R10Ast__loc_t* loc_0, struct _fx_R5PP__t* pp_0, void* fx_fv) { int fx_status = 0; if (id_opt_0->tag == 2) { fx_str_t v_0 = {0}; if (add_space_0) { FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_0); } FX_CALL(_fx_M6C_formFM7idc2strS2R9Ast__id_tR10Ast__loc_t(&id_opt_0->u.Some, loc_0, &v_0, 0), _fx_catch_0); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_0, 0), _fx_catch_0); _fx_catch_0: ; FX_FREE_STR(&v_0); } return fx_status; } static int _fx_M4C_ppFM9pr_id_optv3Nt6option1R9Ast__id_tR10Ast__loc_tR5PP__t( struct _fx_Nt6option1R9Ast__id_t* id_opt_0, struct _fx_R10Ast__loc_t* loc_0, struct _fx_R5PP__t* pp_0, void* fx_fv) { int fx_status = 0; if (id_opt_0->tag == 2) { fx_str_t v_0 = {0}; FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_0); FX_CALL(_fx_M6C_formFM7idc2strS2R9Ast__id_tR10Ast__loc_t(&id_opt_0->u.Some, loc_0, &v_0, 0), _fx_catch_0); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_0, 0), _fx_catch_0); _fx_catch_0: ; FX_FREE_STR(&v_0); } return fx_status; } static int _fx_M4C_ppFM9pr_structv7SNt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_tSNt6option1R9Ast__id_tR10Ast__loc_tR5PP__t( fx_str_t* prefix_0, struct _fx_Nt6option1R9Ast__id_t* n_opt_0, struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* elems_0, fx_str_t* suffix_0, struct _fx_Nt6option1R9Ast__id_t* id_opt_0, struct _fx_R10Ast__loc_t* loc_0, struct _fx_R5PP__t* pp_0, void* fx_fv) { fx_str_t v_0 = {0}; fx_str_t v_1 = {0}; int fx_status = 0; FX_CALL(fx_check_stack(), _fx_cleanup); fx_str_t slit_0 = FX_MAKE_STR(" "); { const fx_str_t strs_0[] = { *prefix_0, slit_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 2, &v_0), _fx_cleanup); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_0, 0), _fx_cleanup); if (n_opt_0->tag == 2) { fx_str_t v_2 = {0}; 
FX_CALL(_fx_M6C_formFM7idc2strS2R9Ast__id_tR10Ast__loc_t(&n_opt_0->u.Some, loc_0, &v_2, 0), _fx_catch_0); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_2, 0), _fx_catch_0); fx_str_t slit_1 = FX_MAKE_STR(" "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_1, 0), _fx_catch_0); _fx_catch_0: ; FX_FREE_STR(&v_2); } FX_CHECK_EXN(_fx_cleanup); fx_str_t slit_2 = FX_MAKE_STR("{"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_2, 0), _fx_cleanup); _fx_LT2R9Ast__id_tN14C_form__ctyp_t lst_0 = elems_0; for (; lst_0; lst_0 = lst_0->tl) { _fx_N14C_form__ctyp_t ti_0 = 0; _fx_T2R9Ast__id_tN14C_form__ctyp_t* __pat___0 = &lst_0->hd; _fx_R9Ast__id_t ni_0 = __pat___0->t0; FX_COPY_PTR(__pat___0->t1, &ti_0); FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_1); int tag_0 = FX_REC_VARIANT_TAG(ti_0); bool need_nested_box_0; bool res_0; if (tag_0 == 13) { res_0 = true; goto _fx_endmatch_0; } if (tag_0 == 14) { res_0 = true; goto _fx_endmatch_0; } if (tag_0 == 16) { if (FX_REC_VARIANT_TAG(ti_0->u.CTypRawPtr.t1) == 13) { res_0 = true; goto _fx_endmatch_0; } } res_0 = false; _fx_endmatch_0: ; FX_CHECK_EXN(_fx_catch_1); if (res_0) { need_nested_box_0 = false; goto _fx_endmatch_1; } need_nested_box_0 = true; _fx_endmatch_1: ; FX_CHECK_EXN(_fx_catch_1); if (need_nested_box_0) { FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_1); } _fx_Nt6option1R9Ast__id_t v_3; _fx_M4C_ppFM4SomeNt6option1R9Ast__id_t1R9Ast__id_t(&ni_0, &v_3); fx_str_t slit_3 = FX_MAKE_STR(""); fx_str_t slit_4 = FX_MAKE_STR(";"); FX_CALL( _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t(pp_0, &slit_3, &slit_4, ti_0, &v_3, true, loc_0, 0), _fx_catch_1); if (need_nested_box_0) { FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_1); } _fx_catch_1: ; if (ti_0) { _fx_free_N14C_form__ctyp_t(&ti_0); } FX_CHECK_EXN(_fx_cleanup); } FX_CALL(_fx_M2PPFM8newlineuv1RM1t(pp_0, 0), _fx_cleanup); fx_str_t slit_5 = FX_MAKE_STR("} "); { const fx_str_t strs_1[] = { slit_5, *suffix_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_1, 2, 
&v_1), _fx_cleanup); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_1, 0), _fx_cleanup); if (id_opt_0->tag == 2) { fx_str_t v_4 = {0}; FX_CALL(_fx_M6C_formFM7idc2strS2R9Ast__id_tR10Ast__loc_t(&id_opt_0->u.Some, loc_0, &v_4, 0), _fx_catch_2); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_4, 0), _fx_catch_2); _fx_catch_2: ; FX_FREE_STR(&v_4); } _fx_cleanup: ; FX_FREE_STR(&v_0); FX_FREE_STR(&v_1); return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM8pp_ctyp_v4R5PP__tN14C_form__ctyp_tNt6option1R9Ast__id_tR10Ast__loc_t( struct _fx_R5PP__t* pp_0, struct _fx_N14C_form__ctyp_t_data_t* t_0, struct _fx_Nt6option1R9Ast__id_t* id_opt_0, struct _fx_R10Ast__loc_t* loc_0, void* fx_fv) { int fx_status = 0; fx_str_t slit_0 = FX_MAKE_STR(""); fx_str_t slit_1 = FX_MAKE_STR(""); FX_CALL( _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t(pp_0, &slit_0, &slit_1, t_0, id_opt_0, false, loc_0, 0), _fx_cleanup); _fx_cleanup: ; return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti( struct _fx_R5PP__t* pp_0, struct _fx_N14C_form__cexp_t_data_t* e_0, int_ pr_0, void* fx_fv) { int fx_status = 0; FX_CALL(fx_check_stack(), _fx_cleanup); int tag_0 = FX_REC_VARIANT_TAG(e_0); if (tag_0 == 1) { _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* vcase_0 = &e_0->u.CExpIdent; FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, &vcase_0->t0, &vcase_0->t1.t1, 0), _fx_catch_0); _fx_catch_0: ; goto _fx_endmatch_2; } if (tag_0 == 2) { _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* vcase_1 = &e_0->u.CExpLit; _fx_N14K_form__klit_t* l_0 = &vcase_1->t0; int tag_1 = l_0->tag; if (tag_1 == 8) { fx_str_t slit_0 = FX_MAKE_STR("0"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_0, 0), _fx_catch_1); _fx_catch_1: ; } else if (tag_1 == 6) { fx_str_t v_0 = {0}; fx_str_t v_1 = {0}; int_ v_2; FX_CALL(_fx_F3ordi1C(l_0->u.KLitChar, &v_2, 0), _fx_catch_2); FX_CALL(_fx_F6stringS1i(v_2, &v_0, 0), _fx_catch_2); fx_str_t slit_1 = FX_MAKE_STR("(char_)"); { const 
fx_str_t strs_0[] = { slit_1, v_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 2, &v_1), _fx_catch_2); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_1, 0), _fx_catch_2); _fx_catch_2: ; FX_FREE_STR(&v_1); FX_FREE_STR(&v_0); } else if (tag_1 == 5) { _fx_LS sl_0 = 0; fx_str_t v_3 = {0}; fx_str_t* s0_0 = &l_0->u.KLitString; FX_CALL(_fx_M6StringFM5splitLS3SCB(s0_0, (char_)10, true, &sl_0, 0), _fx_catch_4); if (sl_0 == 0) { FX_CALL(_fx_M6StringFM7escapedS2SB(s0_0, true, &v_3, 0), _fx_catch_4); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_3, 0), _fx_catch_4); } else { int_ n_0; FX_CALL(_fx_M4C_ppFM8length1_i1LS(sl_0, &n_0, 0), _fx_catch_4); int_ i_0 = 0; _fx_LS lst_0 = sl_0; for (; lst_0; lst_0 = lst_0->tl, i_0 += 1) { fx_str_t s_0 = {0}; fx_str_t s_1 = {0}; fx_str_t v_4 = {0}; fx_str_t* s_2 = &lst_0->hd; bool v_5; if (i_0 < n_0 - 1) { v_5 = true; } else { v_5 = _fx_M6StringFM8endswithB2SC(s0_0, (char_)10, 0); } if (v_5) { fx_str_t slit_2 = FX_MAKE_STR("\n"); { const fx_str_t strs_1[] = { *s_2, slit_2 }; FX_CALL(fx_strjoin(0, 0, 0, strs_1, 2, &s_0), _fx_catch_3); } } else { fx_copy_str(s_2, &s_0); } FX_CALL(_fx_M6StringFM7escapedS2SB(&s_0, true, &s_1, 0), _fx_catch_3); if (i_0 == 0) { FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &s_1, 0), _fx_catch_3); } else { FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_3); fx_str_t slit_3 = FX_MAKE_STR("U"); { const fx_str_t strs_2[] = { slit_3, s_1 }; FX_CALL(fx_strjoin(0, 0, 0, strs_2, 2, &v_4), _fx_catch_3); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_4, 0), _fx_catch_3); } _fx_catch_3: ; FX_FREE_STR(&v_4); FX_FREE_STR(&s_1); FX_FREE_STR(&s_0); FX_CHECK_EXN(_fx_catch_4); } } _fx_catch_4: ; FX_FREE_STR(&v_3); if (sl_0) { _fx_free_LS(&sl_0); } } else { fx_str_t v_6 = {0}; FX_CALL(_fx_M6K_formFM8klit2strS3N14K_form__klit_tBR10Ast__loc_t(l_0, true, &vcase_1->t1.t1, &v_6, 0), _fx_catch_5); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_6, 0), _fx_catch_5); _fx_catch_5: ; FX_FREE_STR(&v_6); } FX_CHECK_EXN(_fx_catch_6); _fx_catch_6: ; goto _fx_endmatch_2; } if (tag_0 
== 3) { _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* vcase_2 = &e_0->u.CExpBinary; _fx_N17C_form__cbinary_t* bop_0 = &vcase_2->t0; if (bop_0->tag == 14) { _fx_T3SiN13C_pp__assoc_t v_7 = {0}; FX_CALL(_fx_M4C_ppFM10binop2str_T3SiN13C_pp__assoc_t1N17C_form__cbinary_t(bop_0, &v_7, 0), _fx_catch_7); int_ pr0_0 = v_7.t1; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_7); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_2->t1, pr0_0, 0), _fx_catch_7); fx_str_t slit_4 = FX_MAKE_STR("["); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_4, 0), _fx_catch_7); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_7); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_2->t2, 0, 0), _fx_catch_7); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_7); fx_str_t slit_5 = FX_MAKE_STR("]"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_5, 0), _fx_catch_7); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_7); _fx_catch_7: ; _fx_free_T3SiN13C_pp__assoc_t(&v_7); goto _fx_endmatch_2; } } if (tag_0 == 3) { _fx_T3SiN13C_pp__assoc_t v_8 = {0}; fx_str_t bop_str_0 = {0}; _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* vcase_3 = &e_0->u.CExpBinary; _fx_N17C_form__cbinary_t* bop_1 = &vcase_3->t0; FX_CALL(_fx_M4C_ppFM10binop2str_T3SiN13C_pp__assoc_t1N17C_form__cbinary_t(bop_1, &v_8, 0), _fx_catch_8); fx_copy_str(&v_8.t0, &bop_str_0); int_ pr0_1 = v_8.t1; _fx_N13C_pp__assoc_t assoc_0 = v_8.t2; bool use_br_0; if (pr0_1 < pr_0) { use_br_0 = true; } else { int tag_2 = bop_1->tag; bool res_0; if (tag_2 == 8) { res_0 = true; } else if (tag_2 == 9) { res_0 = true; } else if (tag_2 == 10) { res_0 = true; } else { res_0 = false; } FX_CHECK_EXN(_fx_catch_8); if (res_0) { use_br_0 = true; goto _fx_endmatch_0; } use_br_0 = false; _fx_endmatch_0: ; FX_CHECK_EXN(_fx_catch_8); } FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_8); if (use_br_0) { fx_str_t slit_6 = FX_MAKE_STR("("); 
FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_6, 0), _fx_catch_8); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_8); } bool is_shift_0; if (bop_1->tag == 6) { is_shift_0 = true; } else { is_shift_0 = bop_1->tag == 7; } int_ a_pr_0; if (is_shift_0) { a_pr_0 = 1350; } else if (assoc_0.tag == 1) { a_pr_0 = pr0_1; } else { a_pr_0 = pr0_1 + 1; } int_ b_pr_0; if (is_shift_0) { b_pr_0 = 1350; } else if (assoc_0.tag == 2) { b_pr_0 = pr0_1; } else { b_pr_0 = pr0_1 + 1; } FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_3->t1, a_pr_0, 0), _fx_catch_8); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_8); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &bop_str_0, 0), _fx_catch_8); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_8); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_3->t2, b_pr_0, 0), _fx_catch_8); if (use_br_0) { FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_8); fx_str_t slit_7 = FX_MAKE_STR(")"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_7, 0), _fx_catch_8); } FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_8); _fx_catch_8: ; FX_FREE_STR(&bop_str_0); _fx_free_T3SiN13C_pp__assoc_t(&v_8); goto _fx_endmatch_2; } if (tag_0 == 4) { _fx_T3SiN13C_pp__assoc_t v_9 = {0}; fx_str_t uop_str_0 = {0}; _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* vcase_4 = &e_0->u.CExpUnary; _fx_N14C_form__cexp_t e_1 = vcase_4->t1; _fx_N16C_form__cunary_t* uop_0 = &vcase_4->t0; FX_CALL(_fx_M4C_ppFM9unop2str_T3SiN13C_pp__assoc_t1N16C_form__cunary_t(uop_0, &v_9, 0), _fx_catch_11); fx_copy_str(&v_9.t0, &uop_str_0); int_ pr0_2 = v_9.t1; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_11); if (pr0_2 < pr_0) { fx_str_t slit_8 = FX_MAKE_STR("("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_8, 0), _fx_catch_11); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_11); } int tag_3 = uop_0->tag; bool res_1; if (tag_3 == 9) { res_1 = true; } else if (tag_3 == 10) { res_1 = true; } else { res_1 = false; } FX_CHECK_EXN(_fx_catch_11); if 
(res_1) { FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, e_1, pr0_2, 0), _fx_catch_9); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &uop_str_0, 0), _fx_catch_9); _fx_catch_9: ; goto _fx_endmatch_1; } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &uop_str_0, 0), _fx_catch_10); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, e_1, pr0_2, 0), _fx_catch_10); _fx_catch_10: ; _fx_endmatch_1: ; FX_CHECK_EXN(_fx_catch_11); if (pr0_2 < pr_0) { FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_11); fx_str_t slit_9 = FX_MAKE_STR(")"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_9, 0), _fx_catch_11); } FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_11); _fx_catch_11: ; FX_FREE_STR(&uop_str_0); _fx_free_T3SiN13C_pp__assoc_t(&v_9); goto _fx_endmatch_2; } if (tag_0 == 5) { _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* vcase_5 = &e_0->u.CExpMem; FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_5->t0, 1400, 0), _fx_catch_12); fx_str_t slit_10 = FX_MAKE_STR("."); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_10, 0), _fx_catch_12); FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, &vcase_5->t1, &vcase_5->t2.t1, 0), _fx_catch_12); _fx_catch_12: ; goto _fx_endmatch_2; } if (tag_0 == 6) { _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* vcase_6 = &e_0->u.CExpArrow; FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_6->t0, 1400, 0), _fx_catch_13); fx_str_t slit_11 = FX_MAKE_STR("->"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_11, 0), _fx_catch_13); FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, &vcase_6->t1, &vcase_6->t2.t1, 0), _fx_catch_13); _fx_catch_13: ; goto _fx_endmatch_2; } if (tag_0 == 7) { _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* vcase_7 = &e_0->u.CExpCast; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_14); fx_str_t slit_12 = FX_MAKE_STR("("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_12, 0), _fx_catch_14); FX_CALL( 
_fx_M4C_ppFM8pp_ctyp_v4R5PP__tN14C_form__ctyp_tNt6option1R9Ast__id_tR10Ast__loc_t(pp_0, vcase_7->t1, &_fx_g10C_pp__None, &vcase_7->t2, 0), _fx_catch_14); fx_str_t slit_13 = FX_MAKE_STR(")"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_13, 0), _fx_catch_14); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_14); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_7->t0, 1301, 0), _fx_catch_14); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_14); _fx_catch_14: ; goto _fx_endmatch_2; } if (tag_0 == 8) { _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* vcase_8 = &e_0->u.CExpTernary; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_15); if (200 < pr_0) { fx_str_t slit_14 = FX_MAKE_STR("("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_14, 0), _fx_catch_15); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_15); } FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_8->t0, 0, 0), _fx_catch_15); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_15); fx_str_t slit_15 = FX_MAKE_STR("?"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_15, 0), _fx_catch_15); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_15); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_8->t1, 0, 0), _fx_catch_15); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_15); fx_str_t slit_16 = FX_MAKE_STR(":"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_16, 0), _fx_catch_15); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_15); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_8->t2, 0, 0), _fx_catch_15); if (200 < pr_0) { FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_15); fx_str_t slit_17 = FX_MAKE_STR(")"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_17, 0), _fx_catch_15); } FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_15); _fx_catch_15: ; goto _fx_endmatch_2; } if (tag_0 == 9) { _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* vcase_9 = &e_0->u.CExpCall; 
FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_16); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_9->t0, 1400, 0), _fx_catch_16); fx_str_t slit_18 = FX_MAKE_STR("("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_18, 0), _fx_catch_16); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_16); FX_CALL(_fx_M4C_ppFM8pp_elistv2R5PP__tLN14C_form__cexp_t(pp_0, vcase_9->t1, 0), _fx_catch_16); fx_str_t slit_19 = FX_MAKE_STR(")"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_19, 0), _fx_catch_16); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_16); _fx_catch_16: ; goto _fx_endmatch_2; } if (tag_0 == 10) { _fx_LN14C_form__cexp_t eseq_0 = 0; _fx_LN14C_form__cexp_t eseq_1 = e_0->u.CExpInit.t0; if (eseq_1 != 0) { FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_18); fx_str_t slit_20 = FX_MAKE_STR("{"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_20, 0), _fx_catch_18); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_18); int_ i_1 = 0; FX_COPY_PTR(eseq_1, &eseq_0); _fx_LN14C_form__cexp_t lst_1 = eseq_0; for (; lst_1; lst_1 = lst_1->tl, i_1 += 1) { _fx_N14C_form__cexp_t e_2 = lst_1->hd; if (i_1 > 0) { fx_str_t slit_21 = FX_MAKE_STR(","); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_21, 0), _fx_catch_17); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_17); } FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, e_2, 0, 0), _fx_catch_17); _fx_catch_17: ; FX_CHECK_EXN(_fx_catch_18); } FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_18); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_18); fx_str_t slit_22 = FX_MAKE_STR("}"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_22, 0), _fx_catch_18); } else { fx_str_t slit_23 = FX_MAKE_STR("{0}"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_23, 0), _fx_catch_18); } _fx_catch_18: ; if (eseq_0) { _fx_free_LN14C_form__cexp_t(&eseq_0); } goto _fx_endmatch_2; } if (tag_0 == 11) { _fx_T2N14C_form__ctyp_tR10Ast__loc_t* vcase_10 = &e_0->u.CExpTyp; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_19); FX_CALL( 
_fx_M4C_ppFM8pp_ctyp_v4R5PP__tN14C_form__ctyp_tNt6option1R9Ast__id_tR10Ast__loc_t(pp_0, vcase_10->t0, &_fx_g10C_pp__None, &vcase_10->t1, 0), _fx_catch_19); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_19); _fx_catch_19: ; goto _fx_endmatch_2; } if (tag_0 == 12) { fx_str_t v_10 = {0}; fx_str_t v_11 = {0}; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_20); FX_CALL(_fx_M6StringFM5stripS1S(&e_0->u.CExpCCode.t0, &v_10, 0), _fx_catch_20); fx_str_t slit_24 = FX_MAKE_STR("\n"); fx_str_t slit_25 = FX_MAKE_STR("\n"); { const fx_str_t strs_3[] = { slit_24, v_10, slit_25 }; FX_CALL(fx_strjoin(0, 0, 0, strs_3, 3, &v_11), _fx_catch_20); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_11, 0), _fx_catch_20); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_20); _fx_catch_20: ; FX_FREE_STR(&v_11); FX_FREE_STR(&v_10); goto _fx_endmatch_2; } FX_FAST_THROW(FX_EXN_NoMatchError, _fx_cleanup); _fx_endmatch_2: ; _fx_cleanup: ; return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM8pp_elistv2R5PP__tLN14C_form__cexp_t( struct _fx_R5PP__t* pp_0, struct _fx_LN14C_form__cexp_t_data_t* el_0, void* fx_fv) { int fx_status = 0; FX_CALL(fx_check_stack(), _fx_cleanup); int_ i_0 = 0; _fx_LN14C_form__cexp_t lst_0 = el_0; for (; lst_0; lst_0 = lst_0->tl, i_0 += 1) { _fx_N14C_form__cexp_t e_0 = lst_0->hd; if (i_0 > 0) { fx_str_t slit_0 = FX_MAKE_STR(","); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_0, 0), _fx_catch_0); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_0); } FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, e_0, 0, 0), _fx_catch_0); _fx_catch_0: ; FX_CHECK_EXN(_fx_cleanup); } _fx_cleanup: ; return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM14pprint_fun_hdrv5R5PP__tR9Ast__id_tBR10Ast__loc_tB( struct _fx_R5PP__t* pp_0, struct _fx_R9Ast__id_t* fname_0, bool semicolon_0, struct _fx_R10Ast__loc_t* loc_0, bool fwd_mode_0, void* fx_fv) { _fx_N15C_form__cinfo_t v_0 = {0}; _fx_R17C_form__cdeffun_t v_1 = {0}; fx_str_t cf_cname_0 = {0}; _fx_N14C_form__ctyp_t cf_rt_0 = 0; 
_fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t cf_args_0 = 0; fx_str_t v_2 = {0}; fx_str_t v_3 = {0}; int fx_status = 0; FX_CALL(_fx_M6C_formFM6cinfo_N15C_form__cinfo_t2R9Ast__id_tR10Ast__loc_t(fname_0, loc_0, &v_0, 0), _fx_cleanup); if (v_0.tag == 3) { _fx_copy_R17C_form__cdeffun_t(&v_0.u.CFun->data, &v_1); } else { fx_str_t v_4 = {0}; fx_str_t v_5 = {0}; fx_exn_t v_6 = {0}; FX_CALL(_fx_M6C_formFM7idc2strS2R9Ast__id_tR10Ast__loc_t(fname_0, loc_0, &v_4, 0), _fx_catch_0); fx_str_t slit_0 = FX_MAKE_STR("the forward declaration of "); fx_str_t slit_1 = FX_MAKE_STR(" does not reference a function"); { const fx_str_t strs_0[] = { slit_0, v_4, slit_1 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 3, &v_5), _fx_catch_0); } FX_CALL(_fx_M3AstFM11compile_errE2RM5loc_tS(loc_0, &v_5, &v_6, 0), _fx_catch_0); FX_THROW(&v_6, false, _fx_catch_0); _fx_catch_0: ; fx_free_exn(&v_6); FX_FREE_STR(&v_5); FX_FREE_STR(&v_4); } FX_CHECK_EXN(_fx_cleanup); _fx_R10Ast__loc_t cf_loc_0 = v_1.cf_loc; _fx_R16Ast__fun_flags_t cf_flags_0 = v_1.cf_flags; fx_copy_str(&v_1.cf_cname, &cf_cname_0); FX_COPY_PTR(v_1.cf_rt, &cf_rt_0); FX_COPY_PTR(v_1.cf_args, &cf_args_0); FX_CALL(_fx_M2PPFM6beginvv1RM1t(pp_0, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_cleanup); if (cf_flags_0.fun_flag_private) { fx_str_t slit_2 = FX_MAKE_STR("static "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_2, 0), _fx_cleanup); } else { fx_str_t slit_3 = FX_MAKE_STR("FX_EXTERN_C "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_3, 0), _fx_cleanup); } fx_str_t slit_4 = FX_MAKE_STR(""); fx_str_t slit_5 = FX_MAKE_STR(""); FX_CALL( _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t(pp_0, &slit_4, &slit_5, cf_rt_0, &_fx_g10C_pp__None, false, &cf_loc_0, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &cf_cname_0, 0), _fx_cleanup); fx_str_t slit_6 = FX_MAKE_STR("("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_6, 0), _fx_cleanup); 
FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_cleanup); if (cf_args_0 == 0) { fx_str_t slit_7 = FX_MAKE_STR("void"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_7, 0), _fx_catch_1); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_1); _fx_catch_1: ; } else { int_ nargs_0 = _fx_M4C_ppFM6lengthi1LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(cf_args_0, 0); int_ i_0 = 0; _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t lst_0 = cf_args_0; for (; lst_0; lst_0 = lst_0->tl, i_0 += 1) { _fx_N14C_form__ctyp_t t_0 = 0; fx_str_t v_7 = {0}; _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* __pat___0 = &lst_0->hd; _fx_R9Ast__id_t n_0 = __pat___0->t0; FX_COPY_PTR(__pat___0->t1, &t_0); bool last_0 = i_0 == nargs_0 - 1; if (last_0) { fx_str_t slit_8 = FX_MAKE_STR(""); fx_copy_str(&slit_8, &v_7); } else { fx_str_t slit_9 = FX_MAKE_STR(","); fx_copy_str(&slit_9, &v_7); } _fx_Nt6option1R9Ast__id_t v_8; _fx_M4C_ppFM4SomeNt6option1R9Ast__id_t1R9Ast__id_t(&n_0, &v_8); fx_str_t slit_10 = FX_MAKE_STR(""); FX_CALL( _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t(pp_0, &slit_10, &v_7, t_0, &v_8, true, &cf_loc_0, 0), _fx_catch_2); if (!last_0) { FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_2); } _fx_catch_2: ; FX_FREE_STR(&v_7); if (t_0) { _fx_free_N14C_form__ctyp_t(&t_0); } FX_CHECK_EXN(_fx_catch_3); } _fx_catch_3: ; } FX_CHECK_EXN(_fx_cleanup); if (semicolon_0) { fx_str_t slit_11 = FX_MAKE_STR(";"); fx_copy_str(&slit_11, &v_2); } else { fx_str_t slit_12 = FX_MAKE_STR(""); fx_copy_str(&slit_12, &v_2); } fx_str_t slit_13 = FX_MAKE_STR(")"); { const fx_str_t strs_1[] = { slit_13, v_2 }; FX_CALL(fx_strjoin(0, 0, 0, strs_1, 2, &v_3), _fx_cleanup); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_3, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_cleanup); _fx_cleanup: ; _fx_free_N15C_form__cinfo_t(&v_0); 
_fx_free_R17C_form__cdeffun_t(&v_1); FX_FREE_STR(&cf_cname_0); if (cf_rt_0) { _fx_free_N14C_form__ctyp_t(&cf_rt_0); } if (cf_args_0) { _fx_free_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(&cf_args_0); } FX_FREE_STR(&v_2); FX_FREE_STR(&v_3); return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM26pprint_cstmt_or_block_cboxv2R5PP__tN15C_form__cstmt_t( struct _fx_R5PP__t* pp_0, struct _fx_N15C_form__cstmt_t_data_t* s_0, void* fx_fv) { _fx_LN15C_form__cstmt_t sl_0 = 0; int fx_status = 0; FX_CALL(fx_check_stack(), _fx_cleanup); int tag_0 = FX_REC_VARIANT_TAG(s_0); if (tag_0 == 7) { FX_COPY_PTR(s_0->u.CStmtBlock.t0, &sl_0); } else if (tag_0 != 1) { FX_CALL(_fx_cons_LN15C_form__cstmt_t(s_0, 0, true, &sl_0), _fx_catch_0); _fx_catch_0: ; } FX_CHECK_EXN(_fx_cleanup); fx_str_t slit_0 = FX_MAKE_STR("{"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_0, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM6beginvv2RM1ti(pp_0, 0, 0), _fx_cleanup); int_ i_0 = 0; _fx_LN15C_form__cstmt_t lst_0 = sl_0; for (; lst_0; lst_0 = lst_0->tl, i_0 += 1) { _fx_N15C_form__cstmt_t s_1 = lst_0->hd; if (i_0 > 0) { FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_1); } FX_CALL(_fx_M4C_ppFM9pp_cstmt_v2R5PP__tN15C_form__cstmt_t(pp_0, s_1, 0), _fx_catch_1); _fx_catch_1: ; FX_CHECK_EXN(_fx_cleanup); } FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_cleanup); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_cleanup); fx_str_t slit_1 = FX_MAKE_STR("}"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_1, 0), _fx_cleanup); _fx_cleanup: ; if (sl_0) { _fx_free_LN15C_form__cstmt_t(&sl_0); } return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM21pprint_cstmt_as_blockv2R5PP__tN15C_form__cstmt_t( struct _fx_R5PP__t* pp_0, struct _fx_N15C_form__cstmt_t_data_t* s_0, void* fx_fv) { _fx_LN15C_form__cstmt_t sl_0 = 0; int fx_status = 0; FX_CALL(fx_check_stack(), _fx_cleanup); int tag_0 = FX_REC_VARIANT_TAG(s_0); if (tag_0 == 7) { 
FX_COPY_PTR(s_0->u.CStmtBlock.t0, &sl_0); } else if (tag_0 != 1) { FX_CALL(_fx_cons_LN15C_form__cstmt_t(s_0, 0, true, &sl_0), _fx_catch_0); _fx_catch_0: ; } FX_CHECK_EXN(_fx_cleanup); if (sl_0 == 0) { fx_str_t slit_0 = FX_MAKE_STR("{}"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_0, 0), _fx_catch_1); _fx_catch_1: ; } else { FX_CALL(_fx_M2PPFM6beginvv1RM1t(pp_0, 0), _fx_catch_3); fx_str_t slit_1 = FX_MAKE_STR("{"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_1, 0), _fx_catch_3); int_ i_0 = 0; _fx_LN15C_form__cstmt_t lst_0 = sl_0; for (; lst_0; lst_0 = lst_0->tl, i_0 += 1) { _fx_N15C_form__cstmt_t s_1 = lst_0->hd; FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_2); FX_CALL(_fx_M4C_ppFM9pp_cstmt_v2R5PP__tN15C_form__cstmt_t(pp_0, s_1, 0), _fx_catch_2); _fx_catch_2: ; FX_CHECK_EXN(_fx_catch_3); } FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_3); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_3); fx_str_t slit_2 = FX_MAKE_STR("}"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_2, 0), _fx_catch_3); _fx_catch_3: ; } _fx_cleanup: ; if (sl_0) { _fx_free_LN15C_form__cstmt_t(&sl_0); } return fx_status; } FX_EXTERN_C int _fx_M4C_ppFM9pp_cstmt_v2R5PP__tN15C_form__cstmt_t( struct _fx_R5PP__t* pp_0, struct _fx_N15C_form__cstmt_t_data_t* s_0, void* fx_fv) { int fx_status = 0; FX_CALL(fx_check_stack(), _fx_cleanup); int tag_0 = FX_REC_VARIANT_TAG(s_0); if (tag_0 == 1) { fx_str_t slit_0 = FX_MAKE_STR("{}"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_0, 0), _fx_catch_0); _fx_catch_0: ; } else if (tag_0 == 2) { FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &s_0->u.CComment.t0, 0), _fx_catch_1); _fx_catch_1: ; } else if (tag_0 == 3) { _fx_N14C_form__cexp_t e_0 = s_0->u.CExp; FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, e_0, 0, 0), _fx_catch_3); if (FX_REC_VARIANT_TAG(e_0) != 12) { fx_str_t slit_1 = FX_MAKE_STR(";"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_1, 0), _fx_catch_2); _fx_catch_2: ; } FX_CHECK_EXN(_fx_catch_3); _fx_catch_3: ; } else if (tag_0 == 4) { fx_str_t 
slit_2 = FX_MAKE_STR("break;"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_2, 0), _fx_catch_4); _fx_catch_4: ; } else if (tag_0 == 5) { fx_str_t slit_3 = FX_MAKE_STR("continue;"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_3, 0), _fx_catch_5); _fx_catch_5: ; } else if (tag_0 == 6) { _fx_Nt6option1N14C_form__cexp_t* e_opt_0 = &s_0->u.CStmtReturn.t0; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_7); fx_str_t slit_4 = FX_MAKE_STR("return"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_4, 0), _fx_catch_7); if (e_opt_0->tag == 2) { FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_6); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, e_opt_0->u.Some, 0, 0), _fx_catch_6); _fx_catch_6: ; } FX_CHECK_EXN(_fx_catch_7); fx_str_t slit_5 = FX_MAKE_STR(";"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_5, 0), _fx_catch_7); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_7); _fx_catch_7: ; } else if (tag_0 == 7) { FX_CALL(_fx_M4C_ppFM21pprint_cstmt_as_blockv2R5PP__tN15C_form__cstmt_t(pp_0, s_0, 0), _fx_catch_8); _fx_catch_8: ; } else if (tag_0 == 8) { fx_str_t v_0 = {0}; fx_str_t nstr_0 = {0}; fx_str_t v_1 = {0}; fx_str_t v_2 = {0}; _fx_T2R9Ast__id_tN15C_form__cstmt_t* vcase_0 = &s_0->u.CStmtSync; _fx_R9Ast__id_t* n_0 = &vcase_0->t0; if (_fx_g12Options__opt.enable_openmp) { FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_9); fx_str_t slit_6 = FX_MAKE_STR("#pragma omp critical"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_6, 0), _fx_catch_9); bool v_3; FX_CALL(_fx_M4C_ppFM6__ne__B2R9Ast__id_tR9Ast__id_t(n_0, &_fx_g9Ast__noid, &v_3, 0), _fx_catch_9); if (v_3) { FX_CALL(_fx_M3AstFM2ppS1RM4id_t(n_0, &v_0, 0), _fx_catch_9); fx_str_t slit_7 = FX_MAKE_STR("."); fx_str_t slit_8 = FX_MAKE_STR("__"); FX_CALL(_fx_M6StringFM7replaceS3SSS(&v_0, &slit_7, &slit_8, &nstr_0, 0), _fx_catch_9); FX_CALL(_fx_M4C_ppFM6stringS1S(&nstr_0, &v_1, 0), _fx_catch_9); fx_str_t slit_9 = FX_MAKE_STR(" ("); fx_str_t slit_10 = FX_MAKE_STR(")"); { const fx_str_t strs_0[] = { slit_9, v_1, slit_10 }; 
FX_CALL(fx_strjoin(0, 0, 0, strs_0, 3, &v_2), _fx_catch_9); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_2, 0), _fx_catch_9); } FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_9); } FX_CALL(_fx_M4C_ppFM21pprint_cstmt_as_blockv2R5PP__tN15C_form__cstmt_t(pp_0, vcase_0->t1, 0), _fx_catch_9); _fx_catch_9: ; FX_FREE_STR(&v_2); FX_FREE_STR(&v_1); FX_FREE_STR(&nstr_0); FX_FREE_STR(&v_0); } else if (tag_0 == 9) { _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* vcase_1 = &s_0->u.CStmtIf; fx_str_t slit_11 = FX_MAKE_STR("if"); FX_CALL( _fx_M4C_ppFM16print_cascade_ifv5SN14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR5PP__t(&slit_11, vcase_1->t0, vcase_1->t1, vcase_1->t2, pp_0, 0), _fx_catch_10); _fx_catch_10: ; } else if (tag_0 == 10) { _fx_T2R9Ast__id_tR10Ast__loc_t* vcase_2 = &s_0->u.CStmtGoto; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_11); fx_str_t slit_12 = FX_MAKE_STR("goto"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_12, 0), _fx_catch_11); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_11); FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, &vcase_2->t0, &vcase_2->t1, 0), _fx_catch_11); fx_str_t slit_13 = FX_MAKE_STR(";"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_13, 0), _fx_catch_11); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_11); _fx_catch_11: ; } else if (tag_0 == 11) { _fx_T2R9Ast__id_tR10Ast__loc_t* vcase_3 = &s_0->u.CStmtLabel; FX_CALL(_fx_M2PPFM6breakuv1RM1t(pp_0, 0), _fx_catch_12); FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, &vcase_3->t0, &vcase_3->t1, 0), _fx_catch_12); fx_str_t slit_14 = FX_MAKE_STR(": ;"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_14, 0), _fx_catch_12); _fx_catch_12: ; } else if (tag_0 == 12) { _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* vcase_4 = &s_0->u.CStmtFor; _fx_LN14C_form__cexp_t e3_0 = vcase_4->t3; _fx_Nt6option1N14C_form__cexp_t* e2_opt_0 = &vcase_4->t2; 
_fx_LN14C_form__cexp_t e1_0 = vcase_4->t1; _fx_Nt6option1N14C_form__ctyp_t* t_opt_0 = &vcase_4->t0; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_17); fx_str_t slit_15 = FX_MAKE_STR("for ("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_15, 0), _fx_catch_17); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_17); if (e1_0 != 0) { if (t_opt_0->tag == 2) { FX_CALL( _fx_M4C_ppFM8pp_ctyp_v4R5PP__tN14C_form__ctyp_tNt6option1R9Ast__id_tR10Ast__loc_t(pp_0, t_opt_0->u.Some, &_fx_g10C_pp__None, &vcase_4->t5, 0), _fx_catch_13); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_13); _fx_catch_13: ; } FX_CHECK_EXN(_fx_catch_14); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_14); FX_CALL(_fx_M4C_ppFM8pp_elistv2R5PP__tLN14C_form__cexp_t(pp_0, e1_0, 0), _fx_catch_14); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_14); _fx_catch_14: ; } FX_CHECK_EXN(_fx_catch_17); fx_str_t slit_16 = FX_MAKE_STR(";"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_16, 0), _fx_catch_17); if (e2_opt_0->tag == 2) { FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_15); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, e2_opt_0->u.Some, 0, 0), _fx_catch_15); _fx_catch_15: ; } FX_CHECK_EXN(_fx_catch_17); fx_str_t slit_17 = FX_MAKE_STR(";"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_17, 0), _fx_catch_17); if (e3_0 != 0) { FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_16); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_16); FX_CALL(_fx_M4C_ppFM8pp_elistv2R5PP__tLN14C_form__cexp_t(pp_0, e3_0, 0), _fx_catch_16); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_16); _fx_catch_16: ; } FX_CHECK_EXN(_fx_catch_17); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_17); fx_str_t slit_18 = FX_MAKE_STR(")"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_18, 0), _fx_catch_17); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_17); FX_CALL(_fx_M4C_ppFM26pprint_cstmt_or_block_cboxv2R5PP__tN15C_form__cstmt_t(pp_0, vcase_4->t4, 0), _fx_catch_17); _fx_catch_17: ; } else if (tag_0 == 13) { 
_fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* vcase_5 = &s_0->u.CStmtWhile; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_18); fx_str_t slit_19 = FX_MAKE_STR("while ("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_19, 0), _fx_catch_18); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_18); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_5->t0, 0, 0), _fx_catch_18); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_18); fx_str_t slit_20 = FX_MAKE_STR(")"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_20, 0), _fx_catch_18); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_18); FX_CALL(_fx_M4C_ppFM26pprint_cstmt_or_block_cboxv2R5PP__tN15C_form__cstmt_t(pp_0, vcase_5->t1, 0), _fx_catch_18); _fx_catch_18: ; } else if (tag_0 == 14) { _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* vcase_6 = &s_0->u.CStmtDoWhile; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_19); fx_str_t slit_21 = FX_MAKE_STR("do"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_21, 0), _fx_catch_19); FX_CALL(_fx_M4C_ppFM26pprint_cstmt_or_block_cboxv2R5PP__tN15C_form__cstmt_t(pp_0, vcase_6->t0, 0), _fx_catch_19); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_19); fx_str_t slit_22 = FX_MAKE_STR("while ("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_22, 0), _fx_catch_19); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_19); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_6->t1, 0, 0), _fx_catch_19); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_19); fx_str_t slit_23 = FX_MAKE_STR(");"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_23, 0), _fx_catch_19); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_19); _fx_catch_19: ; } else if (tag_0 == 15) { _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t cases_0 = 0; _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* vcase_7 = &s_0->u.CStmtSwitch; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_25); fx_str_t slit_24 = FX_MAKE_STR("switch ("); 
FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_24, 0), _fx_catch_25); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_25); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, vcase_7->t0, 0, 0), _fx_catch_25); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_25); fx_str_t slit_25 = FX_MAKE_STR(") {"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_25, 0), _fx_catch_25); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_25); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_25); FX_COPY_PTR(vcase_7->t1, &cases_0); _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t lst_0 = cases_0; for (; lst_0; lst_0 = lst_0->tl) { _fx_LN14C_form__cexp_t labels_0 = 0; _fx_LN15C_form__cstmt_t code_0 = 0; fx_str_t v_4 = {0}; fx_str_t v_5 = {0}; _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* __pat___0 = &lst_0->hd; FX_COPY_PTR(__pat___0->t0, &labels_0); FX_COPY_PTR(__pat___0->t1, &code_0); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_24); bool isdefault_0; if (labels_0 == 0) { fx_str_t slit_26 = FX_MAKE_STR("default:"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_26, 0), _fx_catch_20); isdefault_0 = true; _fx_catch_20: ; } else { _fx_LN14C_form__cexp_t lst_1 = labels_0; for (; lst_1; lst_1 = lst_1->tl) { _fx_N14C_form__cexp_t l_0 = lst_1->hd; fx_str_t slit_27 = FX_MAKE_STR("case "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_27, 0), _fx_catch_21); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, l_0, 0, 0), _fx_catch_21); fx_str_t slit_28 = FX_MAKE_STR(":"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_28, 0), _fx_catch_21); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_21); _fx_catch_21: ; FX_CHECK_EXN(_fx_catch_22); } isdefault_0 = false; _fx_catch_22: ; } FX_CHECK_EXN(_fx_catch_24); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_24); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_24); FX_CALL(_fx_M2PPFM6beginvv1RM1t(pp_0, 0), _fx_catch_24); int_ v_6; FX_CALL(_fx_M4C_ppFM8length1_i1LN15C_form__cstmt_t(code_0, &v_6, 0), _fx_catch_24); int_ t_0; if (isdefault_0) 
{ t_0 = 0; } else { t_0 = 1; } int_ codelen_0 = v_6 + t_0; int_ i_0 = 0; _fx_LN15C_form__cstmt_t lst_2 = code_0; for (; lst_2; lst_2 = lst_2->tl, i_0 += 1) { _fx_N15C_form__cstmt_t s_1 = lst_2->hd; if (i_0 == 0) { fx_str_t slit_29 = FX_MAKE_STR(" "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_29, 0), _fx_catch_23); } FX_CALL(_fx_M4C_ppFM9pp_cstmt_v2R5PP__tN15C_form__cstmt_t(pp_0, s_1, 0), _fx_catch_23); if (i_0 < codelen_0 - 1) { FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_23); } _fx_catch_23: ; FX_CHECK_EXN(_fx_catch_24); } if (isdefault_0) { if (code_0 == 0) { FX_CALL(_fx_F7__mul__S2Ci((char_)32, 3, &v_4, 0), _fx_catch_24); fx_str_t slit_30 = FX_MAKE_STR(";"); { const fx_str_t strs_1[] = { v_4, slit_30 }; FX_CALL(fx_strjoin(0, 0, 0, strs_1, 2, &v_5), _fx_catch_24); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_5, 0), _fx_catch_24); } } else { fx_str_t slit_31 = FX_MAKE_STR("break;"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_31, 0), _fx_catch_24); } FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_24); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_24); _fx_catch_24: ; FX_FREE_STR(&v_5); FX_FREE_STR(&v_4); if (code_0) { _fx_free_LN15C_form__cstmt_t(&code_0); } if (labels_0) { _fx_free_LN14C_form__cexp_t(&labels_0); } FX_CHECK_EXN(_fx_catch_25); } fx_str_t slit_32 = FX_MAKE_STR("}"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_32, 0), _fx_catch_25); _fx_catch_25: ; if (cases_0) { _fx_free_LT2LN14C_form__cexp_tLN15C_form__cstmt_t(&cases_0); } } else if (tag_0 == 16) { _fx_N15C_form__cinfo_t v_7 = {0}; _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* vcase_8 = &s_0->u.CDefVal; _fx_R10Ast__loc_t* loc_0 = &vcase_8->t3; _fx_Nt6option1N14C_form__cexp_t* e_opt_1 = &vcase_8->t2; _fx_R9Ast__id_t* n_1 = &vcase_8->t1; FX_CALL(_fx_M6C_formFM6cinfo_N15C_form__cinfo_t2R9Ast__id_tR10Ast__loc_t(n_1, loc_0, &v_7, 0), _fx_catch_27); bool is_private_0; if (v_7.tag == 2) { is_private_0 = v_7.u.CVal.cv_flags.val_flag_private; } else { is_private_0 = 
false; } FX_CHECK_EXN(_fx_catch_27); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_27); if (is_private_0) { fx_str_t slit_33 = FX_MAKE_STR("static"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_33, 0), _fx_catch_27); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_27); } _fx_Nt6option1R9Ast__id_t v_8; _fx_M4C_ppFM4SomeNt6option1R9Ast__id_t1R9Ast__id_t(n_1, &v_8); FX_CALL( _fx_M4C_ppFM8pp_ctyp_v4R5PP__tN14C_form__ctyp_tNt6option1R9Ast__id_tR10Ast__loc_t(pp_0, vcase_8->t0, &v_8, loc_0, 0), _fx_catch_27); if (e_opt_1->tag == 2) { fx_str_t slit_34 = FX_MAKE_STR(" ="); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_34, 0), _fx_catch_26); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_26); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, e_opt_1->u.Some, 0, 0), _fx_catch_26); _fx_catch_26: ; } FX_CHECK_EXN(_fx_catch_27); fx_str_t slit_35 = FX_MAKE_STR(";"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_35, 0), _fx_catch_27); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_27); _fx_catch_27: ; _fx_free_N15C_form__cinfo_t(&v_7); } else if (tag_0 == 17) { _fx_LN15C_form__cstmt_t cf_body_0 = 0; _fx_R17C_form__cdeffun_t* v_9 = &s_0->u.CDefFun->data; _fx_R10Ast__loc_t cf_loc_0 = v_9->cf_loc; FX_COPY_PTR(v_9->cf_body, &cf_body_0); _fx_R9Ast__id_t cf_name_0 = v_9->cf_name; FX_CALL(_fx_M4C_ppFM14pprint_fun_hdrv5R5PP__tR9Ast__id_tBR10Ast__loc_tB(pp_0, &cf_name_0, false, &cf_loc_0, false, 0), _fx_catch_29); FX_CALL(_fx_M2PPFM6beginvv1RM1t(pp_0, 0), _fx_catch_29); fx_str_t slit_36 = FX_MAKE_STR("{"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_36, 0), _fx_catch_29); FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_29); int_ i_1 = 0; _fx_LN15C_form__cstmt_t lst_3 = cf_body_0; for (; lst_3; lst_3 = lst_3->tl, i_1 += 1) { _fx_N15C_form__cstmt_t s_2 = lst_3->hd; if (i_1 > 0) { FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_28); } FX_CALL(_fx_M4C_ppFM9pp_cstmt_v2R5PP__tN15C_form__cstmt_t(pp_0, s_2, 0), _fx_catch_28); _fx_catch_28: ; FX_CHECK_EXN(_fx_catch_29); } 
FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_29); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_29); fx_str_t slit_37 = FX_MAKE_STR("}"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_37, 0), _fx_catch_29); FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_29); _fx_catch_29: ; if (cf_body_0) { _fx_free_LN15C_form__cstmt_t(&cf_body_0); } } else if (tag_0 == 19) { _fx_N15C_form__cinfo_t v_10 = {0}; _fx_T2R9Ast__id_tR10Ast__loc_t* vcase_9 = &s_0->u.CDefForwardSym; _fx_R10Ast__loc_t* cf_loc_1 = &vcase_9->t1; _fx_R9Ast__id_t* cf_name_1 = &vcase_9->t0; FX_CALL(_fx_M6C_formFM6cinfo_N15C_form__cinfo_t2R9Ast__id_tR10Ast__loc_t(cf_name_1, cf_loc_1, &v_10, 0), _fx_catch_33); int tag_1 = v_10.tag; if (tag_1 == 3) { FX_CALL(_fx_M4C_ppFM14pprint_fun_hdrv5R5PP__tR9Ast__id_tBR10Ast__loc_tB(pp_0, cf_name_1, true, cf_loc_1, true, 0), _fx_catch_30); _fx_catch_30: ; } else if (tag_1 == 2) { FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_31); fx_str_t slit_38 = FX_MAKE_STR("FX_EXTERN_C_VAL("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_38, 0), _fx_catch_31); FX_CALL(_fx_M2PPFM3cutv1RM1t(pp_0, 0), _fx_catch_31); _fx_Nt6option1R9Ast__id_t v_11; _fx_M4C_ppFM4SomeNt6option1R9Ast__id_t1R9Ast__id_t(cf_name_1, &v_11); fx_str_t slit_39 = FX_MAKE_STR(""); fx_str_t slit_40 = FX_MAKE_STR(")"); FX_CALL( _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t(pp_0, &slit_39, &slit_40, v_10.u.CVal.cv_typ, &v_11, true, cf_loc_1, 0), _fx_catch_31); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_31); _fx_catch_31: ; } else { fx_str_t v_12 = {0}; fx_str_t v_13 = {0}; fx_str_t v_14 = {0}; fx_exn_t v_15 = {0}; FX_CALL(_fx_M6C_formFM7idc2strS2R9Ast__id_tR10Ast__loc_t(cf_name_1, cf_loc_1, &v_12, 0), _fx_catch_32); FX_CALL(_fx_M4C_ppFM6stringS1S(&v_12, &v_13, 0), _fx_catch_32); fx_str_t slit_41 = FX_MAKE_STR("the forward declaration of "); fx_str_t slit_42 = FX_MAKE_STR(" does not reference a function or a value"); { const fx_str_t strs_2[] = { slit_41, v_13, 
slit_42 }; FX_CALL(fx_strjoin(0, 0, 0, strs_2, 3, &v_14), _fx_catch_32); } FX_CALL(_fx_M3AstFM11compile_errE2RM5loc_tS(cf_loc_1, &v_14, &v_15, 0), _fx_catch_32); FX_THROW(&v_15, false, _fx_catch_32); _fx_catch_32: ; fx_free_exn(&v_15); FX_FREE_STR(&v_14); FX_FREE_STR(&v_13); FX_FREE_STR(&v_12); } FX_CHECK_EXN(_fx_catch_33); _fx_catch_33: ; _fx_free_N15C_form__cinfo_t(&v_10); } else if (tag_0 == 18) { _fx_N14C_form__ctyp_t ct_typ_0 = 0; _fx_R17C_form__cdeftyp_t* v_16 = &s_0->u.CDefTyp->data; _fx_R10Ast__loc_t ct_loc_0 = v_16->ct_loc; FX_COPY_PTR(v_16->ct_typ, &ct_typ_0); _fx_R9Ast__id_t ct_name_0 = v_16->ct_name; _fx_Nt6option1R9Ast__id_t v_17; _fx_M4C_ppFM4SomeNt6option1R9Ast__id_t1R9Ast__id_t(&ct_name_0, &v_17); fx_str_t slit_43 = FX_MAKE_STR("typedef "); fx_str_t slit_44 = FX_MAKE_STR(";"); FX_CALL( _fx_M4C_ppFM9pp_ctyp__v7R5PP__tSSN14C_form__ctyp_tNt6option1R9Ast__id_tBR10Ast__loc_t(pp_0, &slit_43, &slit_44, ct_typ_0, &v_17, true, &ct_loc_0, 0), _fx_catch_34); FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_34); _fx_catch_34: ; if (ct_typ_0) { _fx_free_N14C_form__ctyp_t(&ct_typ_0); } } else if (tag_0 == 20) { _fx_T2R9Ast__id_tR10Ast__loc_t* vcase_10 = &s_0->u.CDefForwardTyp; FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_35); fx_str_t slit_45 = FX_MAKE_STR("struct "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_45, 0), _fx_catch_35); FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, &vcase_10->t0, &vcase_10->t1, 0), _fx_catch_35); fx_str_t slit_46 = FX_MAKE_STR(";"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_46, 0), _fx_catch_35); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_35); FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_35); _fx_catch_35: ; } else if (tag_0 == 21) { _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t cenum_members_0 = 0; fx_str_t cenum_cname_0 = {0}; _fx_R18C_form__cdefenum_t* v_18 = &s_0->u.CDefEnum->data; _fx_R10Ast__loc_t cenum_loc_0 = v_18->cenum_loc; FX_COPY_PTR(v_18->cenum_members, &cenum_members_0); 
fx_copy_str(&v_18->cenum_cname, &cenum_cname_0); fx_str_t slit_47 = FX_MAKE_STR("typedef enum {"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_47, 0), _fx_catch_38); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_38); FX_CALL(_fx_M2PPFM6beginvv1RM1t(pp_0, 0), _fx_catch_38); int_ i_2 = 0; _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t lst_4 = cenum_members_0; for (; lst_4; lst_4 = lst_4->tl, i_2 += 1) { _fx_Nt6option1N14C_form__cexp_t e_opt_2 = {0}; _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* __pat___1 = &lst_4->hd; _fx_R9Ast__id_t n_2 = __pat___1->t0; _fx_copy_Nt6option1N14C_form__cexp_t(&__pat___1->t1, &e_opt_2); if (i_2 == 0) { fx_str_t slit_48 = FX_MAKE_STR(" "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_48, 0), _fx_catch_37); } else { fx_str_t slit_49 = FX_MAKE_STR(","); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_49, 0), _fx_catch_37); FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_37); } FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, &n_2, &cenum_loc_0, 0), _fx_catch_37); if (e_opt_2.tag == 2) { fx_str_t slit_50 = FX_MAKE_STR("="); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_50, 0), _fx_catch_36); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, e_opt_2.u.Some, 0, 0), _fx_catch_36); _fx_catch_36: ; } FX_CHECK_EXN(_fx_catch_37); _fx_catch_37: ; _fx_free_Nt6option1N14C_form__cexp_t(&e_opt_2); FX_CHECK_EXN(_fx_catch_38); } FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_38); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_38); fx_str_t slit_51 = FX_MAKE_STR("} "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_51, 0), _fx_catch_38); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &cenum_cname_0, 0), _fx_catch_38); fx_str_t slit_52 = FX_MAKE_STR(";"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_52, 0), _fx_catch_38); FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_38); _fx_catch_38: ; FX_FREE_STR(&cenum_cname_0); if (cenum_members_0) { _fx_free_LT2R9Ast__id_tNt6option1N14C_form__cexp_t(&cenum_members_0); } } else if (tag_0 == 22) { 
fx_str_t ci_cname_0 = {0}; fx_str_t v_19 = {0}; fx_str_t v_20 = {0}; fx_str_t vtbl_cname_0 = {0}; fx_str_t v_21 = {0}; fx_str_t v_22 = {0}; _fx_R23C_form__cdefinterface_t* v_23 = &s_0->u.CDefInterface->data; _fx_R10Ast__loc_t ci_loc_0 = v_23->ci_loc; _fx_R9Ast__id_t ci_vtbl_0 = v_23->ci_vtbl; fx_copy_str(&v_23->ci_cname, &ci_cname_0); FX_CALL(_fx_M2PPFM6beginvv1RM1t(pp_0, 0), _fx_catch_39); FX_CALL(_fx_M4C_ppFM6stringS1S(&ci_cname_0, &v_19, 0), _fx_catch_39); fx_str_t slit_53 = FX_MAKE_STR("typedef struct "); fx_str_t slit_54 = FX_MAKE_STR(" {"); { const fx_str_t strs_3[] = { slit_53, v_19, slit_54 }; FX_CALL(fx_strjoin(0, 0, 0, strs_3, 3, &v_20), _fx_catch_39); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_20, 0), _fx_catch_39); FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_39); FX_CALL(_fx_M6C_formFM13get_idc_cnameS2R9Ast__id_tR10Ast__loc_t(&ci_vtbl_0, &ci_loc_0, &vtbl_cname_0, 0), _fx_catch_39); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &vtbl_cname_0, 0), _fx_catch_39); fx_str_t slit_55 = FX_MAKE_STR("* vtbl;"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_55, 0), _fx_catch_39); FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_39); fx_str_t slit_56 = FX_MAKE_STR("fx_object_t* obj;"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_56, 0), _fx_catch_39); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_39); FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_39); FX_CALL(_fx_M4C_ppFM6stringS1S(&ci_cname_0, &v_21, 0), _fx_catch_39); fx_str_t slit_57 = FX_MAKE_STR("} "); fx_str_t slit_58 = FX_MAKE_STR(";"); { const fx_str_t strs_4[] = { slit_57, v_21, slit_58 }; FX_CALL(fx_strjoin(0, 0, 0, strs_4, 3, &v_22), _fx_catch_39); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_22, 0), _fx_catch_39); FX_CALL(_fx_M2PPFM7newlinev1RM1t(pp_0, 0), _fx_catch_39); _fx_catch_39: ; FX_FREE_STR(&v_22); FX_FREE_STR(&v_21); FX_FREE_STR(&vtbl_cname_0); FX_FREE_STR(&v_20); FX_FREE_STR(&v_19); FX_FREE_STR(&ci_cname_0); } else if (tag_0 == 23) { _fx_LN15C_form__cstmt_t cm_body_0 = 0; _fx_LR9Ast__id_t 
cm_args_0 = 0; fx_str_t cm_cname_0 = {0}; _fx_R19C_form__cdefmacro_t* v_24 = &s_0->u.CMacroDef->data; _fx_R10Ast__loc_t cm_loc_0 = v_24->cm_loc; FX_COPY_PTR(v_24->cm_body, &cm_body_0); FX_COPY_PTR(v_24->cm_args, &cm_args_0); fx_copy_str(&v_24->cm_cname, &cm_cname_0); fx_str_t slit_59 = FX_MAKE_STR("#define "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_59, 0), _fx_catch_44); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &cm_cname_0, 0), _fx_catch_44); if (cm_args_0 != 0) { fx_str_t slit_60 = FX_MAKE_STR("("); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_60, 0), _fx_catch_41); int_ i_3 = 0; _fx_LR9Ast__id_t lst_5 = cm_args_0; for (; lst_5; lst_5 = lst_5->tl, i_3 += 1) { _fx_R9Ast__id_t* a_0 = &lst_5->hd; if (i_3 > 0) { fx_str_t slit_61 = FX_MAKE_STR(", "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_61, 0), _fx_catch_40); } FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, a_0, &cm_loc_0, 0), _fx_catch_40); _fx_catch_40: ; FX_CHECK_EXN(_fx_catch_41); } fx_str_t slit_62 = FX_MAKE_STR(")"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_62, 0), _fx_catch_41); _fx_catch_41: ; } FX_CHECK_EXN(_fx_catch_44); if (cm_body_0 != 0) { _fx_LN15C_form__cstmt_t lst_6 = cm_body_0; for (; lst_6; lst_6 = lst_6->tl) { _fx_N15C_form__cstmt_t s_3 = lst_6->hd; FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_42); fx_str_t slit_63 = FX_MAKE_STR("\\"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_63, 0), _fx_catch_42); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_42); fx_str_t slit_64 = FX_MAKE_STR(" "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_64, 0), _fx_catch_42); FX_CALL(_fx_M4C_ppFM9pp_cstmt_v2R5PP__tN15C_form__cstmt_t(pp_0, s_3, 0), _fx_catch_42); _fx_catch_42: ; FX_CHECK_EXN(_fx_catch_43); } _fx_catch_43: ; } FX_CHECK_EXN(_fx_catch_44); _fx_catch_44: ; FX_FREE_STR(&cm_cname_0); FX_FREE_LIST_SIMPLE(&cm_args_0); if (cm_body_0) { _fx_free_LN15C_form__cstmt_t(&cm_body_0); } } else if (tag_0 == 24) { _fx_T2R9Ast__id_tR10Ast__loc_t* vcase_11 = &s_0->u.CMacroUndef; 
FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_45); fx_str_t slit_65 = FX_MAKE_STR("#undef "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_65, 0), _fx_catch_45); FX_CALL(_fx_M4C_ppFM5pp_idv3R5PP__tR9Ast__id_tR10Ast__loc_t(pp_0, &vcase_11->t0, &vcase_11->t1, 0), _fx_catch_45); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_45); _fx_catch_45: ; } else if (tag_0 == 25) { _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t cs_l_0 = 0; _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* vcase_12 = &s_0->u.CMacroIf; _fx_LN15C_form__cstmt_t else_l_0 = vcase_12->t1; int_ i_4 = 0; FX_COPY_PTR(vcase_12->t0, &cs_l_0); _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t lst_7 = cs_l_0; for (; lst_7; lst_7 = lst_7->tl, i_4 += 1) { _fx_N14C_form__cexp_t c_0 = 0; _fx_LN15C_form__cstmt_t sl_0 = 0; fx_str_t v_25 = {0}; _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* __pat___2 = &lst_7->hd; FX_COPY_PTR(__pat___2->t0, &c_0); FX_COPY_PTR(__pat___2->t1, &sl_0); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_47); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_47); if (i_4 == 0) { fx_str_t slit_66 = FX_MAKE_STR("#if "); fx_copy_str(&slit_66, &v_25); } else { fx_str_t slit_67 = FX_MAKE_STR("#elif "); fx_copy_str(&slit_67, &v_25); } FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_25, 0), _fx_catch_47); FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, c_0, 0, 0), _fx_catch_47); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_47); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_47); _fx_LN15C_form__cstmt_t lst_8 = sl_0; for (; lst_8; lst_8 = lst_8->tl) { _fx_N15C_form__cstmt_t s_4 = lst_8->hd; FX_CALL(_fx_M4C_ppFM9pp_cstmt_v2R5PP__tN15C_form__cstmt_t(pp_0, s_4, 0), _fx_catch_46); _fx_catch_46: ; FX_CHECK_EXN(_fx_catch_47); } _fx_catch_47: ; FX_FREE_STR(&v_25); if (sl_0) { _fx_free_LN15C_form__cstmt_t(&sl_0); } if (c_0) { _fx_free_N14C_form__cexp_t(&c_0); } FX_CHECK_EXN(_fx_catch_50); } if (else_l_0 != 0) { _fx_LN15C_form__cstmt_t else_l_1 = 0; 
FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_49); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_49); fx_str_t slit_68 = FX_MAKE_STR("#else"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_68, 0), _fx_catch_49); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_49); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_49); FX_COPY_PTR(else_l_0, &else_l_1); _fx_LN15C_form__cstmt_t lst_9 = else_l_1; for (; lst_9; lst_9 = lst_9->tl) { _fx_N15C_form__cstmt_t s_5 = lst_9->hd; FX_CALL(_fx_M4C_ppFM9pp_cstmt_v2R5PP__tN15C_form__cstmt_t(pp_0, s_5, 0), _fx_catch_48); _fx_catch_48: ; FX_CHECK_EXN(_fx_catch_49); } _fx_catch_49: ; if (else_l_1) { _fx_free_LN15C_form__cstmt_t(&else_l_1); } } FX_CHECK_EXN(_fx_catch_50); FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_50); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_50); fx_str_t slit_69 = FX_MAKE_STR("#endif"); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_69, 0), _fx_catch_50); _fx_catch_50: ; if (cs_l_0) { _fx_free_LT2N14C_form__cexp_tLN15C_form__cstmt_t(&cs_l_0); } } else if (tag_0 == 26) { FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_51); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_51); fx_str_t slit_70 = FX_MAKE_STR("#include "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_70, 0), _fx_catch_51); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &s_0->u.CMacroInclude.t0, 0), _fx_catch_51); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_51); _fx_catch_51: ; } else if (tag_0 == 27) { FX_CALL(_fx_M2PPFM6break0v1RM1t(pp_0, 0), _fx_catch_52); FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_52); fx_str_t slit_71 = FX_MAKE_STR("#pragma "); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_71, 0), _fx_catch_52); FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &s_0->u.CMacroPragma.t0, 0), _fx_catch_52); FX_CALL(_fx_M2PPFM3endv1RM1t(pp_0, 0), _fx_catch_52); _fx_catch_52: ; } else { FX_FAST_THROW(FX_EXN_NoMatchError, _fx_cleanup); } _fx_cleanup: ; return fx_status; } static int 
/* NOTE(review): machine-generated code (ficus compiler output for module C_pp).
 * Only comments/formatting are touched here; the FX_CALL/goto-label chains
 * implement the generator's exception-propagation and cleanup protocol and
 * must not be reordered by hand. */

/* Pretty-prints a C "if (e) s1 else ..." cascade.
 * Iterates instead of recursing on the else-arm: each loop turn prints one
 * "<prefix> (cond) body" link, then either stops when the else branch is
 * empty (tag 1, or tag 7 == CStmtBlock with an empty list), continues the
 * cascade with prefix "else if" when the else branch is itself an if
 * (tag 9 == CStmtIf), or prints a final "else" block and stops. */
_fx_M4C_ppFM16print_cascade_ifv5SN14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR5PP__t(
    fx_str_t* prefix_0, struct _fx_N14C_form__cexp_t_data_t* e_0,
    struct _fx_N15C_form__cstmt_t_data_t* s1_0,
    struct _fx_N15C_form__cstmt_t_data_t* s2_0, struct _fx_R5PP__t* pp_0,
    void* fx_fv)
{
    fx_str_t prefix_1 = {0};
    _fx_N14C_form__cexp_t e_1 = 0;
    _fx_N15C_form__cstmt_t s1_1 = 0;
    _fx_N15C_form__cstmt_t s2_1 = 0;
    int fx_status = 0;
    FX_CALL(fx_check_stack(), _fx_cleanup);
    fx_copy_str(prefix_0, &prefix_1);
    FX_COPY_PTR(e_0, &e_1);
    FX_COPY_PTR(s1_0, &s1_1);
    FX_COPY_PTR(s2_0, &s2_1);
    for (;;) {
        /* loop-local copies of the current cascade link */
        fx_str_t prefix_2 = {0};
        _fx_N14C_form__cexp_t e_2 = 0;
        _fx_N15C_form__cstmt_t s1_2 = 0;
        _fx_N15C_form__cstmt_t s2_2 = 0;
        fx_str_t v_0 = {0};
        fx_copy_str(&prefix_1, &prefix_2);
        FX_COPY_PTR(e_1, &e_2);
        FX_COPY_PTR(s1_1, &s1_2);
        FX_COPY_PTR(s2_1, &s2_2);
        FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_3);
        /* emit "<prefix> (<cond>) <body>" */
        fx_str_t slit_0 = FX_MAKE_STR(" (");
        {
            const fx_str_t strs_0[] = { prefix_2, slit_0 };
            FX_CALL(fx_strjoin(0, 0, 0, strs_0, 2, &v_0), _fx_catch_3);
        }
        FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &v_0, 0), _fx_catch_3);
        FX_CALL(_fx_M4C_ppFM8pp_cexp_v3R5PP__tN14C_form__cexp_ti(pp_0, e_2, 0, 0), _fx_catch_3);
        fx_str_t slit_1 = FX_MAKE_STR(")");
        FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_1, 0), _fx_catch_3);
        FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_3);
        FX_CALL(_fx_M4C_ppFM26pprint_cstmt_or_block_cboxv2R5PP__tN15C_form__cstmt_t(pp_0, s1_2, 0), _fx_catch_3);
        int tag_0 = FX_REC_VARIANT_TAG(s2_2);
        bool res_0;
        /* res_0 := "the else branch is empty" */
        if (tag_0 == 1) { res_0 = true; goto _fx_endmatch_0; }
        if (tag_0 == 7) {
            if (s2_2->u.CStmtBlock.t0 == 0) { res_0 = true; goto _fx_endmatch_0; }
        }
        res_0 = false;
_fx_endmatch_0: ;
        FX_CHECK_EXN(_fx_catch_3);
        if (res_0) {
            FX_BREAK(_fx_catch_0);
_fx_catch_0: ;
            goto _fx_endmatch_1;
        }
        if (tag_0 == 9) {
            /* else branch is itself an if: continue the cascade as "else if" */
            _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* vcase_0 = &s2_2->u.CStmtIf;
            FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_1);
            fx_str_t slit_2 = FX_MAKE_STR("else if");
            FX_FREE_STR(&prefix_1);
            fx_copy_str(&slit_2, &prefix_1);
            _fx_N14C_form__cexp_t* e__0 = &vcase_0->t0;
            _fx_free_N14C_form__cexp_t(&e_1);
            FX_COPY_PTR(*e__0, &e_1);
            _fx_N15C_form__cstmt_t* s1__0 = &vcase_0->t1;
            _fx_free_N15C_form__cstmt_t(&s1_1);
            FX_COPY_PTR(*s1__0, &s1_1);
            _fx_N15C_form__cstmt_t* s2__0 = &vcase_0->t2;
            _fx_free_N15C_form__cstmt_t(&s2_1);
            FX_COPY_PTR(*s2__0, &s2_1);
_fx_catch_1: ;
            goto _fx_endmatch_1;
        }
        /* plain trailing "else { ... }" — terminate the cascade */
        FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_2);
        FX_CALL(_fx_M2PPFM5beginv1RM1t(pp_0, 0), _fx_catch_2);
        fx_str_t slit_3 = FX_MAKE_STR("else");
        FX_CALL(_fx_M2PPFM3strv2RM1tS(pp_0, &slit_3, 0), _fx_catch_2);
        FX_CALL(_fx_M2PPFM5spacev1RM1t(pp_0, 0), _fx_catch_2);
        FX_CALL(_fx_M4C_ppFM26pprint_cstmt_or_block_cboxv2R5PP__tN15C_form__cstmt_t(pp_0, s2_2, 0), _fx_catch_2);
        FX_BREAK(_fx_catch_2);
_fx_catch_2: ;
_fx_endmatch_1: ;
        FX_CHECK_EXN(_fx_catch_3);
_fx_catch_3: ;
        /* per-iteration cleanup */
        FX_FREE_STR(&v_0);
        if (s2_2) { _fx_free_N15C_form__cstmt_t(&s2_2); }
        if (s1_2) { _fx_free_N15C_form__cstmt_t(&s1_2); }
        if (e_2) { _fx_free_N14C_form__cexp_t(&e_2); }
        FX_FREE_STR(&prefix_2);
        FX_CHECK_BREAK();
        FX_CHECK_EXN(_fx_cleanup);
    }
_fx_cleanup: ;
    FX_FREE_STR(&prefix_1);
    if (e_1) { _fx_free_N14C_form__cexp_t(&e_1); }
    if (s1_1) { _fx_free_N15C_form__cstmt_t(&s1_1); }
    if (s2_1) { _fx_free_N15C_form__cstmt_t(&s2_1); }
    return fx_status;
}

/* Renders a whole top-level C statement list into one string:
 * pretty-print each statement (with a break between consecutive ones),
 * flush, fetch the produced line list via pp_0.get_f, then join it with
 * prefix "", separator "\n" and suffix "\n". */
FX_EXTERN_C int _fx_M4C_ppFM20pprint_top_to_stringS1LN15C_form__cstmt_t(
    struct _fx_LN15C_form__cstmt_t_data_t* code_0, fx_str_t* fx_result,
    void* fx_fv)
{
    _fx_R5PP__t pp_0 = {0};
    _fx_LS all_lines_0 = 0;
    int fx_status = 0;
    FX_CALL(_fx_M2PPFM21pprint_to_string_listRM1t2ii(128, 3, &pp_0, 0), _fx_cleanup);
    FX_CALL(_fx_M2PPFM6beginvv2RM1ti(&pp_0, 0, 0), _fx_cleanup);
    int_ i_0 = 0;
    _fx_LN15C_form__cstmt_t lst_0 = code_0;
    for (; lst_0; lst_0 = lst_0->tl, i_0 += 1) {
        _fx_N15C_form__cstmt_t s_0 = lst_0->hd;
        if (i_0 != 0) {
            FX_CALL(_fx_M2PPFM6break0v1RM1t(&pp_0, 0), _fx_catch_0);
        }
        FX_CALL(_fx_M4C_ppFM9pp_cstmt_v2R5PP__tN15C_form__cstmt_t(&pp_0, s_0, 0), _fx_catch_0);
_fx_catch_0: ;
        FX_CHECK_EXN(_fx_cleanup);
    }
    FX_CALL(_fx_M2PPFM7newlinev1RM1t(&pp_0, 0), _fx_cleanup);
    FX_CALL(_fx_M2PPFM3endv1RM1t(&pp_0, 0), _fx_cleanup);
    FX_CALL(_fx_M2PPFM5flushv1RM1t(&pp_0, 0), _fx_cleanup);
    _fx_FPLS0* f_0 = &pp_0.get_f;
    FX_CALL(f_0->fp(&all_lines_0, f_0->fcv), _fx_cleanup);
    fx_str_t slit_0 = FX_MAKE_STR("");
    fx_str_t slit_1 = FX_MAKE_STR("\n");
    fx_str_t slit_2 = FX_MAKE_STR("\n");
    FX_CALL(_fx_F12join_embraceS4SSSLS(&slit_0, &slit_1, &slit_2, all_lines_0, fx_result, 0), _fx_cleanup);
_fx_cleanup: ;
    _fx_free_R5PP__t(&pp_0);
    if (all_lines_0) { _fx_free_LS(&all_lines_0); }
    return fx_status;
}

/* Module init/deinit hooks (nothing to do for C_pp). */
FX_EXTERN_C int fx_init_C_pp(void)
{
    int fx_status = 0;
    return fx_status;
}
FX_EXTERN_C void fx_deinit_C_pp(void) { }
/* ======== embedded file boundary: gemm.c (darknet) ======== */
#include "gemm.h"
#include "utils.h"
#include "im2col.h"
#include "dark_cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <string.h>
#include <stdint.h>
#include <time.h>   /* fix: clock()/CLOCKS_PER_SEC are used below; include directly */
#ifdef _WIN32
#include <intrin.h>
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif

/* Tile sizes for the AVX2 micro-kernel in gemm_nn_fast(). */
#define TILE_M 4 // 4 ops
#define TILE_N 16 // AVX2 = 2 ops * 8 floats
#define TILE_K 16 // loop

/* "register" is invalid in C++17; keep it only for C builds. */
#ifdef __cplusplus
#define PUT_IN_REGISTER
#else
#define PUT_IN_REGISTER register
#endif

/* C += sign(A)*B for a "binary" A: a nonzero A entry adds the corresponding
 * B row into C, a zero entry subtracts it. ALPHA is accepted for signature
 * compatibility with the other gemm_* kernels but is not used. */
void gemm_bin(int M, int N, int K, float ALPHA,
    char *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int i, j, k;
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            char A_PART = A[i*lda + k];
            if (A_PART) {
                for (j = 0; j < N; ++j) {
                    C[i*ldc + j] += B[k*ldb + j];
                }
            }
            else {
                for (j = 0; j < N; ++j) {
                    C[i*ldc + j] -= B[k*ldb + j];
                }
            }
        }
    }
}

/* Allocates a rows*cols matrix filled with uniform random floats in [0, 1].
 * Caller owns the returned buffer (free()).
 * Fix: abort with a message instead of dereferencing NULL on calloc failure. */
float *random_matrix(int rows, int cols)
{
    int i;
    float* m = (float*)calloc(rows * cols, sizeof(float));
    if (!m) {
        fprintf(stderr, "random_matrix: calloc failed (%d x %d)\n", rows, cols);
        exit(EXIT_FAILURE);
    }
    for (i = 0; i < rows*cols; ++i) {
        m[i] = (float)rand() / RAND_MAX;
    }
    return m;
}

/* Benchmarks 10 gemm_cpu() calls on random matrices of the given shape.
 * Fix: the measured value is CPU seconds, but the old format string labelled
 * it "ms"; print seconds and do the division in double. */
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if (!TA) a = random_matrix(m, k);
    else a = random_matrix(k, m);
    int lda = (!TA) ? k : m;
    float *b;
    if (!TB) b = random_matrix(k, n);
    else b = random_matrix(n, k);
    int ldb = (!TB) ? n : k;

    float *c = random_matrix(m, n);
    int i;
    clock_t start = clock(), end;
    for (i = 0; i < 10; ++i) {
        gemm_cpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c, n);
    }
    end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",
        m, k, k, n, TA, TB, (double)(end - start) / CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}

/* Public entry point: plain dispatch to the CPU implementation. */
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float BETA,
    float *C, int ldc)
{
    gemm_cpu(TA, TB, M, N, K, ALPHA, A, lda, B, ldb, BETA, C, ldc);
}

//--------------------------------------------
// XNOR bitwise GEMM for binary neural network
//--------------------------------------------

/* 1 iff both bits are equal. */
static inline unsigned char xnor(unsigned char a, unsigned char b) {
    //return a == b;
    return !(a ^ b);
}

// INT-32
static inline uint32_t
get_bit_int32(uint32_t const*const src, size_t index) { size_t src_i = index / 32; int src_shift = index % 32; unsigned char val = (src[src_i] & (1 << src_shift)) > 0; return val; } static inline uint32_t xnor_int32(uint32_t a, uint32_t b) { return ~(a^b); } static inline uint64_t xnor_int64(uint64_t a, uint64_t b) { return ~(a^b); } static inline uint32_t fill_bit_int32(char src) { if (src == 0) return 0x00000000; else return 0xFFFFFFFF; } static inline uint64_t fill_bit_int64(char src) { if (src == 0) return 0x0000000000000000; else return 0xFFFFFFFFFFFFFFFF; } void binary_int32_printf(uint32_t src) { int i; for (i = 0; i < 32; ++i) { if (src & 1) printf("1"); else printf("0"); src = src >> 1; } printf("\n"); } void binary_int64_printf(uint64_t src) { int i; for (i = 0; i < 64; ++i) { if (src & 1) printf("1"); else printf("0"); src = src >> 1; } printf("\n"); } /* void gemm_nn_custom_bin_mean(int M, int N, int K, float ALPHA_UNUSED, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { int *count_arr = calloc(M*N, sizeof(int)); int i, j, k; for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024] for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216] char a_bit = get_bit(A, i*lda + k); for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056] char b_bit = get_bit(B, k*ldb + j); count_arr[i*ldc + j] += xnor(a_bit, b_bit); } } } for (i = 0; i < M; ++i) { float mean_val = mean_arr[i]; for (j = 0; j < N; ++j) { C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val; } } free(count_arr); } */ /* void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { int *count_arr = calloc(M*N, sizeof(int)); int i, j, k; for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024] for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056] for (k = 0; k < K; 
++k) { // l.size*l.size*l.c - one filter size [27 - 9216] char a_bit = get_bit(A, i*lda + k); char b_bit = get_bit(B, j*ldb + k); count_arr[i*ldc + j] += xnor(a_bit, b_bit); } } } for (i = 0; i < M; ++i) { float mean_val = mean_arr[i]; for (j = 0; j < N; ++j) { C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val; } } free(count_arr); } */ /* void gemm_nn_custom_bin_mean(int M, int N, int K, float ALPHA_UNUSED, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { int *count_arr = calloc(M*N, sizeof(int)); int i; #pragma omp parallel for for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024] int j, k, h; for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216] const char a_bit = get_bit(A, i*lda + k); uint64_t a_bit64 = fill_bit_int64(a_bit); int k_ldb = k*ldb; for (j = 0; j < N; j += 64) { // out_h*out_w - one channel output size [169 - 173056] if ((N - j > 64) && (k_ldb % 8 == 0)) { uint64_t b_bit64 = *((uint64_t *)(B + (k_ldb + j) / 8)); uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64); //printf("\n %d \n",__builtin_popcountll(c_bit64)); // gcc printf("\n %d \n", __popcnt64(c_bit64)); // msvs int h; for (h = 0; h < 64; ++h) if ((c_bit64 >> h) & 1) count_arr[i*ldc + j + h] += 1; //binary_int64_printf(a_bit64); //binary_int64_printf(b_bit64); //binary_int64_printf(c_bit64); } else { for (; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056] char b_bit = get_bit(B, k_ldb + j); if (xnor(a_bit, b_bit)) count_arr[i*ldc + j] += 1; } } } } } if (mean_arr) { //int K_2 = K / 2; for (i = 0; i < M; ++i) { float mean_val = mean_arr[i]; //float mean_val2 = 2 * mean_val; for (j = 0; j < N; ++j) { C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val; //C[i*ldc + j] = (count_arr[i*ldc + j] - K_2) *mean_val2; } } } else { for (i = 0; i < M; ++i) { for (j = 0; j < N; ++j) { C[i*ldc + j] = count_arr[i*ldc + j] - K / 2; } } } free(count_arr); //getchar(); } */ /* void 
gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { int i; #pragma omp parallel for for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024] int j, k, h; float mean_val = mean_arr[i]; for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056] int count = 0; for (k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216] uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64); #ifdef WIN32 int tmp_count = __popcnt64(c_bit64); #else int tmp_count = __builtin_popcountll(c_bit64); #endif if (K - k < 64) tmp_count = tmp_count - (64 - (K - k)); // remove extra bits count += tmp_count; //binary_int64_printf(c_bit64); //printf(", count = %d \n\n", tmp_count); } C[i*ldc + j] = (2 * count - K) * mean_val; } } } */ //---------------------------- // is not used void transpose_32x32_bits_my(uint32_t *A, uint32_t *B, int lda, int ldb) { unsigned int x, y; for (y = 0; y < 32; ++y) { for (x = 0; x < 32; ++x) { if (A[y * lda] & (1 << x)) B[x * ldb] |= (uint32_t)1 << y; } } } #ifndef GPU uint8_t reverse_8_bit(uint8_t a) { return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16; } uint32_t reverse_32_bit(uint32_t a) { // unsigned int __rbit(unsigned int val) // for ARM //__asm__("rbit %0, %1\n" : "=r"(output) : "r"(input)); return (reverse_8_bit(a >> 24) << 0) | (reverse_8_bit(a >> 16) << 8) | (reverse_8_bit(a >> 8) << 16) | (reverse_8_bit(a >> 0) << 24); } #define swap(a0, a1, j, m) t = (a0 ^ (a1 >>j)) & m; a0 = a0 ^ t; a1 = a1 ^ (t << j); void transpose32_optimized(uint32_t A[32]) { int j, k; unsigned m, t; //m = 0x0000FFFF; //for (j = 16; j != 0; j = j >> 1, m = m ^ (m << j)) { // for (k = 0; k < 32; k = (k + j + 1) & ~j) { // t = (A[k] ^ (A[k + j] >> j)) & m; // A[k] = A[k] ^ t; // A[k + j] = 
A[k + j] ^ (t << j); // } //} j = 16; m = 0x0000FFFF; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 8; m = 0x00ff00ff; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 4; m = 0x0f0f0f0f; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 2; m = 0x33333333; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 1; m = 0x55555555; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } // reverse Y for (j = 0; j < 16; ++j) { uint32_t tmp = A[j]; A[j] = reverse_32_bit(A[31 - j]); A[31 - j] = reverse_32_bit(tmp); } } void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, int n) { unsigned A_tmp[32]; int i; #pragma unroll for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m]; transpose32_optimized(A_tmp); #pragma unroll for (i = 0; i < 32; ++i) B[i*n] = A_tmp[i]; } void transpose_8x8_bits_my(unsigned char *A, unsigned char *B, int lda, int ldb) { unsigned x, y; for (y = 0; y < 8; ++y) { for (x = 0; x < 8; ++x) { if (A[y * lda] & (1 << x)) B[x * ldb] |= 1 << y; } } } unsigned char reverse_byte_1(char a) { return ((a & 0x1) << 7) | ((a & 0x2) << 5) | ((a & 0x4) << 3) | ((a & 0x8) << 1) | ((a & 0x10) >> 1) | ((a & 0x20) >> 3) | ((a & 0x40) >> 5) | ((a & 0x80) >> 7); } unsigned char reverse_byte(unsigned char a) { return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16; } static unsigned char lookup[16] = { 0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe, 0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf, }; unsigned char reverse_byte_3(unsigned char n) { // Reverse the top and bottom nibble then swap them. return (lookup[n & 0b1111] << 4) | lookup[n >> 4]; } void transpose8rS32_reversed_diagonale(unsigned char* A, unsigned char* B, int m, int n) { unsigned x, y, t; x = y = 0; // Load the array and pack it into x and y. 
//x = (A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m]; //y = (A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m]; t = (x ^ (x >> 7)) & 0x00AA00AA; x = x ^ t ^ (t << 7); t = (y ^ (y >> 7)) & 0x00AA00AA; y = y ^ t ^ (t << 7); t = (x ^ (x >> 14)) & 0x0000CCCC; x = x ^ t ^ (t << 14); t = (y ^ (y >> 14)) & 0x0000CCCC; y = y ^ t ^ (t << 14); t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F); y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F); x = t; B[7 * n] = reverse_byte(x >> 24); B[6 * n] = reverse_byte(x >> 16); B[5 * n] = reverse_byte(x >> 8); B[4 * n] = reverse_byte(x); B[3 * n] = reverse_byte(y >> 24); B[2 * n] = reverse_byte(y >> 16); B[1 * n] = reverse_byte(y >> 8); B[0 * n] = reverse_byte(y); } /* // transpose by 8-bit void transpose_bin(char *A, char *B, const int n, const int m, const int lda, const int ldb, const int block_size) { //printf("\n n = %d, ldb = %d \t\t m = %d, lda = %d \n", n, ldb, m, lda); int i; #pragma omp parallel for for (i = 0; i < n; i += 8) { int j; for (j = 0; j < m; j += 8) { int a_index = i*lda + j; int b_index = j*ldb + i; //transpose_8x8_bits_my(&A[a_index/8], &B[b_index/8], lda/8, ldb/8); transpose8rS32_reversed_diagonale(&A[a_index / 8], &B[b_index / 8], lda / 8, ldb / 8); } for (; j < m; ++j) { if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i); } } } */ #endif // transpose by 32-bit void transpose_bin(uint32_t *A, uint32_t *B, const int n, const int m, const int lda, const int ldb, const int block_size) { //printf("\n n = %d (n mod 32 = %d), m = %d (m mod 32 = %d) \n", n, n % 32, m, m % 32); //printf("\n lda = %d (lda mod 32 = %d), ldb = %d (ldb mod 32 = %d) \n", lda, lda % 32, ldb, ldb % 32); int i; #pragma omp parallel for for (i = 0; i < n; i += 32) { int j; for (j = 0; j < m; j += 32) { int a_index = i*lda + j; int b_index = j*ldb + i; transpose_32x32_bits_reversed_diagonale(&A[a_index / 32], &B[b_index / 32], lda / 32, ldb / 32); //transpose_32x32_bits_my(&A[a_index/32], &B[b_index/32], lda/32, ldb/32); } for 
(; j < m; ++j) { if (get_bit((const unsigned char* const)A, i * lda + j)) set_bit((unsigned char* const)B, j * ldb + i); } } } static inline int popcnt_32(uint32_t val32) { #ifdef WIN32 // Windows MSVS int tmp_count = __popcnt(val32); #else // Linux GCC int tmp_count = __builtin_popcount(val32); #endif return tmp_count; } //---------------------------- #if (defined(__AVX__) && defined(__x86_64__)) || defined(_WIN64) #ifdef _WIN64 #include <intrin.h> #include <ammintrin.h> #include <immintrin.h> #include <smmintrin.h> #if defined(_MSC_VER) && _MSC_VER <= 1900 static inline __int32 _mm256_extract_epi64(__m256i a, const int index) { return a.m256i_i64[index]; } static inline __int32 _mm256_extract_epi32(__m256i a, const int index) { return a.m256i_i32[index]; } #endif static inline float _castu32_f32(uint32_t a) { return *((float *)&a); } static inline float _mm256_extract_float32(__m256 a, const int index) { return a.m256_f32[index]; } #else // Linux GCC/Clang #include <x86intrin.h> #include <ammintrin.h> #include <immintrin.h> #include <smmintrin.h> #include <cpuid.h> static inline float _castu32_f32(uint32_t a) { return *((float *)&a); } static inline float _mm256_extract_float32(__m256 a, const int index) { return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), index)); } void asm_cpuid(uint32_t* abcd, uint32_t eax) { uint32_t ebx = 0, edx = 0, ecx = 0; // EBX is saved to EDI and later restored __asm__("movl %%ebx, %%edi;" "cpuid;" "xchgl %%ebx, %%edi;" : "=D"(ebx), "+a"(eax), "+c"(ecx), "=d"(edx)); abcd[0] = eax; abcd[1] = ebx; abcd[2] = ecx; abcd[3] = edx; } #endif #ifdef _WIN32 // Windows #define cpuid(info, x) __cpuidex(info, x, 0) #else // GCC Intrinsics void cpuid(int info[4], int InfoType) { __cpuid_count(InfoType, 0, info[0], info[1], info[2], info[3]); } #endif // Misc. 
static int HW_MMX, HW_x64, HW_RDRAND, HW_BMI1, HW_BMI2, HW_ADX, HW_PREFETCHWT1; static int HW_ABM; // Advanced Bit Manipulation // SIMD: 128-bit static int HW_SSE, HW_SSE2, HW_SSE3, HW_SSSE3, HW_SSE41, HW_SSE42, HW_SSE4a, HW_AES, HW_SHA; // SIMD: 256-bit static int HW_AVX, HW_XOP, HW_FMA3, HW_FMA4, HW_AVX2; // SIMD: 512-bit static int HW_AVX512F; // AVX512 Foundation static int HW_AVX512CD; // AVX512 Conflict Detection static int HW_AVX512PF; // AVX512 Prefetch static int HW_AVX512ER; // AVX512 Exponential + Reciprocal static int HW_AVX512VL; // AVX512 Vector Length Extensions static int HW_AVX512BW; // AVX512 Byte + Word static int HW_AVX512DQ; // AVX512 Doubleword + Quadword static int HW_AVX512IFMA; // AVX512 Integer 52-bit Fused Multiply-Add static int HW_AVX512VBMI; // AVX512 Vector Byte Manipulation Instructions // https://stackoverflow.com/questions/6121792/how-to-check-if-a-cpu-supports-the-sse3-instruction-set void check_cpu_features(void) { int info[4]; cpuid(info, 0); int nIds = info[0]; cpuid(info, 0x80000000); unsigned nExIds = info[0]; // Detect Features if (nIds >= 0x00000001) { cpuid(info, 0x00000001); HW_MMX = (info[3] & ((int)1 << 23)) != 0; HW_SSE = (info[3] & ((int)1 << 25)) != 0; HW_SSE2 = (info[3] & ((int)1 << 26)) != 0; HW_SSE3 = (info[2] & ((int)1 << 0)) != 0; HW_SSSE3 = (info[2] & ((int)1 << 9)) != 0; HW_SSE41 = (info[2] & ((int)1 << 19)) != 0; HW_SSE42 = (info[2] & ((int)1 << 20)) != 0; HW_AES = (info[2] & ((int)1 << 25)) != 0; HW_AVX = (info[2] & ((int)1 << 28)) != 0; HW_FMA3 = (info[2] & ((int)1 << 12)) != 0; HW_RDRAND = (info[2] & ((int)1 << 30)) != 0; } if (nIds >= 0x00000007) { cpuid(info, 0x00000007); HW_AVX2 = (info[1] & ((int)1 << 5)) != 0; HW_BMI1 = (info[1] & ((int)1 << 3)) != 0; HW_BMI2 = (info[1] & ((int)1 << 8)) != 0; HW_ADX = (info[1] & ((int)1 << 19)) != 0; HW_SHA = (info[1] & ((int)1 << 29)) != 0; HW_PREFETCHWT1 = (info[2] & ((int)1 << 0)) != 0; HW_AVX512F = (info[1] & ((int)1 << 16)) != 0; HW_AVX512CD = (info[1] & ((int)1 
<< 28)) != 0; HW_AVX512PF = (info[1] & ((int)1 << 26)) != 0; HW_AVX512ER = (info[1] & ((int)1 << 27)) != 0; HW_AVX512VL = (info[1] & ((int)1 << 31)) != 0; HW_AVX512BW = (info[1] & ((int)1 << 30)) != 0; HW_AVX512DQ = (info[1] & ((int)1 << 17)) != 0; HW_AVX512IFMA = (info[1] & ((int)1 << 21)) != 0; HW_AVX512VBMI = (info[2] & ((int)1 << 1)) != 0; } if (nExIds >= 0x80000001) { cpuid(info, 0x80000001); HW_x64 = (info[3] & ((int)1 << 29)) != 0; HW_ABM = (info[2] & ((int)1 << 5)) != 0; HW_SSE4a = (info[2] & ((int)1 << 6)) != 0; HW_FMA4 = (info[2] & ((int)1 << 16)) != 0; HW_XOP = (info[2] & ((int)1 << 11)) != 0; } } int is_avx() { static int result = -1; if (result == -1) { check_cpu_features(); result = HW_AVX; if (result == 1) printf(" Used AVX \n"); else printf(" Not used AVX \n"); } return result; } int is_fma_avx2() { static int result = -1; if (result == -1) { check_cpu_features(); result = HW_FMA3 && HW_AVX2; if (result == 1) printf(" Used FMA & AVX2 \n"); else printf(" Not used FMA & AVX2 \n"); } return result; } // https://software.intel.com/sites/landingpage/IntrinsicsGuide void gemm_nn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i, j, k; if (is_avx() == 1) { // AVX for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { float A_PART = ALPHA*A[i*lda + k]; __m256 a256, b256, c256, result256; // AVX a256 = _mm256_set1_ps(A_PART); for (j = 0; j < N - 8; j += 8) { b256 = _mm256_loadu_ps(&B[k*ldb + j]); c256 = _mm256_loadu_ps(&C[i*ldc + j]); // FMA - Intel Haswell (2013), AMD Piledriver (2012) //result256 = _mm256_fmadd_ps(a256, b256, c256); result256 = _mm256_mul_ps(a256, b256); result256 = _mm256_add_ps(result256, c256); _mm256_storeu_ps(&C[i*ldc + j], result256); } int prev_end = (N % 8 == 0) ? 
                    (N - 8) : (N / 8) * 8;
                for (j = prev_end; j < N; ++j)
                    C[i*ldc + j] += A_PART*B[k*ldb + j];
            }
        }
    }
    else {
        /* scalar fallback (no AVX) */
        for (i = 0; i < M; ++i) {
            for (k = 0; k < K; ++k) {
                PUT_IN_REGISTER float A_PART = ALPHA * A[i * lda + k];
                for (j = 0; j < N; ++j) {
                    C[i*ldc + j] += A_PART*B[k*ldb + j];
                }
                /* (a commented-out SSE variant of this inner loop was removed) */
            }
        }
    }
}

/* C += ALPHA*A*B with 4x16 register tiling: each tile iteration keeps a
 * 4-row by 16-column block of C in eight __m256 accumulators and streams
 * TILE_K values of A/B through them. Scalar loops mop up the ragged edges
 * in j, k, and finally i. Parallelized over row-tiles with OpenMP. */
void gemm_nn_fast(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < (M / TILE_M)*TILE_M; i += TILE_M)
    {
        int j, k;
        int i_d, k_d;

        for (k = 0; k < (K / TILE_K)*TILE_K; k += TILE_K)
        {
            for (j = 0; j < (N / TILE_N)*TILE_N; j += TILE_N)
            {
                // L1 - 6 bits tag [11:6] - cache size 32 KB, conflict for each 4 KB
                // L2 - 9 bits tag [14:6] - cache size 256 KB, conflict for each 32 KB
                // L3 - 13 bits tag [18:6] - cache size 8 MB, conflict for each 512 KB

                __m256 result256;
                __m256 a256_0, b256_0;    // AVX
                __m256 a256_1, b256_1;    // AVX
                __m256 a256_2;// , b256_2;    // AVX
                __m256 a256_3;// , b256_3;    // AVX
                __m256 c256_0, c256_1, c256_2, c256_3;
                __m256 c256_4, c256_5, c256_6, c256_7;

                /* load the 4x16 C tile into registers */
                c256_0 = _mm256_loadu_ps(&C[(0 + i)*ldc + (0 + j)]);
                c256_1 = _mm256_loadu_ps(&C[(1 + i)*ldc + (0 + j)]);
                c256_2 = _mm256_loadu_ps(&C[(0 + i)*ldc + (8 + j)]);
                c256_3 = _mm256_loadu_ps(&C[(1 + i)*ldc + (8 + j)]);

                c256_4 = _mm256_loadu_ps(&C[(2 + i)*ldc + (0 + j)]);
                c256_5 = _mm256_loadu_ps(&C[(3 + i)*ldc + (0 + j)]);
                c256_6 = _mm256_loadu_ps(&C[(2 + i)*ldc + (8 + j)]);
                c256_7 = _mm256_loadu_ps(&C[(3 + i)*ldc + (8 + j)]);

                for (k_d = 0; k_d < (TILE_K); ++k_d)
                {
                    a256_0 = _mm256_set1_ps(ALPHA*A[(0 + i)*lda + (k_d + k)]);
                    a256_1 = _mm256_set1_ps(ALPHA*A[(1 + i)*lda + (k_d + k)]);
                    a256_2 = _mm256_set1_ps(ALPHA*A[(2 + i)*lda + (k_d + k)]);
                    a256_3 = _mm256_set1_ps(ALPHA*A[(3 + i)*lda + (k_d + k)]);

                    b256_0 = _mm256_loadu_ps(&B[(k_d + k)*ldb + (0 + j)]);
                    b256_1 = _mm256_loadu_ps(&B[(k_d + k)*ldb + (8 + j)]);

                    // FMA - Intel Haswell (2013), AMD Piledriver (2012)
                    //c256_0 = _mm256_fmadd_ps(a256_0, b256_0, c256_0);
                    //c256_1 = _mm256_fmadd_ps(a256_1, b256_0, c256_1);
                    //c256_2 = _mm256_fmadd_ps(a256_0, b256_1, c256_2);
                    //c256_3 = _mm256_fmadd_ps(a256_1, b256_1, c256_3);

                    //c256_4 = _mm256_fmadd_ps(a256_2, b256_0, c256_4);
                    //c256_5 = _mm256_fmadd_ps(a256_3, b256_0, c256_5);
                    //c256_6 = _mm256_fmadd_ps(a256_2, b256_1, c256_6);
                    //c256_7 = _mm256_fmadd_ps(a256_3, b256_1, c256_7);

                    /* separate mul+add (pre-FMA hardware) */
                    result256 = _mm256_mul_ps(a256_0, b256_0);
                    c256_0 = _mm256_add_ps(result256, c256_0);

                    result256 = _mm256_mul_ps(a256_1, b256_0);
                    c256_1 = _mm256_add_ps(result256, c256_1);

                    result256 = _mm256_mul_ps(a256_0, b256_1);
                    c256_2 = _mm256_add_ps(result256, c256_2);

                    result256 = _mm256_mul_ps(a256_1, b256_1);
                    c256_3 = _mm256_add_ps(result256, c256_3);

                    result256 = _mm256_mul_ps(a256_2, b256_0);
                    c256_4 = _mm256_add_ps(result256, c256_4);

                    result256 = _mm256_mul_ps(a256_3, b256_0);
                    c256_5 = _mm256_add_ps(result256, c256_5);

                    result256 = _mm256_mul_ps(a256_2, b256_1);
                    c256_6 = _mm256_add_ps(result256, c256_6);

                    result256 = _mm256_mul_ps(a256_3, b256_1);
                    c256_7 = _mm256_add_ps(result256, c256_7);
                }
                /* write the accumulated tile back */
                _mm256_storeu_ps(&C[(0 + i)*ldc + (0 + j)], c256_0);
                _mm256_storeu_ps(&C[(1 + i)*ldc + (0 + j)], c256_1);
                _mm256_storeu_ps(&C[(0 + i)*ldc + (8 + j)], c256_2);
                _mm256_storeu_ps(&C[(1 + i)*ldc + (8 + j)], c256_3);

                _mm256_storeu_ps(&C[(2 + i)*ldc + (0 + j)], c256_4);
                _mm256_storeu_ps(&C[(3 + i)*ldc + (0 + j)], c256_5);
                _mm256_storeu_ps(&C[(2 + i)*ldc + (8 + j)], c256_6);
                _mm256_storeu_ps(&C[(3 + i)*ldc + (8 + j)], c256_7);
            }

            /* scalar tail over the remaining columns (j) */
            for (j = (N / TILE_N)*TILE_N; j < N; ++j) {
                for (i_d = i; i_d < (i + TILE_M); ++i_d)
                {
                    for (k_d = k; k_d < (k + TILE_K); ++k_d)
                    {
                        PUT_IN_REGISTER float A_PART = ALPHA*A[i_d*lda + k_d];
                        C[i_d*ldc + j] += A_PART*B[k_d*ldb + j];
                    }
                }
            }
        }

        /* scalar tail over the remaining depth (k) */
        for (k = (K / TILE_K)*TILE_K; k < K; ++k)
        {
            for (i_d = i; i_d < (i + TILE_M); ++i_d)
            {
                PUT_IN_REGISTER float A_PART = ALPHA*A[i_d*lda + k];
                for (j = 0; j < N; ++j) {
                    C[i_d*ldc + j] += A_PART*B[k*ldb + j];
                }
            }
        }
    }

    /* scalar tail over the remaining rows (i) */
    for (i = (M / TILE_M)*TILE_M; i < M; ++i) {
        int j, k;
        for (k = 0; k < K; ++k) {
            PUT_IN_REGISTER float A_PART = ALPHA*A[i*lda + k];
            for (j = 0; j < N; ++j) {
                C[i*ldc + j] += A_PART*B[k*ldb + j];
            }
        }
    }
}

/* XNOR-net GEMM on 32-bit packed sign bits: for each output element,
 * popcount(xnor(A_word, B_word)) is turned into the signed contribution
 * (2*count - 32) * mean_arr[i]. AVX path handles 8 B-words at a time.
 * (Function body continues on the next source chunk.) */
void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA,
    uint32_t *A, int lda,
    uint32_t *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < M; ++i)
    {   // l.n
        int j, s;
        float mean_val = mean_arr[i];
        //printf(" l.mean_arr[i] = %d \n ", l.mean_arr[i]);

        for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c)
        {
            PUT_IN_REGISTER uint32_t A_PART = A[i*lda + s];
            __m256i a256 = _mm256_set1_epi32(A_PART);

            for (j = 0; j < N - 8; j += 8)
            {
                __m256i b256 = *((__m256i*)&B[s*ldb + j]);
                __m256i xor256 = _mm256_xor_si256(a256, b256);  // xnor = xor(a,b)
                __m256i all_1 = _mm256_set1_epi8((char)255);
                __m256i xnor256 = _mm256_andnot_si256(xor256, all_1); // xnor = not(xor(a,b))

                // waiting for - CPUID Flags: AVX512VPOPCNTDQ: __m512i _mm512_popcnt_epi32(__m512i a)
                __m256 count = _mm256_setr_ps(
                    popcnt_32(_mm256_extract_epi32(xnor256, 0)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 1)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 2)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 3)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 4)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 5)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 6)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 7)));

                __m256 val2 = _mm256_set1_ps(2);
                count = _mm256_mul_ps(count, val2);     // count * 2

                __m256 val32 = _mm256_set1_ps(32);
                count = _mm256_sub_ps(count, val32);    // count - 32

                __m256 mean256 = _mm256_set1_ps(mean_val);
                count =
                    _mm256_mul_ps(count, mean256);  // count * mean_val

                __m256 c256 = *((__m256*)&C[i*ldc + j]);
                count = _mm256_add_ps(count, c256);  // c = c + count
                *((__m256*)&C[i*ldc + j]) = count;
            }

            /* scalar tail over the remaining output columns */
            for (; j < N; ++j) // out_h*out_w;
            {
                PUT_IN_REGISTER uint32_t B_PART = B[s*ldb + j];
                uint32_t xnor_result = ~(A_PART ^ B_PART);
                int32_t count = popcnt_32(xnor_result);  // must be Signed int

                C[i*ldc + j] += (2 * count - 32) * mean_val;
            }
        }
    }
}

/* Reference scalar 2-D convolution: output[fil][y][x] += sum over channels
 * and the ksize x ksize window of input * weights, with zero padding.
 * NOTE(review): `stride` is accepted but not applied — output is computed at
 * every input position (stride 1); confirm against callers before reuse. */
void convolution_2d_old(int w, int h, int ksize, int n, int c, int pad, int stride,
    float *weights, float *input, float *output)
{
    //const int out_h = (h + 2 * pad - ksize) / stride + 1;    // output_height=input_height for stride=1 and pad=1
    //const int out_w = (w + 2 * pad - ksize) / stride + 1;    // output_width=input_width for stride=1 and pad=1

    int fil;
    // filter index
    #pragma omp parallel for      // "omp parallel for" - automatic parallelization of loop by using OpenMP
    for (fil = 0; fil < n; ++fil) {
        //int i, f, j;
        int chan, y, x, f_y, f_x;
        // channel index
        for (chan = 0; chan < c; ++chan)
            // input - y
            for (y = 0; y < h; ++y)
                // input - x
                for (x = 0; x < w; ++x)
                {
                    int const output_index = fil*w*h + y*w + x;
                    int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize;
                    int const input_pre_index = chan*w*h;
                    float sum = 0;

                    // filter - y
                    for (f_y = 0; f_y < ksize; ++f_y)
                    {
                        int input_y = y + f_y - pad;
                        // filter - x
                        for (f_x = 0; f_x < ksize; ++f_x)
                        {
                            int input_x = x + f_x - pad;
                            /* zero padding: skip out-of-bounds taps */
                            if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;

                            int input_index = input_pre_index + input_y*w + input_x;
                            int weights_index = weights_pre_index + f_y*ksize + f_x;

                            sum += input[input_index] * weights[weights_index];
                        }
                    }
                    // l.output[filters][width][height] +=
                    //        state.input[channels][width][height] *
                    //        l.weights[filters][channels][filter_width][filter_height];
                    output[output_index] += sum;
                }
    }
}

/* Experimental AVX "binary" convolution: strips the sign bits off the
 * weights in place, then accumulates _mm256_xor_ps(w, in) over the window
 * and scales by |mean[fil]|.
 * NOTE(review): experimental/partially disabled code — it overwrites (not
 * accumulates into) output, ignores `stride`, skips the last x-columns
 * (x < w-8), and several mask/permute steps are commented out. Verify
 * intent against the XNOR-net paper before relying on it. */
void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride,
    float *weights, float *input, float *output, float *mean)
{
    //const int out_h = (h + 2 * pad - ksize) / stride + 1;    // output_height=input_height for stride=1 and pad=1
    //const int out_w = (w + 2 * pad - ksize) / stride + 1;    // output_width=input_width for stride=1 and pad=1
    int i;

#if defined(_OPENMP)
    static int max_num_threads = 0;
    if (max_num_threads == 0) {
        max_num_threads = omp_get_max_threads();
        //omp_set_num_threads( max_num_threads / 2);
    }
#endif

    //convolution_2d_old(w, h, ksize, n, c, pad, stride, weights, input, output);

    /* keep only the sign bit of every weight (8 floats at a time) */
    __m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000,
        0x80000000, 0x80000000, 0x80000000, 0x80000000);
    for (i = 0; i < ksize*ksize*n*c; i += 8) {
        *((__m256*)&weights[i]) = _mm256_and_ps(*((__m256*)&weights[i]), _mm256_castsi256_ps(all256_sing1));
    }

    //for (i = 0; i < w*h*c; i += 8) {
    //*((__m256*)&input[i]) = _mm256_and_ps(*((__m256*)&input[i]), _mm256_castsi256_ps(all256_sing1));
    //}

    //__m256i all256_last_zero = _mm256_set1_epi32(0xFFFFFFFF);
    //all256_last_zero.m256i_i32[7] = 0;
    __m256i all256_last_zero =
        _mm256_set_epi32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0);

    __m256i idx256 = _mm256_set_epi32(0, 7, 6, 5, 4, 3, 2, 1);
    //__m256 all256_sing1 = _mm256_set1_ps(0x80000000);
    __m256 all256_one = _mm256_set1_ps(1);
    __m256i all256i_one = _mm256_set1_epi32(1);

    ///__m256i src256 = _mm256_loadu_si256((__m256i *)(&src[i]));
    ///__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats

    int fil;
    // filter index
    #pragma omp parallel for      // "omp parallel for" - automatic parallelization of loop by using OpenMP
    for (fil = 0; fil < n; ++fil) {
        int chan, y, x, f_y, f_x;
        float cur_mean = fabs(mean[fil]);
        __m256 mean256 = _mm256_set1_ps(cur_mean);
        // channel index
        //for (chan = 0; chan < c; ++chan)
        // input - y
        for (y = 0; y < h; ++y)
            // input - x
            for (x = 0; x < w-8; x+=8)
            {
                int const output_index = fil*w*h + y*w + x;
                float sum = 0;
                __m256 sum256 = _mm256_set1_ps(0);

                for (chan = 0; chan < c; ++chan) {
                    int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize;
                    int const input_pre_index = chan*w*h;

                    // filter - y
                    for (f_y = 0; f_y < ksize; ++f_y)
                    {
                        int input_y = y + f_y - pad;
                        //__m256 in = *((__m256*)&input[input_pre_index + input_y*w]);
                        if (input_y < 0 || input_y >= h) continue;
                        //__m256 in = _mm256_loadu_ps(&input[input_pre_index + input_y*w + x - pad]);

                        // filter - x
                        for (f_x = 0; f_x < ksize; ++f_x)
                        {
                            int input_x = x + f_x - pad;
                            //if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;

                            int input_index = input_pre_index + input_y*w + input_x;
                            int weights_index = weights_pre_index + f_y*ksize + f_x;
                            //if (input_y < 0 || input_y >= h) continue;

                            //sum += input[input_index] * weights[weights_index];

                            __m256 in = *((__m256*)&input[input_index]);
                            __m256 w = _mm256_set1_ps(weights[weights_index]);
                            //__m256 w_sign = _mm256_and_ps(w, _mm256_castsi256_ps(all256_sing1)); // check sign in 8 x 32-bit floats
                            __m256 xor256 = _mm256_xor_ps(w, in);
                            //printf("\n xor256_1 = %f, xor256_2 = %f \n", xor256.m256_f32[0], xor256.m256_f32[1]);
                            //printf("\n in = %f, w = %f, xor256 = %f \n", in.m256_f32[0], w_sign.m256_f32[0], xor256.m256_f32[0]);

                            //__m256 pn1 = _mm256_and_ps(_mm256_castsi256_ps(all256i_one), xor256);

                            //sum256 = xor256;
                            sum256 = _mm256_add_ps(xor256, sum256);
                            //printf("\n --- \n");
                            //printf("\n 0 = %f, 1 = %f, 2 = %f, 3 = %f, 4 = %f, 5 = %f, 6 = %f, 7 = %f \n", in.m256_f32[0], in.m256_f32[1], in.m256_f32[2], in.m256_f32[3], in.m256_f32[4], in.m256_f32[5], in.m256_f32[6], in.m256_f32[7]);

                            if (f_x < ksize-1) {
                                //in = _mm256_permutevar8x32_ps(in, idx256);
                                //in = _mm256_and_ps(in, _mm256_castsi256_ps(all256_last_zero));
                            }
                        }
                    }
                }
                // l.output[filters][width][height] +=
                //        state.input[channels][width][height] *
                //        l.weights[filters][channels][filter_width][filter_height];
                //output[output_index] += sum;

                sum256 = _mm256_mul_ps(sum256, mean256);
                //printf("\n cur_mean = %f, sum256 = %f, sum256 = %f, in = %f \n",
                //    cur_mean, sum256.m256_f32[0], sum256.m256_f32[1], input[input_pre_index]);

                //__m256 out = *((__m256*)&output[output_index]);
                //out = _mm256_add_ps(out, sum256);
                //*((__m256*)&output[output_index]) = out;
                *((__m256*)&output[output_index]) = sum256;

                //_mm256_storeu_ps(&C[i*ldc + j], result256);
            }
    }
}

// http://graphics.stanford.edu/~seander/bithacks.html
// https://stackoverflow.com/questions/17354971/fast-counting-the-number-of-set-bits-in-m128i-register
// https://arxiv.org/pdf/1611.07612.pdf

/* popcount of a 128-bit register via two 64-bit hardware popcounts */
static inline int popcnt128(__m128i n) {
    const __m128i n_hi = _mm_unpackhi_epi64(n, n);
#ifdef _MSC_VER
    return __popcnt64(_mm_cvtsi128_si64(n)) + __popcnt64(_mm_cvtsi128_si64(n_hi));
#else
    return __popcntq(_mm_cvtsi128_si64(n)) + __popcntq(_mm_cvtsi128_si64(n_hi));
#endif
}

/* popcount of a 256-bit register = popcount of both 128-bit halves */
static inline int popcnt256(__m256i n) {
    return popcnt128(_mm256_extractf128_si256(n, 0)) + popcnt128(_mm256_extractf128_si256(n, 1));
}

/* Per-8-byte-lane popcount via the nibble-LUT + PSHUFB + PSADBW trick
 * (Mula's algorithm); returns four 64-bit lane counts in a __m256i. */
static inline __m256i count256(__m256i v) {
    __m256i lookup =
        _mm256_setr_epi8(0, 1, 1, 2, 1, 2, 2, 3, 1, 2,
            2, 3, 2, 3, 3, 4, 0, 1, 1, 2, 1, 2, 2, 3,
            1, 2, 2, 3, 2, 3, 3, 4);

    __m256i low_mask = _mm256_set1_epi8(0x0f);

    __m256i lo = _mm256_and_si256(v, low_mask);
    __m256i hi = _mm256_and_si256(_mm256_srli_epi32(v, 4), low_mask);
    __m256i popcnt1 = _mm256_shuffle_epi8(lookup, lo);
    __m256i popcnt2 = _mm256_shuffle_epi8(lookup, hi);
    __m256i total = _mm256_add_epi8(popcnt1, popcnt2);

    return _mm256_sad_epu8(total, _mm256_setzero_si256());
}

/* total popcount of a 256-bit register using count256() above */
static inline int popcnt256_custom(__m256i n) {
    __m256i val = count256(n);

    //return val.m256i_i64[0] +
    //val.m256i_i64[1] +
    //val.m256i_i64[2] +
    //val.m256i_i64[3];
    return _mm256_extract_epi64(val, 0)
        + _mm256_extract_epi64(val, 1)
        + _mm256_extract_epi64(val, 2)
        + _mm256_extract_epi64(val, 3);
}

/* Accumulates popcount(xnor(a, b)) into *count_sum.
 * (Function body continues past the end of this source chunk.) */
static inline void xnor_avx2_popcnt(__m256i a_bit256, __m256i b_bit256, __m256i *count_sum) {
    __m256i c_bit256 = _mm256_set1_epi8((char)255);

    __m256i xor256 = _mm256_xor_si256(a_bit256, b_bit256);  // xnor = not(xor(a,b))
    c_bit256 = _mm256_andnot_si256(xor256, c_bit256);  // can be optimized - we can do
other NOT for wegihts once and do not do this NOT *count_sum = _mm256_add_epi64(count256(c_bit256), *count_sum); // 1st part - popcnt Mula's algorithm } // 2nd part - popcnt Mula's algorithm static inline int get_count_mula(__m256i count_sum) { return _mm256_extract_epi64(count_sum, 0) + _mm256_extract_epi64(count_sum, 1) + _mm256_extract_epi64(count_sum, 2) + _mm256_extract_epi64(count_sum, 3); } // 5x times faster than gemm()-float32 // further optimizations: do mean-mult only for the last layer void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { int i; #if defined(_OPENMP) static int max_num_threads = 0; if (max_num_threads == 0) { max_num_threads = omp_get_max_threads(); //omp_set_num_threads(max_num_threads / 2); } #endif //#pragma omp parallel for //for (i = 0; i < M; ++i) #pragma omp parallel for for (i = 0; i < (M/2)*2; i += 2) { // l.n - filters [16 - 55 - 1024] float mean_val_0 = mean_arr[i + 0]; float mean_val_1 = mean_arr[i + 1]; int j, k; //__m256i all_1 = _mm256_set1_epi8(255); //for (j = 0; j < N; ++j) for (j = 0; j < (N/2)*2; j += 2) { // out_h*out_w - one channel output size [169 - 173056] //int count = 0; const int bit_step = 256; __m256i count_sum_0 = _mm256_set1_epi8(0); __m256i count_sum_1 = _mm256_set1_epi8(0); __m256i count_sum_2 = _mm256_set1_epi8(0); __m256i count_sum_3 = _mm256_set1_epi8(0); for (k = 0; k < K; k += bit_step) { // l.size*l.size*l.c - one filter size [27 - 9216] __m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + 0)*lda + k) / 8)); __m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8)); __m256i a_bit256_1 = _mm256_loadu_si256((__m256i *)(A + ((i + 1)*lda + k) / 8)); __m256i b_bit256_1 = _mm256_loadu_si256((__m256i *)(B + ((j + 1)*ldb + k) / 8)); xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum_0); xnor_avx2_popcnt(a_bit256_0, b_bit256_1, &count_sum_1); 
xnor_avx2_popcnt(a_bit256_1, b_bit256_0, &count_sum_2); xnor_avx2_popcnt(a_bit256_1, b_bit256_1, &count_sum_3); //count += popcnt256(c_bit256); //binary_int64_printf(c_bit64); //printf(", count = %d \n\n", tmp_count); } int count_0 = get_count_mula(count_sum_0); int count_1 = get_count_mula(count_sum_1); int count_2 = get_count_mula(count_sum_2); int count_3 = get_count_mula(count_sum_3); const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step)); count_0 = count_0 - f1; // remove extra bits (from empty space for align only) count_1 = count_1 - f1; count_2 = count_2 - f1; count_3 = count_3 - f1; C[i*ldc + (j + 0)] = (2 * count_0 - K) * mean_val_0; C[i*ldc + (j + 1)] = (2 * count_1 - K) * mean_val_0; C[(i + 1)*ldc + (j + 0)] = (2 * count_2 - K) * mean_val_1; C[(i + 1)*ldc + (j + 1)] = (2 * count_3 - K) * mean_val_1; } int i_d; for (i_d = 0; i_d < 2; ++i_d) { float mean_val = mean_arr[i + i_d]; for (j = (N / 2) * 2; j < N; j += 1) { // out_h*out_w - one channel output size [169 - 173056] const int bit_step = 256; __m256i count_sum = _mm256_set1_epi8(0); for (k = 0; k < K; k += bit_step) { // l.size*l.size*l.c - one filter size [27 - 9216] __m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + i_d + 0)*lda + k) / 8)); __m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8)); xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum); } int count = get_count_mula(count_sum); const int f1 = (K % bit_step == 0) ? 
0 : (bit_step - (K % bit_step)); count = count - f1; // remove extra bits (from empty space for align only) C[(i + i_d)*ldc + j] = (2 * count - K) * mean_val; } } } for (i = (M / 2) * 2; i < M; i += 1) { float mean_val = mean_arr[i]; int j, k; for (j = 0; j < N; j += 1) { // out_h*out_w - one channel output size [169 - 173056] const int bit_step = 256; __m256i count_sum = _mm256_set1_epi8(0); for (k = 0; k < K; k += bit_step) { // l.size*l.size*l.c - one filter size [27 - 9216] __m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + 0)*lda + k) / 8)); __m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8)); xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum); } int count = get_count_mula(count_sum); const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step)); count = count - f1; // remove extra bits (from empty space for align only) C[i*ldc + j] = (2 * count - K) * mean_val; } } } //From Berkeley Vision's Caffe! //https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_custom_transpose(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col, int ldb_align) { const int height_col = (height + 2 * pad - ksize) / stride + 1; const int width_col = (width + 2 * pad - ksize) / stride + 1; const int channels_col = channels * ksize * ksize; int c; // optimized version if (height_col == height && width_col == width && stride == 1 && pad == 1) { #pragma omp parallel for for (c = 0; c < channels_col; ++c) { int h, w; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = pad; h < height_col - pad; ++h) { for (w = pad; w < width_col - pad - 4; w+=8) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; __m256 src256 = 
_mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)])); data_col[col_index + ldb_align * 0] = _mm256_extract_float32(src256, 0);// src256.m256_f32[0]; data_col[col_index + ldb_align * 1] = _mm256_extract_float32(src256, 1);// src256.m256_f32[1]; data_col[col_index + ldb_align * 2] = _mm256_extract_float32(src256, 2);// src256.m256_f32[2]; data_col[col_index + ldb_align * 3] = _mm256_extract_float32(src256, 3);// src256.m256_f32[3]; data_col[col_index + ldb_align * 4] = _mm256_extract_float32(src256, 4);// src256.m256_f32[4]; data_col[col_index + ldb_align * 5] = _mm256_extract_float32(src256, 5);// src256.m256_f32[5]; data_col[col_index + ldb_align * 6] = _mm256_extract_float32(src256, 6);// src256.m256_f32[6]; data_col[col_index + ldb_align * 7] = _mm256_extract_float32(src256, 7);// src256.m256_f32[7]; //_mm256_storeu_ps(&data_col[col_index], src256); } for (; w < width_col - pad; ++w) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; } } { w = 0; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { w = width_col - 1; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { h = 0; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { h = 
height_col - 1;
                // bottom edge row (h == height_col-1): padded reads go through im2col_get_pixel()
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }
        }
    }
    else {
        // generic path (any stride/pad): scalar gather, still transposed & row-aligned
        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;           // kernel x-offset encoded in row index
            int h_offset = (c / ksize) % ksize; // kernel y-offset encoded in row index
            int c_im = c / ksize / ksize;       // source image channel
            for (h = 0; h < height_col; ++h) {
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h * stride;
                    int im_col = w_offset + w * stride;
                    int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }
        }
    }
}

//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// Standard-layout im2col: data_col[(c*height_col + h)*width_col + w].
// Fast path requires output size == input size (stride==1, pad==1) and FMA/AVX2
// support at runtime; interior pixels are copied 8 floats at a time, the four
// image borders are filled separately through im2col_get_pixel() so padding is
// honored. Otherwise falls back to the reference im2col_cpu().
void im2col_cpu_custom(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col)
{
    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;

    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
    {
        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;           // kernel x-offset for this output row
            int h_offset = (c / ksize) % ksize; // kernel y-offset for this output row
            int c_im = c / ksize / ksize;       // source image channel

            // interior pixels: contiguous in both source and destination
            for (h = pad; h < height_col-pad; ++h) {
                for (w = pad; w < width_col-pad-8; w += 8) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (c * height_col + h) * width_col + w;

                    //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                    __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    _mm256_storeu_ps(&data_col[col_index], src256);
                }

                // scalar tail of the interior row
                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                }
            }

            // left edge column (w == 0)
            {
                w = 0;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }

            // right edge column (w == width_col-1)
            {
                w = width_col-1;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }

            // top edge row (h == 0)
            {
                h = 0;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }

            // bottom edge row (h == height_col-1)
            {
                h = height_col-1;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }
        }
    }
    else {
        //printf("\n Error: is no non-optimized version \n");
        im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
    }
}

//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// im2col with an aligned per-row stride: each of the channels_col rows starts
// at data_col[c * bit_align] (instead of c * height_col * width_col) so every
// row begins on an aligned boundary for the subsequent float-to-bit transpose.
// NOTE(review): only the stride==1/pad==1/FMA-AVX2 fast path is implemented;
// any other configuration just prints an error and leaves data_col untouched.
void im2col_cpu_custom_align(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int bit_align)
{
    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;

    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
    {
        int new_ldb = bit_align;    // leading dimension (in floats) of each output row

        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;           // kernel x-offset for this output row
            int h_offset = (c / ksize) % ksize; // kernel y-offset for this output row
            int c_im = c / ksize / ksize;       // source image channel

            // interior pixels: no padding checks needed, copy 8 floats at a time
            for (h = pad; h < height_col - pad; ++h) {
                for (w = pad; w < width_col - pad - 8; w += 8) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                    __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    _mm256_storeu_ps(&data_col[col_index], src256);
                }

                // scalar tail of the interior row
                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                }
            }

            // left edge column (w == 0): padded reads go through im2col_get_pixel()
            {
                w = 0;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }

            // right edge column (w == width_col-1)
            {
                w = width_col - 1;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }

            // top edge row (h == 0)
            {
                h = 0;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }

            // bottom edge row (h == height_col-1)
            {
                h = height_col - 1;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }
        }
    }
    else {
        printf("\n Error: is no non-optimized version \n");
        //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
        // must be aligned for transpose after float_to_bin
        // float_to_bit(b, t_input, src_size);
        // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
    }
}

//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_custom_bin(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col, int bit_align) { int c; const int height_col = (height + 2 * pad - ksize) / stride + 1; const int width_col = (width + 2 * pad - ksize) / stride + 1; const int channels_col = channels * ksize * ksize; // optimized version if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2()) { __m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000); __m256 float_zero256 = _mm256_set1_ps(0.00); int new_ldb = bit_align; #pragma omp parallel for for (c = 0; c < channels_col; ++c) { int h, w; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = pad; h < height_col - pad; ++h) { for (w = pad; w < width_col - pad - 8; w += 8) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //__m256i src256 = _mm256_loadu_si256((__m256i *)(&data_im[im_col + width*(im_row + height*c_im)])); //__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats //uint16_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1 //mask = ~mask; // inverse mask, (val >= 0) ? 1 : 0 __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)])); __m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS); uint16_t mask = _mm256_movemask_ps(result256); // (val > 0) ? 
0 : 1 uint16_t* dst_ptr = (uint16_t*)&((uint8_t*)data_col)[col_index / 8]; *dst_ptr |= (mask << (col_index % 8)); } for (; w < width_col - pad; ++w) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; float val = data_im[im_col + width*(im_row + height*c_im)]; if (val > 0) set_bit((unsigned char* const)data_col, col_index); } } { w = 0; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit((unsigned char* const)data_col, col_index); } } { w = width_col - 1; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit((unsigned char* const)data_col, col_index); } } { h = 0; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit((unsigned char* const)data_col, col_index); } } { h = height_col - 1; for (w = 0; w < width_col; ++w) { int im_row 
= h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit((unsigned char* const)data_col, col_index); } } } } else { printf("\n Error: is no non-optimized version \n"); //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin // float_to_bit(b, t_input, src_size); // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8); } } void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a) { int i = 0; if (a == LINEAR) {} else if (a == LEAKY) { if (is_fma_avx2()) { __m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000); __m256 all256_01 = _mm256_set1_ps(0.1F); for (i = 0; i < n - 8; i += 8) { //x[i] = (x[i]>0) ? x[i] : .1*x[i]; __m256 src256 = _mm256_loadu_ps(&x[i]); __m256 mult256 = _mm256_mul_ps((src256), all256_01); // mult * 0.1 __m256i sign256 = _mm256_and_si256(_mm256_castps_si256(src256), all256_sing1); // check sign in 8 x 32-bit floats __m256 result256 = _mm256_blendv_ps(src256, mult256, _mm256_castsi256_ps(sign256)); // (sign>0) ? src : mult; _mm256_storeu_ps(&x[i], result256); } } for (; i < n; ++i) { x[i] = (x[i]>0) ? 
x[i] : .1*x[i]; } } else { for (i = 0; i < n; ++i) { x[i] = activate(x[i], a); } } } void float_to_bit(float *src, unsigned char *dst, size_t size) { size_t dst_size = size / 8 + 1; memset(dst, 0, dst_size); size_t i; //__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000); __m256 float_zero256 = _mm256_set1_ps(0.0); for (i = 0; i < size; i+=8) { //__m256i src256 = _mm256_loadu_si256((__m256i *)(&src[i])); //__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats //uint32_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1 ////mask = ~mask; // inverse mask, (val >= 0) ? 1 : 0 __m256 src256 = _mm256_loadu_ps((float *)(&src[i])); __m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS); uint32_t mask = _mm256_movemask_ps(result256); // (val > 0) ? 0 : 1 dst[i / 8] = mask; } } static inline void transpose4x4_SSE(float *A, float *B, const int lda, const int ldb) { __m128 row1 = _mm_loadu_ps(&A[0 * lda]); __m128 row2 = _mm_loadu_ps(&A[1 * lda]); __m128 row3 = _mm_loadu_ps(&A[2 * lda]); __m128 row4 = _mm_loadu_ps(&A[3 * lda]); _MM_TRANSPOSE4_PS(row1, row2, row3, row4); _mm_storeu_ps(&B[0 * ldb], row1); _mm_storeu_ps(&B[1 * ldb], row2); _mm_storeu_ps(&B[2 * ldb], row3); _mm_storeu_ps(&B[3 * ldb], row4); } void transpose_block_SSE4x4(float *A, float *B, const int n, const int m, const int lda, const int ldb, const int block_size) { int i; #pragma omp parallel for for (i = 0; i < n; i += block_size) { int j, i2, j2; //int max_i2 = (i + block_size < n) ? (i + block_size) : n; if (i + block_size < n) { int max_i2 = i + block_size; for (j = 0; j < m; j += block_size) { //int max_j2 = (j + block_size < m) ? 
(j + block_size) : m; if (j + block_size < m) { int max_j2 = j + block_size; for (i2 = i; i2 < max_i2; i2 += 4) { for (j2 = j; j2 < max_j2; j2 += 4) { transpose4x4_SSE(&A[i2*lda + j2], &B[j2*ldb + i2], lda, ldb); } } } else { for (i2 = i; i2 < max_i2; ++i2) { for (j2 = j; j2 < m; ++j2) { B[j2*ldb + i2] = A[i2*lda + j2]; } } } } } else { for (i2 = i; i2 < n; ++i2) { for (j2 = 0; j2 < m; ++j2) { B[j2*ldb + i2] = A[i2*lda + j2]; } } } } } void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c, int pad, int stride, int batch) { const int w_offset = -pad / 2; const int h_offset = -pad / 2; int b, k; for (b = 0; b < batch; ++b) { #pragma omp parallel for for (k = 0; k < c; ++k) { int i, j, m, n; for (i = 0; i < out_h; ++i) { //for (j = 0; j < out_w; ++j) { j = 0; if(stride == 1 && is_avx() == 1) { for (j = 0; j < out_w - 8 - (size - 1); j += 8) { int out_index = j + out_w*(i + out_h*(k + c*b)); __m256 max256 = _mm256_set1_ps(-FLT_MAX); for (n = 0; n < size; ++n) { for (m = 0; m < size; ++m) { int cur_h = h_offset + i*stride + n; int cur_w = w_offset + j*stride + m; int index = cur_w + w*(cur_h + h*(k + b*c)); int valid = (cur_h >= 0 && cur_h < h && cur_w >= 0 && cur_w < w); if (!valid) continue; __m256 src256 = _mm256_loadu_ps(&src[index]); max256 = _mm256_max_ps(src256, max256); } } _mm256_storeu_ps(&dst[out_index], max256); } } else if (size == 2 && stride == 2 && is_avx() == 1) { for (j = 0; j < out_w - 4; j += 4) { int out_index = j + out_w*(i + out_h*(k + c*b)); //float max = -FLT_MAX; //int max_i = -1; __m128 max128 = _mm_set1_ps(-FLT_MAX); for (n = 0; n < size; ++n) { //for (m = 0; m < size; ++m) m = 0; { int cur_h = h_offset + i*stride + n; int cur_w = w_offset + j*stride + m; int index = cur_w + w*(cur_h + h*(k + b*c)); int valid = (cur_h >= 0 && cur_h < h && cur_w >= 0 && cur_w < w); if (!valid) continue; __m256 src256 = _mm256_loadu_ps(&src[index]); __m256 src256_2 = _mm256_permute_ps(src256, (1 
<< 0) | (3 << 4)); __m256 max256 = _mm256_max_ps(src256, src256_2); __m128 src128_0 = _mm256_extractf128_ps(max256, 0); __m128 src128_1 = _mm256_extractf128_ps(max256, 1); __m128 src128 = _mm_shuffle_ps(src128_0, src128_1, (2 << 2) | (2 << 6)); max128 = _mm_max_ps(src128, max128); } } _mm_storeu_ps(&dst[out_index], max128); } } for (; j < out_w; ++j) { int out_index = j + out_w*(i + out_h*(k + c*b)); float max = -FLT_MAX; int max_i = -1; for (n = 0; n < size; ++n) { for (m = 0; m < size; ++m) { int cur_h = h_offset + i*stride + n; int cur_w = w_offset + j*stride + m; int index = cur_w + w*(cur_h + h*(k + b*c)); int valid = (cur_h >= 0 && cur_h < h && cur_w >= 0 && cur_w < w); float val = (valid != 0) ? src[index] : -FLT_MAX; max_i = (val > max) ? index : max_i; max = (val > max) ? val : max; } } dst[out_index] = max; indexes[out_index] = max_i; } } } } } #else // AVX int is_avx() { return 0; } int is_fma_avx2() { return 0; } void gemm_nn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i, j, k; for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { PUT_IN_REGISTER float A_PART = ALPHA * A[i * lda + k]; for (j = 0; j < N; ++j) { C[i*ldc + j] += A_PART*B[k*ldb + j]; } } } } void gemm_nn_fast(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i, j, k; #pragma omp parallel for for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { PUT_IN_REGISTER float A_PART = ALPHA*A[i*lda + k]; for (j = 0; j < N; ++j) { C[i*ldc + j] += A_PART*B[k*ldb + j]; } } } } void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA, uint32_t *A, int lda, uint32_t *B, int ldb, float *C, int ldc, float *mean_arr) { int i; #pragma omp parallel for for (i = 0; i < M; ++i) { // l.n int j, s; float mean_val = mean_arr[i]; //printf(" l.mean_arr[i] = %d \n ", l.mean_arr[i]); for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c) { //PUT_IN_REGISTER float A_PART = 1*a[i*k + s]; PUT_IN_REGISTER 
uint32_t A_PART = A[i * lda + s];
            for (j = 0; j < N; ++j) // out_h*out_w;
            {
                //c[i*n + j] += A_PART*b[s*n + j];
                PUT_IN_REGISTER uint32_t B_PART = B[s * ldb + j];
                uint32_t xnor_result = ~(A_PART ^ B_PART);
                int32_t count = popcnt_32(xnor_result);  // must be Signed int

                // each 32-bit XNOR word contributes (matches - mismatches) = 2*count - 32
                C[i*ldc + j] += (2 * count - 32) * mean_val;
                //c[i*n + j] += count*mean;
            }
        }
    }
}

// Scalar reference 2D convolution: output[fil][y][x] += sum over channels and
// the ksize*ksize window of input*weights, with zero padding of 'pad' pixels.
// Accumulates into 'output' (does not clear it first).
// NOTE(review): out_h/out_w are computed but unused here, and 'mean' and
// 'stride' are not used in this scalar variant — kept for signature parity
// with the AVX build.
void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride,
    float *weights, float *input, float *output, float *mean)
{
    const int out_h = (h + 2 * pad - ksize) / stride + 1;    // output_height=input_height for stride=1 and pad=1
    const int out_w = (w + 2 * pad - ksize) / stride + 1;    // output_width=input_width for stride=1 and pad=1
    //int i, f, j;

    int fil;
    // filter index
    #pragma omp parallel for      // "omp parallel for" - automatic parallelization of loop by using OpenMP
    for (fil = 0; fil < n; ++fil) {
        int chan, y, x, f_y, f_x;
        // channel index
        for (chan = 0; chan < c; ++chan)
            // input - y
            for (y = 0; y < h; ++y)
                // input - x
                for (x = 0; x < w; ++x)
                {
                    int const output_index = fil*w*h + y*w + x;
                    int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize;
                    int const input_pre_index = chan*w*h;
                    float sum = 0;

                    // filter - y
                    for (f_y = 0; f_y < ksize; ++f_y)
                    {
                        int input_y = y + f_y - pad;
                        // filter - x
                        for (f_x = 0; f_x < ksize; ++f_x)
                        {
                            int input_x = x + f_x - pad;
                            // skip taps that fall into the zero-padding border
                            if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;

                            int input_index = input_pre_index + input_y*w + input_x;
                            int weights_index = weights_pre_index + f_y*ksize + f_x;

                            sum += input[input_index] * weights[weights_index];
                        }
                    }
                    // l.output[filters][width][height] +=
                    //        state.input[channels][width][height] *
                    //        l.weights[filters][channels][filter_width][filter_height];
                    output[output_index] += sum;
                }
    }
}

// Portable 64-bit population count (number of set bits in val64).
static inline int popcnt_64(uint64_t val64)
{
#ifdef WIN32  // Windows
#ifdef _WIN64 // Windows 64-bit
    int tmp_count = __popcnt64(val64);
#else  // Windows 32-bit: count each 32-bit half separately
    int tmp_count = __popcnt(val64);
    tmp_count += __popcnt(val64 >> 32);
#endif
#else   // Linux
#if defined(__x86_64__) || defined(__aarch64__)  // Linux 64-bit
    int tmp_count = __builtin_popcountll(val64);
#else  // Linux 32-bit: count each 32-bit half separately
    int tmp_count = __builtin_popcount(val64);
    tmp_count += __builtin_popcount(val64 >> 32);
#endif
#endif
    return tmp_count;
}

// Scalar fallback binary GEMM: C = (2*popcnt(xnor(A,B)) - K) * mean, with A
// row-major bit-packed (lda bits per row) and B transposed bit-packed (ldb
// bits per row). Processes 64 bits per step; the final partial word's padding
// bits are subtracted via the (K - k < 64) correction.
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;

    #pragma omp parallel for
    for (i = 0; i < M; ++i) {   // l.n - filters [16 - 55 - 1024]
        int j, k;
        float mean_val = mean_arr[i];

        for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
            int count = 0;

            for (k = 0; k < K; k += 64) {   // l.size*l.size*l.c - one filter size [27 - 9216]
                uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8));
                uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8));
                uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);

                int tmp_count = popcnt_64(c_bit64);

                if (K - k < 64) tmp_count = tmp_count - (64 - (K - k));    // remove extra bits
                count += tmp_count;
                //binary_int64_printf(c_bit64);
                //printf(", count = %d \n\n", tmp_count);
            }

            C[i*ldc + j] = (2 * count - K) * mean_val;
        }
    }
}

// Stub: the transposed/aligned im2col exists only in the AVX build.
void im2col_cpu_custom_transpose(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int ldb_align)
{
    printf("\n im2col_cpu_custom_transpose() isn't implemented without AVX \n");
}

//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
/* im2col for the non-AVX build. NOTE(review): the unconditional return on the
   first line delegates everything to im2col_cpu(); all code after it is dead
   (kept, presumably, as the template for the AVX variant). */
void im2col_cpu_custom(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col)
{
    im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
    return;
    // ---- everything below is unreachable ----

    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;

    // optimized version: only valid when output size equals input size
    if (height_col == height && width_col == width && stride == 1 && pad == 1)
    {
        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            // interior pixels: no bounds checks needed
            for (h = pad; h < height_col - pad; ++h) {
                for (w = pad; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                }
                // NOTE(review): this remainder loop can never execute — the
                // loop above already ran w up to width_col - pad.
                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                }
            }
            // border columns/rows use the checked pixel accessor
            {
                w = 0;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }
            {
                w = width_col - 1;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }
            {
                h = 0;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }
            {
                h = height_col - 1;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }
        }
    }
    else {
        //printf("\n Error: is no non-optimized version \n");
        im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
    }
}

//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
/* im2col that writes single BITS (sign of each value) instead of floats:
   bit col_index of data_col is set iff the pixel is positive. Rows are laid
   out with leading dimension bit_align (bits). Only the stride==1 / pad==1 /
   same-size case is implemented. */
void im2col_cpu_custom_bin(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int bit_align)
{
    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;

    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1)
    {
        int new_ldb = bit_align;   // row stride of the bit matrix, in bits

        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            for (h = pad; h < height_col - pad; ++h) {
                // main run stops 8 short; the remainder loop finishes the row
                for (w = pad; w < width_col - pad - 8; w += 1) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    float val = data_im[im_col + width*(im_row + height*c_im)];
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                    float val = data_im[im_col + width*(im_row + height*c_im)];
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
            }
            // borders go through the bounds-checked accessor
            {
                w = 0;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
            }
            {
                w = width_col - 1;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
            }
            {
                h = 0;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
            }
            {
                h = height_col - 1;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
            }
        }
    }
    else {
        printf("\n Error: is no non-optimized version \n");
        //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
        // must be aligned for transpose after float_to_bin
        // float_to_bit(b, t_input, src_size);
        // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
    }
}

/* Apply activation 'a' in place to x[0..n). LINEAR is a no-op and LEAKY is
   open-coded; every other activation goes through activate(). */
void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a)
{
    int i;
    if (a == LINEAR) {}
    else if (a == LEAKY) {
        for (i = 0; i < n; ++i) {
            x[i] = (x[i]>0) ? x[i] : .1*x[i];
        }
    }
    else {
        for (i = 0; i < n; ++i) {
            x[i] = activate(x[i], a);
        }
    }
}

/* Pack the signs of src[0..size) into dst: bit i is 1 iff src[i] > 0.
   NOTE(review): when size is not a multiple of 8, the packing loop reads
   byte_arr[i+1 .. i+7] past the calloc'd 'size' bytes — out-of-bounds read.
   NOTE(review): calloc's result is not checked before use. */
void float_to_bit(float *src, unsigned char *dst, size_t size)
{
    size_t dst_size = size / 8 + 1;
    memset(dst, 0, dst_size);

    size_t i;
    char* byte_arr = (char*)calloc(size, sizeof(char));
    for (i = 0; i < size; ++i) {
        if (src[i] > 0) byte_arr[i] = 1;
    }

    //for (i = 0; i < size; ++i) {
    //    dst[i / 8] |= byte_arr[i] << (i % 8);
    //}

    for (i = 0; i < size; i += 8) {
        char dst_tmp = 0;
        dst_tmp |= byte_arr[i + 0] << 0;
        dst_tmp |= byte_arr[i + 1] << 1;
        dst_tmp |= byte_arr[i + 2] << 2;
        dst_tmp |= byte_arr[i + 3] << 3;
        dst_tmp |= byte_arr[i + 4] << 4;
        dst_tmp |= byte_arr[i + 5] << 5;
        dst_tmp |= byte_arr[i + 6] << 6;
        dst_tmp |= byte_arr[i + 7] << 7;
        dst[i / 8] = dst_tmp;
    }
    free(byte_arr);
}

/* Transpose one block_size x block_size tile: B = A^T for that tile. */
static inline void transpose_scalar_block(float *A, float *B, const int lda, const int ldb, const int block_size)
{
    int i;
    //#pragma omp parallel for
    for (i = 0; i<block_size; i++) {
        int j;
        for (j = 0; j<block_size; j++) {
            B[j*ldb + i] = A[i*lda + j];
        }
    }
}

/* Cache-blocked out-of-place transpose of an n x m matrix (B = A^T),
   tiles of block_size, rows parallelized with OpenMP. (Scalar fallback;
   the SSE name refers to the AVX build's variant.) */
void transpose_block_SSE4x4(float *A, float *B, const int n, const int m,
    const int lda, const int ldb, const int block_size)
{
    int i;
#pragma omp parallel for
    for (i = 0; i < n; i += block_size) {
        int j, i2, j2;
        for (j = 0; j < m; j += block_size) {
            // clamp the tile at the matrix edges
            int max_i2 = i + block_size < n ? i + block_size : n;
            int max_j2 = j + block_size < m ? j + block_size : m;
            for (i2 = i; i2 < max_i2; ++i2) {
                for (j2 = j; j2 < max_j2; ++j2) {
                    B[j2*ldb + i2] = A[i2*lda + j2];
                }
            }
        }
    }
}

/* Scalar max-pooling forward pass: for each output cell take the max over a
   size x size window; 'indexes' records the argmax (flat input index) for the
   backward pass. Out-of-image taps count as -FLT_MAX. */
void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size,
    int w, int h, int out_w, int out_h, int c, int pad, int stride, int batch)
{
    int b, k;
    const int w_offset = -pad / 2;
    const int h_offset = -pad / 2;

    for (b = 0; b < batch; ++b) {
        #pragma omp parallel for
        for (k = 0; k < c; ++k) {
            int i, j, m, n;
            for (i = 0; i < out_h; ++i) {
                for (j = 0; j < out_w; ++j) {
                    int out_index = j + out_w*(i + out_h*(k + c*b));
                    float max = -FLT_MAX;
                    int max_i = -1;
                    for (n = 0; n < size; ++n) {
                        for (m = 0; m < size; ++m) {
                            int cur_h = h_offset + i*stride + n;
                            int cur_w = w_offset + j*stride + m;
                            int index = cur_w + w*(cur_h + h*(k + b*c));
                            int valid = (cur_h >= 0 && cur_h < h &&
                                cur_w >= 0 && cur_w < w);
                            float val = (valid != 0) ? src[index] : -FLT_MAX;
                            max_i = (val > max) ? index : max_i;
                            max = (val > max) ? val : max;
                        }
                    }
                    dst[out_index] = max;
                    indexes[out_index] = max_i;
                }
            }
        }
    }
}
#endif  // AVX


// 32 channels -> 1 channel (with 32 floats)
// 256 channels -> 8 channels (with 32 floats)
/* Interleave channels in groups of 32 so that the 32 channel values of one
   pixel become contiguous (preparation for 32-bit bit-packing). */
void repack_input(float *input, float *re_packed_input, int w, int h, int c)
{
    const int items_per_channel = w * h;
    int chan, i;
    for (chan = 0; chan < c; chan += 32)
    {
        for (i = 0; i < items_per_channel; ++i)
        {
            int c_pack;
            for (c_pack = 0; c_pack < 32; ++c_pack)
            {
                float src = input[(chan + c_pack)*items_per_channel + i];

                re_packed_input[chan*items_per_channel + i * 32 + c_pack] = src;
            }
        }
    }
}

/* Transpose a src_h x src_w matrix of uint32 words.
   NOTE(review): dst_align is in bits (divided by 32 to get a word stride)
   while src_align is already a word stride — confirm against callers. */
void transpose_uint32(uint32_t *src, uint32_t *dst, int src_h, int src_w,
    int src_align, int dst_align)
{
    //l.bit_align - aligned (n) by 32
    //new_ldb - aligned (k) by 256
    int i;
    //#pragma omp parallel for
    for (i = 0; i < src_h; i += 1)  // l.size*l.size*l.c;
    {
        int j;
        for (j = 0; j < src_w; j += 1)  // out_h*out_w;
        {
            ((uint32_t *)dst)[j*dst_align / 32 + i] = ((uint32_t *)src)[i*src_align + j];
        }
    }
}

/* Binary GEMM over 32-bit packed words with B pre-transposed:
   C[i][j] += sum_s (2*popcnt(~(A[i][s] ^ B[j][s])) - 32) * mean_arr[i]. */
void gemm_nn_bin_transposed_32bit_packed(int M, int N, int K, float ALPHA,
    uint32_t *A, int lda,
    uint32_t *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) {   // l.n
        int j, s;
        float mean_val = mean_arr[i];
        for (j = 0; j < N; ++j) // out_h*out_w;
        {
            float val = 0;
            for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c)
            {
                PUT_IN_REGISTER uint32_t A_PART = ((uint32_t*)A)[i*lda + s];
                PUT_IN_REGISTER uint32_t B_PART = ((uint32_t*)B)[j * ldb + s];
                uint32_t xnor_result = ~(A_PART ^ B_PART);
                int32_t count = popcnt_32(xnor_result);  // must be Signed int

                val += (2 * count - 32) * mean_val;
            }
            C[i*ldc + j] += val;
        }
    }
}

/* Binary (XNOR) convolution over channel-repacked input: each uint32 word
   holds 32 channels of one pixel, weights are packed the same way; a tap is
   one XNOR + popcount over 32 channels at once. */
void convolution_repacked(uint32_t *packed_input, uint32_t *packed_weights,
    float *output, int w, int h, int c, int n, int size, int pad, int new_lda, float *mean_arr)
{
    int fil;
    // filter index
    #pragma omp parallel for
    for (fil = 0; fil < n; ++fil) {
        float mean_val = mean_arr[fil];
        int chan, y, x, f_y, f_x;   // c_pack
        // channel index
        for (chan = 0; chan < c / 32; ++chan)
            //for (chan = 0; chan < l.c; chan += 32)
            //for (c_pack = 0; c_pack < 32; ++c_pack)
            // input - y
            for (y = 0; y < h; ++y)
                // input - x
                for (x = 0; x < w; ++x)
                {
                    int const output_index = fil*w*h + y*w + x;
                    float sum = 0;

                    // filter - y
                    for (f_y = 0; f_y < size; ++f_y)
                    {
                        int input_y = y + f_y - pad;
                        // filter - x
                        for (f_x = 0; f_x < size; ++f_x)
                        {
                            int input_x = x + f_x - pad;
                            if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;

                            // normal
                            //float input = state.input[(chan + c_pack)*l.w*l.h + input_y*l.w + input_x];
                            //float weight = l.weights[fil*l.c*l.size*l.size + (chan + c_pack)*l.size*l.size + f_y*l.size + f_x];

                            // packed
                            //float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack];
                            //float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack];
                            //sum += input * weight;

                            //float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack];
                            //float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack];
                            //uint32_t bit1 = input > 0;
                            //uint32_t bit2 = weight > 0;
                            //uint32_t count = (~(bit1 ^ bit2)) & 1;
                            //float result = (2 * (float)count - 1) * mean_val;
                            //printf("\n mul = %f, bit1 = %d, bit2 = %d, count = %d, mean = %f, result = %f ", input*weight, bit1, bit2, count, mean_val, result);
                            //sum += result;

                            uint32_t input = ((uint32_t *)packed_input)[chan*w*h + input_y*w + input_x];
                            //uint32_t weight = ((uint32_t *)l.align_bit_weights)[fil*l.c*l.size*l.size/32 + chan*l.size*l.size + f_y*l.size + f_x];
                            uint32_t weight = ((uint32_t *)packed_weights)[fil*new_lda / 32 + chan*size*size + f_y*size + f_x];

                            uint32_t xnor_result = ~(input ^ weight);
                            int32_t count = popcnt_32(xnor_result); // mandatory Signed int
                            sum += (2 * count - 32) * mean_val;
                        }
                    }
                    // l.output[filters][width][height] +=
                    //        state.input[channels][width][height] *
                    //        l.weights[filters][channels][filter_width][filter_height];
                    output[output_index] += sum;
                }
    }
}

/* C += ALPHA * A * B^T (A: M x K, B: N x K row-major, C: M x N). */
void gemm_nt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i,j,k;
    for(i = 0; i < M; ++i){
        for(j = 0; j < N; ++j){
            PUT_IN_REGISTER float sum = 0;
            for(k = 0; k < K; ++k){
                sum += ALPHA*A[i*lda+k]*B[j*ldb + k];
            }
            C[i*ldc+j] += sum;
        }
    }
}

/* C += ALPHA * A^T * B (A: K x M row-major, B: K x N, C: M x N). */
void gemm_tn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i,j,k;
    for(i = 0; i < M; ++i){
        for(k = 0; k < K; ++k){
            PUT_IN_REGISTER float A_PART = ALPHA * A[k * lda + i];
            for(j = 0; j < N; ++j){
                C[i*ldc+j] += A_PART*B[k*ldb+j];
            }
        }
    }
}

/* C += ALPHA * A^T * B^T (A: K x M, B: N x K row-major, C: M x N). */
void gemm_tt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i,j,k;
    for(i = 0; i < M; ++i){
        for(j = 0; j < N; ++j){
            PUT_IN_REGISTER float sum = 0;
            for(k = 0; k < K; ++k){
                sum += ALPHA*A[i+k*lda]*B[k+j*ldb];
            }
            C[i*ldc+j] += sum;
        }
    }
}

/* CPU GEMM dispatcher: C = ALPHA*op(A)*op(B) + BETA*C, where TA/TB select
   transposition. Scales C by BETA first, then uses the FMA/AVX2 fast path
   for the NN case when available, otherwise parallelizes over rows of C and
   delegates one row at a time to the scalar kernels. */
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
    if (BETA != 1){
        #pragma omp parallel for collapse(2)
        for(int i = 0; i < M; ++i){
            for(int j = 0; j < N; ++j){
                C[i*ldc + j] *= BETA;
            }
        }
    }

    is_avx();   // initialize static variable
    if (is_fma_avx2() && !TA && !TB) {
        gemm_nn_fast(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    }
    else {
        #pragma omp parallel for
        for (int t = 0; t < M; ++t) {
            if (!TA && !TB)
                gemm_nn(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc);
            else if (TA && !TB)
                gemm_tn(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc);
            else if (!TA && TB)
                gemm_nt(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc);
            else
                gemm_tt(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc);
        }
    }
}

#ifdef GPU

#include <math.h>

/* GEMM on device pointers via cuBLAS. cuBLAS is column-major, so the call
   computes B^T*A^T into C^T, which is row-major C = A*B with swapped args. */
void gemm_ongpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A_gpu, int lda,
        float *B_gpu, int ldb,
        float BETA,
        float *C_gpu, int ldc)
{
    cublasHandle_t handle = blas_handle();
    cudaError_t stream_status = (cudaError_t)cublasSetStream(handle, get_cuda_stream());
    CHECK_CUDA(stream_status);
    cudaError_t status = (cudaError_t)cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
            (TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc);
    CHECK_CUDA(status);
}

/* Host-pointer convenience wrapper: copies A, B, C to the device, runs
   gemm_ongpu, copies C back, and frees the device buffers. */
void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    float *A_gpu = cuda_make_array(A, (TA ? lda*K:lda*M));
    float *B_gpu = cuda_make_array(B, (TB ? ldb*N : ldb*K));
    float *C_gpu = cuda_make_array(C, ldc*M);

    gemm_ongpu(TA, TB, M, N, K, ALPHA, A_gpu, lda, B_gpu, ldb, BETA, C_gpu, ldc);

    cuda_pull_array(C_gpu, C, ldc*M);
    cuda_free(A_gpu);
    cuda_free(B_gpu);
    cuda_free(C_gpu);
}

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

/* Benchmark: 32 host-side gemm_gpu calls on random matrices (includes
   host<->device transfer time in the measurement). */
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;
    float *c = random_matrix(m,n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i<32; ++i){
        gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}

/* Benchmark: device-resident GEMM only (transfers excluded), reports GFLOPS. */
void time_ongpu(int TA, int TB, int m, int k, int n)
{
    int iter = 10;
    float *a = random_matrix(m,k);
    float *b = random_matrix(k,n);

    int lda = (!TA)?k:m;
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);

    float *a_cl = cuda_make_array(a, m*k);
    float *b_cl = cuda_make_array(b, k*n);
    float *c_cl = cuda_make_array(c, m*n);

    int i;
    clock_t start = clock(), end;
    for(i = 0; i<iter; ++i){
        gemm_ongpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
        cudaDeviceSynchronize();
    }
    double flop = ((double)m)*n*(2.*k + 2.)*iter;
    double gflop = flop/pow(10., 9);
    end = clock();
    double seconds = sec(end-start);
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
    cuda_free(a_cl);
    cuda_free(b_cl);
    cuda_free(c_cl);
    free(a);
    free(b);
    free(c);
}

/* Cross-check: run the same product on GPU and CPU and print the mean
   squared difference between the two results. */
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
    srand(0);
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    float *c_gpu = random_matrix(m,n);
    memset(c, 0, m*n*sizeof(float));
    memset(c_gpu, 0, m*n*sizeof(float));
    int i;
    //pm(m,k,b);
    gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
    //printf("GPU\n");
    //pm(m, n, c_gpu);

    gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    //printf("\n\nCPU\n");
    //pm(m, n, c);
    double sse = 0;
    for(i = 0; i < m*n; ++i) {
        //printf("%f %f\n", c[i], c_gpu[i]);
        sse += pow(c[i]-c_gpu[i], 2);
    }
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
    free(a);
    free(b);
    free(c);
    free(c_gpu);
}

/* Entry point for the GPU benchmark suite; always returns 0. */
int test_gpu_blas()
{
    /*
       test_gpu_accuracy(0,0,10,576,75);

       test_gpu_accuracy(0,0,17,10,10);
       test_gpu_accuracy(1,0,17,10,10);
       test_gpu_accuracy(0,1,17,10,10);
       test_gpu_accuracy(1,1,17,10,10);

       test_gpu_accuracy(0,0,1000,10,100);
       test_gpu_accuracy(1,0,1000,10,100);
       test_gpu_accuracy(0,1,1000,10,100);
       test_gpu_accuracy(1,1,1000,10,100);

       test_gpu_accuracy(0,0,10,10,10);

       time_ongpu(0,0,64,2916,363);
       time_ongpu(0,0,64,2916,363);
       time_ongpu(0,0,64,2916,363);
       time_ongpu(0,0,192,729,1600);
       time_ongpu(0,0,384,196,1728);
       time_ongpu(0,0,256,196,3456);
       time_ongpu(0,0,256,196,2304);
       time_ongpu(0,0,128,4096,12544);
       time_ongpu(0,0,128,4096,4096);
     */
    time_ongpu(0,0,64,75,12544);
    time_ongpu(0,0,64,75,12544);
    time_ongpu(0,0,64,75,12544);
    time_ongpu(0,0,64,576,12544);
    time_ongpu(0,0,256,2304,784);
    time_ongpu(1,1,2304,256,784);
    time_ongpu(0,0,512,4608,196);
    time_ongpu(1,1,4608,512,196);
    return 0;
}
#endif
selectionSimd.c
/*
 * selectionSimd.c - selection sort timing demo.
 *
 * FIX(review): the original tagged the outer sorting loop with
 * "#pragma omp for simd". That directive was doubly wrong here:
 *   - outside any "#pragma omp parallel" region a worksharing "for" is
 *     orphaned and executes on a single thread anyway, and
 *   - each outer iteration of selection sort depends on the array state left
 *     by the previous iteration (loop-carried dependence), so neither
 *     worksharing nor SIMD execution of that loop is valid.
 * The pragma has been removed; the observable behavior (a correct ascending
 * sort plus timing output) is unchanged. Also: the scanf() result is now
 * checked, the unused "end" variable is gone, and main() returns 0.
 *
 * Build: gcc -o selectionSimd -fopenmp selectionSimd.c
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>

void selectionSort(int vet[], int tam);
void exibevetor(int vet[], int tam);

/* Sort the first tam elements of vet in ascending order, in place. */
void selectionSort(int vet[], int tam)
{
    int i, j, menor;

    for (i = 0; i < tam - 1; ++i) {
        /* find the index of the smallest element in vet[i..tam) */
        menor = i;
        for (j = i + 1; j < tam; ++j) {
            if (vet[j] < vet[menor])
                menor = j;
        }
        /* move the minimum to the front of the unsorted region */
        if (menor != i) {
            int aux = vet[i];
            vet[i] = vet[menor];
            vet[menor] = aux;
        }
    }
}

/* Print the first tam elements of vet on one line. */
void exibevetor(int vet[], int tam)
{
    int i;
    for (i = 0; i < tam; ++i) {
        printf("%d ", vet[i]);
    }
    printf("\n");
}

int main(void)
{
    int *vet, i, tam;
    clock_t t;
    double cpu_time_used;

    printf("Digite o tamanho do vetor:\n");
    if (scanf("%d", &tam) != 1 || tam <= 0) {   /* reject bad or non-positive sizes */
        return 1;
    }

    vet = malloc(sizeof(int) * (size_t)tam);
    if (vet == NULL) {
        exit(1);
    }

    /* fill the vector with pseudo-random values */
    for (i = 0; i < tam; ++i) {
        vet[i] = rand() % 100;
    }

    exibevetor(vet, tam);

    t = clock();
    selectionSort(vet, tam);
    t = clock() - t;

    exibevetor(vet, tam);

    cpu_time_used = ((double)t) / CLOCKS_PER_SEC;
    printf("\nTempo de execução: %f\n", cpu_time_used);

    free(vet);
    return 0;
}
v_p_strategy.h
// // Project Name: KratosPFEMFluidDynamicsApplication $ // Last modified by: $Author: AFranci $ // Date: $Date: January 2016 $ // Revision: $Revision: 0.0 $ // // #ifndef KRATOS_V_P_STRATEGY_H #define KRATOS_V_P_STRATEGY_H #include "includes/define.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "utilities/openmp_utils.h" #include "processes/process.h" #include "solving_strategies/schemes/scheme.h" #include "solving_strategies/strategies/solving_strategy.h" #include "custom_utilities/mesher_utilities.hpp" #include "custom_utilities/boundary_normals_calculation_utilities.hpp" #include "custom_utilities/solver_settings.h" #include "pfem_fluid_dynamics_application_variables.h" #include <stdio.h> #include <math.h> namespace Kratos { ///@addtogroup PFEMFluidDynamicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ template <class TSparseSpace, class TDenseSpace, class TLinearSolver> class VPStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(VPStrategy); typedef SolvingStrategy<TSparseSpace, TDenseSpace> BaseType; typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType; ///@} ///@name Life Cycle ///@{ VPStrategy(ModelPart &rModelPart, SolverSettingsType &rSolverConfig) : BaseType(rModelPart) { std::cout << "VPStrategy INITIALIZE STRATEGY" << std::endl; InitializeStrategy(rSolverConfig); } VPStrategy(ModelPart &rModelPart, typename TLinearSolver::Pointer pVelocityLinearSolver, typename TLinearSolver::Pointer pPressureLinearSolver, bool ReformDofSet = true, unsigned int DomainSize = 2) : BaseType(rModelPart) { KRATOS_TRY; KRATOS_CATCH(""); } /// Destructor. 
virtual ~VPStrategy() {} virtual int Check() override { return false; } virtual bool SolveSolutionStep() override { return false; } virtual void FinalizeSolutionStep() override {} virtual void InitializeSolutionStep() override {} void UpdateTopology(ModelPart &rModelPart, unsigned int echoLevel) { KRATOS_TRY; this->CalculateDisplacementsAndPorosity(); BaseType::MoveMesh(); KRATOS_CATCH(""); } void SetBlockedAndIsolatedFlags() { KRATOS_TRY; ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); #pragma omp parallel { ModelPart::ElementIterator ElemBegin; ModelPart::ElementIterator ElemEnd; OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd); for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { unsigned int numNodes = itElem->GetGeometry().size(); std::vector<array_1d<double, 3>> nodesCoordinates; nodesCoordinates.resize(numNodes); (itElem)->Set(BLOCKED, false); (itElem)->Set(ISOLATED, false); unsigned int freeSurfaceNodes = 0; unsigned int freeSurfaceRigidNodes = 0; unsigned int rigidNodes = 0; unsigned int isolatedNodes = 0; for (unsigned int i = 0; i < numNodes; i++) { if (itElem->GetGeometry()[i].Is(FREE_SURFACE)) { freeSurfaceNodes++; if (itElem->GetGeometry()[i].Is(RIGID)) { freeSurfaceRigidNodes++; } } else if (itElem->GetGeometry()[i].Is(RIGID)) { rigidNodes++; } nodesCoordinates[i] = itElem->GetGeometry()[i].Coordinates(); ElementWeakPtrVectorType &neighb_elems = itElem->GetGeometry()[i].GetValue(NEIGHBOUR_ELEMENTS); if (neighb_elems.size() == 1) { isolatedNodes++; } } if (dimension == 3) { double a1 = 0; //slope x for plane on the first triangular face of the tetrahedra (nodes A,B,C) double b1 = 0; //slope y for plane on the first triangular face of the tetrahedra (nodes A,B,C) double c1 = 0; //slope z for plane on the first triangular face of the tetrahedra (nodes A,B,C) a1 = (nodesCoordinates[1][1] - 
nodesCoordinates[0][1]) * (nodesCoordinates[2][2] - nodesCoordinates[0][2]) - (nodesCoordinates[2][1] - nodesCoordinates[0][1]) * (nodesCoordinates[1][2] - nodesCoordinates[0][2]); b1 = (nodesCoordinates[1][2] - nodesCoordinates[0][2]) * (nodesCoordinates[2][0] - nodesCoordinates[0][0]) - (nodesCoordinates[2][2] - nodesCoordinates[0][2]) * (nodesCoordinates[1][0] - nodesCoordinates[0][0]); c1 = (nodesCoordinates[1][0] - nodesCoordinates[0][0]) * (nodesCoordinates[2][1] - nodesCoordinates[0][1]) - (nodesCoordinates[2][0] - nodesCoordinates[0][0]) * (nodesCoordinates[1][1] - nodesCoordinates[0][1]); double a2 = 0; //slope x for plane on the second triangular face of the tetrahedra (nodes A,B,D) double b2 = 0; //slope y for plane on the second triangular face of the tetrahedra (nodes A,B,D) double c2 = 0; //slope z for plane on the second triangular face of the tetrahedra (nodes A,B,D) a2 = (nodesCoordinates[1][1] - nodesCoordinates[0][1]) * (nodesCoordinates[3][2] - nodesCoordinates[0][2]) - (nodesCoordinates[3][1] - nodesCoordinates[0][1]) * (nodesCoordinates[1][2] - nodesCoordinates[0][2]); b2 = (nodesCoordinates[1][2] - nodesCoordinates[0][2]) * (nodesCoordinates[3][0] - nodesCoordinates[0][0]) - (nodesCoordinates[3][2] - nodesCoordinates[0][2]) * (nodesCoordinates[1][0] - nodesCoordinates[0][0]); c2 = (nodesCoordinates[1][0] - nodesCoordinates[0][0]) * (nodesCoordinates[3][1] - nodesCoordinates[0][1]) - (nodesCoordinates[3][0] - nodesCoordinates[0][0]) * (nodesCoordinates[1][1] - nodesCoordinates[0][1]); double a3 = 0; //slope x for plane on the third triangular face of the tetrahedra (nodes B,C,D) double b3 = 0; //slope y for plane on the third triangular face of the tetrahedra (nodes B,C,D) double c3 = 0; //slope z for plane on the third triangular face of the tetrahedra (nodes B,C,D) a3 = (nodesCoordinates[1][1] - nodesCoordinates[2][1]) * (nodesCoordinates[3][2] - nodesCoordinates[2][2]) - (nodesCoordinates[3][1] - nodesCoordinates[2][1]) * 
(nodesCoordinates[1][2] - nodesCoordinates[2][2]); b3 = (nodesCoordinates[1][2] - nodesCoordinates[2][2]) * (nodesCoordinates[3][0] - nodesCoordinates[2][0]) - (nodesCoordinates[3][2] - nodesCoordinates[2][2]) * (nodesCoordinates[1][0] - nodesCoordinates[2][0]); c3 = (nodesCoordinates[1][0] - nodesCoordinates[2][0]) * (nodesCoordinates[3][1] - nodesCoordinates[2][1]) - (nodesCoordinates[3][0] - nodesCoordinates[2][0]) * (nodesCoordinates[1][1] - nodesCoordinates[2][1]); double a4 = 0; //slope x for plane on the fourth triangular face of the tetrahedra (nodes A,C,D) double b4 = 0; //slope y for plane on the fourth triangular face of the tetrahedra (nodes A,C,D) double c4 = 0; //slope z for plane on the fourth triangular face of the tetrahedra (nodes A,C,D) a4 = (nodesCoordinates[0][1] - nodesCoordinates[2][1]) * (nodesCoordinates[3][2] - nodesCoordinates[2][2]) - (nodesCoordinates[3][1] - nodesCoordinates[2][1]) * (nodesCoordinates[0][2] - nodesCoordinates[2][2]); b4 = (nodesCoordinates[0][2] - nodesCoordinates[2][2]) * (nodesCoordinates[3][0] - nodesCoordinates[2][0]) - (nodesCoordinates[3][2] - nodesCoordinates[2][2]) * (nodesCoordinates[0][0] - nodesCoordinates[2][0]); c4 = (nodesCoordinates[0][0] - nodesCoordinates[2][0]) * (nodesCoordinates[3][1] - nodesCoordinates[2][1]) - (nodesCoordinates[3][0] - nodesCoordinates[2][0]) * (nodesCoordinates[0][1] - nodesCoordinates[2][1]); double cosAngle12 = (a1 * a2 + b1 * b2 + c1 * c2) / (sqrt(pow(a1, 2) + pow(b1, 2) + pow(c1, 2)) * sqrt(pow(a2, 2) + pow(b2, 2) + pow(c2, 2))); double cosAngle13 = (a1 * a3 + b1 * b3 + c1 * c3) / (sqrt(pow(a1, 2) + pow(b1, 2) + pow(c1, 2)) * sqrt(pow(a3, 2) + pow(b3, 2) + pow(c3, 2))); double cosAngle14 = (a1 * a4 + b1 * b4 + c1 * c4) / (sqrt(pow(a1, 2) + pow(b1, 2) + pow(c1, 2)) * sqrt(pow(a4, 2) + pow(b4, 2) + pow(c4, 2))); double cosAngle23 = (a3 * a2 + b3 * b2 + c3 * c2) / (sqrt(pow(a3, 2) + pow(b3, 2) + pow(c3, 2)) * sqrt(pow(a2, 2) + pow(b2, 2) + pow(c2, 2))); double cosAngle24 = (a4 * 
a2 + b4 * b2 + c4 * c2) / (sqrt(pow(a4, 2) + pow(b4, 2) + pow(c4, 2)) * sqrt(pow(a2, 2) + pow(b2, 2) + pow(c2, 2))); double cosAngle34 = (a4 * a3 + b4 * b3 + c4 * c3) / (sqrt(pow(a4, 2) + pow(b4, 2) + pow(c4, 2)) * sqrt(pow(a3, 2) + pow(b3, 2) + pow(c3, 2))); if ((fabs(cosAngle12) > 0.99 || fabs(cosAngle13) > 0.99 || fabs(cosAngle14) > 0.99 || fabs(cosAngle23) > 0.99 || fabs(cosAngle24) > 0.99 || fabs(cosAngle34) > 0.99) && (freeSurfaceNodes == numNodes) && isolatedNodes > 1) { (itElem)->Set(BLOCKED, true); // std::cout << "in the strategy BLOCKED ELEMENT: " << (itElem)->Id() << std::endl; } else if ((fabs(cosAngle12) > 0.995 || fabs(cosAngle13) > 0.995 || fabs(cosAngle14) > 0.995 || fabs(cosAngle23) > 0.995 || fabs(cosAngle24) > 0.995 || fabs(cosAngle34) > 0.995) && (freeSurfaceNodes == numNodes) && isolatedNodes == 1) { (itElem)->Set(BLOCKED, true); // std::cout << "in the strategy BLOCKED ELEMENT: " << (itElem)->Id() << std::endl; } else if ((fabs(cosAngle12) > 0.999 || fabs(cosAngle13) > 0.999 || fabs(cosAngle14) > 0.999 || fabs(cosAngle23) > 0.999 || fabs(cosAngle24) > 0.999 || fabs(cosAngle34) > 0.999) && (freeSurfaceNodes == numNodes)) { (itElem)->Set(BLOCKED, true); // std::cout << "in the strategy BLOCKED ELEMENT: " << (itElem)->Id() << std::endl; } } if (freeSurfaceNodes == numNodes && rigidNodes == 0 && isolatedNodes >= (numNodes - 1)) { (itElem)->Set(ISOLATED, true); (itElem)->Set(BLOCKED, false); } } } KRATOS_CATCH(""); } void CalculatePressureVelocity() { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double timeInterval = rCurrentProcessInfo[DELTA_TIME]; unsigned int timeStep = rCurrentProcessInfo[STEP]; for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i) { if (timeStep == 1) { (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0; (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0; } else { double &CurrentPressure = 
(i)->FastGetSolutionStepValue(PRESSURE, 0); // NOTE(review): chunk begins mid-statement; the left-hand side of this assignment (presumably "double &CurrentPressure =") lies before this view.
double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
// Backward difference of PRESSURE over the last time step.
CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval;
}
}
}

/// Update nodal PRESSURE_ACCELERATION as the backward difference of
/// PRESSURE_VELOCITY over DELTA_TIME. On the first step (no history yet)
/// both acceleration buffers are zeroed instead.
void CalculatePressureAcceleration()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    unsigned int timeStep = rCurrentProcessInfo[STEP];
    for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
    {
        if (timeStep == 1)
        {
            // No velocity history exists on the first step.
            (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0;
            (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0;
        }
        else
        {
            double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
            double &PreviousPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1);
            double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
            CurrentPressureAcceleration = (CurrentPressureVelocity - PreviousPressureVelocity) / timeInterval;
        }
    }
}

/// Update nodal accelerations and pressure time derivatives for the new step.
/// Non-rigid fluid/solid nodes get the finite-difference acceleration update
/// (UpdateAccelerations); rigid nodes get zero acceleration; the remaining
/// nodes have their pressure history wiped and are re-seeded from
/// VOLUME_ACCELERATION when that variable is available.
virtual void CalculateTemporalVariables()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
    {
        array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
        array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
        array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
        array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);
        /* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */
        if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID)))
        {
            UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity);
        }
        else if ((i)->Is(RIGID))
        {
            // Rigid (non-solid) nodes carry no acceleration.
            array_1d<double, 3> Zeros(3, 0.0);
            (i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros;
            (i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros;
        }
        else
        {
            // Isolated node: reset the whole pressure history and re-seed the
            // motion from the body force, if VOLUME_ACCELERATION is stored.
            (i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0;
            (i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
            (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0;
            (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0;
            (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0;
            (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0;
            if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION))
            {
                array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
                (i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration;
                (i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME];
            }
        }
        const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
        unsigned int timeStep = rCurrentProcessInfo[STEP];
        if (timeStep == 1)
        {
            (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0;
            (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0;
            (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0;
            (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0;
        }
        else
        {
            double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
            double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
            double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
            double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
            // NOTE(review): the net effect of the three statements below is
            // (previousPressureVelocity - currentPressureVelocity) / timeInterval,
            // i.e. the opposite sign of the update used in
            // CalculatePressureAcceleration() — confirm the intended sign convention.
            CurrentPressureAcceleration = CurrentPressureVelocity / timeInterval;
            CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval;
            CurrentPressureAcceleration += -CurrentPressureVelocity / timeInterval;
        }
    }
}

/// Same node classification as CalculateTemporalVariables(), but updates only
/// the accelerations (no pressure time-derivative computation).
void CalculateAccelerations()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
    {
        array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
        array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
        array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
        array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);
        /* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */
        if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID)))
        {
            UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity);
        }
        else if ((i)->Is(RIGID))
        {
            array_1d<double, 3> Zeros(3, 0.0);
            (i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros;
            (i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros;
        }
        else
        {
            // Isolated node: reset pressure history and re-seed from body force.
            (i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0;
            (i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
            (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0;
            (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0;
            (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0;
            (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0;
            if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION))
            {
                array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
                (i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration;
                (i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME];
            }
        }
    }
}

/// Second-order finite-difference update:
/// a_n = 2 (v_n - v_{n-1}) / Dt - a_{n-1}, with Dt read from DELTA_TIME.
inline void UpdateAccelerations(array_1d<double, 3> &CurrentAcceleration, const array_1d<double, 3> &CurrentVelocity, array_1d<double, 3> &PreviousAcceleration, const array_1d<double, 3> &PreviousVelocity)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    double Dt = rCurrentProcessInfo[DELTA_TIME];
    noalias(CurrentAcceleration) = 2.0 * (CurrentVelocity - PreviousVelocity) / Dt - PreviousAcceleration;
}

/// Integrate nodal displacements with the trapezoidal rule:
/// d_n = d_{n-1} + Dt/2 (v_n + v_{n-1}), applied component-wise to all nodes.
virtual void CalculateDisplacementsAndPorosity()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double TimeStep = rCurrentProcessInfo[DELTA_TIME];
    for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
    {
        array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
        array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
        array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
        array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);
        /* if( i->IsFixed(DISPLACEMENT_X) == false ) */
        CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0];
        /* if( i->IsFixed(DISPLACEMENT_Y) == false ) */
        CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1];
        /* if( i->IsFixed(DISPLACEMENT_Z) == false ) */
        CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2];
        // currentFluidFractionRate = (currentFluidFraction - previousFluidFraction)/TimeStep;
    }
}

/// Hook for derived strategies; no-op in the base class.
virtual void UpdateStressStrain() {}

/// Hook for derived strategies; no-op in the base class.
virtual void Clear() override {}

///@}
///@name Access
///@{

virtual void SetEchoLevel(int Level) override
{
    BaseType::SetEchoLevel(Level);
}

///@}
///@name Inquiry
///@{

///@}
///@name Input and output
///@{

/// Turn back information as a string.
std::string Info() const override
{
    std::stringstream buffer;
    buffer << "VPStrategy";
    return buffer.str();
}

/// Print information about this object.
void PrintInfo(std::ostream &rOStream) const override
{
    rOStream << "VPStrategy";
}

/// Print object's data.
void PrintData(std::ostream &rOStream) const override
{
}

///@}
///@name Friends
///@{

///@}

protected:
///@name Protected Life Cycle
///@{

///@}
///@name Protected static Member Variables
///@{

///@}
///@name Protected member Variables
///@{

///@}
///@name Protected Operators
///@{

///@}
///@name Protected Operations
///@{

/// Calculate the coefficients for time iteration.
/**
 * @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME variables.
 */
virtual bool SolveMomentumIteration(unsigned int it, unsigned int maxIt, bool &fixedTimeStep, double &velocityNorm) { return false; }

virtual bool SolveContinuityIteration(unsigned int it, unsigned int maxIt, double &NormP) { return false; }

/// Compute L2 error norms of velocity, pressure and deviatoric stresses
/// against an analytical benchmark solution and append them to text files.
/// NOTE(review): sumErrorL2Velocity is never accumulated inside the loop, so
/// errorL2Velocity (and its output file) is always 0. NOTE(review): all the
/// shared sumErrorL2* accumulators are updated inside "#pragma omp parallel"
/// without a reduction/atomic/critical — this is a data race (contrast with
/// ComputeVelocityNorm below, which uses reduction(+:NormV)). nodalArea is
/// computed but unused.
void ComputeErrorL2Norm(double tensilStressSign) //tensilStressSign = 1.0 for FIC, tensilStressSign = -1.0 for FS
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    long double sumErrorL2Velocity = 0;
    long double sumErrorL2VelocityX = 0;
    long double sumErrorL2VelocityY = 0;
    long double sumErrorL2Pressure = 0;
    long double sumErrorL2TauXX = 0;
    long double sumErrorL2TauYY = 0;
    long double sumErrorL2TauXY = 0;
#pragma omp parallel
    {
        ModelPart::ElementIterator ElemBegin;
        ModelPart::ElementIterator ElemEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
        for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
        {
            Element::GeometryType &geometry = itElem->GetGeometry();
            long double nodalArea = 0;
            if (dimension == 2)
            {
                nodalArea = geometry.Area() / 3.0;
            }
            else if (dimension == 3)
            {
                nodalArea = geometry.Volume() * 0.25;
            }
            long double bariPosX = 0;
            long double bariPosY = 0;
            long double eleErrorL2Velocity = 0;
            long double eleErrorL2VelocityX = 0;
            long double eleErrorL2VelocityY = 0;
            long double eleErrorL2Pressure = 0;
            //ShapeFunctionDerivativesArrayType DN_DX;
            Matrix NContainer;
            // Single Gauss point: shape functions evaluated at the element center.
            NContainer = geometry.ShapeFunctionsValues(GeometryData::GI_GAUSS_1);
            const Vector &N = row(NContainer, 0);
            const unsigned int NumNodes = geometry.size();
            double elementalPressure = N[0] * geometry(0)->FastGetSolutionStepValue(PRESSURE);
            double elementalVelocityX = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_X);
            double elementalVelocityY = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_Y);
            ;
            for (unsigned int i = 1; i < NumNodes; i++)
            {
                elementalPressure += N[i] * geometry(i)->FastGetSolutionStepValue(PRESSURE);
                elementalVelocityX += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_X);
                elementalVelocityY += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_Y);
            }
            // Barycenter of the element (assumes 3-node triangles: /3.0 weights).
            for (unsigned int i = 0; i < geometry.size(); i++)
            {
                const long double nodalPosX = geometry(i)->X();
                const long double nodalPosY = geometry(i)->Y();
                bariPosX += nodalPosX / 3.0;
                bariPosY += nodalPosY / 3.0;
            }
            const long double posX = bariPosX;
            const long double posY = bariPosY;
            long double expectedVelocityX = pow(posX, 2) * (1.0 - posX) * (1.0 - posX) * (2.0 * posY - 6.0 * pow(posY, 2) + 4.0 * pow(posY, 3));
            long double expectedVelocityY = -pow(posY, 2) * (1.0 - posY) * (1.0 - posY) * (2.0 * posX - 6.0 * pow(posX, 2) + 4.0 * pow(posX, 3));
            long double expectedPressure = -tensilStressSign * posX * (1.0 - posX);
            eleErrorL2VelocityX = elementalVelocityX - expectedVelocityX;
            eleErrorL2VelocityY = elementalVelocityY - expectedVelocityY;
            eleErrorL2Pressure = elementalPressure - expectedPressure;
            sumErrorL2VelocityX += pow(eleErrorL2VelocityX, 2) * geometry.Area();
            sumErrorL2VelocityY += pow(eleErrorL2VelocityY, 2) * geometry.Area();
            sumErrorL2Pressure += pow(eleErrorL2Pressure, 2) * geometry.Area();
            const long double tauXX = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XX);
            const long double tauYY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_YY);
            const long double tauXY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XY);
            long double expectedTauXX = 2.0 * (-4.0 * (1.0 - bariPosX) * bariPosX * (-1.0 + 2.0 * bariPosX) * bariPosY * (1.0 - 3.0 * bariPosY + 2.0 * pow(bariPosY, 2)));
            long double expectedTauYY = 2.0 * (4.0 * bariPosX * (1.0 - 3.0 * bariPosX + 2.0 * pow(bariPosX, 2)) * (1.0 - bariPosY) * bariPosY * (-1.0 + 2.0 * bariPosY));
            long double expectedTauXY = (2.0 * (1.0 - 6.0 * bariPosY + 6.0 * pow(bariPosY, 2)) * (1.0 - bariPosX) * (1.0 - bariPosX) * pow(bariPosX, 2) - 2.0 * (1.0 - 6.0 * bariPosX + 6.0 * pow(bariPosX, 2)) * (1.0 - bariPosY) * (1 - bariPosY) * pow(bariPosY, 2));
            long double nodalErrorTauXX = tauXX - expectedTauXX;
            long double nodalErrorTauYY = tauYY - expectedTauYY;
            long double nodalErrorTauXY = tauXY - expectedTauXY;
            sumErrorL2TauXX += pow(nodalErrorTauXX, 2) * geometry.Area();
            sumErrorL2TauYY += pow(nodalErrorTauYY, 2) * geometry.Area();
            sumErrorL2TauXY += pow(nodalErrorTauXY, 2) * geometry.Area();
        }
    }
    long double errorL2Velocity = sqrt(sumErrorL2Velocity);
    long double errorL2VelocityX = sqrt(sumErrorL2VelocityX);
    long double errorL2VelocityY = sqrt(sumErrorL2VelocityY);
    long double errorL2Pressure = sqrt(sumErrorL2Pressure);
    long double errorL2TauXX = sqrt(sumErrorL2TauXX);
    long double errorL2TauYY = sqrt(sumErrorL2TauYY);
    long double errorL2TauXY = sqrt(sumErrorL2TauXY);
    // Append one "<time>\t<error>" row per quantity to its own file.
    std::ofstream myfileVelocity;
    myfileVelocity.open("errorL2VelocityFile.txt", std::ios::app);
    myfileVelocity << currentTime << "\t" << errorL2Velocity << "\n";
    myfileVelocity.close();
    std::ofstream myfileVelocityX;
    myfileVelocityX.open("errorL2VelocityXFile.txt", std::ios::app);
    myfileVelocityX << currentTime << "\t" << errorL2VelocityX << "\n";
    myfileVelocityX.close();
    std::ofstream myfileVelocityY;
    myfileVelocityY.open("errorL2VelocityYFile.txt", std::ios::app);
    myfileVelocityY << currentTime << "\t" << errorL2VelocityY << "\n";
    myfileVelocityY.close();
    std::ofstream myfilePressure;
    myfilePressure.open("errorL2PressureFile.txt", std::ios::app);
    myfilePressure << currentTime << "\t" << errorL2Pressure << "\n";
    myfilePressure.close();
    std::ofstream myfileTauXX;
    myfileTauXX.open("errorL2TauXXFile.txt", std::ios::app);
    myfileTauXX << currentTime << "\t" << errorL2TauXX << "\n";
    myfileTauXX.close();
    std::ofstream myfileTauYY;
    myfileTauYY.open("errorL2TauYYFile.txt", std::ios::app);
    myfileTauYY << currentTime << "\t" << errorL2TauYY << "\n";
    myfileTauYY.close();
    std::ofstream myfileTauXY;
    myfileTauXY.open("errorL2TauXYFile.txt", std::ios::app);
    myfileTauXY << currentTime << "\t" << errorL2TauXY << "\n";
    myfileTauXY.close();
}

/// Compute L2 errors for the Taylor–Couette-like benchmark (rotating annulus,
/// inner radius r_in, outer radius R_out) and append them to a text file.
/// NOTE(review): same unsynchronized accumulation inside "#pragma omp parallel"
/// as in ComputeErrorL2Norm — data race on sumErrorL2VelocityTheta /
/// sumErrorL2TauTheta.
void ComputeErrorL2NormCasePoiseuille()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    double sumErrorL2VelocityTheta = 0;
    double sumErrorL2TauTheta = 0;
    double r_in = 0.2;
    double R_out = 0.5;
    double kappa = r_in / R_out;
    double omega = 0.5;
    double viscosity = 100.0;
#pragma omp parallel
    {
        ModelPart::ElementIterator ElemBegin;
        ModelPart::ElementIterator ElemEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
        for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
        {
            Element::GeometryType &geometry = itElem->GetGeometry();
            long double nodalArea = 0;
            if (dimension == 2)
            {
                nodalArea = geometry.Area() / 3.0;
            }
            else if (dimension == 3)
            {
                nodalArea = geometry.Volume() * 0.25;
            }
            long double bariPosX = 0;
            long double bariPosY = 0;
            long double eleErrorL2Velocity = 0;
            long double eleErrorL2VelocityX = 0;
            long double eleErrorL2VelocityY = 0;
            long double eleErrorL2Pressure = 0;
            //ShapeFunctionDerivativesArrayType DN_DX;
            Matrix NContainer;
            NContainer = geometry.ShapeFunctionsValues(GeometryData::GI_GAUSS_1);
            //this->CalculateGeometryData(DN_DX,NContainer,GaussWeights);
            const Vector &N = row(NContainer, 0);
            // itElem->EvaluateInPoint(elementalPressure,PRESSURE,N);
            const unsigned int NumNodes = geometry.size();
            double elementalPressure = N[0] * geometry(0)->FastGetSolutionStepValue(PRESSURE);
            double elementalVelocityX = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_X);
            double elementalVelocityY = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_Y);
            ;
            for (unsigned int i = 1; i < NumNodes; i++)
            {
                elementalPressure += N[i] * geometry(i)->FastGetSolutionStepValue(PRESSURE);
                elementalVelocityX += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_X);
                elementalVelocityY += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_Y);
            }
            for (unsigned int i = 0; i < geometry.size(); i++)
            {
                // index = i*dimension;
                const long double nodalPosX = geometry(i)->X();
                const long double nodalPosY = geometry(i)->Y();
                bariPosX += nodalPosX / 3.0;
                bariPosY += nodalPosY / 3.0;
            }
            const long double posX = bariPosX;
            const long double posY = bariPosY;
            // Polar decomposition of the barycenter position.
            const double rPos = sqrt(pow(posX, 2) + pow(posY, 2));
            const double cosalfa = posX / rPos;
            const double sinalfa = posY / rPos;
            const double sin2alfa = 2.0 * cosalfa * sinalfa;
            const double cos2alfa = 1.0 - 2.0 * pow(sinalfa, 2);
            double expectedVelocityTheta = pow(kappa, 2) * omega * R_out / (1.0 - pow(kappa, 2)) * (R_out / rPos - rPos / R_out);
            double computedVelocityTheta = sqrt(pow(elementalVelocityX, 2) + pow(elementalVelocityY, 2));
            double nodalErrorVelocityTheta = computedVelocityTheta - expectedVelocityTheta;
            const long double tauXX = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XX);
            const long double tauYY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_YY);
            const long double tauXY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XY);
            double expectedTauTheta = (2.0 * viscosity * pow(kappa, 2) * omega * pow(R_out, 2)) / (1.0 - pow(kappa, 2)) / pow(rPos, 2);
            double computedTauTheta = (tauXX - tauYY) * sin2alfa / 2.0 - tauXY * cos2alfa;
            double nodalErrorTauTheta = computedTauTheta - expectedTauTheta;
            sumErrorL2VelocityTheta += pow(nodalErrorVelocityTheta, 2) * geometry.Area();
            sumErrorL2TauTheta += pow(nodalErrorTauTheta, 2) * geometry.Area();
        }
    }
    double errorL2VelocityTheta = sqrt(sumErrorL2VelocityTheta);
    double errorL2TauTheta = sqrt(sumErrorL2TauTheta);
    std::ofstream myfileVelocity;
    myfileVelocity.open("errorL2Poiseuille.txt", std::ios::app);
    myfileVelocity << currentTime << "\t" << errorL2VelocityTheta << "\t" << errorL2TauTheta << "\n";
    myfileVelocity.close();
}

/// Global (MPI-summed) Euclidean norm of the nodal VELOCITY field.
/// Returns 1.0 when the norm is below 1e-12, so callers can divide by it.
double ComputeVelocityNorm()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    const int n_nodes = rModelPart.NumberOfNodes();
    double NormV = 0.00;
#pragma omp parallel for reduction(+ \
                                   : NormV)
    for (int i_node = 0; i_node < n_nodes; ++i_node)
    {
        const auto it_node = rModelPart.NodesBegin() + i_node;
        const auto &r_vel = it_node->FastGetSolutionStepValue(VELOCITY);
        for (unsigned int d = 0; d < 3; ++d)
        {
            NormV += r_vel[d] * r_vel[d];
        }
    }
    NormV = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV);
    NormV = sqrt(NormV);
    const double zero_tol = 1.0e-12;
    if (NormV < zero_tol)
        NormV = 1.00;
    return NormV;
}

/// Global (MPI-summed) Euclidean norm of the nodal PRESSURE field.
/// Returns 1.0 when the norm is below 1e-12, so callers can divide by it.
double ComputePressureNorm()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    const int n_nodes = rModelPart.NumberOfNodes();
    double NormP = 0.00;
#pragma omp parallel for reduction(+ \
                                   : NormP)
    for (int i_node = 0; i_node < n_nodes; ++i_node)
    {
        const auto it_node = rModelPart.NodesBegin() + i_node;
        const double Pr = it_node->FastGetSolutionStepValue(PRESSURE);
        NormP += Pr * Pr;
    }
    NormP = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP);
    NormP = sqrt(NormP);
    const double zero_tol = 1.0e-12;
    if (NormP < zero_tol)
        NormP = 1.00;
    return NormP;
}

// Convergence/time-step control hooks; the base class implements them as
// "never converged / never fixed" stubs for derived strategies to override.
virtual bool CheckVelocityConvergence(const double NormDv, double &errorNormDv) { return false; }

virtual bool CheckPressureConvergence(const double NormDp, double &errorNormDp, double &NormP) { return false; }

virtual bool FixTimeStepMomentum(const double DvErrorNorm, bool &fixedTimeStep) { return false; }

virtual bool CheckMomentumConvergence(const double DvErrorNorm, bool &fixedTimeStep) { return false; }

virtual bool FixTimeStepContinuity(const double DvErrorNorm, bool &fixedTimeStep) { return false; }

virtual bool CheckContinuityConvergence(const double DvErrorNorm, bool &fixedTimeStep) { return false; }

///@}
///@name Protected Access
///@{

///@}
///@name Protected Inquiry
///@{

///@}
///@name Protected LifeCycle
///@{

///@}
///@name Static Member Variables
///@{

///@}
///@name Member Variables
///@{

// Fractional step index.
/* 1 : Momentum step (calculate fractional step velocity)
 * 2-3 : Unused (reserved for componentwise calculation of frac step velocity)
 * 4 : Pressure step
 * 5 : Computation of projections
 * 6 : End of step velocity
 */
// unsigned int mStepId;

///@}
///@name Private Operators
///@{

///@}
///@name Private Operations
///@{

virtual void InitializeStrategy(SolverSettingsType &rSolverConfig)
{
    KRATOS_TRY;
    KRATOS_CATCH("");
}

///@}
///@name Private Access
///@{

///@}
///@name Private Inquiry
///@{

///@}
///@name Un accessible methods
///@{

/// Assignment operator.
// NOTE(review): declared to block copying, but the body is empty — a non-void
// function that falls off the end is undefined behavior if ever invoked;
// "= delete" would be the safe C++11 idiom.
VPStrategy &operator=(VPStrategy const &rOther) {}

/// Copy constructor.
VPStrategy(VPStrategy const &rOther) {}

///@}

}; /// Class VPStrategy

///@}
///@name Type Definitions
///@{

///@}

///@} // addtogroup

} // namespace Kratos.

#endif // KRATOS_V_P_STRATEGY_H
julia.c
/*
 * julia.c
 *
 * @author: phdenzel
 *
 * The Julia set generator
 *
 */
#include "julia.h"
#include "complex.h"
#include "graphics.h"
#include <SDL2/SDL.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Global variables */
const int NMAX = 255;           /* iteration cap per pixel (also the color index range) */
int WIDTH = 640;
int HEIGHT = 480;
//double ORIGIN[2] = {-0.75, 0.};
double ORIGIN[2] = {0., 0.};    /* center of the viewed region in the complex plane */
double PROJ[2] = {4, 4*0.75};   /* width/height of the viewed region */
// the first index is being used for mouse events
complex_t cconst[9] = {
  {0., 0.},
  {-0.8, 0.156},
  {-0.7269, 0.1889},
  {-0.61803398875, 0.},
  {-0.4, 0.6},
  {0.285, 0.01},
  {-0.70176, 0.3842},
  {-0.835, -0.2321},
  {0, -0.8}
};

/* Functions */

/*
 * One step of the quadratic map f(z) = z^2 + c.
 */
complex_t iterator(complex_t* z, complex_t* c) {
  complex_t z2 = complex_squarecpy(z);
  complex_t fz = complex_addcpy(&z2, c);
  return fz;
}

/*
 * Escape-time count for the Mandelbrot set at pixel (x, y).
 * The pixel is mapped to c; z is iterated from its incoming value and
 * left equal to c on return. Returns the iteration count in [0, NMAX].
 */
int mandelbrot(complex_t* z, complex_t* c, double x, double y) {
  c->real = linearMap(x, 0, WIDTH, ORIGIN[0]-PROJ[0]/2, ORIGIN[0]+PROJ[0]/2);
  c->imag = linearMap(y, 0, HEIGHT, ORIGIN[1]-PROJ[1]/2, ORIGIN[1]+PROJ[1]/2);
  int n = 0;
  while (n < NMAX) {
    if (complex_abs2(z) > 4) {  /* escaped: |z| > 2 */
      break;
    }
    *z = iterator(z, c);
    n++;
  }
  *z = *c;
  return n;
}

/*
 * Escape-time count for the Julia set of constant c at pixel (x, y).
 * The pixel is mapped to the start value z. Returns the count in [0, NMAX].
 */
int julia(complex_t* z, complex_t* c, double x, double y) {
  z->real = linearMap(x, 0, WIDTH, ORIGIN[0]-PROJ[0]/2, ORIGIN[0]+PROJ[0]/2);
  z->imag = linearMap(y, 0, HEIGHT, ORIGIN[1]-PROJ[1]/2, ORIGIN[1]+PROJ[1]/2);
  int n = 0;
  while (n < NMAX) {
    if (complex_abs2(z) > 4) {  /* escaped: |z| > 2 */
      break;
    }
    *z = iterator(z, c);
    n++;
  }
  return n;
}

/* Main */
int main(void) {
  /* Init graphics */
  SDL_Window* window;
  SDL_Renderer* renderer;
  SDL_Texture* texture;
  int is_running = 1;
  SDL_Init(SDL_INIT_VIDEO);
  window = SDL_CreateWindow("Mandelbrot",
                            SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
                            WIDTH, HEIGHT, 0);
  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED);
  texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_ARGB8888,
                              SDL_TEXTUREACCESS_STATIC, WIDTH, HEIGHT);
  Uint32* pixels = malloc(WIDTH*HEIGHT*sizeof(Uint32));
  if (pixels == NULL) {
    /* bug fix: the allocation was previously unchecked */
    fprintf(stderr, "julia: failed to allocate pixel buffer\n");
    SDL_DestroyTexture(texture);
    SDL_DestroyRenderer(renderer);
    SDL_DestroyWindow(window);
    SDL_Quit();
    return 1;
  }
  memset(pixels, 0, WIDTH*HEIGHT*sizeof(Uint32));
  /* Mandelbrot algorithm */
  complex_t z = {0.0, 0.0};
  complex_t c = cconst[1];
  int n = 0;
  int x = 0;
  int y = 0;
  /* Render loop */
  while (is_running) {
    is_running = processEvents(window, is_running, 1); // w/ mouse tracking
#pragma omp simd
    for (int i = 0; i<WIDTH*HEIGHT; i++) {
      c = cconst[0];  /* slot 0 is updated by mouse events */
      // linear map pixel coordinates
      x = i%WIDTH;    // pixel coord in x
      y = i/WIDTH;    // pixel coord in y
      /* n = mandelbrot(&z, &c, x, y); */
      n = julia(&z, &c, x, y);
      /* pixels[i] = getPixelColor(n, n, n, 255); */
      /* pixels[i] = singulettMagmaCM(n%256); */
      pixels[i] = singulettParulaCM(n%256);
    }
    /* Project to Renderer and then to Window */
    SDL_UpdateTexture(texture, NULL, pixels, WIDTH*sizeof(Uint32));
    SDL_RenderClear(renderer);
    SDL_RenderCopy(renderer, texture, NULL, NULL);
    SDL_RenderPresent(renderer);
    SDL_Delay(100);
  }
  /* Clean-up: bug fix — destroy in reverse order of creation; the old code
   * destroyed the window before the renderer/texture that depend on it. */
  free(pixels);
  SDL_DestroyTexture(texture);
  SDL_DestroyRenderer(renderer);
  SDL_DestroyWindow(window);
  SDL_Quit();
  return 0;
}
abstract_pivot_column.h
/* Copyright 2013 IST Austria
Contributed by: Ulrich Bauer, Michael Kerber, Jan Reininghaus

This file is part of PHAT.

PHAT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

PHAT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with PHAT. If not, see <http://www.gnu.org/licenses/>. */

#pragma once

#include "../helpers/misc.h"
#include "../representations/vector_vector.h"

namespace phat {

    // Note: We could even make the rep generic in the underlying Const representation
    // But I cannot imagine that anything else than vector<vector<index>> would
    // make sense
    //
    // Boundary-matrix representation that keeps one "hot" column per thread in
    // a specialized PivotColumn structure; all other columns live in the
    // vector_vector base. Operations transparently dispatch to whichever store
    // currently holds the column.
    template< typename PivotColumn >
    class abstract_pivot_column : public vector_vector {

    protected:
        typedef vector_vector Base;
        typedef PivotColumn pivot_col;

        // For parallization purposes, it could be more than one full column
        // (one cached pivot column and its index per thread).
        mutable thread_local_storage< pivot_col > pivot_cols;
        mutable thread_local_storage< index > idx_of_pivot_cols;   // -1 = no column cached

        // This thread's cached pivot column.
        pivot_col& get_pivot_col() const {
            return pivot_cols();
        }

        // True if column idx is the one cached by this thread.
        bool is_pivot_col( index idx ) const {
            return idx_of_pivot_cols() == idx;
        }

        // Write the cached column (if any) back into the base matrix and mark
        // the cache empty; get_col_and_clear also resets the pivot structure.
        void release_pivot_col() {
            index idx = idx_of_pivot_cols();
            if( idx != -1 ) {
                this->matrix[ idx ].clear();
                pivot_cols().get_col_and_clear( this->matrix[ idx ] );
            }
            idx_of_pivot_cols() = -1;
        }

        // Make column idx the cached pivot column (flushing the previous one).
        void make_pivot_col( index idx ) {
            release_pivot_col();
            idx_of_pivot_cols() = idx;
            get_pivot_col().add_col( matrix[ idx ] );
        }

    public:
        void _set_num_cols( index nr_of_cols ) {
            #pragma omp parallel for
            // NOTE(review): omp_get_num_threads() is evaluated inside the
            // parallel region, where it yields the team size — confirm this is
            // the intended way to size the per-thread storage.
            for( int tid = 0; tid < omp_get_num_threads(); tid++ ) {
                pivot_cols[ tid ].init( nr_of_cols );
                idx_of_pivot_cols[ tid ] = -1;
            }
            Base::_set_num_cols( nr_of_cols );
        }

        // target += source (the core reduction step); target becomes the
        // cached pivot column if it is not already.
        void _add_to( index source, index target ) {
            if( !is_pivot_col( target ) )
                make_pivot_col( target );
            get_pivot_col().add_col( matrix[source] );
        }

        // Flush every thread's cached column back into the base matrix.
        void _sync() {
            #pragma omp parallel for
            for( int tid = 0; tid < omp_get_num_threads(); tid++ )
                release_pivot_col();
        }

        void _get_col( index idx, column& col ) const {
            is_pivot_col( idx ) ? get_pivot_col().get_col( col ) : Base::_get_col( idx, col );
        }

        bool _is_empty( index idx ) const {
            return is_pivot_col( idx ) ? get_pivot_col().is_empty() : Base::_is_empty( idx );
        }

        index _get_max_index( index idx ) const {
            return is_pivot_col( idx ) ? get_pivot_col().get_max_index() : Base::_get_max_index( idx );
        }

        void _clear( index idx ) {
            is_pivot_col( idx ) ? get_pivot_col().clear() : Base::_clear( idx );
        }

        void _set_col( index idx, const column& col ) {
            is_pivot_col( idx ) ? get_pivot_col().set_col( col ) : Base::_set_col( idx, col );
        }

        void _remove_max( index idx ) {
            is_pivot_col( idx ) ? get_pivot_col().remove_max() : Base::_remove_max( idx );
        }

        // NOTE(review): every other interface method here uses a leading
        // underscore ("_finalize" elsewhere in PHAT); confirm callers invoke
        // "finalize" and not "_finalize", otherwise this is dead code.
        void finalize( index idx ) { Base::_finalize( idx ); }
    };
}
8158.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <killian@udel.edu>
 *
 * Copyright 2013, The University of Delaware
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"

/* Array initialization.
   Fills A with the deterministic pattern (i + j) / nj so runs are repeatable. */
static
void init_array (int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  // printf("Initializing Array\n");
  int i, j;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
        A[i][j] = ((DATA_TYPE) (i + j) / nj);
      }
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
        fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
        // NOTE(review): uses the compile-time macro NJ, not the runtime
        // parameter nj, for the line-break stride (standard Polybench quirk).
        if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
      }
  fprintf(stderr, "\n");
}

/* Main computational kernel. The whole function will be timed,
   including the call and return.
   3x3 convolution stencil over the interior of A (the 1-cell border is
   skipped); offloaded to an accelerator via OpenMP target. */
static
void kernel_conv2d(int ni, int nj,
                   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
  #pragma omp target teams distribute parallel for private(j)
  for (i = 1; i < _PB_NI - 1; ++i)
    {
      for (j = 1; j < _PB_NJ - 1; ++j)
        {
          B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
                  + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
                  + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
        }
    }
#pragma endscop
  // printf("Kernal computation complete !!\n");
}

int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
multi_device.h
//
// Created by liqinbin on 10/14/20.
// ThunderGBM multi_device.h: https://github.com/Xtra-Computing/thundergbm/blob/master/include/thundergbm/util/multi_device.h
// Under Apache-2.0 license
// copyright (c) 2020 jiashuai
//
#ifndef FEDTREE_MULTI_DEVICE_H
#define FEDTREE_MULTI_DEVICE_H

#ifdef USE_CUDA

#include "FedTree/common.h"

// Run `something` with the active CUDA device temporarily switched to
// `device_id`; whichever device was active beforehand is restored on exit.
// FIXME make this macro into a function?
#define DO_ON_DEVICE(device_id, something) \
    do { \
        int saved_device_id = 0; \
        CUDA_CHECK(cudaGetDevice(&saved_device_id)); \
        CUDA_CHECK(cudaSetDevice(device_id)); \
        something; \
        CUDA_CHECK(cudaSetDevice(saved_device_id)); \
    } while (false)

/**
 * Invoke do_something(device_id) once per device, using one OpenMP thread per
 * device, then switch back to the device that was active on entry.
 *
 * example:
 *
 *   DO_ON_MULTI_DEVICES(n_devices, [&](int device_id){
 *       //do_something_on_device(device_id);
 *   });
 */
template<typename L>
void DO_ON_MULTI_DEVICES(int n_devices, L do_something) {
    int saved_device_id = 0;
    CUDA_CHECK(cudaGetDevice(&saved_device_id));
#pragma omp parallel for num_threads(n_devices)
    for (int dev = 0; dev < n_devices; ++dev) {
        CUDA_CHECK(cudaSetDevice(dev));
        do_something(dev);
    }
    CUDA_CHECK(cudaSetDevice(saved_device_id));
}
#endif
#endif //FEDTREE_MULTI_DEVICE_H
GB_binop__bshift_uint64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd): GB_AaddB__bshift_uint64
// A.*B function (eWiseMult): GB_AemultB__bshift_uint64
// A*D function (colscale): (none)
// D*A function (rowscale): (node)
// NOTE(review): "(node)" above (and in the #if 0 rowscale stub below) looks
// like a typo for the "(none)" placeholder; harmless since that stub is
// compiled out.
// C+=B function (dense accum): GB_Cdense_accumB__bshift_uint64
// C+=b function (dense accum): GB_Cdense_accumb__bshift_uint64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bshift_uint64
// C=scalar+B GB_bind1st__bshift_uint64
// C=scalar+B' GB_bind1st_tran__bshift_uint64
// C=A+scalar GB_bind2nd__bshift_uint64
// C=A'+scalar GB_bind2nd_tran__bshift_uint64

// C type: uint64_t
// A type: uint64_t
// B,b type: int8_t
// BinaryOp: cij = GB_bitshift_uint64 (aij, bij)

#define GB_ATYPE \
uint64_t

#define GB_BTYPE \
int8_t

#define GB_CTYPE \
uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_bitshift_uint64 (x, y) ;

// op is second
#define GB_OP_IS_SECOND \
0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_UINT64 || GxB_NO_BSHIFT_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__bshift_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__bshift_uint64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__bshift_uint64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above already returns
    // (generator artifact; harmless).
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    bool A_is_pattern,
    const GrB_Matrix D,
    bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info (node)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    bool D_is_pattern,
    const GrB_Matrix B,
    bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}

GrB_Info GB_AaddB__bshift_uint64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice
= NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bshift_uint64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bshift_uint64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = GB_bitshift_uint64 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator 
to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bshift_uint64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = Ax [p] ; Cx [p] = GB_bitshift_uint64 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = GB_bitshift_uint64 (x, aij) ; \ } GrB_Info GB_bind1st_tran__bshift_uint64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = GB_bitshift_uint64 (aij, y) ; \ } GrB_Info GB_bind2nd_tran__bshift_uint64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
/* ======================= colorspace.c ======================= */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE % % C O O L O O R R SS P P A A C E % % C O O L O O RRRR SSS PPPP AAAAA C EEE % % C O O L O O R R SS P A A C E % % CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE % % % % % % MagickCore Image Colorspace Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/property.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/gem.h" #include "magick/gem-private.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/utility.h" /* Typedef declarations. 
*/

/* A single row of a 3x3 color-transform lookup table: the contribution of
   one source channel to each of the three destination channels. */
typedef struct _TransformPacket
{
  MagickRealType
    x,
    y,
    z;
} TransformPacket;

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R G B T r a n s f o r m I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RGBTransformImage() converts the reference image from sRGB to an alternate
%  colorspace.  The transformation matrices are not the standard ones: the
%  weights are rescaled to normalize the range of the transformed values to
%  be [0..QuantumRange].
%
%  The format of the RGBTransformImage method is:
%
%      MagickBooleanType RGBTransformImage(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace to transform the image to.
%
*/

/* Convert an RGB triple to CMY; each output is scaled to [0,1]
   (assuming inputs lie in [0,QuantumRange]). */
static inline void ConvertRGBToCMY(const Quantum red,const Quantum green,
  const Quantum blue,double *cyan,double *magenta,double *yellow)
{
  *cyan=QuantumScale*(QuantumRange-red);
  *magenta=QuantumScale*(QuantumRange-green);
  *yellow=QuantumScale*(QuantumRange-blue);
}

/* Convert RGB to CIE L*a*b* by way of CIE XYZ. */
static void ConvertRGBToLab(const Quantum red,const Quantum green,
  const Quantum blue,double *L,double *a,double *b)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLab(X,Y,Z,L,a,b);
}

/* Linear XYZ -> LMS cone-response transform (coefficients match the CAT02
   matrix — confirm against the intended reference). */
static inline void ConvertXYZToLMS(const double x,const double y,
  const double z,double *L,double *M,double *S)
{
  *L=0.7328*x+0.4296*y-0.1624*z;
  *M=(-0.7036*x+1.6975*y+0.0061*z);
  *S=0.0030*x+0.0136*y+0.9834*z;
}

/* Convert RGB to LMS by way of CIE XYZ. */
static void ConvertRGBToLMS(const Quantum red,const Quantum green,
  const Quantum blue,double *L,double *M,double *S)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLMS(X,Y,Z,L,M,S);
}

/* Convert RGB to CIE L*u*v* by way of CIE XYZ. */
static void ConvertRGBToLuv(const Quantum red,const Quantum green,
  const Quantum blue,double *L,double *u,double *v)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLuv(X,Y,Z,L,u,v);
}

/* BT.601-style luma plus Pb/Pr chroma; chroma is offset by 0.5 so the
   result fits the unsigned [0,1] range. */
static void ConvertRGBToYPbPr(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *Pb,double *Pr)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5;
  *Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5;
}

/* YCbCr is computed here with the identical YPbPr transform. */
static void ConvertRGBToYCbCr(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *Cb,double *Cr)
{
  ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr);
}

/* PAL/analog YUV; U and V are offset by 0.5 into [0,1]. */
static void ConvertRGBToYUV(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *U,double *V)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5;
  *V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5;
}

/* SECAM YDbDr; Db and Dr are offset by 0.5 into [0,1]. */
static void ConvertRGBToYDbDr(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *Db,double *Dr)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5;
  *Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5;
}

/* NTSC YIQ; I and Q are offset by 0.5 into [0,1]. */
static void ConvertRGBToYIQ(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *I,double *Q)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5;
  *Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5;
}

MagickExport MagickBooleanType RGBTransformImage(Image *image,
  const ColorspaceType colorspace)
{
#define RGBTransformImageTag  "RGBTransform/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    primary_info;

  register ssize_t
    i;

  ssize_t
    y;

  TransformPacket
    *x_map,
    *y_map,
    *z_map;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(colorspace != sRGBColorspace);
  assert(colorspace != TransparentColorspace);
  assert(colorspace != UndefinedColorspace);
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  switch (colorspace)
  {
    case CMYKColorspace:
    {
      MagickPixelPacket
        zero;

      /*
        Convert RGB to CMYK colorspace.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass) == MagickFalse)
            return(MagickFalse);
        }
      if (SetImageColorspace(image,colorspace) == MagickFalse)
        return(MagickFalse);
      GetMagickPixelPacket(image,&zero);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        MagickPixelPacket
          pixel;

        register IndexPacket
          *restrict indexes;

        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        pixel=zero;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetMagickPixelPacket(image,q,indexes+x,&pixel);
          pixel.red=(MagickRealType) pixel.red;
          pixel.green=(MagickRealType) pixel.green;
          pixel.blue=(MagickRealType) pixel.blue;
          ConvertRGBToCMYK(&pixel);
          SetPixelPacket(image,&pixel,q,indexes+x);
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->type=image->matte == MagickFalse ? ColorSeparationType :
        ColorSeparationMatteType;
      if (SetImageColorspace(image,colorspace) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case GRAYColorspace:
    {
      /*
        Transform image from sRGB to GRAY.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelGray(q,ClampToQuantum(GetPixelIntensity(image,q)));
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace) == MagickFalse)
        return(MagickFalse);
      image->type=GrayscaleType;
      return(status);
    }
    case CMYColorspace:
    case HCLColorspace:
    case HCLpColorspace:
    case HSBColorspace:
    case HSIColorspace:
    case HSLColorspace:
    case HSVColorspace:
    case HWBColorspace:
    case LabColorspace:
    case LCHColorspace:
    case LCHabColorspace:
    case LCHuvColorspace:
    case LMSColorspace:
    case LuvColorspace:
    case XYZColorspace:
    case YCbCrColorspace:
    case YDbDrColorspace:
    case YIQColorspace:
    case YPbPrColorspace:
    case YUVColorspace:
    {
      /*
        Transform image from sRGB to one of the tristimulus colorspaces
        handled by the per-pixel converters above (CMY, HSL, Lab, YUV, ...).
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            X,
            Y,
            Z;

          Quantum
            blue,
            green,
            red;

          red=ClampToQuantum((MagickRealType) GetPixelRed(q));
          green=ClampToQuantum((MagickRealType) GetPixelGreen(q));
          blue=ClampToQuantum((MagickRealType) GetPixelBlue(q));
          switch (colorspace)
          {
            case CMYColorspace:
            {
              ConvertRGBToCMY(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HCLColorspace:
            {
              ConvertRGBToHCL(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HCLpColorspace:
            {
              ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSBColorspace:
            {
              ConvertRGBToHSB(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSIColorspace:
            {
              ConvertRGBToHSI(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSLColorspace:
            {
              ConvertRGBToHSL(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSVColorspace:
            {
              ConvertRGBToHSV(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HWBColorspace:
            {
              ConvertRGBToHWB(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LabColorspace:
            {
              ConvertRGBToLab(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LCHColorspace:
            case LCHabColorspace:
            {
              /* LCH is treated as LCHab. */
              ConvertRGBToLCHab(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LCHuvColorspace:
            {
              ConvertRGBToLCHuv(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LMSColorspace:
            {
              ConvertRGBToLMS(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LuvColorspace:
            {
              ConvertRGBToLuv(red,green,blue,&X,&Y,&Z);
              break;
            }
            case XYZColorspace:
            {
              ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YCbCrColorspace:
            {
              ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YDbDrColorspace:
            {
              ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YIQColorspace:
            {
              ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YPbPrColorspace:
            {
              ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YUVColorspace:
            {
              ConvertRGBToYUV(red,green,blue,&X,&Y,&Z);
              break;
            }
            default:
              break;
          }
          /* Converters emit normalized [0,1] channels; scale back to the
             quantum range. */
          SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*X));
          SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*Y));
          SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*Z));
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case LogColorspace:
    {
#define DisplayGamma  (1.0/1.7)
#define FilmGamma  0.6
#define ReferenceBlack  95.0
#define ReferenceWhite  685.0

      const char
        *value;

      double
        black,
        density,
        film_gamma,
        gamma,
        reference_black,
        reference_white;

      Quantum
        *logmap;

      /*
        Transform RGB to Log colorspace.  Defaults may be overridden with
        the "gamma", "film-gamma", "reference-black", and "reference-white"
        image properties.
      */
      density=DisplayGamma;
      gamma=DisplayGamma;
      value=GetImageProperty(image,"gamma");
      if (value != (const char *) NULL)
        gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
      film_gamma=FilmGamma;
      value=GetImageProperty(image,"film-gamma");
      if (value != (const char *) NULL)
        film_gamma=StringToDouble(value,(char **) NULL);
      reference_black=ReferenceBlack;
      value=GetImageProperty(image,"reference-black");
      if (value != (const char *) NULL)
        reference_black=StringToDouble(value,(char **) NULL);
      reference_white=ReferenceWhite;
      value=GetImageProperty(image,"reference-white");
      if (value != (const char *) NULL)
        reference_white=StringToDouble(value,(char **) NULL);
      logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
        sizeof(*logmap));
      if (logmap == (Quantum *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/
        film_gamma);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_threads(image,image,1,1)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
        logmap[i]=ScaleMapToQuantum((MagickRealType) (MaxMap*(reference_white+
          log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002/
          film_gamma))/1024.0));
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        /* Countdown loop: x is a counter only; q advances through the row. */
        for (x=(ssize_t) image->columns; x != 0; x--)
        {
          Quantum
            blue,
            green,
            red;

          red=ClampToQuantum((MagickRealType) GetPixelRed(q));
          green=ClampToQuantum((MagickRealType) GetPixelGreen(q));
          blue=ClampToQuantum((MagickRealType) GetPixelBlue(q));
          SetPixelRed(q,logmap[ScaleQuantumToMap(red)]);
          SetPixelGreen(q,logmap[ScaleQuantumToMap(green)]);
          SetPixelBlue(q,logmap[ScaleQuantumToMap(blue)]);
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      logmap=(Quantum *) RelinquishMagickMemory(logmap);
      if (SetImageColorspace(image,colorspace) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case RGBColorspace:
    case scRGBColorspace:
    {
      /*
        Transform image from sRGB to linear RGB.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          Quantum
            blue,
            green,
            red;

          red=ClampToQuantum(DecodePixelGamma((MagickRealType)
            GetPixelRed(q)));
          green=ClampToQuantum(DecodePixelGamma((MagickRealType)
            GetPixelGreen(q)));
          blue=ClampToQuantum(DecodePixelGamma((MagickRealType)
            GetPixelBlue(q)));
          SetPixelRed(q,red);
          SetPixelGreen(q,green);
          SetPixelBlue(q,blue);
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    default:
      break;
  }
  /*
    Allocate the tables.
  */
  x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*x_map));
  y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*y_map));
  z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*z_map));
  /* NOTE(review): if only some of the three allocations succeed, the
     earlier ones leak on this error path. */
  if ((x_map == (TransformPacket *) NULL) ||
      (y_map == (TransformPacket *) NULL) ||
      (z_map == (TransformPacket *) NULL))
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(&primary_info,0,sizeof(primary_info));
  switch (colorspace)
  {
    case OHTAColorspace:
    {
      /*
        Initialize OHTA tables:

          I1 = 0.33333*R+0.33334*G+0.33333*B
          I2 = 0.50000*R+0.00000*G-0.50000*B
          I3 =-0.25000*R+0.50000*G-0.25000*B

        I and Q, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_threads(image,image,1,1)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.33333*(double) i);
        y_map[i].x=(MagickRealType) (0.33334*(double) i);
        z_map[i].x=(MagickRealType) (0.33333*(double) i);
        x_map[i].y=(MagickRealType) (0.50000*(double) i);
        y_map[i].y=(MagickRealType) (0.00000*(double) i);
        z_map[i].y=(MagickRealType) (-0.50000*(double) i);
        x_map[i].z=(MagickRealType) (-0.25000*(double) i);
        y_map[i].z=(MagickRealType) (0.50000*(double) i);
        z_map[i].z=(MagickRealType) (-0.25000*(double) i);
      }
      break;
    }
    case Rec601LumaColorspace:
    {
      /*
        Initialize Rec601 luma tables:

          G = 0.298839*R+0.586811*G+0.114350*B
      */
      /* NOTE(review): unlike the other table loops this pragma omits
         magick_threads — confirm intentional. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.298839*(double) i);
        y_map[i].x=(MagickRealType) (0.586811*(double) i);
        z_map[i].x=(MagickRealType) (0.114350*(double) i);
        x_map[i].y=(MagickRealType) (0.298839*(double) i);
        y_map[i].y=(MagickRealType) (0.586811*(double) i);
        z_map[i].y=(MagickRealType) (0.114350*(double) i);
        x_map[i].z=(MagickRealType) (0.298839*(double) i);
        y_map[i].z=(MagickRealType) (0.586811*(double) i);
        z_map[i].z=(MagickRealType) (0.114350*(double) i);
      }
      break;
    }
    case Rec601YCbCrColorspace:
    {
      /*
        Initialize YCbCr tables (ITU-R BT.601):

          Y =  0.2988390*R+0.5868110*G+0.1143500*B
          Cb= -0.1687367*R-0.3312640*G+0.5000000*B
          Cr=  0.5000000*R-0.4186880*G-0.0813120*B

        Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_threads(image,image,1,1)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.298839*(double) i);
        y_map[i].x=(MagickRealType) (0.586811*(double) i);
        z_map[i].x=(MagickRealType) (0.114350*(double) i);
        x_map[i].y=(MagickRealType) (-0.1687367*(double) i);
        y_map[i].y=(MagickRealType) (-0.331264*(double) i);
        z_map[i].y=(MagickRealType) (0.500000*(double) i);
        x_map[i].z=(MagickRealType) (0.500000*(double) i);
        y_map[i].z=(MagickRealType) (-0.418688*(double) i);
        z_map[i].z=(MagickRealType) (-0.081312*(double) i);
      }
      break;
    }
    case Rec709LumaColorspace:
    {
      /*
        Initialize Rec709 luma tables:

          G = 0.21260*R+0.71520*G+0.07220*B
      */
      /* NOTE(review): pragma omits magick_threads here too — confirm. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.21260*(double) i);
        y_map[i].x=(MagickRealType) (0.71520*(double) i);
        z_map[i].x=(MagickRealType) (0.07220*(double) i);
        x_map[i].y=(MagickRealType) (0.21260*(double) i);
        y_map[i].y=(MagickRealType) (0.71520*(double) i);
        z_map[i].y=(MagickRealType) (0.07220*(double) i);
        x_map[i].z=(MagickRealType) (0.21260*(double) i);
        y_map[i].z=(MagickRealType) (0.71520*(double) i);
        z_map[i].z=(MagickRealType) (0.07220*(double) i);
      }
      break;
    }
    case Rec709YCbCrColorspace:
    {
      /*
        Initialize YCbCr tables (ITU-R BT.709):

          Y =  0.212600*R+0.715200*G+0.072200*B
          Cb= -0.114572*R-0.385428*G+0.500000*B
          Cr=  0.500000*R-0.454153*G-0.045847*B

        Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_threads(image,image,1,1)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.212600*(double) i);
        y_map[i].x=(MagickRealType) (0.715200*(double) i);
        z_map[i].x=(MagickRealType) (0.072200*(double) i);
        x_map[i].y=(MagickRealType) (-0.114572*(double) i);
        y_map[i].y=(MagickRealType) (-0.385428*(double) i);
        z_map[i].y=(MagickRealType) (0.500000*(double) i);
        x_map[i].z=(MagickRealType) (0.500000*(double) i);
        y_map[i].z=(MagickRealType) (-0.454153*(double) i);
        z_map[i].z=(MagickRealType) (-0.045847*(double) i);
      }
      break;
    }
    case YCCColorspace:
    {
      /*
        Initialize YCC tables:

          Y =  0.298839*R+0.586811*G+0.114350*B
          C1= -0.298839*R-0.586811*G+0.88600*B
          C2=  0.70100*R-0.586811*G-0.114350*B

        YCC is scaled by 1.3584.  C1 zero is 156 and C2 is at 137.
      */
      primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156));
      primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137));
      /* Linear segment near black (i <= 0.018*MaxMap)... */
      for (i=0; i <= (ssize_t) (0.018*MaxMap); i++)
      {
        x_map[i].x=0.003962014134275617*i;
        y_map[i].x=0.007778268551236748*i;
        z_map[i].x=0.001510600706713781*i;
        x_map[i].y=(-0.002426619775463276)*i;
        y_map[i].y=(-0.004763965913702149)*i;
        z_map[i].y=0.007190585689165425*i;
        x_map[i].z=0.006927257754597858*i;
        y_map[i].z=(-0.005800713697502058)*i;
        z_map[i].z=(-0.0011265440570958)*i;
      }
      /* ...and the gamma-corrected segment above it. */
      for ( ; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=0.2201118963486454*(1.099*i-0.099);
        y_map[i].x=0.4321260306242638*(1.099*i-0.099);
        z_map[i].x=0.08392226148409894*(1.099*i-0.099);
        x_map[i].y=(-0.1348122097479598)*(1.099*i-0.099);
        y_map[i].y=(-0.2646647729834528)*(1.099*i-0.099);
        z_map[i].y=0.3994769827314126*(1.099*i-0.099);
        x_map[i].z=0.3848476530332144*(1.099*i-0.099);
        y_map[i].z=(-0.3222618720834477)*(1.099*i-0.099);
        z_map[i].z=(-0.06258578094976668)*(1.099*i-0.099);
      }
      break;
    }
    default:
    {
      /*
        Linear conversion tables.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_threads(image,image,1,1)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (1.0*(double) i);
        y_map[i].x=(MagickRealType) 0.0;
        z_map[i].x=(MagickRealType) 0.0;
        x_map[i].y=(MagickRealType) 0.0;
        y_map[i].y=(MagickRealType) (1.0*(double) i);
        z_map[i].y=(MagickRealType) 0.0;
        x_map[i].z=(MagickRealType) 0.0;
        y_map[i].z=(MagickRealType) 0.0;
        z_map[i].z=(MagickRealType) (1.0*(double) i);
      }
      break;
    }
  }
  /*
    Convert from sRGB.
  */
  switch (image->storage_class)
  {
    case DirectClass:
    default:
    {
      /*
        Convert DirectClass image.
      */
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        MagickPixelPacket
          pixel;

        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        register size_t
          blue,
          green,
          red;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          red=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelRed(q)));
          green=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelGreen(q)));
          blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelBlue(q)));
          pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+
            (MagickRealType) primary_info.x;
          pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+
            (MagickRealType) primary_info.y;
          pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+
            (MagickRealType) primary_info.z;
          SetPixelRed(q,ScaleMapToQuantum(pixel.red));
          SetPixelGreen(q,ScaleMapToQuantum(pixel.green));
          SetPixelBlue(q,ScaleMapToQuantum(pixel.blue));
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_RGBTransformImage)
#endif
            proceed=SetImageProgress(image,RGBTransformImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      break;
    }
    case PseudoClass:
    {
      register size_t
        blue,
        green,
        red;

      /*
        Convert PseudoClass image: transform the colormap only, then sync.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        MagickPixelPacket
          pixel;

        red=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
          image->colormap[i].red));
        green=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
          image->colormap[i].green));
        blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
          image->colormap[i].blue));
        pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x;
        pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y;
        pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z;
        image->colormap[i].red=ScaleMapToQuantum(pixel.red);
        image->colormap[i].green=ScaleMapToQuantum(pixel.green);
        image->colormap[i].blue=ScaleMapToQuantum(pixel.blue);
      }
      (void) SyncImage(image);
      break;
    }
  }
  /*
    Relinquish resources.
  */
  z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
  y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
  x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
  if (SetImageColorspace(image,colorspace) == MagickFalse)
    return(MagickFalse);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C o l o r s p a c e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColorspace() sets the colorspace member of the Image structure.
%
%  The format of the SetImageColorspace method is:
%
%      MagickBooleanType SetImageColorspace(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  /*
    Tag-only operation: no pixel data is converted here.  Rendering intent,
    gamma, and chromaticity are reset to defaults and then restored to
    sRGB-like values where the branches below apply.
  */
  if (image->colorspace == colorspace)
    return(MagickTrue);  /* already tagged with this colorspace: no-op */
  image->colorspace=colorspace;
  image->rendering_intent=UndefinedIntent;
  image->gamma=1.000;
  (void) ResetMagickMemory(&image->chromaticity,0,sizeof(image->chromaticity));
  if (IsGrayColorspace(colorspace) != MagickFalse)
    {
      /*
        Grayscale target: assume non-linear (sRGB-like) gray unless the
        intensity method indicates linear luminance samples (Rec601/Rec709
        luminance) or is undefined.
      */
      if ((image->intensity != Rec601LuminancePixelIntensityMethod) &&
          (image->intensity != Rec709LuminancePixelIntensityMethod) &&
          (image->intensity != UndefinedPixelIntensityMethod))
        image->gamma=1.000/2.200;
      image->type=GrayscaleType;
    }
  else
    if (IssRGBColorspace(colorspace) != MagickFalse)
      image->gamma=1.000/2.200;
  if (image->gamma == (1.000/2.200))
    {
      /*
        Non-linear encoding detected: restore the sRGB primaries and D65
        white point.  NOTE(review): the exact floating-point comparison is
        reliable only because the identical constant expression 1.000/2.200
        was assigned just above.
      */
      image->rendering_intent=PerceptualIntent;
      image->gamma=1.000/2.200;
      image->chromaticity.red_primary.x=0.6400;
      image->chromaticity.red_primary.y=0.3300;
      image->chromaticity.red_primary.z=0.0300;
      image->chromaticity.green_primary.x=0.3000;
      image->chromaticity.green_primary.y=0.6000;
      image->chromaticity.green_primary.z=0.1000;
      image->chromaticity.blue_primary.x=0.1500;
      image->chromaticity.blue_primary.y=0.0600;
      image->chromaticity.blue_primary.z=0.7900;
      image->chromaticity.white_point.x=0.3127;
      image->chromaticity.white_point.y=0.3290;
      image->chromaticity.white_point.z=0.3583;
    }
  /* Redundant with the grayscale branch above, but harmless. */
  if (IsGrayColorspace(colorspace) != MagickFalse)
    image->type=GrayscaleType;
  return(SyncImagePixelCache(image,&image->exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f o r m I m a g e C o l o r s p a c e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformImageColorspace() transforms an image colorspace.
%
%  The format of the TransformImageColorspace method is:
%
%      MagickBooleanType TransformImageColorspace(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    An undefined target simply re-tags the image; an identical target is a
    no-op.
  */
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace));
  if (image->colorspace == colorspace)
    return(MagickTrue);
  /*
    Embedded color profiles no longer describe the pixels once they are
    transformed, so discard them first.
  */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformRGBImage(image,colorspace));
  /*
    Pivot through sRGB: bring the image to sRGB first (unless it is already
    RGB-compatible), then convert from sRGB to the requested colorspace.
  */
  if ((IssRGBColorspace(image->colorspace) == MagickFalse) &&
      (TransformRGBImage(image,image->colorspace) == MagickFalse))
    return(MagickFalse);
  return(RGBTransformImage(image,colorspace));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a n s f o r m R G B I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformRGBImage() converts the reference image from an alternate
%  colorspace to sRGB.  The transformation matrices are not the standard ones:
%  the weights are rescaled to normalize the range of the transformed values to
%  be [0..QuantumRange].
%
%  The format of the TransformRGBImage method is:
%
%      MagickBooleanType TransformRGBImage(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace to transform the image to.
%
*/

static inline void ConvertCMYToRGB(const double cyan,const double magenta,
  const double yellow,Quantum *red,Quantum *green,Quantum *blue)
{
  /* CMY is the subtractive complement of RGB: component = 1 - ink. */
  *red=ClampToQuantum(QuantumRange*(1.0-cyan));
  *green=ClampToQuantum(QuantumRange*(1.0-magenta));
  *blue=ClampToQuantum(QuantumRange*(1.0-yellow));
}

static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
  double *X,double *Y,double *Z)
{
  /* Fixed 3x3 linear transform from LMS cone space to CIE XYZ. */
  *X=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S;
  *Y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S;
  *Z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S;
}

static inline void ConvertLMSToRGB(const double L,const double M,
  const double S,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    X,
    Y,
    Z;

  /* LMS -> XYZ -> RGB; reuses the XYZ pipeline for the final step. */
  ConvertLMSToXYZ(L,M,S,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

static inline void ConvertLuvToRGB(const double L,const double u,
  const double v,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    X,
    Y,
    Z;

  /*
    Undo the [0..1] channel packing (L scales to 0..100, u to -134..220,
    v to -140..122), then convert through XYZ.
  */
  ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

static inline ssize_t RoundToYCC(const MagickRealType value)
{
  /* Round to the nearest YCC table index, clamped to [0..1388]. */
  if (value <= 0.0)
    return(0);
  if (value >= 1388.0)
    return(1388);
  return((ssize_t) (value+0.5));
}

static inline void ConvertCMYKToRGB(MagickPixelPacket *pixel)
{
  /*
    In-place CMYK -> RGB.  NOTE(review): pixel->index appears to carry the
    black (K) channel here -- each color channel is attenuated by (1-K) and
    then complemented; confirm against the CMYK callers.
  */
  pixel->red=((QuantumRange-(QuantumScale*pixel->red*
    (QuantumRange-pixel->index)+pixel->index)));
  pixel->green=((QuantumRange-(QuantumScale*pixel->green*
    (QuantumRange-pixel->index)+pixel->index)));
  pixel->blue=((QuantumRange-(QuantumScale*pixel->blue*
    (QuantumRange-pixel->index)+pixel->index)));
}

static inline void ConvertLabToRGB(const double L,const double a,
  const double b,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    X,
    Y,
    Z;

  /*
    Undo the [0..1] channel packing (L scales to 0..100, a/b are de-biased
    by 0.5 and scaled by 255), then convert through XYZ.
  */
  ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr,
  Quantum *red,Quantum *green,Quantum *blue)
{
  /*
    Inverse YPbPr matrix; Pb/Pr were stored biased by +0.5 into [0..1], so
    the bias is removed before applying the coefficients.  The weights
    approximate the classic 1.402/1.772 (Rec. 601) inverse, refined for
    round-trip accuracy.
  */
  *red=ClampToQuantum(QuantumRange*(0.99999999999914679361*Y-
    1.2188941887145875e-06*(Pb-0.5)+1.4019995886561440468*(Pr-0.5)));
  *green=ClampToQuantum(QuantumRange*(0.99999975910502514331*Y-
    0.34413567816504303521*(Pb-0.5)-0.71413649331646789076*(Pr-0.5)));
  *blue=ClampToQuantum(QuantumRange*(1.00000124040004623180*Y+
    1.77200006607230409200*(Pb-0.5)+2.1453384174593273e-06*(Pr-0.5)));
}

static void ConvertYCbCrToRGB(const double Y,const double Cb,
  const double Cr,Quantum *red,Quantum *green,Quantum *blue)
{
  /* Full-range YCbCr decodes with the identical math as YPbPr. */
  ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue);
}

static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr,
  Quantum *red,Quantum *green,Quantum *blue)
{
  /* Inverse YDbDr matrix; Db/Dr were stored biased by +0.5 into [0..1]. */
  *red=ClampToQuantum(QuantumRange*(Y+9.2303716147657e-05*(Db-0.5)-
    0.52591263066186533*(Dr-0.5)));
  *green=ClampToQuantum(QuantumRange*(Y-0.12913289889050927*(Db-0.5)+
    0.26789932820759876*(Dr-0.5)));
  *blue=ClampToQuantum(QuantumRange*(Y+0.66467905997895482*(Db-0.5)-
    7.9202543533108e-05*(Dr-0.5)));
}

static void ConvertYIQToRGB(const double Y,const double I,const double Q,
  Quantum *red,Quantum *green,Quantum *blue)
{
  /* Inverse YIQ matrix; I/Q were stored biased by +0.5 into [0..1]. */
  *red=ClampToQuantum(QuantumRange*(Y+0.9562957197589482261*(I-0.5)+
    0.6210244164652610754*(Q-0.5)));
  *green=ClampToQuantum(QuantumRange*(Y-0.2721220993185104464*(I-0.5)-
    0.6473805968256950427*(Q-0.5)));
  *blue=ClampToQuantum(QuantumRange*(Y-1.1069890167364901945*(I-0.5)+
    1.7046149983646481374*(Q-0.5)));
}

static void ConvertYUVToRGB(const double Y,const double U,const double V,
  Quantum *red,Quantum *green,Quantum *blue)
{
  /* Inverse YUV matrix; U/V were stored biased by +0.5 into [0..1]. */
  *red=ClampToQuantum(QuantumRange*(Y-3.945707070708279e-05*(U-0.5)+
    1.1398279671717170825*(V-0.5)));
  *green=ClampToQuantum(QuantumRange*(Y-0.3946101641414141437*(U-0.5)-
    0.5805003156565656797*(V-0.5)));
  *blue=ClampToQuantum(QuantumRange*(Y+2.0319996843434342537*(U-0.5)-
    4.813762626262513e-04*(V-0.5)));
}

MagickExport MagickBooleanType TransformRGBImage(Image *image,
  const ColorspaceType colorspace)
{
#define TransformRGBImageTag  "Transform/Image"

  /*
    Lookup table mapping index 0..1388 to a normalized intensity in [0..1]
    (values are very nearly i/1388).  NOTE(review): presumably indexed via
    RoundToYCC() for PhotoCD YCC decoding -- confirm against the YCC decode
    path outside this view.
  */
  static const float
    YCCMap[1389]
= { 0.000000, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f, 0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f, 0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f, 0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f, 0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f, 0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f, 0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f, 0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f, 0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f, 0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f, 0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f, 0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f, 0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f, 0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f, 0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f, 0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f, 0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f, 0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f, 0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f, 0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f, 0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f, 0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f, 0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f, 0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f, 0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f, 0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f, 0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f, 0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f, 0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f, 0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f, 0.129683f, 
0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f, 0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f, 0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f, 0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f, 0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f, 0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f, 0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f, 0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f, 0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f, 0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f, 0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f, 0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f, 0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f, 0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f, 0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f, 0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f, 0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f, 0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f, 0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f, 0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f, 0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f, 0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f, 0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f, 0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f, 0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f, 0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f, 0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f, 0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f, 0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f, 0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f, 0.259366f, 0.260086f, 
0.260807f, 0.261527f, 0.262248f, 0.262968f, 0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f, 0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f, 0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f, 0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f, 0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f, 0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f, 0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f, 0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f, 0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f, 0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f, 0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f, 0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f, 0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f, 0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f, 0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f, 0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f, 0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f, 0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f, 0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f, 0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f, 0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f, 0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f, 0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f, 0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f, 0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f, 0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f, 0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f, 0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f, 0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f, 0.389049f, 0.389769f, 0.390490f, 
0.391210f, 0.391931f, 0.392651f, 0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f, 0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f, 0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f, 0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f, 0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f, 0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f, 0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f, 0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f, 0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f, 0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f, 0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f, 0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f, 0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f, 0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f, 0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f, 0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f, 0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f, 0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f, 0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f, 0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f, 0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f, 0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f, 0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f, 0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f, 0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f, 0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f, 0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f, 0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f, 0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f, 0.518732f, 0.519452f, 0.520173f, 0.520893f, 
0.521614f, 0.522334f, 0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f, 0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f, 0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f, 0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f, 0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f, 0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f, 0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f, 0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f, 0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f, 0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f, 0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f, 0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f, 0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f, 0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f, 0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f, 0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f, 0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f, 0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f, 0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f, 0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f, 0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f, 0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f, 0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f, 0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f, 0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f, 0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f, 0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f, 0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f, 0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f, 0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 
0.652017f, 0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f, 0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f, 0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f, 0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f, 0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f, 0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f, 0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f, 0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f, 0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f, 0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f, 0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f, 0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f, 0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f, 0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f, 0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f, 0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f, 0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f, 0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f, 0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f, 0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f, 0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f, 0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f, 0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f, 0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f, 0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f, 0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f, 0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f, 0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f, 0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f, 0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f, 
0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f, 0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f, 0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f, 0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f, 0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f, 0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f, 0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f, 0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f, 0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f, 0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f, 0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f, 0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f, 0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f, 0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f, 0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f, 0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f, 0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f, 0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f, 0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f, 0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f, 0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f, 0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f, 0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f, 0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f, 0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f, 0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f, 0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f, 0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f, 0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f, 0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f, 0.912104f, 
0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f, 0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f, 0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f, 0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f, 0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f, 0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f, 0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f, 0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f, 0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f, 0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f, 0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f, 0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f, 0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f, 0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f, 0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f, 0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f, 0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f, 0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f, 0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f, 0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f, 0.998559f, 0.999280f, 1.000000 }; CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; TransformPacket *y_map, *x_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; progress=0; exception=(&image->exception); switch (image->colorspace) { case CMYKColorspace: { MagickPixelPacket zero; /* Transform image from CMYK to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } GetMagickPixelPacket(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); ConvertCMYKToRGB(&pixel); SetPixelPacket(image,&pixel,q,indexes+x); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case GRAYColorspace: case Rec601LumaColorspace: case Rec709LumaColorspace: { /* Transform linear RGB to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=(MagickRealType) GetPixelGray(q); SetPixelRed(q,ClampToQuantum(gray)); SetPixelGreen(q,ClampToQuantum(gray)); SetPixelBlue(q,ClampToQuantum(gray)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from source colorspace to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double X, Y, Z; Quantum blue, green, red; X=QuantumScale*GetPixelRed(q); Y=QuantumScale*GetPixelGreen(q); Z=QuantumScale*GetPixelBlue(q); switch (image->colorspace) { case CMYColorspace: { ConvertCMYToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLColorspace: { ConvertHCLToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLpColorspace: { ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue); break; } case HSBColorspace: { ConvertHSBToRGB(X,Y,Z,&red,&green,&blue); break; } case HSIColorspace: { ConvertHSIToRGB(X,Y,Z,&red,&green,&blue); break; } case HSLColorspace: { ConvertHSLToRGB(X,Y,Z,&red,&green,&blue); break; } case HSVColorspace: { ConvertHSVToRGB(X,Y,Z,&red,&green,&blue); break; } case HWBColorspace: { ConvertHWBToRGB(X,Y,Z,&red,&green,&blue); break; } case LabColorspace: { ConvertLabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ConvertLCHabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHuvColorspace: { ConvertLCHuvToRGB(X,Y,Z,&red,&green,&blue); break; } case LMSColorspace: { ConvertLMSToRGB(X,Y,Z,&red,&green,&blue); break; } case LuvColorspace: { ConvertLuvToRGB(X,Y,Z,&red,&green,&blue); break; } case XYZColorspace: { ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); break; } case YCbCrColorspace: { 
ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue); break; } case YDbDrColorspace: { ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue); break; } case YIQColorspace: { ConvertYIQToRGB(X,Y,Z,&red,&green,&blue); break; } case YPbPrColorspace: { ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue); break; } case YUVColorspace: { ConvertYUVToRGB(X,Y,Z,&red,&green,&blue); break; } default: break; } SetPixelRed(q,ClampToQuantum((MagickRealType) red)); SetPixelGreen(q,ClampToQuantum((MagickRealType) green)); SetPixelBlue(q,ClampToQuantum((MagickRealType) blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform Log to sRGB colorspace. */ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma"); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma"); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black"); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white"); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++) logmap[i]=(Quantum) 0; 
for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++) logmap[i]=ClampToQuantum((MagickRealType) QuantumRange/(1.0-black)* (pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002/ film_gamma)-black)); for ( ; i <= (ssize_t) MaxMap; i++) logmap[i]=QuantumRange; if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum((MagickRealType) logmap[ScaleQuantumToMap(GetPixelRed(q))]); green=ClampToQuantum((MagickRealType) logmap[ScaleQuantumToMap(GetPixelGreen(q))]); blue=ClampToQuantum((MagickRealType) logmap[ScaleQuantumToMap(GetPixelBlue(q))]); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform linear RGB to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelBlue(q))); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. 
*/ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } switch (image->colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: R = I1+1.00000*I2-0.66668*I3 G = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(1.0*(double) i); y_map[i].x=(0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].x=(-0.5*0.66668*(2.0*(double) i-MaxMap)); x_map[i].y=(1.0*(double) i); y_map[i].y=(0.5*0.00000*(2.0*(double) i-MaxMap)); z_map[i].y=(0.5*1.33333*(2.0*(double) i-MaxMap)); x_map[i].z=(1.0*(double) i); y_map[i].z=(-0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].z=(-0.5*0.66668*(2.0*(double) i-MaxMap)); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = Y+1.772000*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.99999999999914679361*(double) i; y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap); z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap); x_map[i].y=0.99999975910502514331*(double) i; y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap); z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap); x_map[i].z=1.00000124040004623180*(double) i; y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap); z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = Y+1.855600*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*(double) i-MaxMap)); z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*(double) i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*(double) i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*(double) i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*(double) i-MaxMap)); z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*(double) i-MaxMap)); } break; } case YCCColorspace: { /* Initialize YCC tables: R = Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = Y+1.632639*C1 YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.3584000*(double) i); y_map[i].x=(MagickRealType) (0.0000000); z_map[i].x=(MagickRealType) (1.8215000*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].y=(MagickRealType) (1.3584000*(double) i); y_map[i].y=(MagickRealType) ((-0.4302726)*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].y=(MagickRealType) ((-0.9271435)*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].z=(MagickRealType) (1.3584000*(double) i); y_map[i].z=(MagickRealType) (2.2179000*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].z=(MagickRealType) (0.0000000); } break; } default: { /* Linear conversion tables. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert to sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. 
*/ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t blue, green, red; red=ScaleQuantumToMap(GetPixelRed(q)); green=ScaleQuantumToMap(GetPixelGreen(q)); blue=ScaleQuantumToMap(GetPixelBlue(q)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } SetPixelRed(q,ClampToQuantum(pixel.red)); SetPixelGreen(q,ClampToQuantum(pixel.green)); SetPixelBlue(q,ClampToQuantum(pixel.blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransformRGBImage) #endif proceed=SetImageProgress(image,TransformRGBImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { /* 
Convert PseudoClass image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { MagickPixelPacket pixel; register size_t blue, green, red; red=ScaleQuantumToMap(image->colormap[i].red); green=ScaleQuantumToMap(image->colormap[i].green); blue=ScaleQuantumToMap(image->colormap[i].blue); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } image->colormap[i].red=ClampToQuantum(pixel.red); image->colormap[i].green=ClampToQuantum(pixel.green); image->colormap[i].blue=ClampToQuantum(pixel.blue); } (void) SyncImage(image); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(MagickTrue); }
GB_unop__identity_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__(none))
// op(A') function:  GB (_unop_tran__identity_fc32_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = aij

// entry type of the input matrix A
#define GB_ATYPE \
    GxB_FC32_t

// entry type of the output matrix C
#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: the output is the input, unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting (FC32 -> FC32: a direct copy)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// NOTE: this apply kernel is excluded from compilation for this operator
// (see the "#if 0" below); only the transpose kernel is built.

#if 0
GrB_Info GB (_unop_apply__(none))
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry 0..anz-1 is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the kernel body is the shared transpose template, specialized via the
    // GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
fx.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF X X % % F X X % % FFF X % % F X X % % F X X % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/fx-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Define declarations. 
*/ #define LeftShiftOperator 0xf5U #define RightShiftOperator 0xf6U #define LessThanEqualOperator 0xf7U #define GreaterThanEqualOperator 0xf8U #define EqualOperator 0xf9U #define NotEqualOperator 0xfaU #define LogicalAndOperator 0xfbU #define LogicalOrOperator 0xfcU #define ExponentialNotation 0xfdU struct _FxInfo { const Image *images; char *expression; FILE *file; SplayTreeInfo *colors, *symbols; CacheView **view; RandomInfo *random_info; ExceptionInfo *exception; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e F x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireFxInfo() allocates the FxInfo structure. % % The format of the AcquireFxInfo method is: % % FxInfo *AcquireFxInfo(Image *images,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o expression: the expression. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression,
  ExceptionInfo *exception)
{
  char
    fx_op[2];

  const Image
    *next;

  FxInfo
    *fx_info;

  register ssize_t
    i;

  /*
    Allocate and zero the context; it owns its own ExceptionInfo.
  */
  fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info));
  (void) memset(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=images;
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /*
    One virtual cache view per image in the sequence, indexed in list order.
  */
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  i=0;
  next=GetFirstImageInList(fx_info->images);
  for ( ; next != (Image *) NULL; next=next->next)
  {
    fx_info->view[i]=AcquireVirtualCacheView(next,exception);
    i++;
  }
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=stderr;
  (void) SubstituteString(&fx_info->expression," ","");  /* compact string */
  /*
    Force right-to-left associativity for unary negation: rewrite "-x" as
    "-1.0*x", then undo the rewrite where the '-' was actually part of an
    exponent ("^-") or scientific notation ("E-"/"e-").  The substitution
    order below is load-bearing.
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  (void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
  (void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
  (void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
  /*
    Convert compound to simple operators: each two-character operator becomes
    the single private byte defined at the top of this file, so the evaluator
    can scan one character at a time.
  */
  fx_op[1]='\0';
  *fx_op=(char) LeftShiftOperator;
  (void) SubstituteString(&fx_info->expression,"<<",fx_op);
  *fx_op=(char) RightShiftOperator;
  (void) SubstituteString(&fx_info->expression,">>",fx_op);
  *fx_op=(char) LessThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,"<=",fx_op);
  *fx_op=(char) GreaterThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,">=",fx_op);
  *fx_op=(char) EqualOperator;
  (void) SubstituteString(&fx_info->expression,"==",fx_op);
  *fx_op=(char) NotEqualOperator;
  (void) SubstituteString(&fx_info->expression,"!=",fx_op);
  *fx_op=(char) LogicalAndOperator;
  (void) SubstituteString(&fx_info->expression,"&&",fx_op);
  *fx_op=(char) LogicalOrOperator;
  (void) SubstituteString(&fx_info->expression,"||",fx_op);
  *fx_op=(char) ExponentialNotation;
  (void) SubstituteString(&fx_info->expression,"**",fx_op);
  return(fx_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A d d   N o i s e   I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AddNoiseImage() adds random noise to the image.
%
%  The format of the AddNoiseImage method is:
%
%      Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
%        const double attenuate,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o noise_type:  The type of noise: Uniform, Gaussian, Multiplicative,
%      Impulse, Laplacian, or Poisson.
%
%    o attenuate:  attenuate the random distribution.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
  const double attenuate,ExceptionInfo *exception)
{
#define AddNoiseImageTag  "AddNoise/Image"

  CacheView
    *image_view,
    *noise_view;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when it succeeds. */
  noise_image=AccelerateAddNoiseImage(image,noise_type,attenuate,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  /*
    Add noise in each row.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    Only run rows in parallel when a random secret key is set (key != ~0UL
    disables threading so results are reproducible).
  */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,noise_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's RandomInfo */

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait noise_traits=GetPixelChannelTraits(noise_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (noise_traits == UndefinedPixelTrait))
          continue;
        if ((noise_traits & CopyPixelTrait) != 0)
          {
            /* channel excluded from the operation: copy it verbatim */
            SetPixelChannel(noise_image,channel,p[i],q);
            continue;
          }
        SetPixelChannel(noise_image,channel,ClampToQuantum(
          GenerateDifferentialNoise(random_info[id],p[i],noise_type,attenuate)),
          q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(noise_image);
    }
    sync=SyncCacheViewAuthenticPixels(noise_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AddNoiseImage)
#endif
        proceed=SetImageProgress(image,AddNoiseImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l u e S h i f t I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlueShiftImage() mutes the colors of the image to simulate a scene at
%  nighttime in the moonlight.
%
%  The format of the BlueShiftImage method is:
%
%      Image *BlueShiftImage(const Image *image,const double factor,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o factor: the shift factor.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
  ExceptionInfo *exception)
{
#define BlueShiftImageTag  "BlueShift/Image"

  CacheView
    *image_view,
    *shift_view;

  Image
    *shift_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate blue shift image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  shift_image=CloneImage(image,0,0,MagickTrue,exception);
  if (shift_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(shift_image,DirectClass,exception) == MagickFalse)
    {
      shift_image=DestroyImage(shift_image);
      return((Image *) NULL);
    }
  /*
    Blue-shift DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  shift_view=AcquireAuthenticCacheView(shift_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,shift_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    PixelInfo
      pixel;

    Quantum
      quantum;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Blend each channel first toward the minimum of R/G/B, then toward
        the maximum, each time weighted by the shift factor.
      */
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) < quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) < quantum)
        quantum=GetPixelBlue(image,p);
      pixel.red=0.5*(GetPixelRed(image,p)+factor*quantum);
      pixel.green=0.5*(GetPixelGreen(image,p)+factor*quantum);
      pixel.blue=0.5*(GetPixelBlue(image,p)+factor*quantum);
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) > quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) > quantum)
        quantum=GetPixelBlue(image,p);
      pixel.red=0.5*(pixel.red+factor*quantum);
      pixel.green=0.5*(pixel.green+factor*quantum);
      pixel.blue=0.5*(pixel.blue+factor*quantum);
      SetPixelRed(shift_image,ClampToQuantum(pixel.red),q);
      SetPixelGreen(shift_image,ClampToQuantum(pixel.green),q);
      SetPixelBlue(shift_image,ClampToQuantum(pixel.blue),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(shift_image);
    }
    sync=SyncCacheViewAuthenticPixels(shift_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BlueShiftImage)
#endif
        proceed=SetImageProgress(image,BlueShiftImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  shift_view=DestroyCacheView(shift_view);
  if (status == MagickFalse)
    shift_image=DestroyImage(shift_image);
  return(shift_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C h a r c o a l I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CharcoalImage() creates a new image that is a copy of an existing one with
%  the edge highlighted.  It allocates the memory necessary for the new Image
%  structure and returns a pointer to the new image.
%
%  The format of the CharcoalImage method is:
%
%      Image *CharcoalImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *edges,
    *sketch,
    *source;

  /*
    Validate arguments; a charcoal sketch is edge-detect, blur, normalize,
    negate, then grayscale of a clone of the input image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  source=CloneImage(image,0,0,MagickTrue,exception);
  if (source == (Image *) NULL)
    return((Image *) NULL);
  edges=EdgeImage(source,radius,exception);
  source=DestroyImage(source);
  if (edges == (Image *) NULL)
    return((Image *) NULL);
  sketch=BlurImage(edges,radius,sigma,exception);
  edges=DestroyImage(edges);
  if (sketch == (Image *) NULL)
    return((Image *) NULL);
  /*
    Stretch the edge response to full range, invert it (dark strokes on a
    light ground), and collapse to gray using the image's intensity method.
  */
  (void) NormalizeImage(sketch,exception);
  (void) NegateImage(sketch,MagickFalse,exception);
  (void) GrayscaleImage(sketch,image->intensity,exception);
  return(sketch);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o l o r i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorizeImage() blends the fill color with each pixel in the image.
%  A percentage blend is specified with opacity.  Control the application
%  of different color components by specifying a different percentage for
%  each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
%  The format of the ColorizeImage method is:
%
%      Image *ColorizeImage(const Image *image,const char *blend,
%        const PixelInfo *colorize,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o blend:  A character string indicating the level of blending as a
%      percentage.
%
%    o colorize: A color value.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorizeImage(const Image *image,const char *blend,
  const PixelInfo *colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag  "Colorize/Image"
/* Linear blend of one channel value toward the fill color, weighted by a
   per-channel percentage (0 = keep pixel, 100 = pure fill). */
#define Colorize(pixel,blend_percentage,colorize) \
  (((pixel)*(100.0-(blend_percentage))+(colorize)*(blend_percentage))/100.0)

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  Image
    *colorize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  PixelInfo
    blend_percentage;

  ssize_t
    y;

  /*
    Allocate colorized image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  colorize_image=CloneImage(image,0,0,MagickTrue,exception);
  if (colorize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(colorize_image,DirectClass,exception) == MagickFalse)
    {
      colorize_image=DestroyImage(colorize_image);
      return((Image *) NULL);
    }
  /* A gray clone cannot carry a colored fill; promote to sRGB first. */
  if ((IsGrayColorspace(colorize_image->colorspace) != MagickFalse) ||
      (IsPixelInfoGray(colorize) != MagickFalse))
    (void) SetImageColorspace(colorize_image,sRGBColorspace,exception);
  /* If the fill has alpha but the clone does not, enable an opaque alpha
     channel so the alpha blend below has something to write into. */
  if ((colorize_image->alpha_trait == UndefinedPixelTrait) &&
      (colorize->alpha_trait != UndefinedPixelTrait))
    (void) SetImageAlpha(colorize_image,OpaqueAlpha,exception);
  if (blend == (const char *) NULL)
    return(colorize_image);  /* no blend string: return the untinted clone */
  /*
    Parse the blend geometry: rho applies to every channel; sigma/xi/psi
    (and chi for CMYK) override green/blue/alpha (black) individually.
    NOTE(review): alpha defaults to TransparentAlpha — presumably a 0%
    blend unless psi/chi is given; confirm against documented behavior.
  */
  GetPixelInfo(colorize_image,&blend_percentage);
  flags=ParseGeometry(blend,&geometry_info);
  blend_percentage.red=geometry_info.rho;
  blend_percentage.green=geometry_info.rho;
  blend_percentage.blue=geometry_info.rho;
  blend_percentage.black=geometry_info.rho;
  blend_percentage.alpha=(MagickRealType) TransparentAlpha;
  if ((flags & SigmaValue) != 0)
    blend_percentage.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    blend_percentage.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    blend_percentage.alpha=geometry_info.psi;
  if (blend_percentage.colorspace == CMYKColorspace)
    {
      /* In CMYK, psi names the black channel and chi names alpha. */
      if ((flags & PsiValue) != 0)
        blend_percentage.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        blend_percentage.alpha=geometry_info.chi;
    }
  /*
    Colorize DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(colorize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(colorize_image,colorize_image,colorize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) colorize_image->rows; y++)
  {
    MagickBooleanType
      sync;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,colorize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) colorize_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(colorize_image); i++)
      {
        PixelTrait
          traits = GetPixelChannelTraits(colorize_image,(PixelChannel) i);

        /* Skip undefined channels and channels flagged copy-only. */
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        SetPixelChannel(colorize_image,(PixelChannel) i,ClampToQuantum(
          Colorize(q[i],GetPixelInfoChannel(&blend_percentage,(PixelChannel) i),
          GetPixelInfoChannel(colorize,(PixelChannel) i))),q);
      }
      q+=GetPixelChannels(colorize_image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorizeImage)
#endif
        proceed=SetImageProgress(image,ColorizeImageTag,progress++,
          colorize_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    colorize_image=DestroyImage(colorize_image);  /* any row failed: NULL out */
  return(colorize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o l o r M a t r i x I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorMatrixImage() applies color transformation to an image. This method
%  permits saturation changes, hue rotation, luminance to alpha, and various
%  other effects.  Although variable-sized transformation matrices can be used,
%  typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
%  (or RGBA with offsets).  The matrix is similar to those used by Adobe Flash
%  except offsets are in column 6 rather than 5 (in support of CMYKA images)
%  and offsets are normalized (divide Flash offset by 255).
%
%  The format of the ColorMatrixImage method is:
%
%      Image *ColorMatrixImage(const Image *image,
%        const KernelInfo *color_matrix,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o color_matrix:  the color matrix.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/* FUTURE: modify to make use of a MagickMatrix Mutliply function
   That should be provided in "matrix.c"
   (ASIDE: actually distorts should do this too but currently doesn't)
*/
MagickExport Image *ColorMatrixImage(const Image *image,
  const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag  "ColorMatrix/Image"

  CacheView
    *color_view,
    *image_view;

  /* Start from the 6x6 identity; the user matrix overwrites the top-left
     portion, so missing rows/columns leave channels unchanged. */
  double
    ColorMatrix[6][6] =
    {
      { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
    };

  Image
    *color_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    u,
    v,
    y;

  /*
    Map given color_matrix, into a 6x6 matrix RGBKA and a constant
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  i=0;
  for (v=0; v < (ssize_t) color_matrix->height; v++)
    for (u=0; u < (ssize_t) color_matrix->width; u++)
    {
      /* Values beyond 6x6 are consumed (i advances) but ignored. */
      if ((v < 6) && (u < 6))
        ColorMatrix[v][u]=color_matrix->values[i];
      i++;
    }
  /*
    Initialize color image.
  */
  color_image=CloneImage(image,0,0,MagickTrue,exception);
  if (color_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(color_image,DirectClass,exception) == MagickFalse)
    {
      color_image=DestroyImage(color_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      /* Log the effective 6x6 matrix, one row per line. */
      char
        format[MagickPathExtent],
        *message;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  ColorMatrix image with color matrix:");
      message=AcquireString("");
      for (v=0; v < 6; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < 6; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",
            ColorMatrix[v][u]);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    Apply the ColorMatrix to image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,color_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        v;

      size_t
        height;

      GetPixelInfoPixel(image,p,&pixel);
      /* Only the first min(height,6) output rows are computed. */
      height=color_matrix->height > 6 ? 6UL : color_matrix->height;
      for (v=0; v < (ssize_t) height; v++)
      {
        double
          sum;

        /* Row v of the matrix dotted with (R,G,B[,K][,A],1):
           column 3 is black (CMYK only), column 4 alpha (if present),
           column 5 a constant offset scaled by QuantumRange. */
        sum=ColorMatrix[v][0]*GetPixelRed(image,p)+ColorMatrix[v][1]*
          GetPixelGreen(image,p)+ColorMatrix[v][2]*GetPixelBlue(image,p);
        if (image->colorspace == CMYKColorspace)
          sum+=ColorMatrix[v][3]*GetPixelBlack(image,p);
        if (image->alpha_trait != UndefinedPixelTrait)
          sum+=ColorMatrix[v][4]*GetPixelAlpha(image,p);
        sum+=QuantumRange*ColorMatrix[v][5];
        switch (v)
        {
          case 0: pixel.red=sum; break;
          case 1: pixel.green=sum; break;
          case 2: pixel.blue=sum; break;
          case 3: pixel.black=sum; break;
          case 4: pixel.alpha=sum; break;
          default: break;
        }
      }
      SetPixelViaPixelInfo(color_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(color_image);
    }
    if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorMatrixImage)
#endif
        proceed=SetImageProgress(image,ColorMatrixImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  color_view=DestroyCacheView(color_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    color_image=DestroyImage(color_image);  /* any row failed: NULL out */
  return(color_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y F x I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
%  The format of the DestroyFxInfo method is:
%
%      ImageInfo *DestroyFxInfo(ImageInfo *fx_info)
%
%  A description of each parameter follows:
%
%    o fx_info: the fx info.
%
*/
MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  register ssize_t
    i;

  /*
    Release every resource owned by the FxInfo, then the structure itself.
    Returns NULL so callers can write fx_info=DestroyFxInfo(fx_info).
  */
  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  /* One cache view was acquired per image in the list; tear them down in
     reverse acquisition order. */
  i=(ssize_t) GetImageListLength(fx_info->images);
  while (--i >= 0)
    fx_info->view[i]=DestroyCacheView(fx_info->view[i]);
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  return(fx_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   F x E v a l u a t e C h a n n e l E x p r e s s i o n                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FxEvaluateChannelExpression() evaluates an expression and returns the
%  results.
%
%  The format of the FxEvaluateExpression method is:
%
%      double FxEvaluateChannelExpression(FxInfo *fx_info,
%        const PixelChannel channel,const ssize_t x,const ssize_t y,
%        double *alpha,Exceptioninfo *exception)
%      double FxEvaluateExpression(FxInfo *fx_info,
%        double *alpha,Exceptioninfo *exception)
%
%  A description of each parameter follows:
%
%    o fx_info: the fx info.
%
%    o channel: the channel.
%
%    o x,y: the pixel position.
%
%    o alpha: the result.
%
%    o exception: return any errors or warnings in this structure.
% */ static double FxChannelStatistics(FxInfo *fx_info,Image *image, PixelChannel channel,const char *symbol,ExceptionInfo *exception) { ChannelType channel_mask; char key[MagickPathExtent], statistic[MagickPathExtent]; const char *value; register const char *p; channel_mask=UndefinedChannel; for (p=symbol; (*p != '.') && (*p != '\0'); p++) ; if (*p == '.') { ssize_t option; option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1); if (option >= 0) { channel=(PixelChannel) option; channel_mask=SetPixelChannelMask(image,(ChannelType) (1UL << channel)); } } (void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image, (double) channel,symbol); value=(const char *) GetValueFromSplayTree(fx_info->symbols,key); if (value != (const char *) NULL) { if (channel_mask != UndefinedChannel) (void) SetPixelChannelMask(image,channel_mask); return(QuantumScale*StringToDouble(value,(char **) NULL)); } (void) DeleteNodeFromSplayTree(fx_info->symbols,key); if (LocaleNCompare(symbol,"depth",5) == 0) { size_t depth; depth=GetImageDepth(image,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",(double) depth); } if (LocaleNCompare(symbol,"kurtosis",8) == 0) { double kurtosis, skewness; (void) GetImageKurtosis(image,&kurtosis,&skewness,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",kurtosis); } if (LocaleNCompare(symbol,"maxima",6) == 0) { double maxima, minima; (void) GetImageRange(image,&minima,&maxima,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",maxima); } if (LocaleNCompare(symbol,"mean",4) == 0) { double mean, standard_deviation; (void) GetImageMean(image,&mean,&standard_deviation,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",mean); } if (LocaleNCompare(symbol,"minima",6) == 0) { double maxima, minima; (void) GetImageRange(image,&minima,&maxima,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",minima); } if 
(LocaleNCompare(symbol,"skewness",8) == 0) { double kurtosis, skewness; (void) GetImageKurtosis(image,&kurtosis,&skewness,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",skewness); } if (LocaleNCompare(symbol,"standard_deviation",18) == 0) { double mean, standard_deviation; (void) GetImageMean(image,&mean,&standard_deviation,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g", standard_deviation); } if (channel_mask != UndefinedChannel) (void) SetPixelChannelMask(image,channel_mask); (void) AddValueToSplayTree(fx_info->symbols,ConstantString(key), ConstantString(statistic)); return(QuantumScale*StringToDouble(statistic,(char **) NULL)); } static double FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t, const ssize_t,const char *,const size_t,double *,ExceptionInfo *); static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta) { if (beta != 0) return(FxGCD(beta,alpha % beta)); return(alpha); } static inline const char *FxSubexpression(const char *expression, ExceptionInfo *exception) { const char *subexpression; register ssize_t level; level=0; subexpression=expression; while ((*subexpression != '\0') && ((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL))) { if (strchr("(",(int) *subexpression) != (char *) NULL) level++; else if (strchr(")",(int) *subexpression) != (char *) NULL) level--; subexpression++; } if (*subexpression == '\0') (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnbalancedParenthesis","`%s'",expression); return(subexpression); } static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel, const ssize_t x,const ssize_t y,const char *expression,const size_t depth, ExceptionInfo *exception) { char *q, symbol[MagickPathExtent]; const char *p, *value; Image *image; MagickBooleanType status; PixelInfo pixel; double alpha, beta; PointInfo point; register ssize_t i; size_t level; p=expression; 
i=GetImageIndexInList(fx_info->images); level=0; point.x=(double) x; point.y=(double) y; if (isalpha((int) ((unsigned char) *(p+1))) == 0) { char *subexpression; subexpression=AcquireString(expression); if (strchr("suv",(int) *p) != (char *) NULL) { switch (*p) { case 's': default: { i=GetImageIndexInList(fx_info->images); break; } case 'u': i=0; break; case 'v': i=1; break; } p++; if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, depth,&beta,exception); i=(ssize_t) alpha; if (*p != '\0') p++; } if (*p == '.') p++; } if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0)) { p++; if (*p == '{') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '{') level++; else if (*p == '}') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, depth,&beta,exception); point.x=alpha; point.y=beta; if (*p != '\0') p++; } else if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, depth,&beta,exception); point.x+=alpha; point.y+=beta; if (*p != '\0') p++; } if (*p == '.') p++; } subexpression=DestroyString(subexpression); } image=GetImageFromList(fx_info->images,i); if (image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "NoSuchImage","`%s'",expression); return(0.0); } i=GetImageIndexInList(image); GetPixelInfo(image,&pixel); status=InterpolatePixelInfo(image,fx_info->view[i],image->interpolate, point.x,point.y,&pixel,exception); (void) status; if ((strlen(p) > 2) && (LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) && (LocaleCompare(p,"luminance") != 0) && 
(LocaleCompare(p,"hue") != 0) && (LocaleCompare(p,"saturation") != 0) && (LocaleCompare(p,"lightness") != 0)) { char name[MagickPathExtent]; (void) CopyMagickString(name,p,MagickPathExtent); for (q=name+(strlen(name)-1); q > name; q--) { if (*q == ')') break; if (*q == '.') { *q='\0'; break; } } if ((strlen(name) > 2) && (GetValueFromSplayTree(fx_info->symbols,name) == (const char *) NULL)) { PixelInfo *color; color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name); if (color != (PixelInfo *) NULL) { pixel=(*color); p+=strlen(name); } else { MagickBooleanType status; status=QueryColorCompliance(name,AllCompliance,&pixel, fx_info->exception); if (status != MagickFalse) { (void) AddValueToSplayTree(fx_info->colors,ConstantString( name),ClonePixelInfo(&pixel)); p+=strlen(name); } } } } (void) CopyMagickString(symbol,p,MagickPathExtent); StripString(symbol); if (*symbol == '\0') { switch (channel) { case RedPixelChannel: return(QuantumScale*pixel.red); case GreenPixelChannel: return(QuantumScale*pixel.green); case BluePixelChannel: return(QuantumScale*pixel.blue); case BlackPixelChannel: { if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), ImageError,"ColorSeparatedImageRequired","`%s'", image->filename); return(0.0); } return(QuantumScale*pixel.black); } case AlphaPixelChannel: { if (pixel.alpha_trait == UndefinedPixelTrait) return(1.0); alpha=(double) (QuantumScale*pixel.alpha); return(alpha); } case IndexPixelChannel: return(0.0); case IntensityPixelChannel: { Quantum quantum_pixel[MaxPixelChannels]; SetPixelViaPixelInfo(image,&pixel,quantum_pixel); return(QuantumScale*GetPixelIntensity(image,quantum_pixel)); } default: break; } (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",p); return(0.0); } switch (*symbol) { case 'A': case 'a': { if (LocaleCompare(symbol,"a") == 0) return((QuantumScale*pixel.alpha)); break; } case 'B': case 'b': { if 
(LocaleCompare(symbol,"b") == 0) return(QuantumScale*pixel.blue); break; } case 'C': case 'c': { if (LocaleNCompare(symbol,"channel",7) == 0) { GeometryInfo channel_info; MagickStatusType flags; flags=ParseGeometry(symbol+7,&channel_info); if (image->colorspace == CMYKColorspace) switch (channel) { case CyanPixelChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case MagentaPixelChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case YellowPixelChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case BlackPixelChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } case AlphaPixelChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } default: return(0.0); } switch (channel) { case RedPixelChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case GreenPixelChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case BluePixelChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case BlackPixelChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } case AlphaPixelChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } default: return(0.0); } } if (LocaleCompare(symbol,"c") == 0) return(QuantumScale*pixel.red); break; } case 'D': case 'd': { if (LocaleNCompare(symbol,"depth",5) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'G': case 'g': { if (LocaleCompare(symbol,"g") == 0) return(QuantumScale*pixel.green); break; } case 'K': case 'k': { if (LocaleNCompare(symbol,"kurtosis",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"k") == 0) { if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ColorSeparatedImageRequired","`%s'", image->filename); 
return(0.0); } return(QuantumScale*pixel.black); } break; } case 'H': case 'h': { if (LocaleCompare(symbol,"h") == 0) return((double) image->rows); if (LocaleCompare(symbol,"hue") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(hue); } break; } case 'I': case 'i': { if ((LocaleCompare(symbol,"image.depth") == 0) || (LocaleCompare(symbol,"image.minima") == 0) || (LocaleCompare(symbol,"image.maxima") == 0) || (LocaleCompare(symbol,"image.mean") == 0) || (LocaleCompare(symbol,"image.kurtosis") == 0) || (LocaleCompare(symbol,"image.skewness") == 0) || (LocaleCompare(symbol,"image.standard_deviation") == 0)) return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception)); if (LocaleCompare(symbol,"image.resolution.x") == 0) return(image->resolution.x); if (LocaleCompare(symbol,"image.resolution.y") == 0) return(image->resolution.y); if (LocaleCompare(symbol,"intensity") == 0) { Quantum quantum_pixel[MaxPixelChannels]; SetPixelViaPixelInfo(image,&pixel,quantum_pixel); return(QuantumScale*GetPixelIntensity(image,quantum_pixel)); } if (LocaleCompare(symbol,"i") == 0) return((double) x); break; } case 'J': case 'j': { if (LocaleCompare(symbol,"j") == 0) return((double) y); break; } case 'L': case 'l': { if (LocaleCompare(symbol,"lightness") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(lightness); } if (LocaleCompare(symbol,"luma") == 0) { double luma; luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue; return(QuantumScale*luma); } if (LocaleCompare(symbol,"luminance") == 0) { double luminence; luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue; return(QuantumScale*luminence); } break; } case 'M': case 'm': { if (LocaleNCompare(symbol,"maxima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"mean",4) == 0) 
return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"minima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"m") == 0) return(QuantumScale*pixel.green); break; } case 'N': case 'n': { if (LocaleCompare(symbol,"n") == 0) return((double) GetImageListLength(fx_info->images)); break; } case 'O': case 'o': { if (LocaleCompare(symbol,"o") == 0) return(QuantumScale*pixel.alpha); break; } case 'P': case 'p': { if (LocaleCompare(symbol,"page.height") == 0) return((double) image->page.height); if (LocaleCompare(symbol,"page.width") == 0) return((double) image->page.width); if (LocaleCompare(symbol,"page.x") == 0) return((double) image->page.x); if (LocaleCompare(symbol,"page.y") == 0) return((double) image->page.y); break; } case 'Q': case 'q': { if (LocaleCompare(symbol,"quality") == 0) return((double) image->quality); break; } case 'R': case 'r': { if (LocaleCompare(symbol,"resolution.x") == 0) return(image->resolution.x); if (LocaleCompare(symbol,"resolution.y") == 0) return(image->resolution.y); if (LocaleCompare(symbol,"r") == 0) return(QuantumScale*pixel.red); break; } case 'S': case 's': { if (LocaleCompare(symbol,"saturation") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(saturation); } if (LocaleNCompare(symbol,"skewness",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"standard_deviation",18) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'T': case 't': { if (LocaleCompare(symbol,"t") == 0) return((double) GetImageIndexInList(fx_info->images)); break; } case 'W': case 'w': { if (LocaleCompare(symbol,"w") == 0) return((double) image->columns); break; } case 'Y': case 'y': { if (LocaleCompare(symbol,"y") == 0) return(QuantumScale*pixel.blue); break; } case 'Z': case 'z': { if 
  (LocaleCompare(symbol,"z") == 0)
        return((double) GetImageDepth(image,fx_info->exception));
      break;
    }
    default:
      break;
  }
  /*
    Not a built-in symbol: fall back to the user-defined symbol table.
  */
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (value != (const char *) NULL)
    return(StringToDouble(value,(char **) NULL));
  (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
    "UnableToParseExpression","`%s'",symbol);
  return(0.0);
}

/*
  FxOperatorPrecedence() scans expression and returns a pointer to the
  operator at the outermost nesting level on which the expression should be
  split (the one with the lowest binding precedence seen so far), or NULL
  when no suitable operator is found.
*/
static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    ExponentialNotationPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  } FxPrecedence;

  FxPrecedence
    precedence,
    target;

  register const char
    *subexpression;

  register int
    c;  /* previously scanned character; -1 before the first iteration */

  size_t
    level;  /* {} / [] nesting depth; operators only count at level 0 */

  c=(-1);
  level=0;
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while ((c != '\0') && (*expression != '\0'))
  {
    precedence=UndefinedPrecedence;
    if ((isspace((int) ((unsigned char) *expression)) != 0) ||
        (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /*
      Skip over function names and literals whose spelling contains
      characters (e.g. '+', '-', hex digits) that would otherwise be
      mistaken for operators.
    */
    switch (*expression)
    {
      case 'A':
      case 'a':
      {
#if defined(MAGICKCORE_HAVE_ACOSH)
        if (LocaleNCompare(expression,"acosh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
        if (LocaleNCompare(expression,"asinh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
        if (LocaleNCompare(expression,"atanh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
        if (LocaleNCompare(expression,"atan2",5) == 0)
          {
            expression+=5;
            break;
          }
        break;
      }
      case 'E':
      case 'e':
      {
        if ((isdigit(c) != 0) &&
            ((LocaleNCompare(expression,"E+",2) == 0) ||
             (LocaleNCompare(expression,"E-",2) == 0)))
          {
            expression+=2;  /* scientific notation */
            break;
          }
      }  /* FALLTHROUGH into 'J' (no break in the original source) */
      case 'J':
      case 'j':
      {
        if ((LocaleNCompare(expression,"j0",2) == 0) ||
            (LocaleNCompare(expression,"j1",2) == 0))
          {
            expression+=2;
            break;
          }
        break;
      }
      case '#':
      {
        /* hexadecimal color constant, e.g. #aabbcc */
        while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
          expression++;
        break;
      }
      default:
        break;
    }
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    if (level == 0)
      switch ((unsigned char) *expression)
      {
        case '~':
        case '!':
        {
          precedence=BitwiseComplementPrecedence;
          break;
        }
        case '^':
        case '@':
        {
          precedence=ExponentPrecedence;
          break;
        }
        default:
        {
          /* implied multiplication, e.g. "2u" or ")(": note this default
             label deliberately precedes the remaining case labels */
          if (((c != 0) && ((isdigit(c) != 0) ||
               (strchr(")",c) != (char *) NULL))) &&
              (((islower((int) ((unsigned char) *expression)) != 0) ||
               (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) ||
               ((isdigit(c) == 0) &&
                (isdigit((int) ((unsigned char) *expression)) != 0))) &&
              (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL))
            precedence=MultiplyPrecedence;
          break;
        }
        case '*':
        case '/':
        case '%':
        {
          precedence=MultiplyPrecedence;
          break;
        }
        case '+':
        case '-':
        {
          /* only binary when the previous character was an operand */
          if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
              (isalpha(c) != 0))
            precedence=AdditionPrecedence;
          break;
        }
        case LeftShiftOperator:
        case RightShiftOperator:
        {
          precedence=ShiftPrecedence;
          break;
        }
        case '<':
        case LessThanEqualOperator:
        case GreaterThanEqualOperator:
        case '>':
        {
          precedence=RelationalPrecedence;
          break;
        }
        case EqualOperator:
        case NotEqualOperator:
        {
          precedence=EquivalencyPrecedence;
          break;
        }
        case '&':
        {
          precedence=BitwiseAndPrecedence;
          break;
        }
        case '|':
        {
          precedence=BitwiseOrPrecedence;
          break;
        }
        case LogicalAndOperator:
        {
          precedence=LogicalAndPrecedence;
          break;
        }
        case LogicalOrOperator:
        {
          precedence=LogicalOrPrecedence;
          break;
        }
        case ExponentialNotation:
        {
          precedence=ExponentialNotationPrecedence;
          break;
        }
        case ':':
        case '?':
        {
          precedence=TernaryPrecedence;
          break;
        }
        case '=':
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case ',':
        {
          precedence=CommaPrecedence;
          break;
        }
        case ';':
        {
          precedence=SeparatorPrecedence;
          break;
        }
      }
    if ((precedence == BitwiseComplementPrecedence) ||
        (precedence == TernaryPrecedence) ||
        (precedence == AssignmentPrecedence))
      {
        if (precedence > target)
          {
            /*
              Right-to-left associativity.
            */
            target=precedence;
            subexpression=expression;
          }
      }
    else
      if (precedence >= target)
        {
          /*
            Left-to-right associativity.
          */
          target=precedence;
          subexpression=expression;
        }
    if (strchr("(",(int) *expression) != (char *) NULL)
      expression=FxSubexpression(expression,exception);
    c=(int) (*expression++);
  }
  return(subexpression);
}

/*
  FxEvaluateSubexpression() recursively evaluates expression for the given
  channel at pixel (x,y).  The secondary result (e.g. the right-hand
  argument of two-argument functions such as atan2/pow) is returned in
  *beta.  depth bounds the recursion.
*/
static double FxEvaluateSubexpression(FxInfo *fx_info,
  const PixelChannel channel,const ssize_t x,const ssize_t y,
  const char *expression,const size_t depth,double *beta,
  ExceptionInfo *exception)
{
#define FxMaxParenthesisDepth 58
#define FxMaxSubexpressionDepth 200
#define FxReturn(value) \
{ \
  subexpression=DestroyString(subexpression); \
  return(value); \
}

  char
    *q,
    *subexpression;

  double
    alpha,
    gamma;

  register const char
    *p;

  *beta=0.0;
  subexpression=AcquireString(expression);
  *subexpression='\0';
  if (depth > FxMaxSubexpressionDepth)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",expression);
      FxReturn(0.0);
    }
  if (exception->severity >= ErrorException)
    FxReturn(0.0);
  while (isspace((int) ((unsigned char) *expression)) != 0)
    expression++;
  if (*expression == '\0')
    FxReturn(0.0);
  p=FxOperatorPrecedence(expression,exception);
  if (p != (const char *) NULL)
    {
      /*
        Split at the operator with the lowest outer-level precedence;
        alpha is the left-hand subexpression's value.
      */
      (void) CopyMagickString(subexpression,expression,(size_t)
        (p-expression+1));
      alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
        beta,exception);
      switch ((unsigned char) *p)
      {
        case '~':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
            exception);
          *beta=(double) (~(size_t) *beta);
          FxReturn(*beta);
        }
        case '!':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
            exception);
          FxReturn(*beta == 0.0 ?
            1.0 : 0.0);
        }
        case '^':
        {
          *beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p,
            depth+1,beta,exception));
          FxReturn(*beta);
        }
        case '*':
        case ExponentialNotation:
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
            exception);
          FxReturn(alpha*(*beta));
        }
        case '/':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
            exception);
          if (*beta == 0.0)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"DivideByZero","`%s'",expression);
              FxReturn(0.0);
            }
          FxReturn(alpha/(*beta));
        }
        case '%':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
            exception);
          *beta=fabs(floor((*beta)+0.5));  /* modulus is rounded to integer */
          if (*beta == 0.0)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"DivideByZero","`%s'",expression);
              FxReturn(0.0);
            }
          FxReturn(fmod(alpha,*beta));
        }
        case '+':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
            exception);
          FxReturn(alpha+(*beta));
        }
        case '-':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
            exception);
          FxReturn(alpha-(*beta));
        }
        case LeftShiftOperator:
        {
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
            exception);
          /* guard: shifting by >= bit-width of size_t is undefined */
          if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ShiftCountOverflow","`%s'",subexpression);
              FxReturn(0.0);
            }
          *beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5));
          FxReturn(*beta);
        }
        case RightShiftOperator:
        {
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
            exception);
          if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ShiftCountOverflow","`%s'",subexpression);
              FxReturn(0.0);
            }
          *beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5));
          FxReturn(*beta);
        }
        case '<':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
            exception);
          FxReturn(alpha < *beta ?
            1.0 : 0.0);
        }
        case LessThanEqualOperator:
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
            exception);
          FxReturn(alpha <= *beta ? 1.0 : 0.0);
        }
        case '>':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
            exception);
          FxReturn(alpha > *beta ? 1.0 : 0.0);
        }
        case GreaterThanEqualOperator:
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
            exception);
          FxReturn(alpha >= *beta ? 1.0 : 0.0);
        }
        case EqualOperator:
        {
          /* floating-point equality within MagickEpsilon */
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
            exception);
          FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0);
        }
        case NotEqualOperator:
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
            exception);
          FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0);
        }
        case '&':
        {
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
            exception);
          *beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5));
          FxReturn(*beta);
        }
        case '|':
        {
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
            exception);
          *beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5));
          FxReturn(*beta);
        }
        case LogicalAndOperator:
        {
          /* short-circuit: right side is not evaluated when alpha <= 0 */
          p++;
          if (alpha <= 0.0)
            {
              *beta=0.0;
              FxReturn(*beta);
            }
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
            exception);
          *beta=(gamma > 0.0) ? 1.0 : 0.0;
          FxReturn(*beta);
        }
        case LogicalOrOperator:
        {
          /* short-circuit: right side is not evaluated when alpha > 0 */
          p++;
          if (alpha > 0.0)
            {
              *beta=1.0;
              FxReturn(*beta);
            }
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
            exception);
          *beta=(gamma > 0.0) ?
            1.0 : 0.0;
          FxReturn(*beta);
        }
        case '?':
        {
          /* ternary: evaluate only the selected branch */
          (void) CopyMagickString(subexpression,++p,MagickPathExtent);
          q=subexpression;
          p=StringToken(":",&q);
          if (q == (char *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"UnableToParseExpression","`%s'",subexpression);
              FxReturn(0.0);
            }
          if (fabs(alpha) >= MagickEpsilon)
            gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
              exception);
          else
            gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q,depth+1,beta,
              exception);
          FxReturn(gamma);
        }
        case '=':
        {
          char
            numeric[MagickPathExtent];

          /* assignment: left side must be purely alphabetic */
          q=subexpression;
          while (isalpha((int) ((unsigned char) *q)) != 0)
            q++;
          if (*q != '\0')
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"UnableToParseExpression","`%s'",subexpression);
              FxReturn(0.0);
            }
          ClearMagickException(exception);
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
            exception);
          /* store the value as text in the symbol table */
          (void) FormatLocaleString(numeric,MagickPathExtent,"%.20g",*beta);
          (void) DeleteNodeFromSplayTree(fx_info->symbols,subexpression);
          (void) AddValueToSplayTree(fx_info->symbols,ConstantString(
            subexpression),ConstantString(numeric));
          FxReturn(*beta);
        }
        case ',':
        {
          /* comma yields the LEFT operand (argument-list semantics) */
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
            exception);
          FxReturn(alpha);
        }
        case ';':
        {
          /* statement separator yields the RIGHT operand */
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
            exception);
          FxReturn(*beta);
        }
        default:
        {
          /* implied multiplication */
          gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,
            beta,exception);
          FxReturn(gamma);
        }
      }
    }
  if (strchr("(",(int) *expression) != (char *) NULL)
    {
      /* parenthesized subexpression: strip the outer parentheses */
      if (depth >= FxMaxParenthesisDepth)
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "ParenthesisNestedTooDeeply","`%s'",expression);
      (void) CopyMagickString(subexpression,expression+1,MagickPathExtent);
      if (strlen(subexpression) != 0)
        subexpression[strlen(subexpression)-1]='\0';
      gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
        beta,exception);
      FxReturn(gamma);
    }
  /*
    No binary operator: unary operator, function call, constant, or symbol.
  */
  switch (*expression)
  {
    case '+':
    {
      gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
        beta,exception);
      FxReturn(1.0*gamma);
    }
    case '-':
    {
      gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
        beta,exception);
      FxReturn(-1.0*gamma);
    }
    case '~':
    {
      gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
        beta,exception);
      FxReturn((double) (~(size_t) (gamma+0.5)));
    }
    case 'A':
    case 'a':
    {
      if (LocaleNCompare(expression,"abs",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
            depth+1,beta,exception);
          FxReturn(fabs(alpha));
        }
#if defined(MAGICKCORE_HAVE_ACOSH)
      if (LocaleNCompare(expression,"acosh",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
            depth+1,beta,exception);
          FxReturn(acosh(alpha));
        }
#endif
      if (LocaleNCompare(expression,"acos",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
            depth+1,beta,exception);
          FxReturn(acos(alpha));
        }
#if defined(MAGICKCORE_HAVE_J1)
      if (LocaleNCompare(expression,"airy",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
            depth+1,beta,exception);
          if (alpha == 0.0)
            FxReturn(1.0);
          gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha);
          FxReturn(gamma*gamma);
        }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
      if (LocaleNCompare(expression,"asinh",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
            depth+1,beta,exception);
          FxReturn(asinh(alpha));
        }
#endif
      if (LocaleNCompare(expression,"asin",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
            depth+1,beta,exception);
          FxReturn(asin(alpha));
        }
      if (LocaleNCompare(expression,"alt",3) == 0)
        {
          /* alternate sign: -1 for odd integers, +1 for even */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
            depth+1,beta,exception);
          FxReturn(((ssize_t) alpha) & 0x01 ?
            -1.0 : 1.0);
        }
      if (LocaleNCompare(expression,"atan2",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
            depth+1,beta,exception);
          FxReturn(atan2(alpha,*beta));
        }
#if defined(MAGICKCORE_HAVE_ATANH)
      if (LocaleNCompare(expression,"atanh",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
            depth+1,beta,exception);
          FxReturn(atanh(alpha));
        }
#endif
      if (LocaleNCompare(expression,"atan",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
            depth+1,beta,exception);
          FxReturn(atan(alpha));
        }
      if (LocaleCompare(expression,"a") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(expression,"b") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'C':
    case 'c':
    {
      if (LocaleNCompare(expression,"ceil",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
            depth+1,beta,exception);
          FxReturn(ceil(alpha));
        }
      if (LocaleNCompare(expression,"clamp",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
            depth+1,beta,exception);
          if (alpha < 0.0)
            FxReturn(0.0);
          if (alpha > 1.0)
            FxReturn(1.0);
          FxReturn(alpha);
        }
      if (LocaleNCompare(expression,"cosh",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
            depth+1,beta,exception);
          FxReturn(cosh(alpha));
        }
      if (LocaleNCompare(expression,"cos",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
            depth+1,beta,exception);
          FxReturn(cos(alpha));
        }
      if (LocaleCompare(expression,"c") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(expression,"debug",5) == 0)
        {
          const char
            *type;

          /* print the value of the debugged subexpression to fx_info->file */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
            depth+1,beta,exception);
          if (fx_info->images->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanPixelChannel: type="cyan"; break;
              case MagentaPixelChannel:
              type="magenta"; break;
              case YellowPixelChannel: type="yellow"; break;
              case AlphaPixelChannel: type="opacity"; break;
              case BlackPixelChannel: type="black"; break;
              default: type="unknown"; break;
            }
          else
            switch (channel)
            {
              case RedPixelChannel: type="red"; break;
              case GreenPixelChannel: type="green"; break;
              case BluePixelChannel: type="blue"; break;
              case AlphaPixelChannel: type="opacity"; break;
              default: type="unknown"; break;
            }
          *subexpression='\0';
          if (strlen(expression) > 6)
            (void) CopyMagickString(subexpression,expression+6,
              MagickPathExtent);
          if (strlen(subexpression) > 1)
            subexpression[strlen(subexpression)-1]='\0';
          if (fx_info->file != (FILE *) NULL)
            (void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: "
              "%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type,
              subexpression,GetMagickPrecision(),alpha);
          FxReturn(0.0);
        }
      if (LocaleNCompare(expression,"drc",3) == 0)
        {
          /* dynamic range compression */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
            depth+1,beta,exception);
          FxReturn((alpha/(*beta*(alpha-1.0)+1.0)));
        }
      break;
    }
    case 'E':
    case 'e':
    {
      if (LocaleCompare(expression,"epsilon") == 0)
        FxReturn(MagickEpsilon);
#if defined(MAGICKCORE_HAVE_ERF)
      if (LocaleNCompare(expression,"erf",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
            depth+1,beta,exception);
          FxReturn(erf(alpha));
        }
#endif
      if (LocaleNCompare(expression,"exp",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
            depth+1,beta,exception);
          FxReturn(exp(alpha));
        }
      if (LocaleCompare(expression,"e") == 0)
        FxReturn(2.7182818284590452354);
      break;
    }
    case 'F':
    case 'f':
    {
      if (LocaleNCompare(expression,"floor",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
            depth+1,beta,exception);
          FxReturn(floor(alpha));
        }
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleNCompare(expression,"gauss",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
            depth+1,beta,exception);
          gamma=exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI);
          FxReturn(gamma);
        }
      if (LocaleNCompare(expression,"gcd",3) == 0)
        {
          MagickOffsetType
            gcd;

          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
            depth+1,beta,exception);
          gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+
            0.5));
          FxReturn((double) gcd);
        }
      if (LocaleCompare(expression,"g") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(expression,"h") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      if (LocaleCompare(expression,"hue") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      if (LocaleNCompare(expression,"hypot",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
            depth+1,beta,exception);
          FxReturn(hypot(alpha,*beta));
        }
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleCompare(expression,"k") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'I':
    case 'i':
    {
      if (LocaleCompare(expression,"intensity") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      if (LocaleNCompare(expression,"int",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
            depth+1,beta,exception);
          FxReturn(floor(alpha));
        }
      if (LocaleNCompare(expression,"isnan",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
            depth+1,beta,exception);
          FxReturn((double) !!IsNaN(alpha));
        }
      if (LocaleCompare(expression,"i") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(expression,"j") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
#if defined(MAGICKCORE_HAVE_J0)
      if (LocaleNCompare(expression,"j0",2) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
            depth+1,beta,exception);
          FxReturn(j0(alpha));
        }
#endif
#if defined(MAGICKCORE_HAVE_J1)
      if (LocaleNCompare(expression,"j1",2) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
            depth+1,beta,exception);
          FxReturn(j1(alpha));
        }
#endif
#if defined(MAGICKCORE_HAVE_J1)
      if (LocaleNCompare(expression,"jinc",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
            depth+1,beta,exception);
          if (alpha == 0.0)
            FxReturn(1.0);
          gamma=(2.0*j1((MagickPI*alpha))/(MagickPI*alpha));
          FxReturn(gamma);
        }
#endif
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleNCompare(expression,"ln",2) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
            depth+1,beta,exception);
          FxReturn(log(alpha));
        }
      if (LocaleNCompare(expression,"logtwo",6) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
            depth+1,beta,exception);
          FxReturn(log10(alpha)/log10(2.0));
        }
      if (LocaleNCompare(expression,"log",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
            depth+1,beta,exception);
          FxReturn(log10(alpha));
        }
      if (LocaleCompare(expression,"lightness") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleCompare(expression,"MaxRGB") == 0)
        FxReturn(QuantumRange);
      /* "maxima"/"minima" fall through to the symbol-table lookup */
      if (LocaleNCompare(expression,"maxima",6) == 0)
        break;
      if (LocaleNCompare(expression,"max",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
            depth+1,beta,exception);
          FxReturn(alpha > *beta ? alpha : *beta);
        }
      if (LocaleNCompare(expression,"minima",6) == 0)
        break;
      if (LocaleNCompare(expression,"min",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
            depth+1,beta,exception);
          FxReturn(alpha < *beta ?
            alpha : *beta);
        }
      if (LocaleNCompare(expression,"mod",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
            depth+1,beta,exception);
          gamma=alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta);
          FxReturn(gamma);
        }
      if (LocaleCompare(expression,"m") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleNCompare(expression,"not",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
            depth+1,beta,exception);
          FxReturn((double) (alpha < MagickEpsilon));
        }
      if (LocaleCompare(expression,"n") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(expression,"Opaque") == 0)
        FxReturn(1.0);
      if (LocaleCompare(expression,"o") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(expression,"phi") == 0)
        FxReturn(MagickPHI);
      if (LocaleCompare(expression,"pi") == 0)
        FxReturn(MagickPI);
      if (LocaleNCompare(expression,"pow",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
            depth+1,beta,exception);
          FxReturn(pow(alpha,*beta));
        }
      if (LocaleCompare(expression,"p") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'Q':
    case 'q':
    {
      if (LocaleCompare(expression,"QuantumRange") == 0)
        FxReturn(QuantumRange);
      if (LocaleCompare(expression,"QuantumScale") == 0)
        FxReturn(QuantumScale);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleNCompare(expression,"rand",4) == 0)
        {
          /* serialized: random_info is shared across OpenMP threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp critical (MagickCore_FxEvaluateSubexpression)
#endif
          alpha=GetPseudoRandomValue(fx_info->random_info);
          FxReturn(alpha);
        }
      if (LocaleNCompare(expression,"round",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
            depth+1,beta,exception);
          FxReturn(floor(alpha+0.5));
        }
      if (LocaleCompare(expression,"r") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(expression,"saturation") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      if (LocaleNCompare(expression,"sign",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
            depth+1,beta,exception);
          FxReturn(alpha < 0.0 ? -1.0 : 1.0);
        }
      if (LocaleNCompare(expression,"sinc",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
            depth+1,beta,exception);
          if (alpha == 0)
            FxReturn(1.0);
          gamma=sin((MagickPI*alpha))/(MagickPI*alpha);
          FxReturn(gamma);
        }
      if (LocaleNCompare(expression,"sinh",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
            depth+1,beta,exception);
          FxReturn(sinh(alpha));
        }
      if (LocaleNCompare(expression,"sin",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
            depth+1,beta,exception);
          FxReturn(sin(alpha));
        }
      if (LocaleNCompare(expression,"sqrt",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
            depth+1,beta,exception);
          FxReturn(sqrt(alpha));
        }
      if (LocaleNCompare(expression,"squish",6) == 0)
        {
          /* logistic sigmoid */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
            depth+1,beta,exception);
          FxReturn((1.0/(1.0+exp(-alpha))));
        }
      if (LocaleCompare(expression,"s") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleNCompare(expression,"tanh",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
            depth+1,beta,exception);
          FxReturn(tanh(alpha));
        }
      if (LocaleNCompare(expression,"tan",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
            depth+1,beta,exception);
          FxReturn(tan(alpha));
        }
      if (LocaleCompare(expression,"Transparent") == 0)
        FxReturn(0.0);
      if (LocaleNCompare(expression,"trunc",5) == 0)
        {
          /* truncate toward zero */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
            depth+1,beta,exception);
          if (alpha >= 0.0)
            FxReturn(floor(alpha));
          FxReturn(ceil(alpha));
        }
      if (LocaleCompare(expression,"t") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'U':
    case 'u':
    {
      if (LocaleCompare(expression,"u") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'V':
    case 'v':
    {
      if (LocaleCompare(expression,"v") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleNCompare(expression,"while",5) == 0)
        {
          /* re-evaluate the body until its value drops below epsilon */
          do
          {
            alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
              depth+1,beta,exception);
          } while (fabs(alpha) >= MagickEpsilon);
          FxReturn(*beta);
        }
      if (LocaleCompare(expression,"w") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(expression,"y") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(expression,"z") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    default:
      break;
  }
  /*
    Fall back: numeric literal (with optional SI prefix) or symbol lookup.
    DestroyString() sets subexpression to NULL, so the subsequent FxReturn
    relinquishes a NULL pointer, which is a no-op.
  */
  subexpression=DestroyString(subexpression);
  q=(char *) expression;
  alpha=InterpretSiPrefixValue(expression,&q);
  if (q == expression)
    FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
  FxReturn(alpha);
}

/*
  FxEvaluateExpression() evaluates the expression for the gray channel at
  pixel (0,0).
*/
MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  return(status);
}

/*
  FxPreprocessExpression() evaluates the expression once with debug output
  suppressed (fx_info->file is temporarily cleared) to validate it.
*/
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  FILE
    *file;

  MagickBooleanType
    status;

  file=fx_info->file;
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  fx_info->file=file;
  return(status);
}

MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const PixelChannel
channel,const ssize_t x,const ssize_t y, double *alpha,ExceptionInfo *exception) { double beta; beta=0.0; *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0, &beta,exception); return(exception->severity == OptionError ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F x I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FxImage() applies a mathematical expression to the specified image. % % The format of the FxImage method is: % % Image *FxImage(const Image *image,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o expression: A mathematical expression. % % o exception: return any errors or warnings in this structure. % */ static FxInfo **DestroyFxThreadSet(FxInfo **fx_info) { register ssize_t i; assert(fx_info != (FxInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (fx_info[i] != (FxInfo *) NULL) fx_info[i]=DestroyFxInfo(fx_info[i]); fx_info=(FxInfo **) RelinquishMagickMemory(fx_info); return(fx_info); } static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression, ExceptionInfo *exception) { char *fx_expression; FxInfo **fx_info; double alpha; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info)); if (fx_info == (FxInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return((FxInfo **) NULL); } (void) memset(fx_info,0,number_threads*sizeof(*fx_info)); if (*expression != '@') fx_expression=ConstantString(expression); else fx_expression=FileToString(expression+1,~0UL,exception); for (i=0; i < (ssize_t) number_threads; i++) { MagickBooleanType status; 
fx_info[i]=AcquireFxInfo(image,fx_expression,exception); if (fx_info[i] == (FxInfo *) NULL) break; status=FxPreprocessExpression(fx_info[i],&alpha,exception); if (status == MagickFalse) break; } fx_expression=DestroyString(fx_expression); if (i < (ssize_t) number_threads) fx_info=DestroyFxThreadSet(fx_info); return(fx_info); } MagickExport Image *FxImage(const Image *image,const char *expression, ExceptionInfo *exception) { #define FxImageTag "Fx/Image" CacheView *fx_view, *image_view; FxInfo **magick_restrict fx_info; Image *fx_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); fx_info=AcquireFxThreadSet(image,expression,exception); if (fx_info == (FxInfo **) NULL) return((Image *) NULL); fx_image=CloneImage(image,0,0,MagickTrue,exception); if (fx_image == (Image *) NULL) { fx_info=DestroyFxThreadSet(fx_info); return((Image *) NULL); } if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse) { fx_info=DestroyFxThreadSet(fx_info); fx_image=DestroyImage(fx_image); return((Image *) NULL); } /* Fx image. 
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,fx_image,fx_image->rows,1)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's FxInfo */

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (fx_traits == UndefinedPixelTrait))
          continue;
        if ((fx_traits & CopyPixelTrait) != 0)
          {
            /* channel is not selected for update: copy it through */
            SetPixelChannel(fx_image,channel,p[i],q);
            continue;
          }
        alpha=0.0;
        (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha,
          exception);
        /* expression results are normalized [0,1]; scale to quantum range */
        q[i]=ClampToQuantum(QuantumRange*alpha);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(fx_image);
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FxImage)
#endif
        proceed=SetImageProgress(image,FxImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view=DestroyCacheView(fx_view);
  image_view=DestroyCacheView(image_view);
  fx_info=DestroyFxThreadSet(fx_info);
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     I m p l o d e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ImplodeImage() creates a new image that is a copy of an existing
%  one with the image pixels "implode" by the specified percentage.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the ImplodeImage method is:
%
%      Image *ImplodeImage(const Image *image,const double amount,
%        const PixelInterpolateMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o implode_image: Method ImplodeImage returns a pointer to the image
%      after it is implode.  A null image is returned if there is a memory
%      shortage.
%
%    o image: the image.
%
%    o amount: Define the extent of the implosion.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ImplodeImageTag "Implode/Image"

  CacheView
    *canvas_view,
    *implode_view,
    *interpolate_view;

  Image
    *canvas,
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    radius;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas->alpha_trait == UndefinedPixelTrait) &&
      (canvas->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlphaChannel(canvas,OpaqueAlphaChannel,exception);
  implode_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,
    exception);
  if (implode_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  /*
    Compute scaling factor.
  */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*canvas->columns;
  center.y=0.5*canvas->rows;
  radius=center.x;
  if (canvas->columns > canvas->rows)
    scale.y=(double) canvas->columns/(double) canvas->rows;
  else
    if (canvas->columns < canvas->rows)
      {
        scale.x=(double) canvas->rows/(double) canvas->columns;
        radius=center.y;
      }
  /*
    Implode image.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas,exception);
  interpolate_view=AcquireVirtualCacheView(canvas,exception);
  implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas,implode_image,canvas->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        /* outside the effect radius: copy the pixel unchanged */
        for (i=0; i < (ssize_t) GetPixelChannels(canvas); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(canvas,i);
          PixelTrait traits = GetPixelChannelTraits(canvas,channel);
          PixelTrait implode_traits = GetPixelChannelTraits(implode_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (implode_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(implode_image,channel,p[i],q);
        }
      else
        {
          double
            factor;

          /*
            Implode the pixel.
          */
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin(MagickPI*sqrt((double) distance)/radius/2),-amount);
          /* sample the source at the radially displaced coordinate */
          status=InterpolatePixelChannels(canvas,interpolate_view,implode_image,
            method,(double) (factor*delta.x/scale.x+center.x),(double) (factor*
            delta.y/scale.y+center.y),q,exception);
          if (status == MagickFalse)
            break;
        }
      p+=GetPixelChannels(canvas);
      q+=GetPixelChannels(implode_image);
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ImplodeImage)
#endif
        proceed=SetImageProgress(canvas,ImplodeImageTag,progress++,
          canvas->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas=DestroyImage(canvas);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h I m a g e s                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  The MorphImages() method requires a minimum of two images.  The first
%  image is transformed into the second by a number of intervening images
%  as specified by frames.
%
%  The format of the MorphImage method is:
%
%      Image *MorphImages(const Image *image,const size_t number_frames,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o number_frames: Define the number of in-between image to generate.
%      The more in-between frames, the smoother the morph.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *MorphImages(const Image *image,const size_t number_frames, ExceptionInfo *exception) { #define MorphImageTag "Morph/Image" double alpha, beta; Image *morph_image, *morph_images; MagickBooleanType status; MagickOffsetType scene; register const Image *next; register ssize_t n; ssize_t y; /* Clone first frame in sequence. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); morph_images=CloneImage(image,0,0,MagickTrue,exception); if (morph_images == (Image *) NULL) return((Image *) NULL); if (GetNextImageInList(image) == (Image *) NULL) { /* Morph single image. */ for (n=1; n < (ssize_t) number_frames; n++) { morph_image=CloneImage(image,0,0,MagickTrue,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n, number_frames); if (proceed == MagickFalse) status=MagickFalse; } } return(GetFirstImageInList(morph_images)); } /* Morph image sequence. 
*/ status=MagickTrue; scene=0; next=image; for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next)) { for (n=0; n < (ssize_t) number_frames; n++) { CacheView *image_view, *morph_view; beta=(double) (n+1.0)/(double) (number_frames+1.0); alpha=1.0-beta; morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta* GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta* GetNextImageInList(next)->rows+0.5),next->filter,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } status=SetImageStorageClass(morph_image,DirectClass,exception); if (status == MagickFalse) { morph_image=DestroyImage(morph_image); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); morph_images=GetLastImageInList(morph_images); morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns, morph_images->rows,GetNextImageInList(next)->filter,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } image_view=AcquireVirtualCacheView(morph_image,exception); morph_view=AcquireAuthenticCacheView(morph_images,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(morph_image,morph_image,morph_image->rows,1) #endif for (y=0; y < (ssize_t) morph_images->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1, exception); q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) morph_images->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++) { PixelChannel channel = 
GetPixelChannelChannel(morph_image,i); PixelTrait traits = GetPixelChannelTraits(morph_image,channel); PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel); if ((traits == UndefinedPixelTrait) || (morph_traits == UndefinedPixelTrait)) continue; if ((morph_traits & CopyPixelTrait) != 0) { SetPixelChannel(morph_image,channel,p[i],q); continue; } SetPixelChannel(morph_image,channel,ClampToQuantum(alpha* GetPixelChannel(morph_images,channel,q)+beta*p[i]),q); } p+=GetPixelChannels(morph_image); q+=GetPixelChannels(morph_images); } sync=SyncCacheViewAuthenticPixels(morph_view,exception); if (sync == MagickFalse) status=MagickFalse; } morph_view=DestroyCacheView(morph_view); image_view=DestroyCacheView(image_view); morph_image=DestroyImage(morph_image); } if (n < (ssize_t) number_frames) break; /* Clone last frame in sequence. */ morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); morph_images=GetLastImageInList(morph_images); if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MorphImages) #endif proceed=SetImageProgress(image,MorphImageTag,scene, GetImageListLength(image)); if (proceed == MagickFalse) status=MagickFalse; } scene++; } if (GetNextImageInList(next) != (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } return(GetFirstImageInList(morph_images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P l a s m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PlasmaImage() initializes an image with plasma fractal values. 
The image
%  must be initialized with a base color and the random number generator
%  seeded before this method is called.
%
%  The format of the PlasmaImage method is:
%
%      MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
%        size_t attenuate,size_t depth,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o segment: Define the region to apply plasma fractals values.
%
%    o attenuate: Define the plasma attenuation factor.
%
%    o depth: Limit the plasma recursion depth.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  PlasmaPixel() perturbs a base pixel value by a random offset drawn from
  [-noise/2,+noise/2) and clamps the result to the valid quantum range.
*/
static inline Quantum PlasmaPixel(RandomInfo *random_info,
  const double pixel,const double noise)
{
  Quantum
    plasma;

  plasma=ClampToQuantum(pixel+noise*GetPseudoRandomValue(random_info)-
    noise/2.0);
  /* NOTE(review): ClampToQuantum() already bounds the value, so these range
     checks look redundant; kept as-is. */
  if (plasma <= 0)
    return((Quantum) 0);
  if (plasma >= QuantumRange)
    return(QuantumRange);
  return(plasma);
}

/*
  PlasmaImageProxy() recursively applies the plasma fractal to the region
  bounded by segment: while depth is nonzero it subdivides into quadrants
  (incrementing attenuate so the noise amplitude shrinks with recursion);
  at depth zero it writes the left/right/bottom/top/middle edge pixels as
  randomly perturbed averages of the region's corner pixels.
*/
static MagickBooleanType PlasmaImageProxy(Image *image,CacheView *image_view,
  CacheView *u_view,CacheView *v_view,RandomInfo *random_info,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  double
    plasma;

  register const Quantum
    *magick_restrict u,
    *magick_restrict v;

  register Quantum
    *magick_restrict q;

  register ssize_t
    i;

  ssize_t
    x,
    x_mid,
    y,
    y_mid;

  /* A degenerate (single point) segment needs no further work. */
  if ((fabs(segment->x2-segment->x1) <= MagickEpsilon) &&
      (fabs(segment->y2-segment->y1) <= MagickEpsilon))
    return(MagickTrue);
  if (depth != 0)
    {
      MagickBooleanType
        status;

      SegmentInfo
        local_info;

      /*
        Divide the area into quadrants and recurse.
      */
      depth--;
      attenuate++;
      x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
      y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
      /* Top-left quadrant. */
      local_info=(*segment);
      local_info.x2=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /* Bottom-left quadrant. */
      local_info=(*segment);
      local_info.y1=(double) y_mid;
      local_info.x2=(double) x_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /* Top-right quadrant. */
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /* Bottom-right quadrant; only its status is propagated. */
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y1=(double) y_mid;
      status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      return(status);
    }
  x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
  y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
  if ((fabs(segment->x1-x_mid) < MagickEpsilon) &&
      (fabs(segment->x2-x_mid) < MagickEpsilon) &&
      (fabs(segment->y1-y_mid) < MagickEpsilon) &&
      (fabs(segment->y2-y_mid) < MagickEpsilon))
    return(MagickFalse);
  /*
    Average pixels and apply plasma; noise amplitude falls off as the
    attenuation accumulated during recursion grows.
  */
  plasma=(double) QuantumRange/(2.0*attenuate);
  if ((fabs(segment->x1-x_mid) > MagickEpsilon) ||
      (fabs(segment->x2-x_mid) > MagickEpsilon))
    {
      /*
        Left pixel.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),1,1,
        exception);
      v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),1,1,
        exception);
      q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
      }
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      if (fabs(segment->x1-segment->x2) > MagickEpsilon)
        {
          /*
            Right pixel.
          */
          x=(ssize_t) ceil(segment->x2-0.5);
          u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->y1-y_mid) > MagickEpsilon) ||
      (fabs(segment->y2-y_mid) > MagickEpsilon))
    {
      /* NOTE(review): the second operand below re-checks y2 vs y_mid; by
         symmetry with the left/right case one might expect x2 vs x_mid --
         confirm against upstream before changing. */
      if ((fabs(segment->x1-x_mid) > MagickEpsilon) ||
          (fabs(segment->y2-y_mid) > MagickEpsilon))
        {
          /*
            Bottom pixel.
          */
          y=(ssize_t) ceil(segment->y2-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
      if (fabs(segment->y1-segment->y2) > MagickEpsilon)
        {
          /*
            Top pixel.
          */
          y=(ssize_t) ceil(segment->y1-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->x1-segment->x2) > MagickEpsilon) ||
      (fabs(segment->y1-segment->y2) > MagickEpsilon))
    {
      /*
        Middle pixel.
*/ x=(ssize_t) ceil(segment->x1-0.5); y=(ssize_t) ceil(segment->y1-0.5); u=GetCacheViewVirtualPixels(u_view,x,y,1,1,exception); x=(ssize_t) ceil(segment->x2-0.5); y=(ssize_t) ceil(segment->y2-0.5); v=GetCacheViewVirtualPixels(v_view,x,y,1,1,exception); q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception); if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickTrue); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma); } (void) SyncCacheViewAuthenticPixels(image_view,exception); } if ((fabs(segment->x2-segment->x1) < 3.0) && (fabs(segment->y2-segment->y1) < 3.0)) return(MagickTrue); return(MagickFalse); } MagickExport MagickBooleanType PlasmaImage(Image *image, const SegmentInfo *segment,size_t attenuate,size_t depth, ExceptionInfo *exception) { CacheView *image_view, *u_view, *v_view; MagickBooleanType status; RandomInfo *random_info; if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); u_view=AcquireVirtualCacheView(image,exception); v_view=AcquireVirtualCacheView(image,exception); random_info=AcquireRandomInfo(); status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment, attenuate,depth,exception); random_info=DestroyRandomInfo(random_info); v_view=DestroyCacheView(v_view); u_view=DestroyCacheView(u_view); image_view=DestroyCacheView(image_view); return(status); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P o l a r o i d I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PolaroidImage() simulates a Polaroid picture.
%
%  The format of the PolaroidImage method is:
%
%      Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
%        const char *caption,const double angle,
%        const PixelInterpolateMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o caption: the Polaroid caption.
%
%    o angle: Apply the effect along this angle.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const char *caption,const double angle,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;

  size_t
    height;

  ssize_t
    quantum;

  /*
    Simulate a Polaroid picture.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Frame border is 1/25th of the longest dimension, but at least 10 pixels.
  */
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  if (caption != (const char *) NULL)
    {
      char
        geometry[MagickPathExtent],
        *text;

      DrawInfo
        *annotate_info;

      ImageInfo
        *image_info;

      MagickBooleanType
        status;

      ssize_t
        count;

      TypeMetric
        metrics;

      /*
        Generate caption image.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      image_info=AcquireImageInfo();
      annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
      text=InterpretImageProperties(image_info,(Image *) image,caption,
        exception);
      image_info=DestroyImageInfo(image_info);
      if (text == (char *) NULL)
        {
          /*
            Fix: InterpretImageProperties() may return NULL; the original
            passed the NULL pointer on to CloneString()/DestroyString().
          */
          annotate_info=DestroyDrawInfo(annotate_info);
          caption_image=DestroyImage(caption_image);
          return((Image *) NULL);
        }
      (void) CloneString(&annotate_info->text,text);
      count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,&metrics,
        &text,exception);
      status=SetImageExtent(caption_image,image->columns,(size_t) ((count+1)*
        (metrics.ascent-metrics.descent)+0.5),exception);
      if (status == MagickFalse)
        caption_image=DestroyImage(caption_image);
      else
        {
          caption_image->background_color=image->border_color;
          (void) SetImageBackgroundColor(caption_image,exception);
          (void) CloneString(&annotate_info->text,text);
          (void) FormatLocaleString(geometry,MagickPathExtent,"+0+%.20g",
            metrics.ascent);
          if (annotate_info->gravity == UndefinedGravity)
            (void) CloneString(&annotate_info->geometry,AcquireString(
              geometry));
          (void) AnnotateImage(caption_image,annotate_info,exception);
          height+=caption_image->rows;
        }
      annotate_info=DestroyDrawInfo(annotate_info);
      text=DestroyString(text);
    }
  /*
    Compose the picture onto its white frame, caption below.
  */
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image,exception);
  (void) CompositeImage(picture_image,image,OverCompositeOp,MagickTrue,quantum,
    quantum,exception);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,caption_image,OverCompositeOp,
        MagickTrue,quantum,(ssize_t) (image->rows+3*quantum/2),exception);
      caption_image=DestroyImage(caption_image);
    }
  (void) QueryColorCompliance("none",AllCompliance,
    &picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel,exception);
  /*
    Bend the picture: rotate, wave, rotate back.
  */
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,method,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  /*
    Cast a shadow, flop it behind the picture, then rotate and trim.
  */
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);  /* DestroyImage() returns NULL */
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);  /* DestroyImage() returns NULL */
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,picture_image,OverCompositeOp,
    MagickTrue,(ssize_t) (-0.01*picture_image->columns/2.0),0L,exception);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorCompliance("none",AllCompliance,
    &polaroid_image->background_color,exception);
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e p i a T o n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SepiaToneImage() applies a special effect to the image, similar to the
%  effect achieved in a photo darkroom by sepia toning.  Threshold ranges from
%  0 to QuantumRange and is a measure of the extent of the sepia toning.  A
%  threshold of 80% is a good starting point for a reasonable tone.
%
%  The format of the SepiaToneImage method is:
%
%      Image *SepiaToneImage(const Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: the tone threshold.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define SepiaToneImageTag  "SepiaTone/Image"

  CacheView
    *image_view,
    *sepia_view;

  Image
    *sepia_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize sepia-toned image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sepia_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sepia_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse)
    {
      sepia_image=DestroyImage(sepia_image);
      return((Image *) NULL);
    }
  /*
    Tone each row of the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sepia_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity,
        tone;

      intensity=GetPixelIntensity(image,p);
      /* Red channel: brightens towards white above the threshold. */
      tone=intensity > threshold ? (double) QuantumRange : intensity+
        (double) QuantumRange-threshold;
      SetPixelRed(sepia_image,ClampToQuantum(tone),q);
      /* Green channel: same ramp but with a higher (7/6) threshold. */
      tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
        intensity+(double) QuantumRange-7.0*threshold/6.0;
      SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      /* Blue channel: darkened, clipped below threshold/6. */
      tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
      SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      tone=threshold/7.0;
      /* NOTE(review): these reads pass `image` together with the sepia_image
         pixel pointer q; the channel maps match because sepia_image is a
         clone of image, but confirm before changing. */
      if ((double) GetPixelGreen(image,q) < tone)
        SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      if ((double) GetPixelBlue(image,q) < tone)
        SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(sepia_image);
    }
    if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SepiaToneImage)
#endif
        proceed=SetImageProgress(image,SepiaToneImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sepia_view=DestroyCacheView(sepia_view);
  image_view=DestroyCacheView(image_view);
  (void) NormalizeImage(sepia_image,exception);
  (void) ContrastImage(sepia_image,MagickTrue,exception);
  if (status == MagickFalse)
    sepia_image=DestroyImage(sepia_image);
  return(sepia_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a d o w I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShadowImage() simulates a shadow from the specified image and returns it.
%
%  The format of the ShadowImage method is:
%
%      Image *ShadowImage(const Image *image,const double alpha,
%        const double sigma,const ssize_t x_offset,const ssize_t y_offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o alpha: percentage transparency.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o x_offset: the shadow x-offset.
%
%    o y_offset: the shadow y-offset.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadowImage(const Image *image,const double alpha,
  const double sigma,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define ShadowImageTag  "Shadow/Image"

  CacheView
    *image_view;

  ChannelType
    channel_mask;

  Image
    *border_image,
    *clone_image,
    *shadow_image;

  MagickBooleanType
    status;

  PixelInfo
    background_color;

  RectangleInfo
    border_info;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(clone_image,sRGBColorspace,exception);
  (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod,
    exception);
  /* Pad the image by ~2*sigma on each side so the blur has room to spread. */
  border_info.width=(size_t) floor(2.0*sigma+0.5);
  border_info.height=(size_t) floor(2.0*sigma+0.5);
  border_info.x=0;
  border_info.y=0;
  (void) QueryColorCompliance("none",AllCompliance,&clone_image->border_color,
    exception);
  clone_image->alpha_trait=BlendPixelTrait;
  border_image=BorderImage(clone_image,&border_info,OverCompositeOp,exception);
  clone_image=DestroyImage(clone_image);
  if (border_image == (Image *) NULL)
    return((Image *) NULL);
  if (border_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel,exception);
  /*
    Shadow image.
  */
  status=MagickTrue;
  background_color=border_image->background_color;
  background_color.alpha_trait=BlendPixelTrait;
  image_view=AcquireAuthenticCacheView(border_image,exception);
  for (y=0; y < (ssize_t) border_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Flatten every pixel to the background color, scaling its opacity by
       the requested shadow transparency (alpha is a percentage). */
    for (x=0; x < (ssize_t) border_image->columns; x++)
    {
      if (border_image->alpha_trait != UndefinedPixelTrait)
        background_color.alpha=GetPixelAlpha(border_image,q)*alpha/100.0;
      SetPixelViaPixelInfo(border_image,&background_color,q);
      q+=GetPixelChannels(border_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      border_image=DestroyImage(border_image);
      return((Image *) NULL);
    }
  /* Blur only the alpha channel, then restore the previous channel mask. */
  channel_mask=SetImageChannelMask(border_image,AlphaChannel);
  shadow_image=BlurImage(border_image,0.0,sigma,exception);
  border_image=DestroyImage(border_image);
  if (shadow_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetPixelChannelMask(shadow_image,channel_mask);
  if (shadow_image->page.width == 0)
    shadow_image->page.width=shadow_image->columns;
  if (shadow_image->page.height == 0)
    shadow_image->page.height=shadow_image->rows;
  /* Shift the page geometry so the shadow lands at (x_offset,y_offset)
     relative to the original image, compensating for the added border. */
  shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
  shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
  return(shadow_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S k e t c h I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SketchImage() simulates a pencil sketch.
We convolve the image with a % Gaussian operator of the given radius and standard deviation (sigma). For % reasonable results, radius should be larger than sigma. Use a radius of 0 % and SketchImage() selects a suitable radius for you. Angle gives the angle % of the sketch. % % The format of the SketchImage method is: % % Image *SketchImage(const Image *image,const double radius, % const double sigma,const double angle,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the % center pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o angle: apply the effect along this angle. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SketchImage(const Image *image,const double radius, const double sigma,const double angle,ExceptionInfo *exception) { CacheView *random_view; Image *blend_image, *blur_image, *dodge_image, *random_image, *sketch_image; MagickBooleanType status; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif /* Sketch image. 
*/ random_image=CloneImage(image,image->columns << 1,image->rows << 1, MagickTrue,exception); if (random_image == (Image *) NULL) return((Image *) NULL); status=MagickTrue; random_info=AcquireRandomInfoThreadSet(); random_view=AcquireAuthenticCacheView(random_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(random_image,random_image,random_image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) random_image->rows; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) random_image->columns; x++) { double value; register ssize_t i; value=GetPseudoRandomValue(random_info[id]); for (i=0; i < (ssize_t) GetPixelChannels(random_image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=ClampToQuantum(QuantumRange*value); } q+=GetPixelChannels(random_image); } if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse) status=MagickFalse; } random_view=DestroyCacheView(random_view); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) { random_image=DestroyImage(random_image); return(random_image); } blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception); random_image=DestroyImage(random_image); if (blur_image == (Image *) NULL) return((Image *) NULL); dodge_image=EdgeImage(blur_image,radius,exception); blur_image=DestroyImage(blur_image); if (dodge_image == (Image *) NULL) return((Image *) NULL); (void) NormalizeImage(dodge_image,exception); (void) NegateImage(dodge_image,MagickFalse,exception); (void) 
TransformImage(&dodge_image,(char *) NULL,"50%",exception); sketch_image=CloneImage(image,0,0,MagickTrue,exception); if (sketch_image == (Image *) NULL) { dodge_image=DestroyImage(dodge_image); return((Image *) NULL); } (void) CompositeImage(sketch_image,dodge_image,ColorDodgeCompositeOp, MagickTrue,0,0,exception); dodge_image=DestroyImage(dodge_image); blend_image=CloneImage(image,0,0,MagickTrue,exception); if (blend_image == (Image *) NULL) { sketch_image=DestroyImage(sketch_image); return((Image *) NULL); } if (blend_image->alpha_trait != BlendPixelTrait) (void) SetImageAlpha(blend_image,TransparentAlpha,exception); (void) SetImageArtifact(blend_image,"compose:args","20x80"); (void) CompositeImage(sketch_image,blend_image,BlendCompositeOp,MagickTrue, 0,0,exception); blend_image=DestroyImage(blend_image); return(sketch_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S o l a r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SolarizeImage() applies a special effect to the image, similar to the effect % achieved in a photo darkroom by selectively exposing areas of photo % sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a % measure of the extent of the solarization. % % The format of the SolarizeImage method is: % % MagickBooleanType SolarizeImage(Image *image,const double threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: Define the extent of the solarization. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag  "Solarize/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* promote gray images to sRGB before inverting channels in place */
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Solarize colormap: invert each primary that exceeds the threshold.
        (Comparison is in double to avoid Quantum/threshold type mismatch.)
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((double) image->colormap[i].red > threshold)
          image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((double) image->colormap[i].green > threshold)
          image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((double) image->colormap[i].blue > threshold)
          image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
    }
  /*
    Solarize image: invert, per updatable channel, any sample above the
    threshold.  Note: for PseudoClass images this runs in addition to the
    colormap pass above.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;  /* skip read-only channels (e.g. alpha copy traits) */
        if ((double) q[i] > threshold)
          q[i]=QuantumRange-q[i];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SolarizeImage)
#endif
        proceed=SetImageProgress(image,SolarizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S t e g a n o I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SteganoImage() hides a digital watermark within the image.  Recover
%  the hidden watermark later to prove that the authenticity of an image.
%  Offset defines the start position within the image to hide the watermark.
%
%  The format of the SteganoImage method is:
%
%      Image *SteganoImage(const Image *image,Image *watermark,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
% % o watermark: the watermark image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SteganoImage(const Image *image,const Image *watermark, ExceptionInfo *exception) { #define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0) #define SetBit(alpha,i,set) (Quantum) ((set) != 0 ? (size_t) (alpha) \ | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i))) #define SteganoImageTag "Stegano/Image" CacheView *stegano_view, *watermark_view; Image *stegano_image; int c; MagickBooleanType status; PixelInfo pixel; register Quantum *q; register ssize_t x; size_t depth, one; ssize_t i, j, k, y; /* Initialize steganographic image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(watermark != (const Image *) NULL); assert(watermark->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); one=1UL; stegano_image=CloneImage(image,0,0,MagickTrue,exception); if (stegano_image == (Image *) NULL) return((Image *) NULL); stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH; if (SetImageStorageClass(stegano_image,DirectClass,exception) == MagickFalse) { stegano_image=DestroyImage(stegano_image); return((Image *) NULL); } /* Hide watermark in low-order bits of image. 
*/ c=0; i=0; j=0; depth=stegano_image->depth; k=stegano_image->offset; status=MagickTrue; watermark_view=AcquireVirtualCacheView(watermark,exception); stegano_view=AcquireAuthenticCacheView(stegano_image,exception); for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--) { for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++) { for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++) { ssize_t offset; (void) GetOneCacheViewVirtualPixelInfo(watermark_view,x,y,&pixel, exception); offset=k/(ssize_t) stegano_image->columns; if (offset >= (ssize_t) stegano_image->rows) break; q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t) stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1, exception); if (q == (Quantum *) NULL) break; switch (c) { case 0: { SetPixelRed(stegano_image,SetBit(GetPixelRed(stegano_image,q),j, GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q); break; } case 1: { SetPixelGreen(stegano_image,SetBit(GetPixelGreen(stegano_image,q),j, GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q); break; } case 2: { SetPixelBlue(stegano_image,SetBit(GetPixelBlue(stegano_image,q),j, GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q); break; } } if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse) break; c++; if (c == 3) c=0; k++; if (k == (ssize_t) (stegano_image->columns*stegano_image->columns)) k=0; if (k == stegano_image->offset) j++; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType) (depth-i),depth); if (proceed == MagickFalse) status=MagickFalse; } } stegano_view=DestroyCacheView(stegano_view); watermark_view=DestroyCacheView(watermark_view); if (status == MagickFalse) stegano_image=DestroyImage(stegano_image); return(stegano_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t e r e o A n a g l y 
p h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % StereoAnaglyphImage() combines two images and produces a single image that % is the composite of a left and right image of a stereo pair. Special % red-green stereo glasses are required to view this effect. % % The format of the StereoAnaglyphImage method is: % % Image *StereoImage(const Image *left_image,const Image *right_image, % ExceptionInfo *exception) % Image *StereoAnaglyphImage(const Image *left_image, % const Image *right_image,const ssize_t x_offset,const ssize_t y_offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o left_image: the left image. % % o right_image: the right image. % % o exception: return any errors or warnings in this structure. % % o x_offset: amount, in pixels, by which the left image is offset to the % right of the right image. % % o y_offset: amount, in pixels, by which the left image is offset to the % bottom of the right image. 
% % */ MagickExport Image *StereoImage(const Image *left_image, const Image *right_image,ExceptionInfo *exception) { return(StereoAnaglyphImage(left_image,right_image,0,0,exception)); } MagickExport Image *StereoAnaglyphImage(const Image *left_image, const Image *right_image,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { #define StereoImageTag "Stereo/Image" const Image *image; Image *stereo_image; MagickBooleanType status; ssize_t y; assert(left_image != (const Image *) NULL); assert(left_image->signature == MagickCoreSignature); if (left_image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", left_image->filename); assert(right_image != (const Image *) NULL); assert(right_image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(right_image != (const Image *) NULL); image=left_image; if ((left_image->columns != right_image->columns) || (left_image->rows != right_image->rows)) ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer"); /* Initialize stereo image attributes. */ stereo_image=CloneImage(left_image,left_image->columns,left_image->rows, MagickTrue,exception); if (stereo_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(stereo_image,DirectClass,exception) == MagickFalse) { stereo_image=DestroyImage(stereo_image); return((Image *) NULL); } (void) SetImageColorspace(stereo_image,sRGBColorspace,exception); /* Copy left image to red channel and right image to blue channel. 
*/ status=MagickTrue; for (y=0; y < (ssize_t) stereo_image->rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; register Quantum *magick_restrict r; p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1, exception); q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception); r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL) || (r == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) stereo_image->columns; x++) { SetPixelRed(image,GetPixelRed(left_image,p),r); SetPixelGreen(image,GetPixelGreen(right_image,q),r); SetPixelBlue(image,GetPixelBlue(right_image,q),r); if ((GetPixelAlphaTraits(stereo_image) & CopyPixelTrait) != 0) SetPixelAlpha(image,(GetPixelAlpha(left_image,p)+ GetPixelAlpha(right_image,q))/2,r); p+=GetPixelChannels(left_image); q+=GetPixelChannels(right_image); r+=GetPixelChannels(stereo_image); } if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse) break; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y, stereo_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } if (status == MagickFalse) stereo_image=DestroyImage(stereo_image); return(stereo_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S w i r l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SwirlImage() swirls the pixels about the center of the image, where % degrees indicates the sweep of the arc through which each pixel is moved. % You get a more dramatic effect as the degrees move from 1 to 360. 
%
%  The format of the SwirlImage method is:
%
%      Image *SwirlImage(const Image *image,double degrees,
%        const PixelInterpolateMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o degrees: Define the tightness of the swirling effect.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define SwirlImageTag  "Swirl/Image"

  CacheView
    *canvas_view,
    *interpolate_view,
    *swirl_view;

  Image
    *canvas,
    *swirl_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    radius;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize swirl image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas->alpha_trait == UndefinedPixelTrait) &&
      (canvas->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlphaChannel(canvas,OpaqueAlphaChannel,exception);
  swirl_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,
    exception);
  if (swirl_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(swirl_image,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      swirl_image=DestroyImage(swirl_image);
      return((Image *) NULL);
    }
  /*
    Compute scaling factor: map the image onto a unit-aspect ellipse so
    the swirl is circular regardless of the image's aspect ratio.
  */
  center.x=(double) canvas->columns/2.0;
  center.y=(double) canvas->rows/2.0;
  radius=MagickMax(center.x,center.y);
  scale.x=1.0;
  scale.y=1.0;
  if (canvas->columns > canvas->rows)
    scale.y=(double) canvas->columns/(double) canvas->rows;
  else
    if (canvas->columns < canvas->rows)
      scale.x=(double) canvas->rows/(double) canvas->columns;
  degrees=(double) DegreesToRadians(degrees);
  /*
    Swirl image.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas,exception);
  interpolate_view=AcquireVirtualCacheView(image,exception);
  swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas,swirl_image,canvas->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        {
          /* outside the swirl radius: copy the source pixel unchanged */
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(canvas,i);
            PixelTrait traits = GetPixelChannelTraits(canvas,channel);
            PixelTrait swirl_traits = GetPixelChannelTraits(swirl_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (swirl_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(swirl_image,channel,p[i],q);
          }
        }
      else
        {
          double
            cosine,
            factor,
            sine;

          /*
            Swirl the pixel: rotation falls off quadratically with distance
            from the center (factor^2), reaching `degrees` at the center.
          */
          factor=1.0-sqrt((double) distance)/radius;
          sine=sin((double) (degrees*factor*factor));
          cosine=cos((double) (degrees*factor*factor));
          status=InterpolatePixelChannels(canvas,interpolate_view,swirl_image,
            method,((cosine*delta.x-sine*delta.y)/scale.x+center.x),(double)
            ((sine*delta.x+cosine*delta.y)/scale.y+center.y),q,exception);
          if (status == MagickFalse)
            break;
        }
      p+=GetPixelChannels(canvas);
      q+=GetPixelChannels(swirl_image);
    }
    if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SwirlImage)
#endif
        proceed=SetImageProgress(canvas,SwirlImageTag,progress++,canvas->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  swirl_view=DestroyCacheView(swirl_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas=DestroyImage(canvas);
  if (status == MagickFalse)
    swirl_image=DestroyImage(swirl_image);
  return(swirl_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T i n t I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TintImage() applies a color vector to each pixel in the image.  The length
%  of the vector is 0 for black and white and at its maximum for the midtones.
%  The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
%  The format of the TintImage method is:
%
%      Image *TintImage(const Image *image,const char *blend,
%        const PixelInfo *tint,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o blend: A color value used for tinting.
%
%    o tint: A color value used for tinting.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TintImage(const Image *image,const char *blend,
  const PixelInfo *tint,ExceptionInfo *exception)
{
#define TintImageTag  "Tint/Image"

  CacheView
    *image_view,
    *tint_view;

  double
    intensity;

  GeometryInfo
    geometry_info;

  Image
    *tint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    color_vector;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Allocate tint image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  tint_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(tint_image,DirectClass,exception) == MagickFalse)
    {
      tint_image=DestroyImage(tint_image);
      return((Image *) NULL);
    }
  /* a non-gray tint cannot be represented in a gray colorspace */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsPixelInfoGray(tint) == MagickFalse))
    (void) SetImageColorspace(tint_image,sRGBColorspace,exception);
  /* NULL blend means no tinting: return the plain clone */
  if (blend == (const char *) NULL)
    return(tint_image);
  /*
    Determine RGB values of the color: blend is a geometry string of
    per-channel percentages (rho[,sigma,xi,psi[,chi]]).
  */
  GetPixelInfo(image,&color_vector);
  flags=ParseGeometry(blend,&geometry_info);
  color_vector.red=geometry_info.rho;
  color_vector.green=geometry_info.rho;
  color_vector.blue=geometry_info.rho;
  color_vector.alpha=(MagickRealType) OpaqueAlpha;
  if ((flags & SigmaValue) != 0)
    color_vector.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    color_vector.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    color_vector.alpha=geometry_info.psi;
  if (image->colorspace == CMYKColorspace)
    {
      /* in CMYK, psi maps to black and chi to alpha */
      color_vector.black=geometry_info.rho;
      if ((flags & PsiValue) != 0)
        color_vector.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        color_vector.alpha=geometry_info.chi;
    }
  intensity=(double) GetPixelInfoIntensity((const Image *) NULL,tint);
  color_vector.red=(double) (color_vector.red*tint->red/100.0-intensity);
  color_vector.green=(double) (color_vector.green*tint->green/100.0-intensity);
  color_vector.blue=(double) (color_vector.blue*tint->blue/100.0-intensity);
  color_vector.black=(double) (color_vector.black*tint->black/100.0-intensity);
  color_vector.alpha=(double) (color_vector.alpha*tint->alpha/100.0-intensity);
  /*
    Tint image: add the color vector weighted by the midtone function
    f(x)=1-4*(x-0.5)^2, which is 0 at black/white and 1 at mid-gray.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,tint_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;

      double
        weight;

      GetPixelInfo(image,&pixel);
      weight=QuantumScale*GetPixelRed(image,p)-0.5;
      pixel.red=(MagickRealType) GetPixelRed(image,p)+color_vector.red*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelGreen(image,p)-0.5;
      pixel.green=(MagickRealType) GetPixelGreen(image,p)+color_vector.green*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelBlue(image,p)-0.5;
      pixel.blue=(MagickRealType) GetPixelBlue(image,p)+color_vector.blue*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelBlack(image,p)-0.5;
      pixel.black=(MagickRealType) GetPixelBlack(image,p)+color_vector.black*
        (1.0-(4.0*(weight*weight)));
      pixel.alpha=(MagickRealType) GetPixelAlpha(image,p);
      SetPixelViaPixelInfo(tint_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(tint_image);
    }
    if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TintImage)
#endif
        proceed=SetImageProgress(image,TintImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  tint_view=DestroyCacheView(tint_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    tint_image=DestroyImage(tint_image);
  return(tint_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     V i g n e t t e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  VignetteImage() softens the edges of the image in vignette style.
%
%  The format of the VignetteImage method is:
%
%      Image *VignetteImage(const Image *image,const double radius,
%        const double sigma,const ssize_t x,const ssize_t y,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o x, y: Define the x and y ellipse offset.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
  const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  char
    ellipse[MagickPathExtent];

  DrawInfo
    *draw_info;

  Image
    *canvas,
    *blur_image,
    *oval_image,
    *vignette_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(canvas,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  canvas->alpha_trait=BlendPixelTrait;
  /*
    Draw a white ellipse on a black background; its blurred result
    becomes the alpha mask for the vignette.
  */
  oval_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,
    exception);
  if (oval_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  (void) QueryColorCompliance("#000000",AllCompliance,
    &oval_image->background_color,exception);
  (void) SetImageBackgroundColor(oval_image,exception);
  draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->stroke,
    exception);
  (void) FormatLocaleString(ellipse,MagickPathExtent,"ellipse %g,%g,%g,%g,"
    "0.0,360.0",image->columns/2.0,image->rows/2.0,image->columns/2.0-x,
    image->rows/2.0-y);
  draw_info->primitive=AcquireString(ellipse);
  (void) DrawImage(oval_image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  blur_image=BlurImage(oval_image,radius,sigma,exception);
  oval_image=DestroyImage(oval_image);
  if (blur_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  blur_image->alpha_trait=UndefinedPixelTrait;
  /* copy the blurred oval's intensity into the canvas alpha channel */
  (void) CompositeImage(canvas,blur_image,IntensityCompositeOp,MagickTrue,
    0,0,exception);
  blur_image=DestroyImage(blur_image);
  vignette_image=MergeImageLayers(canvas,FlattenLayer,exception);
  canvas=DestroyImage(canvas);
  if (vignette_image != (Image *) NULL)
    (void) TransformImageColorspace(vignette_image,image->colorspace,exception);
  return(vignette_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     W a v e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WaveImage() creates a "ripple" effect in the image by shifting the pixels
%  vertically along a sine wave whose amplitude and wavelength is specified
%  by the given parameters.
%
%  The format of the WaveImage method is:
%
%      Image *WaveImage(const Image *image,const double amplitude,
%        const double wave_length,const PixelInterpolateMethod method,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o amplitude, wave_length:  Define the amplitude and wave length of the
%      sine wave.
%
%    o interpolate: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
  const double wave_length,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define WaveImageTag  "Wave/Image"

  CacheView
    *canvas_view,
    *wave_view;

  Image
    *canvas,
    *wave_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    *sine_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize wave image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas->alpha_trait == UndefinedPixelTrait) &&
      (canvas->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlpha(canvas,OpaqueAlpha,exception);
  /* output is taller by 2*|amplitude| so the displaced rows stay in frame */
  wave_image=CloneImage(canvas,canvas->columns,(size_t) (canvas->rows+2.0*
    fabs(amplitude)),MagickTrue,exception);
  if (wave_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(wave_image,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      wave_image=DestroyImage(wave_image);
      return((Image *) NULL);
    }
  /*
    Allocate sine map: per-column vertical displacement, precomputed once.
  */
  sine_map=(double *) AcquireQuantumMemory((size_t) wave_image->columns,
    sizeof(*sine_map));
  if (sine_map == (double *) NULL)
    {
      canvas=DestroyImage(canvas);
      wave_image=DestroyImage(wave_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) wave_image->columns; i++)
    sine_map[i]=fabs(amplitude)+amplitude*sin((double) ((2.0*MagickPI*i)/
      wave_length));
  /*
    Wave image: sample each output pixel from the sine-displaced source row.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas,exception);
  wave_view=AcquireAuthenticCacheView(wave_image,exception);
  /* out-of-range samples resolve to the background color */
  (void) SetCacheViewVirtualPixelMethod(canvas_view,
    BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas,wave_image,wave_image->rows,1)
#endif
  for (y=0; y < (ssize_t) wave_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) wave_image->columns; x++)
    {
      status=InterpolatePixelChannels(canvas,canvas_view,wave_image,method,
        (double) x,(double) (y-sine_map[x]),q,exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(wave_image);
    }
    if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_WaveImage)
#endif
        proceed=SetImageProgress(canvas,WaveImageTag,progress++,canvas->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  wave_view=DestroyCacheView(wave_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas=DestroyImage(canvas);
  sine_map=(double *) RelinquishMagickMemory(sine_map);
  if (status == MagickFalse)
    wave_image=DestroyImage(wave_image);
  return(wave_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     W a v e l e t   D e n o i s e   I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WaveletDenoiseImage() removes noise from the image using a wavelet
%  transform.
The wavelet transform is a fast hierarchical scheme for
%  processing an image using a set of consecutive lowpass and high_pass filters,
%  followed by a decimation.  This results in a decomposition into different
%  scales which can be regarded as different “frequency bands”, determined by
%  the mother wavelet.  Adapted from dcraw.c by David Coffin.
%
%  The format of the WaveletDenoiseImage method is:
%
%      Image *WaveletDenoiseImage(const Image *image,const double threshold,
%        const double softness,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: set the threshold for smoothing.
%
%    o softness: attenuate the smoothing threshold.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  One level of the à-trous "hat" wavelet transform along one dimension:
  kernel[i] = 0.25*(2*pixels[i] + pixels[i-scale] + pixels[i+scale]),
  sampled with the given stride and with the i-scale / i+scale taps
  mirrored at the array boundaries.  `stride` is 1 for rows and the row
  width for columns; `extent` is the number of samples.
*/
static inline void HatTransform(const float *magick_restrict pixels,
  const size_t stride,const size_t extent,const size_t scale,float *kernel)
{
  const float
    *magick_restrict p,
    *magick_restrict q,
    *magick_restrict r;

  register ssize_t
    i;

  /* leading edge: the i-scale tap reflects off index 0 (q walks backward) */
  p=pixels;
  q=pixels+scale*stride;
  r=pixels+scale*stride;
  for (i=0; i < (ssize_t) scale; i++)
  {
    kernel[i]=0.25f*(*p+(*p)+(*q)+(*r));
    p+=stride;
    q-=stride;
    r+=stride;
  }
  /* interior: both taps are in range */
  for ( ; i < (ssize_t) (extent-scale); i++)
  {
    kernel[i]=0.25f*(2.0f*(*p)+*(p-scale*stride)+*(p+scale*stride));
    p+=stride;
  }
  /* trailing edge: the i+scale tap reflects off the last index */
  q=p-scale*stride;
  r=pixels+stride*(extent-2);
  for ( ; i < (ssize_t) extent; i++)
  {
    kernel[i]=0.25f*(*p+(*p)+(*q)+(*r));
    p+=stride;
    q+=stride;
    r-=stride;
  }
}

MagickExport Image *WaveletDenoiseImage(const Image *image,
  const double threshold,const double softness,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *noise_view;

  float
    *kernel,
    *pixels;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickSizeType
    number_pixels;

  MemoryInfo
    *pixels_info;

  ssize_t
    channel;

  static const float
    noise_levels[] = { 0.8002f, 0.2735f, 0.1202f, 0.0585f, 0.0291f, 0.0152f,
      0.0080f, 0.0044f };

  /*
    Initialize noise image attributes.
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception); if (noise_image != (Image *) NULL) return(noise_image); #endif noise_image=CloneImage(image,0,0,MagickTrue,exception); if (noise_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse) { noise_image=DestroyImage(noise_image); return((Image *) NULL); } if (AcquireMagickResource(WidthResource,4*image->columns) == MagickFalse) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); pixels_info=AcquireVirtualMemory(3*image->columns,image->rows* sizeof(*pixels)); kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns), GetOpenMPMaximumThreads()*sizeof(*kernel)); if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL)) { if (kernel != (float *) NULL) kernel=(float *) RelinquishMagickMemory(kernel); if (pixels_info != (MemoryInfo *) NULL) pixels_info=RelinquishVirtualMemory(pixels_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } pixels=(float *) GetVirtualMemoryBlob(pixels_info); status=MagickTrue; number_pixels=(MagickSizeType) image->columns*image->rows; image_view=AcquireAuthenticCacheView(image,exception); noise_view=AcquireAuthenticCacheView(noise_image,exception); for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++) { register ssize_t i; size_t high_pass, low_pass; ssize_t level, y; PixelChannel pixel_channel; PixelTrait traits; if (status == MagickFalse) continue; traits=GetPixelChannelTraits(image,(PixelChannel) channel); if (traits == UndefinedPixelTrait) continue; 
pixel_channel=GetPixelChannelChannel(image,channel); if ((pixel_channel != RedPixelChannel) && (pixel_channel != GreenPixelChannel) && (pixel_channel != BluePixelChannel)) continue; /* Copy channel from image to wavelet pixel array. */ i=0; for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; ssize_t x; p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { pixels[i++]=(float) p[channel]; p+=GetPixelChannels(image); } } /* Low pass filter outputs are called approximation kernel & high pass filters are referred to as detail kernel. The detail kernel have high values in the noisy parts of the signal. */ high_pass=0; for (level=0; level < 5; level++) { double magnitude; ssize_t x, y; low_pass=(size_t) (number_pixels*((level & 0x01)+1)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,1) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register float *magick_restrict p, *magick_restrict q; register ssize_t x; p=kernel+id*image->columns; q=pixels+y*image->columns; HatTransform(q+high_pass,1,image->columns,(size_t) (1UL << level),p); q+=low_pass; for (x=0; x < (ssize_t) image->columns; x++) *q++=(*p++); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,1) \ magick_number_threads(image,image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); register float *magick_restrict p, *magick_restrict q; register ssize_t y; p=kernel+id*image->rows; q=pixels+x+low_pass; HatTransform(q,image->columns,image->rows,(size_t) (1UL << level),p); for (y=0; y < (ssize_t) image->rows; y++) { *q=(*p++); q+=image->columns; } } /* To threshold, each coefficient is compared to a threshold value and attenuated / shrunk by some 
factor. */ magnitude=threshold*noise_levels[level]; for (i=0; i < (ssize_t) number_pixels; ++i) { pixels[high_pass+i]-=pixels[low_pass+i]; if (pixels[high_pass+i] < -magnitude) pixels[high_pass+i]+=magnitude-softness*magnitude; else if (pixels[high_pass+i] > magnitude) pixels[high_pass+i]-=magnitude-softness*magnitude; else pixels[high_pass+i]*=softness; if (high_pass != 0) pixels[i]+=pixels[high_pass+i]; } high_pass=low_pass; } /* Reconstruct image from the thresholded wavelet kernel. */ i=0; for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register Quantum *magick_restrict q; register ssize_t x; ssize_t offset; q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; break; } offset=GetPixelChannelOffset(noise_image,pixel_channel); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType pixel; pixel=(MagickRealType) pixels[i]+pixels[low_pass+i]; q[offset]=ClampToQuantum(pixel); i++; q+=GetPixelChannels(noise_image); } sync=SyncCacheViewAuthenticPixels(noise_view,exception); if (sync == MagickFalse) status=MagickFalse; } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType) channel,GetPixelChannels(image)); if (proceed == MagickFalse) status=MagickFalse; } } noise_view=DestroyCacheView(noise_view); image_view=DestroyCacheView(image_view); kernel=(float *) RelinquishMagickMemory(kernel); pixels_info=RelinquishVirtualMemory(pixels_info); if (status == MagickFalse) noise_image=DestroyImage(noise_image); return(noise_image); }
ten_tusscher_2004_epi_S1_19.c
// Original Ten Tusscher 2004 epicardial ventricular myocyte model (CPU
// version) with a fitted parameter set and Elnaz's steady-state initial
// conditions (variant S1_19).

#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S1_19.h"

// Report the model's resting potential and ODE system size to the solver.
GET_CELL_MODEL_DATA(init_cell_model_data) {

    assert(cell_model);

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

//TODO: this should be called only once for the whole mesh, like in the GPU code
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    // Default initial conditions
/*
    sv[0] = INITIAL_V;     // V;       millivolt
    sv[1] = 0.f;   //M
    sv[2] = 0.75;  //H
    sv[3] = 0.75f; //J
    sv[4] = 0.f;   //Xr1
    sv[5] = 1.f;   //Xr2
    sv[6] = 0.f;   //Xs
    sv[7] = 1.f;   //S
    sv[8] = 0.f;   //R
    sv[9] = 0.f;   //D
    sv[10] = 1.f;  //F
    sv[11] = 1.f;  //FCa
    sv[12] = 1.f;  //G
    sv[13] = 0.0002; //Cai
    sv[14] = 0.2f;   //CaSR
    sv[15] = 11.6f;  //Nai
    sv[16] = 138.3f; //Ki
*/
    // Elnaz's steady-state initial conditions
    real sv_sst[]={-86.5574211764260,0.00129305755715058,0.779441422719268,0.779241742711666,0.000175039240857358,0.484977289081740,0.00294257507368012,0.999998344595344,1.93700269716616e-08,1.89380174481509e-05,0.999773792418493,1.00755963480393,0.999999137126184,3.41466316398601e-05,1.23162815450729,9.71224673801957,139.552422843336};

    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}

// Advance the requested cells by num_steps explicit time steps of size dt.
// Cells are independent, so the outer loop is parallelized with OpenMP.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    uint32_t sv_id;
    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {

        // cells_to_solve may be NULL, meaning "solve all cells in order".
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One time step for a single cell.
// NOTE(review): despite the derivative-style name, RHS_cpu writes the NEW
// state values (not derivatives) into rDY, which are copied back into sv.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {

    assert(sv);

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// Compute one integration step of the ten Tusscher 2004 model: gating
// variables advance with the Rush-Larsen scheme, concentrations and the
// membrane voltage with forward Euler.  rDY_ receives the updated state.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables
    real svolt = sv[0];   // membrane voltage (mV)
    real sm = sv[1];      // INa activation gate
    real sh = sv[2];      // INa fast inactivation gate
    real sj = sv[3];      // INa slow inactivation gate
    real sxr1 = sv[4];    // IKr activation gate
    real sxr2 = sv[5];    // IKr inactivation gate
    real sxs = sv[6];     // IKs activation gate
    real ss = sv[7];      // Ito inactivation gate
    real sr = sv[8];      // Ito activation gate
    real sd = sv[9];      // ICaL activation gate
    real sf = sv[10];     // ICaL voltage inactivation gate
    real sfca = sv[11];   // ICaL calcium inactivation gate
    real sg = sv[12];     // Irel gate
    real Cai = sv[13];    // intracellular Ca2+
    real CaSR = sv[14];   // sarcoplasmic-reticulum Ca2+
    real Nai = sv[15];    // intracellular Na+
    real Ki = sv[16];     // intracellular K+

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
///#ifdef EPI
    real Gks=0.245;
///#endif
///#ifdef ENDO
///    real Gks=0.245;
///#endif
///#ifdef MCELL
///    real Gks=0.062;
///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
//#ifdef EPI
    real Gto=0.294;
//#endif
//#ifdef ENDO
//    real Gto=0.073;
//#endif
//#ifdef MCELL
//    real Gto=0.294;
///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Fitted conductance/parameter set for this model variant; these
    // deliberately override the defaults assigned above.
    real parameters []={14.0344988699429,0.000243427554127383,0.000161272832250911,0.000484228011827550,0.275092424538870,0.175484829191378,0.164879494363494,3.77803127027096,0.0197412874581791,1.93055058781161,1099.31582404877,0.000553709594039336,0.144015543772373,0.0199814298252655,0.00826445055600327,9.00070147931675e-06};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    // Membrane currents and intermediate quantities.
    real IKr; real IKs; real IK1; real Ito;
    real INa; real IbNa; real ICaL; real IbCa;
    real INaCa; real IpCa; real IpK; real INaK;
    real Irel; real Ileak;

    real dNai; real dKi; real dCai; real dCaSR;

    real A;
//    real BufferFactorc;
//    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek; real Ena; real Eks; real Eca;
    real CaCSQN;
    real bjsr; real cjsr;
    real CaBuf;
    real bc; real cc;
    real Ak1; real Bk1; real rec_iK1;
    real rec_ipK; real rec_iNaK;
    real AM; real BM;
    real AH_1; real BH_1; real AH_2; real BH_2;
    real AJ_1; real BJ_1; real AJ_2; real BJ_2;
    real M_INF; real H_INF; real J_INF;
    real TAU_M; real TAU_H; real TAU_J;
    real axr1; real bxr1; real axr2; real bxr2;
    real Xr1_INF; real Xr2_INF;
    real TAU_Xr1; real TAU_Xr2;
    real Axs; real Bxs; real Xs_INF; real TAU_Xs;
    real R_INF; real TAU_R;
    real S_INF; real TAU_S;
    real Ad; real Bd; real Cd;
    real TAU_D; real D_INF;
    real TAU_F; real F_INF;
    real FCa_INF; real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
//    real BufcKbufc=Bufc*Kbufc;
//    real Kbufcsquare=Kbufc*Kbufc;
//    real Kbufc2=2*Kbufc;
//    real BufsrKbufsr=Bufsr*Kbufsr;
//    const real Kbufsrsquare=Kbufsr*Kbufsr;
//    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents (Nernst/reversal potentials and
    //rectification factors)
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
        (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr   +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // SR calcium: analytic solution of the buffering quadratic.
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // Cytosolic calcium: same quadratic buffering scheme.
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // Cell-type specific Ito kinetics; exactly one of EPI/ENDO/MCELL must be
    // defined at compile time, otherwise R_INF/S_INF/TAU_R/TAU_S stay
    // uninitialized below.
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen exponential integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g gates may only move toward their steady state while the
    // membrane is depolarized above -37 mV; otherwise they are held.
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
updater_basemaker-inl.h
/*!
 * Copyright 2014 by Contributors
 * \file updater_basemaker-inl.h
 * \brief implement a common tree constructor
 * \author Tianqi Chen
 */
#ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_

#include <rabit/rabit.h>
#include <vector>
#include <algorithm>
#include <string>
#include <limits>
#include <utility>

#include "xgboost/base.h"
#include "xgboost/json.h"
#include "xgboost/tree_updater.h"

#include "param.h"
#include "constraints.h"
#include "../common/io.h"
#include "../common/random.h"
#include "../common/quantile.h"

namespace xgboost {
namespace tree {
/*!
 * \brief base tree maker class that defines common operations
 *  needed in tree making
 */
class BaseMaker: public TreeUpdater {
 public:
  void Configure(const Args& args) override {
    param_.UpdateAllowUnknown(args);
  }

  void LoadConfig(Json const& in) override {
    auto const& config = get<Object const>(in);
    fromJson(config.at("train_param"), &this->param_);
  }
  void SaveConfig(Json* p_out) const override {
    auto& out = *p_out;
    out["train_param"] = toJson(param_);
  }

 protected:
  // helper to collect and query feature meta information
  struct FMetaHelper {
   public:
    /*! \brief find type of each feature, use column format */
    inline void InitByCol(DMatrix* p_fmat,
                          const RegTree& tree) {
      fminmax_.resize(tree.param.num_feature * 2);
      std::fill(fminmax_.begin(), fminmax_.end(),
                -std::numeric_limits<bst_float>::max());
      // start accumulating statistics
      // fminmax_ stores (-min, max) per feature so that a single
      // Allreduce<Max> can merge both bounds across workers.
      for (const auto &batch : p_fmat->GetBatches<SortedCSCPage>()) {
        for (bst_uint fid = 0; fid < batch.Size(); ++fid) {
          auto c = batch[fid];
          if (c.size() != 0) {
            CHECK_LT(fid * 2, fminmax_.size());
            fminmax_[fid * 2 + 0] =
                std::max(-c[0].fvalue, fminmax_[fid * 2 + 0]);
            fminmax_[fid * 2 + 1] =
                std::max(c[c.size() - 1].fvalue, fminmax_[fid * 2 + 1]);
          }
        }
      }
    }
    /*! \brief synchronize the information across all workers */
    inline void SyncInfo() {
      rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax_), fminmax_.size());
    }
    // get feature type, 0:empty 1:binary 2:real
    inline int Type(bst_uint fid) const {
      CHECK_LT(fid * 2 + 1, fminmax_.size())
          << "FeatHelper fid exceed query bound ";
      bst_float a = fminmax_[fid * 2];
      bst_float b = fminmax_[fid * 2 + 1];
      if (a == -std::numeric_limits<bst_float>::max()) return 0;
      if (-a == b) {
        return 1;
      } else {
        return 2;
      }
    }
    bst_float MaxValue(bst_uint fid) const {
      return fminmax_[fid *2 + 1];
    }
    // sample a fraction p of the non-empty features into p_findex;
    // the sampled set is broadcast from rank 0 so all workers agree
    void SampleCol(float p, std::vector<bst_feature_t> *p_findex) const {
      std::vector<bst_feature_t> &findex = *p_findex;
      findex.clear();
      for (size_t i = 0; i < fminmax_.size(); i += 2) {
        const auto fid = static_cast<bst_uint>(i / 2);
        if (this->Type(fid) != 0) findex.push_back(fid);
      }
      auto n = static_cast<unsigned>(p * findex.size());
      std::shuffle(findex.begin(), findex.end(), common::GlobalRandom());
      findex.resize(n);
      // sync the findex if it is subsample
      std::string s_cache;
      common::MemoryBufferStream fc(&s_cache);
      dmlc::Stream& fs = fc;
      if (rabit::GetRank() == 0) {
        fs.Write(findex);
      }
      rabit::Broadcast(&s_cache, 0);
      fs.Read(&findex);
    }

   private:
    std::vector<bst_float> fminmax_;
  };
  // ------static helper functions ------
  // helper function to get to next level of the tree
  /*! \brief this is a helper function for row based data: route one
   *  instance from node nid to the child it falls into */
  inline static int NextLevel(const SparsePage::Inst &inst, const RegTree &tree, int nid) {
    const RegTree::Node &n = tree[nid];
    bst_uint findex = n.SplitIndex();
    for (const auto& ins : inst) {
      if (findex == ins.index) {
        if (ins.fvalue < n.SplitCond()) {
          return n.LeftChild();
        } else {
          return n.RightChild();
        }
      }
    }
    // split feature is missing from this row: take the default branch
    return n.DefaultChild();
  }
  // ------class member helpers---------
  /*! \brief initialize temp data structure (row positions and the
   *  expansion queue) before building a new tree */
  inline void InitData(const std::vector<GradientPair> &gpair,
                       const DMatrix &fmat,
                       const RegTree &tree) {
    {
      // setup position
      position_.resize(gpair.size());
      std::fill(position_.begin(), position_.end(), 0);
      // mark delete for the deleted entries (negative hessian)
      for (size_t i = 0; i < position_.size(); ++i) {
        if (gpair[i].GetHess() < 0.0f) position_[i] = ~position_[i];
      }
      // mark subsample
      if (param_.subsample < 1.0f) {
        CHECK_EQ(param_.sampling_method, TrainParam::kUniform)
          << "Only uniform sampling is supported, "
          << "gradient-based sampling is only support by GPU Hist.";
        std::bernoulli_distribution coin_flip(param_.subsample);
        auto& rnd = common::GlobalRandom();
        for (size_t i = 0; i < position_.size(); ++i) {
          if (gpair[i].GetHess() < 0.0f) continue;
          if (!coin_flip(rnd)) position_[i] = ~position_[i];
        }
      }
    }
    {
      // expand query: start from the root node only
      qexpand_.reserve(256); qexpand_.clear();
      qexpand_.push_back(0);
      this->UpdateNode2WorkIndex(tree);
    }
    this->interaction_constraints_.Configure(param_, fmat.Info().num_col_);
  }
  /*! \brief update queue expand, add in new leaves */
  inline void UpdateQueueExpand(const RegTree &tree) {
    std::vector<int> newnodes;
    for (int nid : qexpand_) {
      if (!tree[nid].IsLeaf()) {
        newnodes.push_back(tree[nid].LeftChild());
        newnodes.push_back(tree[nid].RightChild());
      }
    }
    // use new nodes for qexpand
    qexpand_ = newnodes;
    this->UpdateNode2WorkIndex(tree);
  }
  // return decoded position (negative encoding marks inactive rows)
  inline int DecodePosition(bst_uint ridx) const {
    const int pid = position_[ridx];
    return pid < 0 ? ~pid : pid;
  }
  // encode the encoded position value for ridx, preserving the
  // active/inactive flag carried in the sign bit
  inline void SetEncodePosition(bst_uint ridx, int nid) {
    if (position_[ridx] < 0) {
      position_[ridx] = ~nid;
    } else {
      position_[ridx] = nid;
    }
  }
  /*!
   * \brief this is a helper function that uses column based data structure,
   *  reset the positions to the latest one
   * \param nodes the set of nodes that contains the split to be used
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  inline void ResetPositionCol(const std::vector<int> &nodes,
                               DMatrix *p_fmat,
                               const RegTree &tree) {
    // set the positions in the nondefault
    this->SetNonDefaultPositionCol(nodes, p_fmat, tree);
    this->SetDefaultPostion(p_fmat, tree);
  }
  /*!
   * \brief helper function to set the non-leaf positions to default direction.
   *  This function can be applied multiple times and will get the same result.
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  inline void SetDefaultPostion(DMatrix *p_fmat,
                                const RegTree &tree) {
    // set default direct nodes to default
    // for leaf nodes that are not fresh, mark them to ~nid,
    // so that they are ignored in future statistics collection
    const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);

    #pragma omp parallel for schedule(static)
    for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
      const int nid = this->DecodePosition(ridx);
      if (tree[nid].IsLeaf()) {
        // mark finish when it is not a fresh leaf
        if (tree[nid].RightChild() == -1) {
          position_[ridx] = ~nid;
        }
      } else {
        // push to default branch
        if (tree[nid].DefaultLeft()) {
          this->SetEncodePosition(ridx, tree[nid].LeftChild());
        } else {
          this->SetEncodePosition(ridx, tree[nid].RightChild());
        }
      }
    }
  }
  /*!
   * \brief this is a helper function that uses column based data structure,
   *  to CORRECT the positions of non-default directions that WERE set to default
   *  before calling this function.
   * \param batch The column batch
   * \param sorted_split_set The set of index that contains split solutions.
   * \param tree the regression tree structure
   */
  inline void CorrectNonDefaultPositionByBatch(
      const SparsePage &batch, const std::vector<bst_uint> &sorted_split_set,
      const RegTree &tree) {
    for (size_t fid = 0; fid < batch.Size(); ++fid) {
      auto col = batch[fid];
      auto it = std::lower_bound(sorted_split_set.begin(),
                                 sorted_split_set.end(), fid);
      // only process columns that are actually used by some split
      if (it != sorted_split_set.end() && *it == fid) {
        const auto ndata = static_cast<bst_omp_uint>(col.size());
        #pragma omp parallel for schedule(static)
        for (bst_omp_uint j = 0; j < ndata; ++j) {
          const bst_uint ridx = col[j].index;
          const bst_float fvalue = col[j].fvalue;
          const int nid = this->DecodePosition(ridx);
          CHECK(tree[nid].IsLeaf());
          int pid = tree[nid].Parent();

          // go back to parent, correct those who are not default
          if (!tree[nid].IsRoot() && tree[pid].SplitIndex() == fid) {
            if (fvalue < tree[pid].SplitCond()) {
              this->SetEncodePosition(ridx, tree[pid].LeftChild());
            } else {
              this->SetEncodePosition(ridx, tree[pid].RightChild());
            }
          }
        }
      }
    }
  }
  /*!
   * \brief this is a helper function that uses column based data structure,
   * \param nodes the set of nodes that contains the split to be used
   * \param tree the regression tree structure
   * \param out_split_set The split index set (sorted, deduplicated)
   */
  inline void GetSplitSet(const std::vector<int> &nodes,
                          const RegTree &tree,
                          std::vector<unsigned>* out_split_set) {
    std::vector<unsigned>& fsplits = *out_split_set;
    fsplits.clear();
    // step 1, classify the non-default data into right places
    for (int nid : nodes) {
      if (!tree[nid].IsLeaf()) {
        fsplits.push_back(tree[nid].SplitIndex());
      }
    }
    std::sort(fsplits.begin(), fsplits.end());
    fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin());
  }
  /*!
   * \brief this is a helper function that uses column based data structure,
   *  update all positions into nondefault branch, if any, ignore the default branch
   * \param nodes the set of nodes that contains the split to be used
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes,
                                        DMatrix *p_fmat,
                                        const RegTree &tree) {
    std::vector<unsigned> fsplits;
    this->GetSplitSet(nodes, tree, &fsplits);

    for (const auto &batch : p_fmat->GetBatches<SortedCSCPage>()) {
      for (auto fid : fsplits) {
        auto col = batch[fid];
        const auto ndata = static_cast<bst_omp_uint>(col.size());
        #pragma omp parallel for schedule(static)
        for (bst_omp_uint j = 0; j < ndata; ++j) {
          const bst_uint ridx = col[j].index;
          const bst_float fvalue = col[j].fvalue;
          const int nid = this->DecodePosition(ridx);
          // go back to parent, correct those who are not default
          if (!tree[nid].IsLeaf() && tree[nid].SplitIndex() == fid) {
            if (fvalue < tree[nid].SplitCond()) {
              this->SetEncodePosition(ridx, tree[nid].LeftChild());
            } else {
              this->SetEncodePosition(ridx, tree[nid].RightChild());
            }
          }
        }
      }
    }
  }
  /*! \brief helper function to collect per-node gradient statistics from
   *  the whole dataset, accumulated per thread and then reduced */
  template<typename TStats>
  inline void GetNodeStats(const std::vector<GradientPair> &gpair,
                           const DMatrix &fmat,
                           const RegTree &tree,
                           std::vector< std::vector<TStats> > *p_thread_temp,
                           std::vector<TStats> *p_node_stats) {
    std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp;
    thread_temp.resize(omp_get_max_threads());
    p_node_stats->resize(tree.param.num_nodes);
    #pragma omp parallel
    {
      const int tid = omp_get_thread_num();
      thread_temp[tid].resize(tree.param.num_nodes, TStats());
      for (unsigned int nid : qexpand_) {
        thread_temp[tid][nid] = TStats();
      }
    }
    // setup position
    const auto ndata = static_cast<bst_omp_uint>(fmat.Info().num_row_);
    #pragma omp parallel for schedule(static)
    for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
      const int nid = position_[ridx];
      const int tid = omp_get_thread_num();
      if (nid >= 0) {
        thread_temp[tid][nid].Add(gpair[ridx]);
      }
    }
    // sum the per thread statistics together
    for (int nid : qexpand_) {
      TStats &s = (*p_node_stats)[nid];
      s = TStats();
      for (size_t tid = 0; tid < thread_temp.size(); ++tid) {
        s.Add(thread_temp[tid][nid]);
      }
    }
  }
  /*! \brief common helper data structure to build sketch */
  struct SketchEntry {
    /*! \brief total sum of amount to be met */
    double sum_total;
    /*! \brief statistics used in the sketch */
    double rmin, wmin;
    /*! \brief last seen feature value */
    bst_float last_fvalue;
    /*! \brief current size of sketch */
    double next_goal;
    // pointer to the sketch to put things in
    common::WXQuantileSketch<bst_float, bst_float> *sketch;
    // initialize the space
    inline void Init(unsigned max_size) {
      next_goal = -1.0f;
      rmin = wmin = 0.0f;
      sketch->temp.Reserve(max_size + 1);
      sketch->temp.size = 0;
    }
    /*!
     * \brief push a new element to sketch
     * \param fvalue feature value, comes in sorted ascending order
     * \param w weight
     * \param max_size
     */
    inline void Push(bst_float fvalue, bst_float w, unsigned max_size) {
      if (next_goal == -1.0f) {
        next_goal = 0.0f;
        last_fvalue = fvalue;
        wmin = w;
        return;
      }
      if (last_fvalue != fvalue) {
        double rmax = rmin + wmin;
        if (rmax >= next_goal && sketch->temp.size != max_size) {
          if (sketch->temp.size == 0 ||
              last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
            // push to sketch
            sketch->temp.data[sketch->temp.size] =
                common::WXQuantileSketch<bst_float, bst_float>::
                Entry(static_cast<bst_float>(rmin),
                      static_cast<bst_float>(rmax),
                      static_cast<bst_float>(wmin), last_fvalue);
            CHECK_LT(sketch->temp.size, max_size)
                << "invalid maximum size max_size=" << max_size
                << ", stemp.size" << sketch->temp.size;
            ++sketch->temp.size;
          }
          if (sketch->temp.size == max_size) {
            // sketch is full: set an unreachable goal so no more pushes occur
            next_goal = sum_total * 2.0f + 1e-5f;
          } else {
            next_goal = static_cast<bst_float>(sketch->temp.size * sum_total / max_size);
          }
        } else {
          if (rmax >= next_goal) {
            LOG(TRACKER) << "INFO: rmax=" << rmax
                         << ", sum_total=" << sum_total
                         << ", naxt_goal=" << next_goal
                         << ", size=" << sketch->temp.size;
          }
        }
        rmin = rmax;
        wmin = w;
        last_fvalue = fvalue;
      } else {
        wmin += w;
      }
    }
    /*! \brief push final unfinished value to the sketch */
    inline void Finalize(unsigned max_size) {
      double rmax = rmin + wmin;
      if (sketch->temp.size == 0 ||
          last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
        CHECK_LE(sketch->temp.size, max_size)
            << "Finalize: invalid maximum size, max_size=" << max_size
            << ", stemp.size=" << sketch->temp.size;
        // push to sketch
        sketch->temp.data[sketch->temp.size] =
            common::WXQuantileSketch<bst_float, bst_float>::
            Entry(static_cast<bst_float>(rmin),
                  static_cast<bst_float>(rmax),
                  static_cast<bst_float>(wmin), last_fvalue);
        ++sketch->temp.size;
      }
      sketch->PushTemp();
    }
  };
  /*! \brief training parameter of tree grower */
  TrainParam param_;
  /*! \brief queue of nodes to be expanded */
  std::vector<int> qexpand_;
  /*!
   * \brief map active node to its working index offset in qexpand,
   *  can be -1, which means the node is not actively expanding
   */
  std::vector<int> node2workindex_;
  /*!
   * \brief position of each instance in the tree
   *  can be negative, which means this position is no longer expanding
   *  see also Decode/EncodePosition
   */
  std::vector<int> position_;

  FeatureInteractionConstraintHost interaction_constraints_;

 private:
  inline void UpdateNode2WorkIndex(const RegTree &tree) {
    // update the node2workindex
    std::fill(node2workindex_.begin(), node2workindex_.end(), -1);
    node2workindex_.resize(tree.param.num_nodes);
    for (size_t i = 0; i < qexpand_.size(); ++i) {
      node2workindex_[qexpand_[i]] = static_cast<int>(i);
    }
  }
};
}  // namespace tree
}  // namespace xgboost
#endif  // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
GB_unop__identity_fp64_uint64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fp64_uint64)
// op(A') function:  GB (_unop_tran__identity_fp64_uint64)

// C type:   double
// A type:   uint64_t
// cast:     double cij = (double) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    double z = (double) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = (double) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply: Cx [p] = (double) Ax [p] for every entry p.  If A is
// bitmap (Ab != NULL) only positions with Ab [p] != 0 are valid entries; the
// bitmap itself has already been copied into C by the caller.
GrB_Info GB (_unop_apply__identity_fp64_uint64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every position 0..anz-1 holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            double z = (double) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint64_t aij = Ax [p] ;
            double z = (double) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the shared template GB_unop_transpose.c, which is
// specialized here via the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_fp64_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 4; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=2*Nt-2;t1++) { lbp=ceild(t1+2,2); ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1+2,2),ceild(4*t2-Nz+9,4));t3<=min(min(floord(4*Nt+Ny-9,4),floord(2*t1+Ny-3,4)),floord(4*t2+Ny-9,4));t3++) { for (t4=max(max(ceild(t1-508,512),ceild(4*t2-Nz-1011,1024)),ceild(4*t3-Ny-1011,1024));t4<=min(min(min(floord(4*Nt+Nx-9,1024),floord(2*t1+Nx-3,1024)),floord(4*t2+Nx-9,1024)),floord(4*t3+Nx-9,1024));t4++) { for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(1024*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) { for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) { lbv=max(1024*t4,4*t5+4); ubv=min(1024*t4+1023,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ 
(-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
FSAI_precond.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <math.h> #include <omp.h> #include "H2Pack.h" #include "H2Pack_utils.h" #include "CSRPlus.h" #include "FSAI_precond.h" // Calculate the square of distance of each pair of 2D points for KNN search static void calc_2D_dist2( const DTYPE *coord0, const int ld0, const int n0, const DTYPE *coord1, const int ld1, const int n1, DTYPE *dist, const int ld ) { const DTYPE *x0 = coord0 + ld0 * 0; const DTYPE *y0 = coord0 + ld0 * 1; const DTYPE *x1 = coord1 + ld1 * 0; const DTYPE *y1 = coord1 + ld1 * 1; #pragma omp parallel for for (int i = 0; i < n0; i++) { DTYPE x0_i = x0[i]; DTYPE y0_i = y0[i]; DTYPE *dist_i = dist + i * ld; #pragma omp simd for (int j = 0; j < n1; j++) { DTYPE dx = x0_i - x1[j]; DTYPE dy = y0_i - y1[j]; dist_i[j] = dx * dx + dy * dy; } } } // Calculate the square of distance of each pair of 3D points for KNN search static void calc_3D_dist2( const DTYPE *coord0, const int ld0, const int n0, const DTYPE *coord1, const int ld1, const int n1, DTYPE *dist, const int ld ) { const DTYPE *x0 = coord0 + ld0 * 0; const DTYPE *y0 = coord0 + ld0 * 1; const DTYPE *z0 = coord0 + ld0 * 1; const DTYPE *x1 = coord1 + ld1 * 0; const DTYPE *y1 = coord1 + ld1 * 1; const DTYPE *z1 = coord1 + ld1 * 1; #pragma omp parallel for for (int i = 0; i < n0; i++) { DTYPE x0_i = x0[i]; DTYPE y0_i = y0[i]; DTYPE z0_i = z0[i]; DTYPE *dist_i = dist + i * ld; #pragma omp simd for (int j = 0; j < n1; j++) { DTYPE dx = x0_i - x1[j]; DTYPE dy = y0_i - y1[j]; DTYPE dz = z0_i - z1[j]; dist_i[j] = dx * dx + dy * dy + dz * dz; } } } // Quick-sort for (key, val) pairs static void qsort_DTYPE_int_pair(DTYPE *key, int *val, int l, int r) { int i = l, j = r, tmp_val; DTYPE mid_key = key[(l + r) / 2], tmp_key; while (i <= j) { while (key[i] < mid_key) i++; while (key[j] > mid_key) j--; if (i <= j) { tmp_key = key[i]; key[i] = key[j]; key[j] = tmp_key; tmp_val = val[i]; val[i] = val[j]; val[j] = tmp_val; i++; j--; } 
} if (i < r) qsort_DTYPE_int_pair(key, val, i, r); if (j > l) qsort_DTYPE_int_pair(key, val, l, j); } // Search k nearest neighbors for each point using the hierarchical partition in H2Pack static void H2P_search_knn(H2Pack_p h2pack, const int k, int *knn) { int pt_dim = h2pack->pt_dim; int n_point = h2pack->n_point; int n_leaf_node = h2pack->n_leaf_node; int max_neighbor = h2pack->max_neighbor; int max_leaf_pts = h2pack->max_leaf_points; int *leaf_nodes = h2pack->height_nodes; int *pt_cluster = h2pack->pt_cluster; DTYPE *enbox = h2pack->enbox; DTYPE *coord = h2pack->coord; H2P_dense_mat_p dist; H2P_dense_mat_p neighbor_pt_coord; H2P_int_vec_p pt_idx; int max_candidate = max_leaf_pts * max_neighbor; H2P_dense_mat_init(&dist, max_leaf_pts, max_candidate); H2P_dense_mat_init(&neighbor_pt_coord, pt_dim, max_candidate); H2P_int_vec_init(&pt_idx, max_leaf_pts * max_candidate); for (int i = 0; i < n_leaf_node; i++) { int node = leaf_nodes[i]; int node_pt_s = pt_cluster[2 * node]; int node_pt_e = pt_cluster[2 * node + 1]; int node_npt = node_pt_e - node_pt_s + 1; DTYPE *node_enbox = enbox + node * 2 * pt_dim; // Gather all points in inadmissible leaf nodes int neighbor_pt_cnt = 0; for (int j = 0; j < n_leaf_node; j++) { int node_j = leaf_nodes[j]; DTYPE *node_j_enbox = enbox + node_j * 2 * pt_dim; if (H2P_check_box_admissible(node_enbox, node_j_enbox, pt_dim, ALPHA_H2) == 1) continue; int node_j_pt_s = pt_cluster[2 * node_j]; int node_j_pt_e = pt_cluster[2 * node_j + 1]; int node_j_npt = node_j_pt_e - node_j_pt_s + 1; copy_matrix_block( sizeof(DTYPE), pt_dim, node_j_npt, coord + node_j_pt_s, n_point, neighbor_pt_coord->data + neighbor_pt_cnt, max_candidate ); for (int k = 0; k < node_j_npt; k++) pt_idx->data[neighbor_pt_cnt + k] = node_j_pt_s + k; neighbor_pt_cnt += node_j_npt; } // End of j loop ASSERT_PRINTF( neighbor_pt_cnt <= max_candidate, "Node %d: inadm nodes + self only has %d points, > %d estimated maximum\n", node, neighbor_pt_cnt, max_candidate ); if 
(neighbor_pt_cnt < k) { WARNING_PRINTF( "Node %d has only %d (< %d) nearest neighbors from inadmissible nodes\n", node, neighbor_pt_cnt, k ); } for (int j = 1; j < node_npt; j++) memcpy(pt_idx->data + j * neighbor_pt_cnt, pt_idx->data, sizeof(int) * neighbor_pt_cnt); // Calculate pairwise distance H2P_dense_mat_resize(dist, node_npt, neighbor_pt_cnt); if (pt_dim == 2) { calc_2D_dist2( coord + node_pt_s, n_point, node_npt, neighbor_pt_coord->data, max_candidate, neighbor_pt_cnt, dist->data, dist->ld ); } if (pt_dim == 3) { calc_3D_dist2( coord + node_pt_s, n_point, node_npt, neighbor_pt_coord->data, max_candidate, neighbor_pt_cnt, dist->data, dist->ld ); } // Sort pairwise distance and get the nearest neighbors if (neighbor_pt_cnt > k) { #pragma omp parallel for for (int j = 0; j < node_npt; j++) { DTYPE *dist_j = dist->data + j * neighbor_pt_cnt; int *pt_idx_j = pt_idx->data + j * neighbor_pt_cnt; qsort_DTYPE_int_pair(dist_j, pt_idx_j, 0, neighbor_pt_cnt - 1); } // End of j loop copy_matrix_block(sizeof(int), node_npt, k, pt_idx->data, neighbor_pt_cnt, knn + node_pt_s * k, k); } else { copy_matrix_block(sizeof(int), node_npt, neighbor_pt_cnt, pt_idx->data, neighbor_pt_cnt, knn + node_pt_s * k, k); // Not enough neighbor points, set the rest as self #pragma omp parallel for for (int j = node_pt_s; j <= node_pt_e; j++) { int *knn_j = knn + j * k; for (int l = neighbor_pt_cnt; l < k; l++) knn_j[l] = j; } } } // End of i loop H2P_dense_mat_destroy(&dist); H2P_dense_mat_destroy(&neighbor_pt_coord); H2P_int_vec_destroy(&pt_idx); free(dist); free(pt_idx); } // Construct a FSAI_precond from a H2Pack structure void H2P_build_FSAI_precond(H2Pack_p h2pack, const int rank, const DTYPE shift, FSAI_precond_p *precond_) { FSAI_precond_p precond = (FSAI_precond_p) malloc(sizeof(FSAI_precond_s)); assert(precond != NULL); if (h2pack->pt_dim != 2 && h2pack->pt_dim != 3) { ERROR_PRINTF("FSAI preconditioner construction only support 2D or 3D points\n"); return; } double st = 
get_wtime_sec(); int mat_size = h2pack->krnl_mat_size; int n_point = h2pack->n_point; int n_thread = h2pack->n_thread; int pt_dim = h2pack->pt_dim; int xpt_dim = h2pack->xpt_dim; int krnl_dim = h2pack->krnl_dim; int n_neighbor = rank / krnl_dim; int max_nnz = krnl_dim * krnl_dim * n_neighbor * n_point; DTYPE *coord = h2pack->coord; int *knn = (int*) malloc(sizeof(int) * n_point * n_neighbor); int *row = (int*) malloc(sizeof(int) * max_nnz); int *col = (int*) malloc(sizeof(int) * max_nnz); DTYPE *val = (DTYPE*) malloc(sizeof(DTYPE) * max_nnz); ASSERT_PRINTF( knn != NULL && row != NULL & col != NULL && val != NULL, "Failed to allocate working arrays for FSAI preconditioner construction\n" ); H2P_search_knn(h2pack, n_neighbor, knn); //FILE *ouf = fopen("C_knn.bin", "wb"); //fwrite(knn, sizeof(int), n_neighbor * n_point, ouf); //fclose(ouf); int nnz = 0; int *row_ptr = (int*) malloc(sizeof(int) * (n_point + 1)); row_ptr[0] = 0; for (int i = 0; i < n_point; i++) { int num_i = 0; int *nn_i = knn + i * n_neighbor; for (int j = 0; j < n_neighbor; j++) if (nn_i[j] < i) num_i++; num_i++; // For self num_i *= krnl_dim * krnl_dim; row_ptr[i + 1] = num_i + row_ptr[i]; } nnz = row_ptr[n_point]; #pragma omp parallel num_threads(n_thread) { int tid = omp_get_thread_num(); H2P_thread_buf_p tb = h2pack->tb[tid]; H2P_int_vec_p nn_idx = tb->idx0; H2P_int_vec_p col_idx = tb->idx1; H2P_dense_mat_p nn_coord = tb->mat0; H2P_dense_mat_p tmpAU = tb->mat1; H2P_dense_mat_p tmpYDL = tb->mat2; #pragma omp for schedule(guided) for (int i = 0; i < n_point; i++) { int row_i_spos = row_ptr[i]; int row_i_epos = row_ptr[i + 1]; int row_nnz = row_i_epos - row_i_spos; H2P_int_vec_set_capacity(nn_idx, row_nnz); // nn_i = [neighbor(i, neighbor(i, :) < i), i]; // num_i = length(nn_i); int num_i = 0; int *nn_i = knn + i * n_neighbor; for (int j = 0; j < n_neighbor; j++) { if (nn_i[j] >= i) continue; nn_idx->data[num_i] = nn_i[j]; num_i++; } nn_idx->data[num_i] = i; num_i++; nn_idx->length = num_i; // tmpA 
= kernel({coord(nn_i, :), coord(nn_i, :)}) + shift * eye(krnl_dim * num_i); H2P_dense_mat_resize(nn_coord, xpt_dim, num_i); for (int k = 0; k < xpt_dim; k++) { DTYPE *nn_coord_k = nn_coord->data + k * num_i; DTYPE *coord_k = coord + k * n_point; for (int l = 0; l < num_i; l++) nn_coord_k[l] = coord_k[nn_idx->data[l]]; } int A_size = num_i * krnl_dim; H2P_dense_mat_resize(tmpAU, A_size + krnl_dim, A_size); DTYPE *tmpA = tmpAU->data; DTYPE *tmpU = tmpAU->data + A_size * A_size; h2pack->krnl_eval( nn_coord->data, num_i, num_i, nn_coord->data, num_i, num_i, h2pack->krnl_param, tmpA, A_size ); for (int j = 0; j < A_size; j++) tmpA[j * A_size + j] += shift; // tmpU = [zeros(krnl_dim * (num_i - 1), krnl_dim); eye(krnl_dim)]; memset(tmpU, 0, sizeof(DTYPE) * A_size * krnl_dim); int offset = krnl_dim * (num_i - 1); for (int j = 0; j < krnl_dim; j++) tmpU[(offset + j) * krnl_dim + j] = 1.0; // tmpY = (tmpA \ tmpU)'; if (A_size == 1) { tmpU[0] /= tmpA[0]; } else { H2P_int_vec_set_capacity(col_idx, A_size); int *ipiv = col_idx->data; LAPACK_GETRF(LAPACK_ROW_MAJOR, A_size, A_size, tmpA, A_size, ipiv); LAPACK_GETRS(LAPACK_ROW_MAJOR, 'N', A_size, krnl_dim, tmpA, A_size, ipiv, tmpU, krnl_dim); } H2P_dense_mat_resize(tmpYDL, krnl_dim, A_size + 2 * krnl_dim); DTYPE *tmpY = tmpYDL->data; DTYPE *tmpD = tmpY + krnl_dim * A_size; DTYPE *tmpL = tmpD + krnl_dim * krnl_dim; if (krnl_dim == 1) { DTYPE coef = 1.0 / sqrt(tmpU[A_size - 1]); for (int j = 0; j < A_size; j++) tmpY[j] = tmpU[j] * coef; } else { H2P_transpose_dmat(1, A_size, krnl_dim, tmpU, krnl_dim, tmpY, A_size); // tmpD = tmpY(:, end-krnl_dim+1:end); DTYPE *tmpY_src = tmpY + (A_size - krnl_dim); copy_matrix_block(sizeof(DTYPE), krnl_dim, krnl_dim, tmpY_src, A_size, tmpD, krnl_dim); // tmpL = 0.5 * (tmpL + tmpL'); for (int j = 0; j < krnl_dim; j++) { for (int k = 0; k < krnl_dim; k++) { int idx_jk = j * krnl_dim + k; int idx_kj = k * krnl_dim + j; tmpL[idx_jk] = 0.5 * (tmpD[idx_jk] + tmpD[idx_kj]); } } // tmpL = chol(tmpL, 
'lower'); int info; info = LAPACK_POTRF(LAPACK_ROW_MAJOR, 'L', krnl_dim, tmpL, krnl_dim); ASSERT_PRINTF(info == 0, "Point %d Cholesky factorization for tmpL returned %d\n", i, info); // tmpY = linsolve(tmpL, tmpY, struct('LT', true)); CBLAS_TRSM( CblasRowMajor, CblasLeft, CblasLower, CblasNoTrans, CblasNonUnit, krnl_dim, A_size, 1.0, tmpL, krnl_dim, tmpY, A_size ); } // End of "if (krnl_dim == 1)" H2P_int_vec_set_capacity(col_idx, num_i * krnl_dim); for (int j = 0; j < num_i; j++) { int *col_idx_j = col_idx->data + j * krnl_dim; for (int k = 0; k < krnl_dim; k++) col_idx_j[k] = krnl_dim * nn_idx->data[j] + k; } int cnt = 0; for (int j = 0; j < krnl_dim; j++) { for (int k = 0; k < A_size; k++) { row[row_i_spos + cnt] = krnl_dim * i + j; col[row_i_spos + cnt] = col_idx->data[k]; val[row_i_spos + cnt] = tmpY[j * A_size + k]; cnt++; } } // End of j loop } // End of i loop } // End of "#pragma omp parallel" CSRP_mat_p G = NULL, Gt = NULL; CSRP_init_with_COO_mat(mat_size, mat_size, nnz, row, col, val, &G); CSRP_init_with_COO_mat(mat_size, mat_size, nnz, col, row, val, &Gt); CSRP_partition_multithread(G, n_thread, n_thread); CSRP_partition_multithread(Gt, n_thread, n_thread); CSRP_optimize_NUMA(G); CSRP_optimize_NUMA(Gt); size_t pmt_idx_bytes = sizeof(int) * mat_size; size_t pmt_vec_bytes = sizeof(DTYPE) * mat_size; int *fwd_pmt = (int*) malloc(pmt_idx_bytes); int *bwd_pmt = (int*) malloc(pmt_idx_bytes); DTYPE *pmt_b = (DTYPE*) malloc(pmt_vec_bytes); DTYPE *pmt_x = (DTYPE*) malloc(pmt_vec_bytes); ASSERT_PRINTF( fwd_pmt != NULL && bwd_pmt != NULL && pmt_b != NULL && pmt_x != NULL, "Failed to allocate vector permutation arrays for FSAI preconditioner\n" ); memcpy(fwd_pmt, h2pack->fwd_pmt_idx, pmt_idx_bytes); memcpy(bwd_pmt, h2pack->bwd_pmt_idx, pmt_idx_bytes); size_t total_msize = 2.0 * ((sizeof(DTYPE) + sizeof(int)) * (nnz + mat_size)); total_msize += 2 * (pmt_idx_bytes + pmt_vec_bytes); free(knn); free(row); free(col); free(val); free(row_ptr); double et = 
get_wtime_sec(); DTYPE *x0 = (DTYPE*) malloc(sizeof(DTYPE) * mat_size); ASSERT_PRINTF(x0 != NULL, "Failed to allocate working array of size %d for FSAI preconditioner\n", mat_size); precond->mat_size = mat_size; precond->x0 = x0; precond->pmt_b = pmt_b; precond->pmt_x = pmt_x; precond->fwd_pmt = fwd_pmt; precond->bwd_pmt = bwd_pmt; precond->G = G; precond->Gt = Gt; precond->t_build = et - st; precond->t_apply = 0.0; precond->n_apply = 0; precond->mem_MB = (double) total_msize / 1048576.0; *precond_ = precond; } // Apply FSAI preconditioner, x := M_{FSAI}^{-1} * b void FSAI_precond_apply(FSAI_precond_p precond, const DTYPE *b, DTYPE *x) { if (precond == NULL) return; double st = get_wtime_sec(); gather_vector_elements(sizeof(DTYPE), precond->mat_size, precond->fwd_pmt, b, precond->pmt_b); CSRP_SpMV(precond->G, precond->pmt_b, precond->x0); CSRP_SpMV(precond->Gt, precond->x0, precond->pmt_x); gather_vector_elements(sizeof(DTYPE), precond->mat_size, precond->bwd_pmt, precond->pmt_x, x); double et = get_wtime_sec(); precond->t_apply += et - st; precond->n_apply++; } // Destroy a FSAI_precond structure void FSAI_precond_destroy(FSAI_precond_p *precond_) { FSAI_precond_p precond = *precond_; if (precond == NULL) return; CSRP_free(&precond->G); CSRP_free(&precond->Gt); free(precond->pmt_b); free(precond->pmt_x); free(precond->fwd_pmt); free(precond->bwd_pmt); free(precond->G); free(precond->Gt); free(precond); *precond_ = NULL; } // Print statistic info of a FSAI_precond structure void FSAI_precond_print_stat(FSAI_precond_p precond) { if (precond == NULL) return; printf( "FSAI precond used memory = %.2lf MB, build time = %.3lf sec, apply avg time = %.3lf sec\n", precond->mem_MB, precond->t_build, precond->t_apply / (double) precond->n_apply ); }
openmp.c
// g++ -fopenmp openmp.c -o openmp.out -O3 && ./openmp.out 10 100 #include <stdio.h> #include <stdlib.h> #include <cstdlib> #include <time.h> #include <omp.h> #include "config.h" #include <string.h> void showDistances(int matrix[], int n); void populateMatrix(int *matrix, int n, int density); void floydWarshall(int* matrix, uint n, int threads); int main(int argc, char** argv) { uint n, density, threads; if(argc <= 3) { n = DEFAULT; density = 100; threads = omp_get_max_threads(); } else { n = atoi(argv[1]); density = atoi(argv[2]); threads = atoi(argv[3]); } int* matrix; matrix = (int*) malloc(n * n * sizeof(int)); populateMatrix(matrix, n, density); printf("*** Adjacency matrix:\n"); showDistances(matrix, n); struct timespec start, end; long long accum; clock_gettime(CLOCK_MONOTONIC_RAW, &start); floydWarshall(matrix, n, threads); clock_gettime(CLOCK_MONOTONIC_RAW, &end); accum = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000; printf("*** The solution is:\n"); showDistances(matrix, n); printf("[SEQUENTIAL] Total elapsed time %lld ns\n", accum); free(matrix); return 0; } void floydWarshall(int* matrix, uint n, int threads) { int i, j, k; int *rowK = (int*)malloc(sizeof(int)*n); #pragma omp parallel num_threads(threads) private(k) shared(matrix, rowK) for (k = 0; k < n; k++) { #pragma omp master memcpy(rowK, matrix + (k * n), sizeof(int)*n); #pragma omp for private(i, j) schedule(static) for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { int newPath = matrix[i * n + k] + rowK[j]; if (matrix[i * n + j] > newPath) { matrix[i * n + j] = newPath; } } } } } void showDistances(int matrix[], int n) { if(PRINTABLE) { int i, j; printf(" "); for(i = 0; i < n; i++) { printf("[%d] ", i); } printf("\n"); for(i = 0; i < n; i++) { printf("[%d]", i); for(j = 0; j < n; j++) { if(matrix[i * n + j] == INF) { printf(" inf"); } else { printf("%5d", matrix[i * n + j]); } } printf("\n"); } printf("\n"); } } void populateMatrix(int *matrix, int n, int 
density) { uint i, j, value; srand(42); for (i = 0; i < n; i++) { for (j = 0; j < n; j++){ if(i == j) { matrix[i*n+j] = 0; } else { value = 1 + rand() % MAX; if(value > density) { matrix[i*n+j] = INF; } else { matrix[i*n+j] = value; } } } } }
omp_loop.h
// -*- C++ -*- // Copyright (C) 2007-2016 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the terms // of the GNU General Public License as published by the Free Software // Foundation; either version 3, or (at your option) any later // version. // This library is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. /** @file parallel/omp_loop.h * @brief Parallelization of embarrassingly parallel execution by * means of an OpenMP for loop. * This file is a GNU parallel extension to the Standard C++ Library. */ // Written by Felix Putze. #ifndef _GLIBCXX_PARALLEL_OMP_LOOP_H #define _GLIBCXX_PARALLEL_OMP_LOOP_H 1 #include <omp.h> #include <parallel/settings.h> #include <parallel/basic_iterator.h> #include <parallel/base.h> namespace __gnu_parallel { /** @brief Embarrassingly parallel algorithm for random access * iterators, using an OpenMP for loop. * * @param __begin Begin iterator of element sequence. * @param __end End iterator of element sequence. * @param __o User-supplied functor (comparator, predicate, adding * functor, etc.). * @param __f Functor to @a process an element with __op (depends on * desired functionality, e. g. for std::for_each(), ...). 
* @param __r Functor to @a add a single __result to the already
 * processed elements (depends on functionality).
 * @param __base Base value for reduction.
 * @param __output Pointer to position where final result is written to
 * @param __bound Maximum number of elements processed (e. g. for
 * std::count_n()).
 * @return User-supplied functor (that may contain a part of the result).
 */
 template<typename _RAIter,
          typename _Op,
          typename _Fu,
          typename _Red,
          typename _Result>
   _Op
   __for_each_template_random_access_omp_loop(_RAIter __begin, _RAIter __end,
                                              _Op __o, _Fu& __f, _Red __r,
                                              _Result __base,
                                              _Result& __output,
     typename std::iterator_traits<_RAIter>::difference_type __bound)
   {
     typedef typename std::iterator_traits<_RAIter>::difference_type
       _DifferenceType;

     _DifferenceType __length = __end - __begin;
     // Never spawn more threads than elements.
     _ThreadIndex __num_threads = __gnu_parallel::min<_DifferenceType>
       (__get_max_threads(), __length);

     _Result *__thread_results;

#   pragma omp parallel num_threads(__num_threads)
     {
       // One thread allocates the per-thread partial-result slots; the
       // implicit barrier at the end of "single" makes __thread_results
       // (and the possibly reduced __num_threads) visible to all threads
       // before any of them writes to it.
#     pragma omp single
       {
         __num_threads = omp_get_num_threads();
         __thread_results = new _Result[__num_threads];

         for (_ThreadIndex __i = 0; __i < __num_threads; ++__i)
           __thread_results[__i] = _Result();
       }

       _ThreadIndex __iam = omp_get_thread_num();

       // Each thread folds its chunk into its own slot; dynamic scheduling
       // balances uneven per-element work.
#     pragma omp for schedule(dynamic, _Settings::get().workstealing_chunk_size)
       for (_DifferenceType __pos = 0; __pos < __length; ++__pos)
         __thread_results[__iam] = __r(__thread_results[__iam],
                                       __f(__o, __begin+__pos));
     } //parallel

     // Sequential reduction of the per-thread partial results.
     for (_ThreadIndex __i = 0; __i < __num_threads; ++__i)
       __output = __r(__output, __thread_results[__i]);

     delete [] __thread_results;

     // Points to last element processed (needed as return value for
     // some algorithms like transform).
     __f._M_finish_iterator = __begin + __length;

     return __o;
   }

} // end namespace

#endif /* _GLIBCXX_PARALLEL_OMP_LOOP_H */
GB_binop__pow_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_uint16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pow_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__pow_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_uint16) // C=scalar+B GB (_bind1st__pow_uint16) // C=scalar+B' GB (_bind1st_tran__pow_uint16) // C=A+scalar GB (_bind2nd__pow_uint16) // C=A'+scalar GB (_bind2nd_tran__pow_uint16) // C type: uint16_t // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 
// 0   (continuation of "B pattern?" above: the values of B are used)

// BinaryOp: cij = GB_pow_uint16 (aij, bij)

// NOTE: this file is auto-generated (see the header comment); the macros
// below configure the shared template files included by the functions that
// follow.  Keep the macro bodies byte-identical to the generator output.

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_pow_uint16 (x, y) ;

// true if the binop must be flipped
// (pow is not commutative, so the flipped z=f(y,x) case is handled
// explicitly in the emult kernels below)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_POW || GxB_NO_UINT16 || GxB_NO_POW_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__pow_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pow_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pow_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t 
*restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pow_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__pow_uint16) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__pow_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__pow_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__pow_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__pow_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = GB_pow_uint16 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__pow_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_pow_uint16 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint16 (x, aij) ; \ } GrB_Info GB (_bind1st_tran__pow_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint16 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__pow_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
ep_single.c
/*--------------------------------------------------------------------

  NAS Parallel Benchmarks 2.3 OpenMP C versions - EP

  This benchmark is an OpenMP C version of the NPB EP code.

  The OpenMP C versions are developed by RWCP and derived from the serial
  Fortran versions in "NPB 2.3-serial" developed by NAS.

  Permission to use, copy, distribute and modify this software for any
  purpose with or without fee is hereby granted.
  This software is provided "as is" without express or implied warranty.

  Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp

  Information on OpenMP activities at RWCP is available at:

           http://pdplab.trc.rwcp.or.jp/pdperf/Omni/

  Information on NAS Parallel Benchmarks 2.3 is available at:

           http://www.nas.nasa.gov/NAS/NPB/

--------------------------------------------------------------------*/
/*--------------------------------------------------------------------

  Author: P. O. Frederickson
          D. H. Bailey
          A. C. Woo

  OpenMP C version: S. Satoh

--------------------------------------------------------------------*/

//#include "npb-C.h"

/* NAS Parallel Benchmarks 2.3 OpenMP C Versions */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>   /* gettimeofday / struct timeval, used by wtime() */

#if defined(_OPENMP)
#include <omp.h>
#endif /* _OPENMP */

typedef int boolean;
typedef struct { double real; double imag; } dcomplex;

#define TRUE    1
#define FALSE   0

#define max(a,b) (((a) > (b)) ? (a) : (b))
#define min(a,b) (((a) < (b)) ? (a) : (b))
#define pow2(a) ((a)*(a))

#define get_real(c) c.real
#define get_imag(c) c.imag
#define cadd(c,a,b) (c.real = a.real + b.real, c.imag = a.imag + b.imag)
#define csub(c,a,b) (c.real = a.real - b.real, c.imag = a.imag - b.imag)
#define cmul(c,a,b) (c.real = a.real * b.real - a.imag * b.imag, \
                     c.imag = a.real * b.imag + a.imag * b.real)
#define crmul(c,a,b) (c.real = a.real * b, c.imag = a.imag * b)

extern double randlc(double *, double);
extern void vranlc(int, double *, double, double *);
extern void timer_clear(int);
extern void timer_start(int);
extern void timer_stop(int);
extern double timer_read(int);

extern void c_print_results(char *name, char cclass, int n1, int n2, int n3,
                            int niter, int nthreads, double t, double mops,
                            char *optype, int passed_verification,
                            char *npbversion, char *compiletime, char *cc,
                            char *clink, char *c_lib, char *c_inc,
                            char *cflags, char *clinkflags, char *rand);

//#include "npbparams.h"
/******************/
/* default values */
/******************/
#ifndef CLASS
#define CLASS 'B'
#endif

#if CLASS == 'S'
/* CLASS = S */
/*
c  This file is generated automatically by the setparams utility.
c  It sets the number of processors and the classc of the NPB
c  in this directory. Do not modify it by hand.
*/
#define CLASS 'S'
#define M 24
#define CONVERTDOUBLE FALSE
#endif

#if CLASS == 'W'
/* CLASS = W */
/*
c  This file is generated automatically by the setparams utility.
c  It sets the number of processors and the classc of the NPB
c  in this directory. Do not modify it by hand.
*/
#define CLASS 'W'
#define M 25
#define CONVERTDOUBLE FALSE
#endif

#if CLASS == 'A'
/* CLASS = A */
/*
c  This file is generated automatically by the setparams utility.
c  It sets the number of processors and the classc of the NPB
c  in this directory. Do not modify it by hand.
*/
#define CLASS 'A'
#define M 28
#define CONVERTDOUBLE FALSE
#endif

#if CLASS == 'B'
/* CLASS = B */
/*
c  This file is generated automatically by the setparams utility.
c It sets the number of processors and the classc of the NPB c in this directory. Do not modify it by hand. */ #define CLASS 'B' #define M 30 #define CONVERTDOUBLE FALSE #endif #if CLASS == 'C' /* CLASS = C */ /* c This file is generated automatically by the setparams utility. c It sets the number of processors and the classc of the NPB c in this directory. Do not modify it by hand. */ #define CLASS 'C' #define M 32 #define CONVERTDOUBLE FALSE #endif #define COMPILETIME "28 Oct 2014" #define NPBVERSION "2.3" #define CS1 "gcc" #define CS2 "$(CC)" #define CS3 "(none)" #define CS4 "-I../common" #define CS5 "-fopenmp -O2" #define CS6 "-lm -fopenmp" #define CS7 "randdp" /* parameters */ #define MK 16 #define MM (M - MK) #define NN (1 << MM) #define NK (1 << MK) #define NQ 10 #define EPSILON 1.0e-8 #define A 1220703125.0 #define S 271828183.0 #define TIMERS_ENABLED FALSE /* global variables */ /* common /storage/ */ static double x[2*NK]; #pragma omp threadprivate(x) static double q[NQ]; /*-------------------------------------------------------------------- program EMBAR c-------------------------------------------------------------------*/ /* c This is the serial version of the APP Benchmark 1, c the "embarassingly parallel" benchmark. c c M is the Log_2 of the number of complex pairs of uniform (0, 1) random c numbers. MK is the Log_2 of the size of each batch of uniform random c numbers. MK can be set for convenience on a given system, since it does c not affect the results. */ int main(int argc, char **argv) { double Mops, t1, t2, t3, t4, x1, x2, sx, sy, tm, an, tt, gc; double dum[3] = { 1.0, 1.0, 1.0 }; int np, ierr, node, no_nodes, i, ik, kk, l, k, nit, ierrcode, no_large_nodes, np_add, k_offset, j; int nthreads = 1; boolean verified; char size[13+1]; /* character*13 */ /* c Because the size of the problem is too large to store in a 32-bit c integer for some classes, we put it into a string (for printing). 
c Have to strip off the decimal point put in there by the floating c point print statement (internal file) */ printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version" " - EP Benchmark\n"); sprintf(size, "%12.0f", pow(2.0, M+1)); for (j = 13; j >= 1; j--) { if (size[j] == '.') size[j] = ' '; } printf(" Number of random numbers generated: %13s\n", size); verified = FALSE; /* c Compute the number of "batches" of random number pairs generated c per processor. Adjust if the number of processors does not evenly c divide the total number */ np = NN; /* c Call the random number generator functions and initialize c the x-array to reduce the effects of paging on the timings. c Also, call all mathematical functions that are used. Make c sure these initializations cannot be eliminated as dead code. */ vranlc(0, &(dum[0]), dum[1], &(dum[2])); dum[0] = randlc(&(dum[1]), dum[2]); for (i = 0; i < 2*NK; i++) x[i] = -1.0e99; Mops = log(sqrt(fabs(max(1.0, 1.0)))); timer_clear(1); timer_clear(2); timer_clear(3); timer_start(1); vranlc(0, &t1, A, x); /* Compute AN = A ^ (2 * NK) (mod 2^46). */ t1 = A; for ( i = 1; i <= MK+1; i++) { t2 = randlc(&t1, t1); } an = t1; tt = S; gc = 0.0; sx = 0.0; sy = 0.0; for ( i = 0; i <= NQ - 1; i++) { q[i] = 0.0; } /* c Each instance of this loop may be performed independently. We compute c the k offsets separately to take into account the fact that some nodes c have more numbers to generate than others */ k_offset = -1; #pragma omp parallel copyin(x) { double t1, t2, t3, t4, x1, x2; int kk, i, ik, l; double qq[NQ]; /* private copy of q[0:NQ-1] */ for (i = 0; i < NQ; i++) qq[i] = 0.0; #pragma omp for reduction(+:sx,sy) schedule(static) for (k = 1; k <= np; k++) { kk = k_offset + k; t1 = S; t2 = an; /* Find starting seed t1 for this kk. */ for (i = 1; i <= 100; i++) { ik = kk / 2; if (2 * ik != kk) t3 = randlc(&t1, t2); if (ik == 0) break; t3 = randlc(&t2, t2); kk = ik; } /* Compute uniform pseudorandom numbers. 
*/ if (TIMERS_ENABLED == TRUE) timer_start(3); vranlc(2*NK, &t1, A, x-1); if (TIMERS_ENABLED == TRUE) timer_stop(3); /* c Compute Gaussian deviates by acceptance-rejection method and c tally counts in concentric square annuli. This loop is not c vectorizable. */ if (TIMERS_ENABLED == TRUE) timer_start(2); for ( i = 0; i < NK; i++) { x1 = 2.0 * x[2*i] - 1.0; x2 = 2.0 * x[2*i+1] - 1.0; t1 = pow2(x1) + pow2(x2); if (t1 <= 1.0) { t2 = sqrt(-2.0 * log(t1) / t1); t3 = (x1 * t2); /* Xi */ t4 = (x2 * t2); /* Yi */ l = max(fabs(t3), fabs(t4)); qq[l] += 1.0; /* counts */ sx = sx + t3; /* sum of Xi */ sy = sy + t4; /* sum of Yi */ } } if (TIMERS_ENABLED == TRUE) timer_stop(2); } #pragma omp critical { for (i = 0; i <= NQ - 1; i++) q[i] += qq[i]; } #if defined(_OPENMP) #pragma omp master nthreads = omp_get_num_threads(); #endif /* _OPENMP */ } /* end of parallel region */ for (i = 0; i <= NQ-1; i++) { gc = gc + q[i]; } timer_stop(1); tm = timer_read(1); nit = 0; if (M == 24) { if((fabs((sx- (-3.247834652034740e3))/sx) <= EPSILON) && (fabs((sy- (-6.958407078382297e3))/sy) <= EPSILON)) { verified = TRUE; } } else if (M == 25) { if ((fabs((sx- (-2.863319731645753e3))/sx) <= EPSILON) && (fabs((sy- (-6.320053679109499e3))/sy) <= EPSILON)) { verified = TRUE; } } else if (M == 28) { { if ((fabs((sx- (-4.295875165629892e3))/sx) <= EPSILON) && (fabs((sy- (-1.580732573678431e4))/sy) <= EPSILON)) { verified = TRUE; } printf("Debug: 231, sx is:%f, sy is:%f\n",sx,sy); } } else if (M == 30) { if ((fabs((sx- (4.033815542441498e4))/sx) <= EPSILON) && (fabs((sy- (-2.660669192809235e4))/sy) <= EPSILON)) { verified = TRUE; } } else if (M == 32) { if ((fabs((sx- (4.764367927995374e4))/sx) <= EPSILON) && (fabs((sy- (-8.084072988043731e4))/sy) <= EPSILON)) { verified = TRUE; } } Mops = pow(2.0, M+1)/tm/1000000.0; printf("EP Benchmark Results: \n" "CPU Time = %10.4f\n" "N = 2^%5d\n" "No. 
Gaussian Pairs = %15.0f\n" "Sums = %25.15e %25.15e\n" "Counts:\n", tm, M, gc, sx, sy); for (i = 0; i <= NQ-1; i++) { printf("%3d %15.0f\n", i, q[i]); } c_print_results("EP", CLASS, M+1, 0, 0, nit, nthreads, tm, Mops, "Random numbers generated", verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, CS7); if (TIMERS_ENABLED == TRUE) { printf("Total time: %f", timer_read(1)); printf("Gaussian pairs: %f", timer_read(2)); printf("Random numbers: %f", timer_read(3)); } } /* cat ./common/c_print_results.c */ /*****************************************************************/ /****** C _ P R I N T _ R E S U L T S ******/ /*****************************************************************/ void c_print_results( char *name, char cclass, int n1, int n2, int n3, int niter, int nthreads, double t, double mops, char *optype, int passed_verification, char *npbversion, char *compiletime, char *cc, char *clink, char *c_lib, char *c_inc, char *cflags, char *clinkflags, char *rand) { char *evalue="1000"; printf( "\n\n %s Benchmark Completed\n", name ); printf( " Class = %c\n", cclass ); if( n2 == 0 && n3 == 0 ) printf( " Size = %12d\n", n1 ); /* as in IS */ else printf( " Size = %3dx%3dx%3d\n", n1,n2,n3 ); printf( " Iterations = %12d\n", niter ); printf( " Threads = %12d\n", nthreads ); printf( " Time in seconds = %12.2f\n", t ); printf( " Mop/s total = %12.2f\n", mops ); printf( " Operation type = %24s\n", optype); if( passed_verification ) printf( " Verification = SUCCESSFUL\n" ); else printf( " Verification = UNSUCCESSFUL\n" ); printf( " Version = %12s\n", npbversion ); printf( " Compile date = %12s\n", compiletime ); printf( "\n Compile options:\n" ); printf( " CC = %s\n", cc ); printf( " CLINK = %s\n", clink ); printf( " C_LIB = %s\n", c_lib ); printf( " C_INC = %s\n", c_inc ); printf( " CFLAGS = %s\n", cflags ); printf( " CLINKFLAGS = %s\n", clinkflags ); printf( " RAND = %s\n", rand ); #ifdef SMP evalue = getenv("MP_SET_NUMTHREADS"); printf( " MULTICPUS = %s\n", 
evalue ); #endif /* printf( "\n\n" ); printf( " Please send the results of this run to:\n\n" ); printf( " NPB Development Team\n" ); printf( " Internet: npb@nas.nasa.gov\n \n" ); printf( " If email is not available, send this to:\n\n" ); printf( " MS T27A-1\n" ); printf( " NASA Ames Research Center\n" ); printf( " Moffett Field, CA 94035-1000\n\n" ); printf( " Fax: 415-604-3957\n\n" );*/ } /* cat ./common/c_timers.c */ /* #include "wtime.h" #if defined(IBM) #define wtime wtime #elif defined(CRAY) #define wtime WTIME #else #define wtime wtime_ #endif */ /* Prototype */ void wtime( double * ); /*****************************************************************/ /****** E L A P S E D _ T I M E ******/ /*****************************************************************/ double elapsed_time( void ) { double t; wtime( &t ); return( t ); } double start[64], elapsed[64]; /*****************************************************************/ /****** T I M E R _ C L E A R ******/ /*****************************************************************/ void timer_clear( int n ) { elapsed[n] = 0.0; } /*****************************************************************/ /****** T I M E R _ S T A R T ******/ /*****************************************************************/ void timer_start( int n ) { start[n] = elapsed_time(); } /*****************************************************************/ /****** T I M E R _ S T O P ******/ /*****************************************************************/ void timer_stop( int n ) { double t, now; now = elapsed_time(); t = now - start[n]; elapsed[n] += t; } /*****************************************************************/ /****** T I M E R _ R E A D ******/ /*****************************************************************/ double timer_read( int n ) { return( elapsed[n] ); } void wtime(double *t) { static int sec = -1; struct timeval tv; gettimeofday(&tv, (void *)0); // gettimeofday(&tv, (struct timezone *)0); if (sec < 0) sec = tv.tv_sec; *t = 
(tv.tv_sec - sec) + 1.0e-6*tv.tv_usec;
}

// common/c_randdp.c
//
// Constants for splitting a double into 23-bit halves:
//   r23 = 2^-23, r46 = 2^-46, t23 = 2^23, t46 = 2^46.
// Spelled out as literal products so they are exact compile-time
// constants when USE_POW is not defined.
#if defined(USE_POW)
#define r23 pow(0.5, 23.0)
#define r46 (r23*r23)
#define t23 pow(2.0, 23.0)
#define t46 (t23*t23)
#else
#define r23 (0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5)
#define r46 (r23*r23)
#define t23 (2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0)
#define t46 (t23*t23)
#endif

/*
 * randlc: return a uniform pseudorandom double in (0, 1) from the linear
 * congruential generator
 *
 *     x_{k+1} = a * x_k  (mod 2^46)
 *
 * A and *x must be odd double-precision integers in (1, 2^46); the period
 * is 2^44.  The returned value is 2^-46 * x_{k+1}, and *x is updated to
 * the new seed so that successive calls continue the sequence.  The 46-bit
 * modular product is carried out in 23-bit halves so the routine is exact
 * on any machine with at least 48 mantissa bits.  (D. H. Bailey, 1990.)
 */
double randlc (double *x, double a) {
    double t1,t2,t3,t4,a1,a2,x1,x2,z;

    /* Break A into two parts such that A = 2^23 * A1 + A2. */
    t1 = r23 * a;
    a1 = (int)t1;
    a2 = a - t23 * a1;

    /* Break X into two parts such that X = 2^23 * X1 + X2, compute
       Z = A1 * X2 + A2 * X1 (mod 2^23), and then
       X = 2^23 * Z + A2 * X2 (mod 2^46). */
    t1 = r23 * (*x);
    x1 = (int)t1;
    x2 = (*x) - t23 * x1;
    t1 = a1 * x2 + a2 * x1;
    t2 = (int)(r23 * t1);
    z = t1 - t23 * t2;
    t3 = t23 * z + a2 * x2;
    t4 = (int)(r46 * t3);
    (*x) = t3 - t46 * t4;

    return (r46 * (*x));
}

/*
 * vranlc: generate n uniform pseudorandom doubles in (0, 1) using the
 * same generator as randlc, storing them in y[1..n] (1-based: callers in
 * this file pass a pointer offset by -1).  *x_seed is updated to the new
 * seed so subsequent calls continue the sequence; with n == 0 only the
 * seed bookkeeping is performed.  Not vectorizable: each step depends on
 * the previous seed.
 */
void vranlc (int n, double *x_seed, double a, double* y) {
    int i;
    double x,t1,t2,t3,t4,a1,a2,x1,x2,z;

    /* Break A into two parts such that A = 2^23 * A1 + A2. */
    t1 = r23 * a;
    a1 = (int)t1;
    a2 = a - t23 * a1;
    x = *x_seed;

    /* Generate N results; same 23-bit-split modular multiply as randlc. */
    for (i = 1; i <= n; i++) {
        t1 = r23 * x;
        x1 = (int)t1;
        x2 = x - t23 * x1;
        t1 = a1 * x2 + a2 * x1;
        t2 = (int)(r23 * t1);
        z = t1 - t23 * t2;
        t3 = t23 * z + a2 * x2;
        t4 = (int)(r46 * t3);
        x = t3 - t46 * t4;
        y[i] = r46 * x;
    }

    *x_seed = x;
}
zhesv.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> s d c * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_tuning.h" #include "plasma_types.h" #include "plasma_workspace.h" #include <string.h> /***************************************************************************//** * * @ingroup plasma_hesv * * Solves a system of linear equations A * X = B with LTLt factorization. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * TODO: only support Lower for now * * @param[in] n * The order of the matrix A. n >= 0. * * @param[in] nrhs * The number of right hand sides, i.e., the number of * columns of the matrix B. nrhs >= 0. * * @param[in,out] A * Details of the LTL factorization of the Hermitian matrix A, * as computed by plasma_zhetrf. * * @param[in] lda * The leading dimension of the array A. * * @param[in,out] T * Details of the LU factorization of the band matrix A, as * computed by plasma_zgbtrf. * * @param[in] ldt * The leading dimension of the array T. * * @param[in] ipiv * The pivot indices used for zhetrf; for 1 <= i <= min(m,n), * row i of the matrix was interchanged with row ipiv(i). * * @param[in] ipiv2 * The pivot indices used for zgbtrf; for 1 <= i <= min(m,n), * row i of the matrix was interchanged with row ipiv(i). * * @param[in,out] B * On entry, the n-by-nrhs right hand side matrix B. * On exit, if return value = 0, the n-by-nrhs solution matrix X. * * @param[in] ldb * The leading dimension of the array B. ldb >= max(1,n). 
* ******************************************************************************* * * @retval PlasmaSuccess successful exit * @retval < 0 if -i, the i-th argument had an illegal value * ******************************************************************************* * * @sa plasma_omp_zhesv * @sa plasma_chesv * @sa plasma_dsysv * @sa plasma_ssysv * @sa plasma_zhetrf * @sa plasma_zhetrs * ******************************************************************************/ int plasma_zhesv(plasma_enum_t uplo, int n, int nrhs, plasma_complex64_t *pA, int lda, int *ipiv, plasma_complex64_t *pT, int ldt, int *ipiv2, plasma_complex64_t *pB, int ldb) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_fatal_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. if (//(uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo (Upper not supported, yet)"); return -1; } if (n < 0) { plasma_error("illegal value of n"); return -2; } if (nrhs < 0) { plasma_error("illegal value of nrhs"); return -5; } if (lda < imax(1, n)) { plasma_error("illegal value of lda"); return -7; } if (ldb < imax(1, n)) { plasma_error("illegal value of ldb"); return -10; } // quick return if (imax(n, nrhs) == 0) return PlasmaSuccess; // Tune parameters. if (plasma->tuning) plasma_tune_hetrf(plasma, PlasmaComplexDouble, n); // Set tiling parameters. int nb = plasma->nb; // Adjust max number of panel threads int max_panel_threads_gbtrf = 1; int max_panel_threads_hetrf = 1; if (plasma->max_panel_threads > 3) { max_panel_threads_gbtrf = 2; } max_panel_threads_hetrf = imax(1, plasma->max_panel_threads - max_panel_threads_gbtrf); plasma->max_panel_threads = max_panel_threads_hetrf; // Initialize barrier. plasma_barrier_init(&plasma->barrier); // Initialize tile matrix descriptors. 
plasma_desc_t A; plasma_desc_t T; plasma_desc_t B; int tku = (nb+nb+nb-1)/nb; // number of tiles in upper band (not including diagonal) int tkl = (nb+nb-1)/nb; // number of tiles in lower band (not including diagonal) int lm = (tku+tkl+1)*nb; // since we use zgetrf on panel, we pivot back within panel. // this could fill the last tile of the panel, // and we need extra NB space on the bottom int retval; retval = plasma_desc_triangular_create(PlasmaComplexDouble, uplo, nb, nb, n, n, 0, 0, n, n, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } retval = plasma_desc_general_band_create(PlasmaComplexDouble, PlasmaGeneral, nb, nb, lm, n, 0, 0, n, n, nb, nb, &T); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_band_create() failed"); return retval; } retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, n, nrhs, 0, 0, n, nrhs, &B); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); return retval; } // Create workspace. plasma_desc_t W; int tot = 3; int ldw = (1+(4+tot)*A.mt)*nb; // block column retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, ldw, nb, 0, 0, ldw, nb, &W); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } // Initialize sequence. plasma_sequence_t sequence; retval = plasma_sequence_init(&sequence); // Initialize request. plasma_request_t request; retval = plasma_request_init(&request); // Initialize data. memset(T.matrix, 0, ldt*n*sizeof(plasma_complex64_t)); memset(W.matrix, 0, ldw*nb*sizeof(plasma_complex64_t)); for (int i = 0; i < nb; i++) ipiv[i] = 1+i; // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. 
plasma_omp_ztr2desc(pA, lda, A, &sequence, &request); plasma_omp_zpb2desc(pT, ldt, T, &sequence, &request); plasma_omp_zge2desc(pB, ldb, B, &sequence, &request); } // implicit synchronization #pragma omp parallel #pragma omp master { // Call the tile async function. plasma_omp_zhesv(uplo, A, ipiv, T, ipiv2, B, W, &sequence, &request); } // implicit synchronization #pragma omp parallel #pragma omp master { // Translate back to LAPACK layout. plasma_omp_zdesc2ge(B, pB, ldb, &sequence, &request); } // implicit synchronization // Free matrix A in tile layout. plasma_desc_destroy(&A); plasma_desc_destroy(&T); plasma_desc_destroy(&B); plasma_desc_destroy(&W); // Return status. int status = sequence.status; return status; } /***************************************************************************//** * * @ingroup plasma_hesv * * Solves a system of linear equations using previously * computed factorization. * Non-blocking tile version of plasma_zhesv(). * May return before the computation is finished. * Operates on matrices stored by tiles. * All matrices are passed through descriptors. * All dimensions are taken from the descriptors. * Allows for pipelining of operations at runtime. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * * @param[in] A * The triangular factor U or L from the Cholesky factorization * A = U^H*U or A = L*L^H, computed by plasma_zpotrf. * * @param[in,out] B * On entry, the n-by-nrhs right hand side matrix B. * On exit, if return value = 0, the n-by-nrhs solution matrix X. * * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). Check * the sequence->status for errors. * * @param[out] request * Identifies this function call (for exception handling purposes). 
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values.  The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_zhesv
 * @sa plasma_omp_zhesv
 * @sa plasma_omp_chesv
 * @sa plasma_omp_dsysv
 * @sa plasma_omp_ssysv
 * @sa plasma_omp_zhetrf
 * @sa plasma_omp_zhetrs
 *
 ******************************************************************************/
void plasma_omp_zhesv(plasma_enum_t uplo,
                      plasma_desc_t A, int *ipiv,
                      plasma_desc_t T, int *ipiv2,
                      plasma_desc_t B,
                      plasma_desc_t W,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    // NOTE(review): T and W are not desc-checked here, only A and B —
    // confirm whether that is intentional.
    if (//(uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo (Upper not supported, yet)");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        // NOTE(review): plasma_request_fail is handed the NULL sequence here;
        // verify that plasma_request_fail tolerates sequence == NULL.
        plasma_fatal_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.n == 0 || B.n == 0)
        return;

    // Call the parallel functions.
    // Factor A = L*T*L^H (Aasen), then LU-factor the band matrix T.
    plasma_pzhetrf_aasen(uplo, A, ipiv, T, W, sequence, request);
    plasma_pzgbtrf(T, ipiv2, sequence, request);

    // dependency on ipiv
    #pragma omp taskwait
    if (uplo == PlasmaLower) {
        plasma_desc_t vA;
        plasma_desc_t vB;

        // forward-substitution with L
        // vA/vB view the trailing part of A/B below the first block row.
        if (A.m > A.nb) {
            vA = plasma_desc_view(A, A.nb, 0, A.m-A.nb, A.n-A.nb);
            vB = plasma_desc_view(B, B.nb, 0, B.m-B.nb, B.n);

            plasma_pzgeswp(PlasmaRowwise, B, ipiv, 1, sequence, request);
            #pragma omp taskwait
            plasma_pztrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
                          1.0, vA,
                               vB,
                          sequence, request);
        }

        // solve with band matrix T (L then U factors of its LU)
        #pragma omp taskwait
        plasma_pztbsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
                      1.0, T,
                           B,
                      ipiv2, sequence, request);
        plasma_pztbsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit,
                      1.0, T,
                           B,
                      ipiv2, sequence, request);

        // backward-substitution with L^H
        // vA/vB were set above under the same (A.m > A.nb) condition.
        if (A.m > A.nb) {
            plasma_pztrsm(PlasmaLeft, PlasmaLower, PlasmaConjTrans, PlasmaUnit,
                          1.0, vA,
                               vB,
                          sequence, request);
            #pragma omp taskwait
            plasma_pzgeswp(PlasmaRowwise, B, ipiv, -1, sequence, request);
        }
    }
    else {
        // TODO: upper
    }
}
Step_TBB.h
#ifndef __Step_TBB_h__ #define __Step_TBB_h__ #include <chrono> #include "Step.h" int cal_substep_func_TBB(void* _self); class Step_TBB : public Step { protected: size_t thread_num; double new_time; double step_time_minus_tol; double next_output_time_minus_tol; bool output_not_needed, step_not_end; bool continue_cal; std::chrono::high_resolution_clock::time_point t0, t1; std::chrono::microseconds cpu_time; public: Step_TBB(const char* _name, const char* _type = "Step_TBB", CalSubstepFunc _cal_substep_func = &cal_substep_func_TBB); ~Step_TBB(); inline void set_thread_num(size_t th_num) noexcept { thread_num = th_num; } int solve() override; // this function need to be put into // #pragma omp master void continue_calculation(); void exit_calculation(); void abort_calculation(); // in microseconds inline long long get_time() const noexcept { return std::chrono::duration_cast<std::chrono::microseconds>(cpu_time).count(); } }; #endif
GB_unaryop__lnot_int8_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_int8_uint32
// op(A') function:  GB_tran__lnot_int8_uint32

// C type:   int8_t
// A type:   uint32_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x)   \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x)   \
    int8_t z = (int8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT8 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise: each entry of Ax is cast to int8_t and logical-NOT applied;
// embarrassingly parallel over the anz entries.
GrB_Info GB_unop__lnot_int8_uint32
(
    int8_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is generated by textual inclusion of
// GB_unaryop_transpose.c, specialized by the GB_* macros defined above.
GrB_Info GB_tran__lnot_int8_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
resize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE % % R R E SS I ZZ E % % RRRR EEE SSS I ZZZ EEE % % R R E SS I ZZ E % % R R EEEEE SSSSS IIIII ZZZZZ EEEEE % % % % % % MagickCore Image Resize Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/blob.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/draw.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/magick.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel.h" #include "magick/pixel-private.h" #include "magick/option.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resize.h" #include "magick/resize-private.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" #if defined(MAGICKCORE_LQR_DELEGATE) #include <lqr.h> #endif /* Typedef declarations. */ struct _ResizeFilter { MagickRealType (*filter)(const MagickRealType,const ResizeFilter *), (*window)(const MagickRealType,const ResizeFilter *), support, /* filter region of support - the filter support limit */ window_support, /* window support, usally equal to support (expert only) */ scale, /* dimension scaling to fit window support (usally 1.0) */ blur, /* x-scale (blur-sharpen) */ coefficient[7]; /* cubic coefficents for BC-cubic filters */ size_t signature; }; /* Forward declaractions. 
*/
static MagickRealType
  I0(MagickRealType x),
  BesselOrderOne(MagickRealType),
  Sinc(const MagickRealType, const ResizeFilter *),
  SincFast(const MagickRealType, const ResizeFilter *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   F i l t e r   F u n c t i o n s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  These are the various filter and windowing functions that are provided.
%
%  They are internal to this module only.  See AcquireResizeFilterInfo() for
%  details of the access to these functions, via the GetResizeFilterSupport()
%  and GetResizeFilterWeight() API interface.
%
%  The individual filter functions have this format...
%
%     static MagickRealtype *FilterName(const MagickRealType x,
%        const MagickRealType support)
%
%  A description of each parameter follows:
%
%    o x: the distance from the sampling point generally in the range of 0 to
%      support.  The GetResizeFilterWeight() ensures this a positive value.
%
%    o resize_filter: current filter information.  This allows function to
%      access support, and possibly other pre-calculated information defining
%      the functions.
%
*/

static MagickRealType Blackman(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Blackman: 2nd order cosine windowing function:
      0.42 + 0.5 cos(pi x) + 0.08 cos(2pi x)

    Refactored by Chantal Racette and Nicolas Robidoux to one trig call and
    five flops.  (Using cos(2pi x) = 2 cos^2(pi x) - 1, the constants below
    are equivalent to the classic 0.42/0.5/0.08 form.)
  */
  const MagickRealType cosine=cos((double) (MagickPI*x));
  return(0.34+cosine*(0.5+cosine*0.16));
}

static MagickRealType Bohman(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Bohman: 2rd Order cosine windowing function:
      (1-x) cos(pi x) + sin(pi x) / pi.

    Refactored by Nicolas Robidoux to one trig call, one sqrt call, and 7
    flops, taking advantage of the fact that the support of Bohman is 1.0
    (so that we know that sin(pi x) >= 0).
  */
  const double cosine=cos((double) (MagickPI*x));
  const double sine=sqrt(1.0-cosine*cosine);
  return((MagickRealType) ((1.0-x)*cosine+(1.0/MagickPI)*sine));
}

static MagickRealType Box(const MagickRealType magick_unused(x),
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    A Box filter is a equal weighting function (all weights equal).
    DO NOT LIMIT results by support or resize point sampling will work
    as it requests points beyond its normal 0.0 support size.
  */
  return(1.0);
}

static MagickRealType Cosine(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Cosine window function:
      cos((pi/2)*x).
  */
  return((MagickRealType)cos((double) (MagickPI2*x)));
}

static MagickRealType CubicBC(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Cubic Filters using B,C determined values:
       Mitchell-Netravali  B = 1/3 C = 1/3  "Balanced" cubic spline filter
       Catmull-Rom         B = 0   C = 1/2  Interpolatory and exact on linears
       Spline              B = 1   C = 0    B-Spline Gaussian approximation
       Hermite             B = 0   C = 0    B-Spline interpolator

    See paper by Mitchell and Netravali, Reconstruction Filters in Computer
    Graphics Computer Graphics, Volume 22, Number 4, August 1988
    http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/
    Mitchell.pdf.

    Coefficents are determined from B,C values:
       P0 = (  6 - 2*B       )/6 = coeff[0]
       P1 =  0
       P2 = (-18 +12*B + 6*C )/6 = coeff[1]
       P3 = ( 12 - 9*B - 6*C )/6 = coeff[2]
       Q0 = (      8*B +24*C )/6 = coeff[3]
       Q1 = (    -12*B -48*C )/6 = coeff[4]
       Q2 = (      6*B +30*C )/6 = coeff[5]
       Q3 = (    - 1*B - 6*C )/6 = coeff[6]

    which are used to define the filter:
       P0 + P1*x + P2*x^2 + P3*x^3      0 <= x < 1
       Q0 + Q1*x + Q2*x^2 + Q3*x^3      1 <= x < 2

    which ensures function is continuous in value and derivative (slope).
    (P1 is zero, so the first branch below omits the linear term.)
  */
  if (x < 1.0)
    return(resize_filter->coefficient[0]+x*(x*
      (resize_filter->coefficient[1]+x*resize_filter->coefficient[2])));
  if (x < 2.0)
    return(resize_filter->coefficient[3]+x*(resize_filter->coefficient[4]+x*
      (resize_filter->coefficient[5]+x*resize_filter->coefficient[6])));
  return(0.0);
}

static MagickRealType Gaussian(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Gaussian with a sigma = 1/2 (or as user specified)

    Gaussian Formula (1D) ...
        exp( -(x^2)/((2.0*sigma^2) ) / (sqrt(2*PI)*sigma^2))

    Gaussian Formula (2D) ...
        exp( -(x^2+y^2)/(2.0*sigma^2) ) / (PI*sigma^2) )
    or for radius
        exp( -(r^2)/(2.0*sigma^2) ) / (PI*sigma^2) )

    Note that it is only a change from 1-d to radial form is in the
    normalization multiplier which is not needed or used when Gaussian is
    used as a filter.

    The constants are pre-calculated...
        coeff[0]=sigma;
        coeff[1]=1.0/(2.0*sigma^2);
        coeff[2]=1.0/(sqrt(2*PI)*sigma^2);
        exp( -coeff[1]*(x^2)) ) * coeff[2];

    However the multiplier coeff[1] is need, the others are informative only.

    This separates the gaussian 'sigma' value from the 'blur/support'
    settings allowing for its use in special 'small sigma' gaussians,
    without the filter 'missing' pixels because the support becomes too
    small.
  */
  return(exp((double)(-resize_filter->coefficient[1]*x*x)));
}

static MagickRealType Hanning(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Cosine window function:
      0.5+0.5*cos(pi*x).
  */
  const MagickRealType cosine=cos((double) (MagickPI*x));
  return(0.5+0.5*cosine);
}

static MagickRealType Hamming(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Offset cosine window function:
     .54 + .46 cos(pi x).
  */
  const MagickRealType cosine=cos((double) (MagickPI*x));
  return(0.54+0.46*cosine);
}

static MagickRealType Jinc(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    See Pratt "Digital Image Processing" p.97 for Jinc/Bessel functions.
    http://mathworld.wolfram.com/JincFunction.html and page 11 of
    http://www.ph.ed.ac.uk/%7ewjh/teaching/mo/slides/lens/lens.pdf

    The original "zoom" program by Paul Heckbert called this "Bessel".  But
    really it is more accurately named "Jinc".
  */
  if (x == 0.0)
    return((MagickRealType) (0.5*MagickPI));
  return(BesselOrderOne((MagickRealType) MagickPI*x)/x);
}

static MagickRealType Kaiser(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Kaiser Windowing Function (bessel windowing)

       I0( beta * sqrt( 1-x^2) ) / IO(0)

    Beta (coeff[0]) is a free value from 5 to 8 (defaults to 6.5).
    However it is typically defined in terms of Alpha*PI

    The normalization factor (coeff[1]) is not actually needed,
    but without it the filters has a large value at x=0 making it
    difficult to compare the function with other windowing functions.
  */
  return(resize_filter->coefficient[1]*I0(resize_filter->coefficient[0]*
    sqrt((double) (1.0-x*x))));
}

static MagickRealType Lagrange(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  MagickRealType
    value;

  register ssize_t
    i;

  ssize_t
    n,
    order;

  /*
    Lagrange piecewise polynomial fit of sinc:
    N is the 'order' of the lagrange function and depends on
    the overall support window size of the filter. That is:
    for a support of 2, it gives a lagrange-4 (piecewise cubic function).

    "n" identifies the piece of the piecewise polynomial.

    See Survey: Interpolation Methods, IEEE Transactions on Medical Imaging,
    Vol 18, No 11, November 1999, p1049-1075, -- Equation 27 on p1064.
  */
  if (x > resize_filter->support)
    return(0.0);
  order=(ssize_t) (2.0*resize_filter->window_support);  /* number of pieces */
  n=(ssize_t) (resize_filter->window_support+x);
  value=1.0f;
  for (i=0; i < order; i++)
    if (i != n)
      value*=(n-i-x)/(n-i);
  return(value);
}

static MagickRealType Quadratic(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    2rd order (quadratic) B-Spline approximation of Gaussian.
  */
  if (x < 0.5)
    return(0.75-x*x);
  if (x < 1.5)
    return(0.5*(x-1.5)*(x-1.5));
  return(0.0);
}

static MagickRealType Sinc(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Scaled sinc(x) function using a trig call:
      sinc(x) == sin(pi x)/(pi x).
  */
  if (x != 0.0)
    {
      const MagickRealType alpha=(MagickRealType) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  return((MagickRealType) 1.0);
}

static MagickRealType SincFast(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Approximations of the sinc function sin(pi x)/(pi x) over the interval
    [-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding
    from the Natural Sciences and Engineering Research Council of Canada.

    Although the approximations are polynomials (for low order of
    approximation) and quotients of polynomials (for higher order of
    approximation) and consequently are similar in form to Taylor polynomials
    / Pade approximants, the approximations are computed with a completely
    different technique.

    Summary: These approximations are "the best" in terms of bang (accuracy)
    for the buck (flops). More specifically: Among the polynomial quotients
    that can be computed using a fixed number of flops (with a given "+ - * /
    budget"), the chosen polynomial quotient is the one closest to the
    approximated function with respect to maximum absolute relative error
    over the given interval.

    The Remez algorithm, as implemented in the boost library's minimax
    package, is the key to the construction: http://www.boost.org/doc/libs/
    1_36_0/libs/math/doc/sf_and_dist/html/math_toolkit/backgrounders/remez.html

    If outside of the interval of approximation, use the standard trig
    formula.  (x is non-negative here per GetResizeFilterWeight, so x > 4.0
    is the only out-of-range case.)
  */
  if (x > 4.0)
    {
      const MagickRealType alpha=(MagickRealType) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  {
    /*
      The approximations only depend on x^2 (sinc is an even function).
    */
    const MagickRealType xx = x*x;
#if MAGICKCORE_QUANTUM_DEPTH <= 8
    /*
      Maximum absolute relative error 6.3e-6 < 1/2^17.
    */
    const double c0 = 0.173610016489197553621906385078711564924e-2L;
    const double c1 = -0.384186115075660162081071290162149315834e-3L;
    const double c2 = 0.393684603287860108352720146121813443561e-4L;
    const double c3 = -0.248947210682259168029030370205389323899e-5L;
    const double c4 = 0.107791837839662283066379987646635416692e-6L;
    const double c5 = -0.324874073895735800961260474028013982211e-8L;
    const double c6 = 0.628155216606695311524920882748052490116e-10L;
    const double c7 = -0.586110644039348333520104379959307242711e-12L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#elif MAGICKCORE_QUANTUM_DEPTH <= 16
    /*
      Max. abs. rel. error 2.2e-8 < 1/2^25.
    */
    const double c0 = 0.173611107357320220183368594093166520811e-2L;
    const double c1 = -0.384240921114946632192116762889211361285e-3L;
    const double c2 = 0.394201182359318128221229891724947048771e-4L;
    const double c3 = -0.250963301609117217660068889165550534856e-5L;
    const double c4 = 0.111902032818095784414237782071368805120e-6L;
    const double c5 = -0.372895101408779549368465614321137048875e-8L;
    const double c6 = 0.957694196677572570319816780188718518330e-10L;
    const double c7 = -0.187208577776590710853865174371617338991e-11L;
    const double c8 = 0.253524321426864752676094495396308636823e-13L;
    const double c9 = -0.177084805010701112639035485248501049364e-15L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9))))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#else
    /*
      Max. abs. rel. error 1.2e-12 < 1/2^39.
    */
    const double c0 = 0.173611111110910715186413700076827593074e-2L;
    const double c1 = -0.289105544717893415815859968653611245425e-3L;
    const double c2 = 0.206952161241815727624413291940849294025e-4L;
    const double c3 = -0.834446180169727178193268528095341741698e-6L;
    const double c4 = 0.207010104171026718629622453275917944941e-7L;
    const double c5 = -0.319724784938507108101517564300855542655e-9L;
    const double c6 = 0.288101675249103266147006509214934493930e-11L;
    const double c7 = -0.118218971804934245819960233886876537953e-13L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    const double d0 = 1.0L;
    const double d1 = 0.547981619622284827495856984100563583948e-1L;
    const double d2 = 0.134226268835357312626304688047086921806e-2L;
    const double d3 = 0.178994697503371051002463656833597608689e-4L;
    const double d4 = 0.114633394140438168641246022557689759090e-6L;
    const double q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4)));
    return((MagickRealType) ((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p));
#endif
  }
}

static MagickRealType Triangle(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    1st order (linear) B-Spline, bilinear interpolation, Tent 1D filter, or
    a Bartlett 2D Cone filter.  Also used as a Bartlett Windowing function
    for Sinc().
  */
  if (x < 1.0)
    return(1.0-x);
  return(0.0);
}

static MagickRealType Welsh(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Welsh parabolic windowing filter.
    (Commonly spelled "Welch" in the windowing-function literature.)
  */
  if (x < 1.0)
    return(1.0-x*x);
  return(0.0);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A c q u i r e   R e s i z e   F i l t e r                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireResizeFilter() allocates the ResizeFilter structure.
Choose from % these filters: % % FIR (Finite impulse Response) Filters % Box Triangle Quadratic % Spline Hermite Catrom % Mitchell % % IIR (Infinite impulse Response) Filters % Gaussian Sinc Jinc (Bessel) % % Windowed Sinc/Jinc Filters % Blackman Bohman Lanczos % Hann Hamming Cosine % Kaiser Welch Parzen % Bartlett % % Special Purpose Filters % Cubic SincFast LanczosSharp Lanczos2 Lanczos2Sharp % Robidoux RobidouxSharp % % The users "-filter" selection is used to lookup the default 'expert' % settings for that filter from a internal table. However any provided % 'expert' settings (see below) may override this selection. % % FIR filters are used as is, and are limited to that filters support window % (unless over-ridden). 'Gaussian' while classed as an IIR filter, is also % simply clipped by its support size (currently 1.5 or approximately 3*sigma % as recommended by many references) % % The special a 'cylindrical' filter flag will promote the default 4-lobed % Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is better % suited to this style of image resampling. This typically happens when using % such a filter for images distortions. % % SPECIFIC FILTERS: % % Directly requesting 'Sinc', 'Jinc' function as a filter will force the use % of function without any windowing, or promotion for cylindrical usage. This % is not recommended, except by image processing experts, especially as part % of expert option filter function selection. % % Two forms of the 'Sinc' function are available: Sinc and SincFast. Sinc is % computed using the traditional sin(pi*x)/(pi*x); it is selected if the user % specifically specifies the use of a Sinc filter. SincFast uses highly % accurate (and fast) polynomial (low Q) and rational (high Q) approximations, % and will be used by default in most cases. % % The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter (promoted % to Jinc-windowed Jinc for cylindrical (Elliptical Weighted Average) use). 
% The Sinc version is the most popular windowed filter. % % LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form of % the Lanczos filter, specifically designed for EWA distortion (as a % Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos % (Sinc-Sinc) filter. The chosen blur value comes as close as possible to % satisfying the following condition without changing the character of the % corresponding EWA filter: % % 'No-Op' Vertical and Horizontal Line Preservation Condition: Images with % only vertical or horizontal features are preserved when performing 'no-op" % with EWA distortion. % % The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the Lanczos % filters. The 'sharp' version uses a blur factor of 0.9549963639785485, % again chosen because the resulting EWA filter comes as close as possible to % satisfying the above condition. % % Robidoux is another filter tuned for EWA. It is the Keys cubic filter % defined by B=(228 - 108 sqrt(2))/199. Robidoux satisfies the "'No-Op' % Vertical and Horizontal Line Preservation Condition" exactly, and it % moderately blurs high frequency 'pixel-hash' patterns under no-op. It turns % out to be close to both Mitchell and Lanczos2Sharp. For example, its first % crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost the same as the % first crossing of Mitchell and Lanczos2Sharp. % % RodidouxSharp is a slightly sharper version of Rodidoux, some believe it % is too sharp. It is designed to minimize the maximum possible change in % a pixel value which is at one of the extremes (e.g., 0 or 255) under no-op % conditions. Amazingly Mitchell falls roughly between Rodidoux and % RodidouxSharp, though this seems to have been pure coincidence. % % 'EXPERT' OPTIONS: % % These artifact "defines" are not recommended for production use without % expert knowledge of resampling, filtering, and the effects they have on the % resulting resampled (resized or distorted) image. 
% % They can be used to override any and all filter default, and it is % recommended you make good use of "filter:verbose" to make sure that the % overall effect of your selection (before and after) is as expected. % % "filter:verbose" controls whether to output the exact results of the % filter selections made, as well as plotting data for graphing the % resulting filter over the filters support range. % % "filter:filter" select the main function associated with this filter % name, as the weighting function of the filter. This can be used to % set a windowing function as a weighting function, for special % purposes, such as graphing. % % If a "filter:window" operation has not been provided, a 'Box' % windowing function will be set to denote that no windowing function is % being used. % % "filter:window" Select this windowing function for the filter. While any % filter could be used as a windowing function, using the 'first lobe' of % that filter over the whole support window, using a non-windowing % function is not advisible. If no weighting filter function is specified % a 'SincFast' filter is used. % % "filter:lobes" Number of lobes to use for the Sinc/Jinc filter. This a % simpler method of setting filter support size that will correctly % handle the Sinc/Jinc switch for an operators filtering requirements. % Only integers should be given. % % "filter:support" Set the support size for filtering to the size given. % This not recommended for Sinc/Jinc windowed filters (lobes should be % used instead). This will override any 'filter:lobes' option. % % "filter:win-support" Scale windowing function to this size instead. This % causes the windowing (or self-windowing Lagrange filter) to act is if % the support window it much much larger than what is actually supplied % to the calling operator. The filter however is still clipped to the % real support size given, by the support range supplied to the caller. % If unset this will equal the normal filter support size. 
% % "filter:blur" Scale the filter and support window by this amount. A value % of > 1 will generally result in a more blurred image with more ringing % effects, while a value <1 will sharpen the resulting image with more % aliasing effects. % % "filter:sigma" The sigma value to use for the Gaussian filter only. % Defaults to '1/2'. Using a different sigma effectively provides a % method of using the filter as a 'blur' convolution. Particularly when % using it for Distort. % % "filter:b" % "filter:c" Override the preset B,C values for a Cubic filter. % If only one of these are given it is assumes to be a 'Keys' type of % filter such that B+2C=1, where Keys 'alpha' value = C. % % Examples: % % Set a true un-windowed Sinc filter with 10 lobes (very slow): % -define filter:filter=Sinc % -define filter:lobes=8 % % Set an 8 lobe Lanczos (Sinc or Jinc) filter: % -filter Lanczos % -define filter:lobes=8 % % The format of the AcquireResizeFilter method is: % % ResizeFilter *AcquireResizeFilter(const Image *image, % const FilterTypes filter_type,const MagickBooleanType cylindrical, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o filter: the filter type, defining a preset filter, window and support. % The artifact settings listed above will override those selections. % % o blur: blur the filter by this amount, use 1.0 if unknown. Image % artifact "filter:blur" will override this API call usage, including any % internal change (such as for cylindrical usage). % % o radial: use a 1D orthogonal filter (Sinc) or 2D cylindrical (radial) % filter (Jinc). % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ResizeFilter *AcquireResizeFilter(const Image *image, const FilterTypes filter,const MagickRealType blur, const MagickBooleanType cylindrical,ExceptionInfo *exception) { const char *artifact; FilterTypes filter_type, window_type; MagickRealType B, C, value; register ResizeFilter *resize_filter; /* Table Mapping given Filter, into Weighting and Windowing functions. A 'Box' windowing function means its a simble non-windowed filter. An 'SincFast' filter function could be upgraded to a 'Jinc' filter if a "cylindrical" is requested, unless a 'Sinc' or 'SincFast' filter was specifically requested by the user. WARNING: The order of this table must match the order of the FilterTypes enumeration specified in "resample.h", or the filter names will not match the filter being setup. You can check filter setups with the "filter:verbose" expert setting. */ static struct { FilterTypes filter, window; } const mapping[SentinelFilter] = { { UndefinedFilter, BoxFilter }, /* Undefined (default to Box) */ { PointFilter, BoxFilter }, /* SPECIAL: Nearest neighbour */ { BoxFilter, BoxFilter }, /* Box averaging filter */ { TriangleFilter, BoxFilter }, /* Linear interpolation filter */ { HermiteFilter, BoxFilter }, /* Hermite interpolation filter */ { SincFastFilter, HanningFilter }, /* Hanning -- cosine-sinc */ { SincFastFilter, HammingFilter }, /* Hamming -- '' variation */ { SincFastFilter, BlackmanFilter }, /* Blackman -- 2*cosine-sinc */ { GaussianFilter, BoxFilter }, /* Gaussian blur filter */ { QuadraticFilter, BoxFilter }, /* Quadratic Gaussian approx */ { CubicFilter, BoxFilter }, /* General Cubic Filter, Spline */ { CatromFilter, BoxFilter }, /* Cubic-Keys interpolator */ { MitchellFilter, BoxFilter }, /* 'Ideal' Cubic-Keys filter */ { JincFilter, BoxFilter }, /* Raw 3-lobed Jinc function */ { SincFilter, BoxFilter }, /* Raw 4-lobed Sinc function */ { SincFastFilter, BoxFilter }, /* Raw fast sinc ("Pade"-type) */ { SincFastFilter, KaiserFilter }, /* Kaiser -- 
square root-sinc */ { LanczosFilter, WelshFilter }, /* Welch -- parabolic (3 lobe) */ { SincFastFilter, CubicFilter }, /* Parzen -- cubic-sinc */ { SincFastFilter, BohmanFilter }, /* Bohman -- 2*cosine-sinc */ { SincFastFilter, TriangleFilter }, /* Bartlett -- triangle-sinc */ { LagrangeFilter, BoxFilter }, /* Lagrange self-windowing */ { LanczosFilter, LanczosFilter }, /* Lanczos Sinc-Sinc filters */ { LanczosSharpFilter, LanczosSharpFilter }, /* | these require */ { Lanczos2Filter, Lanczos2Filter }, /* | special handling */ { Lanczos2SharpFilter, Lanczos2SharpFilter }, { RobidouxFilter, BoxFilter }, /* Cubic Keys tuned for EWA */ { RobidouxSharpFilter, BoxFilter }, /* Sharper Cubic Keys for EWA */ { LanczosFilter, CosineFilter }, /* Cosine window (3 lobes) */ { SplineFilter, BoxFilter }, /* Spline Cubic Filter */ { LanczosRadiusFilter, LanczosFilter }, /* Lanczos with integer radius */ }; /* Table mapping the filter/window from the above table to an actual function. The default support size for that filter as a weighting function, the range to scale with to use that function as a sinc windowing function, (typ 1.0). Note that the filter_type -> function is 1 to 1 except for Sinc(), SincFast(), and CubicBC() functions, which may have multiple filter to function associations. See "filter:verbose" handling below for the function -> filter mapping. */ static struct { MagickRealType (*function)(const MagickRealType,const ResizeFilter*); double support, /* Default lobes/support size of the weighting filter. */ scale, /* Support when function used as a windowing function Typically equal to the location of the first zero crossing. */ B,C; /* BC-spline coefficients, ignored if not a CubicBC filter. 
*/ } const filters[SentinelFilter] = { /* .--- support window (if used as a Weighting Function) | .--- first crossing (if used as a Windowing Function) | | .--- B value for Cubic Function | | | .---- C value for Cubic Function | | | | */ { Box, 0.5, 0.5, 0.0, 0.0 }, /* Undefined (default to Box) */ { Box, 0.0, 0.5, 0.0, 0.0 }, /* Point (special handling) */ { Box, 0.5, 0.5, 0.0, 0.0 }, /* Box */ { Triangle, 1.0, 1.0, 0.0, 0.0 }, /* Triangle */ { CubicBC, 1.0, 1.0, 0.0, 0.0 }, /* Hermite (cubic B=C=0) */ { Hanning, 1.0, 1.0, 0.0, 0.0 }, /* Hann, cosine window */ { Hamming, 1.0, 1.0, 0.0, 0.0 }, /* Hamming, '' variation */ { Blackman, 1.0, 1.0, 0.0, 0.0 }, /* Blackman, 2*cosine window */ { Gaussian, 2.0, 1.5, 0.0, 0.0 }, /* Gaussian */ { Quadratic, 1.5, 1.5, 0.0, 0.0 }, /* Quadratic gaussian */ { CubicBC, 2.0, 2.0, 1.0, 0.0 }, /* General Cubic Filter */ { CubicBC, 2.0, 1.0, 0.0, 0.5 }, /* Catmull-Rom (B=0,C=1/2) */ { CubicBC, 2.0, 8.0/7.0, 1./3., 1./3. }, /* Mitchell (B=C=1/3) */ { Jinc, 3.0, 1.2196698912665045, 0.0, 0.0 }, /* Raw 3-lobed Jinc */ { Sinc, 4.0, 1.0, 0.0, 0.0 }, /* Raw 4-lobed Sinc */ { SincFast, 4.0, 1.0, 0.0, 0.0 }, /* Raw fast sinc ("Pade"-type) */ { Kaiser, 1.0, 1.0, 0.0, 0.0 }, /* Kaiser (square root window) */ { Welsh, 1.0, 1.0, 0.0, 0.0 }, /* Welsh (parabolic window) */ { CubicBC, 2.0, 2.0, 1.0, 0.0 }, /* Parzen (B-Spline window) */ { Bohman, 1.0, 1.0, 0.0, 0.0 }, /* Bohman, 2*Cosine window */ { Triangle, 1.0, 1.0, 0.0, 0.0 }, /* Bartlett (triangle window) */ { Lagrange, 2.0, 1.0, 0.0, 0.0 }, /* Lagrange sinc approximation */ { SincFast, 3.0, 1.0, 0.0, 0.0 }, /* Lanczos, 3-lobed Sinc-Sinc */ { SincFast, 3.0, 1.0, 0.0, 0.0 }, /* Lanczos, Sharpened */ { SincFast, 2.0, 1.0, 0.0, 0.0 }, /* Lanczos, 2-lobed */ { SincFast, 2.0, 1.0, 0.0, 0.0 }, /* Lanczos2, sharpened */ /* Robidoux: Keys cubic close to Lanczos2D sharpened */ { CubicBC, 2.0, 1.1685777620836932, 0.37821575509399867, 0.31089212245300067 }, /* RobidouxSharp: Sharper version of Robidoux */ 
{ CubicBC, 2.0, 1.105822933719019, 0.2620145123990142, 0.3689927438004929 }, { Cosine, 1.0, 1.0, 0.0, 0.0 }, /* Low level cosine window */ { CubicBC, 2.0, 2.0, 1.0, 0.0 }, /* Cubic B-Spline (B=1,C=0) */ { SincFast, 3.0, 1.0, 0.0, 0.0 }, /* Lanczos, Interger Radius */ }; /* The known zero crossings of the Jinc() or more accurately the Jinc(x*PI) function being used as a filter. It is used by the "filter:lobes" expert setting and for 'lobes' for Jinc functions in the previous table. This way users do not have to deal with the highly irrational lobe sizes of the Jinc filter. Values taken from http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp using Jv-function with v=1, then dividing by PI. */ static double jinc_zeros[16] = { 1.2196698912665045, 2.2331305943815286, 3.2383154841662362, 4.2410628637960699, 5.2427643768701817, 6.2439216898644877, 7.2447598687199570, 8.2453949139520427, 9.2458926849494673, 10.246293348754916, 11.246622794877883, 12.246898461138105, 13.247132522181061, 14.247333735806849, 15.247508563037300, 16.247661874700962 }; /* Allocate resize filter. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(UndefinedFilter < filter && filter < SentinelFilter); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); resize_filter=(ResizeFilter *) AcquireMagickMemory(sizeof(*resize_filter)); if (resize_filter == (ResizeFilter *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(resize_filter,0,sizeof(*resize_filter)); /* Defaults for the requested filter. 
*/ filter_type=mapping[filter].filter; window_type=mapping[filter].window; resize_filter->blur = blur; /* function argument blur factor (1.0) */ /* Promote 1D Windowed Sinc Filters to a 2D Windowed Jinc filters */ if (cylindrical != MagickFalse && filter_type == SincFastFilter && filter != SincFastFilter ) filter_type=JincFilter; /* 1D Windowed Sinc => 2D Windowed Jinc filters */ /* Expert filter setting override */ artifact=GetImageArtifact(image,"filter:filter"); if (artifact != (const char *) NULL) { ssize_t option; option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact); if ((UndefinedFilter < option) && (option < SentinelFilter)) { /* Raw filter request - no window function. */ filter_type=(FilterTypes) option; window_type=BoxFilter; } /* Filter override with a specific window function. */ artifact=GetImageArtifact(image,"filter:window"); if (artifact != (const char *) NULL) { option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact); if ((UndefinedFilter < option) && (option < SentinelFilter)) window_type=(FilterTypes) option; } } else { /* Window specified, but no filter function? Assume Sinc/Jinc. */ artifact=GetImageArtifact(image,"filter:window"); if (artifact != (const char *) NULL) { ssize_t option; option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact); if ((UndefinedFilter < option) && (option < SentinelFilter)) { filter_type=cylindrical != MagickFalse ? JincFilter : SincFastFilter; window_type=(FilterTypes) option; } } } /* Assign the real functions to use for the filters selected. 
*/ resize_filter->filter=filters[filter_type].function; resize_filter->support=filters[filter_type].support; resize_filter->window=filters[window_type].function; resize_filter->scale=filters[window_type].scale; resize_filter->signature=MagickSignature; /* Filter Modifications for orthogonal/cylindrical usage */ if (cylindrical != MagickFalse) switch (filter_type) { case BoxFilter: /* Support for Cylindrical Box should be sqrt(2)/2 */ resize_filter->support=(MagickRealType) MagickSQ1_2; break; case LanczosFilter: case LanczosSharpFilter: case Lanczos2Filter: case Lanczos2SharpFilter: case LanczosRadiusFilter: resize_filter->filter=filters[JincFilter].function; resize_filter->window=filters[JincFilter].function; resize_filter->scale=filters[JincFilter].scale; /* number of lobes (support window size) remain unchanged */ break; default: break; } /* Global Sharpening (regardless of orthoginal/cylindrical) */ switch (filter_type) { case LanczosSharpFilter: resize_filter->blur *= (MagickRealType) 0.9812505644269356; break; case Lanczos2SharpFilter: resize_filter->blur *= (MagickRealType) 0.9549963639785485; break; /* case LanczosRadius: blur adjust is done after lobes */ default: break; } /* Expert Option Modifications. */ /* User Gaussian Sigma Override - no support change */ if ((resize_filter->filter == Gaussian) || (resize_filter->window == Gaussian) ) { value=0.5; /* guassian sigma default, half pixel */ artifact=GetImageArtifact(image,"filter:sigma"); if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL); /* Define coefficents for Gaussian */ resize_filter->coefficient[0]=value; /* note sigma too */ resize_filter->coefficient[1]=PerceptibleReciprocal(2.0*value*value); /* sigma scaling */ resize_filter->coefficient[2]=PerceptibleReciprocal(Magick2PI*value*value); /* normalization - not actually needed or used! 
*/ if ( value > 0.5 ) resize_filter->support *= value/0.5; /* increase support */ } /* User Kaiser Alpha Override - no support change */ if ((resize_filter->filter == Kaiser) || (resize_filter->window == Kaiser) ) { value=6.5; /* default beta value for Kaiser bessel windowing function */ artifact=GetImageArtifact(image,"filter:alpha"); /* FUTURE: depreciate */ if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL); artifact=GetImageArtifact(image,"filter:kaiser-beta"); if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL); artifact=GetImageArtifact(image,"filter:kaiser-alpha"); if (artifact != (const char *) NULL) value=(MagickRealType) (StringToDouble(artifact,(char **) NULL)*MagickPI); /* Define coefficents for Kaiser Windowing Function */ resize_filter->coefficient[0]=value; /* alpha */ resize_filter->coefficient[1]=PerceptibleReciprocal(I0(value)); /* normalization */ } /* Support Overrides */ artifact=GetImageArtifact(image,"filter:lobes"); if (artifact != (const char *) NULL) { ssize_t lobes; lobes=(ssize_t) StringToLong(artifact); if (lobes < 1) lobes=1; resize_filter->support=(MagickRealType) lobes; } /* Convert a Jinc function lobes value to a real support value */ if (resize_filter->filter == Jinc) { if (resize_filter->support > 16) resize_filter->support=jinc_zeros[15]; /* largest entry in table */ else resize_filter->support=jinc_zeros[((long)resize_filter->support)-1]; /* blur this filter so support is a integer value (lobes dependant) */ if (filter_type == LanczosRadiusFilter) { resize_filter->blur *= floor(resize_filter->support)/ resize_filter->support; } } /* Expert Blur Override */ artifact=GetImageArtifact(image,"filter:blur"); if (artifact != (const char *) NULL) resize_filter->blur*=StringToDouble(artifact,(char **) NULL); if (resize_filter->blur < MagickEpsilon) resize_filter->blur=(MagickRealType) MagickEpsilon; /* Expert override of the support setting */ 
artifact=GetImageArtifact(image,"filter:support"); if (artifact != (const char *) NULL) resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL)); /* Scale windowing function separately to the support 'clipping' window that calling operator is planning to actually use. (Expert override) */ resize_filter->window_support=resize_filter->support; /* default */ artifact=GetImageArtifact(image,"filter:win-support"); if (artifact != (const char *) NULL) resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL)); /* Adjust window function scaling to match windowing support for weighting function. This avoids a division on every filter call. */ resize_filter->scale/=resize_filter->window_support; /* * Set Cubic Spline B,C values, calculate Cubic coefficients. */ B=0.0; C=0.0; if ((resize_filter->filter == CubicBC) || (resize_filter->window == CubicBC) ) { B=filters[filter_type].B; C=filters[filter_type].C; if (filters[window_type].function == CubicBC) { B=filters[window_type].B; C=filters[window_type].C; } artifact=GetImageArtifact(image,"filter:b"); if (artifact != (const char *) NULL) { B=StringToDouble(artifact,(char **) NULL); C=(1.0-B)/2.0; /* Calculate C to get a Keys cubic filter. */ artifact=GetImageArtifact(image,"filter:c"); /* user C override */ if (artifact != (const char *) NULL) C=StringToDouble(artifact,(char **) NULL); } else { artifact=GetImageArtifact(image,"filter:c"); if (artifact != (const char *) NULL) { C=StringToDouble(artifact,(char **) NULL); B=1.0-2.0*C; /* Calculate B to get a Keys cubic filter. */ } } /* Convert B,C values into Cubic Coefficents. See CubicBC(). 
*/ { const double twoB = B+B; resize_filter->coefficient[0]=1.0-(1.0/3.0)*B; resize_filter->coefficient[1]=-3.0+twoB+C; resize_filter->coefficient[2]=2.0-1.5*B-C; resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C; resize_filter->coefficient[4]=-8.0*C-twoB; resize_filter->coefficient[5]=B+5.0*C; resize_filter->coefficient[6]=(-1.0/6.0)*B-C; } } /* Expert Option Request for verbose details of the resulting filter. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp master { #endif artifact=GetImageArtifact(image,"filter:verbose"); if (IsMagickTrue(artifact)) { double support, x; /* Set the weighting function properly when the weighting function may not exactly match the filter of the same name. EG: a Point filter is really uses a Box weighting function with a different support than is typically used. */ if (resize_filter->filter == Box) filter_type=BoxFilter; if (resize_filter->filter == Sinc) filter_type=SincFilter; if (resize_filter->filter == SincFast) filter_type=SincFastFilter; if (resize_filter->filter == Jinc) filter_type=JincFilter; if (resize_filter->filter == CubicBC) filter_type=CubicFilter; if (resize_filter->window == Box) window_type=BoxFilter; if (resize_filter->window == Sinc) window_type=SincFilter; if (resize_filter->window == SincFast) window_type=SincFastFilter; if (resize_filter->window == Jinc) window_type=JincFilter; if (resize_filter->window == CubicBC) window_type=CubicFilter; /* Report Filter Details. 
*/ support=GetResizeFilterSupport(resize_filter); /* practical_support */ (void) FormatLocaleFile(stdout,"# Resampling Filter (for graphing)\n#\n"); (void) FormatLocaleFile(stdout,"# filter = %s\n", CommandOptionToMnemonic(MagickFilterOptions,filter_type)); (void) FormatLocaleFile(stdout,"# window = %s\n", CommandOptionToMnemonic(MagickFilterOptions,window_type)); (void) FormatLocaleFile(stdout,"# support = %.*g\n", GetMagickPrecision(),(double) resize_filter->support); (void) FormatLocaleFile(stdout,"# window-support = %.*g\n", GetMagickPrecision(),(double) resize_filter->window_support); (void) FormatLocaleFile(stdout,"# scale-blur = %.*g\n", GetMagickPrecision(), (double)resize_filter->blur); if ( filter_type == GaussianFilter || window_type == GaussianFilter ) (void) FormatLocaleFile(stdout,"# gaussian-sigma = %.*g\n", GetMagickPrecision(), (double)resize_filter->coefficient[0]); if ( filter_type == KaiserFilter || window_type == KaiserFilter ) (void) FormatLocaleFile(stdout,"# kaiser-beta = %.*g\n", GetMagickPrecision(), (double)resize_filter->coefficient[0]); (void) FormatLocaleFile(stdout,"# practical-support = %.*g\n", GetMagickPrecision(), (double)support); if ( filter_type == CubicFilter || window_type == CubicFilter ) (void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n", GetMagickPrecision(),(double)B, GetMagickPrecision(),(double)C); (void) FormatLocaleFile(stdout,"\n"); /* Output values of resulting filter graph -- for graphing filter result. */ for (x=0.0; x <= support; x+=0.01f) (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x,GetMagickPrecision(), (double) GetResizeFilterWeight(resize_filter,x)); /* A final value so gnuplot can graph the 'stop' properly. 
*/ (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support, GetMagickPrecision(),0.0); } /* Output the above once only for each image - remove setting */ (void) DeleteImageArtifact((Image *) image,"filter:verbose"); #if defined(MAGICKCORE_OPENMP_SUPPORT) } #endif return(resize_filter); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e R e s i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveResizeImage() adaptively resize image with pixel resampling. % % This is shortcut function for a fast interpolative resize using mesh % interpolation. It works well for small resizes of less than +/- 50% % of the original image size. For larger resizing on images a full % filtered and slower resize function should be used instead. % % The format of the AdaptiveResizeImage method is: % % Image *AdaptiveResizeImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the resized image. % % o rows: the number of rows in the resized image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveResizeImage(const Image *image, const size_t columns,const size_t rows,ExceptionInfo *exception) { return(InterpolativeResizeImage(image,columns,rows,MeshInterpolatePixel, exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + B e s s e l O r d e r O n e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BesselOrderOne() computes the Bessel function of x of the first kind of % order 0. This is used to create the Jinc() filter function below. 
% % Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8] % % j1(x) = x*j1(x); % % For x in (8,inf) % % j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1)) % % where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow: % % cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4) % = 1/sqrt(2) * (sin(x) - cos(x)) % sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4) % = -1/sqrt(2) * (sin(x) + cos(x)) % % The format of the BesselOrderOne method is: % % MagickRealType BesselOrderOne(MagickRealType x) % % A description of each parameter follows: % % o x: MagickRealType value. % */ #undef I0 static MagickRealType I0(MagickRealType x) { MagickRealType sum, t, y; register ssize_t i; /* Zeroth order Bessel function of the first kind. */ sum=1.0; y=x*x/4.0; t=y; for (i=2; t > MagickEpsilon; i++) { sum+=t; t*=y/((MagickRealType) i*i); } return(sum); } #undef J1 static MagickRealType J1(MagickRealType x) { MagickRealType p, q; register ssize_t i; static const double Pone[] = { 0.581199354001606143928050809e+21, -0.6672106568924916298020941484e+20, 0.2316433580634002297931815435e+19, -0.3588817569910106050743641413e+17, 0.2908795263834775409737601689e+15, -0.1322983480332126453125473247e+13, 0.3413234182301700539091292655e+10, -0.4695753530642995859767162166e+7, 0.270112271089232341485679099e+4 }, Qone[] = { 0.11623987080032122878585294e+22, 0.1185770712190320999837113348e+20, 0.6092061398917521746105196863e+17, 0.2081661221307607351240184229e+15, 0.5243710262167649715406728642e+12, 0.1013863514358673989967045588e+10, 0.1501793594998585505921097578e+7, 0.1606931573481487801970916749e+4, 0.1e+1 }; p=Pone[8]; q=Qone[8]; for (i=7; i >= 0; i--) { p=p*x*x+Pone[i]; q=q*x*x+Qone[i]; } return(p/q); } #undef P1 static MagickRealType P1(MagickRealType x) { MagickRealType p, q; register ssize_t i; static const double Pone[] = { 0.352246649133679798341724373e+5, 0.62758845247161281269005675e+5, 0.313539631109159574238669888e+5, 0.49854832060594338434500455e+4, 0.2111529182853962382105718e+3, 
0.12571716929145341558495e+1 }, Qone[] = { 0.352246649133679798068390431e+5, 0.626943469593560511888833731e+5, 0.312404063819041039923015703e+5, 0.4930396490181088979386097e+4, 0.2030775189134759322293574e+3, 0.1e+1 }; p=Pone[5]; q=Qone[5]; for (i=4; i >= 0; i--) { p=p*(8.0/x)*(8.0/x)+Pone[i]; q=q*(8.0/x)*(8.0/x)+Qone[i]; } return(p/q); } #undef Q1 static MagickRealType Q1(MagickRealType x) { MagickRealType p, q; register ssize_t i; static const double Pone[] = { 0.3511751914303552822533318e+3, 0.7210391804904475039280863e+3, 0.4259873011654442389886993e+3, 0.831898957673850827325226e+2, 0.45681716295512267064405e+1, 0.3532840052740123642735e-1 }, Qone[] = { 0.74917374171809127714519505e+4, 0.154141773392650970499848051e+5, 0.91522317015169922705904727e+4, 0.18111867005523513506724158e+4, 0.1038187585462133728776636e+3, 0.1e+1 }; p=Pone[5]; q=Qone[5]; for (i=4; i >= 0; i--) { p=p*(8.0/x)*(8.0/x)+Pone[i]; q=q*(8.0/x)*(8.0/x)+Qone[i]; } return(p/q); } static MagickRealType BesselOrderOne(MagickRealType x) { MagickRealType p, q; if (x == 0.0) return(0.0); p=x; if (x < 0.0) x=(-x); if (x < 8.0) return(p*J1(x)); q=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin((double) x)- cos((double) x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin((double) x)+ cos((double) x)))); if (p < 0.0) q=(-q); return(q); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y R e s i z e F i l t e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyResizeFilter() destroy the resize filter. % % The format of the DestroyResizeFilter method is: % % ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter) % % A description of each parameter follows: % % o resize_filter: the resize filter. 
% */ MagickExport ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter) { assert(resize_filter != (ResizeFilter *) NULL); assert(resize_filter->signature == MagickSignature); resize_filter->signature=(~MagickSignature); resize_filter=(ResizeFilter *) RelinquishMagickMemory(resize_filter); return(resize_filter); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t R e s i z e F i l t e r S u p p o r t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetResizeFilterSupport() return the current support window size for this % filter. Note that this may have been enlarged by filter:blur factor. % % The format of the GetResizeFilterSupport method is: % % MagickRealType GetResizeFilterSupport(const ResizeFilter *resize_filter) % % A description of each parameter follows: % % o filter: Image filter to use. % */ MagickExport MagickRealType GetResizeFilterSupport( const ResizeFilter *resize_filter) { assert(resize_filter != (ResizeFilter *) NULL); assert(resize_filter->signature == MagickSignature); return(resize_filter->support*resize_filter->blur); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t R e s i z e F i l t e r W e i g h t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetResizeFilterWeight evaluates the specified resize filter at the point x % which usally lies between zero and the filters current 'support' and % returns the weight of the filter function at that point. % % The format of the GetResizeFilterWeight method is: % % MagickRealType GetResizeFilterWeight(const ResizeFilter *resize_filter, % const MagickRealType x) % % A description of each parameter follows: % % o filter: the filter type. % % o x: the point. 
% */ MagickExport MagickRealType GetResizeFilterWeight( const ResizeFilter *resize_filter,const MagickRealType x) { MagickRealType scale, weight, x_blur; /* Windowing function - scale the weighting filter by this amount. */ assert(resize_filter != (ResizeFilter *) NULL); assert(resize_filter->signature == MagickSignature); x_blur=fabs((double) x)/resize_filter->blur; /* X offset with blur scaling */ if ((resize_filter->window_support < MagickEpsilon) || (resize_filter->window == Box)) scale=1.0; /* Point or Box Filter -- avoid division by zero */ else { scale=resize_filter->scale; scale=resize_filter->window(x_blur*scale,resize_filter); } weight=scale*resize_filter->filter(x_blur,resize_filter); return(weight); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n t e r p o l a t i v e R e s i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InterpolativeResizeImage() resizes an image using the specified % interpolation method. % % The format of the InterpolativeResizeImage method is: % % Image *InterpolativeResizeImage(const Image *image,const size_t columns, % const size_t rows,const InterpolatePixelMethod method, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the resized image. % % o rows: the number of rows in the resized image. % % o method: the pixel interpolation method. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *InterpolativeResizeImage(const Image *image, const size_t columns,const size_t rows,const InterpolatePixelMethod method, ExceptionInfo *exception) { #define InterpolativeResizeImageTag "Resize/Image" CacheView *image_view, *resize_view; Image *resize_image; MagickBooleanType status; MagickOffsetType progress; PointInfo scale; ssize_t y; /* Interpolatively resize image. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((columns == 0) || (rows == 0)) return((Image *) NULL); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); resize_image=CloneImage(image,columns,rows,MagickTrue,exception); if (resize_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(resize_image,DirectClass) == MagickFalse) { InheritException(exception,&resize_image->exception); resize_image=DestroyImage(resize_image); return((Image *) NULL); } status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); resize_view=AcquireAuthenticCacheView(resize_image,exception); scale.x=(double) image->columns/resize_image->columns; scale.y=(double) image->rows/resize_image->rows; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,resize_image,resize_image->rows,1) #endif for (y=0; y < (ssize_t) resize_image->rows; y++) { MagickPixelPacket pixel; PointInfo offset; register IndexPacket *restrict resize_indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1, exception); if (q == (PixelPacket *) NULL) continue; resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view); GetMagickPixelPacket(image,&pixel); offset.y=((MagickRealType) y+0.5)*scale.y-0.5; for (x=0; x < (ssize_t) resize_image->columns; x++) { offset.x=((MagickRealType) x+0.5)*scale.x-0.5; (void) InterpolateMagickPixelPacket(image,image_view,method, offset.x,offset.y,&pixel,exception); SetPixelPacket(resize_image,&pixel,q,resize_indexes+x); q++; } if 
(SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse) continue; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_InterpolativeResizeImage) #endif proceed=SetImageProgress(image,InterpolativeResizeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } resize_view=DestroyCacheView(resize_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) resize_image=DestroyImage(resize_image); return(resize_image); } #if defined(MAGICKCORE_LQR_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i q u i d R e s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LiquidRescaleImage() rescales image with seam carving. % % The format of the LiquidRescaleImage method is: % % Image *LiquidRescaleImage(const Image *image, % const size_t columns,const size_t rows, % const double delta_x,const double rigidity,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the rescaled image. % % o rows: the number of rows in the rescaled image. % % o delta_x: maximum seam transversal step (0 means straight seams). % % o rigidity: introduce a bias for non-straight seams (typically 0). % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns, const size_t rows,const double delta_x,const double rigidity, ExceptionInfo *exception) { #define LiquidRescaleImageTag "Rescale/Image" CacheView *rescale_view; const char *map; guchar *packet; Image *rescale_image; int x, y; LqrCarver *carver; LqrRetVal lqr_status; MagickBooleanType status; MagickPixelPacket pixel; unsigned char *pixels; /* Liquid rescale image. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((columns == 0) || (rows == 0)) return((Image *) NULL); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); if ((columns <= 2) || (rows <= 2)) return(ResizeImage(image,columns,rows,image->filter,image->blur,exception)); if ((columns >= (2*image->columns)) || (rows >= (2*image->rows))) { Image *resize_image; size_t height, width; /* Honor liquid resize size limitations. */ for (width=image->columns; columns >= (2*width-1); width*=2); for (height=image->rows; rows >= (2*height-1); height*=2); resize_image=ResizeImage(image,width,height,image->filter,image->blur, exception); if (resize_image == (Image *) NULL) return((Image *) NULL); rescale_image=LiquidRescaleImage(resize_image,columns,rows,delta_x, rigidity,exception); resize_image=DestroyImage(resize_image); return(rescale_image); } map="RGB"; if (image->matte == MagickFalse) map="RGBA"; if (image->colorspace == CMYKColorspace) { map="CMYK"; if (image->matte == MagickFalse) map="CMYKA"; } pixels=(unsigned char *) AcquireQuantumMemory(image->columns,image->rows* strlen(map)*sizeof(*pixels)); if (pixels == (unsigned char *) NULL) return((Image *) NULL); status=ExportImagePixels(image,0,0,image->columns,image->rows,map,CharPixel, pixels,exception); if (status == MagickFalse) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } carver=lqr_carver_new(pixels,image->columns,image->rows,strlen(map)); if (carver == (LqrCarver *) NULL) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } lqr_status=lqr_carver_init(carver,(int) 
delta_x,rigidity); lqr_status=lqr_carver_resize(carver,columns,rows); (void) lqr_status; rescale_image=CloneImage(image,lqr_carver_get_width(carver), lqr_carver_get_height(carver),MagickTrue,exception); if (rescale_image == (Image *) NULL) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); return((Image *) NULL); } if (SetImageStorageClass(rescale_image,DirectClass) == MagickFalse) { InheritException(exception,&rescale_image->exception); rescale_image=DestroyImage(rescale_image); return((Image *) NULL); } GetMagickPixelPacket(rescale_image,&pixel); (void) lqr_carver_scan_reset(carver); rescale_view=AcquireAuthenticCacheView(rescale_image,exception); while (lqr_carver_scan(carver,&x,&y,&packet) != 0) { register IndexPacket *restrict rescale_indexes; register PixelPacket *restrict q; q=QueueCacheViewAuthenticPixels(rescale_view,x,y,1,1,exception); if (q == (PixelPacket *) NULL) break; rescale_indexes=GetCacheViewAuthenticIndexQueue(rescale_view); pixel.red=QuantumRange*(packet[0]/255.0); pixel.green=QuantumRange*(packet[1]/255.0); pixel.blue=QuantumRange*(packet[2]/255.0); if (image->colorspace != CMYKColorspace) { if (image->matte == MagickFalse) pixel.opacity=QuantumRange*(packet[3]/255.0); } else { pixel.index=QuantumRange*(packet[3]/255.0); if (image->matte == MagickFalse) pixel.opacity=QuantumRange*(packet[4]/255.0); } SetPixelPacket(rescale_image,&pixel,q,rescale_indexes); if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse) break; } rescale_view=DestroyCacheView(rescale_view); /* Relinquish resources. 
*/ lqr_carver_destroy(carver); return(rescale_image); } #else MagickExport Image *LiquidRescaleImage(const Image *image, const size_t magick_unused(columns),const size_t magick_unused(rows), const double magick_unused(delta_x),const double magick_unused(rigidity), ExceptionInfo *exception) { assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError, "DelegateLibrarySupportNotBuiltIn","`%s' (LQR)",image->filename); return((Image *) NULL); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g n i f y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagnifyImage() is a convenience method that scales an image proportionally % to twice its size. % % The format of the MagnifyImage method is: % % Image *MagnifyImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception) { Image *magnify_image; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); magnify_image=ResizeImage(image,2*image->columns,2*image->rows,SplineFilter, 1.0,exception); return(magnify_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M i n i f y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MinifyImage() is a convenience method that scales an image proportionally to % half its size. % % The format of the MinifyImage method is: % % Image *MinifyImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception) { Image *minify_image; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); minify_image=ResizeImage(image,image->columns/2,image->rows/2,SplineFilter, 1.0,exception); return(minify_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s a m p l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResampleImage() resize image in terms of its pixel size, so that when % displayed at the given resolution it will be the same size in terms of % real world units as the original image at the original resolution. 
%
%  The format of the ResampleImage method is:
%
%      Image *ResampleImage(Image *image,const double x_resolution,
%        const double y_resolution,const FilterTypes filter,const double blur,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be resized to fit the given resolution.
%
%    o x_resolution: the new image x resolution.
%
%    o y_resolution: the new image y resolution.
%
%    o filter: Image filter to use.
%
%    o blur: the blur factor where > 1 is blurry, < 1 is sharp.
%
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
  const double y_resolution,const FilterTypes filter,const double blur,
  ExceptionInfo *exception)
{
#define ResampleImageTag  "Resample/Image"

  double
    source_x_density,
    source_y_density;

  Image
    *resample_image;

  size_t
    height,
    width;

  /*
    Initialize sampled image attributes.  A stored resolution of zero is
    treated as the de-facto 72 DPI default.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  source_x_density=image->x_resolution == 0.0 ? 72.0 : image->x_resolution;
  source_y_density=image->y_resolution == 0.0 ? 72.0 : image->y_resolution;
  /*
    Round to the nearest pixel count that preserves real-world size.
  */
  width=(size_t) (x_resolution*image->columns/source_x_density+0.5);
  height=(size_t) (y_resolution*image->rows/source_y_density+0.5);
  resample_image=ResizeImage(image,width,height,filter,blur,exception);
  if (resample_image != (Image *) NULL)
    {
      resample_image->x_resolution=x_resolution;
      resample_image->y_resolution=y_resolution;
    }
  return(resample_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s i z e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResizeImage() scales an image to the desired dimensions, using the given
%  filter (see AcquireFilterInfo()).
% % If an undefined filter is given the filter defaults to Mitchell for a % colormapped image, a image with a matte channel, or if the image is % enlarged. Otherwise the filter defaults to a Lanczos. % % ResizeImage() was inspired by Paul Heckbert's "zoom" program. % % The format of the ResizeImage method is: % % Image *ResizeImage(Image *image,const size_t columns, % const size_t rows,const FilterTypes filter,const double blur, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the scaled image. % % o rows: the number of rows in the scaled image. % % o filter: Image filter to use. % % o blur: the blur factor where > 1 is blurry, < 1 is sharp. Typically set % this to 1.0. % % o exception: return any errors or warnings in this structure. % */ typedef struct _ContributionInfo { MagickRealType weight; ssize_t pixel; } ContributionInfo; static ContributionInfo **DestroyContributionThreadSet( ContributionInfo **contribution) { register ssize_t i; assert(contribution != (ContributionInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (contribution[i] != (ContributionInfo *) NULL) contribution[i]=(ContributionInfo *) RelinquishAlignedMemory( contribution[i]); contribution=(ContributionInfo **) RelinquishMagickMemory(contribution); return(contribution); } static ContributionInfo **AcquireContributionThreadSet(const size_t count) { register ssize_t i; ContributionInfo **contribution; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads, sizeof(*contribution)); if (contribution == (ContributionInfo **) NULL) return((ContributionInfo **) NULL); (void) ResetMagickMemory(contribution,0,number_threads*sizeof(*contribution)); for (i=0; i < (ssize_t) number_threads; i++) { contribution[i]=(ContributionInfo *) MagickAssumeAligned( 
AcquireAlignedMemory(count,sizeof(**contribution))); if (contribution[i] == (ContributionInfo *) NULL) return(DestroyContributionThreadSet(contribution)); } return(contribution); } static inline double MagickMax(const double x,const double y) { if (x > y) return(x); return(y); } static inline double MagickMin(const double x,const double y) { if (x < y) return(x); return(y); } static MagickBooleanType HorizontalFilter(const ResizeFilter *resize_filter, const Image *image,Image *resize_image,const MagickRealType x_factor, const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception) { #define ResizeImageTag "Resize/Image" CacheView *image_view, *resize_view; ClassType storage_class; ContributionInfo **restrict contributions; MagickBooleanType status; MagickPixelPacket zero; MagickRealType scale, support; ssize_t x; /* Apply filter to resize horizontally from image to resize image. */ scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0); support=scale*GetResizeFilterSupport(resize_filter); storage_class=support > 0.5 ? DirectClass : image->storage_class; if (SetImageStorageClass(resize_image,storage_class) == MagickFalse) { InheritException(exception,&resize_image->exception); return(MagickFalse); } if (support < 0.5) { /* Support too small even for nearest neighbour: Reduce to point sampling. 
*/ support=(MagickRealType) 0.5; scale=1.0; } contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0)); if (contributions == (ContributionInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } status=MagickTrue; scale=PerceptibleReciprocal(scale); (void) ResetMagickMemory(&zero,0,sizeof(zero)); image_view=AcquireVirtualCacheView(image,exception); resize_view=AcquireAuthenticCacheView(resize_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,resize_image,resize_image->columns,1) #endif for (x=0; x < (ssize_t) resize_image->columns; x++) { MagickRealType bisect, density; register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ContributionInfo *restrict contribution; register IndexPacket *restrict resize_indexes; register PixelPacket *restrict q; register ssize_t y; ssize_t n, start, stop; if (status == MagickFalse) continue; bisect=(MagickRealType) (x+0.5)/x_factor+MagickEpsilon; start=(ssize_t) MagickMax(bisect-support+0.5,0.0); stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns); density=0.0; contribution=contributions[GetOpenMPThreadId()]; for (n=0; n < (stop-start); n++) { contribution[n].pixel=start+n; contribution[n].weight=GetResizeFilterWeight(resize_filter,scale* ((MagickRealType) (start+n)-bisect+0.5)); density+=contribution[n].weight; } if ((density != 0.0) && (density != 1.0)) { register ssize_t i; /* Normalize. 
*/ density=PerceptibleReciprocal(density); for (i=0; i < n; i++) contribution[i].weight*=density; } p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception); q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view); for (y=0; y < (ssize_t) resize_image->rows; y++) { MagickPixelPacket pixel; MagickRealType alpha; register ssize_t i; ssize_t j; pixel=zero; if (image->matte == MagickFalse) { for (i=0; i < n; i++) { j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[i].pixel-contribution[0].pixel); alpha=contribution[i].weight; pixel.red+=alpha*GetPixelRed(p+j); pixel.green+=alpha*GetPixelGreen(p+j); pixel.blue+=alpha*GetPixelBlue(p+j); pixel.opacity+=alpha*GetPixelOpacity(p+j); } SetPixelRed(q,ClampToQuantum(pixel.red)); SetPixelGreen(q,ClampToQuantum(pixel.green)); SetPixelBlue(q,ClampToQuantum(pixel.blue)); SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if ((image->colorspace == CMYKColorspace) && (resize_image->colorspace == CMYKColorspace)) { for (i=0; i < n; i++) { j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[i].pixel-contribution[0].pixel); alpha=contribution[i].weight; pixel.index+=alpha*GetPixelIndex(indexes+j); } SetPixelIndex(resize_indexes+y,ClampToQuantum(pixel.index)); } } else { double gamma; gamma=0.0; for (i=0; i < n; i++) { j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[i].pixel-contribution[0].pixel); alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j); pixel.red+=alpha*GetPixelRed(p+j); pixel.green+=alpha*GetPixelGreen(p+j); pixel.blue+=alpha*GetPixelBlue(p+j); pixel.opacity+=contribution[i].weight*GetPixelOpacity(p+j); gamma+=alpha; } 
gamma=PerceptibleReciprocal(gamma); SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if ((image->colorspace == CMYKColorspace) && (resize_image->colorspace == CMYKColorspace)) { for (i=0; i < n; i++) { j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[i].pixel-contribution[0].pixel); alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j); pixel.index+=alpha*GetPixelIndex(indexes+j); } SetPixelIndex(resize_indexes+y,ClampToQuantum(gamma*pixel.index)); } } if ((resize_image->storage_class == PseudoClass) && (image->storage_class == PseudoClass)) { i=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double) stop- 1.0)+0.5); j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[i-start].pixel-contribution[0].pixel); SetPixelIndex(resize_indexes+y,GetPixelIndex(indexes+j)); } q++; } if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_HorizontalFilter) #endif proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span); if (proceed == MagickFalse) status=MagickFalse; } } resize_view=DestroyCacheView(resize_view); image_view=DestroyCacheView(image_view); contributions=DestroyContributionThreadSet(contributions); return(status); } static MagickBooleanType VerticalFilter(const ResizeFilter *resize_filter, const Image *image,Image *resize_image,const MagickRealType y_factor, const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception) { CacheView *image_view, *resize_view; ClassType storage_class; ContributionInfo **restrict contributions; MagickBooleanType status; MagickPixelPacket zero; MagickRealType scale, support; ssize_t y; /* Apply filter to resize vertically 
from image to resize image. */ scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0); support=scale*GetResizeFilterSupport(resize_filter); storage_class=support > 0.5 ? DirectClass : image->storage_class; if (SetImageStorageClass(resize_image,storage_class) == MagickFalse) { InheritException(exception,&resize_image->exception); return(MagickFalse); } if (support < 0.5) { /* Support too small even for nearest neighbour: Reduce to point sampling. */ support=(MagickRealType) 0.5; scale=1.0; } contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0)); if (contributions == (ContributionInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } status=MagickTrue; scale=PerceptibleReciprocal(scale); (void) ResetMagickMemory(&zero,0,sizeof(zero)); image_view=AcquireVirtualCacheView(image,exception); resize_view=AcquireAuthenticCacheView(resize_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,resize_image,resize_image->rows,1) #endif for (y=0; y < (ssize_t) resize_image->rows; y++) { MagickRealType bisect, density; register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ContributionInfo *restrict contribution; register IndexPacket *restrict resize_indexes; register PixelPacket *restrict q; register ssize_t x; ssize_t n, start, stop; if (status == MagickFalse) continue; bisect=(MagickRealType) (y+0.5)/y_factor+MagickEpsilon; start=(ssize_t) MagickMax(bisect-support+0.5,0.0); stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->rows); density=0.0; contribution=contributions[GetOpenMPThreadId()]; for (n=0; n < (stop-start); n++) { contribution[n].pixel=start+n; contribution[n].weight=GetResizeFilterWeight(resize_filter,scale* ((MagickRealType) (start+n)-bisect+0.5)); density+=contribution[n].weight; } if ((density != 0.0) && 
(density != 1.0)) { register ssize_t i; /* Normalize. */ density=PerceptibleReciprocal(density); for (i=0; i < n; i++) contribution[i].weight*=density; } p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel, image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1), exception); q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view); for (x=0; x < (ssize_t) resize_image->columns; x++) { MagickPixelPacket pixel; MagickRealType alpha; register ssize_t i; ssize_t j; pixel=zero; if (image->matte == MagickFalse) { for (i=0; i < n; i++) { j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[i].weight; pixel.red+=alpha*GetPixelRed(p+j); pixel.green+=alpha*GetPixelGreen(p+j); pixel.blue+=alpha*GetPixelBlue(p+j); pixel.opacity+=alpha*GetPixelOpacity(p+j); } SetPixelRed(q,ClampToQuantum(pixel.red)); SetPixelGreen(q,ClampToQuantum(pixel.green)); SetPixelBlue(q,ClampToQuantum(pixel.blue)); SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if ((image->colorspace == CMYKColorspace) && (resize_image->colorspace == CMYKColorspace)) { for (i=0; i < n; i++) { j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[i].weight; pixel.index+=alpha*GetPixelIndex(indexes+j); } SetPixelIndex(resize_indexes+x,ClampToQuantum(pixel.index)); } } else { double gamma; gamma=0.0; for (i=0; i < n; i++) { j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j); pixel.red+=alpha*GetPixelRed(p+j); pixel.green+=alpha*GetPixelGreen(p+j); pixel.blue+=alpha*GetPixelBlue(p+j); pixel.opacity+=contribution[i].weight*GetPixelOpacity(p+j); gamma+=alpha; } 
gamma=PerceptibleReciprocal(gamma); SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if ((image->colorspace == CMYKColorspace) && (resize_image->colorspace == CMYKColorspace)) { for (i=0; i < n; i++) { j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j); pixel.index+=alpha*GetPixelIndex(indexes+j); } SetPixelIndex(resize_indexes+x,ClampToQuantum(gamma*pixel.index)); } } if ((resize_image->storage_class == PseudoClass) && (image->storage_class == PseudoClass)) { i=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double) stop- 1.0)+0.5); j=(ssize_t) ((contribution[i-start].pixel-contribution[0].pixel)* image->columns+x); SetPixelIndex(resize_indexes+x,GetPixelIndex(indexes+j)); } q++; } if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_VerticalFilter) #endif proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span); if (proceed == MagickFalse) status=MagickFalse; } } resize_view=DestroyCacheView(resize_view); image_view=DestroyCacheView(image_view); contributions=DestroyContributionThreadSet(contributions); return(status); } MagickExport Image *ResizeImage(const Image *image,const size_t columns, const size_t rows,const FilterTypes filter,const double blur, ExceptionInfo *exception) { FilterTypes filter_type; Image *filter_image, *resize_image; MagickOffsetType offset; MagickRealType x_factor, y_factor; MagickSizeType span; MagickStatusType status; ResizeFilter *resize_filter; /* Acquire resize image. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows) && (filter == UndefinedFilter) && (blur == 1.0)) return(CloneImage(image,0,0,MagickTrue,exception)); resize_image=CloneImage(image,columns,rows,MagickTrue,exception); if (resize_image == (Image *) NULL) return(resize_image); /* Acquire resize filter. */ x_factor=(MagickRealType) columns/(MagickRealType) image->columns; y_factor=(MagickRealType) rows/(MagickRealType) image->rows; if (x_factor > y_factor) filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception); else filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception); if (filter_image == (Image *) NULL) return(DestroyImage(resize_image)); filter_type=LanczosFilter; if (filter != UndefinedFilter) filter_type=filter; else if ((x_factor == 1.0) && (y_factor == 1.0)) filter_type=PointFilter; else if ((image->storage_class == PseudoClass) || (image->matte != MagickFalse) || ((x_factor*y_factor) > 1.0)) filter_type=MitchellFilter; resize_filter=AcquireResizeFilter(image,filter_type,blur,MagickFalse, exception); /* Resize image. 
*/ offset=0; if (x_factor > y_factor) { span=(MagickSizeType) (filter_image->columns+rows); status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span, &offset,exception); status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor, span,&offset,exception); } else { span=(MagickSizeType) (filter_image->rows+columns); status=VerticalFilter(resize_filter,image,filter_image,y_factor,span, &offset,exception); status&=HorizontalFilter(resize_filter,filter_image,resize_image,x_factor, span,&offset,exception); } /* Free resources. */ filter_image=DestroyImage(filter_image); resize_filter=DestroyResizeFilter(resize_filter); if (status == MagickFalse) { resize_image=DestroyImage(resize_image); return((Image *) NULL); } resize_image->type=image->type; return(resize_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S a m p l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SampleImage() scales an image to the desired dimensions with pixel % sampling. Unlike other scaling methods, this method does not introduce % any additional color into the scaled image. % % The format of the SampleImage method is: % % Image *SampleImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the sampled image. % % o rows: the number of rows in the sampled image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SampleImage(const Image *image,const size_t columns, const size_t rows,ExceptionInfo *exception) { #define SampleImageTag "Sample/Image" CacheView *image_view, *sample_view; Image *sample_image; MagickBooleanType status; MagickOffsetType progress; register ssize_t x; ssize_t *x_offset, y; /* Initialize sampled image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); sample_image=CloneImage(image,columns,rows,MagickTrue,exception); if (sample_image == (Image *) NULL) return((Image *) NULL); /* Allocate scan line buffer and column offset buffers. */ x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns, sizeof(*x_offset)); if (x_offset == (ssize_t *) NULL) { sample_image=DestroyImage(sample_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (x=0; x < (ssize_t) sample_image->columns; x++) x_offset[x]=(ssize_t) (((MagickRealType) x*image->columns)/ sample_image->columns+0.5); /* Sample each row. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); sample_view=AcquireAuthenticCacheView(sample_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,sample_image,1,1) #endif for (y=0; y < (ssize_t) sample_image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register IndexPacket *restrict sample_indexes; register PixelPacket *restrict q; register ssize_t x; ssize_t y_offset; if (status == MagickFalse) continue; y_offset=(ssize_t) (((MagickRealType) y*image->rows)/sample_image->rows+ 0.5); p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1, exception); q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); sample_indexes=GetCacheViewAuthenticIndexQueue(sample_view); /* Sample each column. 
*/ for (x=0; x < (ssize_t) sample_image->columns; x++) *q++=p[x_offset[x]]; if ((image->storage_class == PseudoClass) || (image->colorspace == CMYKColorspace)) for (x=0; x < (ssize_t) sample_image->columns; x++) SetPixelIndex(sample_indexes+x,GetPixelIndex(indexes+x_offset[x])); if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SampleImage) #endif proceed=SetImageProgress(image,SampleImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); sample_view=DestroyCacheView(sample_view); x_offset=(ssize_t *) RelinquishMagickMemory(x_offset); sample_image->type=image->type; if (status == MagickFalse) sample_image=DestroyImage(sample_image); return(sample_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleImage() changes the size of an image to the given dimensions. % % The format of the ScaleImage method is: % % Image *ScaleImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the scaled image. % % o rows: the number of rows in the scaled image. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ScaleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define ScaleImageTag  "Scale/Image"

  CacheView
    *image_view,
    *scale_view;

  Image
    *scale_image;

  MagickBooleanType
    next_column,
    next_row,
    proceed,
    status;

  MagickPixelPacket
    pixel,
    *scale_scanline,
    *scanline,
    *x_vector,
    *y_vector,
    zero;

  MagickRealType
    alpha;

  PointInfo
    scale,
    span;

  register ssize_t
    i;

  ssize_t
    number_rows,
    y;

  /*
    Initialize scaled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  scale_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (scale_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(scale_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&scale_image->exception);
      scale_image=DestroyImage(scale_image);
      return((Image *) NULL);
    }
  /*
    Allocate memory.  Note scanline aliases x_vector when no vertical
    scaling is required, so it must only be freed when distinct.
  */
  x_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
    sizeof(*x_vector));
  scanline=x_vector;
  if (image->rows != scale_image->rows)
    scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t)
      image->columns,sizeof(*scanline));
  scale_scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t)
    scale_image->columns,sizeof(*scale_scanline));
  y_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
    sizeof(*y_vector));
  if ((scanline == (MagickPixelPacket *) NULL) ||
      (scale_scanline == (MagickPixelPacket *) NULL) ||
      (x_vector == (MagickPixelPacket *) NULL) ||
      (y_vector == (MagickPixelPacket *) NULL))
    {
      /*
        Release any buffers that did allocate before throwing; the original
        code leaked them on a partial allocation failure
        (RelinquishMagickMemory() accepts NULL).
      */
      if ((scanline != x_vector) && (scanline != (MagickPixelPacket *) NULL))
        scanline=(MagickPixelPacket *) RelinquishMagickMemory(scanline);
      scale_scanline=(MagickPixelPacket *) RelinquishMagickMemory(
        scale_scanline);
      y_vector=(MagickPixelPacket *) RelinquishMagickMemory(y_vector);
      x_vector=(MagickPixelPacket *) RelinquishMagickMemory(x_vector);
      scale_image=DestroyImage(scale_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Scale image: accumulate weighted source rows into y_vector, then
    weighted source columns into scale_scanline (box-filter average).
  */
  number_rows=0;
  next_row=MagickTrue;
  span.y=1.0;
  scale.y=(double) scale_image->rows/(double) image->rows;
  (void) ResetMagickMemory(y_vector,0,(size_t) image->columns*
    sizeof(*y_vector));
  GetMagickPixelPacket(image,&pixel);
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  i=0;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  scale_view=AcquireAuthenticCacheView(scale_image,exception);
  for (y=0; y < (ssize_t) scale_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict scale_indexes;

    register MagickPixelPacket
      *restrict s,
      *restrict t;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      break;
    q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        break;
      }
    alpha=1.0;
    scale_indexes=GetCacheViewAuthenticIndexQueue(scale_view);
    if (scale_image->rows == image->rows)
      {
        /*
          Read a new scanline (no vertical scaling; channels are
          alpha-premultiplied on the way in).
        */
        p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1,
          exception);
        if (p == (const PixelPacket *) NULL)
          {
            status=MagickFalse;
            break;
          }
        indexes=GetCacheViewVirtualIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (image->matte != MagickFalse)
            alpha=QuantumScale*GetPixelAlpha(p);
          x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p));
          x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p));
          x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p));
          if (image->matte != MagickFalse)
            x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p);
          if (indexes != (IndexPacket *) NULL)
            x_vector[x].index=(MagickRealType) (alpha*
              GetPixelIndex(indexes+x));
          p++;
        }
      }
    else
      {
        /*
          Scale Y direction: fold whole source rows into y_vector until
          the current destination row's span is covered.
        */
        while (scale.y < span.y)
        {
          if ((next_row != MagickFalse) &&
              (number_rows < (ssize_t) image->rows))
            {
              /*
                Read a new scanline.
              */
              p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1,
                exception);
              if (p == (const PixelPacket *) NULL)
                {
                  status=MagickFalse;
                  break;
                }
              indexes=GetCacheViewVirtualIndexQueue(image_view);
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                if (image->matte != MagickFalse)
                  alpha=QuantumScale*GetPixelAlpha(p);
                x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p));
                x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p));
                x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p));
                if (image->matte != MagickFalse)
                  x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p);
                if (indexes != (IndexPacket *) NULL)
                  x_vector[x].index=(MagickRealType) (alpha*
                    GetPixelIndex(indexes+x));
                p++;
              }
              number_rows++;
            }
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            y_vector[x].red+=scale.y*x_vector[x].red;
            y_vector[x].green+=scale.y*x_vector[x].green;
            y_vector[x].blue+=scale.y*x_vector[x].blue;
            if (scale_image->matte != MagickFalse)
              y_vector[x].opacity+=scale.y*x_vector[x].opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              y_vector[x].index+=scale.y*x_vector[x].index;
          }
          span.y-=scale.y;
          scale.y=(double) scale_image->rows/(double) image->rows;
          next_row=MagickTrue;
        }
        if ((next_row != MagickFalse) &&
            (number_rows < (ssize_t) image->rows))
          {
            /*
              Read a new scanline for the partially-covered source row.
            */
            p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1,
              exception);
            if (p == (const PixelPacket *) NULL)
              {
                status=MagickFalse;
                break;
              }
            indexes=GetCacheViewVirtualIndexQueue(image_view);
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (image->matte != MagickFalse)
                alpha=QuantumScale*GetPixelAlpha(p);
              x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p));
              x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p));
              x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p));
              if (image->matte != MagickFalse)
                x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p);
              if (indexes != (IndexPacket *) NULL)
                x_vector[x].index=(MagickRealType) (alpha*
                  GetPixelIndex(indexes+x));
              p++;
            }
            number_rows++;
            next_row=MagickFalse;
          }
        s=scanline;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          pixel.red=y_vector[x].red+span.y*x_vector[x].red;
          pixel.green=y_vector[x].green+span.y*x_vector[x].green;
          pixel.blue=y_vector[x].blue+span.y*x_vector[x].blue;
          if (image->matte != MagickFalse)
            pixel.opacity=y_vector[x].opacity+span.y*x_vector[x].opacity;
          if (scale_indexes != (IndexPacket *) NULL)
            pixel.index=y_vector[x].index+span.y*x_vector[x].index;
          s->red=pixel.red;
          s->green=pixel.green;
          s->blue=pixel.blue;
          if (scale_image->matte != MagickFalse)
            s->opacity=pixel.opacity;
          if (scale_indexes != (IndexPacket *) NULL)
            s->index=pixel.index;
          s++;
          y_vector[x]=zero;
        }
        scale.y-=span.y;
        if (scale.y <= 0)
          {
            scale.y=(double) scale_image->rows/(double) image->rows;
            next_row=MagickTrue;
          }
        span.y=1.0;
      }
    if (scale_image->columns == image->columns)
      {
        /*
          Transfer scanline to scaled image (un-premultiply by alpha).
        */
        s=scanline;
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (scale_image->matte != MagickFalse)
            alpha=QuantumScale*(QuantumRange-s->opacity);
          alpha=PerceptibleReciprocal(alpha);
          SetPixelRed(q,ClampToQuantum(alpha*s->red));
          SetPixelGreen(q,ClampToQuantum(alpha*s->green));
          SetPixelBlue(q,ClampToQuantum(alpha*s->blue));
          if (scale_image->matte != MagickFalse)
            SetPixelOpacity(q,ClampToQuantum(s->opacity));
          if (scale_indexes != (IndexPacket *) NULL)
            SetPixelIndex(scale_indexes+x,ClampToQuantum(alpha*s->index));
          q++;
          s++;
        }
      }
    else
      {
        /*
          Scale X direction: fold weighted source columns into t.
        */
        pixel=zero;
        next_column=MagickFalse;
        span.x=1.0;
        s=scanline;
        t=scale_scanline;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          scale.x=(double) scale_image->columns/(double) image->columns;
          while (scale.x >= span.x)
          {
            if (next_column != MagickFalse)
              {
                pixel=zero;
                t++;
              }
            pixel.red+=span.x*s->red;
            pixel.green+=span.x*s->green;
            pixel.blue+=span.x*s->blue;
            if (image->matte != MagickFalse)
              pixel.opacity+=span.x*s->opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              pixel.index+=span.x*s->index;
            t->red=pixel.red;
            t->green=pixel.green;
            t->blue=pixel.blue;
            if (scale_image->matte != MagickFalse)
              t->opacity=pixel.opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              t->index=pixel.index;
            scale.x-=span.x;
            span.x=1.0;
            next_column=MagickTrue;
          }
          if (scale.x > 0)
            {
              if (next_column != MagickFalse)
                {
                  pixel=zero;
                  next_column=MagickFalse;
                  t++;
                }
              pixel.red+=scale.x*s->red;
              pixel.green+=scale.x*s->green;
              pixel.blue+=scale.x*s->blue;
              if (scale_image->matte != MagickFalse)
                pixel.opacity+=scale.x*s->opacity;
              if (scale_indexes != (IndexPacket *) NULL)
                pixel.index+=scale.x*s->index;
              span.x-=scale.x;
            }
          s++;
        }
        if (span.x > 0)
          {
            /* Fold the leftover fraction of the last source column. */
            s--;
            pixel.red+=span.x*s->red;
            pixel.green+=span.x*s->green;
            pixel.blue+=span.x*s->blue;
            if (scale_image->matte != MagickFalse)
              pixel.opacity+=span.x*s->opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              pixel.index+=span.x*s->index;
          }
        if ((next_column == MagickFalse) &&
            ((ssize_t) (t-scale_scanline) < (ssize_t) scale_image->columns))
          {
            t->red=pixel.red;
            t->green=pixel.green;
            t->blue=pixel.blue;
            if (scale_image->matte != MagickFalse)
              t->opacity=pixel.opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              t->index=pixel.index;
          }
        /*
          Transfer scanline to scaled image (un-premultiply by alpha).
        */
        t=scale_scanline;
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (scale_image->matte != MagickFalse)
            alpha=QuantumScale*(QuantumRange-t->opacity);
          alpha=PerceptibleReciprocal(alpha);
          SetPixelRed(q,ClampToQuantum(alpha*t->red));
          SetPixelGreen(q,ClampToQuantum(alpha*t->green));
          SetPixelBlue(q,ClampToQuantum(alpha*t->blue));
          if (scale_image->matte != MagickFalse)
            SetPixelOpacity(q,ClampToQuantum(t->opacity));
          if (scale_indexes != (IndexPacket *) NULL)
            SetPixelIndex(scale_indexes+x,ClampToQuantum(alpha*t->index));
          t++;
          q++;
        }
      }
    if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
    proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
  }
  scale_view=DestroyCacheView(scale_view);
  image_view=DestroyCacheView(image_view);
  /*
    Free allocated memory.
  */
  y_vector=(MagickPixelPacket *) RelinquishMagickMemory(y_vector);
  scale_scanline=(MagickPixelPacket *) RelinquishMagickMemory(scale_scanline);
  if (scale_image->rows != image->rows)
    scanline=(MagickPixelPacket *) RelinquishMagickMemory(scanline);
  x_vector=(MagickPixelPacket *) RelinquishMagickMemory(x_vector);
  scale_image->type=image->type;
  if (status == MagickFalse)
    scale_image=DestroyImage(scale_image);
  return(scale_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T h u m b n a i l I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ThumbnailImage() changes the size of an image to the given dimensions and
%  removes any associated profiles.
The goal is to produce small low cost
%  thumbnail images suited for display on the Web.
%
%  The format of the ThumbnailImage method is:
%
%      Image *ThumbnailImage(const Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the scaled image.
%
%    o rows: the number of rows in the scaled image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor  5

  char
    value[MaxTextExtent];

  const char
    *name;

  Image
    *thumbnail_image;

  MagickRealType
    x_factor,
    y_factor;

  size_t
    version;

  struct stat
    attributes;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    For large reductions, pre-shrink with the cheap SampleImage() before the
    final filtered resize; otherwise resize directly.
  */
  x_factor=(MagickRealType) columns/(MagickRealType) image->columns;
  y_factor=(MagickRealType) rows/(MagickRealType) image->rows;
  if ((x_factor*y_factor) > 0.1)
    thumbnail_image=ResizeImage(image,columns,rows,image->filter,image->blur,
      exception);
  else
    if (((SampleFactor*columns) < 128) || ((SampleFactor*rows) < 128))
      thumbnail_image=ResizeImage(image,columns,rows,image->filter,
        image->blur,exception);
    else
      {
        Image
          *sample_image;

        sample_image=SampleImage(image,SampleFactor*columns,SampleFactor*rows,
          exception);
        if (sample_image == (Image *) NULL)
          return((Image *) NULL);
        thumbnail_image=ResizeImage(sample_image,columns,rows,image->filter,
          image->blur,exception);
        sample_image=DestroyImage(sample_image);
      }
  if (thumbnail_image == (Image *) NULL)
    return(thumbnail_image);
  (void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
  if (thumbnail_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  /*
    Strip all profiles except color profiles.
  */
  ResetImageProfileIterator(thumbnail_image);
  for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
  {
    if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
      {
        (void) DeleteImageProfile(thumbnail_image,name);
        ResetImageProfileIterator(thumbnail_image);
      }
    name=GetNextImageProfile(thumbnail_image);
  }
  (void) DeleteImageProperty(thumbnail_image,"comment");
  /*
    Attach freedesktop.org Thumbnail Managing Standard properties.
  */
  (void) CopyMagickString(value,image->magick_filename,MaxTextExtent);
  if (strstr(image->magick_filename,"//") == (char *) NULL)
    (void) FormatLocaleString(value,MaxTextExtent,"file://%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value);
  (void) CopyMagickString(value,image->magick_filename,MaxTextExtent);
  if (GetPathAttributes(image->filename,&attributes) != MagickFalse)
    {
      /*
        Only format st_mtime when the stat succeeded; the removed duplicate
        call outside this branch read an uninitialized struct stat.
      */
      (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
        attributes.st_mtime);
      (void) SetImageProperty(thumbnail_image,"Thumb::MTime",value);
    }
  (void) FormatMagickSize(GetBlobSize(image),MagickFalse,value);
  (void) ConcatenateMagickString(value,"B",MaxTextExtent);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value);
  (void) FormatLocaleString(value,MaxTextExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value);
  (void) SetImageProperty(thumbnail_image,"software",
    GetMagickVersion(&version));
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    image->magick_columns);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Width",value);
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    image->magick_rows);
  /*
    "Thumb::Image::Height" per the thumbnail spec (was lowercase "height",
    inconsistent with "Thumb::Image::Width" above).
  */
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Height",value);
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    GetImageListLength(image));
  (void) SetImageProperty(thumbnail_image,"Thumb::Document::Pages",value);
  return(thumbnail_image);
}
/* 1.hello.c */
#include <stdio.h>
#include <omp.h>

/*
  OpenMP exercise.
  Q1: Run as "./1.hello" — the message prints once per thread in the
      parallel region (one per core by default when built with OpenMP;
      exactly once if OpenMP is disabled and the pragma is ignored).
  Q2: To get exactly 4 copies without editing the source, set the
      environment variable OMP_NUM_THREADS=4 before running
      (alternatively, the commented num_threads(4) clause would do it
      in code).
*/
int main ()
{
#pragma omp parallel //num_threads(4)
  {
    printf("Hello world!\n");
  }
  return 0;
}