/*
* PKZIP patch for john to handle 'old' pkzip passwords (old 'native' format)
*
* Written by Jim Fougeron <jfoug at cox.net> in 2011. No copyright
* is claimed, and the software is hereby placed in the public domain.
* In case this attempt to disclaim copyright and place the software in the
* public domain is deemed null and void, then the software is
* Copyright (c) 2011 Jim Fougeron and it is hereby released to the
* general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
*
*/
#include "arch.h"
#if !AC_BUILT
#define HAVE_LIBZ 1 /* legacy build has -lz in LDFLAGS */
#endif
#if HAVE_LIBZ
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pkzip;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pkzip);
#else
#include <string.h>
#include <zlib.h>
#include "common.h"
#include "misc.h"
#include "formats.h"
#define USE_PKZIP_MAGIC 1
#include "pkzip.h"
#include "pkzip_inffixed.h" // This file is a data file, taken from zlib
#include "loader.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "memdbg.h"
#define FORMAT_LABEL "PKZIP"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define FORMAT_TAG "$pkzip$"
#define FORMAT_TAG2 "$pkzip2$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define FORMAT_TAG2_LEN (sizeof(FORMAT_TAG2)-1)
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1000
#define PLAINTEXT_LENGTH 31
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_SIZE (sizeof(PKZ_SALT*))
#define SALT_ALIGN (sizeof(uint64_t))
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 64
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
//#define ZIP_DEBUG 1
//#define ZIP_DEBUG 2
/*
* It is likely that this should be put into the arch.h files for the different systems,
* IF we find a system which operates faster doing the non-table work.
* However, in current testing, it is always faster to use the multiply table. It only
* takes 16kb, and almost always stays in the cache for any system newer than a 386.
*/
#define PKZIP_USE_MULT_TABLE
#if ARCH_LITTLE_ENDIAN
#define KB1 0
#define KB2 3
#else
#define KB1 3
#define KB2 0
#endif
/*
* filename:$pkzip$C*B*[DT*MT{CL*UL*CR*OF*OX}*CT*DL*CS*DA]*$/pkzip$ (deprecated)
* filename:$pkzip2$C*B*[DT*MT{CL*UL*CR*OF*OX}*CT*DL*CS*TC*DA]*$/pkzip2$ (new format, with 2 checksums)
*
* All numeric and 'binary data' fields are stored in hex.
*
* C is the count of hashes present (the array of items, inside the [] C can be 1 to 3.).
* B is number of valid bytes in the checksum (1 or 2). Unix zip is 2 bytes, all others are 1
* ARRAY of data starts here (there will be C array elements)
* DT is a "Data Type enum". This will be 1 2 or 3. 1 is 'partial'. 2 and 3 are full file data (2 is inline, 3 is load from file).
* MT Magic Type enum. 0 is no 'type'. 255 is 'text'. Other types (like MS Doc, GIF, etc), see source.
* NOTE, CL, DL, CRC, OFF are only present if DT != 1
* CL Compressed length of file blob data (includes 12 byte IV).
* UL Uncompressed length of the file.
* CR CRC32 of the 'final' file.
* OF Offset to the PK\x3\x4 record for this file data. If DT==2, then this will be a 0, as it is not needed, all of the data is already included in the line.
* OX Additional offset (past OF), to get to the zip data within the file.
* END OF 'optional' fields.
* CT Compression type (0 or 8). 0 is stored, 8 is deflated (zlib deflate; this format supports no other methods).
* DL Length of the DA data.
* CS Checksum from crc32.
* TC Checksum from timestamp
* DA This is the 'data'. It will be hex data if DT==1 or 2. If DT==3, then it is a filename (name of the .zip file).
* END of array items.
* The format string will end with $/pkzip$
*
* NOTE, after some code testing, it has come to show, that the 'magic' may not be needed, or very useful. The problem with it, is IF the file
* ends up NOT starting with any of the magic values, then we will have a false negative, and NEVER be able to crack the zip's password. For now
* we have a #define (right before the #include "pkzip.h"). If that define is uncommented, then pkzip format will be built with magic logic.
* However, right now it is not being built that way.
*
*/
static struct fmt_tests tests[] = {
/* compression of a perl file. We have the same password, same file used twice in a row (pkzip, 1 byte checksum). NOTE, pkzip uses random IV, so both encrypted blobs are different */
{"\
$pkzip$1*1*2*0*e4*1c5*eda7a8de*0*4c*8*e4*eda7*194883130e4c7419bd735c53dec36f0c4b6de6daefea0f507d67ff7256a49b5ea93ccfd9b12f2ee99053ee0b1c9e1c2b88aeaeb6bd4e60094a1ea118785d4ded6dae94\
cade41199330f4f11b37cba7cda5d69529bdfa43e2700ba517bd2f7ff4a0d4b3d7f2559690ec044deb818c44844d6dd50adbebf02cec663ae8ebb0dde05d2abc31eaf6de36a2fc19fda65dd6a7e449f669d1f8c75e9daa0a3f7b\
e8feaa43bf84762d6dbcc9424285a93cedfa3a75dadc11e969065f94fe3991bc23c9b09eaa5318aa29fa02e83b6bee26cafec0a5e189242ac9e562c7a5ed673f599cefcd398617*$/pkzip$", "password" },
{"\
$pkzip$1*1*2*0*e4*1c5*eda7a8de*0*4c*8*e4*eda7*581f798527109cbadfca0b3318435a000be84366caf9723f841a2b13e27c2ed8cdb5628705a98c3fbbfb34552ed498c51a172641bf231f9948bca304a6be2138ab718f\
6a5b1c513a2fb80c49030ff1a404f7bd04dd47c684317adea4107e5d70ce13edc356c60bebd532418e0855428f9dd582265956e39a0b446a10fd8b7ffb2b4af559351bbd549407381c0d2acc270f3bcaffb275cbe2f628cb09e2\
978e87cd023d4ccb50caaa92b6c952ba779980d65f59f664dde2451cc456d435188be59301a5df1b1b4fed6b7509196334556c44208a9d7e2d9e237f591d6c9fc467b408bf0aaa*$/pkzip$", "password" },
/* Now the same file, compressed twice, using unix zip (info-zip), with 2 byte checksums */
{"\
$pkzip$1*2*2*0*e4*1c5*eda7a8de*0*47*8*e4*4bb6*436c9ffa4328870f6272349b591095e1b1126420c3041744650282bc4f575d0d4a5fc5fb34724e6a1cde742192387b9ed749ab5c72cd6bb0206f102e9216538f095fb7\
73661cfde82c2e2a619332998124648bf4cd0da56279f0c297567d9b5d684125ee92920dd513fd18c27afba2a9633614f75d8f8b9a14095e3fafe8165330871287222e6681dd9c0f830cf5d464457b257d0900eed29107fad8af\
3ac4f87cf5af5183ff0516ccd9aeac1186006c8d11b18742dfb526aadbf2906772fbfe8fb18798967fd397a724d59f6fcd4c32736550986d227a6b447ef70585c049a1a4d7bf25*$/pkzip$", "password" },
{"\
$pkzip$1*2*2*0*e4*1c5*eda7a8de*0*47*8*e4*4bb6*436c9ffa4328870f6272349b591095e1b1126420c3041744650282bc4f575d0d4a5fc5fb34724e6a1cde742192387b9ed749ab5c72cd6bb0206f102e9216538f095fb7\
73661cfde82c2e2a619332998124648bf4cd0da56279f0c297567d9b5d684125ee92920dd513fd18c27afba2a9633614f75d8f8b9a14095e3fafe8165330871287222e6681dd9c0f830cf5d464457b257d0900eed29107fad8af\
3ac4f87cf5af5183ff0516ccd9aeac1186006c8d11b18742dfb526aadbf2906772fbfe8fb18798967fd397a724d59f6fcd4c32736550986d227a6b447ef70585c049a1a4d7bf25*$/pkzip$", "password"},
/* now a pkzip archive, with 3 files, 1 byte checksum */
{"\
$pkzip$3*1*1*0*8*24*4001*8986ec4d693e86c1a42c1bd2e6a994cb0b98507a6ec937fe0a41681c02fe52c61e3cc046*1*0*8*24*4003*a087adcda58de2e14e73db0043a4ff0ed3acc6a9aee3985d7cb81d5ddb32b840ea20\
57d9*2*0*e4*1c5*eda7a8de*0*4c*8*e4*eda7*89a792af804bf38e31fdccc8919a75ab6eb75d1fd6e7ecefa3c5b9c78c3d50d656f42e582af95882a38168a8493b2de5031bb8b39797463cb4769a955a2ba72abe48ee75b103\
f93ef9984ae740559b9bd84cf848d693d86acabd84749853675fb1a79edd747867ef52f4ee82435af332d43f0d0bb056c49384d740523fa75b86a6d29a138da90a8de31dbfa89f2f6b0550c2b47c43d907395904453ddf42a665\
b5f7662de170986f89d46d944b519e1db9d13d4254a6b0a5ac02b3cfdd468d7a4965e4af05699a920e6f3ddcedb57d956a6b2754835b14e174070ba6aec4882d581c9f30*$/pkzip$", "3!files"},
/* following are from CMIYC 2012 */
{"$pkzip$1*1*2*0*163*2b5*cd154083*0*26*8*163*cd15*d6b094794b40116a8b387c10159225d776f815b178186e51faf16fa981fddbffdfa22f6c6f32d2f81dab35e141f2899841991f3cb8d53f8ee1f1d85657f7c7a82ebb2d63182803c6beee00e0bf6c72edeeb1b00dc9f07f917bb8544cc0e96ca01503cd0fb6632c296cebe3fb9b64543925daae6b7ea95cfd27c42f6f3465e0ab2c812b9aeeb15209ce3b691f27ea43a7a77b89c2387e31c4775866a044b6da783af8ddb72784ccaff4d9a246db96484e865ea208ade290b0131b4d2dd21f172693e6b5c90f2eb9b67572b55874b6d3a78763212b248629e744c07871a6054e24ef74b6d779e44970e1619df223b4e5a72a189bef40682b62be6fb7f65e087ca6ee19d1ebfc259fa7e3d98f3cb99347689f8360294352accffb146edafa9e91afba1f119f95145738ac366b332743d4ff40d49fac42b8758c43b0af5b60b8a1c63338359ffbff432774f2c92de3f8c49bd4611e134db98e6a3f2cfb148d2b20f75abab6*$/pkzip$", "passwort"},
{"$pkzip$1*1*2*0*163*2b6*46abc149*0*28*8*163*46ab*0f539b23b761a347a329f362f7f1f0249515f000404c77ec0b0ffe06f29140e8fa3e8e5a6354e57f3252fae3d744212d4d425dc44389dd4450aa9a4f2f3c072bee39d6ac6662620812978f7ab166c66e1acb703602707ab2da96bb28033485ec192389f213e48eda8fc7d9dad1965b097fafebfda6703117db90e0295db9a653058cb28215c3245e6e0f6ad321065bf7b8cc5f66f6f2636e0d02ea35a6ba64bbf0191c308098fd836e278abbce7f10c3360a0a682663f59f92d9c2dcfc87cde2aae27ea18a14d2e4a0752b6b51e7a5c4c8c2bab88f4fb0aba27fb20e448655021bb3ac63752fdb01e6b7c99f9223f9e15d71eb1bd8e323f522fc3da467ff0aae1aa17824085d5d6f1cdfc9c7c689cd7cb057005d94ba691f388484cfb842c8775baac220a5490ed945c8b0414dbfc4589254b856aade49f1aa386db86e9fc87e6475b452bd72c5e2122df239f8c2fd462ca54c1a5bddac36918c5f5cf0cc94aa6ee820*$/pkzip$", "Credit11"},
{"$pkzip$1*1*2*0*163*2b6*46abc149*0*26*8*163*46ab*7ea9a6b07ddc9419439311702b4800e7e1f620b0ab8535c5aa3b14287063557b176cf87a800b8ee496643c0b54a77684929cc160869db4443edc44338294458f1b6c8f056abb0fa27a5e5099e19a07735ff73dc91c6b20b05c023b3ef019529f6f67584343ac6d86fa3d12113f3d374b047efe90e2a325c0901598f31f7fb2a31a615c51ea8435a97d07e0bd4d4afbd228231dbc5e60bf1116ce49d6ce2547b63a1b057f286401acb7c21afbb673f3e26bc1b2114ab0b581f039c2739c7dd0af92c986fc4831b6c294783f1abb0765cf754eada132df751cf94cad7f29bb2fec0c7c47a7177dea82644fc17b455ba2b4ded6d9a24e268fcc4545cae73b14ceca1b429d74d1ebb6947274d9b0dcfb2e1ac6f6b7cd2be8f6141c3295c0dbe25b65ff89feb62cb24bd5be33853b88b8ac839fdd295f71e17a7ae1f054e27ba5e60ca03c6601b85c3055601ce41a33127938440600aaa16cfdd31afaa909fd80afc8690aaf*$/pkzip$", "7J0rdan!!"},
/* CMIYC 2013 "pro" hard hash */
{"$pkzip$1*2*2*0*6b*73*8e687a5b*0*46*8*6b*0d9d*636fedc7a78a7f80cda8542441e71092d87d13da94c93848c230ea43fab5978759e506110b77bd4bc10c95bc909598a10adfd4febc0d42f3cd31e4fec848d6f49ab24bb915cf939fb1ce09326378bb8ecafde7d3fe06b6013628a779e017be0f0ad278a5b04e41807ae9fc*$/pkzip$", "c00rslit3!"},
/* http://corkami.googlecode.com/files/ChristmasGIFts.zip (fixed with 2 byte checksums from timestamp, using new $pkzip2$ type) */
{"$pkzip2$3*2*1*2*8*c0*7224*72f6*6195f9f3401076b22f006105c4323f7ac8bb8ebf8d570dc9c7f13ddacd8f071783f6bef08e09ce4f749af00178e56bc948ada1953a0263c706fd39e96bb46731f827a764c9d55945a89b952f0503747703d40ed4748a8e5c31cb7024366d0ef2b0eb4232e250d343416c12c7cbc15d41e01e986857d320fb6a2d23f4c44201c808be107912dbfe4586e3bf2c966d926073078b92a2a91568081daae85cbcddec75692485d0e89994634c71090271ac7b4a874ede424dafe1de795075d2916eae*1*6*8*c0*26ee*461b*944bebb405b5eab4322a9ce6f7030ace3d8ec776b0a989752cf29569acbdd1fb3f5bd5fe7e4775d71f9ba728bf6c17aad1516f3aebf096c26f0c40e19a042809074caa5ae22f06c7dcd1d8e3334243bca723d20875bd80c54944712562c4ff5fdb25be5f4eed04f75f79584bfd28f8b786dd82fd0ffc760893dac4025f301c2802b79b3cb6bbdf565ceb3190849afdf1f17688b8a65df7bc53bc83b01a15c375e34970ae080307638b763fb10783b18b5dec78d8dfac58f49e3c3be62d6d54f9*2*0*2a*1e*4a204eab*ce8*2c*0*2a*4a20*7235*6b6e1a8de47449a77e6f0d126b217d6b2b72227c0885f7dc10a2fb3e7cb0e611c5c219a78f98a9069f30*$/pkzip2$", "123456"},
{NULL}
};
/* These static fields are used in the crypt_all loop, and in cmp_all/cmp_one,   */
/* where we perform the pkzip 'checksum' checking. If we do get a 'hit', then    */
/* that password & salt pair is fully checked within cmp_exact, where the data   */
/* gets inflated and verified (possibly a 'sample' TEXT-record quick check is    */
/* done first).                                                                  */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static u32 *K12;
static PKZ_SALT *salt;
static u8 *chk;
static int dirty=1;
#if USE_PKZIP_MAGIC
static ZIP_SIGS SIGS[256];
#endif
#ifdef PKZIP_USE_MULT_TABLE
static u8 mult_tab[16384];
#define PKZ_MULT(b,w) b^mult_tab[(u16)(w.u)>>2]
#else
inline u8 PKZ_MULT(u8 b, MY_WORD w) {u16 t = w.u|2; return b ^ (u8)(((u16)(t*(t^1))>>8)); }
#endif
extern struct fmt_main fmt_pkzip;
static const char *ValidateZipContents(FILE *in, long offset, u32 offex, int len, u32 crc);
/* Since the pkzip format textual representation is pretty complex, with multiple */
/* 'optional' sections, we have a VERY complete valid. Valid will make SURE that */
/* the format is completely valid. Thus, there is little or no error checking later */
/* in the rest of the code. It 'should' not be needed, and is done here. There is */
/* a little error checking later in the file, for some of the file opening stuff, */
/* since the file can change from the time of this 'valid' call, until when the data */
/* is actually read from the file. */
/* */
/* NOTE, we may want to later make a 'prepare()' function, and do all file loading */
/* there, so that we have a 'complete' format line, with the zip data contained. */
static int valid(char *ciphertext, struct fmt_main *self)
{
c8 *p, *cp, *cpkeep;
int cnt, ret=0;
u64 data_len;
u32 crc;
FILE *in;
const char *sFailStr;
long offset;
u32 offex;
int type;
u64 complen = 0;
int type2 = 0;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) {
if (!strncmp(ciphertext, FORMAT_TAG2, FORMAT_TAG2_LEN))
type2 = 1;
else
return ret;
}
cpkeep = strdup(ciphertext);
cp = cpkeep;
p = &cp[FORMAT_TAG_LEN];
if (type2)
++p;
if ((cp = strtokm(p, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
sFailStr = "Out of data, reading count of hashes field";
goto Bail;
}
sscanf(cp, "%x", &cnt);
if (cnt < 1 || cnt > MAX_PKZ_FILES) {
sFailStr = "Count of hashes field out of range";
goto Bail;
}
if ((cp = strtokm(NULL, "*")) == NULL || cp[0] < '0' || cp[0] > '2' || cp[1]) {
sFailStr = "Number of valid hash bytes empty or out of range";
goto Bail;
}
while (cnt--) {
if ((cp = strtokm(NULL, "*")) == NULL || cp[0]<'1' || cp[0]>'3' || cp[1]) {
sFailStr = "Invalid data enumeration type";
goto Bail;
}
type = cp[0] - '0';
if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
sFailStr = "Invalid type enumeration";
goto Bail;
}
if (type > 1) {
if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
sFailStr = "Invalid compressed length";
goto Bail;
}
sscanf(cp, "%"PRIx64, &complen);
if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
sFailStr = "Invalid data length value";
goto Bail;
}
if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
sFailStr = "Invalid CRC value";
goto Bail;
}
sscanf(cp, "%x", &crc);
if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
sFailStr = "Invalid offset length";
goto Bail;
}
sscanf(cp, "%lx", &offset);
if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
sFailStr = "Invalid offset length";
goto Bail;
}
sscanf(cp, "%x", &offex);
}
if ((cp = strtokm(NULL, "*")) == NULL || (cp[0] != '0' && cp[0] != '8') || cp[1]) {
sFailStr = "Compression type enumeration";
goto Bail;
}
if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
sFailStr = "Invalid data length value";
goto Bail;
}
sscanf(cp, "%"PRIx64, &data_len);
if ((cp = strtokm(NULL, "*")) == NULL || !ishexlc(cp) || strlen(cp) != 4) {
sFailStr = "invalid checksum value";
goto Bail;
}
if (type2) {
if ((cp = strtokm(NULL, "*")) == NULL || !ishexlc(cp) || strlen(cp) != 4) {
sFailStr = "invalid checksum2 value";
goto Bail;}
}
if ((cp = strtokm(NULL, "*")) == NULL) goto Bail;
if (type > 1) {
if (type == 3) {
if ( strlen(cp) != data_len) {
sFailStr = "invalid checksum value";
goto Bail;
}
in = fopen(cp, "rb"); /* have to open in bin mode for OS's where this matters, DOS/Win32 */
if (!in) {
/* this error is listed, even if not in pkzip debugging mode. */
/* But not if we're just reading old pot lines */
if (!ldr_in_pot)
fprintf(stderr, "Error loading a pkzip hash line. The ZIP file '%s' could NOT be found\n", cp);
return 0;
}
sFailStr = ValidateZipContents(in, offset, offex, complen, crc);
if (*sFailStr) {
/* this error is listed, even if not in pkzip debugging mode. */
fprintf(stderr, "pkzip validation failed [%s] Hash is %s\n", sFailStr, ciphertext);
fclose(in);
return 0;
}
fseek(in, offset+offex, SEEK_SET);
if (complen < 16*1024) {
/* simply load the whole blob */
void *tbuf = mem_alloc(complen);
if (fread(tbuf, 1, complen, in) != complen) {
MEM_FREE(tbuf);
fclose(in);
return 0;
}
data_len = complen;
MEM_FREE(tbuf);
}
fclose(in);
} else {
/* 'inline' data. */
if (complen != data_len) {
sFailStr = "length of full data does not match the salt len";
goto Bail;
}
if (!ishexlc(cp) || strlen(cp) != data_len<<1) {
sFailStr = "invalid inline data";
goto Bail;
}
}
} else {
if (!ishexlc(cp) || strlen(cp) != data_len<<1) {
sFailStr = "invalid partial data";
goto Bail;
}
}
}
if ((cp = strtokm(NULL, "*")) == NULL) goto Bail;
if (strtokm(NULL, "") != NULL) goto Bail;
if (type2) ret = !strcmp(cp, "$/pkzip2$");
else ret = !strcmp(cp, "$/pkzip$");
Bail:;
#ifdef ZIP_DEBUG
if (!ret) fprintf(stderr, "pkzip validation failed [%s] Hash is %.64s\n", sFailStr, ciphertext);
#endif
MEM_FREE(cpkeep);
return ret;
}
static const char *ValidateZipContents(FILE *fp, long offset, u32 offex, int _len, u32 _crc)
{
u32 id;
u16 version, flags, method, modtm, moddt, namelen, exlen;
u32 crc, complen, uncomplen;
if (fseek(fp, offset, SEEK_SET) != 0)
return "Not able to seek to specified offset in the .zip file, to read the zip blob data.";
id = fget32LE(fp);
if (id != 0x04034b50U)
return "Compressed zip file offset does not point to start of zip blob";
/* Ok, see if this IS the correct file blob. */
version = fget16LE(fp);
flags = fget16LE(fp);
method = fget16LE(fp);
modtm = fget16LE(fp);
moddt = fget16LE(fp);
crc = fget32LE(fp);
complen = fget32LE(fp);
uncomplen = fget32LE(fp);
namelen = fget16LE(fp);
exlen = fget16LE(fp);
/* unused vars. */
(void)uncomplen;
(void)modtm;
(void)moddt;
/* Even if we 'miss', we keep walking back. We 'can' miss if the CRC of file, or some other */
/* binary data happens to have the 0x04034b50 signature, thus giving us a false local header hit. */
if (_crc == crc && _len == complen && (0x14 == version || 0xA == version) && (flags & 1) && (method == 8 || method == 0) && offex==30+namelen+exlen)
return "";
return "We could NOT find the internal zip data in this ZIP file";
}
static u8 *buf_copy (char *p, int len)
{
u8 *op = mem_alloc_tiny(len, MEM_ALIGN_NONE);
memcpy(op, p, len);
return op;
}
static void init(struct fmt_main *self)
{
#ifdef PKZIP_USE_MULT_TABLE
unsigned short n=0;
#endif
#ifdef _OPENMP
int omp_t;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
K12 = mem_calloc(sizeof(*K12) * 3, self->params.max_keys_per_crypt);
chk = mem_calloc(sizeof(*chk), self->params.max_keys_per_crypt);
/*
* Precompute the multiply mangling, within several parts of the hash. There is a pattern,
* 64k entries long. However the exact same value is produced 4 times in a row, every
* time. Thus, we can build a 16k wide array, and then access the array using this
* ((val&0xFFFF) >> 2) This is faster on all current HW, since the 16kb array access
* (and the and/shift) is faster than performing the whole mult, 2 shifts, 2 adds and
* an and (if the compiler can optimize it to that)
*
* There is a # define at the top of this file that turns this OFF. if that define is
* not set, then these mult's will be done in the crypt_all and decrypt functions
*/
#ifdef PKZIP_USE_MULT_TABLE
for (n = 0; n < 16384; n++)
mult_tab[n] = (((unsigned)(n*4+3) * (n*4+2)) >> 8) & 0xff;
#endif
#if USE_PKZIP_MAGIC
//static char *MagicTypes[]= { "", "DOC", "XLS", "DOT", "XLT", "EXE", "DLL", "ZIP", "BMP", "DIB", "GIF", "PDF", "GZ", "TGZ", "BZ2", "TZ2", "FLV", "SWF", "MP3", NULL };
//static int MagicToEnum[] = {0, 1, 1, 1, 1, 2, 2, 3, 4, 4, 5, 6, 7, 7, 8, 8, 9, 10, 11, 0};
// decent sources of these:
// http://www.garykessler.net/library/file_sigs.html
// http://en.wikipedia.org/wiki/List_of_file_signatures
// http://toorcon.techpathways.com/uploads/headersig.txt
// not available, 2012-12-28)
// archive.org still has a version:
// http://web.archive.org/web/20110725085828/http://toorcon.techpathways.com/uploads/headersig.txt
// there are many more.
//case 1: // DOC/XLS
SIGS[1].magic_signature[0] = (u8*)str_alloc_copy("\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1");
SIGS[1].magic_sig_len[0] = 8;
SIGS[1].magic_signature[1] = buf_copy("\x50\x4B\x03\x04\x14\x00\x06\x00\x08", 10); // a .zip file 'sort of'
SIGS[1].magic_sig_len[1] = 9;
SIGS[1].magic_signature[2] = buf_copy("\x09\x04\x06\x00\x00\x00\x10\x00\xF6\x05\x5C\x00", 13); // older XLS format (office 95)
SIGS[1].magic_sig_len[2] = 12;
SIGS[1].magic_signature[3] = buf_copy("\x09\x02\x06\x00\x00\x00\x10\x00\xB9\x04\x5C\x00", 13); // older XLS v2
SIGS[1].magic_sig_len[3] = 12;
SIGS[1].magic_signature[4] = buf_copy("\x50\x4B\x03\x04\x14\x00\x00\x00\x00\x00", 11); //DOC Star Writer 6.0
SIGS[1].magic_sig_len[4] = 10;
SIGS[1].magic_signature[5] = buf_copy("\x31\xBE\x00\x00\x00\xAB\x00\x00", 9); //DOC MS Word for DOS v6 File
SIGS[1].magic_sig_len[5] = 8;
SIGS[1].magic_signature[6] = (u8*)str_alloc_copy("\x12\x34\x56\x78\x90\xFF"); //DOC MS Word 6.0 File
SIGS[1].magic_sig_len[6] = 6;
SIGS[1].magic_signature[7] = (u8*)str_alloc_copy("\x7F\xFE\x34\x0A"); //MS Word File
SIGS[1].magic_sig_len[7] = 4;
SIGS[1].magic_count = 8;
SIGS[1].max_len = 12;
//case 2: // Win32/DOS exe file MZ
SIGS[2].magic_signature[0] = (u8*)str_alloc_copy("MZ");
SIGS[2].magic_sig_len[0] = 2;
SIGS[2].magic_count = 1;
SIGS[2].max_len = 2;
//case 3: // PKZIP
SIGS[3].magic_signature[0] = (u8*)str_alloc_copy("\x50\x4B\x03\x04");
SIGS[3].magic_sig_len[0] = 4;
SIGS[3].magic_count = 1;
SIGS[3].max_len = 4;
//case 4: // BMP
SIGS[4].magic_signature[0] = (u8*)str_alloc_copy("BM");
SIGS[4].magic_sig_len[0] = 2;
SIGS[4].magic_count = 1;
SIGS[4].max_len = 2;
//case 5: // GIF
SIGS[5].magic_signature[0] = (u8*)str_alloc_copy("GIF87a");
SIGS[5].magic_sig_len[0] = 6;
SIGS[5].magic_signature[1] = (u8*)str_alloc_copy("GIF89a");
SIGS[5].magic_sig_len[1] = 6;
SIGS[5].magic_count = 2;
SIGS[5].max_len = 6;
//case 6: // PDF
SIGS[6].magic_signature[0] = (u8*)str_alloc_copy("%PDF");
SIGS[6].magic_sig_len[0] = 4;
SIGS[6].magic_count = 1;
SIGS[6].max_len = 4;
//case 7: // GZ
SIGS[7].magic_signature[0] = (u8*)str_alloc_copy("\x1F\x8B\x08");
SIGS[7].magic_sig_len[0] = 3;
SIGS[7].magic_count = 1;
SIGS[7].max_len = 3;
//case 8: // BZ2 (there is a 'magic' pi, but byte 4 is 1 to 9, so skip the 'pi')
SIGS[8].magic_signature[0] = (u8*)str_alloc_copy("BZh");
SIGS[8].magic_sig_len[0] = 3;
SIGS[8].magic_signature[1] = (u8*)str_alloc_copy("BZ0");
SIGS[8].magic_sig_len[1] = 3;
SIGS[8].magic_count = 2;
SIGS[8].max_len = 3;
//case 9: // FLV
SIGS[9].magic_signature[0] = (u8*)str_alloc_copy("FLV\x01");
SIGS[9].magic_sig_len[0] = 4;
SIGS[9].magic_count = 1;
SIGS[9].max_len = 4;
//case 10: // SWF
SIGS[10].magic_signature[0] = (u8*)str_alloc_copy("FWS");
SIGS[10].magic_sig_len[0] = 3;
SIGS[10].magic_signature[1] = (u8*)str_alloc_copy("CWS");
SIGS[10].magic_sig_len[1] = 3;
SIGS[10].magic_signature[2] = (u8*)str_alloc_copy("ZWS");
SIGS[10].magic_sig_len[2] = 3;
SIGS[10].magic_count = 3;
SIGS[10].max_len = 3;
//case 11: // MP3
SIGS[11].magic_signature[0] = (u8*)str_alloc_copy("ID3");
SIGS[11].magic_sig_len[0] = 3;
SIGS[11].magic_count = 1;
SIGS[11].max_len = 3;
SIGS[255].max_len = 64;
#endif
}
static void done(void)
{
MEM_FREE(chk);
MEM_FREE(K12);
MEM_FREE(saved_key);
}
static void set_salt(void *_salt)
{
salt = *((PKZ_SALT**)_salt);
if (salt->H[0].h && salt->H[1].h && salt->H[2].h)
return;
// we 'late' fixup the salt.
salt->H[0].h = &salt->zip_data[0];
salt->H[1].h = &salt->zip_data[1+salt->H[0].datlen];
salt->H[2].h = &salt->zip_data[2+salt->H[0].datlen+salt->H[1].datlen];
}
static void *get_salt(char *ciphertext)
{
/* NOTE, almost NO error checking at all in this function. Proper error checking done in valid() */
static union alignment {
unsigned char c[8];
uint64_t a[1]; // salt alignment of 8 bytes required. uint64_t values in the salt.
} a;
unsigned char *salt_p = a.c;
PKZ_SALT *salt, *psalt;
long offset=0;
char *H[3] = {0,0,0};
long ex_len[3] = {0,0,0};
u32 offex;
size_t i, j;
c8 *p, *cp, *cpalloc = (char*)mem_alloc(strlen(ciphertext)+1);
int type2 = 0;
/* Needs word align on REQ_ALIGN systems. May crash otherwise (in the sscanf) */
salt = mem_calloc(1, sizeof(PKZ_SALT));
cp = cpalloc;
strcpy(cp, ciphertext);
if (!strncmp(cp, FORMAT_TAG, FORMAT_TAG_LEN))
p = &cp[FORMAT_TAG_LEN];
else {
p = &cp[FORMAT_TAG2_LEN];
type2 = 1;
}
cp = strtokm(p, "*");
sscanf(cp, "%x", &(salt->cnt));
cp = strtokm(NULL, "*");
sscanf(cp, "%x", &(salt->chk_bytes));
for (i = 0; i < salt->cnt; ++i) {
int data_enum;
cp = strtokm(NULL, "*");
data_enum = *cp - '0';
cp = strtokm(NULL, "*");
#if USE_PKZIP_MAGIC
{
// mingw can't handle %hhx. Use 'normal' %x and assign back to uint_8 var
unsigned jnk;
sscanf(cp, "%x", &jnk);
salt->H[i].magic = (unsigned char)jnk;
}
salt->H[i].pSig = &SIGS[salt->H[i].magic];
#endif
if (data_enum > 1) {
cp = strtokm(NULL, "*");
sscanf(cp, "%"PRIx64, &(salt->compLen));
cp = strtokm(NULL, "*");
sscanf(cp, "%"PRIx64, &(salt->deCompLen));
cp = strtokm(NULL, "*");
sscanf(cp, "%x", &(salt->crc32));
cp = strtokm(NULL, "*");
sscanf(cp, "%lx", &offset);
cp = strtokm(NULL, "*");
sscanf(cp, "%x", &offex);
}
cp = strtokm(NULL, "*");
sscanf(cp, "%x", &(salt->H[i].compType));
cp = strtokm(NULL, "*");
sscanf(cp, "%"PRIx64, &(salt->H[i].datlen));
cp = strtokm(NULL, "*");
for (j = 0; j < 4; ++j) {
salt->H[i].c <<= 4;
salt->H[i].c |= atoi16[ARCH_INDEX(cp[j])];
}
if (type2) {
cp = strtokm(NULL, "*");
for (j = 0; j < 4; ++j) {
salt->H[i].c2 <<= 4;
salt->H[i].c2 |= atoi16[ARCH_INDEX(cp[j])];
}
} else
salt->H[i].c2 = salt->H[i].c; // fake out 2nd hash, by copying first hash
cp = strtokm(NULL, "*");
if (data_enum > 1) {
/* if 2 or 3, we have the FULL zip blob for decrypting. */
if (data_enum == 3) {
/* read from file. */
FILE *fp;
fp = fopen(cp, "rb");
if (!fp) {
fprintf(stderr, "Error opening file for pkzip data: %s\n", cp);
MEM_FREE(cpalloc);
return 0;
}
fseek(fp, offset+offex, SEEK_SET);
if (salt->compLen < 16*1024) {
/* simply load the whole blob */
ex_len[i] = salt->compLen;
H[i] = mem_alloc(salt->compLen);
if (fread(H[i], 1, salt->compLen, fp) != salt->compLen) {
fprintf(stderr, "Error reading zip file for pkzip data: %s\n", cp);
fclose(fp);
MEM_FREE(cpalloc);
return 0;
}
fclose(fp);
salt->H[i].datlen = salt->compLen;
}
else {
/* Only load a small part (to be used in crypt_all), and set the filename in */
/* the salt->fname string, so that cmp_all can open the file, and buffered */
/* read the zip data only when it 'needs' it. */
strnzcpy(salt->fname, (const char *)cp, sizeof(salt->fname));
salt->offset = offset+offex;
ex_len[i] = 384;
H[i] = mem_alloc(384);
if (fread(H[i], 1, 384, fp) != 384) {
fprintf(stderr, "Error reading zip file for pkzip data: %s\n", cp);
fclose(fp);
MEM_FREE(cpalloc);
return 0;
}
fclose(fp);
salt->H[i].datlen = 384;
}
} else {
ex_len[i] = salt->compLen;
H[i] = mem_alloc(salt->compLen);
for (j = 0; j < salt->H[i].datlen; ++j)
H[i][j] = (atoi16[ARCH_INDEX(cp[j*2])]<<4) + atoi16[ARCH_INDEX(cp[j*2+1])];
}
/* we also load this into the 'building' salt */
salt->compType = salt->H[i].compType;
/* Now, set the 'is full zip' flag, so we later process as a zip file. */
salt->H[i].full_zip = 1;
salt->full_zip_idx = i;
} else {
ex_len[i] = salt->H[i].datlen;
H[i] = mem_alloc(salt->H[i].datlen);
for (j = 0; j < salt->H[i].datlen; ++j)
H[i][j] = (atoi16[ARCH_INDEX(cp[j*2])]<<4) + atoi16[ARCH_INDEX(cp[j*2+1])];
}
}
MEM_FREE(cpalloc);
// Ok, we want to add some 'logic' to remove the magic testing, except for specific cases.
// If the only file blobs we have are stored, and long blobs, then we want magic (3 file, 2 byte checksum does not need magic).
// A single 1 byte file, even if deflated, we want to keep magic. (possibly).
j = 0;
for (i = 0; i < salt->cnt; ++i) {
if (salt->H[i].compType == 8) {
if (salt->cnt == 1 && salt->chk_bytes == 1)
j += 10;
else
break;
}
j += 1;
}
// ok, if j == 1, then we 'might' want to use magic. Otherwise, we want to 'clear' all magic values.
if (j >= 20)
j = 0;
if (j && salt->chk_bytes == 2 && salt->cnt > 1)
j = 0; // we do not need to use magic, on 2 or 3 stored 2 byte checksum files. We already have 2^32 or 2^48 in the checksum checking
if (j && salt->chk_bytes == 1 && salt->cnt == 3)
j = 0; // we do not need to use magic, on 3 stored 2 byte checksum files. We already have 2^32 or 2^48 in the checksum checking
if (!j) {
for (i = 0; i < salt->cnt; ++i)
salt->H[i].magic = 0; // remove any 'magic' logic from this hash.
}
psalt = mem_calloc(1, sizeof(PKZ_SALT) + ex_len[0]+ex_len[1]+ex_len[2]+2);
memcpy(psalt, salt, sizeof(*salt));
memcpy(psalt->zip_data, H[0], ex_len[0]);
MEM_FREE(H[0]);
if (salt->cnt > 1)
memcpy(psalt->zip_data+ex_len[0]+1, H[1], ex_len[1]);
MEM_FREE(H[1]);
if (salt->cnt > 2)
memcpy(psalt->zip_data+ex_len[0]+ex_len[1]+2, H[2], ex_len[2]);
MEM_FREE(H[2]);
MEM_FREE(salt);
psalt->dsalt.salt_alloc_needs_free = 1; // we used mem_calloc, so JtR CAN free our pointer when done with them.
// set the JtR core linkage stuff for this dyna_salt
memcpy(salt_p, &psalt, sizeof(psalt));
psalt->dsalt.salt_cmp_offset = SALT_CMP_OFF(PKZ_SALT, cnt);
psalt->dsalt.salt_cmp_size =
SALT_CMP_SIZE(PKZ_SALT, cnt, zip_data, ex_len[0]+ex_len[1]+ex_len[2]+2);
return salt_p;
}
static void set_key(char *key, int index)
{
/* Keep the PW, so we can return it in get_key if asked to do so */
strnzcpyn(saved_key[index], key, sizeof(*saved_key));
dirty = 1;
}
static char *get_key(int index)
{
return saved_key[index];
}
static int cmp_one(void *binary, int idx)
{
return chk[idx] == 1;
}
static int cmp_all(void *binary, int count)
{
int i,j;
for (i=j=0; i<count; ++i)
j+=chk[i]; /* hopefully addition like this is faster than 'count' conditional if statments */
return j;
}
/* this function is used by cmp_exact_loadfile. It will load the next
* part of the file then decrypt the data, and return just how many
* bytes were loaded.
*
* This function is 'similar' to an fread(). However, it also decrypts data
*/
static int get_next_decrypted_block(u8 *in, int sizeof_n, FILE *fp, u32 *inp_used, MY_WORD *pkey0, MY_WORD *pkey1, MY_WORD *pkey2)
{
u32 new_bytes = sizeof_n, k;
u8 C;
/* we have read all the bytes, we're done */
if (*inp_used >= salt->compLen)
return 0;
if (*inp_used + new_bytes > salt->compLen)
/* this is the last block. Only load the bytes that are left */
new_bytes = salt->compLen - *inp_used;
/* return the correct 'offset', so we can track when the file buffer has been fully read */
*inp_used += new_bytes;
/* read the data */
if (fread(in, 1, new_bytes, fp) != new_bytes)
return 0;
/* decrypt the data bytes (in place, in same buffer). Easy to do, only requires 1 temp character variable. */
for (k = 0; k < new_bytes; ++k) {
C = PKZ_MULT(in[k],(*pkey2));
pkey0->u = jtr_crc32 (pkey0->u, C);
pkey1->u = (pkey1->u + pkey0->c[KB1]) * 134775813 + 1;
pkey2->u = jtr_crc32 (pkey2->u, pkey1->c[KB2]);
in[k] = C;
}
/* return the number of bytes we read from the file on this read */
return new_bytes;
}
/* Ok, this is the more complex example. Here we have to load the file (which may be HUGE)
* decrypt the bytes from this file, and then inflate that data, and crc the bytes which we
* have inflated from that stream. Then in the end, when we use all input bytes, if we have
* inflated the right amount of data, ended up with a Z_STREAM_END, and the proper sized
* decompression buffer, and the CRC matches, then we know we have the correct password
*
* This function is called from cmp_exact(), when cmp_exact finds out we have to decrypt from
* the stored .zip file.
*
* this code is modifications made to the zpipe.c 'example' code from the zlib web site.
*/
#define CHUNK (64*1024)
/* Fully validate one candidate password whose compressed blob lives on disk:
* stream the encrypted data from the .zip file, decrypt it, inflate it
* (unless the member is 'stored'), and CRC the plaintext. Success requires:
* all input consumed, Z_STREAM_END, exact decompressed length, CRC match.
* Returns 1 on success (or on I/O trouble, with a warning), 0 on mismatch. */
static int cmp_exact_loadfile(int index)
{
int ret;
u32 have, k;
z_stream strm;
unsigned char in[CHUNK];
unsigned char out[CHUNK];
FILE *fp;
MY_WORD key0, key1, key2;
u8 *b, C;
u32 inp_used, decomp_len=0;
u32 crc = 0xFFFFFFFF;
/* Open the zip file, and 'seek' to the proper offset of the binary zip blob */
fp = fopen(salt->fname, "rb");
if (!fp) {
fprintf(stderr, "\nERROR, the zip file: %s has been removed.\nWe are a possible password has been found, but FULL validation can not be done!\n", salt->fname);
return 1;
}
if (fseek(fp, salt->offset, SEEK_SET)) {
fprintf(stderr, "\nERROR, the zip file: %s fseek() failed.\nWe are a possible password has been found, but FULL validation can not be done!\n", salt->fname);
fclose(fp);
return 1;
}
/* 'seed' the decryption with the IV. We do NOT use these bytes, they simply seed us. */
/* load this candidate's precomputed key state from K12[] */
key0.u = K12[index*3], key1.u = K12[index*3+1], key2.u = K12[index*3+2];
k=12;
/* the fread() only advances the file position past the 12 IV bytes ... */
if (fread(in, 1, 12, fp) != 12) {
fprintf(stderr, "\nERROR, the zip file: %s fread() failed.\nWe are a possible password has been found, but FULL validation can not be done!\n", salt->fname);
fclose(fp);
return 1;
}
/* ... the cipher state is actually advanced using the copy of those same 12
* bytes kept in the in-memory hash blob (salt->H[].h) */
b = salt->H[salt->full_zip_idx].h;
do {
C = PKZ_MULT(*b++,key2);
key0.u = jtr_crc32 (key0.u, C);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
}
while(--k);
/* this is 'sort of' our file pointer. It is the 'index' into the file's encrypted, compressed data buffer. */
/* we have read the 12 bytes of IV data, and updated our keys. Now we start processing the rest of the bytes */
/* to get the data to inflate, and crc check */
inp_used = 12;
if (salt->H[salt->full_zip_idx].compType == 0) {
// handle a stored blob (we do not have to decrypt it.
int avail_in;
crc = 0xFFFFFFFF;
/* stored member: just CRC every decrypted byte, no inflate needed */
avail_in = get_next_decrypted_block(in, CHUNK, fp, &inp_used, &key0, &key1, &key2);
while (avail_in) {
for (k = 0; k < avail_in; ++k)
crc = jtr_crc32(crc,in[k]);
avail_in = get_next_decrypted_block(in, CHUNK, fp, &inp_used, &key0, &key1, &key2);
}
fclose(fp);
return ~crc == salt->crc32;
}
/* allocate inflate state */
strm.zalloc = Z_NULL;
strm.zfree = Z_NULL;
strm.opaque = Z_NULL;
strm.avail_in = 0;
strm.next_in = Z_NULL;
/* -15 selects 'raw' deflate: zip members carry no zlib/gzip wrapper */
ret = inflateInit2(&strm, -15);
if (ret != Z_OK) /* if zlib is hosed, then likely there is no reason at all to continue. Better to exit, and let the user 'fix' the system */
perror("Error, initializing the libz inflateInit2() system\n");
/* decompress until deflate stream ends or end of file */
do {
strm.avail_in = get_next_decrypted_block(in, CHUNK, fp, &inp_used, &key0, &key1, &key2);
if (ferror(fp)) {
inflateEnd(&strm);
fclose(fp);
fprintf(stderr, "\nERROR, the zip file: %s fread() failed.\nWe are a possible password has been found, but FULL validation can not be done!\n", salt->fname);
return 1;
}
if (strm.avail_in == 0)
break;
strm.next_in = in;
/* run inflate() on input until output buffer not full */
do {
strm.avail_out = CHUNK;
strm.next_out = out;
ret = inflate(&strm, Z_NO_FLUSH);
switch (ret) {
case Z_NEED_DICT:
case Z_DATA_ERROR:
case Z_MEM_ERROR:
/* corrupt stream => wrong password; clean up and reject */
inflateEnd(&strm);
fclose(fp);
return 0;
}
have = CHUNK - strm.avail_out;
/* now update our crc value */
for (k = 0; k < have; ++k)
crc = jtr_crc32(crc,out[k]);
decomp_len += have;
} while (strm.avail_out == 0);
/* done when inflate() says it's done */
} while (ret != Z_STREAM_END);
/* clean up and return */
inflateEnd(&strm);
fclose(fp);
/* all four conditions must hold for a confirmed password */
return ret == Z_STREAM_END && inp_used == salt->compLen && decomp_len == salt->deCompLen && salt->crc32 == ~crc;
}
/* Perform FULL validation of a candidate password. cmp_all()/cmp_one() only
 * matched the quick checksum bytes; here we decrypt the whole compressed
 * blob, inflate it (unless the member is 'stored'), and compare the CRC32
 * of the plaintext with the CRC recorded in the zip file.
 *
 * source is unused (fmt_main interface requirement); index selects which
 * candidate's precomputed key state to load from K12[].
 * Returns 1 if the password fully validates (or if only checksum data is
 * available), 0 otherwise.
 */
static int cmp_exact(char *source, int index)
{
	const u8 *b;
	u8 C, *decompBuf, *decrBuf, *B;
	u32 k, crc;
	MY_WORD key0, key1, key2;
	z_stream strm;
	int ret;

	if (salt->H[salt->full_zip_idx].full_zip == 0)
		/* we do not have a zip file, this is 'checksum' only
		 * POSSIBLY, we should log and output to screen that
		 * we are not 100% 'sure' we have the right password!! */
		return 1;

#ifdef ZIP_DEBUG
	fprintf(stderr, "FULL zip test being done. (pass=%s)\n", saved_key[index]);
#endif

	if (salt->fname[0] == 0) {
		/* we have the whole zip blob in memory, simply allocate a decrypt buffer, decrypt
		 * in one step, crc and be done with it. This is the 'trivial' type. */
		decrBuf = mem_alloc(salt->compLen-12);

		/* load this candidate's key state, then consume the 12 byte
		 * encryption header (IV). The decrypted IV bytes are discarded;
		 * they only advance the cipher state. */
		key0.u = K12[index*3], key1.u = K12[index*3+1], key2.u = K12[index*3+2];
		b = salt->H[salt->full_zip_idx].h;
		k = 12;
		do {
			C = PKZ_MULT(*b++,key2);
			key0.u = jtr_crc32 (key0.u, C);
			key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
			key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
		} while (--k);

		/* decrypt the compressed data. Fixed: the original do/while body
		 * executed once even for a zero-length payload (compLen == 12),
		 * which wrapped k around to 0xFFFFFFFF and overran decrBuf;
		 * while (k--) handles k == 0 correctly and is identical otherwise. */
		B = decrBuf;
		k = salt->compLen-12;
		while (k--) {
			C = PKZ_MULT(*b++,key2);
			key0.u = jtr_crc32 (key0.u, C);
			*B++ = C;
			key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
			key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
		}

		if (salt->H[salt->full_zip_idx].compType == 0) {
			/* handle a stored blob (no inflate step): just CRC the
			 * decrypted bytes and compare. */
			crc = 0xFFFFFFFF;
			for (k = 0; k < salt->compLen-12; ++k)
				crc = jtr_crc32(crc,decrBuf[k]);
			MEM_FREE(decrBuf);
			return ~crc == salt->crc32;
		}

		/* deflated member: inflate the whole buffer in a single call */
		strm.zalloc = Z_NULL;
		strm.zfree = Z_NULL;
		strm.opaque = Z_NULL;
		strm.next_in = Z_NULL;
		strm.avail_in = 0;
		ret = inflateInit2(&strm, -15); /* 'raw', since we do not have gzip header, or gzip crc. .ZIP files are 'raw' implode data. */
		if (ret != Z_OK)
			perror("Error, initializing the libz inflateInit2() system\n");

		decompBuf = mem_alloc(salt->deCompLen);
		strm.next_in = decrBuf;
		strm.avail_in = salt->compLen-12;
		strm.avail_out = salt->deCompLen;
		strm.next_out = decompBuf;
		ret = inflate(&strm, Z_SYNC_FLUSH);
		inflateEnd(&strm);
		/* the stream must end cleanly AND produce exactly deCompLen bytes */
		if (ret != Z_STREAM_END || strm.total_out != salt->deCompLen) {
			MEM_FREE(decompBuf);
			MEM_FREE(decrBuf);
			return 0;
		}
		crc = 0xFFFFFFFF;
		for (k = 0; k < strm.total_out; ++k)
			crc = jtr_crc32(crc,decompBuf[k]);
		MEM_FREE(decompBuf);
		MEM_FREE(decrBuf);
		return ~crc == salt->crc32;
	}

	/* we have a stand alone function to handle this more complex method of
	 * loading from file, decrypting, decompressing, and crc'ing the data
	 * It is complex enough of a task, to have its own function. */
	return cmp_exact_loadfile(index);
}
#if USE_PKZIP_MAGIC
/* Continuation-byte count for a UTF-8 lead byte, indexed by (lead & 0x3f)
* for leads >= 0xC0: 0xC0-0xDF -> 1, 0xE0-0xEF -> 2, 0xF0-0xF7 -> 3,
* 0xF8-0xFB -> 4, 0xFC-0xFF -> 5 (the 4/5 values are pre-RFC3629 forms,
* rejected later by the length/switch checks). */
const char exBytesUTF8[64] = {
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5
};
/* Validate one UTF-8 sequence starting at 'source', with at most 'length'
* bytes available. Returns the total sequence length (lead + continuation
* bytes) on success, 1 for a plain byte < 0xC0, or -1 when the sequence is
* malformed, overlong, a surrogate, or truncated. The switch falls through
* deliberately, checking continuation bytes from last to first. */
static int isLegalUTF8_char(const u8 *source, int length)
{
u8 a;
int len;
const u8 *srcptr;
if (*source < 0xC0)
return 1;
len = exBytesUTF8[*source&0x3f];
srcptr = source+len;
if (len+1 > length)
return -1; /* sequence would run past the buffer */
switch (len) {
default: return -1;
/* Everything else falls through when "true"... */
case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return -1;
case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return -1;
case 2: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return -1;
/* at this point 'a' is the FIRST continuation byte; range-check it
* against the lead byte to reject overlongs/surrogates/out-of-range */
switch (*source) {
/* no fall-through in this inner switch */
case 0xE0: if (a < 0xA0) return -1; /* overlong 3-byte form */
break;
case 0xED: if (a > 0x9F) return -1; /* UTF-16 surrogate range */
break;
case 0xF0: if (a < 0x90) return -1; /* overlong 4-byte form */
break;
case 0xF4: if (a > 0x8F) return -1; /* above U+10FFFF */
}
case 1: if (*source >= 0x80 && *source < 0xC2) return -1; /* C0/C1 overlong */
}
if (*source > 0xF4) return -1; /* beyond Unicode range */
return len+1;
}
/* Heuristic 'is this plausible text?' check used by the magic=255 signature
* mode: accepts ASCII, valid UTF-8 sequences, and BOM-prefixed UTF-16/32
* content (low-byte-ASCII only). Returns 1 if the buffer looks like text,
* 0 if it looks binary (i.e. the candidate password is rejected). */
static int validate_ascii(const u8 *out, int inplen)
{
int i;
int unicode=0; /* bytes to skip after each char: 1 for utf16, 3 for utf32 */
for (i = 0; i < inplen-1; ++i) {
if (out[i] > 0x7E) {
// first check to 'see' if this is a valid utf8 character. If so, let it 'pass'.
if (unicode)
return 0; // in unicode mode, we ONLY handle 'ascii' bytes in the low byte.
if (out[i] > 0xC0) {
int len;
if (i > inplen-4)
return 1; /* too close to the end to judge; accept */
len = isLegalUTF8_char(&out[i], 5);
if (len < 0) return 0;
i += (len-1);
}
else {
/* NOTE(review): the BOM checks below are only reached for a byte in
* 0x7F..0xC0 at i > 0, yet they inspect out[0..3]. A buffer that
* STARTS with 0xFF/0xFE (utf16/utf32 BOM) takes the UTF-8 branch
* above at i==0 and is rejected there, so several of these branches
* appear unreachable; also the utf32-LE test (FF FE 00 00) is
* shadowed by the earlier utf16-LE test (FF FE). Verify intent
* before changing — this mirrors the upstream code. */
if (i) {
// check for utf8 BOM \xEF \xBB \xBF
if (out[0] == 0xEF && out[1] == 0xBB && out[2] == 0xBF) {
i = 2;
continue;
}
/* check for Unicode BOM (FF FE for utf16le, FE FF for utf16be, FF FE 00 00 for utf32le, not sure if 00 00 FE FF is utf32be, but likely is) */
if (out[0] == 0xFF && out[1] == 0xFE) {
unicode = 1;
i++;
continue;
}
/* unicode BE bom */
if (out[0] == 0xFE && out[1] == 0xFF) {
unicode = 1;
i += 2;
continue;
}
/* utf32 LE */
if (out[0] == 0xFF && out[1] == 0xFE && out[2] == 0 && out[3] == 0) {
unicode = 3;
i += 3;
continue;
}
/* utf32 BE bom */
if (out[0] == 0 && out[1] == 0 && out[2] == 0xFE && out[3] == 0xFF) {
unicode = 3;
i += 6;
continue;
}
// allow a 'single' byte > 0x7E as long as bytes following are ascii.
if (out[1] <= 0x7E && out[1] >= 0x20) {
++i;
continue;
}
return 0;
}
}
} else if (out[i] < 0x20) {
/* we do not need to deal with DOS EOF char 0x1a, since we will never have the 'end' of the file */
/* we do allow the ESC character for ANSI files, however, they are frequently also binary, so will fail in other places */
if (out[i]!='\n' && out[i]!='\r' && out[i]!='\t' && out[i]!=0x1B)
return 0;
}
i += unicode; // skip the null bytes
}
return 1;
}
/* Compare the start of a decrypted plaintext buffer against the set of known
 * file 'magic' signatures for this hash. Returns 1 as soon as any signature
 * matches the leading bytes of p, 0 when none match.
 * Note: 'len' is unused (kept for interface compatibility); each signature's
 * own magic_sig_len[i] bounds the comparison. */
static int CheckSigs(const u8 *p, int len, ZIP_SIGS *pSig)
{
	int i;

	for (i = 0; i < pSig->magic_count; ++i) {
		/* memcmp() replaces the original byte-at-a-time loop; same
		 * result, simpler and typically faster. */
		if (!memcmp(p, pSig->magic_signature[i], pSig->magic_sig_len[i]))
			return 1;
	}
	return 0;
}
#endif
/* note, Buf is the 'full' decrypted zip buffer (len bytes long). It DOES contain the first 3 bits, which have already
* been decoded, and have told us we had a code 2 (var table block)
* all done without BITS(), PULLBYTE(), BITSNEEDED() macros. We 'know' the data we need, and we know that we have
* 'enough', so we do not worry about all of the overhead, and validation logic.
*
* In testing, this function catches ALL bad decryptions, except about 1/300 to 1/350. So, it is not too bad.
*/
/* Quick plausibility check of a deflate 'dynamic Huffman' (BTYPE=2) block
* header: reads HLIT/HDIST/HCLEN and the code-length code lengths, then
* verifies the code-length Huffman code is neither over-subscribed nor
* incomplete. Returns 1 if the header could be valid, 0 to reject. */
MAYBE_INLINE static int check_inflate_CODE2(u8 *next)
{
u32 bits, hold, thisget, have, i;
int left;
u32 ncode;
u32 ncount[2]; // ends up being an array of 8 u8 count values. But we can clear it, and later 'check' it with 2 u32 instructions.
u8 *count; // this will point to ncount array. NOTE, this is alignment required 'safe' for Sparc systems or others requiring alignment.
#if (ARCH_LITTLE_ENDIAN==1) && (ARCH_ALLOWS_UNALIGNED==1)
// 'speedup' for x86 type systems. pkzip/inflate was designed here, so why not use it.
hold = *((u32*)next);
#else
hold = *next + (((u32)next[1])<<8) + (((u32)next[2])<<16) + (((u32)next[3])<<24);
#endif
next += 3; // we pre-increment when pulling it in the loop, thus we need to be 1 byte back.
hold >>= 3; // we already processed 3 bits
count = (u8*)ncount;
/* HLIT: 257 + 5 bits; anything above 286 literal/length codes is invalid */
if (257+(hold&0x1F) > 286)
return 0; // nlen, but we do not use it.
hold >>= 5;
/* HDIST: 1 + 5 bits; anything above 30 distance codes is invalid */
if (1+(hold&0x1F) > 30)
return 0; // ndist, but we do not use it.
hold >>= 5;
/* HCLEN: 4 + 4 bits = number of 3-bit code-length code lengths to read */
ncode = 4+(hold&0xF);
hold >>= 4;
// we have 15 bits left.
hold += ((u32)(*++next)) << 15;
hold += ((u32)(*++next)) << 23;
// we now have 31 bits. We need to know this for the loop below.
bits = 31;
// We have 31 bits now, in accum. If we are processing 19 codes, we do 7, then have 10 bits.
// Add 16 more and have 26, then use 21, have 5. Then load 16 more, then eat 15 of them.
have = 0;
ncount[0] = ncount[1] = 0;
for (;;) {
/* consume up to 7 of the 3-bit code lengths per pass, histogramming
* each length value (0..7) into count[] */
if (have+7>ncode)
thisget = ncode-have;
else
thisget = 7;
have += thisget;
bits -= thisget*3;
while (thisget--) {
++count[hold&7];
hold>>=3;
}
if (have == ncode)
break;
/* refill the bit accumulator with 2 more input bytes */
hold += ((u32)(*++next)) << bits;
bits += 8;
hold += ((u32)(*++next)) << bits;
bits += 8;
}
count[0] = 0; /* zero-length entries do not occupy code space */
if (!ncount[0] && !ncount[1])
return 0; /* if no codes at all, then simply bail, that is invalid */
/* check for an over-subscribed or incomplete set of lengths */
/* this will catch about 319 out of 320 'bad' passwords that */
/* have made it into this function. Note, only 1/4 of the */
/* passwords which pass the checksum, can make it here. Of */
/* those, we drop 319/320 or about that many (a good check!) */
left = 1;
for (i = 1; i <= 7; ++i) {
left <<= 1;
left -= count[i];
if (left < 0)
return 0; /* over-subscribed */
}
if (left > 0)
return 0; /* incomplete set */
return 1; /* Passed this check! */
}
//static code const * const lcode = lenfix;
//static code const * const dcode = distfix;
/* This function handles inflate CODE type 1. This is a 'fixed' table code. We set the fixed table, */
/* and then inflate some data (without writing anything. If we find any BAD lookback data, we can */
/* return a failure. We have 24 bytes of inflate data, and this almost always is more than enough */
/* to turn up an error. If we find we need more, we will do more than 24 */
/* Quick plausibility check of a deflate 'fixed Huffman' (BTYPE=1) block:
* decodes literal/length/distance symbols using the static lenfix/distfix
* tables WITHOUT producing output, tracking only 'whave' (bytes that would
* have been written) so that any back-reference reaching before the start
* of output can be rejected. Returns 1 if no error was found in the first
* 'left' bytes, 0 to reject the candidate password. */
MAYBE_INLINE static int check_inflate_CODE1(u8 *next, int left)
{
u32 whave = 0, op, bits, hold,len;
code here;
#if (ARCH_LITTLE_ENDIAN==1) && (ARCH_ALLOWS_UNALIGNED==1)
// 'speedup' for x86 type systems. pkzip/inflate was designed here, so why not use it.
hold = *((u32*)next);
#else
hold = *next + (((u32)next[1])<<8) + (((u32)next[2])<<16) + (((u32)next[3])<<24);
#endif
next += 3; // we pre-increment when pulling it in the loop, thus we need to be 1 byte back.
left -= 4;
hold >>= 3; // we already processed 3 bits
bits = 32-3;
for (;;) {
if (bits < 15) {
if (left < 2)
return 1; // we are out of bytes. Return we had no error.
left -= 2;
hold += (u32)(*++next) << bits;
bits += 8;
hold += (u32)(*++next) << bits;
bits += 8;
}
/* look up the next literal/length symbol (9-bit fixed table) */
here=lenfix[hold & 0x1FF];
op = (unsigned)(here.bits);
hold >>= op;
bits -= op;
op = (unsigned)(here.op);
if (op == 0) /* literal */
++whave;
else if (op & 16) { /* length base */
len = (unsigned)(here.val);
op &= 15; /* number of extra bits */
if (op) {
if (bits < op) {
if (!left)
return 1; /*we are out of bytes. Return we had no error.*/
--left;
hold += (u32)(*++next) << bits;
bits += 8;
}
len += (unsigned)hold & ((1U << op) - 1);
hold >>= op;
bits -= op;
}
if (bits < 15) {
if (left < 2)
return 1; /*we are out of bytes. Return we had no error.*/
left -= 2;
hold += (u32)(*++next) << bits;
bits += 8;
hold += (u32)(*++next) << bits;
bits += 8;
}
/* a length code must be followed by a distance code (5-bit fixed table) */
here = distfix[hold & 0x1F];
// dodist:
op = (unsigned)(here.bits);
hold >>= op;
bits -= op;
op = (unsigned)(here.op);
if (op & 16) { /* distance base */
u32 dist = (unsigned)(here.val);
op &= 15; /* number of extra bits */
if (bits < op) {
if (!left)
return 1; /*we are out of bytes. Return we had no error.*/
--left;
hold += (u32)(*++next) << bits;
bits += 8;
if (bits < op) {
if (!left)
return 1; /*we are out of bytes. Return we had no error.*/
--left;
hold += (u32)(*++next) << bits;
bits += 8;
}
}
dist += (unsigned)hold & ((1U << op) - 1);
/* the strongest check: a match cannot reach back before the
* start of the (virtual) output window */
if (dist > whave)
return 0; /*invalid distance too far back*/
hold >>= op;
bits -= op;
//***** start of patched code from Pavel Semjanov (see original code below)
whave += len;
}
else
return 0; /*invalid distance code*/
}
else if (op & 32) {
// end of block [may present in short sequences, but only at the end.] NOTE, we need to find out if we EVER hit the end of a block, at only 24 bytes???
if (left == 0)
return 1;
return 0;
}
else {
return 0; // invalid literal/length code.
}
//***** End of patched code from Pavel
}
}
// original code block (for above), prior to patch from Pavel Semjanov [pavel@semjanov.com]
// this code would be a direct drop in between the comments starting and stopping with //***** above
// also the dodist label was commented out (no longer used).
#if 0
whave += dist;
}
else if ((op & 64) == 0) { /* 2nd level distance code */
here = distfix[here.val + (hold & ((1U << op) - 1))];
goto dodist;
}
else
return 0; /*invalid distance code*/
}
else if (op & 64) {
// 2nd level length code.
//here = lcode[here.val + (hold & ((1U << op) - 1))];
//goto dolen;
// this causes an infinite loop. Also, I VERY seriously doubt, this will EVER happen in the first
// 24 bytes of code. NOTE, there may be problems, in the fact this causes a inf loop!, but for now,
// simply return 0, then debug later.
return 0;
}
else if (op & 32) {
// end of block NOTE, we need to find out if we EVER hit the end of a block, at only 24 bytes???
// It is VERY likely we do SHOULD NOT EVER hit this. If that is the case, return that this block is bogus.
// check next OP (if we have enough bits left), if CODE=3, fail. If code==0, check
return 0;
}
else {
return 0; // invalid literal/length code.
}
#endif
/*
* Crypt_all simply performs the checksum .zip validatation of the data. It performs
* this for ALL hashes provided. If any of them fail to match, then crypt all puts the
* complement of the 'proper' checksum of the first hash into the output. These 2 bytes
* are checked against the binary for this salt/password combination. Thus, if any
* checksum fails, it will never match binary. However, if ALL of the checksums match
* we then put the checksum bytes from the first hash, into our output data. Then, when
* the binary check (cmp_all, cmp_one) is performed, it WILL match. NOTE, this does
* not mean we have found the password. Just that all hashes quick check checksums
* for this password 'work'.
*/
static int crypt_all(int *pcount, struct db_salt *_salt)
{
const int _count = *pcount;
int idx;
#if (ZIP_DEBUG==2)
static int CNT, FAILED, FAILED2;
++CNT;
#endif
// pkzip kinda sucks a little for multi-threading, since there is different amount of work to be
// done, depenging upon the password. Thus, we pack in OMP_MOD passwords into each thread, and
// hopefully some of the differnces will even themselves out in the end. If we have 2 threads
// then thread 1 gets 0 to 127 password, and thread 2 gets 128-256. Once they 'get' their data,
// there should be no mutexing of the runtime data, thus the threads should run fast.
// Also, since we have 'multiple' files in a .zip file (and multiple checksums), we bail as at the
// first time we fail to match checksum. So, there may be some threads which check more checksums.
// Again, hopefully globbing many tests into a threads working set will flatten out these differences.
#ifdef _OPENMP
#pragma omp parallel for private(idx)
#endif
for (idx = 0; idx < _count; ++idx) {
int cur_hash_count = salt->cnt;
int cur_hash_idx = -1;
MY_WORD key0, key1, key2;
u8 C;
const u8 *b;
u8 curDecryBuf[256];
#if USE_PKZIP_MAGIC
u8 curInfBuf[128];
#endif
int k, SigChecked;
u16 e, e2, v1, v2;
z_stream strm;
int ret;
/* use the pwkey for each hash. We mangle on the 12 bytes of IV to what was computed in the pwkey load. */
if (dirty) {
u8 *p = (u8*)saved_key[idx];
/* load the 'pwkey' one time, put it into the K12 array */
/* NOTE: the loop below intentionally folds the terminating NUL's
* preceding byte check via the do/while(*p) pattern */
key0.u = 0x12345678UL; key1.u = 0x23456789UL; key2.u = 0x34567890UL;
do {
key0.u = jtr_crc32 (key0.u, *p++);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
} while (*p);
K12[idx*3] = key0.u, K12[idx*3+1] = key1.u, K12[idx*3+2] = key2.u;
goto SkipKeyLoadInit;
}
do
{
// 2nd, and later times through the loop, AND if keys are not dirty (i.e. multiple salts
// for the same key load), we do NOT perform the key compute, but instead load the pre-computed
// key data from the array.
key0.u = K12[idx*3], key1.u = K12[idx*3+1], key2.u = K12[idx*3+2];
SkipKeyLoadInit:;
/* advance the cipher state through the first 11 IV bytes of this hash */
b = salt->H[++cur_hash_idx].h;
k=11;
e = salt->H[cur_hash_idx].c;
e2 = salt->H[cur_hash_idx].c2;
do
{
C = PKZ_MULT(*b++,key2);
key0.u = jtr_crc32 (key0.u, C);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
}
while(--k);
/* if the hash is a 2 byte checksum type, then check that value first */
/* There is no reason to continue if this byte does not check out. */
if (salt->chk_bytes == 2 && C != (e&0xFF) && C != (e2&0xFF))
goto Failed_Bailout;
/* decrypt the 12th (last) IV byte — this is the primary checksum byte */
C = PKZ_MULT(*b++,key2);
#if 1
// https://github.com/magnumripper/JohnTheRipper/issues/467
// Fixed, JimF. Added checksum test for crc32 and timestamp.
if (C != (e>>8) && C != (e2>>8))
goto Failed_Bailout;
#endif
// Now, update the key data (with that last byte.
key0.u = jtr_crc32 (key0.u, C);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
// Ok, we now have validated this checksum. We need to 'do some' extra pkzip validation work.
// What we do here, is to decrypt a little data (possibly only 1 byte), and perform a single
// 'inflate' check (if type is 8). If type is 0 (stored), and we have a signature check, then
// we do that here. Also, if the inflate code is a 0 (stored block), and we do sig check, then
// we can do that WITHOUT having to call inflate. however, if there IS a sig check, we will have
// to call inflate on 'some' data, to get a few bytes (or error code). Also, if this is a type
// 2 or 3, then we do the FULL inflate, CRC check here.
e = 0;
// First, we want to get the inflate CODE byte (the first one).
C = PKZ_MULT(*b++,key2);
SigChecked = 0;
if ( salt->H[cur_hash_idx].compType == 0) {
// handle a stored file.
// We can ONLY deal with these IF we are handling 'magic' testing.
#if USE_PKZIP_MAGIC
// Ok, if we have a signature, check it here, WITHOUT having to call zLib's inflate.
if (salt->H[cur_hash_idx].pSig->max_len) {
int len = salt->H[cur_hash_idx].pSig->max_len;
if (len > salt->H[cur_hash_idx].datlen-12)
len = salt->H[cur_hash_idx].datlen-12;
SigChecked = 1;
curDecryBuf[0] = C;
/* decrypt enough plaintext to cover the longest signature */
for (; e < len;) {
key0.u = jtr_crc32 (key0.u, curDecryBuf[e]);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
curDecryBuf[++e] = PKZ_MULT(*b++,key2);
}
if (salt->H[cur_hash_idx].magic == 255) {
if (!validate_ascii(&curDecryBuf[5], len-5))
goto Failed_Bailout;
} else {
if (!CheckSigs(curDecryBuf, len, salt->H[cur_hash_idx].pSig))
goto Failed_Bailout;
}
}
#endif
continue;
}
#if 1
// https://github.com/magnumripper/JohnTheRipper/issues/467
// Ok, if this is a code 3, we are done.
// Code moved to after the check for stored type. (FIXED) This check was INVALID for a stored type file.
/* deflate block type 3 (bits 1-2 == 11) is reserved/invalid */
if ( (C & 6) == 6)
goto Failed_Bailout;
#endif
if ( (C & 6) == 0) {
/* deflate block type 0: 'stored' block inside the deflate stream */
// Check that checksum2 is 0 or 1. If not, I 'think' we can be done
if (C > 1)
goto Failed_Bailout;
// now get 4 bytes. This is the length. It is made up of 2 16 bit values.
// these 2 values are checksumed, so it is easy to tell if the data is WRONG.
// correct data is u16_1 == (u16_2^0xFFFF)
curDecryBuf[0] = C;
for (e = 0; e <= 4; ) {
key0.u = jtr_crc32 (key0.u, curDecryBuf[e]);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
curDecryBuf[++e] = PKZ_MULT(*b++,key2);
}
v1 = curDecryBuf[1] | (((u16)curDecryBuf[2])<<8);
v2 = curDecryBuf[3] | (((u16)curDecryBuf[4])<<8);
if (v1 != (v2^0xFFFF))
goto Failed_Bailout;
#if USE_PKZIP_MAGIC
// Ok, if we have a signature, check it here, WITHOUT having to call zLib's inflate.
if (salt->H[cur_hash_idx].pSig->max_len) {
int len = salt->H[cur_hash_idx].pSig->max_len + 5;
if (len > salt->H[cur_hash_idx].datlen-12)
len = salt->H[cur_hash_idx].datlen-12;
SigChecked = 1;
for (; e < len;) {
key0.u = jtr_crc32 (key0.u, curDecryBuf[e]);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
curDecryBuf[++e] = PKZ_MULT(*b++,key2);
}
if (salt->H[cur_hash_idx].magic == 255) {
if (!validate_ascii(&curDecryBuf[5], len-5))
goto Failed_Bailout;
} else {
if (!CheckSigs(&curDecryBuf[5], len-5, salt->H[cur_hash_idx].pSig))
goto Failed_Bailout;
}
}
#endif
}
else {
// Ok, now we have handled inflate code type 3 and inflate code 0 (50% of 'random' data)
// We now have the 2 'hard' ones left (fixed table, and variable table)
curDecryBuf[0] = C;
if ((C&6) == 4) { // inflate 'code' 2 (variable table)
#if (ZIP_DEBUG==2)
static unsigned count, found;
++count;
#endif
// we need 4 bytes, + 2, + 4 at most.
for (; e < 10;) {
key0.u = jtr_crc32 (key0.u, curDecryBuf[e]);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
curDecryBuf[++e] = PKZ_MULT(*b++,key2);
}
if (!check_inflate_CODE2(curDecryBuf))
goto Failed_Bailout;
#if (ZIP_DEBUG==2)
fprintf(stderr, "CODE2 Pass=%s count = %u, found = %u\n", saved_key[idx], count, ++found);
#endif
}
else {
/* inflate 'code' 1 (fixed table): decrypt up to 36 bytes and dry-run the decoder */
int til;
#if (ZIP_DEBUG==2)
static unsigned count, found;
++count;
#endif
til = 36;
if (salt->H[cur_hash_idx].datlen-12 < til)
til = salt->H[cur_hash_idx].datlen-12;
for (; e < til;) {
key0.u = jtr_crc32 (key0.u, curDecryBuf[e]);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
curDecryBuf[++e] = PKZ_MULT(*b++,key2);
}
if (!check_inflate_CODE1(curDecryBuf, til))
goto Failed_Bailout;
#if (ZIP_DEBUG==2)
fprintf(stderr, "CODE1 Pass=%s count = %u, found = %u\n", saved_key[idx], count, ++found);
#endif
}
}
#if USE_PKZIP_MAGIC
// Ok, now see if we need to check sigs, or do a FULL inflate/crc check.
if (!SigChecked && salt->H[cur_hash_idx].pSig->max_len) {
int til = 180;
if (salt->H[cur_hash_idx].datlen-12 < til)
til = salt->H[cur_hash_idx].datlen-12;
for (; e < til;) {
key0.u = jtr_crc32 (key0.u, curDecryBuf[e]);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
curDecryBuf[++e] = PKZ_MULT(*b++,key2);
}
strm.zalloc = Z_NULL;
strm.zfree = Z_NULL;
strm.opaque = Z_NULL;
strm.next_in = Z_NULL;
strm.avail_in = til;
ret = inflateInit2(&strm, -15); /* 'raw', since we do not have gzip header, or gzip crc. .ZIP files are 'raw' implode data. */
if (ret != Z_OK)
perror("Error, initializing the libz inflateInit2() system\n");
strm.next_in = curDecryBuf;
strm.avail_out = sizeof(curInfBuf);
strm.next_out = curInfBuf;
ret = inflate(&strm, Z_SYNC_FLUSH);
inflateEnd(&strm);
if (ret != Z_OK) {
// we need to handle zips smaller than sizeof curInfBuf. If we find a zip of this
// size, the return is Z_STREAM_END, BUT things are fine.
if (ret == Z_STREAM_END && salt->deCompLen == strm.total_out)
; // things are ok.
else
goto Failed_Bailout;
}
if (!strm.total_out)
goto Failed_Bailout;
ret = salt->H[cur_hash_idx].pSig->max_len;
if (salt->H[cur_hash_idx].magic == 255) {
if (!validate_ascii(curInfBuf, strm.total_out))
goto Failed_Bailout;
} else {
if (strm.total_out < ret)
goto Failed_Bailout;
if (!CheckSigs(curInfBuf, strm.total_out, salt->H[cur_hash_idx].pSig))
goto Failed_Bailout;
}
}
#endif
if (salt->H[cur_hash_idx].full_zip) {
/* full zip data available: try a trial inflate of the first ~200
* bytes before declaring success (cmp_exact does the real work) */
u8 inflateBufTmp[1024];
if (salt->compLen > 240 && salt->H[cur_hash_idx].datlen >= 200) {
for (;e < 200;) {
key0.u = jtr_crc32 (key0.u, curDecryBuf[e]);
key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
curDecryBuf[++e] = PKZ_MULT(*b++,key2);
}
strm.zalloc = Z_NULL;
strm.zfree = Z_NULL;
strm.opaque = Z_NULL;
strm.next_in = Z_NULL;
strm.avail_in = e;
ret = inflateInit2(&strm, -15); /* 'raw', since we do not have gzip header, or gzip crc. .ZIP files are 'raw' implode data. */
if (ret != Z_OK)
perror("Error, initializing the libz inflateInit2() system\n");
strm.next_in = curDecryBuf;
strm.avail_out = sizeof(inflateBufTmp);
strm.next_out = inflateBufTmp;
ret = inflate(&strm, Z_SYNC_FLUSH);
inflateEnd(&strm);
if (ret != Z_OK) {
#if (ZIP_DEBUG==2)
fprintf(stderr, "fail=%d fail2=%d tot="LLd"\n", ++FAILED, FAILED2, ((long long)CNT)*_count);
#endif
goto Failed_Bailout;
}
}
goto KnownSuccess;
}
}
while(--cur_hash_count);
/* We got a checksum HIT!!!! All hash checksums matched. */
/* We load the proper checksum value for the gethash */
KnownSuccess: ;
chk[idx] = 1;
continue;
Failed_Bailout: ;
/* We load the wrong checksum value for the gethash */
chk[idx] = 0;
}
/* clear the 'dirty' flag. Then on multiple different salt calls, we will not have to */
/* encrypt the passwords again. They will have already been loaded in the K12[] array. */
dirty = 0;
return _count;
}
/* Format registration: wires the functions above into John's fmt_main
* interface. BINARY_SIZE is 0 (validation is all done in crypt_all /
* cmp_exact), and FMT_DYNA_SALT marks the dynamically sized salt blob. */
struct fmt_main fmt_pkzip = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0, /* min plaintext length */
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_DYNA_SALT | FMT_HUGE_INPUT,
{ NULL }, /* tunable cost names */
{ FORMAT_TAG, FORMAT_TAG2 },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
fmt_default_binary,
get_salt,
{ NULL }, /* tunable cost functions */
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_dyna_salt_hash,
NULL, /* salt_compare */
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* HAVE_LIBZ */
|
densemat_rm.c | #define _XOPEN_SOURCE 500
#include "ghost/config.h"
#include "ghost/types.h"
#include "ghost/core.h"
#include "ghost/datatransfers.h"
#include "ghost/densemat_rm.h"
#include "ghost/util.h"
#include "ghost/locality.h"
#include "ghost/context.h"
#include "ghost/instr.h"
#include "ghost/machine.h"
#include "ghost/log.h"
#include "ghost/bindensemat.h"
#include "ghost/densemat_cm.h"
#include "ghost/constants.h"
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#ifdef GHOST_HAVE_CUDA
#include <cuda_runtime.h>
#endif
#define ROWMAJOR
#include "ghost/densemat_iter_macros.h"
#include "ghost/densemat_common.c.def"
/* Scatter a replicated (global) row-major densemat from rank 0 to the
* per-process node vectors, column by column, using non-blocking MPI.
* vec     - source vector (fully present on rank 0)
* nodeVec - destination local vector on each rank
* ctx     - context providing the row map (ldim/goffs) and communicator */
ghost_error ghost_densemat_rm_distributeVector(ghost_densemat *vec, ghost_densemat *nodeVec, ghost_context *ctx)
{
GHOST_DEBUG_LOG(1,"Distributing vector");
int me;
int nprocs;
GHOST_CALL_RETURN(ghost_rank(&me, ctx->mpicomm));
GHOST_CALL_RETURN(ghost_nrank(&nprocs, ctx->mpicomm));
bool uniformstorage;
GHOST_CALL_RETURN(ghost_densemat_uniformstorage(&uniformstorage,vec,ctx->mpicomm));
if (!uniformstorage) {
GHOST_ERROR_LOG("Cannot collect vectors of different storage order");
return GHOST_ERR_INVALID_ARG;
}
ghost_lidx c;
#ifdef GHOST_HAVE_MPI
GHOST_DEBUG_LOG(2,"Scattering global vector to local vectors");
ghost_mpi_datatype mpidt;
GHOST_CALL_RETURN(ghost_mpi_datatype_get(&mpidt,vec->traits.datatype));
int i;
/* one send/recv per (column, remote rank) pair at most */
MPI_Request req[vec->traits.ncols*2*(nprocs-1)];
MPI_Status stat[vec->traits.ncols*2*(nprocs-1)];
int msgcount = 0;
for (i=0;i<vec->traits.ncols*2*(nprocs-1);i++)
req[i] = MPI_REQUEST_NULL;
if (me != 0) {
/* non-root: receive my slice of each column from rank 0 */
for (c=0; c<vec->traits.ncols; c++) {
MPI_CALL_RETURN(MPI_Irecv(DENSEMAT_VALPTR(nodeVec,0,c),ctx->row_map->ldim[me],mpidt,0,me,ctx->mpicomm,&req[msgcount]));
msgcount++;
}
} else {
/* root: copy its own slice locally, send the rest to each rank */
for (c=0; c<vec->traits.ncols; c++) {
memcpy(DENSEMAT_VALPTR(nodeVec,0,c),DENSEMAT_VALPTR(vec,0,c),vec->elSize*ctx->row_map->ldim[0]);
for (i=1;i<nprocs;i++) {
/* NOTE(review): the send side addresses (vec, c, goffs[i]) while the
* receive side uses (nodeVec, 0, c) — confirm the (row,col) argument
* order of DENSEMAT_VALPTR is intended here for row-major storage. */
MPI_CALL_RETURN(MPI_Isend(DENSEMAT_VALPTR(vec,c,ctx->row_map->goffs[i]),ctx->row_map->ldim[i],mpidt,i,i,ctx->mpicomm,&req[msgcount]));
msgcount++;
}
}
}
MPI_CALL_RETURN(MPI_Waitall(msgcount,req,stat));
#else
/* no MPI: plain column-wise copy */
for (c=0; c<vec->traits.ncols; c++) {
memcpy(DENSEMAT_VALPTR(nodeVec,0,c),DENSEMAT_VALPTR(vec,0,c),DM_NROWS(vec)*vec->elSize);
}
// *nodeVec = vec->clone(vec);
#endif
ghost_densemat_upload(nodeVec);
GHOST_DEBUG_LOG(1,"Vector distributed successfully");
return GHOST_SUCCESS;
}
/* Gather the distributed pieces of a row-major densemat onto rank 0's
* totalVec — the mirror operation of ghost_densemat_rm_distributeVector,
* with the Isend/Irecv roles swapped. */
ghost_error ghost_densemat_rm_collectVectors(ghost_densemat *vec, ghost_densemat *totalVec, ghost_context *ctx)
{
#ifdef GHOST_HAVE_MPI
ghost_lidx c;
int me;
int nprocs;
ghost_mpi_datatype mpidt;
GHOST_CALL_RETURN(ghost_rank(&me, ctx->mpicomm));
GHOST_CALL_RETURN(ghost_nrank(&nprocs, ctx->mpicomm));
GHOST_CALL_RETURN(ghost_mpi_datatype_get(&mpidt,vec->traits.datatype));
bool uniformstorage;
GHOST_CALL_RETURN(ghost_densemat_uniformstorage(&uniformstorage,vec,ctx->mpicomm));
if (!uniformstorage) {
GHOST_ERROR_LOG("Cannot collect vectors of different storage order");
return GHOST_ERR_INVALID_ARG;
}
// if (ctx != NULL)
// vec->permute(vec,ctx->invRowPerm);
int i;
MPI_Request req[vec->traits.ncols*2*(nprocs-1)];
MPI_Status stat[vec->traits.ncols*2*(nprocs-1)];
int msgcount = 0;
for (i=0;i<vec->traits.ncols*2*(nprocs-1);i++)
req[i] = MPI_REQUEST_NULL;
if (me != 0) {
/* non-root: send my local slice of each column to rank 0 */
for (c=0; c<vec->traits.ncols; c++) {
MPI_CALL_RETURN(MPI_Isend(DENSEMAT_VALPTR(vec,0,c),ctx->row_map->ldim[me],mpidt,0,me,ctx->mpicomm,&req[msgcount]));
msgcount++;
}
} else {
/* root: copy own slice, receive every other rank's slice into place */
for (c=0; c<vec->traits.ncols; c++) {
memcpy(DENSEMAT_VALPTR(totalVec,0,c),DENSEMAT_VALPTR(vec,0,c),vec->elSize*ctx->row_map->ldim[0]);
for (i=1;i<nprocs;i++) {
/* NOTE(review): same (row,col) argument-order question as in
* distributeVector — confirm (totalVec, c, goffs[i]) is intended. */
MPI_CALL_RETURN(MPI_Irecv(DENSEMAT_VALPTR(totalVec,c,ctx->row_map->goffs[i]),ctx->row_map->ldim[i],mpidt,i,i,ctx->mpicomm,&req[msgcount]));
msgcount++;
}
}
}
MPI_CALL_RETURN(MPI_Waitall(msgcount,req,stat));
#else
UNUSED(ctx);
/* no MPI: the 'collected' vector is simply a copy */
ghost_densemat_init_densemat(totalVec,vec,0,0);
#endif
return GHOST_SUCCESS;
}
/* Turn a scattered view (non-contiguous rows/columns selected via the
* row/col masks) into a compact, self-owned row-major densemat: allocate
* fresh storage, copy the selected elements into contiguous layout, and
* clear the VIEW/SCATTERED flags. No-op for already-compact vectors. */
ghost_error ghost_densemat_rm_compress(ghost_densemat *vec)
{
if (!(vec->traits.flags & GHOST_DENSEMAT_SCATTERED)) {
return GHOST_SUCCESS;
}
if (vec->traits.location & GHOST_LOCATION_HOST) {
ghost_lidx v,i;
char *val = NULL;
/* pinned allocation when a device copy exists, aligned otherwise */
if (vec->traits.location & GHOST_LOCATION_DEVICE) {
GHOST_CALL_RETURN(ghost_malloc_pinned((void **)&val,
(size_t)vec->traits.ncolspadded*DM_NROWSPAD(vec)*
vec->elSize));
} else {
GHOST_CALL_RETURN(ghost_malloc_align((void **)&val,
(size_t)vec->traits.ncolspadded*DM_NROWSPAD(vec)*
vec->elSize,GHOST_DATA_ALIGNMENT));
}
/* NOTE(review): this loop zeroes only the FIRST byte of each padded
* element, not the full elSize bytes — presumably a padding
* initialization; confirm a full memset is not required. */
#pragma omp parallel for schedule(runtime) private(v)
for (i=0; i<DM_NROWSPAD(vec); i++) {
for (v=0; v<vec->traits.ncolspadded; v++) {
val[(v*DM_NROWSPAD(vec)+i)*vec->elSize] = 0;
}
}
DENSEMAT_ITER(vec,memcpy(&val[(row*vec->traits.ncolspadded+col)*vec->elSize],
valptr,vec->elSize));
/* NOTE(review): unlike the CUDA branch below, the previous host buffer
* is not freed here for non-view vectors — possible leak; verify the
* old vec->val ownership before relying on this. */
vec->val = val;
}
if (vec->traits.location & GHOST_LOCATION_DEVICE) {
#ifdef GHOST_HAVE_CUDA
char *cu_val;
GHOST_CALL_RETURN(ghost_cu_malloc((void **)&cu_val,DM_NROWSPAD(vec)*vec->traits.ncolspadded*vec->elSize));
DENSEMAT_ITER(vec,ghost_cu_memcpy(&cu_val[(row*vec->traits.ncolspadded+col)*vec->elSize],
DENSEMAT_CUVALPTR(vec,memrow,memcol),vec->elSize));
if (!(vec->traits.flags & GHOST_DENSEMAT_VIEW)) {
GHOST_CALL_RETURN(ghost_cu_free(vec->cu_val));
}
vec->cu_val = cu_val;
#endif
}
/* the compacted vector now covers all rows/columns contiguously */
ghost_bitmap_set_range(vec->colmask,0,vec->traits.ncols-1);
ghost_bitmap_set_range(vec->rowmask,0,DM_NROWS(vec)-1);
vec->traits.flags &= ~(ghost_densemat_flags)GHOST_DENSEMAT_VIEW;
vec->traits.flags &= ~(ghost_densemat_flags)GHOST_DENSEMAT_SCATTERED;
vec->stride = vec->traits.ncolspadded;
vec->src = vec;
return GHOST_SUCCESS;
}
/*
 * Prepare a halo communication for a row-major densemat: allocate host
 * (and, for device densemats, pinned + device) work buffers, set up the
 * per-process receive pointers, and assemble the packed send buffer.
 * The actual message exchange is handled elsewhere; this routine only
 * builds on ghost_densemat_halocommInit_common().
 *
 * Returns GHOST_SUCCESS or the first error raised by an allocation or
 * CUDA call; GHOST_ERR_NOT_IMPLEMENTED without MPI.
 */
ghost_error ghost_densemat_rm_halocommInit(ghost_densemat *vec, ghost_context *ctx, ghost_densemat_halo_comm *comm)
{
#ifdef GHOST_HAVE_MPI
    GHOST_FUNC_ENTER(GHOST_FUNCTYPE_COMMUNICATION);
    ghost_error ret = GHOST_SUCCESS;
    int i, to_PE, from_PE, partner;
    int nprocs;
    GHOST_CALL_GOTO(ghost_nrank(&nprocs, ctx->mpicomm),err,ret);
    /* Storage-order independent setup (wish/due bookkeeping etc.). */
    GHOST_CALL_GOTO(ghost_densemat_halocommInit_common(vec,ctx,comm),err,ret);
    if (!comm->tmprecv) {
        /* One receive target pointer per peer process. */
        GHOST_CALL_GOTO(ghost_malloc((void **)&comm->tmprecv,nprocs*sizeof(char *)),err,ret);
        if (vec->stride == vec->traits.ncols) {
            /* Compact densemat: halo rows are contiguous, so receives can
             * land directly in the densemat at each peer's put position. */
            if (vec->traits.location & GHOST_LOCATION_DEVICE) {
#ifdef GHOST_HAVE_GPUDIRECT
                /* GPUdirect: receive straight into device memory. */
                for (from_PE=0; from_PE<nprocs; from_PE++) {
                    comm->tmprecv[from_PE] = DENSEMAT_CUVALPTR(vec,ctx->hput_pos[from_PE],0);
                }
#else
                /* No GPUdirect: stage through pinned host memory and a
                 * packed host-side receive buffer. */
                GHOST_INSTR_START("hostAlloc")
                GHOST_CALL_GOTO(ghost_cu_malloc_pinned((void **)&comm->work,(size_t)vec->traits.ncols*comm->acc_dues*vec->elSize),err,ret);
                GHOST_INSTR_STOP("hostAlloc")
                GHOST_CALL_GOTO(ghost_malloc((void **)&comm->tmprecv_mem,vec->traits.ncols*vec->elSize*comm->acc_wishes),err,ret);
                for (from_PE=0; from_PE<nprocs; from_PE++){
                    comm->tmprecv[from_PE] = &comm->tmprecv_mem[comm->wishptr[from_PE]*vec->traits.ncols*vec->elSize];
                }
#endif
                /* Device-side staging buffer for the outgoing rows. */
                GHOST_INSTR_START("deviceAlloc")
                GHOST_CALL_GOTO(ghost_cu_malloc(&comm->cu_work,vec->traits.ncols*comm->acc_dues*vec->elSize),err,ret);
                GHOST_INSTR_STOP("deviceAlloc")
            } else {
                /* Host-only, compact: receive in place, send from a packed
                 * host work buffer. */
                for (from_PE=0; from_PE<nprocs; from_PE++) {
                    comm->tmprecv[from_PE] = DENSEMAT_VALPTR(vec,ctx->hput_pos[from_PE],0);
                }
                GHOST_CALL_GOTO(ghost_malloc((void **)&comm->work,(size_t)vec->traits.ncols*comm->acc_dues*vec->elSize),err,ret);
            }
        } else {
            /* Strided densemat: halo rows are not contiguous, so always
             * receive into a packed buffer and scatter later (in
             * halocommFinalize). */
            GHOST_CALL_GOTO(ghost_malloc((void **)&comm->tmprecv_mem,vec->traits.ncols*vec->elSize*comm->acc_wishes),err,ret);
            for (from_PE=0; from_PE<nprocs; from_PE++){
                comm->tmprecv[from_PE] = &comm->tmprecv_mem[comm->wishptr[from_PE]*vec->traits.ncols*vec->elSize];
            }
            if (vec->traits.location & GHOST_LOCATION_DEVICE) {
                GHOST_INSTR_START("hostAlloc")
                GHOST_CALL_GOTO(ghost_cu_malloc_pinned((void **)&comm->work,(size_t)vec->traits.ncols*comm->acc_dues*vec->elSize),err,ret);
                GHOST_INSTR_STOP("hostAlloc")
                GHOST_INSTR_START("deviceAlloc")
                GHOST_CALL_GOTO(ghost_cu_malloc(&comm->cu_work,vec->traits.ncols*comm->acc_dues*vec->elSize),err,ret);
                GHOST_INSTR_STOP("deviceAlloc")
            } else {
                GHOST_CALL_GOTO(ghost_malloc((void **)&comm->work,(size_t)vec->traits.ncols*comm->acc_dues*vec->elSize),err,ret);
            }
        }
    }
    /* Gather the rows owed to each peer ("dues") into the packed send
     * buffer, applying the local column permutation if one exists. */
    GHOST_INSTR_START("assemble_buf");
    if (ctx->col_map->loc_perm) {
#ifdef GHOST_HAVE_CUDA
        if (vec->traits.location & GHOST_LOCATION_DEVICE) {
            /* Assembly runs on the device into cu_work. */
            ghost_densemat_cu_rm_communicationassembly(comm->cu_work,comm->dueptr,comm->acc_dues,vec,ctx,ctx->col_map->cu_loc_perm);
        } else
#endif
        if (vec->traits.location & GHOST_LOCATION_HOST) {
            for (partner = 0; partner<ctx->nduepartners; partner++) {
                to_PE = ctx->duepartners[partner];
#pragma omp parallel for
                for (i=0; i<ctx->dues[to_PE]; i++){
                    /* Row index goes through loc_perm before lookup. */
                    memcpy(comm->work + (comm->dueptr[to_PE]+i)*vec->elSize*vec->traits.ncols,DENSEMAT_VALPTR(vec,ctx->col_map->loc_perm[ctx->duelist[to_PE][i]],0),vec->elSize*vec->traits.ncols);
                }
            }
        }
    } else {
        /* Same as above, without a permutation. */
#ifdef GHOST_HAVE_CUDA
        if (vec->traits.location & GHOST_LOCATION_DEVICE) {
            ghost_densemat_cu_rm_communicationassembly(comm->cu_work,comm->dueptr,comm->acc_dues,vec,ctx,NULL);
        } else
#endif
        if (vec->traits.location & GHOST_LOCATION_HOST) {
            for (partner = 0; partner<ctx->nduepartners; partner++) {
                to_PE = ctx->duepartners[partner];
#pragma omp parallel for
                for (i=0; i<ctx->dues[to_PE]; i++){
                    memcpy(comm->work + (comm->dueptr[to_PE]+i)*vec->elSize*vec->traits.ncols,DENSEMAT_VALPTR(vec,ctx->duelist[to_PE][i],0),vec->elSize*vec->traits.ncols);
                }
            }
        }
    }
    GHOST_INSTR_STOP("assemble_buf");
#ifdef GHOST_HAVE_CUDA
    if (vec->traits.location & GHOST_LOCATION_DEVICE) {
        /* Bring the device-assembled send buffer to the host, unless
         * GPUdirect is available and the densemat is compact (in which case
         * messages are sent from device memory directly). */
        GHOST_INSTR_START("download_buf");
#ifdef GHOST_TRACK_DATATRANSFERS
        ghost_datatransfer_register("spmv_halo",GHOST_DATATRANSFER_IN,GHOST_DATATRANSFER_RANK_GPU,vec->traits.ncols*comm->acc_dues*vec->elSize);
#endif
#ifdef GHOST_HAVE_GPUDIRECT
        if (vec->traits.ncols != vec->stride)
#endif
        {
            GHOST_CALL_GOTO(ghost_cu_download(comm->work,comm->cu_work,vec->traits.ncols*comm->acc_dues*vec->elSize),err,ret);
        }
        GHOST_INSTR_STOP("download_buf");
    }
#endif
    goto out;
err:
    /* Errors fall through to the common exit; ret carries the error code. */
out:
    GHOST_FUNC_EXIT(GHOST_FUNCTYPE_COMMUNICATION);
    return ret;
#else
    UNUSED(vec);
    UNUSED(comm);
    UNUSED(ctx);
    return GHOST_ERR_NOT_IMPLEMENTED;
#endif
}
/*
 * Finish a halo communication started by halocommInit: complete the
 * exchange (common routine), scatter received rows into a strided host
 * densemat, upload halo rows to the device if needed, and free all
 * communication buffers.
 *
 * Returns GHOST_SUCCESS or the first error from a CUDA call;
 * GHOST_ERR_NOT_IMPLEMENTED without MPI.
 */
ghost_error ghost_densemat_rm_halocommFinalize(ghost_densemat *vec, ghost_context *ctx, ghost_densemat_halo_comm *comm)
{
#ifdef GHOST_HAVE_MPI
    GHOST_FUNC_ENTER(GHOST_FUNCTYPE_COMMUNICATION);
    ghost_error ret = GHOST_SUCCESS;
    int nprocs;
    int i, from_PE, partner;
    GHOST_CALL_GOTO(ghost_nrank(&nprocs, ctx->mpicomm),err,ret);
    /* Completes the outstanding messages (storage-order independent). */
    GHOST_CALL_GOTO(ghost_densemat_halocommFinalize_common(comm),err,ret);
    /* Strided host densemat: the halo arrived in the packed tmprecv buffer
     * (see halocommInit); copy each received row to its halo position.
     * NOTE(review): this tests location == GHOST_LOCATION_HOST (exact
     * equality) while the rest of the file uses '&'; host+device densemats
     * therefore skip this scatter and rely on the device upload below --
     * confirm that is intended. */
    if ((vec->stride != vec->traits.ncols) && (vec->traits.location == GHOST_LOCATION_HOST)) {
        GHOST_INSTR_START("Assemble row-major view");
        for (partner=0; partner<ctx->nwishpartners; partner++){
            from_PE = ctx->wishpartners[partner];
            for (i=0; i<ctx->wishes[from_PE]; i++){
                memcpy(DENSEMAT_VALPTR(vec,ctx->hput_pos[from_PE]+i,0),&comm->tmprecv[from_PE][(i*vec->traits.ncols)*vec->elSize],vec->elSize*vec->traits.ncols);
            }
        }
        GHOST_INSTR_STOP("Assemble row-major view");
    }
#ifdef GHOST_HAVE_CUDA
    GHOST_INSTR_START("upload")
    if (vec->traits.location & GHOST_LOCATION_DEVICE) {
#ifdef GHOST_TRACK_DATATRANSFERS
        ghost_datatransfer_register("spmv_halo",GHOST_DATATRANSFER_OUT,GHOST_DATATRANSFER_RANK_GPU,ctx->col_map->nhalo*vec->traits.ncols*vec->elSize);
#endif
        /* With GPUdirect and a compact densemat (ncols == stride) the halo
         * was received directly into device memory; no upload needed. */
#ifdef GHOST_HAVE_GPUDIRECT
        if (vec->traits.ncols != vec->stride)
#endif
        {
            /* 2D upload: packed rows from tmprecv_mem into the (possibly
             * strided) halo region at the end of the densemat. */
            GHOST_CALL_GOTO(ghost_cu_upload2d(DENSEMAT_CUVALPTR(vec,vec->map->dimhalo-vec->map->nhalo,0),vec->stride*vec->elSize,comm->tmprecv_mem,vec->traits.ncols*vec->elSize,vec->traits.ncols*vec->elSize,comm->acc_wishes),err,ret);
        }
    }
    GHOST_INSTR_STOP("upload");
#endif
    /* Release all communication buffers.
     * NOTE(review): this device branch is NOT guarded by GHOST_HAVE_CUDA --
     * assumes ghost_cu_free()/ghost_cu_free_host() exist (or the DEVICE
     * location flag is never set) in non-CUDA builds; confirm. */
    if (vec->traits.location & GHOST_LOCATION_DEVICE) {
        GHOST_CALL_GOTO(ghost_cu_free(comm->cu_work),err,ret); comm->cu_work = NULL;
        GHOST_CALL_GOTO(ghost_cu_free_host(comm->work),err,ret); comm->work = NULL;
    } else {
        free(comm->work); comm->work = NULL;
    }
    free(comm->tmprecv_mem); comm->tmprecv_mem = NULL;
    free(comm->tmprecv); comm->tmprecv = NULL;
    free(comm->request); comm->request = NULL;
    free(comm->status); comm->status = NULL;
    free(comm->dueptr); comm->dueptr = NULL;
    free(comm->wishptr); comm->wishptr = NULL;
    goto out;
err:
    /* Errors fall through to the common exit; ret carries the error code. */
out:
    GHOST_FUNC_EXIT(GHOST_FUNCTYPE_COMMUNICATION);
    return ret;
#else
    UNUSED(vec);
    UNUSED(comm);
    UNUSED(ctx);
    return GHOST_ERR_NOT_IMPLEMENTED;
#endif
}
|
omp_hello.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/*
 * Minimal OpenMP demo: every thread greets with its id, and the master
 * thread additionally reports the team size.
 */
int main (int argc, char *argv[])
{
    int thread_count;
    int thread_id;

    /* Pin the team to exactly two threads: disable dynamic adjustment,
       then request two. */
    omp_set_dynamic(0);
    omp_set_num_threads(2);

#pragma omp parallel private(thread_count, thread_id)
    {
        thread_id = omp_get_thread_num();
        printf("Hello World from thread = %d\n", thread_id);
        /* Only the master thread (id 0) queries the team size. */
        if (thread_id == 0)
        {
            thread_count = omp_get_num_threads();
            printf("Number of threads = %d\n", thread_count);
        }
    }

    return 0;
}
|
teams-distr-on-host.c | // The test supposes no offload, pure host execution.
// It checks that the bug in implementation of distribute construct is fixed.
// RUN: %libomp-compile-and-run
// UNSUPPORTED: icc
#include <stdio.h>
#include <omp.h>
/*
 * Regression test: each thread runs a host-executed `target teams
 * distribute` that must assign A[i] = i for every i; any mismatch is
 * counted and reported, and the count doubles as the exit status.
 */
int main()
{
    enum { N = 4 };
    int wrong_counts = 0;

    omp_set_num_threads(2);
#pragma omp parallel reduction(+:wrong_counts)
    {
        int buf[N];
        int idx;
        int th = omp_get_thread_num();

        /* Start from a clean, all-zero per-thread array. */
        for (idx = 0; idx < N; idx++)
            buf[idx] = 0;

        /* The distribute loop must cover every index exactly once. */
#pragma omp target teams distribute map(tofrom: buf[:N]) private(idx)
        for (idx = 0; idx < N; idx++)
        {
            buf[idx] = idx;
            printf("th %d, team %d, i %d\n", th, omp_get_team_num(), idx);
        }

        /* Serialize per-thread verification output. */
#pragma omp critical
        {
            printf("tid = %d\n", th);
            for (idx = 0; idx < N; idx++)
            {
                if (buf[idx] != idx)
                    wrong_counts++;
                printf(" %d", buf[idx]);
            }
            printf("\n");
        }
    }

    if (wrong_counts)
        printf("failed\n");
    else
        printf("passed\n");
    return wrong_counts;
}
|
quantize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods takes a 24 bit image and reduces
% the number of colors so it can be displayed on raster device with less
% bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes a standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinate of two opposite vertices (vertex
% nearest the origin in RGB space and the vertex farthest from the origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image. Reduction collapses the tree until the number of
% colors it represents is, at most, the number of colors desired in the
% output image.
% Assignment defines the output image's color map and sets each pixel's
% color by restorage_class in the reduced tree. Our goal is to minimize
% the numerical discrepancies between the original colors and quantized
% colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the storage_class phase for realistic values of Cmax. If
% colors components in the input image are quantized to k-bit precision,
% so that Cmax= 2k-1, the tree would need k levels below the root node to
% allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is 1 +
% sum(i=1, k, 8k).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, storage_class scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of pixels
% represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define ErrorRelativeWeight PerceptibleReciprocal(16)
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
Typdef declarations.
*/
/*
  A pixel with double-precision channels.  Used for working pixels
  (optionally alpha-premultiplied, see AssociateAlphaPixel()), per-node
  color sums, and error terms.
*/
typedef struct _DoublePixelPacket
{
  double
    red,
    green,
    blue,
    alpha;
} DoublePixelPacket;
/*
  One node of the color description tree.  Each level splits every
  channel on one bit, so a node has up to 16 children: three color bits
  plus an optional alpha bit (see ColorToNodeId()).
*/
typedef struct _NodeInfo
{
  struct _NodeInfo
    *parent,        /* enclosing cube; presumably NULL for the root -- confirm */
    *child[16];     /* sub-cubes; NULL until a pixel classifies into them */

  MagickSizeType
    number_unique;  /* pixels classified exactly at this node ("n2" above) */

  DoublePixelPacket
    total_color;    /* channel sums of those pixels (Sr, Sg, Sb[, Sa]) */

  double
    quantize_error; /* accumulated pixel-to-cube-center distance ("E" above) */

  size_t
    color_number,   /* colormap index; assumed set during colormap definition -- confirm */
    id,             /* child slot index within the parent */
    level;          /* depth in the tree */
} NodeInfo;
/*
  One chunk of the node arena: NodeInfo structures are allocated in
  batches (presumably NodesInAList at a time -- confirm against
  GetNodeInfo()) and the chunks are chained for bulk release.
*/
typedef struct _Nodes
{
  NodeInfo
    *nodes;   /* array of nodes in this chunk */

  struct _Nodes
    *next;    /* next chunk, or NULL */
} Nodes;
/*
  State for one quantization run: the color description tree, its node
  arena, the closest-color search scratch state, and error-diffusion
  bookkeeping for dithering.  AssignImageColors() copies this struct per
  thread, so the search fields below are per-thread scratch.
*/
typedef struct _CubeInfo
{
  NodeInfo
    *root;               /* root of the color description tree */

  size_t
    colors,              /* leaves currently in the tree (see ClassifyImageColors) */
    maximum_colors;      /* requested colormap size */

  ssize_t
    transparent_index;   /* colormap slot of the transparent color; -1 if none */

  MagickSizeType
    transparent_pixels;  /* count of fully transparent pixels */

  /* Closest-color search state (used by ClosestColor()). */
  DoublePixelPacket
    target;              /* pixel being matched */

  double
    distance,            /* best distance found so far */
    pruning_threshold,   /* current pruning threshold ("Ep" above) */
    next_threshold;      /* minimum E among surviving nodes */

  /* Node arena bookkeeping. */
  size_t
    nodes,               /* total nodes allocated */
    free_nodes,          /* unused slots left in the current chunk */
    color_number;        /* colormap index of the best match found */

  NodeInfo
    *next_node;          /* next free node in the current chunk */

  Nodes
    *node_queue;         /* chain of node chunks for bulk release */

  MemoryInfo
    *memory_info;        /* backing allocation for `cache` -- TODO confirm */

  ssize_t
    *cache;              /* color lookup cache for dithering -- TODO confirm */

  /* Error-diffusion state for dithering. */
  DoublePixelPacket
    error[ErrorQueueLength];   /* recent quantization errors */

  double
    diffusion,                 /* error diffusion amount -- TODO confirm */
    weights[ErrorQueueLength]; /* diffusion weights for the error queue */

  QuantizeInfo
    *quantize_info;      /* options this cube was built with */

  MagickBooleanType
    associate_alpha;     /* premultiply color channels by alpha? */

  /* Current position/progress during dithered traversal. */
  ssize_t
    x,
    y;

  size_t
    depth;               /* maximum tree depth for this run */

  MagickOffsetType
    offset;              /* progress offset -- TODO confirm */

  MagickSizeType
    span;                /* total pixels to process -- TODO confirm */
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *,ExceptionInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *,ExceptionInfo *),
SetGrayscaleImage(Image *,ExceptionInfo *),
SetImageColormap(Image *,CubeInfo *,ExceptionInfo *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DefineImageColormap(Image *,CubeInfo *,NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  const char
    *option;

  QuantizeInfo
    *quantize_info;

  /*
    Allocate and default-initialize the structure; AcquireCriticalMemory()
    never returns NULL.
  */
  quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info));
  GetQuantizeInfo(quantize_info);
  if (image_info == (ImageInfo *) NULL)
    return(quantize_info);
  /*
    Inherit dithering and error-measurement settings from the image info;
    an explicit "dither" image option overrides the boolean dither flag.
  */
  quantize_info->dither_method=image_info->dither == MagickFalse ?
    NoDitherMethod : RiemersmaDitherMethod;
  option=GetImageOption(image_info,"dither");
  if (option != (const char *) NULL)
    quantize_info->dither_method=(DitherMethod) ParseCommandOption(
      MagickDitherOptions,MagickFalse,option);
  quantize_info->measure_error=image_info->verbose;
  return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static inline void AssociateAlphaPixel(const Image *image,
  const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel)
{
  double
    scale;

  /*
    Copy the pixel into double precision, premultiplying the color channels
    by the normalized alpha when alpha association is enabled and the pixel
    is not fully opaque; otherwise the channels pass through unscaled.
  */
  scale=1.0;
  if ((cube_info->associate_alpha != MagickFalse) &&
      (GetPixelAlpha(image,pixel) != OpaqueAlpha))
    scale=(double) (QuantumScale*GetPixelAlpha(image,pixel));
  alpha_pixel->red=scale*GetPixelRed(image,pixel);
  alpha_pixel->green=scale*GetPixelGreen(image,pixel);
  alpha_pixel->blue=scale*GetPixelBlue(image,pixel);
  alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
}
static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info,
  const PixelInfo *pixel,DoublePixelPacket *alpha_pixel)
{
  double
    scale;

  /*
    PixelInfo counterpart of AssociateAlphaPixel(): premultiply the color
    channels by the normalized alpha unless the pixel is opaque or alpha
    association is disabled.
  */
  scale=1.0;
  if ((cube_info->associate_alpha != MagickFalse) &&
      (pixel->alpha != OpaqueAlpha))
    scale=(double) (QuantumScale*pixel->alpha);
  alpha_pixel->red=scale*pixel->red;
  alpha_pixel->green=scale*pixel->green;
  alpha_pixel->blue=scale*pixel->blue;
  alpha_pixel->alpha=(double) pixel->alpha;
}
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const DoublePixelPacket *pixel,size_t index)
{
  size_t
    id;

  /*
    Build the child-slot index from bit `index` of each 8-bit channel:
    red -> bit 0, green -> bit 1, blue -> bit 2, and (when alpha is
    associated) alpha -> bit 3.
  */
  id=(size_t) ((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01);
  id|=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) & 0x01) << 1);
  id|=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) & 0x01) << 2);
  if (cube_info->associate_alpha != MagickFalse)
    id|=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) & 0x01) << 3);
  return(id);
}
/*
  AssignImageColors() maps every pixel of the image to its closest colormap
  entry (built from the pruned color tree), either by dithering or by a
  parallel closest-color search, then handles the special two-color
  grayscale case and restores the original colorspace.
*/
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define AssignImageTag "Assign/Image"

  ColorspaceType
    colorspace;

  ssize_t
    y;

  /*
    Allocate image colormap.
  */
  colorspace=image->colorspace;
  if (cube_info->quantize_info->colorspace != UndefinedColorspace)
    (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace,
      exception);
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  if (SetImageColormap(image,cube_info,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Create a reduced color image.
  */
  if (cube_info->quantize_info->dither_method != NoDitherMethod)
    (void) DitherImage(image,cube_info,exception);
  else
    {
      CacheView
        *image_view;

      MagickBooleanType
        status;

      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CubeInfo
          cube;

        Quantum
          *magick_restrict q;

        ssize_t
          count,
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        /* Thread-local copy: the closest-color search mutates cube state. */
        cube=(*cube_info);
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          DoublePixelPacket
            pixel;

          const NodeInfo
            *node_info;

          ssize_t
            i;

          size_t
            id,
            index;

          /*
            Identify the deepest node containing the pixel's color.
            First extend `count` over the run of identical pixels so the
            whole run is classified once.
          */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
          {
            PixelInfo
              packet;

            GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
            if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
              break;
          }
          AssociateAlphaPixel(image,&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
            distance starts above the maximum possible squared distance.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          /* Write the chosen colormap entry over the whole run. */
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(image,(Quantum) index,q);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRed(image,ClampToQuantum(
                  image->colormap[index].red),q);
                SetPixelGreen(image,ClampToQuantum(
                  image->colormap[index].green),q);
                SetPixelBlue(image,ClampToQuantum(
                  image->colormap[index].blue),q);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelAlpha(image,ClampToQuantum(
                    image->colormap[index].alpha),q);
              }
            q+=GetPixelChannels(image);
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image,exception);
  if ((cube_info->quantize_info->number_colors == 2) &&
      (IsGrayColorspace(cube_info->quantize_info->colorspace)))
    {
      double
        intensity;

      /*
        Monochrome image: force the colormap to pure black and white,
        ordered by luma.
      */
      intensity=GetPixelInfoLuma(image->colormap+0) < QuantumRange/2.0 ? 0.0 :
        QuantumRange;
      if (image->colors > 1)
        {
          intensity=0.0;
          if (GetPixelInfoLuma(image->colormap+0) >
              GetPixelInfoLuma(image->colormap+1))
            intensity=(double) QuantumRange;
        }
      image->colormap[0].red=intensity;
      image->colormap[0].green=intensity;
      image->colormap[0].blue=intensity;
      if (image->colors > 1)
        {
          image->colormap[1].red=(double) QuantumRange-intensity;
          image->colormap[1].green=(double) QuantumRange-intensity;
          image->colormap[1].blue=(double) QuantumRange-intensity;
        }
    }
  (void) SyncImage(image,exception);
  /* Restore the caller's colorspace if quantization changed it. */
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (IssRGBCompatibleColorspace(colorspace) == MagickFalse))
    (void) TransformImageColorspace(image,colorspace,exception);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the storage_class phase for realistic values of
% Cmax. If colors components in the input image are quantized to k-bit
% precision, so that Cmax= 2k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1,k,8k).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, storage_class scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each such node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  MagickBooleanType
    two_color_gray;

  /*
    Enable alpha association only when the image has an alpha channel,
    except for two-color grayscale quantization, which ignores alpha.
  */
  two_color_gray=((cube_info->quantize_info->number_colors == 2) &&
    ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
     (cube_info->quantize_info->colorspace == GRAYColorspace))) ? MagickTrue :
    MagickFalse;
  cube_info->associate_alpha=((image->alpha_trait != UndefinedPixelTrait) &&
    (two_color_gray == MagickFalse)) ? MagickTrue : MagickFalse;
}
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag  "Classify/Image"

  CacheView
    *image_view;

  double
    bisect;

  DoublePixelPacket
    error,
    mid,
    midpoint,
    pixel;

  MagickBooleanType
    proceed;

  NodeInfo
    *node_info;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
  */
  SetAssociatedAlpha(image,cube_info);
  if (cube_info->quantize_info->colorspace != image->colorspace)
    {
      /*
        Quantize in the requested colorspace; otherwise fall back to sRGB
        when the image is not already sRGB-compatible.
      */
      if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
          (cube_info->quantize_info->colorspace != CMYKColorspace))
        (void) TransformImageColorspace((Image *) image,
          cube_info->quantize_info->colorspace,exception);
      else
        if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
          (void) TransformImageColorspace((Image *) image,sRGBColorspace,
            exception);
    }
  /*
    Center of the color cube; during descent, bisect halves each level and
    mid is nudged toward the pixel's octant.
  */
  midpoint.red=(double) QuantumRange/2.0;
  midpoint.green=(double) QuantumRange/2.0;
  midpoint.blue=(double) QuantumRange/2.0;
  midpoint.alpha=(double) QuantumRange/2.0;
  error.alpha=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  /*
    First pass: classify at full MaxTreeDepth until the leaf count exceeds
    maximum_colors, then prune and continue in the second pass below.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        /* run-length identical adjacent pixels into a single tree update */
        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != 0)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    if (cube_info->colors > cube_info->maximum_colors)
      {
        /* too many leaves: prune to the working depth and drop to pass 2 */
        PruneToCubeDepth(cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Second pass: continue classifying the remaining rows, but only to the
    (possibly pruned) cube_info->depth rather than MaxTreeDepth.
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        /* run-length identical adjacent pixels into a single tree update */
        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","%s",
                  image->filename);
                continue;
              }
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != 0)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  /* restore the caller-visible colorspace if we transformed it above */
  if (cube_info->quantize_info->colorspace != image->colorspace)
    if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
        (cube_info->quantize_info->colorspace != CMYKColorspace))
      (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
%    o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
%      quantize info, or if quantize info is NULL a new one.
%
% o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  /*
    Allocate and default-initialize a fresh quantize-info; when the caller
    supplied one, copy its tunable settings over the defaults.
  */
  clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetQuantizeInfo(clone_info);
  if (quantize_info != (QuantizeInfo *) NULL)
    {
      clone_info->number_colors=quantize_info->number_colors;
      clone_info->tree_depth=quantize_info->tree_depth;
      clone_info->dither_method=quantize_info->dither_method;
      clone_info->colorspace=quantize_info->colorspace;
      clone_info->measure_error=quantize_info->measure_error;
    }
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      double
        alpha,
        beta,
        distance,
        pixel;

      DoublePixelPacket
        *magick_restrict q;

      PixelInfo
        *magick_restrict p;

      /*
        Determine if this color is "closest".
      */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      /* weight channels by alpha only when the cube associates alpha */
      alpha=1.0;
      beta=1.0;
      if (cube_info->associate_alpha != MagickFalse)
        {
          alpha=(MagickRealType) (QuantumScale*p->alpha);
          beta=(MagickRealType) (QuantumScale*q->alpha);
        }
      /*
        Accumulate squared distance channel by channel, bailing out as soon
        as the partial sum exceeds the best distance found so far.
      */
      pixel=alpha*p->red-beta*q->red;
      distance=pixel*pixel;
      if (distance <= cube_info->distance)
        {
          pixel=alpha*p->green-beta*q->green;
          distance+=pixel*pixel;
          if (distance <= cube_info->distance)
            {
              pixel=alpha*p->blue-beta*q->blue;
              distance+=pixel*pixel;
              if (distance <= cube_info->distance)
                {
                  if (cube_info->associate_alpha != MagickFalse)
                    {
                      pixel=p->alpha-q->alpha;
                      distance+=pixel*pixel;
                    }
                  /* new best match: remember it on the cube */
                  if (distance <= cube_info->distance)
                    {
                      cube_info->distance=distance;
                      cube_info->color_number=node_info->color_number;
                    }
                }
            }
        }
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
  ExceptionInfo *exception)
{
  QuantizeInfo
    colormap_info;

  /*
    Remove duplicate or unused palette entries by re-quantizing the image to
    its current number of colors at full tree depth.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsPaletteImage(image) == MagickFalse)
    return(MagickFalse);  /* nothing to compress: not a palette image */
  GetQuantizeInfo(&colormap_info);
  colormap_info.number_colors=image->colors;
  colormap_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&colormap_info,image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry.  A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero.
%
% The format of the DefineImageColormap method is:
%
% void DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static void DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      double
        alpha;

      PixelInfo
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube.
      */
      q=image->colormap+image->colors;
      alpha=(double) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);  /* 1/count, guarded against zero */
      if (cube_info->associate_alpha == MagickFalse)
        {
          /* no alpha: the entry is the plain mean, fully opaque */
          q->red=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.red);
          q->green=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.green);
          q->blue=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.blue);
          q->alpha=(double) OpaqueAlpha;
        }
      else
        {
          double
            opacity;

          opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
          q->alpha=(double) ClampToQuantum(opacity);
          if (q->alpha == OpaqueAlpha)
            {
              q->red=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.blue);
            }
          else
            {
              double
                gamma;

              /*
                NOTE(review): gamma divides the mean by its alpha —
                presumably the totals were accumulated alpha-associated;
                confirm against AssociateAlphaPixel().
              */
              gamma=(double) (QuantumScale*q->alpha);
              gamma=PerceptibleReciprocal(gamma);
              q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.blue);
              /* track the most-populous non-opaque entry as the
                 transparent index candidate */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with a color cube.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  Nodes
    *next;

  /*
    Release color cube tree storage: walk the node-queue list, freeing each
    page of nodes and the queue entry itself, then release the dither
    cache, the cloned quantize info, and finally the cube structure.
  */
  do
  {
    next=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=next;
  } while (cube_info->node_queue != (Nodes *) NULL);
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Validate the structure before doing anything else — the asserts must
    precede the trace call so a NULL/corrupt pointer is caught at the API
    boundary (matches the validate-first order of the sibling functions).
  */
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /* invalidate the signature so stale pointers are detectable */
  quantize_info->signature=(~MagickCoreSignature);
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
% the corresponding color reduced algorithm to neighboring pixels using
% serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
% MagickTrue if the image is dithered otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
  ssize_t
    n;

  /*
    Free each per-thread error buffer, then the pointer array itself.
    NULL slots are tolerated so this also cleans up a partially built set.
  */
  assert(pixels != (DoublePixelPacket **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
    if (pixels[n] != (DoublePixelPacket *) NULL)
      pixels[n]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[n]);
  pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
  DoublePixelPacket
    **pixels;

  size_t
    number_threads;

  ssize_t
    n;

  /*
    Allocate one two-scanline error buffer (count*2 packets) per worker
    thread; on any failure release whatever was acquired and return NULL.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (DoublePixelPacket **) NULL)
    return((DoublePixelPacket **) NULL);
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    pixels[n]=(DoublePixelPacket *) AcquireQuantumMemory(count,2*
      sizeof(**pixels));
    if (pixels[n] == (DoublePixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    cache_index;

  /*
    Pack the high (8-CacheShift) bits of each channel into a single index
    into the closest-color cache; alpha participates only when associated.
  */
  cache_index=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  if (cube_info->associate_alpha != MagickFalse)
    cache_index|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha)));
  return(cache_index);
}
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define DitherImageTag  "Dither/Image"

  CacheView
    *image_view;

  DoublePixelPacket
    **pixels;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Distribute quantization error using serpentine-scan Floyd-Steinberg.
    Returns MagickFalse if a pixel-cache access fails or the progress
    monitor requests cancellation.
  */
  pixels=AcquirePixelThreadSet(image->columns);
  if (pixels == (DoublePixelPacket **) NULL)
    return(MagickFalse);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    CubeInfo
      cube;

    DoublePixelPacket
      *current,
      *previous;

    Quantum
      *magick_restrict q;

    size_t
      index;

    ssize_t
      x,
      v;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    cube=(*cube_info);
    /*
      current/previous alternate between the two halves of the per-thread
      buffer; v flips the serpentine scan direction on odd rows.
    */
    current=pixels[id]+(y & 0x01)*image->columns;
    previous=pixels[id]+((y+1) & 0x01)*image->columns;
    v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        color,
        pixel;

      ssize_t
        i;

      ssize_t
        u;

      u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel);
      if (x > 0)
        {
          /* 7/16 of the error from the previous pixel on this row */
          pixel.red+=7.0*cube_info->diffusion*current[u-v].red/16;
          pixel.green+=7.0*cube_info->diffusion*current[u-v].green/16;
          pixel.blue+=7.0*cube_info->diffusion*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=7.0*cube_info->diffusion*current[u-v].alpha/16;
        }
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              /* 1/16 from the trailing diagonal of the previous row */
              pixel.red+=cube_info->diffusion*previous[u+v].red/16;
              pixel.green+=cube_info->diffusion*previous[u+v].green/16;
              pixel.blue+=cube_info->diffusion*previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=cube_info->diffusion*previous[u+v].alpha/16;
            }
          /* 5/16 from directly above */
          pixel.red+=5.0*cube_info->diffusion*previous[u].red/16;
          pixel.green+=5.0*cube_info->diffusion*previous[u].green/16;
          pixel.blue+=5.0*cube_info->diffusion*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=5.0*cube_info->diffusion*previous[u].alpha/16;
          if (x > 0)
            {
              /* 3/16 from the leading diagonal of the previous row */
              pixel.red+=3.0*cube_info->diffusion*previous[u-v].red/16;
              pixel.green+=3.0*cube_info->diffusion*previous[u-v].green/16;
              pixel.blue+=3.0*cube_info->diffusion*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=3.0*cube_info->diffusion*previous[u-v].alpha/16;
            }
        }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)
        {
          NodeInfo
            *node_info;

          size_t
            node_id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            node_id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[node_id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[node_id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image));
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),
            q+u*GetPixelChannels(image));
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),
            q+u*GetPixelChannels(image));
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),
            q+u*GetPixelChannels(image));
          if (cube.associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),
              q+u*GetPixelChannels(image));
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      /*
        Store the error.
      */
      AssociateAlphaPixelInfo(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].alpha=pixel.alpha-color.alpha;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
            image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelThreadSet(pixels);
  /* bug fix: propagate accumulated failures instead of returning MagickTrue
     unconditionally, which silently swallowed cache/progress errors */
  return(status);
}
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception)
{
#define DitherImageTag  "Dither/Image"

  CubeInfo
    *p;

  DoublePixelPacket
    color,
    pixel;

  MagickBooleanType
    proceed;

  size_t
    index;

  p=cube_info;
  /* only dither when the walk is currently inside the image bounds */
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      Quantum
        *magick_restrict q;

      ssize_t
        i;

      /*
        Distribute error.
      */
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      AssociateAlphaPixel(image,cube_info,q,&pixel);
      /* add the weighted error queue of recent pixels to this pixel */
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
          p->error[i].red;
        pixel.green+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
          p->error[i].green;
        pixel.blue+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
          p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.alpha+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
            p->error[i].alpha;
      }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          NodeInfo
            *node_info;

          size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          p->distance=(double) (4.0*(QuantumRange+1.0)*((double)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) p->cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q);
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q);
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q);
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue.
      */
      (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  /* advance the Hilbert-curve walk one pixel in the requested direction */
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}
static MagickBooleanType Riemersma(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const size_t level,const unsigned int direction,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Recursively trace a Hilbert curve of the given order over the image,
    dithering one pixel per RiemersmaDither() step.  The direction encodes
    the orientation of the current curve cell; each recursion emits four
    quarter-cells joined by three dither steps.
  */
  status=MagickTrue;
  if (level == 1)
    /* base case: a first-order cell is just three dither steps */
    switch (direction)
    {
      case WestGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        break;
      }
      case EastGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        break;
      }
      case NorthGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        break;
      }
      case SouthGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        break;
      }
      default:
        break;
    }
  else
    /* recursive case: four sub-cells of order level-1, connected by three
       dither steps, with orientations per the Hilbert construction */
    switch (direction)
    {
      case WestGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
            exception);
        break;
      }
      case EastGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
            exception);
        break;
      }
      case NorthGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
            exception);
        break;
      }
      case SouthGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
            exception);
        break;
      }
      default:
        break;
    }
  return(status);
}
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *artifact;

  MagickBooleanType
    status;

  size_t
    edge,
    order;

  /*
    Honor an optional artifact that scales how much error is diffused.
  */
  artifact=GetImageArtifact(image,"dither:diffusion-amount");
  if (artifact != (const char *) NULL)
    cube_info->diffusion=StringToDoubleInterval(artifact,1.0);
  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info,exception));
  /*
    Distribute quantization error along a Hilbert curve.
  */
  (void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  /* order = ceil(log2(max(columns,rows))): the curve order that covers
     the whole image */
  edge=MagickMax(image->columns,image->rows);
  order=(size_t) log2((double) edge);
  if (((size_t) 1UL << order) < edge)
    order++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  status=MagickTrue;
  if (order > 0)
    status=Riemersma(image,image_view,cube_info,order,NorthGravity,exception);
  if (status != MagickFalse)
    status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initializes the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
% CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose an optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a few number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
const size_t depth,const size_t maximum_colors)
{
CubeInfo
*cube_info;
double
weight;
size_t
length;
ssize_t
i;
/*
Initialize tree to describe color cube_info.
*/
cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
if (cube_info == (CubeInfo *) NULL)
return((CubeInfo *) NULL);
(void) memset(cube_info,0,sizeof(*cube_info));
cube_info->depth=depth;
if (cube_info->depth > MaxTreeDepth)
cube_info->depth=MaxTreeDepth;
if (cube_info->depth < 2)
cube_info->depth=2;
cube_info->maximum_colors=maximum_colors;
/*
Initialize root node.
*/
cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
if (cube_info->root == (NodeInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->root->parent=cube_info->root;
cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
if (cube_info->quantize_info->dither_method == NoDitherMethod)
return(cube_info);
/*
Initialize dither resources.
*/
length=(size_t) (1UL << (4*(8-CacheShift)));
cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
if (cube_info->memory_info == (MemoryInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
/*
Initialize color cache.
*/
(void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
/*
Distribute weights along a curve of exponential decay.
*/
weight=1.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[i]=PerceptibleReciprocal(weight);
weight*=exp(log(1.0/ErrorRelativeWeight)/(ErrorQueueLength-1.0));
}
cube_info->diffusion=1.0;
return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  /*
    Return the next zero-initialized node from the cube's free pool,
    refilling the pool with a fresh queue of NodesInAList nodes if empty.
  */
  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        {
          /*
            Release the queue header too; it was leaked on this path before.
          */
          nodes=(Nodes *) RelinquishMagickMemory(nodes);
          return((NodeInfo *) NULL);
        }
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) memset(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
% o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    index,
    y;

  /*
    Measure the per-channel error between each pixel and its colormap
    entry, storing the results in image->error.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
  (void) memset(&image->error,0,sizeof(image->error));
  /*
    A DirectClass image carries no colormap, hence zero quantization error.
  */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  area=3.0*image->columns*image->rows;  /* 3 channel samples per pixel */
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=(ssize_t) GetPixelIndex(image,p);
      if (image->alpha_trait != UndefinedPixelTrait)
        {
          /*
            Weight each side of the comparison by its own alpha.
          */
          alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
          beta=(double) (QuantumScale*image->colormap[index].alpha);
        }
      distance=fabs((double) (alpha*GetPixelRed(image,p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Normalize: mean absolute error per channel sample, mean square error
    scaled to [0,1], and the worst single-channel deviation scaled to [0,1].
  */
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Reset quantize_info to its documented defaults (256 colors, Riemersma
    dithering, undefined colorspace, no error measurement).  Validate the
    argument before doing any work, matching the convention of the other
    public entry points in this file.
  */
  assert(quantize_info != (QuantizeInfo *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) memset(quantize_info,0,sizeof(*quantize_info));
  quantize_info->number_colors=256;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% K m e a n s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% KmeansImage() applies k-means color reduction to an image. This is a
% colorspace clustering or segmentation technique.
%
% The format of the KmeansImage method is:
%
% MagickBooleanType KmeansImage(Image *image,const size_t number_colors,
% const size_t max_iterations,const double tolerance,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_colors: number of colors to use as seeds.
%
% o max_iterations: maximum number of iterations while converging.
%
% o tolerance: the maximum tolerance.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Per-thread, per-cluster accumulators for one k-means iteration.
*/
typedef struct _KmeansInfo
{
  double
    red,         /* channel sums, accumulated QuantumScale-normalized */
    green,
    blue,
    alpha,
    black,
    count,       /* number of pixels assigned to this cluster */
    distortion;  /* sum of squared distances to the cluster center */
} KmeansInfo;
static KmeansInfo **DestroyKmeansThreadSet(KmeansInfo **kmeans_info)
{
  ssize_t
    n,
    number_threads;

  /*
    Release each per-thread accumulator array, then the table itself;
    always returns NULL so callers can reassign their pointer.
  */
  assert(kmeans_info != (KmeansInfo **) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (n=0; n < number_threads; n++)
    if (kmeans_info[n] != (KmeansInfo *) NULL)
      kmeans_info[n]=(KmeansInfo *) RelinquishMagickMemory(kmeans_info[n]);
  return((KmeansInfo **) RelinquishMagickMemory(kmeans_info));
}
static KmeansInfo **AcquireKmeansThreadSet(const size_t number_colors)
{
KmeansInfo
**kmeans_info;
ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
kmeans_info=(KmeansInfo **) AcquireQuantumMemory(number_threads,
sizeof(*kmeans_info));
if (kmeans_info == (KmeansInfo **) NULL)
return((KmeansInfo **) NULL);
(void) memset(kmeans_info,0,number_threads*sizeof(*kmeans_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
kmeans_info[i]=(KmeansInfo *) AcquireQuantumMemory(number_colors,
sizeof(**kmeans_info));
if (kmeans_info[i] == (KmeansInfo *) NULL)
return(DestroyKmeansThreadSet(kmeans_info));
}
return(kmeans_info);
}
static inline double KmeansMetric(const Image *magick_restrict image,
  const Quantum *magick_restrict p,const PixelInfo *magick_restrict q)
{
  double
    gamma,
    metric,
    pixel;

  /*
    Squared color distance between pixel p and cluster center q.  The
    color-channel terms are weighted by gamma, an alpha/black coverage
    factor, so transparent pixels contribute less color error.
  */
  gamma=1.0;
  metric=0.0;
  if ((image->alpha_trait != UndefinedPixelTrait) ||
      (q->alpha_trait != UndefinedPixelTrait))
    {
      /*
        NOTE(review): this alpha difference is in quantum units while every
        other channel term below is scaled by QuantumScale — confirm the
        missing QuantumScale here is intentional.
      */
      pixel=GetPixelAlpha(image,p)-(q->alpha_trait != UndefinedPixelTrait ?
        q->alpha : OpaqueAlpha);
      metric+=pixel*pixel;
      if (image->alpha_trait != UndefinedPixelTrait)
        gamma*=QuantumScale*GetPixelAlpha(image,p);
      if (q->alpha_trait != UndefinedPixelTrait)
        gamma*=QuantumScale*q->alpha;
    }
  if (image->colorspace == CMYKColorspace)
    {
      pixel=QuantumScale*(GetPixelBlack(image,p)-q->black);
      metric+=gamma*pixel*pixel;
      gamma*=QuantumScale*(QuantumRange-GetPixelBlack(image,p));
      gamma*=QuantumScale*(QuantumRange-q->black);
    }
  metric*=3.0;
  pixel=QuantumScale*(GetPixelRed(image,p)-q->red);
  if (IsHueCompatibleColorspace(image->colorspace) != MagickFalse)
    {
      /*
        Hue is circular: fold differences greater than a half turn.
      */
      if (fabs((double) pixel) > 0.5)
        pixel-=0.5;
      pixel*=2.0;
    }
  metric+=gamma*pixel*pixel;
  pixel=QuantumScale*(GetPixelGreen(image,p)-q->green);
  metric+=gamma*pixel*pixel;
  pixel=QuantumScale*(GetPixelBlue(image,p)-q->blue);
  metric+=gamma*pixel*pixel;
  return(metric);
}
MagickExport MagickBooleanType KmeansImage(Image *image,
  const size_t number_colors,const size_t max_iterations,const double tolerance,
  ExceptionInfo *exception)
{
#define KmeansImageTag "Kmeans/Image"
#define RandomColorComponent(info) (QuantumRange*GetPseudoRandomValue(info))

  CacheView
    *image_view;

  const char
    *colors;

  double
    previous_tolerance;

  KmeansInfo
    **kmeans_pixels;

  MagickBooleanType
    verbose,
    status;

  ssize_t
    n;

  size_t
    number_threads;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  colors=GetImageArtifact(image,"kmeans:seed-colors");
  if (colors == (const char *) NULL)
    {
      CubeInfo
        *cube_info;

      QuantizeInfo
        *quantize_info;

      size_t
        colors,  /* shadows the outer artifact string deliberately */
        depth;

      /*
        Seed clusters from color quantization.
      */
      quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
      quantize_info->colorspace=image->colorspace;
      quantize_info->number_colors=number_colors;
      quantize_info->dither_method=NoDitherMethod;
      /*
        Tree depth is roughly Log4(number_colors)+1.
      */
      colors=number_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      cube_info=GetCubeInfo(quantize_info,depth,number_colors);
      if (cube_info == (CubeInfo *) NULL)
        {
          quantize_info=DestroyQuantizeInfo(quantize_info);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      status=ClassifyImageColors(cube_info,image,exception);
      if (status != MagickFalse)
        {
          if (cube_info->colors > cube_info->maximum_colors)
            ReduceImageColors(image,cube_info);
          status=SetImageColormap(image,cube_info,exception);
        }
      DestroyCubeInfo(cube_info);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      if (status == MagickFalse)
        return(status);
    }
  else
    {
      char
        color[MagickPathExtent];

      const char
        *p;

      /*
        Seed clusters from color list (e.g. red;green;blue).
      */
      status=AcquireImageColormap(image,number_colors,exception);
      if (status == MagickFalse)
        return(status);
      for (n=0, p=colors; n < (ssize_t) image->colors; n++)
      {
        const char
          *q;

        /*
          Advance q to the next semicolon (or the end of the list).
        */
        for (q=p; *q != '\0'; q++)
          if (*q == ';')
            break;
        (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1,
          MagickPathExtent));
        (void) QueryColorCompliance(color,AllCompliance,image->colormap+n,
          exception);
        if (*q == '\0')
          {
            n++;
            break;
          }
        p=q+1;
      }
      if (n < (ssize_t) image->colors)
        {
          RandomInfo
            *random_info;

          /*
            Seed clusters from random values.
          */
          random_info=AcquireRandomInfo();
          for ( ; n < (ssize_t) image->colors; n++)
          {
            (void) QueryColorCompliance("#000",AllCompliance,image->colormap+n,
              exception);
            image->colormap[n].red=RandomColorComponent(random_info);
            image->colormap[n].green=RandomColorComponent(random_info);
            image->colormap[n].blue=RandomColorComponent(random_info);
            if (image->alpha_trait != UndefinedPixelTrait)
              image->colormap[n].alpha=RandomColorComponent(random_info);
            if (image->colorspace == CMYKColorspace)
              image->colormap[n].black=RandomColorComponent(random_info);
          }
          random_info=DestroyRandomInfo(random_info);
        }
    }
  /*
    Iterative refinement.
  */
  kmeans_pixels=AcquireKmeansThreadSet(number_colors);
  if (kmeans_pixels == (KmeansInfo **) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  previous_tolerance=0.0;
  verbose=IsStringTrue(GetImageArtifact(image,"debug"));
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (n=0; n < (ssize_t) max_iterations; n++)
  {
    double
      distortion;

    ssize_t
      i;

    ssize_t
      y;

    /*
      Reset the per-thread accumulators for this iteration.
    */
    for (i=0; i < (ssize_t) number_threads; i++)
      (void) memset(kmeans_pixels[i],0,image->colors*sizeof(*kmeans_pixels[i]));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(dynamic) shared(status) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      Quantum
        *magick_restrict q;

      ssize_t
        x;

      if (status == MagickFalse)
        continue;
      q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        double
          min_distance;

        ssize_t
          i;

        ssize_t
          j;

        /*
          Assign each pixel whose mean has the least squared color distance.
        */
        j=0;
        min_distance=KmeansMetric(image,q,image->colormap+0);
        for (i=1; i < (ssize_t) image->colors; i++)
        {
          double
            distance;

          /*
            Near-zero distance cannot be improved on; stop searching.
          */
          if (min_distance <= MagickEpsilon)
            break;
          distance=KmeansMetric(image,q,image->colormap+i);
          if (distance < min_distance)
            {
              min_distance=distance;
              j=i;
            }
        }
        kmeans_pixels[id][j].red+=QuantumScale*GetPixelRed(image,q);
        kmeans_pixels[id][j].green+=QuantumScale*GetPixelGreen(image,q);
        kmeans_pixels[id][j].blue+=QuantumScale*GetPixelBlue(image,q);
        if (image->alpha_trait != UndefinedPixelTrait)
          kmeans_pixels[id][j].alpha+=QuantumScale*GetPixelAlpha(image,q);
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[id][j].black+=QuantumScale*GetPixelBlack(image,q);
        kmeans_pixels[id][j].count++;
        kmeans_pixels[id][j].distortion+=min_distance;
        SetPixelIndex(image,(Quantum) j,q);
        q+=GetPixelChannels(image);
      }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
    }
    if (status == MagickFalse)
      break;
    /*
      Reduce sums to [0] entry.
    */
    for (i=1; i < (ssize_t) number_threads; i++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) image->colors; j++)
      {
        kmeans_pixels[0][j].red+=kmeans_pixels[i][j].red;
        kmeans_pixels[0][j].green+=kmeans_pixels[i][j].green;
        kmeans_pixels[0][j].blue+=kmeans_pixels[i][j].blue;
        if (image->alpha_trait != UndefinedPixelTrait)
          kmeans_pixels[0][j].alpha+=kmeans_pixels[i][j].alpha;
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[0][j].black+=kmeans_pixels[i][j].black;
        kmeans_pixels[0][j].count+=kmeans_pixels[i][j].count;
        kmeans_pixels[0][j].distortion+=kmeans_pixels[i][j].distortion;
      }
    }
    /*
      Calculate the new means (centroids) of the pixels in the new clusters.
    */
    distortion=0.0;
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      double
        gamma;

      gamma=PerceptibleReciprocal((double) kmeans_pixels[0][i].count);
      image->colormap[i].red=gamma*QuantumRange*kmeans_pixels[0][i].red;
      image->colormap[i].green=gamma*QuantumRange*kmeans_pixels[0][i].green;
      image->colormap[i].blue=gamma*QuantumRange*kmeans_pixels[0][i].blue;
      if (image->alpha_trait != UndefinedPixelTrait)
        image->colormap[i].alpha=gamma*QuantumRange*kmeans_pixels[0][i].alpha;
      if (image->colorspace == CMYKColorspace)
        image->colormap[i].black=gamma*QuantumRange*kmeans_pixels[0][i].black;
      distortion+=kmeans_pixels[0][i].distortion;
    }
    if (verbose != MagickFalse)
      (void) FormatLocaleFile(stderr,"distortion[%.20g]: %*g %*g\n",(double) n,
        GetMagickPrecision(),distortion,GetMagickPrecision(),
        fabs(distortion-previous_tolerance));
    /*
      Converged when the change in distortion falls within tolerance.
    */
    if (fabs(distortion-previous_tolerance) <= tolerance)
      break;
    previous_tolerance=distortion;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,KmeansImageTag,(MagickOffsetType) n,
          max_iterations);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  kmeans_pixels=DestroyKmeansThreadSet(kmeans_pixels);
  if (image->progress_monitor != (MagickProgressMonitor) NULL)
    (void) SetImageProgress(image,KmeansImageTag,(MagickOffsetType)
      max_iterations-1,max_iterations);
  if (status == MagickFalse)
    return(status);
  /*
    Sync the pixel store with the updated colormap.
  */
  return(SyncImage(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const DitherMethod dither_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither_method: choose from UndefinedDitherMethod, NoDitherMethod,
% RiemersmaDitherMethod, FloydSteinbergDitherMethod.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const DitherMethod dither_method,ExceptionInfo *exception)
{
#define PosterizeImageTag "Posterize/Image"
/* Snap a channel value onto one of `levels` evenly spaced quantum values. */
#define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \
  MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Note: the parallel for-loop below is the body of this if-statement.
  */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(progress,status) \
      magick_number_threads(image,image,image->colors,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double)
          PosterizePixel(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double)
          PosterizePixel(image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double)
          PosterizePixel(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double)
          PosterizePixel(image->colormap[i].alpha);
    }
  /*
    Posterize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Only channels flagged for update are posterized; black and alpha
        additionally require CMYK colorspace / an alpha trait respectively.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Reduce to at most levels^3 colors using the requested dither method.
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither_method=dither_method;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image,exception);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
% The format of the PruneChild method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  size_t
    number_children;

  ssize_t
    n;

  /*
    Post-order traversal: prune this node's descendants before the node
    itself.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (n=0; n < (ssize_t) number_children; n++)
    if (node_info->child[n] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[n]);
  if (cube_info->nodes <= cube_info->maximum_colors)
    return;
  /*
    Fold this node's color statistics into its parent and unlink it.
  */
  parent=node_info->parent;
  parent->number_unique+=node_info->number_unique;
  parent->total_color.red+=node_info->total_color.red;
  parent->total_color.green+=node_info->total_color.green;
  parent->total_color.blue+=node_info->total_color.blue;
  parent->total_color.alpha+=node_info->total_color.alpha;
  parent->child[node_info->id]=(NodeInfo *) NULL;
  cube_info->nodes--;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    n;

  /*
    Visit children first, then prune any node sitting at the maximum tree
    depth so its statistics merge into its parent.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (n=0; n < (ssize_t) number_children; n++)
  {
    const NodeInfo
      *child = node_info->child[n];

    if (child != (NodeInfo *) NULL)
      PruneLevel(cube_info,child);
  }
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    n;

  /*
    Descend first, then prune any node that lies deeper than the cube's
    configured depth, merging its statistics into its parent.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (n=0; n < (ssize_t) number_children; n++)
  {
    const NodeInfo
      *child = node_info->child[n];

    if (child != (NodeInfo *) NULL)
      PruneToCubeDepth(cube_info,child);
  }
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  ImageType
    type;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Clamp the requested number of colors: 0 means "as many as fit", and
    nothing may exceed MaxColormapSize.
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  type=IdentifyImageType(image,exception);
  if (IsGrayImageType(type) != MagickFalse)
    (void) SetGrayscaleImage(image,exception);
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      /*
        Shallower trees suffice when dithering smooths the result or when
        alpha consumes tree fanout; grayscale gets the full depth.
      */
      if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
        depth--;
      if ((image->alpha_trait != UndefinedPixelTrait) && (depth > 5))
        depth--;
      if (IsGrayImageType(type) != MagickFalse)
        depth=MaxTreeDepth;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image.
      */
      if (cube_info->colors > cube_info->maximum_colors)
        ReduceImageColors(image,cube_info);
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    proceed,
    status;

  MagickProgressMonitor
    progress_monitor;

  size_t
    depth,
    maximum_colors,
    number_images;

  ssize_t
    i;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
      */
      status=QuantizeImage(quantize_info,images,exception);
      return(status);
    }
  status=MagickFalse;
  /*
    Clamp the requested number of colors: 0 means "as many as fit", and
    nothing may exceed MaxColormapSize.
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if (quantize_info->dither_method != NoDitherMethod)
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  number_images=GetImageListLength(images);
  image=images;
  /*
    Pass 1: classify the colors of every image into one shared color cube.
    The per-image progress monitor is suspended so only the sequence-level
    progress below is reported.
  */
  for (i=0; image != (Image *) NULL; i++)
  {
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
      image->client_data);
    status=ClassifyImageColors(cube_info,image,exception);
    if (status == MagickFalse)
      break;
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in an image sequence.
      */
      ReduceImageColors(images,cube_info);
      image=images;
      /*
        Pass 2: map every image's pixels onto the shared reduced colormap.
      */
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeErrorFlatten() traverses the color cube and flattens the quantization
% error into a sorted 1D array. This accelerates the color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% double *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is current pointer.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
const NodeInfo *node_info,const ssize_t offset,double *quantize_error)
{
size_t
n,
number_children;
ssize_t
i;
if (offset >= (ssize_t) cube_info->nodes)
return(0);
quantize_error[offset]=node_info->quantize_error;
n=1;
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children ; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n,
quantize_error);
return(n);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
/*
  Prune every node of the subtree rooted at node_info whose quantization
  error is at or below cube_info->pruning_threshold.  Surviving nodes with
  unique colors are counted, and the smallest surviving error becomes the
  threshold for the next pruning pass.
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    child_limit;

  ssize_t
    child;

  /*
    Depth-first: visit the children before deciding this node's fate.
  */
  child_limit=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (child=0; child < (ssize_t) child_limit; child++)
    if (node_info->child[child] != (NodeInfo *) NULL)
      Reduce(cube_info,node_info->child[child]);
  if (node_info->quantize_error <= cube_info->pruning_threshold)
    PruneChild(cube_info,node_info);
  else
    {
      /*
        Node survives this pass: count it as a color and track the minimum
        surviving error as the next pruning threshold.
      */
      if (node_info->number_unique > 0)
        cube_info->colors++;
      if (node_info->quantize_error < cube_info->next_threshold)
        cube_info->next_threshold=node_info->quantize_error;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except at the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
/*
  qsort() comparator for quantization errors.

  The epsilon-equality test is performed FIRST so the ordering is symmetric:
  two errors within MagickEpsilon of each other compare equal regardless of
  argument order.  (The previous code tested *p > *q before the epsilon
  check, so compare(a,b) could return 1 while compare(b,a) returned 0 —
  an inconsistent comparison function, which is undefined behavior for
  qsort().)
*/
static int QuantizeErrorCompare(const void *error_p,const void *error_q)
{
  double
    *p,
    *q;

  p=(double *) error_p;
  q=(double *) error_q;
  if (fabs(*q-*p) <= MagickEpsilon)
    return(0);
  return(*p > *q ? 1 : -1);
}
/*
  Repeatedly prune the color cube tree until the number of colors is at
  most cube_info->maximum_colors.  When far over budget, a flattened and
  sorted copy of all node errors is used to jump-start the pruning
  threshold instead of iterating one epsilon at a time.
*/
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag  "Reduce/Image"

  MagickBooleanType
    proceed;

  MagickOffsetType
    offset;

  size_t
    span;

  cube_info->next_threshold=0.0;
  if (cube_info->colors > cube_info->maximum_colors)
    {
      double
        *quantize_error;

      /*
        Enable rapid reduction of the number of unique colors.
      */
      quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes,
        sizeof(*quantize_error));
      if (quantize_error != (double *) NULL)
        {
          /*
            AcquireQuantumMemory() does not clear the allocation and
            QuantizeErrorFlatten() may write fewer than cube_info->nodes
            entries, so zero the buffer first; otherwise qsort() below
            reads indeterminate doubles.
          */
          (void) memset(quantize_error,0,cube_info->nodes*
            sizeof(*quantize_error));
          (void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
            quantize_error);
          qsort(quantize_error,cube_info->nodes,sizeof(double),
            QuantizeErrorCompare);
          if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
            cube_info->next_threshold=quantize_error[cube_info->nodes-110*
              (cube_info->maximum_colors+1)/100];
          quantize_error=(double *) RelinquishMagickMemory(quantize_error);
        }
    }
  for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
  {
    /*
      Prune all nodes at or below the threshold, then raise the threshold
      to the smallest surviving error (computed by Reduce()).
    */
    cube_info->pruning_threshold=cube_info->next_threshold;
    cube_info->next_threshold=cube_info->root->quantize_error-1;
    cube_info->colors=0;
    Reduce(cube_info,cube_info->root);
    offset=(MagickOffsetType) span-cube_info->colors;
    proceed=SetImageProgress(image,ReduceImageTag,offset,span-
      cube_info->maximum_colors+1);
    if (proceed == MagickFalse)
      break;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest of the colors
% from the reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  /*
    Validate arguments, then build the color cube used for the remap.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Classify the colors of the reference image, then assign the closest of
    those colors to each pixel of the target image.
  */
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      cube_info->quantize_info->number_colors=cube_info->colors;
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImages method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    status;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  if (remap_image == (Image *) NULL)
    {
      /*
        No reference image: fall back to building a global colormap for the
        whole sequence.
      */
      status=QuantizeImages(quantize_info,images,exception);
      return(status);
    }
  /*
    Classify image colors from the reference image.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Assign the reference colors to every image in the sequence,
        stopping at the first failure.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      for (image=images; image != (Image *) NULL;
           image=GetNextImageInList(image))
      {
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
  /*
    qsort() comparator: order colormap entries by pixel intensity.  The
    intensity difference is clamped to the int range before truncation.
  */
  double
    delta;

  PixelInfo
    *color_1,
    *color_2;

  color_1=(PixelInfo *) x;
  color_2=(PixelInfo *) y;
  delta=GetPixelInfoIntensity((const Image *) NULL,color_1)-
    GetPixelInfoIntensity((const Image *) NULL,color_2);
  if (delta < (double) INT_MIN)
    delta=(double) INT_MIN;
  else
    if (delta > (double) INT_MAX)
      delta=(double) INT_MAX;
  return((int) delta);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  Convert an image to a PseudoClass grayscale image: transform to gray,
  build a colormap keyed on pixel intensity (one entry per distinct gray
  level), merge duplicate entries after an intensity sort, and rewrite
  every pixel as a colormap index.  Returns MagickTrue on success.
*/
static MagickBooleanType SetGrayscaleImage(Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
PixelInfo
*colormap;
size_t
extent;
ssize_t
*colormap_index,
i,
j,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->type != GrayscaleType)
(void) TransformImageColorspace(image,GRAYColorspace,exception);
/* One slot per possible intensity value (plus the existing colors). */
extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
sizeof(*colormap_index));
if (colormap_index == (ssize_t *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
if (image->storage_class != PseudoClass)
{
/*
  DirectClass source: discover the distinct gray levels.  memset with -1
  marks every intensity slot as "no colormap entry yet".
*/
(void) memset(colormap_index,(-1),extent*sizeof(*colormap_index));
if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
image->colors=0;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
size_t
intensity;
intensity=ScaleQuantumToMap(GetPixelRed(image,q));
if (colormap_index[intensity] < 0)
{
/*
  Double-checked under the critical section: only one thread may
  append a new colormap entry for a given intensity.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
if (colormap_index[intensity] < 0)
{
colormap_index[intensity]=(ssize_t) image->colors;
image->colormap[image->colors].red=(double)
GetPixelRed(image,q);
image->colormap[image->colors].green=(double)
GetPixelGreen(image,q);
image->colormap[image->colors].blue=(double)
GetPixelBlue(image,q);
image->colors++;
}
}
SetPixelIndex(image,(Quantum) colormap_index[intensity],q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
}
/*
  Sort the colormap by intensity.  The alpha field temporarily stores each
  entry's pre-sort position so pixel indexes can be remapped afterwards.
*/
(void) memset(colormap_index,0,extent*sizeof(*colormap_index));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].alpha=(double) i;
qsort((void *) image->colormap,image->colors,sizeof(PixelInfo),
IntensityCompare);
colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap));
if (colormap == (PixelInfo *) NULL)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
/* Compress out adjacent duplicate entries from the sorted colormap. */
j=0;
colormap[j]=image->colormap[0];
for (i=0; i < (ssize_t) image->colors; i++)
{
if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse)
{
j++;
colormap[j]=image->colormap[i];
}
colormap_index[(ssize_t) image->colormap[i].alpha]=j;
}
image->colors=(size_t) (j+1);
image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
image->colormap=colormap;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
/* Rewrite each pixel's index to its slot in the deduplicated colormap. */
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap(
GetPixelIndex(image,q))],q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
image->type=GrayscaleType;
if (SetImageMonochrome(image,exception) != MagickFalse)
image->type=BilevelType;
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColormap() traverses the color cube tree and sets the colormap of
% the image. A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero.
%
% The format of the SetImageColormap method is:
%
% MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  size_t
    number_colors;

  /*
    Allocate a colormap large enough for every color the cube may define,
    populate it from the cube, then trim it to the count actually used.
  */
  number_colors=MagickMax(cube_info->maximum_colors,cube_info->colors);
  if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  image->colors=0;
  DefineImageColormap(image,cube_info,cube_info->root);
  if (image->colors == number_colors)
    return(MagickTrue);
  image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
    image->colors+1,sizeof(*image->colormap));
  if (image->colormap == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  return(MagickTrue);
}
|
compatibility.h | // -*- C++ -*-
// Copyright (C) 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/compatibility.h
* @brief Compatibility layer, mostly concerned with atomic operations.
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Felix Putze.
#ifndef _GLIBCXX_PARALLEL_COMPATIBILITY_H
#define _GLIBCXX_PARALLEL_COMPATIBILITY_H 1
#include <parallel/types.h>
#include <parallel/base.h>
#if defined(__SUNPRO_CC) && defined(__sparc)
#include <sys/atomic.h>
#endif
#if !defined(_WIN32) || defined (__CYGWIN__)
#include <sched.h>
#endif
#if defined(_MSC_VER)
#include <Windows.h>
#include <intrin.h>
#undef max
#undef min
#endif
#ifdef __MINGW32__
// Including <windows.h> will drag in all the windows32 names. Since
// that can cause user code portability problems, we just declare the
// one needed function here.
extern "C"
__attribute((dllimport)) void __attribute__((stdcall)) Sleep (unsigned long);
#endif
namespace __gnu_parallel
{
#if defined(__ICC)
// ICC/x86 only: 32-bit atomic fetch-and-add via `lock xadd`.
// Returns the value of *__x before the addition.
// NOTE: the asm constraints must be the plain letters "r"/"m" — the
// uglified spellings "=__r"/"=__m" in the previous version are not valid
// constraint strings and can never compile.
template<typename _MustBeInt = int>
int32_t __faa32(int32_t* __x, int32_t __inc)
{
  asm volatile("lock xadd %0,%1"
               : "=r" (__inc), "=m" (*__x)
               : "0" (__inc)
               : "memory");
  return __inc;
}
#if defined(__x86_64)
// ICC/x86-64 only: 64-bit atomic fetch-and-add via `lock xadd`.
// Returns the value of *__x before the addition.
// NOTE: constraints fixed from the invalid "=__r"/"=__m" to "=r"/"=m".
template<typename _MustBeInt = int>
int64_t __faa64(int64_t* __x, int64_t __inc)
{
  asm volatile("lock xadd %0,%1"
               : "=r" (__inc), "=m" (*__x)
               : "0" (__inc)
               : "memory");
  return __inc;
}
#endif
#endif
// atomic functions only work on integers
/** @brief Add a value to a variable, atomically.
*
* Implementation is heavily platform-dependent.
* @param __ptr Pointer to a 32-bit signed integer.
* @param __addend Value to add.
*/
// Atomically add __addend to the 32-bit integer at __ptr and return the
// value it held before the addition.  One branch per supported toolchain.
inline int32_t
__fetch_and_add_32(volatile int32_t* __ptr, int32_t __addend)
{
#if defined(__ICC) //x86 version
return _InterlockedExchangeAdd((void*)__ptr, __addend);
#elif defined(__ECC) //IA-64 version
return _InterlockedExchangeAdd((void*)__ptr, __addend);
#elif defined(__ICL) || defined(_MSC_VER)
return _InterlockedExchangeAdd(reinterpret_cast<volatile long*>(__ptr),
__addend);
#elif defined(__GNUC__)
return __sync_fetch_and_add(__ptr, __addend);
#elif defined(__SUNPRO_CC) && defined(__sparc)
// Sun/SPARC has no fetch-and-add primitive; emulate it with a CAS loop.
volatile int32_t __before, __after;
do
{
__before = *__ptr;
__after = __before + __addend;
} while (atomic_cas_32((volatile unsigned int*)__ptr, __before,
__after) != __before);
return __before;
#else //fallback, slow
// Last resort: serialize through an OpenMP critical section.
#pragma message("slow __fetch_and_add_32")
int32_t __res;
#pragma omp critical
{
__res = *__ptr;
*(__ptr) += __addend;
}
return __res;
#endif
}
/** @brief Add a value to a variable, atomically.
*
* Implementation is heavily platform-dependent.
* @param __ptr Pointer to a 64-bit signed integer.
* @param __addend Value to add.
*/
// Atomically add __addend to the 64-bit integer at __ptr and return the
// value it held before the addition.  On 32-bit Windows there is no
// 64-bit interlocked add, so that branch asserts and returns 0.
inline int64_t
__fetch_and_add_64(volatile int64_t* __ptr, int64_t __addend)
{
#if defined(__ICC) && defined(__x86_64) //x86 version
return __faa64<int>((int64_t*)__ptr, __addend);
#elif defined(__ECC) //IA-64 version
return _InterlockedExchangeAdd64((void*)__ptr, __addend);
#elif defined(__ICL) || defined(_MSC_VER)
#ifndef _WIN64
_GLIBCXX_PARALLEL_ASSERT(false); //not available in this case
return 0;
#else
return _InterlockedExchangeAdd64(__ptr, __addend);
#endif
#elif defined(__GNUC__) && defined(__x86_64)
return __sync_fetch_and_add(__ptr, __addend);
#elif defined(__GNUC__) && defined(__i386) && \
(defined(__i686) || defined(__pentium4) || defined(__athlon) \
|| defined(__k8) || defined(__core2))
// 64-bit __sync builtins exist on i386 only for CPUs with cmpxchg8b.
return __sync_fetch_and_add(__ptr, __addend);
#elif defined(__SUNPRO_CC) && defined(__sparc)
// Sun/SPARC: emulate fetch-and-add with a CAS loop.
volatile int64_t __before, __after;
do
{
__before = *__ptr;
__after = __before + __addend;
} while (atomic_cas_64((volatile unsigned long long*)__ptr, __before,
__after) != __before);
return __before;
#else //fallback, slow
#if defined(__GNUC__) && defined(__i386)
// XXX doesn't work with -march=native
//#warning "please compile with -march=i686 or better"
#endif
// Last resort: serialize through an OpenMP critical section.
#pragma message("slow __fetch_and_add_64")
int64_t __res;
#pragma omp critical
{
__res = *__ptr;
*(__ptr) += __addend;
}
return __res;
#endif
}
/** @brief Add a value to a variable, atomically.
*
* Implementation is heavily platform-dependent.
* @param __ptr Pointer to a signed integer.
* @param __addend Value to add.
*/
// Width-dispatching atomic fetch-and-add: forwards to the 32- or 64-bit
// implementation depending on sizeof(_Tp).  Only those two widths are
// supported.
template<typename _Tp>
  inline _Tp
  __fetch_and_add(volatile _Tp* __ptr, _Tp __addend)
  {
    if (sizeof(_Tp) == sizeof(int32_t))
      return
        (_Tp)__fetch_and_add_32((volatile int32_t*) __ptr, (int32_t)__addend);
    else if (sizeof(_Tp) == sizeof(int64_t))
      return
        (_Tp)__fetch_and_add_64((volatile int64_t*) __ptr, (int64_t)__addend);
    else
      {
        // Unsupported width: assert in debug builds.  The explicit return
        // keeps release builds (assert compiled out) from flowing off the
        // end of a non-void function, which is undefined behavior.
        _GLIBCXX_PARALLEL_ASSERT(false);
        return _Tp();
      }
  }
#if defined(__ICC)
// ICC/x86 only: 32-bit compare-and-swap via `lock cmpxchgl`.
// Returns the previous value of *__ptr (equal to __old on success).
// NOTE: the memory constraint must be the plain letter "m" — the uglified
// "__m" spelling in the previous version is not a valid constraint string.
template<typename _MustBeInt = int>
  inline int32_t
  __cas32(volatile int32_t* __ptr, int32_t __old, int32_t __nw)
  {
    int32_t __before;
    __asm__ __volatile__("lock; cmpxchgl %1,%2"
                         : "=a"(__before)
                         : "q"(__nw), "m"(*(volatile long long*)(__ptr)),
                           "0"(__old)
                         : "memory");
    return __before;
  }
#if defined(__x86_64)
// ICC/x86-64 only: 64-bit compare-and-swap via `lock cmpxchgq`.
// Returns the previous value of *__ptr (equal to __old on success).
// NOTE: memory constraint fixed from the invalid "__m" to "m".
template<typename _MustBeInt = int>
  inline int64_t
  __cas64(volatile int64_t *__ptr, int64_t __old, int64_t __nw)
  {
    int64_t __before;
    __asm__ __volatile__("lock; cmpxchgq %1,%2"
                         : "=a"(__before)
                         : "q"(__nw), "m"(*(volatile long long*)(__ptr)),
                           "0"(__old)
                         : "memory");
    return __before;
  }
#endif
#endif
/** @brief Compare @c *__ptr and @c __comparand. If equal, let @c
* *__ptr=__replacement and return @c true, return @c false otherwise.
*
* Implementation is heavily platform-dependent.
* @param __ptr Pointer to 32-bit signed integer.
* @param __comparand Compare value.
* @param __replacement Replacement value.
*/
// 32-bit CAS: if *__ptr equals __comparand, store __replacement and return
// true; otherwise leave *__ptr unchanged and return false.
inline bool
__compare_and_swap_32(volatile int32_t* __ptr, int32_t __comparand,
int32_t __replacement)
{
#if defined(__ICC) //x86 version
return _InterlockedCompareExchange((void*)__ptr, __replacement,
__comparand) == __comparand;
#elif defined(__ECC) //IA-64 version
return _InterlockedCompareExchange((void*)__ptr, __replacement,
__comparand) == __comparand;
#elif defined(__ICL) || defined(_MSC_VER)
return _InterlockedCompareExchange(
reinterpret_cast<volatile long*>(__ptr),
__replacement, __comparand)
== __comparand;
#elif defined(__GNUC__)
return __sync_bool_compare_and_swap(__ptr, __comparand, __replacement);
#elif defined(__SUNPRO_CC) && defined(__sparc)
return atomic_cas_32((volatile unsigned int*)__ptr, __comparand,
__replacement) == __comparand;
#else
// Last resort: serialize through an OpenMP critical section.
#pragma message("slow __compare_and_swap_32")
bool __res = false;
#pragma omp critical
{
if (*__ptr == __comparand)
{
*__ptr = __replacement;
__res = true;
}
}
return __res;
#endif
}
/** @brief Compare @c *__ptr and @c __comparand. If equal, let @c
* *__ptr=__replacement and return @c true, return @c false otherwise.
*
* Implementation is heavily platform-dependent.
* @param __ptr Pointer to 64-bit signed integer.
* @param __comparand Compare value.
* @param __replacement Replacement value.
*/
// 64-bit CAS: if *__ptr equals __comparand, store __replacement and return
// true; otherwise leave *__ptr unchanged and return false.  On 32-bit
// Windows there is no 64-bit interlocked CAS, so that branch asserts.
inline bool
__compare_and_swap_64(volatile int64_t* __ptr, int64_t __comparand,
int64_t __replacement)
{
#if defined(__ICC) && defined(__x86_64) //x86 version
return __cas64<int>(__ptr, __comparand, __replacement) == __comparand;
#elif defined(__ECC) //IA-64 version
return _InterlockedCompareExchange64((void*)__ptr, __replacement,
__comparand) == __comparand;
#elif defined(__ICL) || defined(_MSC_VER)
#ifndef _WIN64
_GLIBCXX_PARALLEL_ASSERT(false); //not available in this case
return 0;
#else
return _InterlockedCompareExchange64(__ptr, __replacement,
__comparand) == __comparand;
#endif
#elif defined(__GNUC__) && defined(__x86_64)
return __sync_bool_compare_and_swap(__ptr, __comparand, __replacement);
#elif defined(__GNUC__) && defined(__i386) && \
(defined(__i686) || defined(__pentium4) || defined(__athlon) \
|| defined(__k8) || defined(__core2))
// 64-bit __sync builtins exist on i386 only for CPUs with cmpxchg8b.
return __sync_bool_compare_and_swap(__ptr, __comparand, __replacement);
#elif defined(__SUNPRO_CC) && defined(__sparc)
return atomic_cas_64((volatile unsigned long long*)__ptr,
__comparand, __replacement) == __comparand;
#else
#if defined(__GNUC__) && defined(__i386)
// XXX -march=native
//#warning "please compile with -march=i686 or better"
#endif
// Last resort: serialize through an OpenMP critical section.
#pragma message("slow __compare_and_swap_64")
bool __res = false;
#pragma omp critical
{
if (*__ptr == __comparand)
{
*__ptr = __replacement;
__res = true;
}
}
return __res;
#endif
}
/** @brief Compare @c *__ptr and @c __comparand. If equal, let @c
* *__ptr=__replacement and return @c true, return @c false otherwise.
*
* Implementation is heavily platform-dependent.
* @param __ptr Pointer to signed integer.
* @param __comparand Compare value.
* @param __replacement Replacement value. */
// Width-dispatching CAS: forwards to the 32- or 64-bit implementation
// depending on sizeof(_Tp).  Only those two widths are supported.
template<typename _Tp>
  inline bool
  __compare_and_swap(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement)
  {
    if (sizeof(_Tp) == sizeof(int32_t))
      return __compare_and_swap_32((volatile int32_t*) __ptr,
                                   (int32_t)__comparand,
                                   (int32_t)__replacement);
    else if (sizeof(_Tp) == sizeof(int64_t))
      return __compare_and_swap_64((volatile int64_t*) __ptr,
                                   (int64_t)__comparand,
                                   (int64_t)__replacement);
    else
      {
        // Unsupported width: assert in debug builds.  The explicit return
        // keeps release builds (assert compiled out) from flowing off the
        // end of a non-void function, which is undefined behavior.
        _GLIBCXX_PARALLEL_ASSERT(false);
        return false;
      }
  }
/** @brief Yield the control to another thread, without waiting for
the end to the time slice. */
inline void
__yield()
{
#if defined (_WIN32) && !defined (__CYGWIN__)
// Win32: Sleep(0) relinquishes the remainder of the current time slice.
Sleep(0);
#else
// POSIX: sched_yield() places the caller at the end of its run queue.
sched_yield();
#endif
}
} // end namespace
#endif /* _GLIBCXX_PARALLEL_COMPATIBILITY_H */
|
correlation.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#define EXTRALARGE_DATASET
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "correlation.h"
/* Array initialization. */
/* Array initialization: set *float_n to 1.2 and fill data[i][j] with the
   deterministic pattern (i*j)/M so runs are reproducible. */
static
void init_array (int m,
                 int n,
                 DATA_TYPE *float_n,
                 DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
  int row, col;

  *float_n = 1.2;
  for (row = 0; row < m; row++)
    for (col = 0; col < n; col++)
      data[row][col] = ((DATA_TYPE) row*col) / M;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code: dump symmat to stderr so the compiler cannot eliminate the
   computation.  One value per DATA_PRINTF_MODIFIER, newline every 20th. */
static
void print_array(int m,
                 DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
  int row, col;

  for (row = 0; row < m; row++)
    for (col = 0; col < m; col++) {
      fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[row][col]);
      if ((row * m + col) % 20 == 0)
        fprintf (stderr, "\n");
    }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel. The whole function will be timed,
   including the call and return.
   Computes the m x m correlation matrix of the n x m data matrix:
   column means, column standard deviations (clamped away from zero),
   centering/reduction of the columns, then the pairwise column products.
   NOTE: the #P1/#P2/#P3 tokens are placeholders (schedule kind, chunk
   size, thread count) substituted by a tuning harness before this file
   is compiled; the file does not compile as-is. */
static
void kernel_correlation(int m, int n,
DATA_TYPE float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
DATA_TYPE POLYBENCH_1D(mean,M,m),
DATA_TYPE POLYBENCH_1D(stddev,M,m))
{
int i, j, j1, j2;
DATA_TYPE eps = 0.1f;
#define sqrt_of_array_cell(x,j) sqrt(x[j])
#pragma scop
/* Determine mean of column vectors of input data matrix */
#pragma omp parallel private(i, j, j2) num_threads(#P3)
{
#pragma omp for schedule(#P1, #P2)
for (j = 0; j < _PB_M; j++)
{
mean[j] = 0.0;
for (i = 0; i < _PB_N; i++)
mean[j] += data[i][j];
mean[j] /= float_n;
}
/* Determine standard deviations of column vectors of data matrix. */
#pragma omp for schedule(#P1, #P2)
for (j = 0; j < _PB_M; j++)
{
stddev[j] = 0.0;
for (i = 0; i < _PB_N; i++)
stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]);
stddev[j] /= float_n;
stddev[j] = sqrt_of_array_cell(stddev, j);
/* The following is an inelegant but usual way to handle
near-zero std. dev. values, which below would cause a zero-
divide. */
stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j];
}
/* Center and reduce the column vectors. */
#pragma omp for schedule(#P1, #P2)
for (i = 0; i < _PB_N; i++)
for (j = 0; j < _PB_M; j++)
{
data[i][j] -= mean[j];
data[i][j] /= sqrt(float_n) * stddev[j];
}
/* Calculate the m * m correlation matrix. */
#pragma omp for schedule(#P1, #P2)
for (j1 = 0; j1 < _PB_M-1; j1++)
{
symmat[j1][j1] = 1.0;
for (j2 = j1+1; j2 < _PB_M; j2++)
{
symmat[j1][j2] = 0.0;
for (i = 0; i < _PB_N; i++)
symmat[j1][j2] += (data[i][j1] * data[i][j2]);
symmat[j2][j1] = symmat[j1][j2];
}
}
}
#pragma endscop
/* The j1 loop above stops before the last column, so set the final
   diagonal element explicitly. */
symmat[_PB_M-1][_PB_M-1] = 1.0;
}
/* Benchmark driver: allocate, initialize, time the kernel, print the
   live-out data (to defeat dead-code elimination), and free.
   argc/argv are unused but required by the PolyBench harness signature. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
/* Variable declaration/allocation. */
DATA_TYPE float_n;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m);
/* Initialize array(s). */
init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_correlation (m, n, float_n,
POLYBENCH_ARRAY(data),
POLYBENCH_ARRAY(symmat),
POLYBENCH_ARRAY(mean),
POLYBENCH_ARRAY(stddev));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(symmat);
POLYBENCH_FREE_ARRAY(mean);
POLYBENCH_FREE_ARRAY(stddev);
return 0;
}
|
utils.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
#pragma once
#include <fcntl.h>
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <string>
#include <memory>
#include <random>
#include <set>
#ifdef __APPLE__
#else
#include <malloc.h>
#endif
#ifdef _WINDOWS
#include <Windows.h>
typedef HANDLE FileHandle;
#else
#include <unistd.h>
typedef int FileHandle;
#endif
#include "logger.h"
#include "cached_io.h"
#include "common_includes.h"
#include "windows_customizations.h"
#ifdef EXEC_ENV_OLS
#include "content_buf.h"
#include "memory_mapped_files.h"
#endif
// taken from
// https://github.com/Microsoft/BLAS-on-flash/blob/master/include/utils.h
// round up X to the nearest multiple of Y
#define ROUND_UP(X, Y) \
((((uint64_t)(X) / (Y)) + ((uint64_t)(X) % (Y) != 0)) * (Y))
#define DIV_ROUND_UP(X, Y) (((uint64_t)(X) / (Y)) + ((uint64_t)(X) % (Y) != 0))
// round down X to the nearest multiple of Y
#define ROUND_DOWN(X, Y) (((uint64_t)(X) / (Y)) * (Y))
// alignment tests
#define IS_ALIGNED(X, Y) ((uint64_t)(X) % (uint64_t)(Y) == 0)
#define IS_512_ALIGNED(X) IS_ALIGNED(X, 512)
#define IS_4096_ALIGNED(X) IS_ALIGNED(X, 4096)
typedef uint64_t _u64;
typedef int64_t _s64;
typedef uint32_t _u32;
typedef int32_t _s32;
typedef uint16_t _u16;
typedef int16_t _s16;
typedef uint8_t _u8;
typedef int8_t _s8;
namespace diskann {
static const size_t MAX_SIZE_OF_STREAMBUF = 2LL * 1024 * 1024 * 1024;
enum Metric { L2 = 0, INNER_PRODUCT = 1, FAST_L2 = 2, PQ = 3 };
// Allocate `size` bytes aligned to `align` and store the result in *ptr.
// `size` must be a multiple of `align` (asserted; aligned_alloc requires it).
inline void alloc_aligned(void** ptr, size_t size, size_t align) {
  *ptr = nullptr;
  assert(IS_ALIGNED(size, align));
#ifndef _WINDOWS
  *ptr = ::aligned_alloc(align, size);
#else
  // Windows spells it _aligned_malloc and swaps the argument order.
  *ptr = ::_aligned_malloc(size, align);
#endif
  assert(*ptr != nullptr);
}
// Release memory obtained from alloc_aligned(); a null pointer is a no-op.
// Gopal. Must have a check here if the pointer was actually allocated by
// _alloc_aligned
inline void aligned_free(void* ptr) {
  if (ptr == nullptr)
    return;
#ifndef _WINDOWS
  free(ptr);
#else
  ::_aligned_free(ptr);
#endif
}
inline void GenRandom(std::mt19937& rng, unsigned* addr, unsigned size,
unsigned N) {
for (unsigned i = 0; i < size; ++i) {
addr[i] = rng() % (N - size);
}
std::sort(addr, addr + size);
for (unsigned i = 1; i < size; ++i) {
if (addr[i] <= addr[i - 1]) {
addr[i] = addr[i - 1] + 1;
}
}
unsigned off = rng() % N;
for (unsigned i = 0; i < size; ++i) {
addr[i] = (addr[i] + off) % N;
}
}
// get_bin_metadata functions START
inline void get_bin_metadata_impl(std::basic_istream<char>& reader,
size_t& nrows, size_t& ncols) {
int nrows_32, ncols_32;
reader.read((char*) &nrows_32, sizeof(int));
reader.read((char*) &ncols_32, sizeof(int));
nrows = nrows_32;
ncols = ncols_32;
}
#ifdef EXEC_ENV_OLS
  // Reads the (nrows, ncols) header of `bin_file` served from the OLS
  // memory-mapped file store (no direct filesystem access).
  inline void get_bin_metadata(MemoryMappedFiles& files,
                               const std::string& bin_file, size_t& nrows,
                               size_t& ncols) {
    diskann::cout << "Getting metadata for file: " << bin_file << std::endl;
    auto fc = files.getContent(bin_file);
    // Wrap the mapped bytes in a stream so the common impl can parse them.
    auto cb = ContentBuf((char*) fc._content, fc._size);
    std::basic_istream<char> reader(&cb);
    get_bin_metadata_impl(reader, nrows, ncols);
  }
#endif
inline void get_bin_metadata(const std::string& bin_file, size_t& nrows,
size_t& ncols) {
std::ifstream reader(bin_file.c_str(), std::ios::binary);
get_bin_metadata_impl(reader, nrows, ncols);
}
// get_bin_metadata functions END
template<typename T>
inline std::string getValues(T* data, size_t num) {
std::stringstream stream;
stream << "[";
for (size_t i = 0; i < num; i++) {
stream << std::to_string(data[i]) << ",";
}
stream << "]" << std::endl;
return stream.str();
}
// load_bin functions START
  // Reads a bin file (int32 npts, int32 dim, then npts*dim values of T)
  // from `reader` into a freshly new[]'d buffer owned by the caller.
  // Throws diskann::ANNException when `actual_file_size` disagrees with
  // the size implied by the header.
  template<typename T>
  inline void load_bin_impl(std::basic_istream<char>& reader,
                            size_t actual_file_size, T*& data, size_t& npts,
                            size_t& dim) {
    int npts_i32, dim_i32;
    reader.read((char*) &npts_i32, sizeof(int));
    reader.read((char*) &dim_i32, sizeof(int));
    npts = (unsigned) npts_i32;
    dim = (unsigned) dim_i32;
    diskann::cout << "Metadata: #pts = " << npts << ", #dims = " << dim << "..."
                  << std::endl;
    // Validate the caller-supplied file size against the header.
    size_t expected_actual_file_size =
        npts * dim * sizeof(T) + 2 * sizeof(uint32_t);
    if (actual_file_size != expected_actual_file_size) {
      std::stringstream stream;
      stream << "Error. File size mismatch. Actual size is " << actual_file_size
             << " while expected size is " << expected_actual_file_size
             << " npts = " << npts << " dim = " << dim
             << " size of <T>= " << sizeof(T) << std::endl;
      diskann::cout << stream.str();
      throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
                                  __LINE__);
    }
    // Caller owns `data` and must delete[] it.
    data = new T[npts * dim];
    reader.read((char*) data, npts * dim * sizeof(T));
    // diskann::cout << "Last bytes: "
    //               << getValues<T>(data + (npts - 2) * dim, dim);
    // diskann::cout << "Finished reading bin file." << std::endl;
  }
#ifdef EXEC_ENV_OLS
  // Loads a bin file from the memory-mapped store. The returned `data`
  // points directly into the mapped content just past the 8-byte header —
  // nothing is copied, so the caller must NOT delete[] it, and the mapping
  // must outlive `data`.
  template<typename T>
  inline void load_bin(MemoryMappedFiles& files, const std::string& bin_file,
                       T*& data, size_t& npts, size_t& dim) {
    diskann::cout << "Reading bin file " << bin_file.c_str() << " ..."
                  << std::endl;
    auto fc = files.getContent(bin_file);
    // Header: two uint32 values (npts, dim) at the start of the mapping.
    uint32_t t_npts, t_dim;
    uint32_t* contentAsIntPtr = (uint32_t*) (fc._content);
    t_npts = *(contentAsIntPtr);
    t_dim = *(contentAsIntPtr + 1);
    npts = t_npts;
    dim = t_dim;
    // Sanity-check the mapped size against the header-derived size.
    auto actual_file_size = npts * dim * sizeof(T) + 2 * sizeof(uint32_t);
    if (actual_file_size != fc._size) {
      std::stringstream stream;
      stream << "Error. File size mismatch. Actual size is " << fc._size
             << " while expected size is " << actual_file_size
             << " npts = " << npts << " dim = " << dim
             << " size of <T>= " << sizeof(T) << std::endl;
      diskann::cout << stream.str();
      throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
                                  __LINE__);
    }
    data =
        (T*) ((char*) fc._content + 2 * sizeof(uint32_t));  // No need to copy!
  }
#endif
inline void wait_for_keystroke() {
int a;
std::cout << "Press any number to continue.." << std::endl;
std::cin >> a;
}
  // Loads a bin file from disk into a new[]'d buffer via load_bin_impl
  // (header validation and allocation happen there).
  // NOTE(review): the open is not checked — on a missing file tellg()
  // returns -1 and load_bin_impl reports it as a size mismatch; confirm
  // that is the intended error path.
  template<typename T>
  inline void load_bin(const std::string& bin_file, T*& data, size_t& npts,
                       size_t& dim) {
    // OLS
    //_u64 read_blk_size = 64 * 1024 * 1024;
    // cached_ifstream reader(bin_file, read_blk_size);
    // size_t actual_file_size = reader.get_file_size();
    // END OLS
    diskann::cout << "Reading bin file " << bin_file.c_str() << " ..."
                  << std::endl;
    // Open at the end to learn the file size, then rewind for parsing.
    std::ifstream reader(bin_file, std::ios::binary | std::ios::ate);
    uint64_t fsize = reader.tellg();
    reader.seekg(0);
    load_bin_impl<T>(reader, fsize, data, npts, dim);
  }
// load_bin functions END
  // Loads a ground-truth file: int32 npts, int32 dim, npts*dim uint32 ids,
  // optionally followed by npts*dim float distances. Which layout is
  // present is inferred from the file size; any other size throws
  // ANNException. `ids` (and `dists` when distances are present) are
  // new[]'d and owned by the caller.
  // NOTE(review): when the file has no distances, `dists` is left
  // unmodified — callers presumably detect this; confirm before delete[].
  inline void load_truthset(const std::string& bin_file, uint32_t*& ids,
                            float*& dists, size_t& npts, size_t& dim) {
    _u64 read_blk_size = 64 * 1024 * 1024;
    cached_ifstream reader(bin_file, read_blk_size);
    diskann::cout << "Reading truthset file " << bin_file.c_str() << " ..."
                  << std::endl;
    size_t actual_file_size = reader.get_file_size();
    int npts_i32, dim_i32;
    reader.read((char*) &npts_i32, sizeof(int));
    reader.read((char*) &dim_i32, sizeof(int));
    npts = (unsigned) npts_i32;
    dim = (unsigned) dim_i32;
    diskann::cout << "Metadata: #pts = " << npts << ", #dims = " << dim << "..."
                  << std::endl;
    int truthset_type = -1;  // 1 means truthset has ids and distances, 2 means
                             // only ids, -1 is error
    // Decide the layout from the total size: ids+dists vs ids only.
    size_t expected_file_size_with_dists =
        2 * npts * dim * sizeof(uint32_t) + 2 * sizeof(uint32_t);
    if (actual_file_size == expected_file_size_with_dists)
      truthset_type = 1;
    size_t expected_file_size_just_ids =
        npts * dim * sizeof(uint32_t) + 2 * sizeof(uint32_t);
    if (actual_file_size == expected_file_size_just_ids)
      truthset_type = 2;
    if (truthset_type == -1) {
      std::stringstream stream;
      stream << "Error. File size mismatch. File should have bin format, with "
                "npts followed by ngt followed by npts*ngt ids and optionally "
                "followed by npts*ngt distance values; actual size: "
             << actual_file_size
             << ", expected: " << expected_file_size_with_dists << " or "
             << expected_file_size_just_ids;
      diskann::cout << stream.str();
      throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
                                  __LINE__);
    }
    ids = new uint32_t[npts * dim];
    reader.read((char*) ids, npts * dim * sizeof(uint32_t));
    if (truthset_type == 1) {
      dists = new float[npts * dim];
      reader.read((char*) dists, npts * dim * sizeof(float));
    }
  }
  // Loads a ground-truth file that MUST contain distances (the ids-only
  // layout is rejected), and keeps, per query, only the neighbor ids whose
  // distance is <= `range`. Also prints the global min/max distance seen.
  // Only the with-distances layout passes validation, so `ids`/`dists`
  // are always allocated by the time they are deleted below.
  inline void prune_truthset_for_range(const std::string& bin_file, float range, std::vector<std::vector<_u32>> &groundtruth,
                                       size_t& npts) {
    _u64 read_blk_size = 64 * 1024 * 1024;
    cached_ifstream reader(bin_file, read_blk_size);
    diskann::cout << "Reading truthset file " << bin_file.c_str() << " ..."
                  << std::endl;
    size_t actual_file_size = reader.get_file_size();
    int npts_i32, dim_i32;
    reader.read((char*) &npts_i32, sizeof(int));
    reader.read((char*) &dim_i32, sizeof(int));
    npts = (unsigned) npts_i32;
    _u64 dim = (unsigned) dim_i32;
    _u32* ids;
    float* dists;
    diskann::cout << "Metadata: #pts = " << npts << ", #dims = " << dim << "..."
                  << std::endl;
    int truthset_type = -1;  // 1 means truthset has ids and distances, 2 means
                             // only ids, -1 is error
    size_t expected_file_size_with_dists =
        2 * npts * dim * sizeof(uint32_t) + 2 * sizeof(uint32_t);
    if (actual_file_size == expected_file_size_with_dists)
      truthset_type = 1;
    if (truthset_type == -1) {
      std::stringstream stream;
      stream << "Error. File size mismatch. File should have bin format, with "
                "npts followed by ngt followed by npts*ngt ids and optionally "
                "followed by npts*ngt distance values; actual size: "
             << actual_file_size
             << ", expected: " << expected_file_size_with_dists;
      diskann::cout << stream.str();
      throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
                                  __LINE__);
    }
    ids = new uint32_t[npts * dim];
    reader.read((char*) ids, npts * dim * sizeof(uint32_t));
    if (truthset_type == 1) {
      dists = new float[npts * dim];
      reader.read((char*) dists, npts * dim * sizeof(float));
    }
    // Filter each query's neighbor list by the range threshold and track
    // the observed distance extremes for the log line below.
    float min_dist = std::numeric_limits<float>::max();
    float max_dist = 0;
    groundtruth.resize(npts);
    for (_u32 i = 0; i < npts; i++) {
      groundtruth[i].clear();
      for (_u32 j = 0; j < dim; j++) {
        if (dists[i*dim + j] <= range) {
          groundtruth[i].emplace_back(ids[i*dim+j]);
        }
        min_dist = min_dist > dists[i*dim+j] ? dists[i*dim + j] : min_dist;
        max_dist = max_dist < dists[i*dim+j] ? dists[i*dim + j] : max_dist;
      }
      //std::cout<<groundtruth[i].size() << " " ;
    }
    std::cout<<"Min dist: " << min_dist <<", Max dist: "<< max_dist << std::endl;
    delete[] ids;
    delete[] dists;
  }
  // Loads a range-search ground-truth file with variable-length neighbor
  // lists. Layout: uint32 npts, uint32 total_results, npts per-query
  // counts, then the concatenated id lists. Also prints count percentiles.
  inline void load_range_truthset(const std::string& bin_file, std::vector<std::vector<_u32>> &groundtruth, _u64 & gt_num) {
    _u64 read_blk_size = 64 * 1024 * 1024;
    cached_ifstream reader(bin_file, read_blk_size);
    diskann::cout << "Reading truthset file " << bin_file.c_str() << " ..."
                  << std::endl;
    size_t actual_file_size = reader.get_file_size();
    int npts_u32, total_u32;
    reader.read((char*) &npts_u32, sizeof(int));
    reader.read((char*) &total_u32, sizeof(int));
    gt_num = (_u64) npts_u32;
    _u64 total_res = (_u64) total_u32;
    diskann::cout << "Metadata: #pts = " << gt_num << ", #total_results = " << total_res << "..."
                  << std::endl;
    // Header (2 words) + one count per query + one word per result id.
    size_t expected_file_size =
        2*sizeof(_u32) + gt_num*sizeof(_u32) + total_res*sizeof(_u32);
    if (actual_file_size != expected_file_size) {
      std::stringstream stream;
      stream << "Error. File size mismatch in range truthset. actual size: "
             << actual_file_size
             << ", expected: " << expected_file_size;
      diskann::cout << stream.str();
      throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
                                  __LINE__);
    }
    groundtruth.clear();
    groundtruth.resize(gt_num);
    std::vector<_u32> gt_count(gt_num);
    reader.read((char*) gt_count.data(), sizeof(_u32)*gt_num);
    // Report the distribution of per-query result counts (5% steps).
    std::vector<_u32> gt_stats(gt_count);
    std::sort(gt_stats.begin(), gt_stats.end());
    std::cout<<"GT count percentiles:" << std::endl;
    for (_u32 p = 0; p < 100; p += 5)
      std::cout << "percentile " << p << ": "
                << gt_stats[std::floor((p / 100.0) * gt_num)] << std::endl;
    std::cout << "percentile 100"
              << ": " << gt_stats[gt_num - 1] << std::endl;
    // Read each query's (possibly empty) id list.
    for (_u32 i = 0; i < gt_num; i++) {
      groundtruth[i].clear();
      groundtruth[i].resize(gt_count[i]);
      if (gt_count[i]!=0)
        reader.read((char*) groundtruth[i].data(), sizeof(_u32)*gt_count[i]);
      // debugging code
      /* if (i < 10) {
          std::cout<<gt_count[i] <<" nbrs, ids: ";
          for (auto &x : groundtruth[i])
            std::cout<<x <<" ";
          std::cout<<std::endl;
        } */
    }
  }
#ifdef EXEC_ENV_OLS
  // unique_ptr convenience wrapper over the memory-mapped load_bin.
  // NOTE(review): that overload returns a pointer into the mapped file
  // ("No need to copy!"), so letting unique_ptr<T[]> delete[] it looks
  // unsafe — confirm the intended ownership before relying on this.
  template<typename T>
  inline void load_bin(MemoryMappedFiles& files, const std::string& bin_file,
                       std::unique_ptr<T[]>& data, size_t& npts, size_t& dim) {
    T* ptr;
    load_bin<T>(files, bin_file, ptr, npts, dim);
    data.reset(ptr);
  }
#endif
template<typename T>
inline void load_bin(const std::string& bin_file, std::unique_ptr<T[]>& data,
size_t& npts, size_t& dim) {
T* ptr;
load_bin<T>(bin_file, ptr, npts, dim);
data.reset(ptr);
}
template<typename T>
inline void save_bin(const std::string& filename, T* data, size_t npts,
size_t ndims) {
std::ofstream writer(filename, std::ios::binary | std::ios::out);
diskann::cout << "Writing bin: " << filename.c_str() << std::endl;
int npts_i32 = (int) npts, ndims_i32 = (int) ndims;
writer.write((char*) &npts_i32, sizeof(int));
writer.write((char*) &ndims_i32, sizeof(int));
diskann::cout << "bin: #pts = " << npts << ", #dims = " << ndims
<< ", size = " << npts * ndims * sizeof(T) + 2 * sizeof(int)
<< "B" << std::endl;
// data = new T[npts_u64 * ndims_u64];
writer.write((char*) data, npts * ndims * sizeof(T));
writer.close();
diskann::cout << "Finished writing bin." << std::endl;
}
// load_aligned_bin functions START
  // Reads a bin file and stores it with every dim-vector padded out to
  // rounded_dim = ROUND_UP(dim, 8) elements (padding zero-filled), in
  // memory aligned via alloc_aligned to 8*sizeof(T). Release `data` with
  // aligned_free. Throws ANNException on a file-size mismatch.
  template<typename T>
  inline void load_aligned_bin_impl(std::basic_istream<char>& reader,
                                    size_t actual_file_size, T*& data,
                                    size_t& npts, size_t& dim,
                                    size_t& rounded_dim) {
    int npts_i32, dim_i32;
    reader.read((char*) &npts_i32, sizeof(int));
    reader.read((char*) &dim_i32, sizeof(int));
    npts = (unsigned) npts_i32;
    dim = (unsigned) dim_i32;
    // Validate the caller-supplied file size against the header.
    size_t expected_actual_file_size =
        npts * dim * sizeof(T) + 2 * sizeof(uint32_t);
    if (actual_file_size != expected_actual_file_size) {
      std::stringstream stream;
      stream << "Error. File size mismatch. Actual size is " << actual_file_size
             << " while expected size is " << expected_actual_file_size
             << " npts = " << npts << " dim = " << dim
             << " size of <T>= " << sizeof(T) << std::endl;
      diskann::cout << stream.str() << std::endl;
      throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
                                  __LINE__);
    }
    rounded_dim = ROUND_UP(dim, 8);
    diskann::cout << "Metadata: #pts = " << npts << ", #dims = " << dim
                  << ", aligned_dim = " << rounded_dim << "..." << std::flush;
    // rounded_dim is a multiple of 8, so allocSize is a multiple of the
    // 8*sizeof(T) alignment requested below.
    size_t allocSize = npts * rounded_dim * sizeof(T);
    diskann::cout << "allocating aligned memory, " << allocSize << " bytes..."
                  << std::flush;
    alloc_aligned(((void**) &data), allocSize, 8 * sizeof(T));
    diskann::cout << "done. Copying data..." << std::flush;
    // Read each vector, then zero the padding tail of its row.
    for (size_t i = 0; i < npts; i++) {
      reader.read((char*) (data + i * rounded_dim), dim * sizeof(T));
      memset(data + i * rounded_dim + dim, 0, (rounded_dim - dim) * sizeof(T));
    }
    diskann::cout << " done." << std::endl;
  }
#ifdef EXEC_ENV_OLS
  // Aligned-load variant for the memory-mapped store: wraps the mapped
  // bytes in a stream and defers to the common impl (which copies into
  // freshly aligned memory, unlike the no-copy load_bin above).
  template<typename T>
  inline void load_aligned_bin(MemoryMappedFiles& files,
                               const std::string& bin_file, T*& data,
                               size_t& npts, size_t& dim, size_t& rounded_dim) {
    diskann::cout << "Reading bin file " << bin_file << " ..." << std::flush;
    FileContent fc = files.getContent(bin_file);
    ContentBuf buf((char*) fc._content, fc._size);
    std::basic_istream<char> reader(&buf);
    size_t actual_file_size = fc._size;
    load_aligned_bin_impl(reader, actual_file_size, data, npts, dim,
                          rounded_dim);
  }
#endif
template<typename T>
inline void load_aligned_bin(const std::string& bin_file, T*& data,
size_t& npts, size_t& dim, size_t& rounded_dim) {
diskann::cout << "Reading bin file " << bin_file << " ..." << std::flush;
// START OLS
//_u64 read_blk_size = 64 * 1024 * 1024;
// cached_ifstream reader(bin_file, read_blk_size);
// size_t actual_file_size = reader.get_file_size();
// END OLS
std::ifstream reader(bin_file, std::ios::binary | std::ios::ate);
uint64_t fsize = reader.tellg();
reader.seekg(0);
load_aligned_bin_impl(reader, fsize, data, npts, dim, rounded_dim);
}
template<typename InType, typename OutType>
void convert_types(const InType* srcmat, OutType* destmat, size_t npts,
size_t dim) {
#pragma omp parallel for schedule(static, 65536)
for (int64_t i = 0; i < (_s64) npts; i++) {
for (uint64_t j = 0; j < dim; j++) {
destmat[i * dim + j] = (OutType) srcmat[i * dim + j];
}
}
}
// this function will take in_file of n*d dimensions and save the output as a
// floating point matrix
// with n*(d+1) dimensions. All vectors are scaled by a large value M so that
// the norms are <=1 and the final coordinate is set so that the resulting
// norm (in d+1 coordinates) is equal to 1 this is a classical transformation
// from MIPS to L2 search from "On Symmetric and Asymmetric LSHs for Inner
// Product Search" by Neyshabur and Srebro
  // Two-pass MIPS-to-L2 transformation (see the comment above): pass 1
  // scans the input in blocks to find the maximum vector norm; pass 2
  // rewrites each vector scaled by that norm with one extra coordinate
  // sqrt(1 - ||x||^2/max_norm^2) so every output vector has unit norm.
  // Returns max_norm (the largest original vector norm).
  template<typename T>
  float prepare_base_for_inner_products(const std::string in_file,
                                        const std::string out_file) {
    std::cout << "Pre-processing base file by adding extra coordinate"
              << std::endl;
    std::ifstream in_reader(in_file.c_str(), std::ios::binary);
    std::ofstream out_writer(out_file.c_str(), std::ios::binary);
    _u64 npts, in_dims, out_dims;
    float max_norm = 0;
    _u32 npts32, dims32;
    in_reader.read((char*) &npts32, sizeof(uint32_t));
    in_reader.read((char*) &dims32, sizeof(uint32_t));
    npts = npts32;
    in_dims = dims32;
    out_dims = in_dims + 1;  // one extra coordinate per vector
    _u32 outdims32 = (_u32) out_dims;
    out_writer.write((char*) &npts32, sizeof(uint32_t));
    out_writer.write((char*) &outdims32, sizeof(uint32_t));
    // Process in blocks of at most 100k points to bound memory use.
    size_t BLOCK_SIZE = 100000;
    size_t block_size = npts <= BLOCK_SIZE ? npts : BLOCK_SIZE;
    std::unique_ptr<T[]> in_block_data =
        std::make_unique<T[]>(block_size * in_dims);
    std::unique_ptr<float[]> out_block_data =
        std::make_unique<float[]>(block_size * out_dims);
    std::memset(out_block_data.get(), 0, sizeof(float) * block_size * out_dims);
    _u64 num_blocks = DIV_ROUND_UP(npts, block_size);
    std::vector<float> norms(npts, 0);
    // Pass 1: accumulate squared norms and track the maximum.
    for (_u64 b = 0; b < num_blocks; b++) {
      _u64 start_id = b * block_size;
      _u64 end_id = (b + 1) * block_size < npts ? (b + 1) * block_size : npts;
      _u64 block_pts = end_id - start_id;
      in_reader.read((char*) in_block_data.get(),
                     block_pts * in_dims * sizeof(T));
      for (_u64 p = 0; p < block_pts; p++) {
        for (_u64 j = 0; j < in_dims; j++) {
          norms[start_id + p] +=
              in_block_data[p * in_dims + j] * in_block_data[p * in_dims + j];
        }
        max_norm =
            max_norm > norms[start_id + p] ? max_norm : norms[start_id + p];
      }
    }
    // `norms` holds squared norms; max_norm becomes the true max norm here.
    max_norm = std::sqrt(max_norm);
    // Pass 2: rewind past the header and emit the transformed vectors.
    in_reader.seekg(2 * sizeof(_u32), std::ios::beg);
    for (_u64 b = 0; b < num_blocks; b++) {
      _u64 start_id = b * block_size;
      _u64 end_id = (b + 1) * block_size < npts ? (b + 1) * block_size : npts;
      _u64 block_pts = end_id - start_id;
      in_reader.read((char*) in_block_data.get(),
                     block_pts * in_dims * sizeof(T));
      for (_u64 p = 0; p < block_pts; p++) {
        for (_u64 j = 0; j < in_dims; j++) {
          out_block_data[p * out_dims + j] =
              in_block_data[p * in_dims + j] / max_norm;
        }
        // Extra coordinate; clamp at 0 to guard against rounding error.
        float res = 1 - (norms[start_id + p] / (max_norm * max_norm));
        res = res <= 0 ? 0 : std::sqrt(res);
        out_block_data[p * out_dims + out_dims - 1] = res;
      }
      out_writer.write((char*) out_block_data.get(),
                       block_pts * out_dims * sizeof(float));
    }
    out_writer.close();
    return max_norm;
  }
// plain saves data as npts X ndims array into filename
  // Writes `data` (npts x ndims) in Tvecs layout: each record is a uint32
  // dimension count followed by ndims values of T.
  template<typename T>
  void save_Tvecs(const char* filename, T* data, size_t npts, size_t ndims) {
    std::string fname(filename);
    // create cached ofstream with 64MB cache
    cached_ofstream writer(fname, 64 * 1048576);
    unsigned dims_u32 = (unsigned) ndims;
    // start writing
    for (uint64_t i = 0; i < npts; i++) {
      // write dims in u32
      writer.write((char*) &dims_u32, sizeof(unsigned));
      // get cur point in data
      T* cur_pt = data + i * ndims;
      writer.write((char*) cur_pt, ndims * sizeof(T));
    }
  }
// NOTE :: good efficiency when total_vec_size is integral multiple of 64
inline void prefetch_vector(const char* vec, size_t vecsize) {
size_t max_prefetch_size = (vecsize / 64) * 64;
for (size_t d = 0; d < max_prefetch_size; d += 64)
_mm_prefetch((const char*) vec + d, _MM_HINT_T0);
}
// NOTE :: good efficiency when total_vec_size is integral multiple of 64
inline void prefetch_vector_l2(const char* vec, size_t vecsize) {
size_t max_prefetch_size = (vecsize / 64) * 64;
for (size_t d = 0; d < max_prefetch_size; d += 64)
_mm_prefetch((const char*) vec + d, _MM_HINT_T1);
}
}; // namespace diskann
// Pairs a pivot id with its distance. Comparisons are reversed relative to
// the distance: a < b holds exactly when b.piv_dist < a.piv_dist, so
// sorting yields descending-distance order.
struct PivotContainer {
  PivotContainer() = default;

  PivotContainer(size_t pivo_id, float pivo_dist)
      : piv_id{pivo_id}, piv_dist{pivo_dist} {
  }

  bool operator<(const PivotContainer& rhs) const {
    return rhs.piv_dist < piv_dist;
  }

  bool operator>(const PivotContainer& rhs) const {
    return piv_dist < rhs.piv_dist;
  }

  size_t piv_id;
  float  piv_dist;
};
// Returns true when stat() succeeds on `name` (the path exists).
inline bool file_exists(const std::string& name) {
  struct stat st;
  const int rc = stat(name.c_str(), &st);
  diskann::cout << " Stat(" << name.c_str() << ") returned: " << rc
                << std::endl;
  return rc == 0;
}
// Returns the size of `fname` in bytes, or 0 when the file cannot be opened.
inline _u64 get_file_size(const std::string& fname) {
  std::ifstream in(fname, std::ios::binary | std::ios::ate);
  if (in.fail() || !in.is_open()) {
    diskann::cout << "Could not open file: " << fname << std::endl;
    return 0;
  }
  _u64 end_pos = in.tellg();
  diskann::cout << " Tellg: " << in.tellg() << " as u64: " << end_pos
                << std::endl;
  in.close();
  return end_pos;
}
// Checks that the uint64 recorded in the first 8 bytes of `name` matches
// the file's actual size. Returns false (with a log line) on mismatch.
// Fix: the original compared `expected_file_size` without checking that
// the open/read succeeded, so a missing or short file compared against an
// uninitialized value; the header read is now verified first.
inline bool validate_file_size(const std::string& name) {
  std::ifstream in(std::string(name), std::ios::binary);
  in.seekg(0, in.end);
  size_t actual_file_size = in.tellg();
  in.seekg(0, in.beg);
  size_t expected_file_size = 0;
  in.read((char*) &expected_file_size, sizeof(uint64_t));
  if (!in) {
    diskann::cout << "Error loading" << name
                  << ": could not read the 8-byte size header." << std::endl;
    in.close();
    return false;
  }
  if (actual_file_size != expected_file_size) {
    diskann::cout << "Error loading" << name
                  << ". Expected "
                     "size (metadata): "
                  << expected_file_size
                  << ", actual file size : " << actual_file_size
                  << ". Exitting." << std::endl;
    in.close();
    return false;
  }
  in.close();
  return true;
}
#ifdef _WINDOWS
#include <intrin.h>
#include <Psapi.h>

// Logs the process's peak working set, current working set, and private
// bytes (pagefile usage), all reported in GB.
inline void printProcessMemory(const char* message) {
  PROCESS_MEMORY_COUNTERS counters;
  HANDLE h = GetCurrentProcess();
  GetProcessMemoryInfo(h, &counters, sizeof(counters));
  diskann::cout << message << " [Peaking Working Set size: "
                << counters.PeakWorkingSetSize * 1.0 / (1024 * 1024 * 1024)
                << "GB Working set size: "
                << counters.WorkingSetSize * 1.0 / (1024 * 1024 * 1024)
                << "GB Private bytes "
                << counters.PagefileUsage * 1.0 / (1024 * 1024 * 1024) << "GB]"
                << std::endl;
}
#else
// need to check and change this
// TODO(review): hard-coded to true on non-Windows — no actual CPU feature
// detection is performed here.
inline bool avx2Supported() {
  return true;
}

// Non-Windows stub: memory counters are not queried; only the message is
// logged.
inline void printProcessMemory(const char* message) {
  diskann::cout << message << std::endl;
}
#endif
extern bool AvxSupportedCPU;
extern bool Avx2SupportedCPU;
|
sample_barrier_master.c | /* Andre Augusto Giannotti Scota (https://sites.google.com/view/a2gs/) */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "openmp_util.h"
/* Demonstrates an OpenMP barrier followed by a master-only section:
 * every thread in the team runs function_a and function_b, all threads
 * wait at the barrier, then only the master thread prints. function_c
 * and function_d run serially after the team joins. */
int main(int argc, char *argv[])
{
	/* #pragma omp parallel num_threads(3) */
	/* omp_set_num_threads(2); */

#pragma omp parallel
	{
		function_a(0);
		function_b(3);

		/* All threads must arrive here before the master block runs. */
#pragma omp barrier

#pragma omp master
		{
			printf("Join all threads.\n");
		}
	}

	/* Serial region again. */
	function_c(1);
	function_d(0);

	return(0);
}
|
reduction.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/* Computes the dot product of two 100-element vectors a and b (both
 * initialized to 0,1,...,99) using an OpenMP "+" reduction, and prints
 * the result. Fix: main now returns 0 explicitly instead of falling off
 * the end. */
int main (int argc, char *argv[])
{
  int i, n;
  float a[100], b[100], sum;

  /* Some initializations */
  n = 100;
  for (i = 0; i < n; i++)
    a[i] = b[i] = i * 1.0;

  sum = 0.0;
  /* Each thread accumulates a private partial sum; OpenMP combines them
   * into `sum` at the end of the loop. */
#pragma omp parallel for reduction(+:sum)
  for (i = 0; i < n; i++)
    sum = sum + (a[i] * b[i]);

  printf(" Sum = %f\n", sum);
  return 0;
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Computes *result = *x - *y for struct timeval values.
 * NOTE: *y is used as scratch space and is modified by the call.
 * Returns 1 when the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y so x->tv_usec - y->tv_usec cannot be
   * negative. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry surplus microseconds the other way. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* tv_usec is certainly non-negative at this point. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Driver for the tiled order-2, 3D 25-point stencil.
 * argv: [1..3] = interior Nx Ny Nz (a halo of 8 is added), [4] = timesteps.
 * Fixes vs. the original:
 *   - Nx/Ny/Nz/Nt were read uninitialized when arguments were missing;
 *     they now fall back to small defaults.
 *   - roc2 was first malloc'd with a bogus size and immediately
 *     re-assigned, leaking the first allocation.
 *   - A[1] was never initialized and the halo planes 0 of A[0]/roc2 were
 *     skipped, yet the stencil reads both; everything is now initialized
 *     from index 0.
 *   - tile_size and the A spine are freed at the end. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* Defaults used when command-line sizes are absent. */
  Nx = Ny = Nz = 64 + 8;
  Nt = 8;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Two time planes A[0]/A[1] plus the velocity coefficients roc2. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 16;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  /* Initialize every point, including the halo, so the stencil never
   * reads uninitialized memory. A[1] starts at zero. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* Order-4 finite-difference coefficients. */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
    /* (stray glibc header comment dropped; it was injected by the
       polyhedral source-to-source toolchain) */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=floord(Nt-1,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
        ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-1,2)),ceild(16*t2-Nz-3,16));t3<=min(min(min(floord(4*Nt+Ny-9,16),floord(8*t1+Ny+7,16)),floord(16*t2+Ny+3,16)),floord(16*t1-16*t2+Nz+Ny+5,16));t3++) {
            for (t4=max(max(max(0,ceild(t1-255,256)),ceild(16*t2-Nz-2035,2048)),ceild(16*t3-Ny-2035,2048));t4<=min(min(min(min(floord(4*Nt+Nx-9,2048),floord(8*t1+Nx+7,2048)),floord(16*t2+Nx+3,2048)),floord(16*t3+Nx+3,2048)),floord(16*t1-16*t2+Nz+Nx+5,2048));t4++) {
              for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(2048*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),4*t3+2),512*t4+510);t5++) {
                for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) {
                    lbv=max(2048*t4,4*t5+4);
                    ubv=min(2048*t4+2047,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  free(A);
  free(tile_size);

  return 0;
}
|
UtilitiesOMP.h | //
// UtilitiesOMP.h
// Gauss
//
// Created by David Levin on 6/7/17.
//
//
#ifndef UtilitiesOMP_h
#define UtilitiesOMP_h
// Counts the threads in an OpenMP parallel region: each thread contributes
// 1 to a "+" reduction. Returns 1 when the pragma is disabled/ignored.
inline int omp_thread_count() {
    int count = 0;
#pragma omp parallel reduction(+:count)
    count += 1;
    return count;
}
#endif /* UtilitiesOMP_h */
|
declare-variant-9.c | /* { dg-do compile } */
/* { dg-additional-options "-fdump-tree-gimple" } */
/* { dg-additional-options "-mno-sse3" { target { i?86-*-* x86_64-*-* } } } */
#undef i386
void f01 (void);
#pragma omp declare variant (f01) match (device={isa("avx512f",avx512bw)})
void f02 (void);
void f03 (void);
#pragma omp declare variant (f03) match (device={kind(any),arch(x86_64),isa("avx512f","avx512bw")})
void f04 (void);
void f05 (void);
#pragma omp declare variant (f05) match (device={kind(gpu)})
void f06 (void);
void f07 (void);
#pragma omp declare variant (f07) match (device={kind("cpu")})
void f08 (void);
void f09 (void);
#pragma omp declare variant (f09) match (device={isa(sm_35)})
void f10 (void);
void f11 (void);
#pragma omp declare variant (f11) match (device={arch(nvptx)})
void f12 (void);
void f13 (void);
#pragma omp declare variant (f13) match (device={arch("i386"),isa(sse4)})
void f14 (void);
void f15 (void);
#pragma omp declare variant (f15) match (device={isa(sse4,ssse3),arch(i386)})
void f16 (void);
void f17 (void);
#pragma omp declare variant (f17) match (device={kind("any","fpga")})
void f18 (void);
/* Context-free calls: with -mno-sse3 none of the isa/arch selectors match,
   so the base functions must be called; the dg-final directives verify the
   gimple dump. */
void
test1 (void)
{
  int i;
  f02 (); /* { dg-final { scan-tree-dump-times "f02 \\\(\\\);" 1 "gimple" } } */
  f14 (); /* { dg-final { scan-tree-dump-times "f14 \\\(\\\);" 1 "gimple" } } */
  f18 (); /* { dg-final { scan-tree-dump-times "f18 \\\(\\\);" 1 "gimple" } } */
}
/* Inside a function compiled with avx512f/avx512bw enabled, the matching
   variants (f03, f15) must be selected on the corresponding targets. */
#if defined(__i386__) || defined(__x86_64__)
__attribute__((target ("avx512f,avx512bw")))
#endif
void
test2 (void)
{
  f04 (); /* { dg-final { scan-tree-dump-times "f03 \\\(\\\);" 1 "gimple" { target { { i?86-*-* x86_64-*-* } && lp64 } } } } */
  /* { dg-final { scan-tree-dump-times "f04 \\\(\\\);" 1 "gimple" { target { { ! lp64 } || { ! { i?86-*-* x86_64-*-* } } } } } } */
  f16 (); /* { dg-final { scan-tree-dump-times "f15 \\\(\\\);" 1 "gimple" { target ia32 } } } */
  /* { dg-final { scan-tree-dump-times "f16 \\\(\\\);" 1 "gimple" { target { ! ia32 } } } } */
}
/* Device kind/arch/isa selectors: per the scans, on non-GPU targets the
   gpu/sm_35/nvptx variants must be rejected (base function kept) while
   kind("cpu") matches, so the f08 call resolves to its f07 variant.  */
void
test3 (void)
{
  f06 (); /* { dg-final { scan-tree-dump-times "f06 \\\(\\\);" 1 "gimple" { target { ! { nvptx*-*-* amdgcn*-*-* } } } } } */
  f08 (); /* { dg-final { scan-tree-dump-times "f07 \\\(\\\);" 1 "gimple" { target { ! { nvptx*-*-* amdgcn*-*-* } } } } } */
  f10 (); /* { dg-final { scan-tree-dump-times "f10 \\\(\\\);" 1 "gimple" { target { ! { nvptx*-*-* amdgcn*-*-* } } } } } */
  f12 (); /* { dg-final { scan-tree-dump-times "f12 \\\(\\\);" 1 "gimple" { target { ! { nvptx*-*-* } } } } } */
  /* { dg-final { scan-tree-dump-times "f11 \\\(\\\);" 1 "gimple" { target { nvptx*-*-* } } } } */
}
|
GB_unop__cos_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__cos_fp64_fp64
// op(A') function: GB_unop_tran__cos_fp64_fp64
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = cos (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cos (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = cos (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_COS || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator entrywise: Cx [k] = cos (Ax [k]) for all k.
// Both the input and output types are double, so the intermediate cast
// performed by the generated template is the identity and is folded away.
GrB_Info GB_unop_apply__cos_fp64_fp64
(
    double *Cx, // Cx and Ax may be aliased
    const double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // loop index declared outside the loop for OpenMP's sake
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = cos (Ax [k]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the actual transpose loop lives in the textually
// included template GB_unop_transpose.c, which is specialised through the
// GB_* macros defined at the top of this file (GB_OP applies cos here).
GrB_Info GB_unop_tran__cos_fp64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 only: counts were computed by an earlier symbolic phase
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__identity_uint8_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint8_uint8
// op(A') function: GB_tran__identity_uint8_uint8
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator entrywise: Cx [p] = Ax [p].  The generated
// GB_CAST_OP macro is expanded inline here; the uint8_t -> uint8_t cast it
// performs is a no-op, so this is a plain parallel copy.
GrB_Info GB_unop__identity_uint8_uint8
(
    uint8_t *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint8_t aij = Ax [p] ;
        Cx [p] = aij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose loop lives in the textually included
// template GB_unaryop_transpose.c, specialised through the GB_* macros
// defined at the top of this file (identity operator here).
GrB_Info GB_tran__identity_uint8_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 only: counts were computed by an earlier symbolic phase
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
NeuralNetwork_OMP_CPU1.c | /* NEURAL NETWORK OMP CPU1.c
* by Lut99
*
* Created:
* 4/18/2020, 11:25:46 PM
* Last edited:
* 19/11/2020, 17:10:57
* Auto updated?
* Yes
*
* Description:
* The NeuralNetwork class implements a matrix-based Feedforward Neural
* Network which is hardcoded to use Mean Squared Error for cost function and
* sigmoid as activation function.
*
* This file implements the first of eight different OpenMP-optimised
* versions for the CPU. It optimises the Forward pass only using threads.
**/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#include "NeuralNetwork.h"
/***** OPTIONAL PARAMETERS *****/
static unsigned int n_threads = 16;
/***** OPENMP DECLARATIONS *****/
extern int omp_set_num_threads();
extern int omp_get_num_procs();
extern int omp_get_thread_num();
/***** HELPER FUNCTIONS *****/
#define TIMEVAL_TO_MS(T_START, T_END) (((T_END.tv_sec - T_START.tv_sec) * 1000000 + (T_END.tv_usec - T_START.tv_usec)) / 1000000.0)
extern size_t max(size_t length, const size_t* list);
/***** NEURAL NETWORK OPERATIONS *****/
/* Trains the given neural network with full-batch gradient descent.
 *
 * Parameters:
 *   nn            - the network to train; its biases and weights are updated in place
 *   n_samples     - number of training samples
 *   inputs        - n_samples pointers to input vectors; these are borrowed
 *                   (linked as the first layer's outputs), never copied or freed
 *   expected      - n_samples pointers to expected-output vectors
 *   learning_rate - factor applied to the accumulated deltas at update time
 *   n_iterations  - number of full passes over the sample set
 *
 * In this variation only the forward pass (over samples) and the weight
 * update (over layers) are parallelised with OpenMP; the backward pass is
 * serial, since every sample accumulates into the shared delta_biases /
 * delta_weights buffers.
 */
void nn_train(neural_net* nn, size_t n_samples, double** inputs, double** expected, double learning_rate, size_t n_iterations) {
#ifdef BENCHMARK
    // Declare all timers
    struct timeval s_total, e_total, s_iters, e_iters, s_fwd, e_fwd, s_bck_out, e_bck_out, s_bck_hid, e_bck_hid, s_upd, e_upd;
    // Set some shortcuts for the timers
    // NOTE(review): each phase is sampled once, at the middle iteration /
    // middle sample; if that point is never reached (tiny n_iterations or
    // n_samples) the phase timers stay uninitialized for the prints below.
    size_t half_iters = n_iterations / 2;
    size_t half_samples = n_samples / 2;
    // Start the total timer
    gettimeofday(&s_total, NULL);
#endif

    // Also obtain links to all biases / matrices
    double** biases = nn->biases;
    double** weights = nn->weights;

    // Make some shortcuts for the number-of-nodes information
    size_t n_layers = nn->n_layers;
    size_t* nodes_per_layer = nn->nodes_per_layer;

    // Initialize the temporary delta memory to the correct size
    // (max() returns the widest layer, so one buffer fits any layer's deltas)
    double* deltas = malloc(sizeof(double) * max(n_layers, nodes_per_layer));
    // Also make a prev list to avoid accidentally changing the deltas as we go
    double* prev_deltas = malloc(sizeof(double) * max(n_layers, nodes_per_layer));

    // Create a list that is used to store intermediate outputs. The first input layer (=first column)
    // is linked and not copied to the input data
    // NOTE(review): this is a VLA of n_samples * n_layers pointers on the
    // stack - large sample counts could overflow it; confirm expected sizes.
    double* layer_outputs[n_samples][n_layers];
    for (size_t s = 0; s < n_samples; s++) {
        // Link the input layer
        layer_outputs[s][0] = inputs[s];
        // Allocate arrays for the other layers
        for (size_t l = 1; l < n_layers; l++) {
            layer_outputs[s][l] = malloc(sizeof(double) * nodes_per_layer[l]);
        }
    }

    // Create the delta_biases and delta_weights arrays / matrices
    double* delta_biases[nn->n_weights];
    double* delta_weights[nn->n_weights];
    for(size_t l = 0; l < nn->n_weights; l++) {
        delta_biases[l] = malloc(sizeof(double) * nodes_per_layer[l + 1]);
        delta_weights[l] = malloc(sizeof(double) * nodes_per_layer[l] * nodes_per_layer[l + 1]);

        // Fill with zeros
        for (size_t n = 0; n < nodes_per_layer[l + 1]; n++) {
            delta_biases[l][n] = 0;
            for (size_t prev_n = 0; prev_n < nodes_per_layer[l]; prev_n++) {
                delta_weights[l][prev_n * nodes_per_layer[l + 1] + n] = 0;
            }
        }
    }

#ifdef BENCHMARK
    // Start the iterations timer
    gettimeofday(&s_iters, NULL);
#endif

    // Perform the training for n_iterations (always)
    for (size_t i = 0; i < n_iterations; i++) {
        // Loop through all samples to compute the forward cost
        // (samples are independent here: each writes only its own
        //  layer_outputs[s] row, so a plain static schedule is safe)
        #pragma omp parallel for schedule(static)
        for (size_t s = 0; s < n_samples; s++) {
            /***** FORWARD PASS *****/

#ifdef BENCHMARK
            // Start the forward pass timer
            if (i == half_iters && s == half_samples) {
                gettimeofday(&s_fwd, NULL);
            }
#endif

            // sample_outputs is a 2D flattened array for this layer
            double** sample_outputs = layer_outputs[s];

            // Iterate over each layer to feedforward through the network
            for (size_t l = 1; l < n_layers; l++) {
                // Get some references to the bias list, weight matrix and outputs of the previous and this layer
                double* bias = biases[l - 1];
                double* weight = weights[l - 1];
                double* prev_output = sample_outputs[l - 1];
                double* output = sample_outputs[l];

                // Compute the activation for each node on this layer
                size_t this_nodes = nodes_per_layer[l];
                size_t prev_nodes = nodes_per_layer[l - 1];
                for (size_t n = 0; n < this_nodes; n++) {
                    // Sum the weighted inputs for this node
                    double z = bias[n];
                    for (size_t prev_n = 0; prev_n < prev_nodes; prev_n++) {
                        z += prev_output[prev_n] * weight[prev_n * this_nodes + n];
                    }

                    // Run the activation function over this input and store it in the output
                    // (sigmoid: 1 / (1 + e^-z))
                    output[n] = 1 / (1 + exp(-z));
                }
            }

#ifdef BENCHMARK
            // End the forward timer, start the backward pass output timer
            if (i == half_iters && s == half_samples) {
                gettimeofday(&e_fwd, NULL);
            }
#endif
        }

        /***** BACKWARD PASS *****/
        // Implementation: https://towardsdatascience.com/simple-neural-network-implementation-in-c-663f51447547

        // Loop through all samples to compute the backward cost
        // (serial: all samples accumulate into the shared delta buffers)
        size_t last_nodes = nodes_per_layer[n_layers - 1];
        size_t last_prev_nodes = nodes_per_layer[n_layers - 2];
        double* last_delta_bias = delta_biases[n_layers - 2];
        double* last_delta_weight = delta_weights[n_layers - 2];
        for (size_t s = 0; s < n_samples; s++) {
#ifdef BENCHMARK
            // End the forward timer, start the backward pass output timer
            if (i == half_iters && s == half_samples) {
                gettimeofday(&s_bck_out, NULL);
            }
#endif

            // Backpropagate the error from the last layer to the first.
            double** sample_outputs = layer_outputs[s];
            double* sample_expected = expected[s];

            // Do the output layer: compute the deltas
            // delta = (expected - actual) * sigmoid'(z), with
            // sigmoid'(z) = output * (1 - output)
            double* output = sample_outputs[n_layers - 1];
            for (size_t n = 0; n < last_nodes; n++) {
                double output_val = output[n];
                prev_deltas[n] = (sample_expected[n] - output_val) * output_val * (1 - output_val);
            }

            // Do the output layer: compute the bias & weight updates

            // Add all deltas as delta_biases for this layer
            for (size_t n = 0; n < last_nodes; n++) {
                last_delta_bias[n] += prev_deltas[n];
            }
            // Same for all the weights, except we compute the delta_weights first
            double* last_prev_output = sample_outputs[n_layers - 2];
            for (size_t prev_n = 0; prev_n < last_prev_nodes; prev_n++) {
                for (size_t n = 0; n < last_nodes; n++) {
                    last_delta_weight[prev_n * last_nodes + n] += last_prev_output[prev_n] * prev_deltas[n];
                }
            }

#ifdef BENCHMARK
            // End the backward pass output timer, start the backward pass hidden timer
            if (i == half_iters && s == half_samples) {
                gettimeofday(&e_bck_out, NULL);
                gettimeofday(&s_bck_hid, NULL);
            }
#endif

            // Then, the rest of the hidden layers (walking from the last
            // hidden layer back to the first; prev_deltas always holds the
            // deltas of the layer one step closer to the output)
            for (size_t l = n_layers - 2; l > 0; l--) {
                double* delta_bias = delta_biases[l - 1];
                double* delta_weight = delta_weights[l - 1];
                double* output = sample_outputs[l];
                double* prev_output = sample_outputs[l - 1];
                size_t next_nodes = nodes_per_layer[l + 1];
                size_t this_nodes = nodes_per_layer[l];
                size_t prev_nodes = nodes_per_layer[l - 1];

                // Loop through all nodes in this layer to compute their deltas by summing all deltas of the next layer in a weighted fashion
                double* weight_next = weights[l];
                for (size_t n = 0; n < this_nodes; n++) {
                    // Take the weighted sum of all connection of that node with this layer
                    double error = 0;
                    for (size_t next_n = 0; next_n < next_nodes; next_n++) {
                        error += prev_deltas[next_n] * weight_next[n * next_nodes + next_n];
                    }

                    // Multiply the error with the derivative of the activation function to find the result
                    double output_val = output[n];
                    deltas[n] = error * output_val * (1 - output_val);
                }

                // Add all deltas as delta_biases for this layer
                for (size_t n = 0; n < this_nodes; n++) {
                    delta_bias[n] += deltas[n];
                }
                // Same for all the weights, except we compute the delta_weights first
                for (size_t prev_n = 0; prev_n < prev_nodes; prev_n++) {
                    for (size_t n = 0; n < this_nodes; n++) {
                        delta_weight[prev_n * this_nodes + n] += prev_output[prev_n] * deltas[n];
                    }
                }

                // Swap the two delta lists, so the deltas just computed become
                // the "next layer" deltas of the following (shallower) layer
                double* temp = deltas;
                deltas = prev_deltas;
                prev_deltas = temp;
            }

#ifdef BENCHMARK
            // End the backward pass hidden timer
            if (i == half_iters && s == half_samples) {
                gettimeofday(&e_bck_hid, NULL);
            }
#endif
        }

#ifdef BENCHMARK
        // Start the updates timer
        if (i == half_iters) {
            gettimeofday(&s_upd, NULL);
        }
#endif

        // Actually update the weights, and reset the delta updates to 0 for next iteration
        // (layers are independent here, so this loop parallelises cleanly)
        #pragma omp parallel for schedule(static)
        for (size_t l = 0; l < nn->n_weights; l++) {
            double* bias = biases[l];
            double* delta_bias = delta_biases[l];
            double* weight = weights[l];
            double* delta_weight = delta_weights[l];

            // Update the biases & reset delta_biases
            size_t this_nodes = nodes_per_layer[l + 1];
            for (size_t n = 0; n < this_nodes; n++) {
                bias[n] += delta_bias[n] * learning_rate;
                delta_bias[n] = 0;
            }

            // Update the weights & reset delta_weights
            // (note: this inner `i` shadows the iteration counter, but it
            //  goes out of scope before the timer check below)
            size_t prev_nodes = nodes_per_layer[l];
            for (size_t i = 0; i < this_nodes * prev_nodes; i++) {
                weight[i] += delta_weight[i] * learning_rate;
                delta_weight[i] = 0;
            }
        }

#ifdef BENCHMARK
        // Stop the updates timer
        if (i == half_iters) {
            gettimeofday(&e_upd, NULL);
        }
#endif
    }

#ifdef BENCHMARK
    // End the iterations timer
    gettimeofday(&e_iters, NULL);
#endif

    // Cleanup

    // Free the delta biases / weights
    // (assumes nn->n_weights == n_layers - 1 - TODO confirm against the
    //  allocation loop above, which is bounded by nn->n_weights)
    for(size_t l = 0; l < n_layers - 1; l++) {
        free(delta_biases[l]);
        free(delta_weights[l]);
    }

    // Free the layer_outputs (skip the first, as these merely link the input rather than copy 'em)
    for (size_t s = 0; s < n_samples; s++) {
        for (size_t l = 1; l < n_layers; l++) {
            free(layer_outputs[s][l]);
        }
    }

    // Cleanup the deltas
    free(deltas);
    free(prev_deltas);

#ifdef BENCHMARK
    // End the total timer
    gettimeofday(&e_total, NULL);

    // Print the results (one elapsed-seconds value per line:
    // total, iterations, forward, backward-output, backward-hidden, update)
    printf("%f\n", TIMEVAL_TO_MS(s_total, e_total));
    printf("%f\n", TIMEVAL_TO_MS(s_iters, e_iters));
    printf("%f\n", TIMEVAL_TO_MS(s_fwd, e_fwd));
    printf("%f\n", TIMEVAL_TO_MS(s_bck_out, e_bck_out));
    printf("%f\n", TIMEVAL_TO_MS(s_bck_hid, e_bck_hid));
    printf("%f\n", TIMEVAL_TO_MS(s_upd, e_upd));
#endif
}
/***** OTHER TOOLS *****/
/* Parses the variation-specific optional command line arguments.
 *
 * The first argument, if given, is the number of threads to use
 * (presumably the caller strips the program name and common arguments
 * before handing argv over - verify against the caller).
 *
 * Fix: atoi() returns 0 for non-numeric input, and the old code passed
 * that (or a negative value) straight to omp_set_num_threads(), for which
 * a non-positive thread count is invalid. Such values are now ignored and
 * the default n_threads is kept.
 */
void parse_opt_args(int argc, char** argv) {
    // Parse and set number of threads as first argument
    if (argc >= 1) {
        // Only accept strictly positive thread counts
        int requested = atoi(argv[0]);
        if (requested > 0) {
            n_threads = requested;
        }
    }
    omp_set_num_threads(n_threads);
}
/* Prints the configuration of this NeuralNetwork variation to stdout. */
void print_opt_args() {
    fputs(" - Variation : OpenMP CPU 1 (Forward only)\n", stdout);
    printf(" - Number of threads : %u\n", n_threads);
}
|
for-19.c | /* Verify that if GOMP_parallel_loop_dynamic_start is used, variables
mentioned in the INIT, COND and INCR expressions aren't unnecessarily
copied to the omp_fn function. */
/* { dg-do compile } */
/* { dg-options "-O -fopenmp -fdump-tree-gimple" } */
/* Two dynamic-schedule parallel loops in which k, l and m appear only in the
   loop INIT/COND/INCR expressions; the dg-final scans in this file check
   which of these variables end up copied into the outlined omp_fn.  */
void foo (int *a, int i, int j, int k, int l, int m)
{
#pragma omp parallel for num_threads (3 * i) schedule (dynamic, i * 4)
  for (j = 0; j <= (6 * l + 4 * k); j++)
    a[j] = 1;
#pragma omp parallel for num_threads (3 * i) schedule (dynamic, i * 4)
  for (j = m; j <= l; j += (k + l - m))
    a[j] = 1;
}
/* { dg-final { scan-tree-dump-times "shared\\(a\\)" 2 "gimple" } } */
/* { dg-final { scan-tree-dump-times "shared\\(k\\)" 0 "gimple" { xfail *-*-* } } } */
/* { dg-final { scan-tree-dump-times "shared\\(l\\)" 0 "gimple" { xfail *-*-* } } } */
/* { dg-final { scan-tree-dump-times "shared\\(m\\)" 0 "gimple" { xfail *-*-* } } } */
/* { dg-final { cleanup-tree-dump "gimple" } } */
|
millionaire_with_equality.h | /*
Authors: Mayank Rathee
Copyright:
Copyright (c) 2020 Microsoft Research
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef MILLIONAIRE_WITH_EQ_H__
#define MILLIONAIRE_WITH_EQ_H__
#include "Millionaire/millionaire.h"
#include "OT/emp-ot.h"
#include "utils/emp-tool.h"
#include <cmath>
// Secure two-party "millionaires'" protocol extended with an equality test:
// after compare_with_eq(), ALICE and BOB each hold XOR-shares of both the
// comparison bit and the equality bit for every input element. Inputs are
// decomposed into radix-2^beta digits, leaf results are obtained via OT, and
// the per-digit results are combined in a bottom-up AND tree using Beaver
// bit-triples.
class MillionaireWithEquality {
public:
  sci::IOPack *iopack;          // network channels (main + reverse)
  sci::OTPack *otpack;          // pre-configured OT instances
  TripleGenerator *triple_gen;  // bit-triple generator (borrowed from mill)
  MillionaireProtocol *mill;    // underlying comparison protocol (owned)
  int party;                    // sci::ALICE or sci::BOB
  // l: bitlength; beta: radix bits per digit; beta_pow: 2^beta;
  // r: width of the (possibly narrower) most significant digit (l % beta)
  int l, r, log_alpha, beta, beta_pow;
  int num_digits, num_triples, log_num_digits;
  uint8_t mask_beta, mask_r;

  // Builds the protocol state and the underlying MillionaireProtocol,
  // then derives all radix-decomposition parameters via configure().
  MillionaireWithEquality(int party, sci::IOPack *iopack, sci::OTPack *otpack,
                          int bitlength = 32, int radix_base = MILL_PARAM) {
    this->party = party;
    this->iopack = iopack;
    this->otpack = otpack;
    this->mill =
        new MillionaireProtocol(party, iopack, otpack, bitlength, radix_base);
    this->triple_gen = mill->triple_gen;
    configure(bitlength, radix_base);
  }

  // Derives digit counts, masks and triple counts from the bitlength and
  // radix base. Called again by compare_with_eq() for per-call parameters.
  void configure(int bitlength, int radix_base = MILL_PARAM) {
    assert(radix_base <= 8);
    assert(bitlength <= 64);
    this->l = bitlength;
    this->beta = radix_base;
    // number of radix-beta digits needed to cover l bits
    this->num_digits = ceil((double)l / beta);
    this->r = l % beta;
    this->log_alpha = sci::bitlen(num_digits) - 1;
    this->log_num_digits = log_alpha + 1;
    // one triple per AND; the tree needs 2*(num_digits-1) ANDs in total
    // (a CMP-AND and an EQ-AND per internal combination step)
    this->num_triples = 2 * num_digits - 2;
    if (beta == 8)
      this->mask_beta = -1;
    else
      this->mask_beta = (1 << beta) - 1;
    this->mask_r = (1 << r) - 1;
    this->beta_pow = 1 << beta;
  }

  ~MillionaireWithEquality() { delete mill; }

  // Special case for bitlength <= beta: a single 1-of-N OT per comparison
  // suffices, no AND tree is needed. ALICE picks random output shares and
  // encodes them into the leaf OT messages; BOB selects with his input as
  // the choice and unpacks (eq in bit 0, cmp in bit 1).
  // NOTE(review): N = 1 << bitlength is computed in a uint8_t, which wraps
  // to 0 when bitlength == 8 (possible when beta == 8) - confirm callers
  // never reach this path with bitlength == 8.
  void bitlen_lt_beta(uint8_t *res_cmp, uint8_t *res_eq, uint64_t *data,
                      int num_cmps, int bitlength, bool greater_than = true,
                      int radix_base = MILL_PARAM) {
    uint8_t N = 1 << bitlength;
    uint8_t mask = N - 1;
    if (party == sci::ALICE) {
      // Random shares of the two result bits
      sci::PRG128 prg;
      prg.random_data(res_cmp, num_cmps * sizeof(uint8_t));
      prg.random_data(res_eq, num_cmps * sizeof(uint8_t));
      uint8_t **leaf_messages = new uint8_t *[num_cmps];
      for (int i = 0; i < num_cmps; i++) {
        res_cmp[i] &= 1;
        res_eq[i] &= 1;
        leaf_messages[i] = new uint8_t[N];
        this->mill->set_leaf_ot_messages(leaf_messages[i], (data[i] & mask), N,
                                         res_cmp[i], res_eq[i], greater_than,
                                         true);
      }
      // 1-oo-N KKOT for multi-bit inputs, plain IKNP for 1-bit inputs
      if (bitlength > 1) {
        otpack->kkot[bitlength - 1]->send(leaf_messages, num_cmps, 2);
      } else {
        otpack->iknp_straight->send(leaf_messages, num_cmps, 2);
      }
      for (int i = 0; i < num_cmps; i++)
        delete[] leaf_messages[i];
      delete[] leaf_messages;
    } else { // party == BOB
      // BOB's input digit is the OT choice
      uint8_t *choice = new uint8_t[num_cmps];
      for (int i = 0; i < num_cmps; i++) {
        choice[i] = data[i] & mask;
      }
      if (bitlength > 1) {
        otpack->kkot[bitlength - 1]->recv(res_cmp, choice, num_cmps, 2);
      } else {
        otpack->iknp_straight->recv(res_cmp, choice, num_cmps, 2);
      }
      // Unpack the packed 2-bit OT output: eq share in bit 0, cmp in bit 1
      for (int i = 0; i < num_cmps; i++) {
        res_eq[i] = res_cmp[i] & 1;
        res_cmp[i] >>= 1;
      }
      delete[] choice;
    }
    return;
  }

  // Main entry point: fills res_cmp/res_eq with this party's XOR-shares of
  // (data[i] > other) (or <, per greater_than) and (data[i] == other) for
  // each of the num_cmps inputs.
  void compare_with_eq(uint8_t *res_cmp, uint8_t *res_eq, uint64_t *data,
                       int num_cmps, int bitlength, bool greater_than = true,
                       int radix_base = MILL_PARAM) {
    configure(bitlength, radix_base);
    // Single-OT fast path when the whole input fits in one digit
    if (bitlength <= beta) {
      bitlen_lt_beta(res_cmp, res_eq, data, num_cmps, bitlength, greater_than,
                     radix_base);
      return;
    }
    int old_num_cmps = num_cmps;
    // num_cmps should be a multiple of 8
    // (the AND-tree packs bits 8-per-byte, hence the rounding)
    num_cmps = ceil(num_cmps / 8.0) * 8;

    // padding with 0s if data dim not multiple of 8
    uint64_t *data_ext;
    if (old_num_cmps == num_cmps)
      data_ext = data;
    else {
      data_ext = new uint64_t[num_cmps];
      memcpy(data_ext, data, old_num_cmps * sizeof(uint64_t));
      memset(data_ext + old_num_cmps, 0,
             (num_cmps - old_num_cmps) * sizeof(uint64_t));
    }

    uint8_t *digits;       // num_digits * num_cmps
    uint8_t *leaf_res_cmp; // num_digits * num_cmps
    uint8_t *leaf_res_eq;  // num_digits * num_cmps

    digits = new uint8_t[num_digits * num_cmps];
    leaf_res_cmp = new uint8_t[num_digits * num_cmps];
    leaf_res_eq = new uint8_t[num_digits * num_cmps];

    // Extract radix-digits from data
    for (int i = 0; i < num_digits; i++) // Stored from LSB to MSB
      for (int j = 0; j < num_cmps; j++)
        if ((i == num_digits - 1) && (r != 0))
          // top digit is only r bits wide
          digits[i * num_cmps + j] =
              (uint8_t)(data_ext[j] >> i * beta) & mask_r;
        else
          digits[i * num_cmps + j] =
              (uint8_t)(data_ext[j] >> i * beta) & mask_beta;

    // ======================
    // Set leaf OT messages now
    if (party == sci::ALICE) {
      uint8_t *
          *leaf_ot_messages; // (num_digits * num_cmps) X beta_pow (=2^beta)
      leaf_ot_messages = new uint8_t *[num_digits * num_cmps];
      for (int i = 0; i < num_digits * num_cmps; i++)
        leaf_ot_messages[i] = new uint8_t[beta_pow];

      // Set Leaf OT messages
      // (random bits become ALICE's shares of the per-digit cmp/eq results)
      triple_gen->prg->random_bool((bool *)leaf_res_cmp, num_digits * num_cmps);
      triple_gen->prg->random_bool((bool *)leaf_res_eq, num_digits * num_cmps);

      for (int i = 0; i < num_digits; i++) {
        for (int j = 0; j < num_cmps; j++) {
          if (i == (num_digits - 1) && (r > 0)) {
            // top digit: only 2^r messages
            this->mill->set_leaf_ot_messages(
                leaf_ot_messages[i * num_cmps + j], digits[i * num_cmps + j],
                1ULL << r, leaf_res_cmp[i * num_cmps + j],
                leaf_res_eq[i * num_cmps + j], greater_than);
          } else {
            this->mill->set_leaf_ot_messages(
                leaf_ot_messages[i * num_cmps + j], digits[i * num_cmps + j],
                beta_pow, leaf_res_cmp[i * num_cmps + j],
                leaf_res_eq[i * num_cmps + j], greater_than);
          }
        }
      }

      // Perform Leaf OTs with comparison and equality
      // (the r-bit top digit needs a narrower OT instance)
      if (r == 1) {
        // All branches except r
        otpack->kkot[beta - 1]->send(leaf_ot_messages,
                                     num_cmps * (num_digits - 1), 2);
        // r branch
        otpack->iknp_straight->send(
            leaf_ot_messages + num_cmps * (num_digits - 1), num_cmps, 2);
      } else if (r != 0) {
        // All branches except r
        otpack->kkot[beta - 1]->send(leaf_ot_messages,
                                     num_cmps * (num_digits - 1), 2);
        // r branch
        otpack->kkot[r - 1]->send(
            leaf_ot_messages + num_cmps * (num_digits - 1), num_cmps, 2);
      } else {
        // All branches including r, r is 0
        otpack->kkot[beta - 1]->send(leaf_ot_messages, num_cmps * (num_digits),
                                     2);
      }

      // Cleanup
      for (int i = 0; i < num_digits * num_cmps; i++)
        delete[] leaf_ot_messages[i];
      delete[] leaf_ot_messages;
    } else // party = sci::BOB
    {
      // Perform Leaf OTs (BOB's digits are the OT choices)
      if (r == 1) {
        // All branches except r
        otpack->kkot[beta - 1]->recv(leaf_res_cmp, digits,
                                     num_cmps * (num_digits - 1), 2);
        // r branch
        otpack->iknp_straight->recv(leaf_res_cmp + num_cmps * (num_digits - 1),
                                    digits + num_cmps * (num_digits - 1),
                                    num_cmps, 2);
      } else if (r != 0) {
        // All branches except r
        otpack->kkot[beta - 1]->recv(leaf_res_cmp, digits,
                                     num_cmps * (num_digits - 1), 2);
        // r branch
        otpack->kkot[r - 1]->recv(leaf_res_cmp + num_cmps * (num_digits - 1),
                                  digits + num_cmps * (num_digits - 1),
                                  num_cmps, 2);
      } else {
        // All branches including r, r is 0
        otpack->kkot[beta - 1]->recv(leaf_res_cmp, digits,
                                     num_cmps * (num_digits), 2);
      }

      // Extract equality result from leaf_res_cmp
      // (packed 2-bit OT output: eq share in bit 0, cmp share in bit 1)
      for (int i = 0; i < num_digits * num_cmps; i++) {
        leaf_res_eq[i] = leaf_res_cmp[i] & 1;
        leaf_res_cmp[i] >>= 1;
      }
    }

    // Combine per-digit leaf results into the final shares (in place:
    // slot 0 of leaf_res_cmp/leaf_res_eq holds the result afterwards)
    traverse_and_compute_ANDs(num_cmps, leaf_res_eq, leaf_res_cmp);

    for (int i = 0; i < old_num_cmps; i++) {
      res_cmp[i] = leaf_res_cmp[i];
      res_eq[i] = leaf_res_eq[i];
    }

    // Cleanup
    if (old_num_cmps != num_cmps)
      delete[] data_ext;
    delete[] digits;
    delete[] leaf_res_cmp;
    delete[] leaf_res_eq;
  }

  /**************************************************************************************************
   * AND computation related functions
   **************************************************************************************************/

  // Bottom-up tree combination of the per-digit results, using Beaver-triple
  // based secure ANDs: at distance i, node j absorbs node j+i via
  //   cmp_j = cmp_j XOR (cmp_{j+i} AND' ...)   and   eq_j = eq_j AND eq_{j+i}
  // following the standard e/f opening (AND_step_1) + finalize (AND_step_2)
  // pattern. leaf_res_eq/leaf_res_cmp are updated in place; slot j = 0 holds
  // the final shares.
  void traverse_and_compute_ANDs(int num_cmps, uint8_t *leaf_res_eq,
                                 uint8_t *leaf_res_cmp) {
    Triple triples_corr((num_triples)*num_cmps, true, num_cmps);

    // Generate required Bit-Triples
    triple_gen->generate(party, &triples_corr, _8KKOT);

    // Combine leaf OT results in a bottom-up fashion
    int counter_triples_used = 0, old_counter_triples_used = 0;
    // Bit-packed buffers: one bit per comparison, hence the / 8
    uint8_t *ei = new uint8_t[(num_triples * num_cmps) / 8];
    uint8_t *fi = new uint8_t[(num_triples * num_cmps) / 8];
    uint8_t *e = new uint8_t[(num_triples * num_cmps) / 8];
    uint8_t *f = new uint8_t[(num_triples * num_cmps) / 8];

    for (int i = 1; i < num_digits;
         i *= 2) { // i denotes the distance between 2 nodes which should be
                   // ANDed together
      for (int j = 0; j < num_digits and j + i < num_digits;
           j += 2 * i) { // j=0 is LSD and j=num_digits-1 is MSD
        // CMP_j: Use 1 triple for opening e = a + cmp_j and f = b + eq_j+i.
        this->mill->AND_step_1(
            ei + (2 * counter_triples_used * num_cmps) / 8,
            fi + (2 * counter_triples_used * num_cmps) / 8,
            leaf_res_cmp + j * num_cmps, leaf_res_eq + (j + i) * num_cmps,
            (triples_corr.ai) + (2 * counter_triples_used * num_cmps) / 8,
            (triples_corr.bi) + (2 * counter_triples_used * num_cmps) / 8,
            num_cmps);
        // EQ_j: Use 1 triple for opening e = a + eq_j and f = b + eq_j+i.
        this->mill->AND_step_1(
            ei + ((2 * counter_triples_used + 1) * num_cmps) / 8,
            fi + ((2 * counter_triples_used + 1) * num_cmps) / 8,
            leaf_res_eq + j * num_cmps, leaf_res_eq + (j + i) * num_cmps,
            (triples_corr.ai) + ((2 * counter_triples_used + 1) * num_cmps) / 8,
            (triples_corr.bi) + ((2 * counter_triples_used + 1) * num_cmps) / 8,
            num_cmps);
        counter_triples_used++;
      }

      int offset = (2 * old_counter_triples_used * num_cmps) / 8;
      int size_used =
          (2 * (counter_triples_used - old_counter_triples_used) * num_cmps) /
          8;
      // Both parties send their e/f shares and receive the other party's;
      // the two directions run on separate channels (io / io_rev) in two
      // OpenMP threads so neither side blocks the other.
      // NOTE(review): every send_data/recv_data below is issued twice on
      // the same buffer and size - the second transfer overwrites the
      // first with identical-length data, so the net result is unchanged,
      // but confirm this duplication mirrors the upstream SCI protocol and
      // is not an accidental copy-paste doubling of communication.
#pragma omp parallel num_threads(2)
      {
        if (omp_get_thread_num() == 1) {
          if (party == sci::ALICE) {
            iopack->io_rev->recv_data(e + offset, size_used);
            iopack->io_rev->recv_data(e + offset, size_used);
            iopack->io_rev->recv_data(f + offset, size_used);
            iopack->io_rev->recv_data(f + offset, size_used);
          } else { // party == sci::BOB
            iopack->io_rev->send_data(ei + offset, size_used);
            iopack->io_rev->send_data(ei + offset, size_used);
            iopack->io_rev->send_data(fi + offset, size_used);
            iopack->io_rev->send_data(fi + offset, size_used);
          }
        } else {
          if (party == sci::ALICE) {
            iopack->io->send_data(ei + offset, size_used);
            iopack->io->send_data(ei + offset, size_used);
            iopack->io->send_data(fi + offset, size_used);
            iopack->io->send_data(fi + offset, size_used);
          } else { // party == sci::BOB
            iopack->io->recv_data(e + offset, size_used);
            iopack->io->recv_data(e + offset, size_used);
            iopack->io->recv_data(f + offset, size_used);
            iopack->io->recv_data(f + offset, size_used);
          }
        }
      }

      // Reconstruct e and f (XOR of both parties' shares opens the values)
      for (int i = 0; i < size_used; i++) {
        e[i + offset] ^= ei[i + offset];
        f[i + offset] ^= fi[i + offset];
      }

      // Replay the same triple indices for the finalize step
      counter_triples_used = old_counter_triples_used;

      // Step 2 of AND computation
      for (int j = 0; j < num_digits and j + i < num_digits;
           j += 2 * i) { // j=0 is LSD and j=num_digits-1 is MSD
        // CMP_j: Use 1 triple compute cmp_j AND eq_j+i.
        this->mill->AND_step_2(
            leaf_res_cmp + j * num_cmps,
            e + (2 * counter_triples_used * num_cmps) / 8,
            f + (2 * counter_triples_used * num_cmps) / 8,
            nullptr, // not used in function
            nullptr, // not used in function
            (triples_corr.ai) + (2 * counter_triples_used * num_cmps) / 8,
            (triples_corr.bi) + (2 * counter_triples_used * num_cmps) / 8,
            (triples_corr.ci) + (2 * counter_triples_used * num_cmps) / 8,
            num_cmps);
        // EQ_j: Use 1 triple compute eq_j AND eq_j+i.
        this->mill->AND_step_2(
            leaf_res_eq + j * num_cmps,
            e + ((2 * counter_triples_used + 1) * num_cmps) / 8,
            f + ((2 * counter_triples_used + 1) * num_cmps) / 8,
            nullptr, // not used in function
            nullptr, // not used in function
            (triples_corr.ai) + ((2 * counter_triples_used + 1) * num_cmps) / 8,
            (triples_corr.bi) + ((2 * counter_triples_used + 1) * num_cmps) / 8,
            (triples_corr.ci) + ((2 * counter_triples_used + 1) * num_cmps) / 8,
            num_cmps);
        // Fold the higher node's comparison share into this node
        for (int k = 0; k < num_cmps; k++) {
          leaf_res_cmp[j * num_cmps + k] ^=
              leaf_res_cmp[(j + i) * num_cmps + k];
        }
        counter_triples_used++;
      }
      old_counter_triples_used = counter_triples_used;
    }
    // Every generated triple pair must have been consumed exactly once
    assert(2 * counter_triples_used == num_triples);

    // cleanup
    delete[] ei;
    delete[] fi;
    delete[] e;
    delete[] f;
  }
};
#endif // MILLIONAIRE_WITH_EQ_H__
|
foo.c | /* C.D.Luminate <cdluminate@gmail.com> */
/* MIT LICENCE */
/* you can use ./md5bin to generate this file,
with the redirection function of shell */
#define MD5_FILE_TO_CRACK "hhhh.md5"
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <openssl/md5.h>
#include <omp.h>
/* Brute-forces the 4-character printable-ASCII preimage of an MD5 digest.
 *
 * argv[1] names a file whose first 16 bytes are the raw digest to crack.
 * On a hit the plaintext and a newline are written to stdout and the
 * program exits 0; it exits 1 on usage/open/read errors, and returns 0
 * without output when no 4-character preimage exists.
 *
 * Fixes: write(1, "\n", 2) also wrote the string's NUL terminator (length
 * is now 1); the read() result is checked; the fd is closed; bzero()
 * (removed from POSIX.1-2008) is replaced with memset().
 */
int
main (int argc, char **argv)
{
	if (argc != 2)
		exit (1);
	char md_ans[16];
	memset (md_ans, 0, 16);
	/*char md_ans[16] = { this is md5 of '0000'
	   0x4a, 0x7d, 0x1e, 0xd4,
	   0x14, 0x47, 0x4e, 0x40,
	   0x33, 0xac, 0x29, 0xcc,
	   0xb8, 0x65, 0x3d, 0x9b }; */
	int fd = open (argv[1], O_RDONLY);
	if (fd == -1) {
		perror ("open");
		exit (1);
	}
	/* An MD5 digest is exactly 16 bytes; treat anything shorter as a bad
	 * input file instead of silently comparing against zero padding. */
	if (read (fd, md_ans, 16) != 16) {
		perror ("read");
		close (fd);
		exit (1);
	}
	close (fd);
	char c0 = 0;
	char c1 = 0;
	char c2 = 0;
	char c3 = 0;
	// #pragma omp parallel for num_threads(4) private(c1,c2,c3)
	/* Exhaust every 4-tuple of printable ASCII characters (32..126). */
	for (c0 = 32; c0 < 127; c0++) {
		char c[5];
		memset (c, 0, 5);
		char md[16];
		for (c1 = 32; c1 < 127; c1++) {
			for (c2 = 32; c2 < 127; c2++) {
				for (c3 = 32; c3 < 127; c3++) {
					c[0] = c0;
					c[1] = c1;
					c[2] = c2;
					c[3] = c3;
					MD5 ((const unsigned char *)c, 4, (unsigned char *)md);
					if (memcmp (md, md_ans, 16) == 0) {
						write (1, c, 4);
						/* length was 2, which also emitted the NUL */
						write (1, "\n", 1);
						exit (0);
					}
				}
			}
		}
	}
	return 0;
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Computes *result = *x - *y for two struct timeval values.
 *
 * *y is normalized in place first (microsecond borrow/carry), matching the
 * classic glibc elapsed-time example, so the caller should not rely on y
 * afterwards. Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microsecond field when x's is smaller. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry surplus whole seconds (> 1000000 usec difference) into y. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* With y normalized, the microsecond difference is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Negative overall difference iff x's (adjusted) seconds fall short. */
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
  /*
   * Driver for the order-1 3D 7-point stencil: allocates two
   * (Nz x Ny x Nx) grids (sizes include a +2 halo), seeds plane 0 with
   * pseudo-random values, runs Nt-1 Jacobi-style sweeps TESTS times,
   * and reports each run's time plus (via PRINT_RESULTS) the minimum.
   * Usage: prog Nx Ny Nz Nt
   */
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  /* Bug fix: the original read Nx..Nz only when argc > 3 and Nt only
     when argc > 4, using them uninitialized otherwise.  Require all
     four arguments and fail loudly instead. */
  if (argc < 5) {
    fprintf(stderr, "usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+2;
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);

  /* Two time planes of a ragged 3D array: A[t%2][z][y][x]. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 16;
  tile_size[3] = 128;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  // initialize variables (fixed seed keeps runs comparable)
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  /* num_threads is consumed by the PRINT_RESULTS macro. */
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
              + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                        A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* Bug fix: the original called lowercase min(), which is defined
       nowhere in this file; the macro declared above is MIN. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
  /* for(i=0; i<Nz; i++){
       for(j=0;j<Ny;j++){
         free(A[0][i][j]);
         free(A[1][i][j]);
       }
       free(A[0][i]);
       free(A[1][i]);
     }
     free(A[0]);
     free(A[1]);
  */
  return 0;
}
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
#define BezierQuantum 200
#define PrimitiveExtentPad 2048
#define MaxBezierCoordinates 67108864
#define ThrowPointExpectedException(token,exception) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
"NonconformingDrawingPrimitiveDefinition","`%s'",token); \
status=MagickFalse; \
break; \
}
/*
Typedef declarations.
*/
/*
  An edge: a run of path points prepared for scanline rasterization
  (see ConvertPathToPolygon below, which builds and sorts these).
*/
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;           /* bounding box of the edge's points */

  double
    scanline;         /* set to -1.0 (or 0.0) at build time; presumably the
                         last scanline processed — confirm against renderer */

  PointInfo
    *points;          /* vertices, ordered so y increases (see ReversePoints) */

  size_t
    number_points;

  ssize_t
    direction;        /* 1 when the original path ran downward (y increasing),
                         0 otherwise */

  MagickBooleanType
    ghostline;        /* edge synthesized to close an open subpath; logged as
                         "transparent" */

  size_t
    highwater;        /* initialized to 0; presumably a resume index for the
                         scanline loop — not used in this chunk */
} EdgeInfo;

/*
  An ellipse-like element: center, axes, rotation.
*/
typedef struct _ElementInfo
{
  double
    cx,               /* center x */
    cy,               /* center y */
    major,            /* major-axis length */
    minor,            /* minor-axis length */
    angle;            /* rotation angle */
} ElementInfo;

/*
  Mutable state threaded through the MVG/path tracing routines
  (TracePath and friends declared above).
*/
typedef struct _MVGInfo
{
  PrimitiveInfo
    **primitive_info; /* growable array of primitives being emitted */

  size_t
    *extent;          /* current allocated capacity of *primitive_info */

  ssize_t
    offset;           /* next write position within *primitive_info */

  PointInfo
    point;            /* current point while tracing */

  ExceptionInfo
    *exception;       /* sink for errors raised during tracing */
} MVGInfo;

/*
  A polygon in sorted-edge rendering form.
*/
typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;

  size_t
    number_edges;
} PolygonInfo;

/*
  Vector-path opcodes: MoveTo starts a closed subpath, Open starts an
  unclosed one, Ghostline begins the invisible segment pair that closes
  an open subpath, LineTo extends, End terminates the list.
*/
typedef enum
{
  MoveToCode,
  OpenCode,
  GhostlineCode,
  LineToCode,
  EndCode
} PathInfoCode;

/*
  One vector-path entry: a point plus its opcode.
*/
typedef struct _PathInfo
{
  PointInfo
    point;

  PathInfoCode
    code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);
static ssize_t
TracePath(MVGInfo *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *info;

  /*
    Allocate a DrawInfo and fill it with default values.
  */
  info=(DrawInfo *) AcquireCriticalMemory(sizeof(*info));
  GetDrawInfo((ImageInfo *) NULL,info);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
/*
  Deep-copy draw_info into a freshly defaulted DrawInfo: strings are
  cloned, pattern/mask images are cloned, and the dash-pattern and
  gradient-stop arrays are reallocated and copied so the clone owns all
  of its resources.  When draw_info is NULL, returns defaults only.
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  ExceptionInfo
    *exception;

  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  exception=AcquireExceptionInfo();
  /* Heap-owned strings: clone, never alias. */
  if (draw_info->id != (char *) NULL)
    (void) CloneString(&clone_info->id,draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  /* Plain-value fields: struct assignment suffices. */
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  /* Pattern images: full image clones so lifetimes are independent. */
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t
        x;

      /* The dash pattern is terminated by a (near-)zero entry; x ends
         as the count of significant entries.  The allocation (2*x+2)
         is larger than the x+1 entries copied — presumably headroom
         for later mutation; the tail is zero-filled. */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
        sizeof(*clone_info->dash_pattern));
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        /* NOTE(review): reuses the dash-pattern message for a gradient
           stop failure — looks like a copy/paste artifact upstream. */
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,exception);
  clone_info->render=draw_info->render;
  /* debug reflects the current logging state, not the source's flag. */
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o draw_info: Specifies a pointer to an DrawInfo structure.
%
% o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
  /*
    qsort() comparator for a right-handed coordinate system: order
    edges by first point (y then x), then by the cross product of the
    two leading segments, then by second point (y then x).  The
    original's DrawCompareEdge macro is unrolled into explicit
    sign tests on the same differences.
  */
  register const PointInfo
    *p,
    *q;

  double
    delta;

  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  delta=p[0].y-q[0].y;
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  delta=p[0].x-q[0].x;
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  delta=(p[1].x-p[0].x)*(q[1].y-q[0].y)-(p[1].y-p[0].y)*(q[1].x-q[0].x);
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  delta=p[1].y-q[1].y;
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  delta=p[1].x-q[1].x;
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
register EdgeInfo
*p;
register ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  /*
    Reverse the point array in place with a two-cursor swap.
  */
  register ssize_t
    head,
    tail;

  for (head=0, tail=(ssize_t) number_points-1; head < tail; head++, tail--)
  {
    PointInfo
      swap;

    swap=points[head];
    points[head]=points[tail];
    points[tail]=swap;
  }
}
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
  long
    direction,          /* -1/0/1: y-trend of the segment run in progress */
    next_direction;

  PointInfo
    point,
    *points;            /* vertices of the edge currently being built */

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;             /* x-extent of the current edge (y set on commit) */

  register ssize_t
    i,
    n;                  /* points accumulated in the current edge */

  MagickBooleanType
    ghostline;

  size_t
    edge,               /* index of the next edge slot to fill */
    number_edges,       /* allocated capacity of the edge array */
    number_points;      /* allocated capacity of `points` */

  /*
    Convert a path to the more efficient sorted rendering form.  Each
    monotonic-in-y run of the path becomes one EdgeInfo; edges are
    finally qsort-ed with DrawCompareEdges.

    NOTE(review): every failure path below returns NULL without
    releasing earlier allocations (polygon_info, edges, points) — the
    caller cannot free them; looks like a leak-on-OOM, confirm upstream.
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    return((PolygonInfo *) NULL);
  /* Start with room for 16 edges; doubled on demand below. */
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    return((PolygonInfo *) NULL);
  (void) memset(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) memset(&point,0,sizeof(point));
  (void) memset(&bounds,0,sizeof(bounds));
  /* Initialize edge slot 0 to a benign empty state. */
  polygon_info->edges[edge].number_points=(size_t) n;
  polygon_info->edges[edge].scanline=0.0;
  polygon_info->edges[edge].highwater=0;
  polygon_info->edges[edge].ghostline=ghostline;
  polygon_info->edges[edge].direction=(ssize_t) direction;
  polygon_info->edges[edge].points=points;
  polygon_info->edges[edge].bounds=bounds;
  polygon_info->number_edges=0;
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to: commit the edge in progress (if it has >= 2 points)
          and start a fresh point buffer at this location.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            if (edge == number_edges)
              {
                /* Grow the edge array geometrically. */
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  return((PolygonInfo *) NULL);
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            /* Edges are stored with y increasing; flip upward runs. */
            if (direction < 0)
              ReversePoints(points,(size_t) n);
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;
            ghostline=MagickFalse;
            edge++;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to: if the y-trend reverses, close the current edge and
      begin a new one starting at the last point.
    */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        /* New buffer seeded with the shared turning point. */
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        /* Grow the point buffer geometrically. */
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
      }
    point=path_info[i].point;
    points[n]=point;
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      /* Commit (or discard, when degenerate) the trailing edge. */
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                return((PolygonInfo *) NULL);
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          ghostline=MagickFalse;
          edge++;
        }
    }
  polygon_info->number_edges=edge;
  /* Sort edges for scanline traversal. */
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
  /*
    Dump each vector-path entry (point and opcode name) to the
    draw-event log.
  */
  register const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
  {
    const char
      *verb;

    switch (p->code)
    {
      case GhostlineCode: verb="moveto ghostline"; break;
      case OpenCode: verb="moveto open"; break;
      case MoveToCode: verb="moveto"; break;
      case LineToCode: verb="lineto"; break;
      default: verb="?"; break;
    }
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,verb);
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,              /* first point of the current subpath */
    q;              /* most recently emitted point (for dedup) */

  register ssize_t
    i,
    n;              /* entries written to path_info */

  ssize_t
    coordinates,    /* points remaining in the current subpath */
    start;          /* path_info index of the current subpath's head */

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
    Consecutive duplicate points are dropped, and open subpaths are
    closed with a ghostline pair back to their start.
  */
  switch (primitive_info->primitive)
  {
    /* These primitives have no vector-path representation. */
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /* Count primitives; worst case each point yields up to 3 entries
     (its own entry plus a ghostline close pair) plus the terminator. */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed: rewrite
      the subpath head to OpenCode and append an invisible (ghostline)
      segment from the last point back to the start point p.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  /* Trim the allocation to the entries actually written. */
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
/*
  Release every resource owned by draw_info (strings, pattern and mask
  images, dash pattern, gradient stops), invalidate the signature, free
  the structure itself, and return NULL for caller reassignment.
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  /* Heap-owned strings. */
  if (draw_info->id != (char *) NULL)
    draw_info->id=DestroyString(draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  /* Owned images. */
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  /* Owned arrays. */
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  /* Poison the signature so stale pointers are caught by asserts. */
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
% ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
  const size_t edge)
{
  /*
    Release the point list of edge `edge`, close the gap in the edge
    array with a memmove, and return the new edge count.
  */
  EdgeInfo
    *target;

  assert(edge < polygon_info->number_edges);
  target=polygon_info->edges+edge;
  target->points=(PointInfo *) RelinquishMagickMemory(target->points);
  polygon_info->number_edges--;
  if (edge < polygon_info->number_edges)
    (void) memmove(target,target+1,(size_t)
      (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
  return(polygon_info->number_edges);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  /*
    Free every edge's point list, then the edge array, then the
    structure itself; returns NULL for caller reassignment.
  */
  if (polygon_info->edges != (EdgeInfo *) NULL)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
        polygon_info->edges[i].points=(PointInfo *)
          RelinquishMagickMemory(polygon_info->edges[i].points);
      polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
        polygon_info->edges);
    }
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Clip the horizontal span `edge` on destination row y so that its
  inverse-affine image stays inside the source image's columns and
  rows.  Each of the two linear constraints (columns via sx/ry, rows
  via rx/sy) tightens [x1, x2]; when a constraint is degenerate
  (coefficient ~0) and y's image falls outside the source, the span is
  collapsed (x2 <= x1) to mark it empty.
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  register double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  /* Column constraint: 0 <= sx*x + z < columns, z = ry*y + tx. */
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /* Negative slope: the two bounds swap roles. */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          /* Degenerate in x and out of range: empty span. */
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  /* Row constraint: 0 <= rx*x + z < rows, z = sy*y + ty. */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          /* Degenerate in y and out of range: empty span. */
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}
/*
  InverseAffineMatrix() returns the inverse of the given affine transform:
  the 2x2 linear part is inverted via its adjugate scaled by 1/determinant
  (PerceptibleReciprocal() guards against a singular matrix), and the
  translation is then mapped back through the inverted linear part.
*/
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    reciprocal;

  reciprocal=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
    affine->ry);
  inverse_affine.sx=reciprocal*affine->sy;
  inverse_affine.sy=reciprocal*affine->sx;
  inverse_affine.rx=reciprocal*(-affine->rx);
  inverse_affine.ry=reciprocal*(-affine->ry);
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}
/*
  DrawAffineImage() composites the source image onto the destination through
  the given affine transform.  The destination bounding box is found by
  forward-mapping the source corners; each destination scanline is then
  filled by inverse-mapping pixels back into the source and compositing
  the interpolated color over the destination.

  Fix: removed the local `x_offset`, which was initialized and incremented
  but never read (dead code).
*/
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],
    min,
    max;

  register ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box: forward-map the four source corners.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;

    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image: clamp the box to the destination and walk each
    scanline in parallel.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    /* Clip this scanline against the inverse-mapped source bounds. */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
    {
      /* Inverse-map (x,y) into source coordinates and sample. */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DrawBoundingRectangles() strokes the bounding rectangle of every polygon
  edge (red for edges with non-zero direction, green otherwise) and finally
  the overall bounds in blue.  Debug aid only.  Returns MagickTrue on
  success.
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info,
  ExceptionInfo *exception)
{
  double
    mid;
  DrawInfo
    *clone_info;
  MagickStatusType
    status;
  PointInfo
    end,
    resolution,
    start;
  PrimitiveInfo
    primitive_info[6];
  register ssize_t
    i;
  SegmentInfo
    bounds;
  ssize_t
    coordinates;
  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  /* Semi-transparent black fill so the rectangles don't obscure content. */
  status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  /* Default to 96 DPI unless the draw info carries a density geometry. */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;
      MagickStatusType
        flags;
      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  /* mid = half the effective stroke width, used to inflate each box. */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    clone_info->stroke_width/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /* Union of all edge bounds, inflated by mid and clamped to image. */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      /* One rectangle per edge, colored by edge direction. */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
            exception);
        else
          status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
            exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info,exception);
        if (status == MagickFalse)
          break;
      }
      /* i short of number_edges means the loop broke on failure. */
      if (i < (ssize_t) polygon_info->number_edges)
        {
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /* Overall bounds in blue. */
  status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DrawClipPath() renders the clip path registered as an image artifact under
  `id' and installs the result as the image's write mask.  Returns
  MagickFalse when no such artifact exists or the mask cannot be rendered.
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  const char
    *path;

  Image
    *mask;

  MagickBooleanType
    status;

  path=GetImageArtifact(image,id);
  if (path == (const char *) NULL)
    return(MagickFalse);
  mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,path,exception);
  if (mask == (Image *) NULL)
    return(MagickFalse);
  status=SetImageMask(image,WritePixelMask,mask,exception);
  mask=DestroyImage(mask);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DrawClippingMask() renders the MVG clip path into a fresh canvas, extracts
  its alpha channel, negates it, and returns the result as a grayscale
  clipping mask.  Returns NULL on failure.

  Fix: the mask was destroyed twice when NegateImage() failed — once inside
  the separate_mask branch and again by the trailing failure check, passing
  NULL to DestroyImage() (which asserts on NULL).  Cleanup now happens at a
  single point.
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  Image
    *clip_mask,
    *separate_mask;

  MagickStatusType
    status;

  /*
    Draw a clip path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  /* Start from a fully transparent canvas. */
  status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  status=SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  /* Render the path with opaque white fill and no stroke. */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /* Turn coverage (alpha) into a grayscale mask. */
  separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      clip_mask=DestroyImage(clip_mask);
      clip_mask=separate_mask;
      status=NegateImage(clip_mask,MagickFalse,exception);
    }
  /* Single cleanup point: returns NULL on any earlier failure. */
  if (status == MagickFalse)
    clip_mask=DestroyImage(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DrawCompositeMask() renders the MVG mask path into a fresh canvas,
  extracts its alpha channel, negates it, and returns the result as a
  grayscale composite mask.  Returns NULL on failure.

  Fix: the mask was destroyed twice when NegateImage() failed — once inside
  the separate_mask branch and again by the trailing failure check, passing
  NULL to DestroyImage() (which asserts on NULL).  Cleanup now happens at a
  single point.
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask,
    *separate_mask;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  /*
    Draw a mask path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  /* Start from a fully transparent canvas. */
  status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
    exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  composite_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(composite_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  /* Render the path with opaque white fill and no stroke. */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  status=RenderMVGContent(composite_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /* Turn coverage (alpha) into a grayscale mask. */
  separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      composite_mask=DestroyImage(composite_mask);
      composite_mask=separate_mask;
      status=NegateImage(composite_mask,MagickFalse,exception);
    }
  /* Single cleanup point: returns NULL on any earlier failure. */
  if (status == MagickFalse)
    composite_mask=DestroyImage(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DrawDashPolygon() walks the vertex list of a polygon and emits stroked
  sub-polygons for the "on" segments of the dash pattern, honoring the dash
  offset.  Odd pattern indices (n & 1) are gaps; even indices are drawn.
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  double
    length,
    maximum_length,
    offset,
    scale,
    total_length;
  DrawInfo
    *clone_info;
  MagickStatusType
    status;
  PrimitiveInfo
    *dash_polygon;
  register double
    dx,
    dy;
  register ssize_t
    i;
  size_t
    number_vertices;
  ssize_t
    j,
    n;
  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  /* Count vertices up to the UndefinedPrimitive terminator. */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  /* Scratch polygon: worst case is two points per vertex plus slack. */
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /* Consume the dash offset: advance n/length until offset is used up. */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /* Walk each polygon segment, slicing it by the dash pattern. */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    /* Guard against pathological segment lengths. */
    if (maximum_length > (MaxBezierCoordinates >> 2))
      break;
    if (fabs(length) < MagickEpsilon)
      {
        /* Current dash element exhausted: advance, wrapping at the
           zero-terminated end of the pattern. */
        if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
          n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* Gap element: restart the scratch polygon at the gap's end. */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /* Dash element: close the scratch polygon and stroke it. */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
          if (status == MagickFalse)
            break;
        }
      if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
        n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /* Carry the unused remainder of this element into the next segment. */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  /* Flush a trailing partial dash, nudged by epsilon to ensure it renders. */
  if ((status != MagickFalse) && (total_length < maximum_length) &&
      ((n & 0x01) == 0) && (j > 1))
    {
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetStopColorOffset() maps pixel (x,y) to its position along the gradient.
  For linear gradients this is the (scaled) projection of the pixel onto the
  gradient vector; for radial gradients it is the (angle-corrected) distance
  from the gradient center.
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  switch (gradient->type)
  {
    case UndefinedGradient:
    case LinearGradient:
    {
      const SegmentInfo
        *gradient_vector;

      double
        gamma,
        length,
        offset,
        scale;

      PointInfo
        axis,
        delta;

      /* Project (x,y)-origin onto the gradient axis, normalized by the
         product of the vector lengths. */
      gradient_vector=(&gradient->gradient_vector);
      axis.x=gradient_vector->x2-gradient_vector->x1;
      axis.y=gradient_vector->y2-gradient_vector->y1;
      delta.x=(double) x-gradient_vector->x1;
      delta.y=(double) y-gradient_vector->y1;
      length=sqrt(delta.x*delta.x+delta.y*delta.y);
      gamma=sqrt(axis.x*axis.x+axis.y*axis.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=axis.x*delta.x+axis.y*delta.y;
      offset=gamma*scale*length;
      return(offset);
    }
    case RadialGradient:
    {
      PointInfo
        relative;

      if (gradient->spread == RepeatSpread)
        {
          /* Repeat spread uses the raw Euclidean distance. */
          relative.x=(double) x-gradient->center.x;
          relative.y=(double) y-gradient->center.y;
          return(sqrt(relative.x*relative.x+relative.y*relative.y));
        }
      /* Rotate into the ellipse frame and normalize by the radii. */
      relative.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      relative.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(relative.x*relative.x+relative.y*relative.y));
    }
  }
  return(0.0);
}
/*
  StopInfoCompare() is the qsort() comparator for gradient color stops:
  ascending by offset, with offsets within MagickEpsilon considered equal.
*/
static int StopInfoCompare(const void *x,const void *y)
{
  double
    left,
    right;

  left=((StopInfo *) x)->offset;
  right=((StopInfo *) y)->offset;
  if (left > right)
    return(1);
  if (fabs(left-right) <= MagickEpsilon)
    return(0);
  return(-1);
}
/*
  DrawGradientImage() paints the draw info's linear or radial gradient over
  the gradient's bounding box, compositing each interpolated stop color over
  the existing pixel.  Stops are sorted by offset first; the spread mode
  (pad/reflect/repeat) decides how offsets outside [0,1] wrap.
*/
MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;
  const GradientInfo
    *gradient;
  const SegmentInfo
    *gradient_vector;
  double
    length;
  MagickBooleanType
    status;
  PixelInfo
    zero;
  PointInfo
    point;
  RectangleInfo
    bounding_box;
  ssize_t
    y;
  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /* Stops must be in ascending offset order for interpolation below. */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    double
      alpha,
      offset;
    PixelInfo
      composite,
      pixel;
    register Quantum
      *magick_restrict q;
    register ssize_t
      i,
      x;
    ssize_t
      j;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /* Seed offset for x==gradient origin; updated per-pixel below. */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /* Pad: clamp out-of-range offsets to the first/last stop color. */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /* Linear blend between the two surrounding stops. */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /* Reflect: mirror the offset back and forth across [0,1]. */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          double
            repeat;
          MagickBooleanType
            antialias;
          /* Repeat: tile the gradient; antialias the seam where one
             repetition meets the next. */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /* At the seam, blend last stop back into the first. */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CheckPrimitiveExtent() grows the primitive-info array so at least `pad'
  more entries (plus slack) fit beyond the current offset.  On allocation
  failure or an over-limit request it throws a resource-limit exception,
  replaces the array with a minimal one so callers can unwind safely, and
  returns MagickFalse.
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  double
    extent;
  size_t
    quantum;
  /*
    Check if there is enough storage for drawing primitives.
  */
  /* Compute in double so huge pads cannot overflow size_t arithmetic. */
  extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
  quantum=sizeof(**mvg_info->primitive_info);
  if (((extent*quantum) < (double) SSIZE_MAX) &&
      ((extent*quantum) < (double) GetMaxMemoryRequest()))
    {
      if (extent <= (double) *mvg_info->extent)
        return(MagickTrue);
      *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
        *mvg_info->primitive_info,(size_t) extent,quantum);
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          register ssize_t
            i;
          *mvg_info->extent=(size_t) extent;
          /* Mark the newly added tail as undefined primitives. */
          for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
            (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
      *mvg_info->primitive_info);
  *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
    PrimitiveExtentPad*quantum);
  (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
  *mvg_info->extent=1;
  return(MagickFalse);
}
/*
  GetDrawValue() parses a double from `string' in the C locale, advancing
  *sentinal past the consumed characters.  NaN or magnitudes that would not
  survive conversion to ssize_t are rejected as 0.0 so later coordinate
  arithmetic stays in range.
*/
static inline double GetDrawValue(const char *magick_restrict string,
  char **magick_restrict sentinal)
{
  double
    value;

  value=InterpretLocaleValue(string,sentinal);
  if ((IsNaN(value) != 0) || (value < -(SSIZE_MAX-512.0)) ||
      (value > (SSIZE_MAX-512.0)))
    return(0.0);
  return(value);
}
/*
  MVGMacroCompare() is the splay-tree key comparator for MVG macro names:
  keys are NUL-terminated strings, ordered by strcmp().
*/
static int MVGMacroCompare(const void *target,const void *source)
{
  return(strcmp((const char *) target,(const char *) source));
}
/*
  GetMVGMacros() scans an MVG primitive string for named push/pop blocks
  (e.g. `push graphic-context "wheel" ... pop graphic-context') and returns
  a splay tree mapping each macro name to its body text.  Returns NULL when
  `primitive' is NULL.
*/
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *macro,
    *token;
  const char
    *q;
  size_t
    extent;
  SplayTreeInfo
    *macros;
  /*
    Scan graphic primitives for definitions and classes.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /* Scratch buffers sized to the whole primitive string. */
  macro=AcquireString(primitive);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    if (GetNextToken(q,&q,extent,token) < 1)
      break;
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        register const char
          *end,
          *start;
        (void) GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];
            const char
              *p;
            ssize_t
              n;
            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            (void) GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            /* n tracks push/pop nesting depth inside the macro body. */
            n=1;
            for (p=q; *p != '\0'; )
            {
              if (GetNextToken(p,&p,extent,token) < 1)
                break;
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  /*
                    Extract macro.
                  */
                  /* NOTE(review): CopyMagickString copies end-start-1 bytes
                     plus the NUL terminator — confirm the body is intended
                     to exclude the final character before "pop". */
                  (void) GetNextToken(p,&p,extent,token);
                  (void) CopyMagickString(macro,start,(size_t) (end-start));
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  macro=DestroyString(macro);
  return(macros);
}
/*
  IsPoint() returns MagickTrue when `point' parses as a numeric coordinate:
  either the parser consumed characters or it produced a non-zero value.
*/
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *sentinal;

  double
    value;

  value=GetDrawValue(point,&sentinal);
  if ((sentinal == point) && (fabs(value) < MagickEpsilon))
    return(MagickFalse);
  return(MagickTrue);
}
/*
  TracePoint() records a single-coordinate primitive at the given point and
  marks it as not forming a closed subpath.  Always returns MagickTrue.
*/
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  primitive_info->point=point;
  primitive_info->closed_subpath=MagickFalse;
  primitive_info->coordinates=1;
  return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points,
number_stops;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
StopInfo
*stops;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
if (status == MagickFalse)
return(MagickFalse);
}
if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-') && (depth == 0))
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
else
primitive=AcquireString(draw_info->primitive);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"mvg:vector-graphics",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
(void) memset(&mvg_info,0,sizeof(mvg_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.exception=exception;
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
defsDepth=0;
symbolDepth=0;
cursor=0.0;
macros=GetMVGMacros(primitive);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1)
break;
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
*token='\0';
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.rx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ry=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.tx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
if (LocaleCompare(token,graphic_context[n]->id) == 0)
break;
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if ((mvg_class != (const char *) NULL) && (p > primitive))
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
{
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,
graphic_context[n]->clip_mask,clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
(void) GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(&current);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
(void) GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
(void) GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->fill_alpha*=opacity;
else
graphic_context[n]->fill_alpha=QuantumRange*opacity;
if (graphic_context[n]->fill.alpha != TransparentAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
else
graphic_context[n]->fill.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
(void) GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
(void) GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
(void) GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
(void) GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
(void) GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (IsPoint(token) == MagickFalse)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
graphic_context[n]->kerning=metrics.width*
GetDrawValue(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
status=SetImageMask(image,CompositePixelMask,
graphic_context[n]->composite_mask,exception);
}
break;
}
status=MagickFalse;
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
{
graphic_context[n]->fill_alpha*=opacity;
graphic_context[n]->stroke_alpha*=opacity;
}
else
{
graphic_context[n]->fill_alpha=QuantumRange*opacity;
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
}
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(graphic_context[n]->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageMask(image,WritePixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
segment.x1=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y1=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.x2=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y2=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (LocaleCompare(type,"radial") == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->id,token);
}
break;
}
if (LocaleCompare("mask",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
bounds;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
bounds.x=(ssize_t) ceil(GetDrawValue(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.y=(ssize_t) ceil(GetDrawValue(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(GetDrawValue(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(GetDrawValue(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else
if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
(void) GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
stops[number_stops-1].offset=factor*GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*r;
r=q;
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
(void) GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
(void) GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->stroke_alpha*=opacity;
else
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
if (graphic_context[n]->stroke.alpha != TransparentAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
else
graphic_context[n]->stroke.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
cursor=0.0;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.tx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
cursor=0.0;
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
(void) GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1,exception);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
status=MagickFalse;
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(GetDrawValue(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(GetDrawValue(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(GetDrawValue(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(GetDrawValue(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
(void) GetNextToken(q,&q,extent,token);
point.x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
point.y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
if ((primitive_info[j].primitive == TextPrimitive) ||
(primitive_info[j].primitive == ImagePrimitive))
if (primitive_info[j].text != (char *) NULL)
primitive_info[j].text=DestroyString(primitive_info[j].text);
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(double) (BezierQuantum*primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
(void) GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=GetDrawValue(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0;
if (coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
default:
break;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
double
dx,
dy,
maximum_length;
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
maximum_length=hypot(dx,dy);
if (maximum_length > (MaxBezierCoordinates/100.0))
ThrowPointExpectedException(keyword,exception);
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(&mvg_info,token,exception);
if (coordinates < 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
if (graphic_context[n]->compliance != SVGCompliance)
cursor=0.0;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if (status == 0)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
/*
Sanity check.
*/
status&=CheckPrimitiveExtent(&mvg_info,
ExpandAffine(&graphic_context[n]->affine));
if (status == 0)
break;
status&=CheckPrimitiveExtent(&mvg_info,graphic_context[n]->stroke_width);
if (status == 0)
break;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,graphic_context[n]->clip_mask,
clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
if (stops != (StopInfo *) NULL)
stops=(StopInfo *) RelinquishMagickMemory(stops);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
  ExceptionInfo *exception)
{
  /*
    Public entry point for rendering MVG drawing primitives onto image.
    Delegates to the recursive MVG renderer starting at nesting depth 0.
  */
  MagickBooleanType
    status;

  status=RenderMVGContent(image,draw_info,0,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
% o pattern: the pattern image, allocated and rendered by this method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    key[MagickPathExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *draw_clone;

  ImageInfo
    *pattern_info;

  MagickBooleanType
    status;

  /*
    Render the MVG macro registered under the image artifact "name" into a
    freshly allocated pattern image whose size comes from the
    "<name>-geometry" artifact.  Returns MagickFalse when either artifact
    is missing.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  (void) FormatLocaleString(key,MagickPathExtent,"%s",name);
  path=GetImageArtifact(image,key);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,key);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  /*
    Replace any stale pattern with a fully transparent canvas of the
    requested geometry.
  */
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  pattern_info=AcquireImageInfo();
  pattern_info->size=AcquireString(geometry);
  *pattern=AcquireImage(pattern_info,exception);
  pattern_info=DestroyImageInfo(pattern_info);
  (void) QueryColorCompliance("#00000000",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  /*
    Draw with a clone of draw_info stripped of its own fill/stroke
    patterns; an optional "<name>-type" artifact selects the gradient type.
  */
  draw_clone=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  if (draw_clone->fill_pattern != (Image *) NULL)
    draw_clone->fill_pattern=DestroyImage(draw_clone->fill_pattern);
  if (draw_clone->stroke_pattern != (Image *) NULL)
    draw_clone->stroke_pattern=DestroyImage(draw_clone->stroke_pattern);
  (void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
  type=GetImageArtifact(image,key);
  if (type != (const char *) NULL)
    draw_clone->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&draw_clone->primitive,path);
  status=RenderMVGContent(*pattern,draw_clone,0,exception);
  draw_clone=DestroyDrawInfo(draw_clone);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  register ssize_t
    i;

  ssize_t
    number_threads;

  /*
    Release each per-thread PolygonInfo and then the holding array itself;
    always returns NULL so callers can write polygon_info=Destroy...(...).
  */
  assert(polygon_info != (PolygonInfo **) NULL);
  /*
    Hoist the loop-invariant resource query: the array length was fixed by
    a single GetMagickResourceLimit() call in AcquirePolygonThreadSet(),
    so re-querying on every iteration is wasted work (and would be wrong
    if the limit could change mid-loop).
  */
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (polygon_info[i] != (PolygonInfo *) NULL)
      polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
  polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
  return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(
  const PrimitiveInfo *primitive_info)
{
  PathInfo
    *magick_restrict path_info;

  PolygonInfo
    **polygon_info;

  register ssize_t
    i;

  size_t
    number_threads;

  /*
    Build one PolygonInfo per worker thread so DrawPolygonPrimitive() can
    rasterize scanlines in parallel without sharing mutable edge state.
    Returns NULL on any allocation or conversion failure.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo **) NULL)
    return((PolygonInfo **) NULL);
  (void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
  path_info=ConvertPrimitiveToPath(primitive_info);
  if (path_info == (PathInfo *) NULL)
    return(DestroyPolygonThreadSet(polygon_info));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    polygon_info[i]=ConvertPathToPolygon(path_info);
    if (polygon_info[i] == (PolygonInfo *) NULL)
      {
        /*
          Bug fix: the original code returned here without releasing
          path_info, leaking the intermediate path representation whenever
          ConvertPathToPolygon() failed mid-loop.
        */
        path_info=(PathInfo *) RelinquishMagickMemory(path_info);
        return(DestroyPolygonThreadSet(polygon_info));
      }
  }
  path_info=(PathInfo *) RelinquishMagickMemory(path_info);
  return(polygon_info);
}
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_alpha)
{
  /*
    Return the fill opacity (0.0..1.0) of pixel (x,y) against the polygon's
    edge list and store the stroke opacity in *stroke_alpha.  mid is half
    the stroke width in device space (the caller computes it as
    ExpandAffine(...)*stroke_width/2.0).  fill selects whether a fill
    opacity is wanted at all; fill_rule chooses even-odd vs. non-zero
    winding.  NOTE: "distance" below is a SQUARED distance; comparisons
    are against squared thresholds until sqrt() is taken for the
    antialiasing ramp.
  */
  double
    alpha,
    beta,
    distance,
    subpath_alpha;

  PointInfo
    delta;

  register const PointInfo
    *q;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_alpha=0.0;
  subpath_alpha=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /*
      The loop breaks at the first edge whose top lies below the current
      scanline (edges appear to be processed in y order — the early break
      relies on that ordering).
    */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /*
          Edge lies entirely above this scanline; since scanlines advance
          downward it can never be hit again, so free its points early.
        */
        (void) DestroyEdge(polygon_info,(size_t) j);
        continue;
      }
    /* Skip edges whose (stroke-inflated) x-extent misses this pixel. */
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    /* highwater caches the last segment reached on a prior scanline. */
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      /* beta is the projection of (x,y)-q onto the segment direction. */
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          /* Projection falls before the segment start: distance to q. */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              /* Projection falls past the segment end: distance to q+1. */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /*
                Interior case: squared perpendicular distance via the
                cross product, normalized by the squared segment length.
              */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x)+MagickEpsilon;
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          /* Stroke band: fully opaque within mid-0.5 of the edge, then a
             ramp out to mid+0.5 (all thresholds compared squared). */
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;
              else
                {
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;
          continue;
        }
      /* NOTE(review): redundant — distance > 1.0 already continued above. */
      if (distance > 1.0)
        continue;
      if (fabs(beta) < MagickEpsilon)
        {
          /* beta==0 here means the stroke branch did not compute sqrt. */
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      /* Antialias ramp for the subpath boundary: (d-1)^2 in [0,1). */
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        /* Pixel is entirely right of this edge: count a full crossing. */
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    /* Side-of-segment test decides whether the crossing counts. */
    q=p->points+i-1;
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* Even-odd rule: inside when the winding count is odd. */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    /* Non-zero rule: inside when the winding count is non-zero. */
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_alpha);
}
static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  /*
    Rasterize a polygon (or line/point degenerate case) onto image:
    convert the primitive to per-thread edge lists, clip the bounding box
    to the image, then composite fill and stroke colors scanline by
    scanline (parallelized with OpenMP when available).
  */
  CacheView
    *image_view;

  MagickBooleanType
    fill,
    status;

  double
    mid;

  PolygonInfo
    **magick_restrict polygon_info;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start_y,
    stop_y,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  /* A single coordinate cannot form an edge; nothing to draw. */
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);
  /*
    Compute bounding box.
  */
  polygon_info=AcquirePolygonThreadSet(primitive_info);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
  /* Fill only for flood-fill style methods; otherwise stroke-only. */
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
  /* mid: half the stroke width scaled into device space. */
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  /* Union of all edge bounding boxes (thread 0's copy is representative). */
  bounds=polygon_info[0]->edges[0].bounds;
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  /* Inflate by the stroke half-width plus one pixel of antialias slack. */
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  /* Clamp the box to the image so cache requests stay in range. */
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
      start_y=(ssize_t) ceil(bounds.y1-0.5);
      stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        ssize_t
          start_x,
          stop_x;

        /* status is shared: once any row fails, remaining rows bail out. */
        if (status == MagickFalse)
          continue;
        start_x=(ssize_t) ceil(bounds.x1-0.5);
        stop_x=(ssize_t) floor(bounds.x2+0.5);
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        GetPixelInfo(image,&pixel);
        for ( ; x <= stop_x; x++)
        {
          /* Only the one pixel nearest the primitive's point is painted. */
          if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
              (y == (ssize_t) ceil(primitive_info->point.y-0.5)))
            {
              GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
            }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  start_y=(ssize_t) ceil(bounds.y1-0.5);
  stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      start_x,
      stop_x;

    if (status == MagickFalse)
      continue;
    start_x=(ssize_t) ceil(bounds.x1-0.5);
    stop_x=(ssize_t) floor(bounds.x2+0.5);
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      double
        fill_alpha,
        stroke_alpha;

      PixelInfo
        fill_color,
        stroke_color;

      /*
        Fill and/or stroke.
      */
      /* Each thread uses its own polygon_info[id] (mutable edge cache). */
      fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
        x,y,&stroke_alpha);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* Hard-threshold the coverage when antialiasing is disabled. */
          fill_alpha=fill_alpha > 0.5 ? 1.0 : 0.0;
          stroke_alpha=stroke_alpha > 0.5 ? 1.0 : 0.0;
        }
      /* Composite fill first, then stroke over it. */
      GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
      CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  /*
    Emit a human-readable trace of a primitive to the drawing debug log.
    Scalar primitives (alpha/color/image/point/text) log a single line and
    return; path-like primitives fall through to the loop below, which
    walks the coordinate array subpath by subpath.
  */
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    p,        /* first point of the current subpath */
    point,    /* point currently being logged */
    q;        /* previously logged point (duplicate detection) */

  register ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  /* Round the lead point to the nearest integer pixel for logging. */
  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  /* coordinates counts down within the current subpath; <= 0 starts one. */
  coordinates=0;
  p=primitive_info[0].point;
  q.x=(-1.0);
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        /* Start of a new subpath: remember its first point in p. */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " begin open (%.20g)",(double) coordinates);
        p=point;
      }
    point=primitive_info[i].point;
    /* Flag consecutive points that coincide (within epsilon) as duplicates. */
    if ((fabs(q.x-point.x) >= MagickEpsilon) ||
        (fabs(q.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    /* Subpath exhausted: "last" when the end differs from the start,
       "open" when it closes back onto the first point. */
    if ((fabs(p.x-point.x) >= MagickEpsilon) ||
        (fabs(p.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
        (double) coordinates);
  }
}
/*
  DrawPrimitive() renders a single graphic primitive onto the image.  The
  primitive type selects the renderer: alpha or color point/replace/flood/
  reset fills, compositing an inline or file image, a single point, text
  annotation, or (default case) a general stroked/filled polygon path.

  Returns MagickTrue only if every drawing operation succeeded.
*/
MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickStatusType
    status;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  status=MagickTrue;
  /*
    A gray image cannot hold a non-gray fill or stroke; promote to sRGB.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
       (IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
    status&=SetImageColorspace(image,sRGBColorspace,exception);
  if (draw_info->compliance == SVGCompliance)
    {
      /*
        SVG clip/composite masks apply for the duration of this primitive;
        they are cleared again before returning (see below).
      */
      status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
        exception);
      status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
        exception);
    }
  /*
    Round the primitive origin to the nearest pixel.  Note x and y are
    later reused as sweep coordinates by the whole-image methods.
  */
  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status&=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          register Quantum
            *q;

          /*
            Set the alpha of the single pixel at (x,y).
          */
          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          PixelInfo
            pixel,
            target;

          /*
            Set the alpha of every pixel fuzzily matching the color at
            the seed point.
          */
          status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          ChannelType
            channel_mask;

          PixelInfo
            target;

          status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              /*
                Fill-to-border floods until the border color is reached.
              */
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          /*
            Restrict the flood-fill to the alpha channel only; restore
            the previous channel mask afterwards.
          */
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          (void) SetImageChannelMask(image,channel_mask);
          break;
        }
        case ResetMethod:
        {
          PixelInfo
            pixel;

          /*
            Set the alpha of every pixel in the image.
          */
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ColorPrimitive:
    {
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          register Quantum
            *q;

          /*
            Set the color of the single pixel at (x,y).
          */
          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetPixelInfo(image,&pixel);
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelViaPixelInfo(image,&pixel,q);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          PixelInfo
            pixel,
            target;

          /*
            Recolor every pixel fuzzily matching the color at the seed
            point.
          */
          status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          PixelInfo
            target;

          status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              /*
                Fill-to-border floods until the border color is reached.
              */
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          break;
        }
        case ResetMethod:
        {
          PixelInfo
            pixel;

          /*
            Set the color of every pixel in the image.
          */
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ImagePrimitive:
    {
      AffineMatrix
        affine;

      char
        composite_geometry[MagickPathExtent];

      Image
        *composite_image,
        *composite_images;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      if (primitive_info->text == (char *) NULL)
        break;
      /*
        The text field carries either inline "data:" image data or a
        filename/URL to read.
      */
      clone_info=AcquireImageInfo();
      composite_images=(Image *) NULL;
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_images=ReadInlineImage(clone_info,primitive_info->text,
          exception);
      else
        if (*primitive_info->text != '\0')
          {
            (void) CopyMagickString(clone_info->filename,primitive_info->text,
              MagickPathExtent);
            status&=SetImageInfo(clone_info,0,exception);
            /*
              For http/mpri pseudo-formats keep the full text as filename
              (SetImageInfo may have rewritten it).
            */
            if ((LocaleNCompare(clone_info->magick,"http",4) == 0) ||
                (LocaleCompare(clone_info->magick,"mpri") == 0))
              (void) CopyMagickString(clone_info->filename,primitive_info->text,
                MagickPathExtent);
            composite_images=ReadImage(clone_info,exception);
          }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_images == (Image *) NULL)
        {
          status=MagickFalse;
          break;
        }
      /*
        Composite only the first image of the list.
      */
      composite_image=RemoveFirstImageFromList(&composite_images);
      composite_images=DestroyImageList(composite_images);
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
      y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          /*
            Resize image.
          */
          (void) FormatLocaleString(composite_geometry,MagickPathExtent,
            "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          status&=TransformImage(&composite_image,(char *) NULL,
            composite_geometry,exception);
        }
      if (composite_image->alpha_trait == UndefinedPixelTrait)
        status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
          exception);
      if (draw_info->alpha != OpaqueAlpha)
        status&=SetImageAlpha(composite_image,draw_info->alpha,exception);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MagickPathExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      /*
        Over/src-over composition honors the full affine transform; other
        operators composite at the gravity-adjusted offset only.
      */
      if ((draw_info->compose == OverCompositeOp) ||
          (draw_info->compose == SrcOverCompositeOp))
        status&=DrawAffineImage(image,composite_image,&affine,exception);
      else
        status&=CompositeImage(image,composite_image,draw_info->compose,
          MagickTrue,geometry.x,geometry.y,exception);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      PixelInfo
        fill_color;

      register Quantum
        *q;

      /*
        Composite the fill color over a single in-bounds pixel.
      */
      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (Quantum *) NULL)
        break;
      GetFillColor(draw_info,x,y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,(double)
        GetPixelAlpha(image,q),q);
      status&=SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      char
        geometry[MagickPathExtent];

      DrawInfo
        *clone_info;

      if (primitive_info->text == (char *) NULL)
        break;
      /*
        Delegate text rendering to AnnotateImage() at the primitive's
        point.
      */
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info,exception);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
          (draw_info->stroke.alpha != (Quantum) TransparentAlpha))
        {
          /*
            Draw dash polygon: fill first with stroke disabled, then the
            dashes on top.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawDashPolygon(draw_info,primitive_info,image,exception);
          break;
        }
      /*
        mid is half the stroke width in device space; wide strokes need
        cap/join-aware rendering.
      */
      mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
           (draw_info->stroke_pattern != (Image *) NULL)))
        {
          double
            x,
            y;

          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          closed_path=primitive_info[0].closed_subpath;
          i=(ssize_t) primitive_info[0].coordinates;
          /*
            A subpath whose first and last vertices coincide is closed.
          */
          x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
          y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
          if ((x < MagickEpsilon) && (y < MagickEpsilon))
            closed_path=MagickTrue;
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
              (primitive_info[i].primitive != UndefinedPrimitive))
            {
              status&=DrawPolygonPrimitive(image,draw_info,primitive_info,
                exception);
              break;
            }
          /*
            Fill first (stroke disabled), then the stroke outline.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (draw_info->compliance == SVGCompliance)
    {
      /*
        Clear the SVG masks installed on entry.
      */
      status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
      status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
%      MagickBooleanType DrawStrokePolygon(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  DrawRoundLinecap() renders the circular cap at one endpoint of an open
  stroked subpath by drawing a degenerate four-vertex subpath centered on
  the endpoint with the caller's stroke settings.
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  PrimitiveInfo
    cap_path[5];

  ssize_t
    j;

  /*
    Seed four vertices from the endpoint, then perturb them by a couple
    of epsilons so the path has (vanishingly small) extent.
  */
  for (j=0; j < 4; j++)
    cap_path[j]=primitive_info[0];
  cap_path[0].coordinates=4;
  cap_path[1].point.x+=2.0*MagickEpsilon;
  cap_path[2].point.x+=2.0*MagickEpsilon;
  cap_path[2].point.y+=2.0*MagickEpsilon;
  cap_path[3].point.y+=2.0*MagickEpsilon;
  cap_path[4].primitive=UndefinedPrimitive;  /* sentinel terminates the path */
  return(DrawPolygonPrimitive(image,draw_info,cap_path,exception));
}
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  /*
    The stroke is rendered by filling its traced outline: move the stroke
    color/pattern into the fill of a cloned draw-info and disable that
    clone's own stroke.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  /*
    Walk the subpaths: p advances by each subpath's vertex count.
  */
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;  /* a lone point has no stroke extent */
    stroke_polygon=TraceStrokePolygon(image,draw_info,p);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    q=p+p->coordinates-1;  /* last vertex of this subpath */
    closed_path=p->closed_subpath;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        /*
          Open subpath: cap both endpoints with round caps.
        */
        status&=DrawRoundLinecap(image,draw_info,p,exception);
        status&=DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
/*
  GetAffineMatrix() resets the affine transform to the identity: every
  term is zeroed, then the diagonal scale terms are set to one.
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  (void) memset(affine_matrix,0,sizeof(AffineMatrix));
  affine_matrix->sy=1.0;
  affine_matrix->sx=1.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /*
    Defaults: opaque black fill ("#000F"), fully transparent white stroke
    ("#FFF0").
  */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /*
    Inherit font/density/antialias settings from the image-info clone.
  */
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /*
    Overlay any drawing-related image options.
  */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /*
        Accept either a symbolic weight (e.g. "bold") or a number.
      */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the binomial coefficient n!/(k!*(n-k)!), i.e.
%  "n choose k".
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n:
%
% o k:
%
%
*/
/*
  Permutate() computes the binomial coefficient n!/(k!*(n-k)!) without
  forming full factorials: the terms of n!/k! are multiplied in first,
  then (n-k)! is divided out.
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    result;

  ssize_t
    j;

  result=1.0;
  for (j=k+1; j <= n; j++)
    result*=(double) j;
  for (j=1; j <= (n-k); j++)
    result/=(double) j;
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
/*
  TraceArc() renders an MVG arc as an ellipse segment whose center is the
  midpoint of the start/end chord and whose radii reach from that center
  back to the start point.
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    extent,
    midpoint;

  midpoint.x=0.5*(start.x+end.x);
  midpoint.y=0.5*(start.y+end.y);
  extent.x=fabs(midpoint.x-start.x);
  extent.y=fabs(midpoint.y-start.y);
  return(TraceEllipse(mvg_info,midpoint,extent,degrees));
}
/*
  TraceArcPath() converts an SVG-style elliptical arc (endpoints, radii,
  x-axis rotation angle, large-arc and sweep flags) into a run of cubic
  bezier segments appended at the current mvg_info offset.
*/
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickStatusType
    status;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /*
    Degenerate cases per the SVG arc rules: coincident endpoints reduce
    to a point, a zero radius reduces to a straight line.
  */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  /*
    Work in the rotated frame; scale the radii up if they are too small
    to span the endpoints.
  */
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  if (delta > 1.0)
    {
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  /*
    Of the two candidate centers, the large_arc and sweep flags select
    one via the sign of factor.
  */
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  /*
    Sweep angle theta, normalized to the requested direction.
  */
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /*
    Split into segments of at most a quarter turn; each segment is
    approximated by one cubic bezier.
  */
  arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
    MagickEpsilon))));
  status=MagickTrue;
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /*
      Emit four control points, mapped back out of the rotated unit
      frame; the first continues from the previous segment's endpoint.
    */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;  /* pin the final vertex exactly on the endpoint */
    status&=TraceBezier(mvg_info,4);
    if (status == 0)
      break;
    /*
      TraceBezier may reallocate the primitive buffer; re-resolve p from
      the (possibly moved) base pointer before advancing.
    */
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  if (status == 0)
    return(MagickFalse);
  /*
    Collapse the segments into one primitive record and propagate the
    primitive type back over every generated vertex.
  */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
/*
  TraceBezier() expands the bezier curve defined by the first
  number_coordinates control points at the current mvg_info offset into a
  polyline of short segments written back in place.
*/
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  /*
    Scale the sampling density with the curve's extent: the larger the
    control-point spread, the more segments get generated.  Reject
    extents too large to index.
  */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
  coefficients=(double *) AcquireQuantumMemory(number_coordinates,
    sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    {
      if (points != (PointInfo *) NULL)
        points=(PointInfo *) RelinquishMagickMemory(points);
      if (coefficients != (double *) NULL)
        coefficients=(double *) RelinquishMagickMemory(coefficients);
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  control_points=quantum*number_coordinates;
  /*
    Growing the primitive buffer may move it; re-resolve primitive_info
    after the extent check.
  */
  if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  /*
    Compute bezier points.
  */
  end=primitive_info[number_coordinates-1].point;
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  /*
    Evaluate the Bernstein form at evenly spaced weights in [0,1).
  */
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    if (TracePoint(p,points[i]) == MagickFalse)
      {
        points=(PointInfo *) RelinquishMagickMemory(points);
        coefficients=(double *) RelinquishMagickMemory(coefficients);
        return(MagickFalse);
      }
    p+=p->coordinates;
  }
  /*
    Land exactly on the final control point.
  */
  if (TracePoint(p,end) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
  return(MagickTrue);
}
/*
  TraceCircle() renders a circle as an ellipse with equal radii: the
  radius is the distance from the center (start) to the perimeter point
  (end), swept through a full 360 degrees.
*/
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  double
    delta_x,
    delta_y,
    distance;

  PointInfo
    radii,
    sweep;

  delta_x=end.x-start.x;
  delta_y=end.y-start.y;
  distance=hypot((double) delta_x,(double) delta_y);
  radii.x=(double) distance;
  radii.y=(double) distance;
  sweep.x=0.0;
  sweep.y=360.0;
  return(TraceEllipse(mvg_info,start,radii,sweep));
}
/*
  TraceEllipse() appends a polyline approximating the ellipse (or the
  portion between arc.x and arc.y degrees) centered at center with the
  given radii, at the current mvg_info offset.
*/
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  double
    coordinates,
    delta,
    step,
    x,
    y;

  PointInfo
    angle,
    point;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Ellipses are just short segmented polys.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(MagickTrue);  /* degenerate ellipse: nothing to trace */
  /*
    Choose an angular step small enough for the larger radius.
  */
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
  angle.x=DegreesToRadians(arc.x);
  y=arc.y;
  while (y < arc.x)
    y+=360.0;  /* normalize so the arc sweeps forward */
  angle.y=DegreesToRadians(y);
  /*
    Guard the vertex count before reserving primitive storage.
  */
  coordinates=ceil((angle.y-angle.x)/step+1.0);
  if (coordinates > (double) SSIZE_MAX)
    {
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
    return(MagickFalse);
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    if (TracePoint(p,point) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  /*
    Always land exactly on the terminal angle.
  */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /*
    A sweep whose first and last vertices coincide is a closed subpath.
  */
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  MagickBooleanType
    degenerate;

  /*
    Trace a line segment as two point primitives.  A segment whose endpoints
    coincide (within MagickEpsilon) collapses to a single point primitive.
  */
  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  degenerate=((fabs(start.x-end.x) < MagickEpsilon) &&
    (fabs(start.y-end.y) < MagickEpsilon)) ? MagickTrue : MagickFalse;
  if (degenerate != MagickFalse)
    {
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return(MagickTrue);
    }
  if (TracePoint(primitive_info+1,end) == MagickFalse)
    return(MagickFalse);
  primitive_info[1].primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}
/*
  TracePath(): convert an SVG path data string into a list of point
  primitives appended at mvg_info->offset.  Returns the number of
  coordinates traced, or -1 on error (the exception is annotated by the
  token helpers).

  Fix: the curve commands ('c'/'C', 'q'/'Q', 's'/'S', 't'/'T') wrote their
  control points through (q+i)->point without first reserving space with
  CheckPrimitiveExtent(), unlike every other command; a long run of curve
  commands could therefore write past the primitive-info extent.  Each curve
  case now checks the extent (and re-fetches q, since the buffer may have
  been reallocated) before storing control points.
*/
static ssize_t TracePath(MVGInfo *mvg_info,const char *path,
  ExceptionInfo *exception)
{
  char
    *next_token,
    token[MagickPathExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickBooleanType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc: rx ry x-rotation large-arc-flag sweep-flag x y.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          angle=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          /* lowercase commands are relative to the current point */
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          /* reserve room for the 4 control points before writing them */
          if (CheckPrimitiveExtent(mvg_info,4) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /*
          Horizontal line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to: close out the running subpath's coordinate count and start
          a new subpath at the given point.
        */
        if (mvg_info->offset != subpath_offset)
          {
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          if (i == 0)
            start=point;  /* 'z'/'Z' closes back to this point */
          i++;
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          /* reserve room for the 3 control points before writing them */
          if (CheckPrimitiveExtent(mvg_info,3) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Smooth cubic Bézier curve: first control point is the reflection of
          the previous curve's second control point.
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              /* no previous curve: reflection degenerates to current point */
              points[0]=point;
              points[1]=point;
            }
          /* reserve room for the 4 control points before writing them */
          if (CheckPrimitiveExtent(mvg_info,4) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Smooth quadratic Bézier curve: control point is the reflection of
          the previous curve's control point.
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          /* reserve room for the 3 control points before writing them */
          if (CheckPrimitiveExtent(mvg_info,3) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Vertical line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path: line back to the subpath start and seal the subpath.
        */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
          return(-1);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        if (TracePoint(q,point) == MagickFalse)
          return(-1);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        ThrowPointExpectedException(token,exception);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(-1);
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;  /* multiple subpaths: fill to border */
  }
  q=primitive_info;
  return((ssize_t) number_coordinates);
}
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  PointInfo
    corners[5];

  register PrimitiveInfo
    *q;

  register ssize_t
    k;

  /*
    Trace a rectangle as a closed subpath of five points: the four corners
    visited start -> (start.x,end.y) -> end -> (end.x,start.y), then back to
    the starting corner.
  */
  corners[0]=start;
  corners[1].x=start.x;
  corners[1].y=end.y;
  corners[2]=end;
  corners[3].x=end.x;
  corners[3].y=start.y;
  corners[4]=start;
  q=primitive_info;
  for (k=0; k < 5; k++)
  {
    if (TracePoint(q,corners[k]) == MagickFalse)
      return(MagickFalse);
    q+=q->coordinates;
  }
  primitive_info->coordinates=(size_t) (q-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (k=0; k < (ssize_t) primitive_info->coordinates; k++)
  {
    q->primitive=primitive_info->primitive;
    q--;
  }
  return(MagickTrue);
}
/*
  TraceRoundRectangle(): trace a rectangle spanning start..end whose corners
  are rounded by quarter-ellipse arcs with radii `arc'.  The four corner arcs
  are traced in sequence (each TraceEllipse call advances mvg_info->offset),
  then the path is closed back to the first traced point.  Returns MagickFalse
  if any callee fails (e.g. allocation failure).
*/
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  PointInfo
    degrees,
    point,
    segment;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  ssize_t
    offset;

  /* remember where this shape starts so it can be closed and sealed below */
  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      /* degenerate rectangle: emit an empty primitive */
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return(MagickTrue);
    }
  /* corner radii may not exceed half the rectangle's extent */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /* top-right corner arc: 270..360 degrees */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* bottom-right corner arc: 0..90 degrees */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* bottom-left corner arc: 90..180 degrees */
  point.x=start.x+arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* top-left corner arc: 180..270 degrees */
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* close the path back to the first traced point */
  if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  /* rewind the offset: the whole round rectangle is one primitive */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    distance,
    dx,
    dy;

  register ssize_t
    i;

  ssize_t
    j;

  /*
    Square line cap: push each endpoint of an open stroke outward by `offset'
    along the direction of the first (respectively last) non-degenerate
    segment of the polyline.
  */
  dx=0.0;
  dy=0.0;
  i=1;
  while (i < (ssize_t) number_vertices)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs(dx) >= MagickEpsilon) || (fabs(dy) >= MagickEpsilon))
      break;
    i++;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  distance=hypot(dx,dy);
  primitive_info[0].point.x=primitive_info[i].point.x+
    dx*(distance+offset)/distance;
  primitive_info[0].point.y=primitive_info[i].point.y+
    dy*(distance+offset)/distance;
  /* repeat for the trailing endpoint, scanning backwards */
  j=(ssize_t) number_vertices-2;
  while (j >= 0)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs(dx) >= MagickEpsilon) || (fabs(dy) >= MagickEpsilon))
      break;
    j--;
  }
  distance=hypot(dx,dy);
  primitive_info[number_vertices-1].point.x=primitive_info[j].point.x+
    dx*(distance+offset)/distance;
  primitive_info[number_vertices-1].point.y=primitive_info[j].point.y+
    dy*(distance+offset)/distance;
  return(MagickTrue);
}
/*
  TraceStrokePolygon(): compute the outline polygon of `primitive_info'
  stroked with draw_info's stroke width, line join and line cap.  Two point
  buffers are built in parallel -- stroke_p (one side of the stroke) and
  stroke_q (the other side) -- then concatenated (q reversed) into a single
  closed polygon.  Returns a newly allocated PrimitiveInfo list, or NULL on
  allocation failure.
*/
static PrimitiveInfo *TraceStrokePolygon(const Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
#define MaxStrokePad (6*BezierQuantum+360)
/*
  Grow (or on failure release) the stroke_p/stroke_q buffers so that at least
  pad_p/pad_q more points fit; on any failure all local allocations are
  released and the function returns NULL.
*/
#define CheckPathExtent(pad_p,pad_q) \
{ \
  if ((pad_p) > MaxBezierCoordinates) \
    stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
  else \
    if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \
      { \
        if (~extent_p < (pad_p)) \
          stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
        else \
          { \
            extent_p+=(pad_p); \
            stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \
              MaxStrokePad,sizeof(*stroke_p)); \
          } \
      } \
  if ((pad_q) > MaxBezierCoordinates) \
    stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
  else \
    if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \
      { \
        if (~extent_q < (pad_q)) \
          stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
        else \
          { \
            extent_q+=(pad_q); \
            stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \
              MaxStrokePad,sizeof(*stroke_q)); \
          } \
      } \
  if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \
    { \
      if (stroke_p != (PointInfo *) NULL) \
        stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
      if (stroke_q != (PointInfo *) NULL) \
        stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
      polygon_primitive=(PrimitiveInfo *) \
        RelinquishMagickMemory(polygon_primitive); \
      return((PrimitiveInfo *) NULL); \
    } \
}

  /* a pair of values, one per stroke side (p and q) */
  typedef struct _StrokeSegment
  {
    double
      p,
      q;
  } StrokeSegment;

  double
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  MagickBooleanType
    closed_path;

  PointInfo
    box_p[5],
    box_q[5],
    center,
    offset,
    *stroke_p,
    *stroke_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  register ssize_t
    i;

  size_t
    arc_segments,
    extent_p,
    extent_q,
    number_vertices;

  ssize_t
    j,
    n,
    p,
    q;

  StrokeSegment
    dx = {0.0, 0.0},
    dy = {0.0, 0.0},
    inverse_slope = {0.0, 0.0},
    slope = {0.0, 0.0},
    theta = {0.0, 0.0};

  /*
    Allocate paths.
  */
  number_vertices=primitive_info->coordinates;
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  if (polygon_primitive == (PrimitiveInfo *) NULL)
    return((PrimitiveInfo *) NULL);
  (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
    sizeof(*polygon_primitive));
  /* a path is closed when its first and last points coincide */
  offset.x=primitive_info[number_vertices-1].point.x-primitive_info[0].point.x;
  offset.y=primitive_info[number_vertices-1].point.y-primitive_info[0].point.y;
  closed_path=(fabs(offset.x) < MagickEpsilon) &&
    (fabs(offset.y) < MagickEpsilon) ? MagickTrue : MagickFalse;
  if (((draw_info->linejoin == RoundJoin) ||
       (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
    {
      /* duplicate the second vertex so the closing join is also computed */
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p.
  */
  dx.p=0.0;
  dy.p=0.0;
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    {
      if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
        {
          /*
            Zero length subpath.
          */
          stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
            sizeof(*stroke_polygon));
          stroke_polygon[0]=polygon_primitive[0];
          stroke_polygon[0].coordinates=0;
          polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
            polygon_primitive);
          return(stroke_polygon);
        }
      n=(ssize_t) number_vertices-1L;
    }
  extent_p=2*number_vertices;
  extent_q=2*number_vertices;
  stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad,
    sizeof(*stroke_p));
  stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad,
    sizeof(*stroke_q));
  if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL))
    {
      if (stroke_p != (PointInfo *) NULL)
        stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
      if (stroke_q != (PointInfo *) NULL)
        stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
      polygon_primitive=(PrimitiveInfo *)
        RelinquishMagickMemory(polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  /* near-vertical/near-horizontal segments get a clamped (in)finite slope */
  slope.p=0.0;
  inverse_slope.p=0.0;
  if (fabs(dx.p) < MagickEpsilon)
    {
      if (dx.p >= 0.0)
        slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
      else
        slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
    }
  else
    if (fabs(dy.p) < MagickEpsilon)
      {
        if (dy.p >= 0.0)
          inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      {
        slope.p=dy.p/dx.p;
        inverse_slope.p=(-1.0/slope.p);
      }
  /* mid: half the stroke width in device space */
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
  if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
    (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
  /* offset: perpendicular displacement of the stroke sides from the path */
  offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
  offset.y=(double) (offset.x*inverse_slope.p);
  if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
    {
      box_p[0].x=polygon_primitive[0].point.x-offset.x;
      box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
      box_p[1].x=polygon_primitive[n].point.x-offset.x;
      box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
      box_q[0].x=polygon_primitive[0].point.x+offset.x;
      box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
      box_q[1].x=polygon_primitive[n].point.x+offset.x;
      box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
    }
  else
    {
      box_p[0].x=polygon_primitive[0].point.x+offset.x;
      box_p[0].y=polygon_primitive[0].point.y+offset.y;
      box_p[1].x=polygon_primitive[n].point.x+offset.x;
      box_p[1].y=polygon_primitive[n].point.y+offset.y;
      box_q[0].x=polygon_primitive[0].point.x-offset.x;
      box_q[0].y=polygon_primitive[0].point.y-offset.y;
      box_q[1].x=polygon_primitive[n].point.x-offset.x;
      box_q[1].y=polygon_primitive[n].point.y-offset.y;
    }
  /*
    Create strokes for the line join attribute: bevel, miter, round.
  */
  p=0;
  q=0;
  stroke_q[p++]=box_q[0];
  stroke_p[q++]=box_p[0];
  for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
  {
    /*
      Compute the slope for this line segment, q.
    */
    dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
    dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
    dot_product=dx.q*dx.q+dy.q*dy.q;
    if (dot_product < 0.25)
      continue;  /* skip sub-half-pixel segments */
    slope.q=0.0;
    inverse_slope.q=0.0;
    if (fabs(dx.q) < MagickEpsilon)
      {
        if (dx.q >= 0.0)
          slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      if (fabs(dy.q) < MagickEpsilon)
        {
          if (dy.q >= 0.0)
            inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
          else
            inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
        }
      else
        {
          slope.q=dy.q/dx.q;
          inverse_slope.q=(-1.0/slope.q);
        }
    offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
    offset.y=(double) (offset.x*inverse_slope.q);
    dot_product=dy.q*offset.x-dx.q*offset.y;
    if (dot_product > 0.0)
      {
        box_p[2].x=polygon_primitive[n].point.x-offset.x;
        box_p[2].y=polygon_primitive[n].point.y-offset.y;
        box_p[3].x=polygon_primitive[i].point.x-offset.x;
        box_p[3].y=polygon_primitive[i].point.y-offset.y;
        box_q[2].x=polygon_primitive[n].point.x+offset.x;
        box_q[2].y=polygon_primitive[n].point.y+offset.y;
        box_q[3].x=polygon_primitive[i].point.x+offset.x;
        box_q[3].y=polygon_primitive[i].point.y+offset.y;
      }
    else
      {
        box_p[2].x=polygon_primitive[n].point.x+offset.x;
        box_p[2].y=polygon_primitive[n].point.y+offset.y;
        box_p[3].x=polygon_primitive[i].point.x+offset.x;
        box_p[3].y=polygon_primitive[i].point.y+offset.y;
        box_q[2].x=polygon_primitive[n].point.x-offset.x;
        box_q[2].y=polygon_primitive[n].point.y-offset.y;
        box_q[3].x=polygon_primitive[i].point.x-offset.x;
        box_q[3].y=polygon_primitive[i].point.y-offset.y;
      }
    /* box_p[4]/box_q[4]: intersection of the two offset segment lines */
    if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
      {
        box_p[4]=box_p[1];
        box_q[4]=box_q[1];
      }
    else
      {
        box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
          box_p[3].y)/(slope.p-slope.q));
        box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
        box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
          box_q[3].y)/(slope.p-slope.q));
        box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
      }
    CheckPathExtent(MaxStrokePad,MaxStrokePad);
    /* sign of the cross product selects which side is inside the turn */
    dot_product=dx.q*dy.p-dx.p*dy.q;
    if (dot_product <= 0.0)
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          stroke_q[q++]=box_q[1];
          stroke_q[q++]=box_q[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_p[p++]=box_p[4];
          else
            {
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              stroke_q[q++]=box_q[4];
              stroke_p[p++]=box_p[4];
            }
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_p[p++]=box_p[4];
          else
            {
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          /* approximate the round join with short arc segments */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
          theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
          if (theta.q < theta.p)
            theta.q+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad);
          stroke_q[q].x=box_q[1].x;
          stroke_q[q].y=box_q[1].y;
          q++;
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            stroke_q[q].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            stroke_q[q].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            q++;
          }
          stroke_q[q++]=box_q[2];
          break;
        }
        default:
          break;
      }
    else
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          stroke_p[p++]=box_p[1];
          stroke_p[p++]=box_p[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_q[q++]=box_q[4];
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              stroke_q[q++]=box_q[4];
              stroke_p[p++]=box_p[4];
            }
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_q[q++]=box_q[4];
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
            }
          /* approximate the round join with short arc segments */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
          theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
          if (theta.p < theta.q)
            theta.p+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad);
          stroke_p[p++]=box_p[1];
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            stroke_p[p].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            stroke_p[p].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            p++;
          }
          stroke_p[p++]=box_p[2];
          break;
        }
        default:
          break;
      }
    /* slide the segment window forward: q becomes the new p */
    slope.p=slope.q;
    inverse_slope.p=inverse_slope.q;
    box_p[0]=box_p[2];
    box_p[1]=box_p[3];
    box_q[0]=box_q[2];
    box_q[1]=box_q[3];
    dx.p=dx.q;
    dy.p=dy.q;
    n=i;
  }
  stroke_p[p++]=box_p[1];
  stroke_q[q++]=box_q[1];
  /*
    Trace stroked polygon: stroke_p forward, then stroke_q reversed.
  */
  stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
  if (stroke_polygon != (PrimitiveInfo *) NULL)
    {
      for (i=0; i < (ssize_t) p; i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=stroke_p[i];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[0].point;
          i++;
        }
      for ( ; i < (ssize_t) (p+q+closed_path); i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
          i++;
        }
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[0].point;
      i++;
      stroke_polygon[i].primitive=UndefinedPrimitive;
      stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
    }
  stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
  stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
  polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
  return(stroke_polygon);
}
|
parallel-numthreads.c | // Test if/num_threads clause handling
#include <assert.h>
#include <stdio.h>
#include <omp.h>
int main(void)
{
  const int cond = 0;  /* drives the if() clauses of both parallel regions */

  /*
   * Active region: if(cond==0) evaluates true, so the parallel construct is
   * executed by a team of exactly the 3 threads requested via num_threads(3).
   */
#pragma omp parallel if(cond==0) num_threads(3)
  {
#pragma omp single
    {
      assert (omp_get_num_threads() == 3 );
    }
    printf("Mutual exclusive output 1.\n");
  }
  /*
   * Serialized region: if(cond!=0) evaluates false, so the region runs with a
   * team of one thread and the num_threads(3) request is ignored.
   */
#pragma omp parallel if(cond!=0) num_threads(3)
  {
#pragma omp single
    {
      assert (omp_get_num_threads() == 1 );
    }
    printf("Mutual exclusive output 2.\n");
  }
  return 0;
}
|
neuralClasses.h | #pragma once
#include <iostream>
#include <fstream>
#include <algorithm>
#include <cassert>
#include <cmath>
#include <vector>
#include <boost/unordered_map.hpp>
#include <Eigen/Dense>
#include "maybe_omp.h"
#include "util.h"
#include "graphClasses.h"
#include "USCMatrix.h"
// classes for various kinds of layers
#include "SoftmaxLoss.h"
#include "Activation_function.h"
//#define EIGEN_DONT_PARALLELIZE
//#define EIGEN_DEFAULT_TO_ROW_MAJOR
using namespace std;
namespace nplm
{
// is this cheating?
using Eigen::Matrix;
using Eigen::Array;
using Eigen::MatrixBase;
using Eigen::Dynamic;
typedef boost::unordered_map<int,bool> int_map;
// Element-wise clipping functor: limits a value to the interval [-0.5, 0.5]
// using the same std::max-then-std::min composition as before (preserving
// the exact comparison order and its edge-case behavior).
struct Clipper{
    user_data_t operator() (user_data_t x) const {
        const user_data_t floored = std::max(x, user_data_t(-0.5));
        return std::min(user_data_t(0.5), floored);
        //return(x);
    }
};
// Fully-connected linear layer: output = U * input + b (bias broadcast
// across minibatch columns).  Besides the parameters it owns the buffers
// needed by the three supported trainers: SGD with optional momentum
// (computeGradient), Adagrad (computeGradientAdagrad) and Adadelta
// (computeGradientAdadelta).
// NOTE(review): updates are applied as "param += learning_rate * gradient",
// i.e. gradient *ascent* -- presumably the gradients are of a log-likelihood
// being maximized; confirm against the loss definition in SoftmaxLoss.h.
class Linear_layer
{
    private:
        Matrix<user_data_t,Dynamic,Dynamic> U;                  // weights, n_outputs x n_inputs
        Matrix<user_data_t,Dynamic,Dynamic> U_gradient;
        Matrix<user_data_t,Dynamic,Dynamic> U_velocity;         // SGD momentum accumulator
        Matrix<user_data_t,Dynamic,Dynamic> U_running_gradient; // running squared gradient (Adagrad/Adadelta)
        Matrix<user_data_t,Dynamic,Dynamic> U_running_parameter_update; // running squared update (Adadelta)
        // Biases
        Matrix<user_data_t,Dynamic,1> b;
        Matrix<user_data_t,Dynamic,1> b_velocity;
        Matrix<user_data_t,Dynamic,1> b_running_gradient;
        Matrix<user_data_t,Dynamic,1> b_running_parameter_update;
        Matrix<user_data_t,Dynamic,1> b_gradient;

        friend class model;

    public:
        Linear_layer() { }
        Linear_layer(int rows, int cols) { resize(rows, cols); }

        // Allocate U (rows x cols) and b (rows), zeroing U and the gradient
        // buffers.  NOTE(review): b is resized but not zeroed here; it is
        // expected to be filled by initialize() or read_biases() -- confirm.
        void resize(int rows, int cols)
        {
            U.setZero(rows, cols);
            U_gradient.setZero(rows, cols);
            //U_running_gradient.setZero(rows, cols);
            //U_running_parameter_updates.setZero(rows, cols);
            //U_velocity.setZero(rows, cols);
            b.resize(rows);
            b_gradient.setZero(rows);
            //b_running_gradient.resize(rows);
            //b_velocity.resize(rows);
        }

        // Serialization; the on-disk format is defined by readMatrix/writeMatrix.
        void read_weights(std::ifstream &U_file) { readMatrix(U_file, U); }
        void write_weights(std::ofstream &U_file) { writeMatrix(U, U_file); }
        void read_biases(std::ifstream &b_file) { readMatrix(b_file, b); }
        void write_biases(std::ofstream &b_file) { writeMatrix(b, b_file); }

        // Randomly initialize U and b and allocate whatever accumulators the
        // chosen update rule needs: "ADA" (Adagrad) seeds the running squared
        // gradients with adagrad_epsilon; "ADAD" (Adadelta) zeroes both the
        // running-gradient and running-update buffers.
        template <typename Engine>
        void initialize(Engine &engine,
                        bool init_normal,
                        user_data_t init_range,
                        string &parameter_update, // fixed: was garbled "¶meter_update" (HTML-entity mangling of "&para...")
                        user_data_t adagrad_epsilon)
        {
            if (parameter_update == "ADA") {
                U_running_gradient = Matrix<user_data_t,Dynamic,Dynamic>::Ones(U.rows(),U.cols())*adagrad_epsilon;
                b_running_gradient = Matrix<user_data_t,Dynamic,1>::Ones(b.size())*adagrad_epsilon;
            }
            if (parameter_update == "ADAD") {
                U_running_gradient.setZero(U.rows(),U.cols());
                b_running_gradient.setZero(b.size());
                U_running_parameter_update.setZero(U.rows(),U.cols());
                b_running_parameter_update.setZero(b.size());
            }
            initMatrix(engine, U, init_normal, init_range);
            initBias(engine, b, init_normal, init_range);
        }

        int n_inputs () const { return U.cols(); }
        int n_outputs () const { return U.rows(); }

        // Dense forward pass: output.leftCols(k) = U*input, plus bias per
        // column, where k = input.cols() (output may be wider than the
        // current -- possibly short -- minibatch).
        template <typename DerivedIn, typename DerivedOut>
        void fProp(const MatrixBase<DerivedIn> &input,
                   const MatrixBase<DerivedOut> &output) const
        {
            UNCONST(DerivedOut, output, my_output);
            my_output.leftCols(input.cols()).noalias() = U*input;
            int num_examples = input.cols();
            for (int example = 0; example < num_examples; example++)
            {
                my_output.leftCols(input.cols()).col(example) += b;
            }
        }

        // Sparse input
        template <typename ScalarIn, typename DerivedOut>
        void fProp(const USCMatrix<ScalarIn> &input,
                   const MatrixBase<DerivedOut> &output_const) const
        {
            UNCONST(DerivedOut, output_const, output);
            output.setZero();
            uscgemm(1.0, U, input, output.leftCols(input.cols()));
            // Each column corresponds to a training example. We
            // parallelize the adding of biases per dimension.
            int num_examples = input.cols();
            for (int example = 0; example < num_examples; example++)
            {
                output.leftCols(input.cols()).col(example) += b;
            }
        }

        // Backward pass: propagate the loss gradient through U
        // (output = U^T * input).
        template <typename DerivedGOut, typename DerivedGIn>
        void bProp(const MatrixBase<DerivedGOut> &input,
                   MatrixBase<DerivedGIn> &output) const
        {
            UNCONST(DerivedGIn, output, my_output);
            my_output.noalias() = U.transpose()*input;
        }

        // Plain SGD update (optionally with momentum).  bProp_input is the
        // gradient arriving from the layer above; fProp_input is this
        // layer's forward input.  L2_reg shrinks parameters toward zero.
        template <typename DerivedGOut, typename DerivedIn>
        void computeGradient( const MatrixBase<DerivedGOut> &bProp_input,
                              const MatrixBase<DerivedIn> &fProp_input,
                              user_data_t learning_rate, user_data_t momentum, user_data_t L2_reg)
        {
            U_gradient.noalias() = bProp_input*fProp_input.transpose();
            // get the bias gradient for all dimensions in parallel
            b_gradient = bProp_input.rowwise().sum();
            // This used to be multithreaded, but there was no measureable difference
            if (L2_reg > 0.0)
            {
                U_gradient -= 2*L2_reg*U;
                b_gradient -= 2*L2_reg*b;
            }
            if (momentum > 0.0)
            {
                U_velocity = momentum*U_velocity + U_gradient;
                U += learning_rate * U_velocity;
                b_velocity = momentum*b_velocity + b_gradient;
                b += learning_rate * b_velocity;
            }
            else
            {
                U += learning_rate * U_gradient;
                b += learning_rate * b_gradient;
                /*
                //UPDATE CLIPPING
                U += (learning_rate*U_gradient).array().unaryExpr(Clipper()).matrix();
                b += (learning_rate*b_gradient).array().unaryExpr(Clipper()).matrix();
                //GRADIENT CLIPPING
                //U += learning_rate*(U_gradient.array().unaryExpr(Clipper())).matrix();
                //b += learning_rate*(b_gradient.array().unaryExpr(Clipper())).matrix();
                */
            }
        }

        // Adagrad update: per-parameter learning rates scaled by the inverse
        // square root of the accumulated squared gradients.
        template <typename DerivedGOut, typename DerivedIn>
        void computeGradientAdagrad(const MatrixBase<DerivedGOut> &bProp_input,
                                    const MatrixBase<DerivedIn> &fProp_input,
                                    user_data_t learning_rate,
                                    user_data_t L2_reg)
        {
            U_gradient.noalias() = bProp_input*fProp_input.transpose();
            // get the bias gradient for all dimensions in parallel
            b_gradient.noalias() = bProp_input.rowwise().sum();
            if (L2_reg != 0)
            {
                U_gradient -= 2*L2_reg*U;
                b_gradient -= 2*L2_reg*b;
            }
            // ignore momentum?
            // Columns are independent, so update them in parallel.
            #pragma omp parallel for
            for (int col=0; col<U.cols(); col++) {
                U_running_gradient.col(col) += U_gradient.col(col).array().square().matrix();
                U.col(col) += learning_rate * (U_gradient.col(col).array() /
                              U_running_gradient.col(col).array().sqrt()).matrix();
                /*
                //UPDATE CLIPPING
                U.col(col) += (learning_rate * (U_gradient.col(col).array() / U_running_gradient.col(col).array().sqrt())).
                    unaryExpr(Clipper()).matrix();
                */
            }
            b_running_gradient += b_gradient.array().square().matrix();
            b += learning_rate * (b_gradient.array()/b_running_gradient.array().sqrt()).matrix();
            /*
            //UPDATE CLIPPING
            b += (learning_rate * (b_gradient.array()/b_running_gradient.array().sqrt())).unaryExpr(Clipper()).matrix();
            */
        }

        // Adadelta update (Zeiler 2012): step size is the ratio of the RMS of
        // recent updates to the RMS of recent gradients, both smoothed with
        // `decay` and stabilized with `conditioning_constant`.
        template <typename DerivedGOut, typename DerivedIn>
        void computeGradientAdadelta(const MatrixBase<DerivedGOut> &bProp_input,
                                     const MatrixBase<DerivedIn> &fProp_input,
                                     user_data_t learning_rate,
                                     user_data_t L2_reg,
                                     user_data_t conditioning_constant,
                                     user_data_t decay)
        {
            //cerr<<"decay is "<<decay<<" and conditioning constant is "<<conditioning_constant<<endl;
            U_gradient.noalias() = bProp_input*fProp_input.transpose();
            Array<user_data_t,Dynamic,1> b_current_parameter_update;
            // get the bias gradient for all dimensions in parallel
            b_gradient.noalias() = bProp_input.rowwise().sum();
            if (L2_reg != 0)
            {
                U_gradient -= 2*L2_reg*U;
                b_gradient -= 2*L2_reg*b;
            }
            // ignore momentum?
            //cerr<<"U gradient is "<<U_gradient<<endl;
            // Columns are independent, so update them in parallel.
            #pragma omp parallel for
            for (int col=0; col<U.cols(); col++) {
                Array<user_data_t,Dynamic,1> U_current_parameter_update;
                U_running_gradient.col(col) = decay*U_running_gradient.col(col) +
                    (1-decay)*U_gradient.col(col).array().square().matrix();
                //cerr<<"U running gradient is "<<U_running_gradient.col(col)<<endl;
                //getchar();
                U_current_parameter_update = ((U_running_parameter_update.col(col).array()+conditioning_constant).sqrt()/
                                              (U_running_gradient.col(col).array()+conditioning_constant).sqrt()) *
                                             U_gradient.col(col).array();
                //cerr<<"U current parameter update is "<<U_current_parameter_update<<endl;
                //getchar();
                //update the running parameter update
                U_running_parameter_update.col(col) = decay*U_running_parameter_update.col(col) +
                    (1.-decay)*U_current_parameter_update.square().matrix();
                U.col(col) += learning_rate*U_current_parameter_update.matrix();
            }
            b_running_gradient = decay*b_running_gradient +
                (1.-decay)*b_gradient.array().square().matrix();
            b_current_parameter_update = ((b_running_parameter_update.array()+conditioning_constant).sqrt()/
                                          (b_running_gradient.array()+conditioning_constant).sqrt()) *
                                         b_gradient.array();
            b_running_parameter_update = decay*(b_running_parameter_update) +
                (1.-decay)*b_current_parameter_update.square().matrix();
            b += learning_rate*b_current_parameter_update.matrix();
        }

        // Compute the raw weight gradient only (no update); used by the
        // finite-difference gradient checker.
        template <typename DerivedGOut, typename DerivedIn, typename DerivedGW>
        void computeGradientCheck(const MatrixBase<DerivedGOut> &bProp_input,
                                  const MatrixBase<DerivedIn> &fProp_input,
                                  const MatrixBase<DerivedGW> &gradient) const
        {
            UNCONST(DerivedGW, gradient, my_gradient);
            my_gradient.noalias() = bProp_input*fProp_input.transpose();
        }
};
// Output (softmax) layer over the vocabulary: scores = W*h + b, where W is
// the output word-embedding table (vocab_size x embedding_dim) and h is the
// predicted hidden vector.  W is held by pointer so input and output
// embeddings can share one matrix.  Provides dense versions (full softmax)
// and sparse versions (noise-contrastive / sampled training, via USCMatrix)
// of fProp/bProp and of the SGD, Adagrad and Adadelta updates.
class Output_word_embeddings
{
    private:
        // row-major is better for uscgemm
        //Matrix<user_data_t,Dynamic,Dynamic,Eigen::RowMajor> W;
        // Having W be a pointer to a matrix allows ease of sharing
        // input and output word embeddings
        Matrix<user_data_t,Dynamic,Dynamic,Eigen::RowMajor> *W;
        std::vector<user_data_t> W_data;
        Matrix<user_data_t,Dynamic,1> b; // per-word bias, vocab_size x 1
        Matrix<user_data_t,Dynamic,Dynamic,Eigen::RowMajor> W_running_gradient;
        Matrix<user_data_t,Dynamic,Dynamic,Eigen::RowMajor> W_gradient;
        Matrix<user_data_t,Dynamic,Dynamic,Eigen::RowMajor> W_running_parameter_update;
        Matrix<user_data_t,Dynamic,1> b_running_gradient;
        Matrix<user_data_t,Dynamic,1> b_gradient;
        Matrix<user_data_t,Dynamic,1> b_running_parameter_update;

    public:
        Output_word_embeddings() { }
        Output_word_embeddings(int rows, int cols) { resize(rows, cols); }

        // Zero the shared table and the bias.
        // NOTE(review): dereferences W, so set_W() must have been called
        // first -- confirm all construction paths do so.
        void resize(int rows, int cols)
        {
            W->setZero(rows, cols);
            b.setZero(rows);
        }

        // Point this layer at an externally owned embedding matrix.
        void set_W(Matrix<user_data_t,Dynamic,Dynamic,Eigen::RowMajor> *input_W) {
            W = input_W;
        }

        // Serialization; the on-disk format is defined by readMatrix/writeMatrix.
        void read_weights(std::ifstream &W_file) { readMatrix(W_file, *W); }
        void write_weights(std::ofstream &W_file) { writeMatrix(*W, W_file); }
        void read_biases(std::ifstream &b_file) { readMatrix(b_file, b); }
        void write_biases(std::ofstream &b_file) { writeMatrix(b, b_file); }

        // Randomly initialize W, fill b with init_bias, and allocate the
        // accumulators needed by the chosen update rule ("ADA" = Adagrad,
        // "ADAD" = Adadelta; adagrad_epsilon seeds the Adagrad accumulators).
        template <typename Engine>
        void initialize(Engine &engine,
                        bool init_normal,
                        user_data_t init_range,
                        user_data_t init_bias,
                        string &parameter_update, // fixed: was garbled "¶meter_update" (HTML-entity mangling of "&para...")
                        user_data_t adagrad_epsilon)
        {
            W_gradient.setZero(W->rows(),W->cols());
            b_gradient.setZero(b.size());
            if (parameter_update == "ADA") {
                W_running_gradient = Matrix<user_data_t,Dynamic,Dynamic>::Ones(W->rows(),W->cols())*adagrad_epsilon;
                b_running_gradient = Matrix<user_data_t,Dynamic,1>::Ones(b.size())*adagrad_epsilon;
                //W_gradient.setZero(W->rows(),W->cols());
                //b_gradient.setZero(b.size());
            }
            if (parameter_update == "ADAD") {
                W_running_gradient.setZero(W->rows(),W->cols());
                b_running_gradient.setZero(b.size());
                W_gradient.setZero(W->rows(),W->cols());
                //b_gradient.setZero(b.size());
                //W_running_parameter_update.setZero(W->rows(),W->cols());
                b_running_parameter_update.setZero(b.size());
            }
            initMatrix(engine, *W, init_normal, init_range);
            b.fill(init_bias);
        }

        int n_inputs () const { return W->cols(); }
        int n_outputs () const { return W->rows(); }

        // Dense forward pass: one score per vocabulary word per column.
        template <typename DerivedIn, typename DerivedOut>
        void fProp(const MatrixBase<DerivedIn> &input,
                   const MatrixBase<DerivedOut> &output) const
        {
            UNCONST(DerivedOut, output, my_output);
            my_output = ((*W) * input).colwise() + b;
            /* TODO: without EIGEN_NO_DEBUG - is this a bug?
            ProductBase.h:102: Eigen::ProductBase<Derived, Lhs, Rhs>::ProductBase(const Lhs&
            , const Rhs&) [with Derived = Eigen::GeneralProduct<Eigen::Matrix<user_data_t, -1, -1
            , 1>, Eigen::Matrix<user_data_t, -1, -1>, 5>; Lhs = Eigen::Matrix<user_data_t, -1, -1, 1>;
            Rhs = Eigen::Matrix<user_data_t, -1, -1>]: Assertion `a_lhs.cols() == a_rhs.rows() &
            & "invalid matrix product" && "if you wanted a coeff-wise or a dot product use t
            he respective explicit functions"' failed.
            (gdb) p a_lhs.cols()
            $3 = 50
            (gdb) p a_rhs.rows()
            $4 = 100
            (gdb) p a_lhs.rows()
            $5 = 2
            (gdb) p a_rhs.cols()
            $6 = 1
            from lookup_ngram normalization prop.skip_hidden in neuralNetwork.h:100
            */
        }

        // Sparse output version: only score the sampled words.
        // samples(s, i) is the vocabulary index of sample s for instance i;
        // my_output gets the corresponding scores.
        template <typename DerivedIn, typename DerivedOutI, typename DerivedOutV>
        void fProp(const MatrixBase<DerivedIn> &input,
                   const MatrixBase<DerivedOutI> &samples,
                   const MatrixBase<DerivedOutV> &output) const
        {
            UNCONST(DerivedOutV, output, my_output);
            // Seed each output entry with the sampled word's bias (instances
            // are independent, so parallelize over them).
            #pragma omp parallel for
            for (int instance_id = 0; instance_id < samples.cols(); instance_id++)
            {
                for (int sample_id = 0; sample_id < samples.rows(); sample_id++)
                {
                    my_output(sample_id, instance_id) = b(samples(sample_id, instance_id));
                }
            }
            USCMatrix<user_data_t> sparse_output(W->rows(), samples, my_output);
            uscgemm_masked(1.0, *W, input, sparse_output);
            my_output = sparse_output.values; // too bad, so much copying
        }

        // Return single element of output matrix: the score of `word` for
        // minibatch column `instance`.
        template <typename DerivedIn>
        user_data_t fProp(const MatrixBase<DerivedIn> &input,
                          int word,
                          int instance) const
        {
            return W->row(word).dot(input.col(instance)) + b(word);
        }

        // Dense versions (for log-likelihood loss)
        template <typename DerivedGOut, typename DerivedGIn>
        void bProp(const MatrixBase<DerivedGOut> &input_bProp_matrix,
                   const MatrixBase<DerivedGIn> &bProp_matrix) const
        {
            // W is vocab_size x output_embedding_dimension
            // input_bProp_matrix is vocab_size x minibatch_size
            // bProp_matrix is output_embedding_dimension x minibatch_size
            UNCONST(DerivedGIn, bProp_matrix, my_bProp_matrix);
            my_bProp_matrix.leftCols(input_bProp_matrix.cols()).noalias() =
                W->transpose() * input_bProp_matrix;
        }

        // Dense SGD update of W and b from the output-layer gradient.
        template <typename DerivedIn, typename DerivedGOut>
        void computeGradient(const MatrixBase<DerivedIn> &predicted_embeddings,
                             const MatrixBase<DerivedGOut> &bProp_input,
                             user_data_t learning_rate,
                             user_data_t momentum) //not sure if we want to use momentum here
        {
            // W is vocab_size x output_embedding_dimension
            // b is vocab_size x 1
            // predicted_embeddings is output_embedding_dimension x minibatch_size
            // bProp_input is vocab_size x minibatch_size
            W->noalias() += learning_rate * bProp_input * predicted_embeddings.transpose();
            b += learning_rate * bProp_input.rowwise().sum();
            /*
            //GRADIENT CLIPPING
            W->noalias() += learning_rate *
                ((bProp_input * predicted_embeddings.transpose()).array().unaryExpr(Clipper())).matrix();
            b += learning_rate * (bProp_input.rowwise().sum().array().unaryExpr(Clipper())).matrix();
            //UPDATE CLIPPING
            W->noalias() += (learning_rate *
                (bProp_input * predicted_embeddings.transpose())).array().unaryExpr(Clipper()).matrix();
            b += (learning_rate * (bProp_input.rowwise().sum())).array().unaryExpr(Clipper()).matrix();
            */
        }

        // Dense Adagrad update.
        template <typename DerivedIn, typename DerivedGOut>
        void computeGradientAdagrad(
            const MatrixBase<DerivedIn> &predicted_embeddings,
            const MatrixBase<DerivedGOut> &bProp_input,
            user_data_t learning_rate) //not sure if we want to use momentum here
        {
            // W is vocab_size x output_embedding_dimension
            // b is vocab_size x 1
            // predicted_embeddings is output_embedding_dimension x minibatch_size
            // bProp_input is vocab_size x minibatch_sizea
            W_gradient.setZero(W->rows(), W->cols());
            b_gradient.setZero(b.size());
            W_gradient.noalias() = bProp_input * predicted_embeddings.transpose();
            b_gradient.noalias() = bProp_input.rowwise().sum();
            W_running_gradient += W_gradient.array().square().matrix();
            b_running_gradient += b_gradient.array().square().matrix();
            W->noalias() += learning_rate * (W_gradient.array()/W_running_gradient.array().sqrt()).matrix();
            b += learning_rate * (b_gradient.array()/b_running_gradient.array().sqrt()).matrix();
            /*
            //UPDATE CLIPPING
            *W += (learning_rate * (W_gradient.array()/W_running_gradient.array().sqrt())).unaryExpr(Clipper()).matrix();
            b += (learning_rate * (b_gradient.array()/b_running_gradient.array().sqrt())).unaryExpr(Clipper()).matrix();
            */
        }

        // Dense Adadelta update (RMS(update)/RMS(gradient) step rule).
        template <typename DerivedIn, typename DerivedGOut>
        void computeGradientAdadelta(const MatrixBase<DerivedIn> &predicted_embeddings,
                                     const MatrixBase<DerivedGOut> &bProp_input,
                                     user_data_t learning_rate,
                                     user_data_t conditioning_constant,
                                     user_data_t decay) //not sure if we want to use momentum here
        {
            // W is vocab_size x output_embedding_dimension
            // b is vocab_size x 1
            // predicted_embeddings is output_embedding_dimension x minibatch_size
            // bProp_input is vocab_size x minibatch_size
            Array<user_data_t,Dynamic,Dynamic> W_current_parameter_update;
            Array<user_data_t,Dynamic,1> b_current_parameter_update;
            W_gradient.setZero(W->rows(), W->cols());
            b_gradient.setZero(b.size());
            W_gradient.noalias() = bProp_input * predicted_embeddings.transpose();
            b_gradient.noalias() = bProp_input.rowwise().sum();
            W_running_gradient = decay*W_running_gradient +
                (1.-decay)*W_gradient.array().square().matrix();
            b_running_gradient = decay*b_running_gradient+
                (1.-decay)*b_gradient.array().square().matrix();
            W_current_parameter_update = ((W_running_parameter_update.array()+conditioning_constant).sqrt()/
                                          (W_running_gradient.array()+conditioning_constant).sqrt())*
                                         W_gradient.array();
            b_current_parameter_update = ((b_running_parameter_update.array()+conditioning_constant).sqrt()/
                                          (b_running_gradient.array()+conditioning_constant).sqrt())*
                                         b_gradient.array();
            W_running_parameter_update = decay*W_running_parameter_update +
                (1.-decay)*W_current_parameter_update.square().matrix();
            b_running_parameter_update = decay*b_running_parameter_update +
                (1.-decay)*b_current_parameter_update.square().matrix();
            *W += learning_rate*W_current_parameter_update.matrix();
            b += learning_rate*b_current_parameter_update.matrix();
        }

        // Sparse versions
        template <typename DerivedGOutI, typename DerivedGOutV, typename DerivedGIn>
        void bProp(const MatrixBase<DerivedGOutI> &samples,
                   const MatrixBase<DerivedGOutV> &weights,
                   const MatrixBase<DerivedGIn> &bProp_matrix) const
        {
            UNCONST(DerivedGIn, bProp_matrix, my_bProp_matrix);
            my_bProp_matrix.setZero();
            uscgemm(1.0,
                    W->transpose(),
                    USCMatrix<user_data_t>(W->rows(), samples, weights),
                    my_bProp_matrix.leftCols(samples.cols())); // narrow bProp_matrix for possible short minibatch
        }

        // Sparse SGD update: only rows of W (and entries of b) touched by
        // the sampled words receive gradient mass.
        template <typename DerivedIn, typename DerivedGOutI, typename DerivedGOutV>
        void computeGradient(const MatrixBase<DerivedIn> &predicted_embeddings,
                             const MatrixBase<DerivedGOutI> &samples,
                             const MatrixBase<DerivedGOutV> &weights,
                             user_data_t learning_rate, user_data_t momentum) //not sure if we want to use momentum here
        {
            //cerr<<"in gradient"<<endl;
            USCMatrix<user_data_t> gradient_output(W->rows(), samples, weights);
            uscgemm(learning_rate,
                    gradient_output,
                    predicted_embeddings.leftCols(gradient_output.cols()).transpose(),
                    *W); // narrow predicted_embeddings for possible short minibatch
            uscgemv(learning_rate,
                    gradient_output,
                    Matrix<user_data_t,Dynamic,1>::Ones(gradient_output.cols()),
                    b);
            /*
            //IN ORDER TO IMPLEMENT CLIPPING, WE HAVE TO COMPUTE THE GRADIENT
            //FIRST
            USCMatrix<user_data_t> gradient_output(W->rows(), samples, weights);
            uscgemm(1.0,
                    gradient_output,
                    predicted_embeddings.leftCols(samples.cols()).transpose(),
                    W_gradient);
            uscgemv(1.0,
                    gradient_output,
                    Matrix<user_data_t,Dynamic,1>::Ones(weights.cols()),
                    b_gradient);
            int_map update_map; //stores all the parameters that have been updated
            for (int sample_id=0; sample_id<samples.rows(); sample_id++)
                for (int train_id=0; train_id<samples.cols(); train_id++)
                    update_map[samples(sample_id, train_id)] = 1;
            // Convert to std::vector for parallelization
            std::vector<int> update_items;
            for (int_map::iterator it = update_map.begin(); it != update_map.end(); ++it)
                update_items.push_back(it->first);
            int num_items = update_items.size();
            //#pragma omp parallel for
            for (int item_id=0; item_id<num_items; item_id++)
            {
                int update_item = update_items[item_id];
                //W->row(update_item) += learning_rate * W_gradient.row(update_item);
                //b(update_item) += learning_rate * b_gradient(update_item);
                //UPDATE CLIPPING
                W->row(update_item) += (learning_rate * W_gradient.row(update_item)).array().unaryExpr(Clipper()).matrix();
                user_data_t update = learning_rate * b_gradient(update_item);
                b(update_item) += std::min(0.5, std::max(update,-0.5));
                //GRADIENT CLIPPING
                W_gradient.row(update_item).setZero();
                b_gradient(update_item) = 0.;
            }
            */
            //cerr<<"Finished gradient"<<endl;
        }

        // Sparse Adagrad update: accumulate the sparse gradient into
        // W_gradient/b_gradient, then update only the touched rows and
        // re-zero their gradient slots for the next minibatch.
        template <typename DerivedIn, typename DerivedGOutI, typename DerivedGOutV>
        void computeGradientAdagrad(const MatrixBase<DerivedIn> &predicted_embeddings,
                                    const MatrixBase<DerivedGOutI> &samples,
                                    const MatrixBase<DerivedGOutV> &weights,
                                    user_data_t learning_rate) //not sure if we want to use momentum here
        {
            //W_gradient.setZero(W->rows(), W->cols());
            //b_gradient.setZero(b.size());
            //FOR CLIPPING, WE DO NOT MULTIPLY THE GRADIENT WITH THE LEARNING RATE
            USCMatrix<user_data_t> gradient_output(W->rows(), samples, weights);
            uscgemm(1.0,
                    gradient_output,
                    predicted_embeddings.leftCols(samples.cols()).transpose(),
                    W_gradient);
            uscgemv(1.0,
                    gradient_output,
                    Matrix<user_data_t,Dynamic,1>::Ones(weights.cols()),
                    b_gradient);
            int_map update_map; //stores all the parameters that have been updated
            for (int sample_id=0; sample_id<samples.rows(); sample_id++)
                for (int train_id=0; train_id<samples.cols(); train_id++)
                    update_map[samples(sample_id, train_id)] = 1;
            // Convert to std::vector for parallelization
            std::vector<int> update_items;
            for (int_map::iterator it = update_map.begin(); it != update_map.end(); ++it)
                update_items.push_back(it->first);
            int num_items = update_items.size();
            //#pragma omp parallel for
            for (int item_id=0; item_id<num_items; item_id++)
            {
                int update_item = update_items[item_id];
                W_running_gradient.row(update_item) += W_gradient.row(update_item).array().square().matrix();
                b_running_gradient(update_item) += b_gradient(update_item) * b_gradient(update_item);
                W->row(update_item) += learning_rate * (W_gradient.row(update_item).array() / W_running_gradient.row(update_item).array().sqrt()).matrix();
                b(update_item) += learning_rate * b_gradient(update_item) / sqrt(b_running_gradient(update_item));
                /*
                //UPDATE CLIPPING
                W->row(update_item) += (learning_rate * (W_gradient.row(update_item).array() / W_running_gradient.row(update_item).array().sqrt())).unaryExpr(Clipper()).matrix();
                user_data_t update = learning_rate * b_gradient(update_item) / sqrt(b_running_gradient(update_item));
                b(update_item) += Clipper(update);//std::min(0.5, std::max(update,-0.5));
                */
                W_gradient.row(update_item).setZero();
                b_gradient(update_item) = 0.;
            }
        }

        // Sparse Adadelta update over only the touched vocabulary rows.
        template <typename DerivedIn, typename DerivedGOutI, typename DerivedGOutV>
        void computeGradientAdadelta(const MatrixBase<DerivedIn> &predicted_embeddings,
                                     const MatrixBase<DerivedGOutI> &samples,
                                     const MatrixBase<DerivedGOutV> &weights,
                                     user_data_t learning_rate,
                                     user_data_t conditioning_constant,
                                     user_data_t decay) //not sure if we want to use momentum here
        {
            //cerr<<"decay is "<<decay<<" and constant is "<<conditioning_constant<<endl;
            //W_gradient.setZero(W->rows(), W->cols());
            //b_gradient.setZero(b.size());
            USCMatrix<user_data_t> gradient_output(W->rows(), samples, weights);
            uscgemm(1.0,
                    gradient_output,
                    predicted_embeddings.leftCols(samples.cols()).transpose(),
                    W_gradient);
            uscgemv(1.0,
                    gradient_output,
                    Matrix<user_data_t,Dynamic,1>::Ones(weights.cols()),
                    b_gradient);
            int_map update_map; //stores all the parameters that have been updated
            for (int sample_id=0; sample_id<samples.rows(); sample_id++)
                for (int train_id=0; train_id<samples.cols(); train_id++)
                    update_map[samples(sample_id, train_id)] = 1;
            // Convert to std::vector for parallelization
            std::vector<int> update_items;
            for (int_map::iterator it = update_map.begin(); it != update_map.end(); ++it)
                update_items.push_back(it->first);
            int num_items = update_items.size();
            // Touched rows are distinct, so they can be updated in parallel.
            #pragma omp parallel for
            for (int item_id=0; item_id<num_items; item_id++)
            {
                Array<user_data_t,1,Dynamic> W_current_parameter_update;
                user_data_t b_current_parameter_update;
                int update_item = update_items[item_id];
                W_running_gradient.row(update_item) = decay*W_running_gradient.row(update_item)+
                    (1.-decay)*W_gradient.row(update_item).array().square().matrix();
                b_running_gradient(update_item) = decay*b_running_gradient(update_item)+
                    (1.-decay)*b_gradient(update_item)*b_gradient(update_item);
                //cerr<<"Output: W gradient is "<<W_gradient.row(update_item)<<endl;
                //getchar();
                //cerr<<"Output: W running gradient is "<<W_running_gradient.row(update_item)<<endl;
                //getchar();
                W_current_parameter_update = ((W_running_parameter_update.row(update_item).array()+conditioning_constant).sqrt()/
                                              (W_running_gradient.row(update_item).array()+conditioning_constant).sqrt())*
                                             W_gradient.row(update_item).array();
                b_current_parameter_update = (sqrt(b_running_parameter_update(update_item)+conditioning_constant)/
                                              sqrt(b_running_gradient(update_item)+conditioning_constant))*
                                             b_gradient(update_item);
                //cerr<<"Output: W current parameter update is "<<W_current_parameter_update<<endl;
                //getchar();
                //cerr<<"Output: W running parameter update before is "<<W_running_parameter_update.row(update_item)<<endl;
                //getchar();
                //cerr<<"the second term is "<<(1.-decay)*W_current_parameter_update.square().matrix()<<endl;
                W_running_parameter_update.row(update_item) = decay*W_running_parameter_update.row(update_item)+
                    (1.-decay)*(W_current_parameter_update.square().matrix());
                b_running_parameter_update(update_item) = decay*b_running_parameter_update(update_item)+
                    (1.-decay)*b_current_parameter_update*b_current_parameter_update;
                //cerr<<"Output: W running parameter update is "<<W_running_parameter_update.row(update_item)<<endl;
                //getchar();
                W->row(update_item) += learning_rate*W_current_parameter_update.matrix();
                b(update_item) += learning_rate*b_current_parameter_update;
                W_gradient.row(update_item).setZero();
                b_gradient(update_item) = 0.;
            }
        }

        // Compute (but do not apply) the sparse gradient of W and b; used by
        // the finite-difference gradient checker.
        template <typename DerivedIn, typename DerivedGOutI, typename DerivedGOutV, typename DerivedGW, typename DerivedGb>
        void computeGradientCheck(const MatrixBase<DerivedIn> &predicted_embeddings,
                                  const MatrixBase<DerivedGOutI> &samples,
                                  const MatrixBase<DerivedGOutV> &weights,
                                  const MatrixBase<DerivedGW> &gradient_W,
                                  const MatrixBase<DerivedGb> &gradient_b) const
        {
            UNCONST(DerivedGW, gradient_W, my_gradient_W);
            UNCONST(DerivedGb, gradient_b, my_gradient_b);
            my_gradient_W.setZero();
            my_gradient_b.setZero();
            USCMatrix<user_data_t> gradient_output(W->rows(), samples, weights);
            uscgemm(1.0,
                    gradient_output,
                    predicted_embeddings.leftCols(samples.cols()).transpose(),
                    my_gradient_W);
            uscgemv(1.0, gradient_output,
                    Matrix<user_data_t,Dynamic,1>::Ones(weights.cols()), my_gradient_b);
        }
};
class Input_word_embeddings
{
private:
Matrix<user_data_t,Dynamic,Dynamic,Eigen::RowMajor> *W;
int context_size, vocab_size;
Matrix<user_data_t,Dynamic,Dynamic,Eigen::RowMajor> W_running_gradient;
Matrix<user_data_t,Dynamic,Dynamic,Eigen::RowMajor> W_running_parameter_update;
Matrix<user_data_t,Dynamic,Dynamic,Eigen::RowMajor> W_gradient;
friend class model;
public:
// Default: no vocabulary/context until resize() is called.
// NOTE(review): W is left uninitialized here; it must be set via set_W()
// before any member that dereferences it (resize, fProp, ...) -- confirm.
Input_word_embeddings() : context_size(0), vocab_size(0) { }
// Convenience constructor: sizes the shared embedding table immediately
// (W must already point at a matrix; see note above).
Input_word_embeddings(int rows, int cols, int context) { resize(rows, cols, context); }
// Point this layer at an externally owned embedding matrix (enables
// sharing one table between input and output embeddings).
void set_W(Matrix<user_data_t,Dynamic,Dynamic,Eigen::RowMajor> *input_W) {
W = input_W;
}
// Record the vocabulary size (rows) and context width, then zero the
// shared rows x cols embedding table.  W must already have been supplied
// through set_W().
void resize(int rows, int cols, int context)
{
    vocab_size = rows;
    context_size = context;
    W->setZero(rows, cols);
}
// Zero a single word's embedding row.
void zero(int output_id)
{
W->row(output_id).setZero();
}
// Serialization of the embedding table (format defined by readMatrix/writeMatrix).
void read(std::ifstream &W_file) { readMatrix(W_file, *W); }
void write(std::ofstream &W_file) { writeMatrix(*W, W_file); }
template <typename Engine>
void initialize(Engine &engine,
bool init_normal,
user_data_t init_range,
string ¶meter_update,
user_data_t adagrad_epsilon)
{
W_gradient.setZero(W->rows(),W->cols());
if (parameter_update == "ADA") {
W_running_gradient = Matrix<user_data_t,Dynamic,Dynamic>::Ones(W->rows(),W->cols())*adagrad_epsilon;
//W_gradient.setZero(W->rows(),W->cols());
}
if (parameter_update == "ADAD") {
W_running_gradient.setZero(W->rows(),W->cols());
//W_gradient.setZero(W->rows(),W->cols());
W_running_parameter_update.setZero(W->rows(),W->cols());
}
initMatrix(engine,
*W,
init_normal,
init_range);
}
// -1 marks "no fixed input width" for this layer (it consumes word indices).
int n_inputs() const { return -1; }
// One embedding of W->cols() dimensions per context position.
int n_outputs() const { return W->cols() * context_size; }
// set output_id's embedding to the weighted average of all embeddings
// Replace output_id's embedding with the dist-weighted average of every
// other word's embedding (dist.prob(i) supplies word i's weight).
template <typename Dist>
void average(const Dist &dist, int output_id)
{
    W->row(output_id).setZero();
    for (int row = 0; row < W->rows(); ++row)
    {
        if (row == output_id)
            continue;
        W->row(output_id) += dist.prob(row) * W->row(row);
    }
}
// Forward pass: look up and stack the embedding of each context word.
// For each of the context_size positions, gathers W rows via a sparse
// one-hot product (uscgemm) into the corresponding block of output.
// NOTE(review): the active code treats input.middleRows(ngram, 1) as a row
// of word indices (one row per context position), which conflicts with the
// "ngram_size*vocab_size" comment inherited from the dense version below --
// confirm the caller passes an index matrix, not one-hot columns.
template <typename DerivedIn, typename DerivedOut>
void fProp(const MatrixBase<DerivedIn> &input,
const MatrixBase<DerivedOut> &output) const
{
int embedding_dimension = W->cols();
// W is vocab_size x embedding_dimension
// input is ngram_size*vocab_size x minibatch_size
// output is ngram_size*embedding_dimension x minibatch_size
/*
// Dense version:
for (int ngram=0; ngram<context_size; ngram++)
output.middleRows(ngram*embedding_dimension, embedding_dimension) = W.transpose() * input.middleRows(ngram*vocab_size, vocab_size);
*/
UNCONST(DerivedOut, output, my_output);
my_output.setZero();
for (int ngram=0; ngram<context_size; ngram++)
{
// input might be narrower than expected due to a short minibatch,
// so narrow output to match
uscgemm(1.0,
W->transpose(),
USCMatrix<user_data_t>(W->rows(),input.middleRows(ngram, 1),Matrix<user_data_t,1,Dynamic>::Ones(input.cols())),
my_output.block(ngram*embedding_dimension, 0, embedding_dimension, input.cols()));
}
}
// When model is premultiplied, this layer doesn't get used,
// but this method is used to get the input into a sparse matrix.
// Hopefully this can get eliminated someday
// Convert a matrix of word indices (one row per context position) into the
// sparse one-hot USCMatrix form: each index is offset by i*vocab_size so
// every context position occupies its own vocab-sized band, and all
// nonzero values are 1.
template <typename DerivedIn, typename ScalarOut>
void munge(const MatrixBase<DerivedIn> &input, USCMatrix<ScalarOut> &output) const
{
output.resize(vocab_size*context_size, context_size, input.cols());
for (int i=0; i < context_size; i++)
output.indexes.row(i).array() = input.row(i).array() + i*vocab_size;
output.values.fill(1.0);
}
// Plain SGD update of the embedding table: for each context position,
// scatter-add learning_rate * (gradient block)^T into the W rows selected
// by that position's word indices (sparse one-hot uscgemm).
// momentum and L2_reg are accepted but unused by the active code path.
template <typename DerivedGOut, typename DerivedIn>
void computeGradient(const MatrixBase<DerivedGOut> &bProp_input,
const MatrixBase<DerivedIn> &input_words,
user_data_t learning_rate, user_data_t momentum, user_data_t L2_reg)
{
int embedding_dimension = W->cols();
// W is vocab_size x embedding_dimension
// input is ngram_size*vocab_size x minibatch_size
// bProp_input is ngram_size*embedding_dimension x minibatch_size
/*
// Dense version:
for (int ngram=0; ngram<context_size; ngram++)
W += learning_rate * input_words.middleRows(ngram*vocab_size, vocab_size) * bProp_input.middleRows(ngram*embedding_dimension, embedding_dimension).transpose()
*/
// Active path: one sparse scatter-add per context position.
for (int ngram=0; ngram<context_size; ngram++)
{
uscgemm(learning_rate,
USCMatrix<user_data_t>(W->rows(), input_words.middleRows(ngram, 1), Matrix<user_data_t,1,Dynamic>::Ones(input_words.cols())),
bProp_input.block(ngram*embedding_dimension,0,embedding_dimension,input_words.cols()).transpose(),
*W);
}
/*
//IF WE WANT TO DO GRADIENT CLIPPING, THEN WE FIRST COMPUTE THE GRADIENT AND THEN
//PERFORM CLIPPING WHILE UPDATING
for (int ngram=0; ngram<context_size; ngram++)
{
uscgemm(1.0,
USCMatrix<user_data_t>(W->rows(),input_words.middleRows(ngram, 1),Matrix<user_data_t,1,Dynamic>::Ones(input_words.cols())),
bProp_input.block(ngram*embedding_dimension, 0, embedding_dimension, input_words.cols()).transpose(),
W_gradient);
}
int_map update_map; //stores all the parameters that have been updated
for (int ngram=0; ngram<context_size; ngram++)
{
for (int train_id=0; train_id<input_words.cols(); train_id++)
{
update_map[input_words(ngram,train_id)] = 1;
}
}
// Convert to std::vector for parallelization
std::vector<int> update_items;
for (int_map::iterator it = update_map.begin(); it != update_map.end(); ++it)
{
update_items.push_back(it->first);
}
int num_items = update_items.size();
#pragma omp parallel for
for (int item_id=0; item_id<num_items; item_id++)
{
int update_item = update_items[item_id];
//UPDATE CLIPPING
W->row(update_item) += (learning_rate*
W_gradient.row(update_item).array().unaryExpr(Clipper())).matrix();
//GRADIENT CLIPPING
//W->row(update_item) += learning_rate*
// W_gradient.row(update_item).array().unaryExpr(Clipper()).matrix();
//SETTING THE GRADIENT TO ZERO
W_gradient.row(update_item).setZero();
}
*/
}
// Adagrad update of the embedding table: accumulate the sparse gradient
// into W_gradient, collect the set of touched vocabulary rows, then update
// each touched row with a per-element step scaled by the inverse sqrt of
// its running squared gradient, and re-zero that row's gradient slot.
// L2_reg is accepted but unused here.
template <typename DerivedGOut, typename DerivedIn>
void computeGradientAdagrad(const MatrixBase<DerivedGOut> &bProp_input,
const MatrixBase<DerivedIn> &input_words,
user_data_t learning_rate,
user_data_t L2_reg)
{
int embedding_dimension = W->cols();
//W_gradient.setZero(W->rows(), W->cols());
/*
if (W_running_gradient.rows() != W->rows() || W_running_gradient.cols() != W->cols())
W_running_gradient = Ones(W->rows(), W->cols())*adagrad_epsilon;
*/
// Scatter-add each context position's gradient block into W_gradient.
for (int ngram=0; ngram<context_size; ngram++)
{
uscgemm(1.0,
USCMatrix<user_data_t>(W->rows(),input_words.middleRows(ngram, 1),Matrix<user_data_t,1,Dynamic>::Ones(input_words.cols())),
bProp_input.block(ngram*embedding_dimension, 0, embedding_dimension, input_words.cols()).transpose(),
W_gradient);
}
// Deduplicate the word indices so each touched row is updated once.
int_map update_map; //stores all the parameters that have been updated
for (int ngram=0; ngram<context_size; ngram++)
{
for (int train_id=0; train_id<input_words.cols(); train_id++)
{
update_map[input_words(ngram,train_id)] = 1;
}
}
// Convert to std::vector for parallelization
std::vector<int> update_items;
for (int_map::iterator it = update_map.begin(); it != update_map.end(); ++it)
{
update_items.push_back(it->first);
}
int num_items = update_items.size();
// Touched rows are distinct, so they can be updated in parallel.
#pragma omp parallel for
for (int item_id=0; item_id<num_items; item_id++)
{
int update_item = update_items[item_id];
W_running_gradient.row(update_item) += W_gradient.row(update_item).array().square().matrix();
W->row(update_item) += learning_rate *
(W_gradient.row(update_item).array() / W_running_gradient.row(update_item).array().sqrt()).matrix();
/*
//UPDATE CLIPPING
W->row(update_item) += (learning_rate *
(W_gradient.row(update_item).array() / W_running_gradient.row(update_item).array().sqrt()))
.unaryExpr(Clipper()).matrix();
*/
W_gradient.row(update_item).setZero();
}
}
// Adadelta update for the input embedding matrix W.
// Maintains exponentially decayed running averages of the squared gradients
// (W_running_gradient) and of the squared parameter updates
// (W_running_parameter_update), with `decay` as the averaging factor and
// `conditioning_constant` as the epsilon inside both square roots:
//   update = sqrt(E[dx^2]+eps) / sqrt(E[g^2]+eps) * grad
// NOTE(review): the update is additionally scaled by learning_rate, which the
// textbook Adadelta rule omits (it would be 1.0) -- confirm intended.
// NOTE(review): L2_reg is accepted but not used in this implementation.
template <typename DerivedGOut, typename DerivedIn>
void computeGradientAdadelta(const MatrixBase<DerivedGOut> &bProp_input,
                             const MatrixBase<DerivedIn> &input_words,
                             user_data_t learning_rate,
                             user_data_t L2_reg,
                             user_data_t conditioning_constant,
                             user_data_t decay)
{
    int embedding_dimension = W->cols();
    //W_gradient.setZero(W->rows(), W->cols());
    /*
    if (W_running_gradient.rows() != W->rows() || W_running_gradient.cols() != W->cols())
        W_running_gradient = Ones(W->rows(), W->cols())*adagrad_epsilon;
    */
    // Scatter-accumulate the dense back-propagated block for each context
    // position into the (sparse set of) word rows of W_gradient.
    for (int ngram=0; ngram<context_size; ngram++)
    {
        uscgemm(1.0,
                USCMatrix<user_data_t>(W->rows(),input_words.middleRows(ngram, 1),Matrix<user_data_t,1,Dynamic>::Ones(input_words.cols())),
                bProp_input.block(ngram*embedding_dimension, 0, embedding_dimension, input_words.cols()).transpose(),
                W_gradient);
    }
    int_map update_map; //stores all the parameters that have been updated
    // Collect the set of distinct word ids touched in this minibatch.
    for (int ngram=0; ngram<context_size; ngram++)
    {
        for (int train_id=0; train_id<input_words.cols(); train_id++)
        {
            update_map[input_words(ngram,train_id)] = 1;
        }
    }
    // Convert to std::vector for parallelization
    std::vector<int> update_items;
    for (int_map::iterator it = update_map.begin(); it != update_map.end(); ++it)
    {
        update_items.push_back(it->first);
    }
    int num_items = update_items.size();
    // Distinct rows per iteration, so threads never write the same row.
    #pragma omp parallel for
    for (int item_id=0; item_id<num_items; item_id++)
    {
        Array<user_data_t,1,Dynamic> W_current_parameter_update;
        int update_item = update_items[item_id];
        // E[g^2] <- decay*E[g^2] + (1-decay)*g^2
        W_running_gradient.row(update_item) = decay*W_running_gradient.row(update_item)+
            (1.-decay)*W_gradient.row(update_item).array().square().matrix();
        // dx = sqrt(E[dx^2]+eps)/sqrt(E[g^2]+eps) * g
        W_current_parameter_update = ((W_running_parameter_update.row(update_item).array()+conditioning_constant).sqrt()/
            (W_running_gradient.row(update_item).array()+conditioning_constant).sqrt())*
            W_gradient.row(update_item).array();
        //cerr<<"Input: W current parameter update is "<<W_current_parameter_update<<endl;
        //getchar();
        // E[dx^2] <- decay*E[dx^2] + (1-decay)*dx^2
        W_running_parameter_update.row(update_item) = decay*W_running_parameter_update.row(update_item)+
            (1.-decay)*W_current_parameter_update.square().matrix();
        W->row(update_item) += learning_rate*W_current_parameter_update.matrix();
        //cerr<<"Input: After update, W is "<<W->row(update_item)<<endl;
        //getchar();
        // Reset this row's accumulator for the next minibatch.
        W_gradient.row(update_item).setZero();
    }
}
// Recompute the raw (unclipped, unscaled) gradient of the input embedding
// matrix into the caller-supplied `gradient` buffer.  Used for
// finite-difference gradient checking; x and minibatch_size are unused here.
template <typename DerivedGOut, typename DerivedIn, typename DerivedGW>
void computeGradientCheck(const MatrixBase<DerivedGOut> &bProp_input,
                          const MatrixBase<DerivedIn> &input_words,
                          int x, int minibatch_size,
                          const MatrixBase<DerivedGW> &gradient) const //not sure if we want to use momentum here
{
    UNCONST(DerivedGW, gradient, my_gradient);
    const int dim = W->cols();
    my_gradient.setZero();
    // One sparse scatter-accumulate per context position: the dense block of
    // back-propagated values for position `pos` lands on the rows of the
    // gradient indexed by that position's word ids.
    for (int pos = 0; pos < context_size; ++pos)
    {
        uscgemm(1.0,
                USCMatrix<user_data_t>(W->rows(), input_words.middleRows(pos, 1), Matrix<user_data_t,1,Dynamic>::Ones(input_words.cols())),
                bProp_input.block(pos*dim, 0, dim, input_words.cols()).transpose(),
                my_gradient);
    }
}
};
} // namespace nplm
|
common.h | /* * MIT License
*
* © ESI Group, 2015
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
*
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
*
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef LIBPVKERNEL_RUSH_TESTS_COMMON_H
#define LIBPVKERNEL_RUSH_TESTS_COMMON_H
#include "test-env.h"
#include "helpers.h"

#include <pvkernel/rush/PVExtractor.h>
#include <pvkernel/rush/PVFileDescription.h>
#include <pvkernel/rush/PVFormat.h>
#include <pvkernel/rush/PVInputDescription.h>
#include <pvkernel/rush/PVInputFile.h>
#include <pvkernel/rush/PVPluginsLoad.h>
#include <pvkernel/rush/PVSourceCreator.h>
#include <pvkernel/rush/PVTests.h>
#include <pvkernel/rush/PVNraw.h>
#include <pvkernel/rush/PVUnicodeSource.h>
#include <pvkernel/filter/PVChunkFilterByElt.h>
#include <pvkernel/filter/PVPluginsLoad.h>

#include <QCoreApplication>

#include <chrono>
#include <cstdio>
#include <fstream>
#include <functional>
#include <iostream>
#include <memory>
#include <sstream>
#include <stdexcept>
#include <string>
#include <tuple>
#include <vector>

#include <omp.h>
namespace pvtest
{
/**
 * Get a tmp filename not already in use.
 *
 * @warning The returned name can be claimed by another process between this
 * call and your creation of the file (inherent tmpnam() race).
 */
// Return a temporary filename that does not currently exist.
// `inline` is required: this function is defined in a header and would
// otherwise violate the ODR when the header is included in several TUs.
// Throws std::runtime_error if the C library cannot generate a name
// (tmpnam() returns NULL on failure; the original code passed that NULL
// straight into std::string, which is undefined behavior).
inline std::string get_tmp_filename()
{
	char buffer[L_tmpnam];
	const char* name = tmpnam(buffer);
	if (name == nullptr) {
		throw std::runtime_error("tmpnam() failed to generate a temporary filename");
	}
	return name;
}
/**
* Prepare Context to run tests.
*
* * Set environment variables
* * Prepare QCoreApplication
* * Load plugins
* * Init cpu features
*/
// Prepare the test context: environment variables and plugin discovery.
// `inline` is required: defined in a header, so a non-inline definition
// would break the ODR when included from more than one TU.
// NOTE(review): the QCoreApplication is destroyed when this function
// returns; it only needs to exist while plugin paths are resolved --
// confirm nothing later relies on a live application object.
inline void init_ctxt()
{
	// Need this core application to find plugins path.
	std::string prog_name = "test_pvkernel_rush";
	char* arg = const_cast<char*>(prog_name.c_str());
	int argc = 1;
	QCoreApplication app(argc, &arg);

	init_env();
}
/**
 * Duplicate the input log `dup` times and return the path of the new file
 * containing the duplicated data.
 */
// Concatenate `dup` copies of log_file into a fresh temporary file and
// return its path.  For dup == 1 the original path is returned unchanged
// (no copy is made, so the caller must not delete it).
// `inline` fixes the ODR violation of defining a non-inline function in a
// header.  Throws std::runtime_error when the input cannot be opened: the
// original silently produced an empty output file in that case.
inline std::string duplicate_log_file(std::string const& log_file, size_t dup)
{
	if (dup == 1) {
		return log_file;
	}

	std::ifstream ifs(log_file);
	if (!ifs) {
		throw std::runtime_error("unable to open log file: " + log_file);
	}
	std::string content{std::istreambuf_iterator<char>(ifs), std::istreambuf_iterator<char>()};

	std::string big_log_file = get_tmp_filename();
	std::ofstream big_file(big_log_file);

	// Duplicate file content `dup` times (dup == 0 yields an empty file).
	for (size_t i = 0; i < dup; i++) {
		big_file << content;
	}

	return big_log_file;
}
/**
 * Create sources and a splitter to be read for splitting.
 *
 * Useful to check splitter behavior.
 */
class TestSplitter
{
  public:
	// When log_file is empty no source is created; call reset() later.
	TestSplitter(std::string const& log_file = "", size_t dup = 1)
	{
		init_ctxt();
		if (log_file.size() != 0) {
			reset(log_file, dup);
		}
	}

	// Remove the duplicated file if reset() created one.
	~TestSplitter()
	{
		if (_need_cleanup)
			std::remove(_big_file_path.c_str());
	}

	/**
	 * Run the given element filter over every chunk of the source.
	 *
	 * @return (number of original elements, number of valid elements,
	 *          path of the CSV dump of the filtered chunks).
	 * @throws std::runtime_error if no source has been set up.
	 */
	std::tuple<size_t, size_t, std::string>
	run_normalization(PVFilter::PVChunkFilterByElt const& flt_f)
	{
		if (_source.get() == nullptr) {
			throw std::runtime_error("source not created");
		}

		std::string output_file = get_tmp_filename();

		// Extract source and split fields.
		std::ofstream ofs(output_file);

		size_t nelts_org = 0;
		size_t nelts_valid = 0;
		double duration = 0.;

		// Drain the source first so the chunks can be filtered in parallel.
		std::vector<PVCore::PVTextChunk*> _chunks;
		while (PVCore::PVTextChunk* pc = (*_source.get())()) {
			_chunks.push_back(pc);
		}

#pragma omp parallel reduction(+ : nelts_org, nelts_valid) reduction(max : duration)
		{
			std::ostringstream oss;
			double local_duration = 0.;
#pragma omp for nowait
			for (auto it = _chunks.begin(); it < _chunks.end(); ++it) {
				PVCore::PVTextChunk* pc = *it;
				// Time only the filter call itself.
				auto start = std::chrono::steady_clock::now();
				flt_f(pc);
				std::chrono::duration<double> dur(std::chrono::steady_clock::now() - start);
				local_duration += dur.count();

				size_t no = 0;
				size_t nv = 0;
				pc->get_elts_stat(no, nv);
				nelts_org += no;
				nelts_valid += nv;

				dump_chunk_csv(*pc, oss);
				pc->free();
			}
			// max-reduction keeps the slowest thread's accumulated time.
			duration = local_duration;
			// Write per-thread buffers in thread order for a deterministic file.
#pragma omp for ordered
			for (int i = 0; i < omp_get_num_threads(); i++) {
#pragma omp ordered
				ofs << oss.str();
			}
		}

		std::cout << duration;
		return std::make_tuple(nelts_org, nelts_valid, output_file);
	}

	// (Re)build the source from log_file, duplicated `dup` times.
	void reset(std::string const& log_file, size_t dup = 1)
	{
		_big_file_path = duplicate_log_file(log_file, dup);
		_source.reset(new PVRush::PVUnicodeSource<>(
		    std::make_shared<PVRush::PVInputFile>(_big_file_path.c_str()), chunk_size));
		_need_cleanup = dup > 1;
	}

  private:
	static constexpr size_t chunk_size = 6000;

	std::string _big_file_path;
	std::unique_ptr<PVRush::PVUnicodeSource<>> _source;
	// Initialized in-class: the destructor read this uninitialized when the
	// object was constructed with an empty log_file (reset() never ran).
	bool _need_cleanup = false;
};
/**
* Create and save context for a view creation.
*
* * Required when we want to work with NRaw content
*/
class TestEnv
{
  public:
	/**
	 * Initialize Inspector internal until pipeline is ready to process inputs.
	 *
	 * NRaw is not loaded, it has to be done with the load_data methods.
	 */
	TestEnv(std::string const& log_file, std::string const& format_file, size_t dup = 1)
	    : TestEnv(std::vector<std::string>(1, log_file), format_file, dup)
	{
	}

	TestEnv(std::vector<std::string> const& log_files,
	        std::string const& format_file,
	        size_t dup = 1)
	{
		init_ctxt();
		reset(log_files, format_file, dup);
	}

	// Run the extraction pipeline, filling the NRaw starting at line `begin`.
	void load_data(size_t begin = 0)
	{
		PVRush::PVExtractor ext(_format, *_nraw_output.get(), _sc_file, _list_inputs);

		PVRush::PVControllerJob_p job = ext.process_from_agg_nlines(begin);
		job->wait_end();
	}

	/**
	 * Clean up duplicated file when it is over.
	 */
	~TestEnv()
	{
		if (_need_cleanup)
			std::remove(_big_file_path.c_str());
	}

	// Rebuild format, inputs and source creator; only the first input may be
	// duplicated (dup > 1 with several inputs is rejected).
	void
	reset(std::vector<std::string> const& log_files, std::string const& format_file, size_t dup = 1)
	{
		_format = PVRush::PVFormat("format", QString::fromStdString(format_file));
		_nraw_output.reset(new PVRush::PVNrawOutput(_nraw));

		_big_file_path = duplicate_log_file(log_files[0], dup);
		_need_cleanup = (dup > 1);

		if (dup != 1 and log_files.size() > 1) {
			throw std::runtime_error("We don't handle multiple input with duplication");
		}

		_list_inputs << PVRush::PVInputDescription_p(
		    new PVRush::PVFileDescription(QString::fromStdString(_big_file_path)));
		for (size_t i = 1; i < log_files.size(); i++) {
			// Input file
			QString path_file = QString::fromStdString(log_files[i]);
			_list_inputs << PVRush::PVInputDescription_p(new PVRush::PVFileDescription(path_file));
		}

		// Get the source creator
		if (!PVRush::PVTests::get_file_sc(_list_inputs.front(), _format, _sc_file)) {
			throw std::runtime_error("Can't get sources.");
		}
	}

	/**
	 * Get number of row in the imported NRaw.
	 */
	size_t get_nraw_size() const { return _nraw.row_count(); }

	PVRush::PVFormat _format;
	PVRush::PVNraw _nraw;
	std::unique_ptr<PVRush::PVNrawOutput> _nraw_output;
	QList<std::shared_ptr<PVRush::PVInputDescription>> _list_inputs;
	PVRush::PVSourceCreator_p _sc_file;

  private:
	std::string _big_file_path;
	// Defensive in-class init so the destructor is safe even if a future
	// constructor path skips reset().
	bool _need_cleanup = false;
};
} // namespace pvtest
#endif
|
par_relax.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Relaxation scheme
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "Common.h"
#include "_hypre_lapack.h"
#include "par_relax.h"
/*--------------------------------------------------------------------------
* hypre_BoomerAMGRelax
*--------------------------------------------------------------------------*/
/* Dispatch one relaxation sweep of the requested type on u, using f as the
 * right-hand side.  Vtemp/Ztemp are workspace vectors; l1_norms (optional)
 * replace the diagonal for the l1 variants; relax_points selects C points,
 * F points, or all points (0).  Returns 0 on success, or the error code of
 * a direct solve. */
HYPRE_Int
hypre_BoomerAMGRelax( hypre_ParCSRMatrix *A,
                      hypre_ParVector    *f,
                      HYPRE_Int          *cf_marker,
                      HYPRE_Int           relax_type,
                      HYPRE_Int           relax_points,
                      HYPRE_Real          relax_weight,
                      HYPRE_Real          omega,
                      HYPRE_Real         *l1_norms,
                      hypre_ParVector    *u,
                      hypre_ParVector    *Vtemp,
                      hypre_ParVector    *Ztemp )
{
   HYPRE_Int relax_error = 0;

   /*---------------------------------------------------------------------------------------
    * Switch statement to direct control based on relax_type:
    *     relax_type = 0 -> Jacobi or CF-Jacobi
    *     relax_type = 1 -> Gauss-Seidel <--- very slow, sequential
    *     relax_type = 2 -> Gauss_Seidel: interior points in parallel,
    *                                     boundary sequential
    *     relax_type = 3 -> hybrid: SOR-J mix off-processor, SOR on-processor
    *                               with outer relaxation parameters (forward solve)
    *     relax_type = 4 -> hybrid: SOR-J mix off-processor, SOR on-processor
    *                               with outer relaxation parameters (backward solve)
    *     relax_type = 5 -> hybrid: GS-J mix off-processor, chaotic GS on-node
    *     relax_type = 6 -> hybrid: SSOR-J mix off-processor, SSOR on-processor
    *                               with outer relaxation parameters
    *     relax_type = 7 -> Jacobi (uses Matvec), only needed in CGNR [GPU-supported]
    *     relax_type = 8 -> hybrid L1 Symm. Gauss-Seidel
    *     relax_type = 9 -> Direct solve, Gaussian elimination
    *     relax_type = 10 -> On-processor direct forward solve for matrices with
    *                        triangular structure (indices need not be ordered
    *                        triangular)
    *     relax_type = 11 -> Two Stage approximation to GS. Uses the strict lower
    *                        part of the diagonal matrix
    *     relax_type = 12 -> Two Stage approximation to GS. Uses the strict lower
    *                        part of the diagonal matrix and a second iteration
    *                        for additional error approximation
    *     relax_type = 13 -> hybrid L1 Gauss-Seidel forward solve
    *     relax_type = 14 -> hybrid L1 Gauss-Seidel backward solve
    *     relax_type = 15 -> CG
    *     relax_type = 16 -> Scaled Chebyshev
    *     relax_type = 17 -> FCF-Jacobi
    *     relax_type = 18 -> L1-Jacobi [GPU-supported]
    *     relax_type = 19 -> Direct Solve, (old version)
    *     relax_type = 20 -> Kaczmarz
    *     relax_type = 29 -> Direct solve: use gaussian elimination & BLAS
    *                        (with pivoting) (old version)
    *     relax_type = 98 -> Direct solve, Gaussian elimination
    *     relax_type = 99 -> Direct solve, Gaussian elimination
    *     relax_type = 199-> Direct solve, Gaussian elimination
    *-------------------------------------------------------------------------------------*/

   /* NOTE(review): relax types documented above but absent below (9, 15, 16,
    * 17, 29, 99, 199) fall through this switch untouched and return
    * relax_error = 0 -- presumably they are handled by other drivers; confirm. */
   switch (relax_type)
   {
      case 0: /* Weighted Jacobi */
         hypre_BoomerAMGRelax0WeightedJacobi(A, f, cf_marker, relax_points, relax_weight, u, Vtemp);
         break;

      case 1: /* Gauss-Seidel VERY SLOW */
         hypre_BoomerAMGRelax1GaussSeidel(A, f, cf_marker, relax_points, u);
         break;

      case 2: /* Gauss-Seidel: relax interior points in parallel, boundary sequentially */
         hypre_BoomerAMGRelax2GaussSeidel(A, f, cf_marker, relax_points, u);
         break;

      /* Hybrid: Jacobi off-processor, Gauss-Seidel on-processor (forward loop) */
      case 3:
         hypre_BoomerAMGRelax3HybridGaussSeidel(A, f, cf_marker, relax_points, relax_weight, omega, u, Vtemp, Ztemp);
         break;

      case 4: /* Hybrid: Jacobi off-processor, Gauss-Seidel/SOR on-processor (backward loop) */
         hypre_BoomerAMGRelax4HybridGaussSeidel(A, f, cf_marker, relax_points, relax_weight, omega, u, Vtemp, Ztemp);
         break;

      case 5: /* Hybrid: Jacobi off-processor, chaotic Gauss-Seidel on-processor */
         hypre_BoomerAMGRelax5ChaoticHybridGaussSeidel(A, f, cf_marker, relax_points, u);
         break;

      case 6: /* Hybrid: Jacobi off-processor, Symm. Gauss-Seidel/SSOR on-processor with outer relaxation parameter */
         hypre_BoomerAMGRelax6HybridSSOR(A, f, cf_marker, relax_points, relax_weight, omega, u, Vtemp, Ztemp);
         break;

      case 7: /* Jacobi (uses ParMatvec) */
         hypre_BoomerAMGRelax7Jacobi(A, f, relax_points, relax_weight, l1_norms, u, Vtemp);
         break;

      case 8: /* hybrid L1 Symm. Gauss-Seidel */
         hypre_BoomerAMGRelax8HybridL1SSOR(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp);
         break;

      /* Hybrid: Jacobi off-processor, ordered Gauss-Seidel on-processor */
      case 10:
         hypre_BoomerAMGRelax10TopoOrderedGaussSeidel(A, f, cf_marker, relax_points, relax_weight, omega, u, Vtemp, Ztemp);
         break;

      case 11: /* Two Stage Gauss Seidel. Forward sweep only */
         hypre_BoomerAMGRelax11TwoStageGaussSeidel(A, f, cf_marker, relax_points, relax_weight, omega, u, Vtemp, Ztemp);
         break;

      case 12: /* Two Stage Gauss Seidel. Uses the diagonal matrix for the GS part */
         hypre_BoomerAMGRelax12TwoStageGaussSeidel(A, f, cf_marker, relax_points, relax_weight, omega, u, Vtemp, Ztemp);
         break;

      case 13: /* hybrid L1 Gauss-Seidel forward solve */
         hypre_BoomerAMGRelax13HybridL1GaussSeidel(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp);
         break;

      case 14: /* hybrid L1 Gauss-Seidel backward solve */
         hypre_BoomerAMGRelax14HybridL1GaussSeidel(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp);
         break;

      case 18: /* weighted L1 Jacobi */
         hypre_BoomerAMGRelax18WeightedL1Jacobi(A, f, cf_marker, relax_points, relax_weight, l1_norms, u, Vtemp);
         break;

      case 19: /* Direct solve: use gaussian elimination */
         relax_error = hypre_BoomerAMGRelax19GaussElim(A, f, u);
         break;

      case 20: /* Kaczmarz */
         hypre_BoomerAMGRelaxKaczmarz(A, f, omega, l1_norms, u);
         break;

      case 98: /* Direct solve: use gaussian elimination & BLAS (with pivoting) */
         relax_error = hypre_BoomerAMGRelax98GaussElimPivot(A, f, u);
         break;
   }

   return relax_error;
}
/* Shared kernel for weighted (damped) Jacobi and weighted L1-Jacobi.
 *
 * Skip_diag = 1: classic damped Jacobi -- the first entry of each diag row is
 *   excluded from the residual and the update blends with (1 - w)*u_old.
 * Skip_diag = 0: l1-Jacobi form -- the full row (including the diagonal) goes
 *   into the residual and u += w * res / d with no blending term.
 * If l1_norms is non-NULL it replaces the diagonal entry as the scaling d.
 *
 * Assumes the diagonal entry is stored first in each row of the diag part
 * (A_diag_data[A_diag_i[i]]) -- hypre's convention; confirm for externally
 * built matrices. */
HYPRE_Int
hypre_BoomerAMGRelaxWeightedJacobi_core( hypre_ParCSRMatrix *A,
                                         hypre_ParVector    *f,
                                         HYPRE_Int          *cf_marker,
                                         HYPRE_Int           relax_points,
                                         HYPRE_Real          relax_weight,
                                         HYPRE_Real         *l1_norms,
                                         hypre_ParVector    *u,
                                         hypre_ParVector    *Vtemp,
                                         HYPRE_Int           Skip_diag )
{
   MPI_Comm             comm          = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix     *A_diag        = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real          *A_diag_data   = hypre_CSRMatrixData(A_diag);
   HYPRE_Int           *A_diag_i      = hypre_CSRMatrixI(A_diag);
   HYPRE_Int           *A_diag_j      = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix     *A_offd        = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int           *A_offd_i      = hypre_CSRMatrixI(A_offd);
   HYPRE_Real          *A_offd_data   = hypre_CSRMatrixData(A_offd);
   HYPRE_Int           *A_offd_j      = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg      = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int            num_rows      = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int            num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector        *u_local       = hypre_ParVectorLocalVector(u);
   HYPRE_Complex       *u_data        = hypre_VectorData(u_local);
   hypre_Vector        *f_local       = hypre_ParVectorLocalVector(f);
   HYPRE_Complex       *f_data        = hypre_VectorData(f_local);
   hypre_Vector        *Vtemp_local   = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Complex       *Vtemp_data    = hypre_VectorData(Vtemp_local);
   HYPRE_Complex       *v_ext_data    = NULL;
   HYPRE_Complex       *v_buf_data    = NULL;
   HYPRE_Complex        zero          = 0.0;
   HYPRE_Real           one_minus_weight = 1.0 - relax_weight;
   HYPRE_Complex        res;
   HYPRE_Int            num_procs, my_id, i, j, ii, jj, index, num_sends, start;
   hypre_ParCSRCommHandle *comm_handle;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* Pack boundary values of u and start the non-blocking halo exchange
    * into v_ext_data (received off-processor values). */
   if (num_procs > 1)
   {
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

      v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);

      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
         }
      }

      comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data, v_ext_data);
   }

   /*-----------------------------------------------------------------
    * Copy current approximation into temporary vector.
    *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < num_rows; i++)
   {
      Vtemp_data[i] = u_data[i];
   }

   /* Destroying the handle completes the halo exchange started above. */
   if (num_procs > 1)
   {
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }

   /*-----------------------------------------------------------------
    * Relax all points.
    *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < num_rows; i++)
   {
      /* Scaling: l1 row norm if provided, otherwise the diagonal entry. */
      const HYPRE_Complex di = l1_norms ? l1_norms[i] : A_diag_data[A_diag_i[i]];

      /*-----------------------------------------------------------
       * If i is of the right type ( C or F or All ) and diagonal is
       * nonzero, relax point i; otherwise, skip it.
       * Relax only C or F points as determined by relax_points.
       *-----------------------------------------------------------*/
      if ( (relax_points == 0 || cf_marker[i] == relax_points) && di != zero )
      {
         /* Residual uses the old iterate (Vtemp) locally and the received
          * off-processor values, so the sweep is a true Jacobi sweep. */
         res = f_data[i];
         for (jj = A_diag_i[i] + Skip_diag; jj < A_diag_i[i+1]; jj++)
         {
            ii = A_diag_j[jj];
            res -= A_diag_data[jj] * Vtemp_data[ii];
         }
         for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
         {
            ii = A_offd_j[jj];
            res -= A_offd_data[jj] * v_ext_data[ii];
         }

         if (Skip_diag)
         {
            /* u = (1-w)*u_old + w * res / d  (diagonal excluded from res) */
            u_data[i] *= one_minus_weight;
            u_data[i] += relax_weight * res / di;
         }
         else
         {
            /* l1 form: u += w * res / d  (full-row residual) */
            u_data[i] += relax_weight * res / di;
         }
      }
   }

   if (num_procs > 1)
   {
      hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST);
      hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
   }

   return hypre_error_flag;
}
/* Plain weighted (damped) Jacobi: delegate to the shared Jacobi kernel with
 * no l1-norm scaling (NULL) and the diagonal entry skipped (Skip_diag = 1),
 * i.e. u = (1-w)*u + w*D^{-1}*(f - (A-D)*u). */
HYPRE_Int
hypre_BoomerAMGRelax0WeightedJacobi( hypre_ParCSRMatrix *A,
                                     hypre_ParVector    *f,
                                     HYPRE_Int          *cf_marker,
                                     HYPRE_Int           relax_points,
                                     HYPRE_Real          relax_weight,
                                     hypre_ParVector    *u,
                                     hypre_ParVector    *Vtemp )
{
   HYPRE_Int ierr;

   ierr = hypre_BoomerAMGRelaxWeightedJacobi_core(A, f, cf_marker, relax_points,
                                                  relax_weight, NULL, u, Vtemp, 1);

   return ierr;
}
/* Weighted L1-Jacobi.  On GPU builds the device path always calls the
 * matvec-based Relax7 (CF relaxation not yet implemented on device); on the
 * host, non-CF sweeps also use Relax7 for speed, and only CF sweeps
 * (relax_points != 0) use the row-wise core kernel with Skip_diag = 0. */
HYPRE_Int
hypre_BoomerAMGRelax18WeightedL1Jacobi( hypre_ParCSRMatrix *A,
                                        hypre_ParVector    *f,
                                        HYPRE_Int          *cf_marker,
                                        HYPRE_Int           relax_points,
                                        HYPRE_Real          relax_weight,
                                        HYPRE_Real         *l1_norms,
                                        hypre_ParVector    *u,
                                        hypre_ParVector    *Vtemp )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_VectorMemoryLocation(x), hypre_VectorMemoryLocation(b) );
   //RL: TODO back to hypre_GetExecPolicy2 later
   /* Execution policy is currently forced to the device (see TODO above). */
   HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE;

   // TODO implement CF relax on GPUs
   if (relax_points != 0)
   {
      exec = HYPRE_EXEC_HOST;
   }

   if (exec == HYPRE_EXEC_DEVICE)
   {
      // XXX GPU calls Relax7 XXX
      return hypre_BoomerAMGRelax7Jacobi(A, f, relax_points, relax_weight, l1_norms, u, Vtemp);
   }
   else
#endif
   {
      /* in the case of non-CF, use relax-7 which is faster */
      if (relax_points == 0)
      {
         return hypre_BoomerAMGRelax7Jacobi(A, f, relax_points, relax_weight, l1_norms, u, Vtemp);
      }
      else
      {
         /* CF sweep: full-row residual scaled by l1 norms (Skip_diag = 0). */
         return hypre_BoomerAMGRelaxWeightedJacobi_core(A, f, cf_marker, relax_points, relax_weight, l1_norms, u, Vtemp, 0);
      }
   }
}
/* True (sequential) Gauss-Seidel across processes: ranks take turns.  While
 * rank p relaxes all of its rows using the freshest data, every other rank
 * only posts the sends p needs; barriers serialize the turns.  Correct but
 * very slow -- intended for debugging/reference, not production. */
HYPRE_Int
hypre_BoomerAMGRelax1GaussSeidel( hypre_ParCSRMatrix *A,
                                  hypre_ParVector    *f,
                                  HYPRE_Int          *cf_marker,
                                  HYPRE_Int           relax_points,
                                  hypre_ParVector    *u )
{
   MPI_Comm             comm          = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix     *A_diag        = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real          *A_diag_data   = hypre_CSRMatrixData(A_diag);
   HYPRE_Int           *A_diag_i      = hypre_CSRMatrixI(A_diag);
   HYPRE_Int           *A_diag_j      = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix     *A_offd        = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int           *A_offd_i      = hypre_CSRMatrixI(A_offd);
   HYPRE_Real          *A_offd_data   = hypre_CSRMatrixData(A_offd);
   HYPRE_Int           *A_offd_j      = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg      = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int            num_rows      = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int            num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector        *u_local       = hypre_ParVectorLocalVector(u);
   HYPRE_Complex       *u_data        = hypre_VectorData(u_local);
   hypre_Vector        *f_local       = hypre_ParVectorLocalVector(f);
   HYPRE_Complex       *f_data        = hypre_VectorData(f_local);
   HYPRE_Complex       *v_ext_data    = NULL;
   HYPRE_Complex       *v_buf_data    = NULL;
   HYPRE_Complex        zero          = 0.0;
   HYPRE_Complex        res;
   HYPRE_Int            num_procs, my_id, i, j, ii, jj, p, jr, ip, num_sends, num_recvs, vec_start, vec_len;
   hypre_MPI_Status    *status;
   hypre_MPI_Request   *requests;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   if (num_procs > 1)
   {
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);

      v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
      status     = hypre_CTAlloc(hypre_MPI_Status, num_recvs+num_sends, HYPRE_MEMORY_HOST);
      requests   = hypre_CTAlloc(hypre_MPI_Request, num_recvs+num_sends, HYPRE_MEMORY_HOST);
   }

   /*-----------------------------------------------------------------
    * Relax all points.
    *-----------------------------------------------------------------*/
   /* One pass over ranks: only rank p relaxes during its turn. */
   for (p = 0; p < num_procs; p++)
   {
      jr = 0;
      if (p != my_id)
      {
         /* Not our turn: feed rank p our current boundary values of u. */
         for (i = 0; i < num_sends; i++)
         {
            ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
            if (ip == p)
            {
               vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               vec_len   = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1) - vec_start;
               for (j = vec_start; j < vec_start+vec_len; j++)
               {
                  v_buf_data[j] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
               }
               hypre_MPI_Isend(&v_buf_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]);
            }
         }
         hypre_MPI_Waitall(jr, requests, status);
         /* Wait until rank p finishes its sweep before the next turn. */
         hypre_MPI_Barrier(comm);
      }
      else
      {
         /* Our turn: gather all off-processor values, then sweep locally. */
         if (num_procs > 1)
         {
            for (i = 0; i < num_recvs; i++)
            {
               ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
               vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i);
               vec_len   = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i+1) - vec_start;
               hypre_MPI_Irecv(&v_ext_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]);
            }
            hypre_MPI_Waitall(jr, requests, status);
         }

         for (i = 0; i < num_rows; i++)
         {
            /*-----------------------------------------------------------
             * If i is of the right type ( C or F ) and diagonal is
             * nonzero, relax point i; otherwise, skip it.
             * Relax only C or F points as determined by relax_points.
             *-----------------------------------------------------------*/
            if ( (relax_points == 0 || cf_marker[i] == relax_points) && A_diag_data[A_diag_i[i]] != zero )
            {
               /* Gauss-Seidel: uses updated u_data within this sweep.
                * Assumes the diagonal is stored first in each diag row. */
               res = f_data[i];
               for (jj = A_diag_i[i] + 1; jj < A_diag_i[i+1]; jj++)
               {
                  ii = A_diag_j[jj];
                  res -= A_diag_data[jj] * u_data[ii];
               }
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  ii = A_offd_j[jj];
                  res -= A_offd_data[jj] * v_ext_data[ii];
               }
               u_data[i] = res / A_diag_data[A_diag_i[i]];
            }
         }

         if (num_procs > 1)
         {
            hypre_MPI_Barrier(comm);
         }
      }
   }

   if (num_procs > 1)
   {
      hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST);
      hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
      hypre_TFree(status, HYPRE_MEMORY_HOST);
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
   }

   return hypre_error_flag;
}
/* Gauss-Seidel variant: rows with no off-processor couplings ("interior"
 * rows) are relaxed first without any communication; rows that do couple
 * off-processor are then relaxed sequentially, one rank at a time, as in
 * hypre_BoomerAMGRelax1GaussSeidel. */
HYPRE_Int
hypre_BoomerAMGRelax2GaussSeidel( hypre_ParCSRMatrix *A,
                                  hypre_ParVector    *f,
                                  HYPRE_Int          *cf_marker,
                                  HYPRE_Int           relax_points,
                                  hypre_ParVector    *u )
{
   MPI_Comm             comm          = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix     *A_diag        = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real          *A_diag_data   = hypre_CSRMatrixData(A_diag);
   HYPRE_Int           *A_diag_i      = hypre_CSRMatrixI(A_diag);
   HYPRE_Int           *A_diag_j      = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix     *A_offd        = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int           *A_offd_i      = hypre_CSRMatrixI(A_offd);
   HYPRE_Real          *A_offd_data   = hypre_CSRMatrixData(A_offd);
   HYPRE_Int           *A_offd_j      = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg      = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int            num_rows      = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int            num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector        *u_local       = hypre_ParVectorLocalVector(u);
   HYPRE_Complex       *u_data        = hypre_VectorData(u_local);
   hypre_Vector        *f_local       = hypre_ParVectorLocalVector(f);
   HYPRE_Complex       *f_data        = hypre_VectorData(f_local);
   HYPRE_Complex       *v_ext_data    = NULL;
   HYPRE_Complex       *v_buf_data    = NULL;
   HYPRE_Complex        zero          = 0.0;
   HYPRE_Complex        res;
   HYPRE_Int            num_procs, my_id, i, j, ii, jj, p, jr, ip, num_sends, num_recvs, vec_start, vec_len;
   hypre_MPI_Status    *status;
   hypre_MPI_Request   *requests;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   if (num_procs > 1)
   {
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);

      v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
      status     = hypre_CTAlloc(hypre_MPI_Status, num_recvs+num_sends, HYPRE_MEMORY_HOST);
      requests   = hypre_CTAlloc(hypre_MPI_Request, num_recvs+num_sends, HYPRE_MEMORY_HOST);
   }

   /*-----------------------------------------------------------------
    * Relax interior points first
    *-----------------------------------------------------------------*/
   for (i = 0; i < num_rows; i++)
   {
      /*-----------------------------------------------------------
       * If i is of the right type ( C or F or All ) and diagonal is
       * nonzero, relax point i; otherwise, skip it.
       *-----------------------------------------------------------*/
      /* "Interior" = row has no offd entries.  Note the row-nnz count
       * (an integer) is compared against the complex constant `zero`;
       * it works, but it is a count test, not a value test. */
      if ( (relax_points == 0 || cf_marker[i] == relax_points) && A_offd_i[i+1] - A_offd_i[i] == zero &&
           A_diag_data[A_diag_i[i]] != zero )
      {
         res = f_data[i];
         for (jj = A_diag_i[i] + 1; jj < A_diag_i[i+1]; jj++)
         {
            ii = A_diag_j[jj];
            res -= A_diag_data[jj] * u_data[ii];
         }
         u_data[i] = res / A_diag_data[A_diag_i[i]];
      }
   }

   /* Boundary rows: one rank at a time, exactly as in Relax1. */
   for (p = 0; p < num_procs; p++)
   {
      jr = 0;
      if (p != my_id)
      {
         /* Not our turn: send rank p the boundary values it needs. */
         for (i = 0; i < num_sends; i++)
         {
            ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
            if (ip == p)
            {
               vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               vec_len   = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1) - vec_start;
               for (j = vec_start; j < vec_start+vec_len; j++)
               {
                  v_buf_data[j] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
               }
               hypre_MPI_Isend(&v_buf_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]);
            }
         }
         hypre_MPI_Waitall(jr, requests, status);
         hypre_MPI_Barrier(comm);
      }
      else
      {
         /* Our turn: receive all off-processor values, then relax the
          * boundary rows (those with at least one offd entry). */
         if (num_procs > 1)
         {
            for (i = 0; i < num_recvs; i++)
            {
               ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
               vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i);
               vec_len   = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i+1) - vec_start;
               hypre_MPI_Irecv(&v_ext_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]);
            }
            hypre_MPI_Waitall(jr, requests, status);
         }

         for (i = 0; i < num_rows; i++)
         {
            /*-----------------------------------------------------------
             * If i is of the right type ( C or F or All) and diagonal is
             * nonzero, relax point i; otherwise, skip it.
             * Relax only C or F points as determined by relax_points.
             *-----------------------------------------------------------*/
            if ( (relax_points == 0 || cf_marker[i] == relax_points) && A_offd_i[i+1] - A_offd_i[i] != zero &&
                 A_diag_data[A_diag_i[i]] != zero)
            {
               res = f_data[i];
               for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
               {
                  ii = A_diag_j[jj];
                  res -= A_diag_data[jj] * u_data[ii];
               }
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  ii = A_offd_j[jj];
                  res -= A_offd_data[jj] * v_ext_data[ii];
               }
               u_data[i] = res / A_diag_data[A_diag_i[i]];
            }
         }

         if (num_procs > 1)
         {
            hypre_MPI_Barrier(comm);
         }
      }
   }

   if (num_procs > 1)
   {
      hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST);
      hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
      hypre_TFree(status, HYPRE_MEMORY_HOST);
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGRelaxHybridGaussSeidel_core
 *
 * Shared driver for the hybrid Gauss-Seidel smoothers (wrapped by relax
 * types 3, 4, 6, 8, 10, 13 and 14 below): Gauss-Seidel within a process,
 * with off-processor values of u exchanged once up front (Jacobi across
 * process boundaries).
 *
 * Parameters (as exercised by the wrappers in this file):
 *   cf_marker / relax_points - relax only rows with cf_marker == relax_points,
 *                              or all rows when relax_points == 0
 *   l1_norms   - per-row scaling factors (may be NULL; used by l1 variants)
 *   GS_order   - > 0: forward sweep; otherwise backward
 *   Symm       - nonzero: a forward sweep followed by a backward sweep
 *   Skip_diag  - forwarded to the row kernels (diagonal handling)
 *   forced_seq - nonzero forces the single-threaded code path
 *   Topo_order - nonzero: relax rows in topological order (used in AIR;
 *                only honored on the single-threaded path below)
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGRelaxHybridGaussSeidel_core( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real omega,
HYPRE_Real *l1_norms,
hypre_ParVector *u,
hypre_ParVector *Vtemp,
hypre_ParVector *Ztemp,
HYPRE_Int GS_order,
HYPRE_Int Symm,
HYPRE_Int Skip_diag,
HYPRE_Int forced_seq,
HYPRE_Int Topo_order )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Complex *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
HYPRE_Complex *f_data = hypre_VectorData(f_local);
hypre_Vector *Vtemp_local = Vtemp ? hypre_ParVectorLocalVector(Vtemp) : NULL;
HYPRE_Complex *Vtemp_data = Vtemp_local ? hypre_VectorData(Vtemp_local) : NULL;
/*
hypre_Vector *Ztemp_local = NULL;
HYPRE_Complex *Ztemp_data = NULL;
*/
HYPRE_Complex *v_ext_data = NULL;
HYPRE_Complex *v_buf_data = NULL;
HYPRE_Int *proc_ordering = NULL;
const HYPRE_Real one_minus_omega = 1.0 - omega;
HYPRE_Int num_procs, my_id, num_threads, j, num_sends;
hypre_ParCSRCommHandle *comm_handle;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
num_threads = forced_seq ? 1 : hypre_NumThreads();
/* GS order: forward or backward */
const HYPRE_Int gs_order = GS_order > 0 ? 1 : -1;
/* for symmetric GS, a forward followed by a backward */
const HYPRE_Int num_sweeps = Symm ? 2 : 1;
/* if relax_weight and omega are both 1.0 */
const HYPRE_Int non_scale = relax_weight == 1.0 && omega == 1.0;
/* prod = 1 - relax_weight * omega, passed to the weighted kernels below */
const HYPRE_Real prod = 1.0 - relax_weight * omega;
/*
if (num_threads > 1)
{
Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
Ztemp_data = hypre_VectorData(Ztemp_local);
}
*/
#if defined(HYPRE_USING_PERSISTENT_COMM)
// JSP: persistent comm can be similarly used for other smoothers
hypre_ParCSRPersistentCommHandle *persistent_comm_handle;
#endif
/* Halo exchange: pack boundary entries of u and receive the off-processor
 * values into v_ext_data.  The exchange is completed (Wait/Destroy) before
 * any relaxation starts, so the sweeps below read a consistent halo. */
if (num_procs > 1)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif
/* build the communication package on first use */
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
#if defined(HYPRE_USING_PERSISTENT_COMM)
/* persistent handles own their send/recv buffers; do not free them here */
persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg);
v_buf_data = (HYPRE_Real *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle);
v_ext_data = (HYPRE_Real *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle);
#else
v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
#endif
HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (j = begin; j < end; j++)
{
v_buf_data[j - begin] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif
#if defined(HYPRE_USING_PERSISTENT_COMM)
hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_HOST, v_buf_data);
#else
comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data, v_ext_data);
#endif
#if defined(HYPRE_USING_PERSISTENT_COMM)
hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_HOST, v_ext_data);
#else
hypre_ParCSRCommHandleDestroy(comm_handle);
#endif
comm_handle = NULL;
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif
}
if (Topo_order)
{
/* Check for ordering of matrix. If stored, get pointer, otherwise
* compute ordering and point matrix variable to array.
* Used in AIR
*/
if (!hypre_ParCSRMatrixProcOrdering(A))
{
/* ordering is cached on the matrix; ownership transfers to A */
proc_ordering = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
hypre_topo_sort(A_diag_i, A_diag_j, A_diag_data, proc_ordering, num_rows);
hypre_ParCSRMatrixProcOrdering(A) = proc_ordering;
}
else
{
proc_ordering = hypre_ParCSRMatrixProcOrdering(A);
}
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RELAX] -= hypre_MPI_Wtime();
#endif
/* Keep a copy of the current iterate in Vtemp for the kernels below
 * (needed by the threaded and the weighted variants). */
if ( (num_threads > 1 || !non_scale) && Vtemp_data )
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_rows; j++)
{
Vtemp_data[j] = u_data[j];
}
}
/* Threaded path: rows are partitioned among threads; each thread performs
 * Gauss-Seidel on its own partition. */
if (num_threads > 1)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
HYPRE_Int ns, ne, sweep;
hypre_partition1D(num_rows, num_threads, j, &ns, &ne);
for (sweep = 0; sweep < num_sweeps; sweep++)
{
/* symmetric case: sweep 0 forward, sweep 1 backward */
const HYPRE_Int iorder = num_sweeps == 1 ? gs_order : sweep == 0 ? 1 : -1;
const HYPRE_Int ibegin = iorder > 0 ? ns : ne - 1;
const HYPRE_Int iend = iorder > 0 ? ne : ns - 1;
if (non_scale)
{
hypre_HybridGaussSeidelNSThreads(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j, A_offd_data,
f_data, cf_marker, relax_points, l1_norms, u_data, Vtemp_data, v_ext_data,
ns, ne, ibegin, iend, iorder, Skip_diag);
}
else
{
hypre_HybridGaussSeidelThreads(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j, A_offd_data,
f_data, cf_marker, relax_points, relax_weight, omega, one_minus_omega,
prod, l1_norms, u_data, Vtemp_data, v_ext_data, ns, ne, ibegin, iend, iorder, Skip_diag);
}
} /* for (sweep = 0; sweep < num_sweeps; sweep++) */
} /* for (j = 0; j < num_threads; j++) */
}
else /* if (num_threads > 1) */
{
/* Single-threaded path; the only path that honors Topo_order. */
HYPRE_Int sweep;
for (sweep = 0; sweep < num_sweeps; sweep++)
{
const HYPRE_Int iorder = num_sweeps == 1 ? gs_order : sweep == 0 ? 1 : -1;
const HYPRE_Int ibegin = iorder > 0 ? 0 : num_rows - 1;
const HYPRE_Int iend = iorder > 0 ? num_rows : -1;
if (Topo_order)
{
hypre_HybridGaussSeidelOrderedNS(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j, A_offd_data,
f_data, cf_marker, relax_points, u_data, NULL, v_ext_data,
ibegin, iend, iorder, proc_ordering);
}
else
{
if (non_scale)
{
hypre_HybridGaussSeidelNS(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j, A_offd_data,
f_data, cf_marker, relax_points, l1_norms, u_data, Vtemp_data, v_ext_data,
ibegin, iend, iorder, Skip_diag);
}
else
{
hypre_HybridGaussSeidel(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j, A_offd_data,
f_data, cf_marker, relax_points, relax_weight, omega, one_minus_omega,
prod, l1_norms, u_data, Vtemp_data, v_ext_data, ibegin, iend, iorder, Skip_diag);
}
}
} /* for (sweep = 0; sweep < num_sweeps; sweep++) */
} /* if (num_threads > 1) */
/* The persistent-comm buffers belong to the comm handle, so they are only
 * freed here in the non-persistent build. */
#ifndef HYPRE_USING_PERSISTENT_COMM
if (num_procs > 1)
{
hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RELAX] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
/* forward hybrid G-S */
HYPRE_Int
hypre_BoomerAMGRelax3HybridGaussSeidel( hypre_ParCSRMatrix *A,
                                        hypre_ParVector    *f,
                                        HYPRE_Int          *cf_marker,
                                        HYPRE_Int           relax_points,
                                        HYPRE_Real          relax_weight,
                                        HYPRE_Real          omega,
                                        hypre_ParVector    *u,
                                        hypre_ParVector    *Vtemp,
                                        hypre_ParVector    *Ztemp )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* RL: TODO switch back to hypre_GetExecPolicy2 once available.
    * CF relaxation (relax_points != 0) is not implemented on GPUs yet,
    * so only the relax_points == 0 case runs on the device. */
   if (relax_points == 0)
   {
      return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points,
                                                         relax_weight, omega, NULL, u,
                                                         Vtemp, Ztemp,
                                                         1 /* forward */, 0 /* nonsymm */);
   }
#endif

   /* Host path: forward, non-symmetric hybrid Gauss-Seidel (Skip_diag = 1). */
   return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points,
                                                     relax_weight, omega, NULL, u,
                                                     Vtemp, Ztemp,
                                                     1 /* forward */, 0 /* nonsymm */,
                                                     1 /* skip diag */, 0, 0);
}
/* backward hybrid G-S */
HYPRE_Int
hypre_BoomerAMGRelax4HybridGaussSeidel( hypre_ParCSRMatrix *A,
                                        hypre_ParVector    *f,
                                        HYPRE_Int          *cf_marker,
                                        HYPRE_Int           relax_points,
                                        HYPRE_Real          relax_weight,
                                        HYPRE_Real          omega,
                                        hypre_ParVector    *u,
                                        hypre_ParVector    *Vtemp,
                                        hypre_ParVector    *Ztemp )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* RL: TODO switch back to hypre_GetExecPolicy2 once available.
    * CF relaxation (relax_points != 0) is not implemented on GPUs yet,
    * so only the relax_points == 0 case runs on the device. */
   if (relax_points == 0)
   {
      return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points,
                                                         relax_weight, omega, NULL, u,
                                                         Vtemp, Ztemp,
                                                         -1 /* backward */, 0 /* nonsymm */);
   }
#endif

   /* Host path: backward, non-symmetric hybrid Gauss-Seidel (Skip_diag = 1). */
   return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points,
                                                     relax_weight, omega, NULL, u,
                                                     Vtemp, Ztemp,
                                                     -1 /* backward */, 0 /* nonsymm */,
                                                     1 /* skip diag */, 0, 0);
}
/* chaotic forward G-S */
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGRelax5ChaoticHybridGaussSeidel
 *
 * "Chaotic" hybrid Gauss-Seidel: off-processor values of u are received
 * once up front; local rows are then relaxed inside an OpenMP-parallel
 * loop, each iteration reading whichever u values are current at the time
 * (the result therefore depends on the thread schedule).
 *
 * Only rows whose cf_marker matches relax_points (or every row when
 * relax_points == 0) AND whose diagonal entry is nonzero are updated.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGRelax5ChaoticHybridGaussSeidel( hypre_ParCSRMatrix *A,
                                               hypre_ParVector    *f,
                                               HYPRE_Int          *cf_marker,
                                               HYPRE_Int           relax_points,
                                               hypre_ParVector    *u )
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Complex *u_data = hypre_VectorData(u_local);
   hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
   HYPRE_Complex *f_data = hypre_VectorData(f_local);
   HYPRE_Complex *v_ext_data = NULL;
   HYPRE_Complex *v_buf_data = NULL;
   HYPRE_Complex zero = 0.0;
   HYPRE_Complex res;
   HYPRE_Int num_procs, my_id, i, j, ii, jj, index, num_sends, start;
   hypre_ParCSRCommHandle *comm_handle;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   if (num_procs > 1)
   {
      /* Bug fix: create the communication package on first use, as done in
       * hypre_BoomerAMGRelaxHybridGaussSeidel_core and the Kaczmarz routine;
       * previously a missing comm_pkg was dereferenced (NULL) below. */
      if (!comm_pkg)
      {
         hypre_MatvecCommPkgCreate(A);
         comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      }
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);

      /* pack the boundary entries of u for the neighbors */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
         {
            v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
         }
      }

      /* complete the halo exchange before relaxing */
      comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data, v_ext_data);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < num_rows; i++)
   {
      /*-----------------------------------------------------------
       * If i is of the right type ( C or F or All) and diagonal is
       * nonzero, relax point i; otherwise, skip it.
       * Relax only C or F points as determined by relax_points.
       *-----------------------------------------------------------*/
      if ( (relax_points == 0 || cf_marker[i] == relax_points) && A_diag_data[A_diag_i[i]] != zero )
      {
         res = f_data[i];
         /* diagonal part, skipping the diagonal entry itself (first in row) */
         for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)
         {
            ii = A_diag_j[jj];
            res -= A_diag_data[jj] * u_data[ii];
         }
         /* off-diagonal part against the received halo values */
         for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
         {
            ii = A_offd_j[jj];
            res -= A_offd_data[jj] * v_ext_data[ii];
         }
         u_data[i] = res / A_diag_data[A_diag_i[i]];
      }
   }

   if (num_procs > 1)
   {
      hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST);
      hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
   }

   return hypre_error_flag;
}
/* symmetric hybrid G-S */
HYPRE_Int
hypre_BoomerAMGRelax6HybridSSOR( hypre_ParCSRMatrix *A,
                                 hypre_ParVector    *f,
                                 HYPRE_Int          *cf_marker,
                                 HYPRE_Int           relax_points,
                                 HYPRE_Real          relax_weight,
                                 HYPRE_Real          omega,
                                 hypre_ParVector    *u,
                                 hypre_ParVector    *Vtemp,
                                 hypre_ParVector    *Ztemp )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* RL: TODO switch back to hypre_GetExecPolicy2 once available.
    * CF relaxation (relax_points != 0) is not implemented on GPUs yet,
    * so only the relax_points == 0 case runs on the device. */
   if (relax_points == 0)
   {
      return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points,
                                                         relax_weight, omega, NULL, u,
                                                         Vtemp, Ztemp, 1, 1 /* symm */);
   }
#endif

   /* Host path: symmetric sweep (forward then backward), Skip_diag = 1. */
   return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points,
                                                     relax_weight, omega, NULL, u,
                                                     Vtemp, Ztemp, 1, 1 /* symm */,
                                                     1 /* skip diag */, 0, 0);
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGRelax7Jacobi
 *
 * l1-Jacobi relaxation:  u += relax_weight * D^{-1} (f - A u),
 * where D is the diagonal matrix built from l1_norms.
 * l1_norms is a raw array; it is wrapped in a temporary hypre_Vector /
 * hypre_ParVector so the vector kernels below can consume it.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGRelax7Jacobi( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real *l1_norms,
hypre_ParVector *u,
hypre_ParVector *Vtemp )
{
HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
hypre_Vector l1_norms_vec;
hypre_ParVector l1_norms_parvec;
hypre_VectorData(&l1_norms_vec) = l1_norms;
hypre_VectorSize(&l1_norms_vec) = num_rows;
/* TODO XXX
* The next line is NOT 100% correct, which should be the memory location of l1_norms instead of f
* But how do I know it? As said, don't use raw pointers, don't use raw pointers!
* It is fine normally since A, f, and l1_norms should live in the same memory space
*/
hypre_VectorMemoryLocation(&l1_norms_vec) = hypre_ParVectorMemoryLocation(f);
hypre_ParVectorLocalVector(&l1_norms_parvec) = &l1_norms_vec;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* suppress per-call stream sync; one sync is done at the end instead */
HYPRE_Int sync_stream;
hypre_GetSyncCudaCompute(&sync_stream);
hypre_SetSyncCudaCompute(0);
#endif
/*-----------------------------------------------------------------
* Copy f into temporary vector.
*-----------------------------------------------------------------*/
hypre_ParVectorCopy(f, Vtemp);
/*-----------------------------------------------------------------
* Perform Matvec Vtemp = w * (f - Au)
*-----------------------------------------------------------------*/
hypre_ParCSRMatrixMatvec(-relax_weight, A, u, relax_weight, Vtemp);
/*-----------------------------------------------------------------
* u += D^{-1} * Vtemp, where D_ii = ||A(i,:)||_1
*-----------------------------------------------------------------*/
hypre_ParVectorElmdivpy(Vtemp, &l1_norms_parvec, u);
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* restore the previous sync setting and sync once */
hypre_SetSyncCudaCompute(sync_stream);
hypre_SyncCudaComputeStream(hypre_handle());
#endif
return hypre_error_flag;
}
/* symmetric l1 hybrid G-S */
HYPRE_Int
hypre_BoomerAMGRelax8HybridL1SSOR( hypre_ParCSRMatrix *A,
                                   hypre_ParVector    *f,
                                   HYPRE_Int          *cf_marker,
                                   HYPRE_Int           relax_points,
                                   HYPRE_Real          relax_weight,
                                   HYPRE_Real          omega,
                                   HYPRE_Real         *l1_norms,
                                   hypre_ParVector    *u,
                                   hypre_ParVector    *Vtemp,
                                   hypre_ParVector    *Ztemp )
{
   /* In the unweighted case (relax_weight == omega == 1) the kernels are
    * called with Skip_diag = 0; otherwise the diagonal entry is skipped. */
   HYPRE_Int diag_skipped;

   if (relax_weight == 1.0 && omega == 1.0)
   {
      diag_skipped = 0;
   }
   else
   {
      diag_skipped = 1;
   }

   /* symmetric l1-scaled hybrid Gauss-Seidel */
   return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points,
                                                     relax_weight, omega, l1_norms,
                                                     u, Vtemp, Ztemp,
                                                     1, 1 /* symm */, diag_skipped, 0, 0);
}
/* forward hybrid topology ordered G-S */
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGRelax10TopoOrderedGaussSeidel
 *
 * Forward hybrid Gauss-Seidel that visits rows in topological order
 * (Topo_order = 1; used in AIR).  forced_seq = 1 because the topological
 * ordering is only honored by the sequential path of the core routine.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGRelax10TopoOrderedGaussSeidel( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real omega,
hypre_ParVector *u,
hypre_ParVector *Vtemp,
hypre_ParVector *Ztemp )
{
return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp,
1 /* forward */, 0 /* nonsymm */, 1 /* skip_diag */, 1, 1);
}
/* forward l1 hybrid G-S */
HYPRE_Int
hypre_BoomerAMGRelax13HybridL1GaussSeidel( hypre_ParCSRMatrix *A,
                                           hypre_ParVector    *f,
                                           HYPRE_Int          *cf_marker,
                                           HYPRE_Int           relax_points,
                                           HYPRE_Real          relax_weight,
                                           HYPRE_Real          omega,
                                           HYPRE_Real         *l1_norms,
                                           hypre_ParVector    *u,
                                           hypre_ParVector    *Vtemp,
                                           hypre_ParVector    *Ztemp )
{
   /* In the unweighted case (relax_weight == omega == 1) the kernels are
    * called with Skip_diag = 0; otherwise the diagonal entry is skipped. */
   HYPRE_Int diag_skipped;

   if (relax_weight == 1.0 && omega == 1.0)
   {
      diag_skipped = 0;
   }
   else
   {
      diag_skipped = 1;
   }

   /* forward l1-scaled hybrid Gauss-Seidel */
   return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points,
                                                     relax_weight, omega, l1_norms,
                                                     u, Vtemp, Ztemp,
                                                     1 /* forward */, 0 /* nonsymm */,
                                                     diag_skipped, 0, 0);
}
/* backward l1 hybrid G-S */
HYPRE_Int
hypre_BoomerAMGRelax14HybridL1GaussSeidel( hypre_ParCSRMatrix *A,
                                           hypre_ParVector    *f,
                                           HYPRE_Int          *cf_marker,
                                           HYPRE_Int           relax_points,
                                           HYPRE_Real          relax_weight,
                                           HYPRE_Real          omega,
                                           HYPRE_Real         *l1_norms,
                                           hypre_ParVector    *u,
                                           hypre_ParVector    *Vtemp,
                                           hypre_ParVector    *Ztemp )
{
   /* In the unweighted case (relax_weight == omega == 1) the kernels are
    * called with Skip_diag = 0; otherwise the diagonal entry is skipped. */
   HYPRE_Int diag_skipped;

   if (relax_weight == 1.0 && omega == 1.0)
   {
      diag_skipped = 0;
   }
   else
   {
      diag_skipped = 1;
   }

   /* backward l1-scaled hybrid Gauss-Seidel */
   return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points,
                                                     relax_weight, omega, l1_norms,
                                                     u, Vtemp, Ztemp,
                                                     -1 /* backward */, 0 /* nonsymm */,
                                                     diag_skipped, 0, 0);
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGRelax19GaussElim
 *
 * Direct solve by Gaussian elimination (no pivoting): the whole matrix and
 * right-hand side are gathered onto every rank, a dense solve is performed,
 * and each rank copies out its own portion of the solution.
 * Collective: every rank must call (the gather routines are collective).
 * Intended for small coarse grids only — A_mat is dense n_global^2.
 *
 * Returns the relax_error produced by hypre_gselim (0 on success).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGRelax19GaussElim( hypre_ParCSRMatrix *A,
                                 hypre_ParVector    *f,
                                 hypre_ParVector    *u )
{
   HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt first_ind = hypre_ParVectorFirstIndex(u);
   HYPRE_Int n_global = (HYPRE_Int) global_num_rows;
   HYPRE_Int first_index = (HYPRE_Int) first_ind;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Complex *u_data = hypre_VectorData(u_local);
   hypre_CSRMatrix *A_CSR;
   HYPRE_Int *A_CSR_i;
   HYPRE_Int *A_CSR_j;
   HYPRE_Real *A_CSR_data;
   hypre_Vector *f_vector;
   HYPRE_Real *f_vector_data;
   HYPRE_Real *A_mat;
   HYPRE_Real *b_vec;
   HYPRE_Int i, jj, column, relax_error = 0;

   /*-----------------------------------------------------------------
    * Generate CSR matrix from ParCSRMatrix A
    *-----------------------------------------------------------------*/
   /* all processors are needed for these routines */
   A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A);
   f_vector = hypre_ParVectorToVectorAll(f);

   /* ranks that own no rows skip the solve but still destroy the
    * collectively-created objects below */
   if (num_rows)
   {
      A_CSR_i = hypre_CSRMatrixI(A_CSR);
      A_CSR_j = hypre_CSRMatrixJ(A_CSR);
      A_CSR_data = hypre_CSRMatrixData(A_CSR);
      f_vector_data = hypre_VectorData(f_vector);
      A_mat = hypre_CTAlloc(HYPRE_Real, n_global * n_global, HYPRE_MEMORY_HOST);
      b_vec = hypre_CTAlloc(HYPRE_Real, n_global, HYPRE_MEMORY_HOST);

      /*---------------------------------------------------------------
       * Load CSR matrix into A_mat (dense, row major).
       *---------------------------------------------------------------*/
      for (i = 0; i < n_global; i++)
      {
         for (jj = A_CSR_i[i]; jj < A_CSR_i[i + 1]; jj++)
         {
            column = A_CSR_j[jj];
            A_mat[i * n_global + column] = A_CSR_data[jj];
         }
         b_vec[i] = f_vector_data[i];
      }

      /* solve in place; b_vec becomes the global solution */
      hypre_gselim(A_mat, b_vec, n_global, relax_error);

      /* copy out the locally-owned part of the solution */
      for (i = 0; i < num_rows; i++)
      {
         u_data[i] = b_vec[first_index + i];
      }

      hypre_TFree(A_mat, HYPRE_MEMORY_HOST);
      hypre_TFree(b_vec, HYPRE_MEMORY_HOST);
   }

   /* Cleanup hoisted out of the if/else: it was previously duplicated
    * verbatim in both branches. */
   hypre_CSRMatrixDestroy(A_CSR);
   A_CSR = NULL;
   hypre_SeqVectorDestroy(f_vector);
   f_vector = NULL;

   return relax_error;
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGRelax98GaussElimPivot
 *
 * Direct solve by LU factorization with partial pivoting (dgetrf/dgetrs):
 * the whole matrix and right-hand side are gathered onto every rank, the
 * dense system is solved, and each rank copies out its own portion.
 * Collective: every rank must call (the gather routines are collective).
 * Note the dense matrix is filled in COLUMN-major order, as LAPACK expects.
 *
 * Returns relax_error (0; LAPACK status is reported through 'info',
 * which is currently not propagated).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGRelax98GaussElimPivot( hypre_ParCSRMatrix *A,
                                      hypre_ParVector    *f,
                                      hypre_ParVector    *u )
{
   HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt first_ind = hypre_ParVectorFirstIndex(u);
   HYPRE_Int n_global = (HYPRE_Int) global_num_rows;
   HYPRE_Int first_index = (HYPRE_Int) first_ind;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Complex *u_data = hypre_VectorData(u_local);
   hypre_CSRMatrix *A_CSR;
   HYPRE_Int *A_CSR_i;
   HYPRE_Int *A_CSR_j;
   HYPRE_Real *A_CSR_data;
   hypre_Vector *f_vector;
   HYPRE_Real *f_vector_data;
   HYPRE_Real *A_mat;
   HYPRE_Real *b_vec;
   HYPRE_Int i, jj, column, relax_error = 0;
   HYPRE_Int info;
   HYPRE_Int one_i = 1;
   HYPRE_Int *piv;

   /*-----------------------------------------------------------------
    * Generate CSR matrix from ParCSRMatrix A
    *-----------------------------------------------------------------*/
   /* all processors are needed for these routines */
   A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A);
   f_vector = hypre_ParVectorToVectorAll(f);

   /* ranks that own no rows skip the solve but still destroy the
    * collectively-created objects below */
   if (num_rows)
   {
      A_CSR_i = hypre_CSRMatrixI(A_CSR);
      A_CSR_j = hypre_CSRMatrixJ(A_CSR);
      A_CSR_data = hypre_CSRMatrixData(A_CSR);
      f_vector_data = hypre_VectorData(f_vector);
      A_mat = hypre_CTAlloc(HYPRE_Real, n_global * n_global, HYPRE_MEMORY_HOST);
      b_vec = hypre_CTAlloc(HYPRE_Real, n_global, HYPRE_MEMORY_HOST);

      /*---------------------------------------------------------------
       * Load CSR matrix into A_mat.
       *---------------------------------------------------------------*/
      for (i = 0; i < n_global; i++)
      {
         for (jj = A_CSR_i[i]; jj < A_CSR_i[i + 1]; jj++)
         {
            /* need col major */
            column = A_CSR_j[jj];
            A_mat[i + n_global * column] = A_CSR_data[jj];
         }
         b_vec[i] = f_vector_data[i];
      }

      piv = hypre_CTAlloc(HYPRE_Int, n_global, HYPRE_MEMORY_HOST);
      /* write over A with LU */
      hypre_dgetrf(&n_global, &n_global, A_mat, &n_global, piv, &info);
      /* now b_vec = inv(A)*b_vec */
      hypre_dgetrs("N", &n_global, &one_i, A_mat, &n_global, piv, b_vec, &n_global, &info);
      hypre_TFree(piv, HYPRE_MEMORY_HOST);

      /* copy out the locally-owned part of the solution */
      for (i = 0; i < num_rows; i++)
      {
         u_data[i] = b_vec[first_index + i];
      }

      hypre_TFree(A_mat, HYPRE_MEMORY_HOST);
      hypre_TFree(b_vec, HYPRE_MEMORY_HOST);
   }

   /* Cleanup hoisted out of the if/else: it was previously duplicated
    * verbatim in both branches. */
   hypre_CSRMatrixDestroy(A_CSR);
   A_CSR = NULL;
   hypre_SeqVectorDestroy(f_vector);
   f_vector = NULL;

   return relax_error;
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGRelaxKaczmarz
 *
 * l1-scaled Kaczmarz relaxation: a forward sweep over the local rows
 * followed by a backward sweep.  For each row i the residual
 *   res = (f_i - A(i,:) u) / l1_norms[i]
 * is computed and the update  u_j += omega * res * A_ij  is applied to the
 * locally-owned columns of row i.  Off-processor values of u are received
 * once before the sweeps and are NOT updated during them.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGRelaxKaczmarz( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Real omega,
HYPRE_Real *l1_norms,
hypre_ParVector *u )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Complex *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
HYPRE_Complex *f_data = hypre_VectorData(f_local);
HYPRE_Complex *u_offd_data = NULL;
HYPRE_Complex *u_buf_data = NULL;
HYPRE_Complex res;
HYPRE_Int num_procs, my_id, i, j, index, num_sends, start;
hypre_ParCSRCommHandle *comm_handle;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
/* Receive the off-processor entries of u into u_offd_data; the exchange is
 * completed before the sweeps start. */
if (num_procs > 1)
{
/* build the communication package on first use */
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
u_buf_data = hypre_TAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
u_offd_data = hypre_TAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
u_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, u_buf_data, u_offd_data);
hypre_ParCSRCommHandleDestroy(comm_handle);
hypre_TFree(u_buf_data, HYPRE_MEMORY_HOST);
}
/* Forward local pass */
for (i = 0; i < num_rows; i++)
{
/* scaled residual of row i */
res = f_data[i];
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
res -= A_diag_data[j] * u_data[A_diag_j[j]];
}
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
res -= A_offd_data[j] * u_offd_data[A_offd_j[j]];
}
res /= l1_norms[i];
/* project: update only the locally-owned columns of row i */
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
u_data[A_diag_j[j]] += omega * res * A_diag_data[j];
}
}
/* Backward local pass */
for (i = num_rows - 1; i > -1; i--)
{
res = f_data[i];
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
res -= A_diag_data[j] * u_data[A_diag_j[j]];
}
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
res -= A_offd_data[j] * u_offd_data[A_offd_j[j]];
}
res /= l1_norms[i];
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
u_data[A_diag_j[j]] += omega * res * A_diag_data[j];
}
}
/* hypre_TFree is a no-op on NULL (single-process case) */
hypre_TFree(u_offd_data, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGRelaxTwoStageGaussSeidelHost
 *
 * Two-stage Gauss-Seidel (host): approximates the triangular solve of
 * Gauss-Seidel by a truncated Neumann series with num_inner_iters terms,
 * applied to the scaled residual Vtemp = relax_weight * (f - A u).
 * Requires every diagonal entry of A_diag to be nonzero.
 *
 * Note: 'omega' is accepted for signature compatibility but unused here.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGRelaxTwoStageGaussSeidelHost( hypre_ParCSRMatrix *A,
                                             hypre_ParVector    *f,
                                             HYPRE_Real          relax_weight,
                                             HYPRE_Real          omega,
                                             hypre_ParVector    *u,
                                             hypre_ParVector    *Vtemp,
                                             HYPRE_Int           num_inner_iters)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Complex *Vtemp_data = hypre_VectorData(Vtemp_local);
   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Complex *u_data = hypre_VectorData(u_local);
   HYPRE_Int i, k, jj, ii;
   HYPRE_Complex multiplier = 1.0;

   /* Need to check that EVERY diagonal is nonzero first. If any are zero,
    * flag the error AND bail out: previously the code fell through and
    * divided by the zero diagonal below. */
   for (i = 0; i < num_rows; i++)
   {
      if (A_diag_data[A_diag_i[i]] == 0.0)
      {
         hypre_error_in_arg(1);
         return hypre_error_flag;
      }
   }

   /* Vtemp = relax_weight * (f - A u) */
   hypre_ParCSRMatrixMatvecOutOfPlace(-relax_weight, A, u, relax_weight, f, Vtemp);

   /* first Neumann term: u += D^{-1} Vtemp */
   for (i = 0; i < num_rows; i++) /* Run the smoother */
   {
      // V = V/D
      Vtemp_data[i] /= A_diag_data[A_diag_i[i]];
      // u = u + m*v
      u_data[i] += multiplier * Vtemp_data[i];
   }
   // adjust for the alternating series
   multiplier *= -1.0;

   for (k = 0; k < num_inner_iters; ++k)
   {
      // By going from bottom to top, we can update Vtemp in place because
      // we're operating with the strict, lower triangular matrix
      for (i = num_rows - 1; i >= 0; i--) /* Run the smoother */
      {
         // spmv for the row first
         HYPRE_Complex res = 0.0;
         for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
         {
            ii = A_diag_j[jj];
            if (ii < i)
            {
               res += A_diag_data[jj] * Vtemp_data[ii];
            }
         }
         // diagonal scaling has to come after the spmv accumulation. It's a row scaling
         // not column
         Vtemp_data[i] = res / A_diag_data[A_diag_i[i]];
         u_data[i] += multiplier * Vtemp_data[i];
      }
      // adjust for the alternating series
      multiplier *= -1.0;
   }

   return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGRelax11TwoStageGaussSeidel( hypre_ParCSRMatrix *A,
                                           hypre_ParVector    *f,
                                           HYPRE_Int          *cf_marker,
                                           HYPRE_Int           relax_points,
                                           HYPRE_Real          relax_weight,
                                           HYPRE_Real          omega,
                                           hypre_ParVector    *u,
                                           hypre_ParVector    *Vtemp,
                                           hypre_ParVector    *Ztemp )
{
   /* Two-stage Gauss-Seidel with one inner iteration.
    * RL: TODO switch back to hypre_GetExecPolicy2 once available; for now the
    * device path is taken unconditionally on GPU builds. */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_BoomerAMGRelaxTwoStageGaussSeidelDevice(A, f, relax_weight, omega, u, Vtemp, Ztemp, 1);
#else
   hypre_BoomerAMGRelaxTwoStageGaussSeidelHost(A, f, relax_weight, omega, u, Vtemp, 1);
#endif
   return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGRelax12TwoStageGaussSeidel( hypre_ParCSRMatrix *A,
                                           hypre_ParVector    *f,
                                           HYPRE_Int          *cf_marker,
                                           HYPRE_Int           relax_points,
                                           HYPRE_Real          relax_weight,
                                           HYPRE_Real          omega,
                                           hypre_ParVector    *u,
                                           hypre_ParVector    *Vtemp,
                                           hypre_ParVector    *Ztemp )
{
   /* Two-stage Gauss-Seidel with two inner iterations.
    * RL: TODO switch back to hypre_GetExecPolicy2 once available; for now the
    * device path is taken unconditionally on GPU builds. */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_BoomerAMGRelaxTwoStageGaussSeidelDevice(A, f, relax_weight, omega, u, Vtemp, Ztemp, 2);
#else
   hypre_BoomerAMGRelaxTwoStageGaussSeidelHost(A, f, relax_weight, omega, u, Vtemp, 2);
#endif
   return hypre_error_flag;
}
|
squeeze_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: jxyang@openailab.com
*/
#include <math.h>
#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include "squeeze_param.h"
/* No per-node state is needed for the reference Squeeze kernel. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/* Nothing was allocated in init_node, so there is nothing to release. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/* Squeeze only removes size-1 dimensions, so the element data is unchanged:
 * the kernel is a plain element-wise copy from input to output (fp32). */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* node = exec_node->ir_node;
    struct ir_graph* graph = node->graph;
    struct ir_tensor* in_tensor = get_ir_graph_tensor(graph, node->input_tensors[0]);
    struct ir_tensor* out_tensor = get_ir_graph_tensor(graph, node->output_tensors[0]);

    const int total = in_tensor->elem_num;
    float* src = ( float* )in_tensor->data;
    float* dst = ( float* )out_tensor->data;
    int threads = exec_graph->num_thread;

/* parallel copy using the thread count configured on the graph */
#pragma omp parallel for num_threads(threads)
    for (int idx = 0; idx < total; idx++)
    {
        dst[idx] = src[idx];
    }

    return 0;
}
/* This reference implementation always claims OPS_SCORE_BEST for Squeeze. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
return OPS_SCORE_BEST;
}
/* vtable for the reference Squeeze op; prerun/reshape/postrun are unused */
static struct node_ops squeeze_node_ops = {.prerun = NULL,
.run = run,
.reshape = NULL,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
/* Register/unregister the reference kernel in the builtin op table; hooked
 * into module load/unload via the AUTO_* macros below. */
static int reg_squeeze_ops(void* arg)
{
return register_builtin_node_ops(OP_SQUEEZE, &squeeze_node_ops);
}
static int unreg_squeeze_ops(void* arg)
{
return unregister_builtin_node_ops(OP_SQUEEZE, &squeeze_node_ops);
}
AUTO_REGISTER_OPS(reg_squeeze_ops);
AUTO_UNREGISTER_OPS(unreg_squeeze_ops);
|
depend-iterator-2.c | int a, b[64];
struct S { int c; } *d, *e;
struct T;
struct T *f, *g;
int *h;
/* Compiler-testsuite function: parse/semantic tests for the OpenMP 'depend'
 * clause with the 'iterator' modifier.  Each pragma is annotated with the
 * diagnostic (if any) the front end must emit; the dg-error comments are
 * load-bearing and must not be edited. */
void
f1 (void)
{
#pragma omp task depend (iterator , in : a) /* { dg-error "expected" } */
;
#pragma omp task depend (iterator (for = 0 : 2) , in : a) /* { dg-error "expected" } */
;
#pragma omp task depend (iterator (5 = 0 : 2) , in : a) /* { dg-error "expected" } */
;
#pragma omp task depend (iterator (i : 0 : 2) , in : a) /* { dg-error "expected '='|name a type|expected" } */
;
#pragma omp task depend (iterator (i = 0, 1 : 2) , in : a) /* { dg-error "expected" } */
;
#pragma omp task depend (iterator (i = (0, 1) : 2) , in : a)
;
#pragma omp task depend (iterator (i = 0 : 1 : 2 : 3) , in : a) /* { dg-error "expected '.'" } */
;
#pragma omp task depend (iterator (i = 0 : 2, 3) , in : a) /* { dg-error "expected" } */
;
#pragma omp task depend (iterator (i = 0 : 10 : 2, 3) , in : a) /* { dg-error "expected" } */
;
#pragma omp task depend (iterator (i = 0:1), iterator (j = 0:1) , in : a) /* { dg-error "invalid depend kind" } */
;
#pragma omp task depend (iterator (i = 0:32) , in : b[i*2:2])
;
#pragma omp task depend (iterator (struct S i = 0:1), in : a) /* { dg-error "iterator 'i' has neither integral nor pointer type" } */
;
#pragma omp task depend (iterator (void i = 0:1) , in : a) /* { dg-error "iterator 'i' has neither integral nor pointer type" } */
;
#pragma omp task depend (iterator (float f = 0.2:0.4) , in : a) /* { dg-error "iterator 'f' has neither integral nor pointer type" } */
;
#pragma omp task depend (iterator (struct S *p = d:e:2) , in : a)
;
#pragma omp task depend (iterator (struct T *p = f:g) , in : a) /* { dg-error "invalid use of" } */
;
#pragma omp task depend (iterator (int i = 0:4, \
struct U { int (*p)[i + 2]; } *p = 0:2) , in : a) /* { dg-error "type of iterator 'p' refers to outer iterator 'i'" "" { target c } } */
; /* { dg-error "types may not be defined in iterator type|not an integral constant" "" { target c++ } .-1 } */
#pragma omp task depend (iterator (i = 0:4, j = i:16) , in : a) /* { dg-error "begin expression refers to outer iterator 'i'" } */
;
#pragma omp task depend (iterator (i = 0:4, j = 2:i:1) , in : a) /* { dg-error "end expression refers to outer iterator 'i'" } */
;
#pragma omp task depend (iterator (i = 0:4, j = 2:8:i) , in : a) /* { dg-error "step expression refers to outer iterator 'i'" } */
;
#pragma omp task depend (iterator (i = *d:2) , in : a) /* { dg-error "aggregate value used where an integer was expected" "" { target c } } */
; /* { dg-error "invalid cast from type 'S' to type 'int'" "" { target c++ } .-1 } */
#pragma omp task depend (iterator (i = 2:*d:2) , in : a) /* { dg-error "aggregate value used where an integer was expected" "" { target c } } */
; /* { dg-error "invalid cast from type 'S' to type 'int'" "" { target c++ } .-1 } */
#pragma omp task depend (iterator (i = 2:4:*d) , in : a) /* { dg-error "iterator step with non-integral type" } */
;
#pragma omp task depend (iterator (i = 1.25:2.5:3) , in : a)
;
#pragma omp task depend (iterator (i = 1:2:3.5) , in : a) /* { dg-error "iterator step with non-integral type" } */
;
#pragma omp task depend (iterator (int *p = 23 : h) , in : a)
;
#pragma omp task depend (iterator (short i=1:3:0) , in : a) /* { dg-error "iterator 'i' has zero step" } */
;
#pragma omp task depend (iterator (i = 1 : 3 : 3 - 3) , in : a) /* { dg-error "iterator 'i' has zero step" } */
;
#pragma omp task depend (iterator (int *p = &b[6]:&b[9]:4 - 4) , in : a) /* { dg-error "iterator 'p' has zero step" } */
;
#pragma omp task depend (iterator (const int i = 0 : 2) , in : a) /* { dg-error "const qualified" } */
;
#pragma omp task depend (iterator (const long long unsigned i = 0 : 2) , in : a) /* { dg-error "const qualified" } */
;
#if !defined (__cplusplus) && __STDC_VERSION__ >= 201112L
#pragma omp task depend (iterator (_Atomic unsigned i = 0 : 2) , in : a) /* { dg-error "_Atomic" "" { target c } } */
;
#endif
}
/* Verify that the 'iterator' depend modifier is rejected when combined with
   the doacross depend kinds: iterator() may only pair with in/out/inout-style
   kinds, never with 'sink' or 'source'.  The dg-error comments pin the
   expected compiler diagnostics; do not alter the code lines themselves. */
void
f2 (void)
{
  int i, j;
  #pragma omp for ordered(2)
  for (i = 0; i < 64; i++)
    for (j = 0; j < 64; j++)
      {
        #pragma omp ordered depend (iterator (k=0:1) , sink: i - 1, j - 1) /* { dg-error "'iterator' modifier incompatible with 'sink'" } */
        #pragma omp ordered depend (iterator (int l = 0:2:3) , source) /* { dg-error "'iterator' modifier incompatible with 'source'" } */
      }
}
/* A depend clause accepts at most one iterator(...) modifier; the second one
   is parsed where a depend kind is expected, producing "invalid depend kind". */
void
f3 (void)
{
  #pragma omp task depend (iterator (i = 0:1), iterator (j = 0:1) , in : a) /* { dg-error "invalid depend kind" } */
  ;
}
|
queues.c | // -*-Mode: C++;-*- // technically C99
// * BeginRiceCopyright *****************************************************
//
// $HeadURL$
// $Id$
//
// --------------------------------------------------------------------------
// Part of HPCToolkit (hpctoolkit.org)
//
// Information about sources of support for research and development of
// HPCToolkit is at 'hpctoolkit.org' and in 'README.Acknowledgments'.
// --------------------------------------------------------------------------
//
// Copyright ((c)) 2002-2021, Rice University
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of Rice University (RICE) nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// This software is provided by RICE and contributors "as is" and any
// express or implied warranties, including, but not limited to, the
// implied warranties of merchantability and fitness for a particular
// purpose are disclaimed. In no event shall RICE or contributors be
// liable for any direct, indirect, incidental, special, exemplary, or
// consequential damages (including, but not limited to, procurement of
// substitute goods or services; loss of use, data, or profits; or
// business interruption) however caused and on any theory of liability,
// whether in contract, strict liability, or tort (including negligence
// or otherwise) arising in any way out of the use of this software, even
// if advised of the possibility of such damage.
//
// ******************************************************* EndRiceCopyright *
//*****************************************************************************
// local includes
//*****************************************************************************
#include "queues.h"
//*****************************************************************************
// interface functions
//*****************************************************************************
#define Sd(q) q.ptr
#define Sp(q) q->ptr
#define Ad(q) q.aptr
#define Ap(q) q->aptr
// Store element v into the (serial, non-atomic) queue pointer cell p.
void
squeue_ptr_set
(
 q_element_ptr_t *p,
 q_element_t *v
)
{
  p->ptr = v;
}
// Read the element currently held in the (serial) queue pointer cell e.
q_element_t *
squeue_ptr_get
(
 q_element_ptr_t *e
)
{
  return e->ptr;
}
// Replace the head of the serial queue with r; hand back the prior head.
q_element_t *
squeue_swap
(
 q_element_ptr_t *q,
 q_element_t *r
)
{
  q_element_t *old_head = q->ptr;
  q->ptr = r;
  return old_head;
}
// Prepend element e onto the serial queue (LIFO discipline).
void
squeue_push
(
 q_element_ptr_t *q,
 q_element_t *e
)
{
  e->next.ptr = q->ptr;  // link e in front of the current head
  q->ptr = e;
}
// Detach and return the head of the serial queue; NULL when empty.
// The popped element's next link is cleared before it is handed out.
q_element_t *
squeue_pop
(
 q_element_ptr_t *q
)
{
  q_element_t *head = q->ptr;
  if (head == 0) return 0;
  q->ptr = head->next.ptr;
  head->next.ptr = 0;
  return head;
}
// Take the whole serial queue at once, leaving it empty; returns the chain.
q_element_t *
squeue_steal
(
 q_element_ptr_t *q
)
{
  return squeue_swap(q, 0);
}
// Initialize the concurrent queue pointer cell e to element v.
// atomic_init performs a non-atomic store: callers must use it before the
// cell is shared between threads.
void
cqueue_ptr_set
(
 q_element_ptr_t *e,
 q_element_t *v
)
{
  atomic_init(&e->aptr, v);
}
// Atomically read the element held in the concurrent queue pointer cell e.
q_element_t *
cqueue_ptr_get
(
 q_element_ptr_t *e
)
{
  return atomic_load(&e->aptr);
}
// Atomically install r as the queue head and return whatever was there.
q_element_t *
cqueue_swap
(
 q_element_ptr_t *q,
 q_element_t *r
)
{
  return atomic_exchange(&q->aptr, r);
}
// Push a singleton element -- or an already-linked chain headed by e -- onto
// the concurrent queue with a compare-and-swap retry loop.
void
cqueue_push
(
 q_element_ptr_t *q,
 q_element_t *e
)
{
  q_element_t *expected = atomic_load(&q->aptr); // current head (first guess)
  q_element_t *first = e;                        // head of the chain being pushed

  // Walk to the last element of the incoming chain.
  q_element_t *succ;
  while ((succ = atomic_load(&e->Ad(next))) != 0) {
    e = succ;
  }

  // Splice: tail of the chain points at the observed head; retry the CAS
  // until the head has not moved underneath us.
  do {
    atomic_store(&e->Ad(next), expected);
  } while (!atomic_compare_exchange_strong(&q->aptr, &expected, first));
}
// Pop the head of the concurrent queue; NULL when empty.
// NOTE(review): this is a classic CAS-based pop and, like all such pops
// without tagged pointers or hazard pointers, is ABA-prone if elements can
// be freed and re-pushed while another thread is between the load and the
// CAS -- confirm the callers' element lifetime discipline.
q_element_t *
cqueue_pop
(
 q_element_ptr_t *q
)
{
  q_element_t *oldhead = atomic_load(&Ap(q));
  q_element_t *next = 0;
  do {
    if (oldhead == 0) return 0;              // queue empty: nothing to pop
    next = atomic_load(&oldhead->Ad(next));  // candidate new head
  } while (!atomic_compare_exchange_strong(&Ap(q), &oldhead, next));
  atomic_store(&oldhead->Ad(next),0);        // detach before handing out
  return oldhead;
}
// Atomically take the entire concurrent queue, leaving it empty; returns the
// chain of elements that was present.
q_element_t *
cqueue_steal
(
 q_element_ptr_t *q
)
{
  return cqueue_swap(q, 0);
}
//*****************************************************************************
// unit test
//*****************************************************************************
#define UNIT_TEST 0
#if UNIT_TEST
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
typedef struct {
q_element_ptr_t next;
int value;
} typed_queue_elem(int);
typedef q_element_ptr_t typed_queue_elem_ptr(int);
typed_queue_elem_ptr(int) queue;
#define qtype cqueue
typed_queue(int, qtype)
// Allocate a fresh queue element carrying 'value', with a null next link.
// FIX: the original fell off the end of this value-returning function
// without 'return e;' -- undefined behavior for every caller that uses the
// result (all of them do, via typed_queue_push).
typed_queue_elem(int) *
typed_queue_elem_fn(int,new)(int value)
{
  typed_queue_elem(int) *e =
    (typed_queue_elem(int) *) malloc(sizeof(int_q_element_t));
  e->value = value;
  typed_queue_elem_ptr_set(int, qtype)(&e->next, 0);
  return e;
}
void
pop
(
int n
)
{
int i;
for(i = 0; i < n; i++) {
typed_queue_elem(int) *e = typed_queue_pop(int, qtype)(&queue);
if (e == 0) {
printf("%d queue empty\n", omp_get_thread_num());
break;
} else {
printf("%d popping %d\n", omp_get_thread_num(), e->value);
}
}
}
// Push n freshly-allocated elements with the consecutive values
// [min, min+n) onto the shared queue, logging each push.
void
push
(
 int min,
 int n
)
{
  int value;
  for (value = min; value < min + n; value++) {
    printf("%d pushing %d\n", omp_get_thread_num(), value);
    typed_queue_push(int, qtype)(&queue, typed_queue_elem_fn(int, new)(value));
  }
}
// Walk a chain of stolen elements (as returned by typed_queue_steal) and
// print each value.  The chain is not freed or modified.
// FIX: removed the unused local 'i', a leftover from an index-based loop.
void
dump
(
 int_q_element_t *e
)
{
  for (; e; e = (int_q_element_t *) typed_queue_elem_ptr_get(int,qtype)(&e->next)) {
    printf("%d stole %d\n", omp_get_thread_num(), e->value);
  }
}
// Unit-test driver: every OpenMP thread concurrently pushes, pops and steals
// on the single shared queue, printing what it observes.  Output order is
// nondeterministic by design; the test is exercised by inspection.
int
main
(
 int argc,
 char **argv
)
{
  typed_queue_elem_ptr_set(int, qtype)(&queue, 0);  // start empty
#pragma omp parallel
  {
    push(0, 30);
    pop(10);
    push(100, 12);
    // pop(100);
    // Steal the whole remaining chain, print it, then donate it back.
    int_q_element_t *e = typed_queue_steal(int, qtype)(&queue);
    dump(e);
    push(300, 30);
    // NOTE(review): pushing 'e' re-inserts the entire stolen chain (push
    // walks to the chain's tail); the elements were only printed, not freed.
    typed_queue_push(int, qtype)(&queue, e);
    pop(100);
  }
}
#endif
|
cpl_io_fits-test.c | /*
* This file is part of the ESO Common Pipeline Library
* Copyright (C) 2001-2017 European Southern Observatory
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
/*-----------------------------------------------------------------------------
Includes
-----------------------------------------------------------------------------*/
#include "cpl_io_fits.h"
#include "cpl_test.h"
#include "cpl_fits.h"
#include "cpl_memory.h"
#include "cpl_table.h"
#include <stdlib.h>
#include <string.h>
/*-----------------------------------------------------------------------------
Defines
-----------------------------------------------------------------------------*/
#ifndef NEXT
#define NEXT 3
#endif
#ifndef NFILE
#define NFILE 4
#endif
#define BASENAME "cpl_io_fits_test"
/*----------------------------------------------------------------------------
Function prototypes
----------------------------------------------------------------------------*/
static void cpl_io_fits_test_many(int);
static void cpl_io_fits_test_fulldisk_table(const char*);
/*-----------------------------------------------------------------------------
Main
-----------------------------------------------------------------------------*/
/*
 * Exercise the CPL FITS I/O caching layer: several files are written, read
 * back and rewritten concurrently (one writer at a time, enforced with
 * "omp ordered"), interleaving creation, appending and loading so the
 * file-handle cache has to jump between files and extensions.
 *
 * FIX: the caching-mode test used 'cpl_fits_get_mode() | CPL_FITS_START_CACHING',
 * which is non-zero for every mode; the intended bitmask test is '&'.
 */
int main(int argc, char *argv[])
{
    cpl_boolean do_bench;
    int next = NEXT;   /* extensions written per pass (squared for benchmarks) */
    int j;

    cpl_test_init(PACKAGE_BUGREPORT, CPL_MSG_WARNING);

    do_bench = cpl_msg_get_level() <= CPL_MSG_INFO ? CPL_TRUE : CPL_FALSE;

    if (do_bench)
        next *= next;

    /* Insert tests below */

    /* Optional full-disk test on a user supplied (small) file system */
    if (argc == 2) {
        cpl_io_fits_test_fulldisk_table(argv[1]);
    }

    /* Stress the handle cache past its capacity when benchmarking */
    cpl_io_fits_test_many(do_bench
                          ? 1 + CPL_IO_FITS_MAX_OPEN
                          : 1 + CPL_IO_FITS_MAX_OPEN/3);

#ifdef _OPENMP
#pragma omp parallel for private(j)
#endif
    for (j = 0; j < NFILE; j++) {
        char * filename = cpl_sprintf(BASENAME "-%d.fits", j + 1);
        int base = j * next * 10; /* Assume at most 10 load-loops */
        int i;

        cpl_msg_info(cpl_func, "Testing with file %s", filename);

        /* Creation + append only */
#ifdef _OPENMP
        /* Multi-threaded writing is limited to one thread writing at a time */
#pragma omp parallel for ordered private(i)
#endif
        for (i = 0; i <= next; i++) {
            cpl_error_code myerror;
            cpl_image * myimage = cpl_image_new(1, 1, CPL_TYPE_INT);
            cpl_propertylist * plist = cpl_propertylist_new();

            myerror = cpl_propertylist_append_int(plist, "MYINT", i + base);
            cpl_test_eq_error(myerror, CPL_ERROR_NONE);

            myerror = cpl_image_add_scalar(myimage, i + base);
            cpl_test_eq_error(myerror, CPL_ERROR_NONE);

#ifdef _OPENMP
#pragma omp ordered
#endif
            {
                myerror = cpl_image_save(myimage, filename, CPL_TYPE_INT, plist,
                                         i ? CPL_IO_EXTEND : CPL_IO_CREATE);
            }
            cpl_test_eq_error(myerror, CPL_ERROR_NONE);
            cpl_image_delete(myimage);
            cpl_propertylist_delete(plist);
        }

        cpl_test_eq(cpl_fits_count_extensions(filename), next);

        /* Reading only */
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
        for (i = 0; i <= next; i++) {
            int is_bad;
            cpl_image * myimage = cpl_image_load(filename, CPL_TYPE_INT, 0, i);
            cpl_propertylist * plist;

            cpl_test_error(CPL_ERROR_NONE);
            cpl_test_nonnull(myimage);
            cpl_test_eq(cpl_image_get(myimage, 1, 1, &is_bad), i + base);
            cpl_test_zero(is_bad);
            cpl_image_delete(myimage);

            /* Make sure to jump around between the extensions */
            cpl_test_eq(cpl_fits_count_extensions(filename), next);

            plist = cpl_propertylist_load(filename, i);
            cpl_test_error(CPL_ERROR_NONE);
            cpl_test_nonnull(plist);
            cpl_test_eq(cpl_propertylist_get_int(plist, "MYINT"), i + base);
            cpl_propertylist_delete(plist);
        }

        base += next;

        /* Alternating creation + append only: every even i recreates the
           file, so at most one extension survives each pass */
#ifdef _OPENMP
        /* Multi-threaded writing is limited to one thread writing at a time */
#pragma omp parallel for ordered private(i)
#endif
        for (i = 0; i <= next; i++) {
            cpl_error_code myerror;
            cpl_image * myimage = cpl_image_new(1, 1, CPL_TYPE_INT);
            cpl_propertylist * plist = cpl_propertylist_new();

            myerror = cpl_propertylist_append_int(plist, "MYINT", i + base);
            cpl_test_eq_error(myerror, CPL_ERROR_NONE);

            myerror = cpl_image_add_scalar(myimage, i + base);
            cpl_test_eq_error(myerror, CPL_ERROR_NONE);

#ifdef _OPENMP
#pragma omp ordered
#endif
            {
                myerror = cpl_image_save(myimage, filename, CPL_TYPE_INT, plist,
                                         (i & 1) ? CPL_IO_EXTEND
                                         : CPL_IO_CREATE);
            }
            cpl_test_eq_error(myerror, CPL_ERROR_NONE);
            cpl_image_delete(myimage);
            cpl_propertylist_delete(plist);
        }

        cpl_test_eq(cpl_fits_count_extensions(filename), next & 1);

        /* Reading only */
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
        for (i = 0; i <= (next & 1); i++) {
            int is_bad;
            cpl_image * myimage = cpl_image_load(filename, CPL_TYPE_INT, 0, i);
            cpl_propertylist * plist;

            cpl_test_error(CPL_ERROR_NONE);
            cpl_test_nonnull(myimage);
            /* Only the values of the last creation survive */
            cpl_test_eq(cpl_image_get(myimage, 1, 1, &is_bad), i + (next & ~1)
                        + base);
            cpl_test_zero(is_bad);
            cpl_image_delete(myimage);

            plist = cpl_propertylist_load(filename, i);
            cpl_test_error(CPL_ERROR_NONE);
            cpl_test_nonnull(plist);
            cpl_test_eq(cpl_propertylist_get_int(plist, "MYINT"),
                        i + (next & ~1) + base);
            cpl_propertylist_delete(plist);
        }

        base += next;

        /* Creation + append and reading */
#ifdef _OPENMP
        /* Multi-threaded writing is limited to one thread writing at a time */
#pragma omp parallel for ordered private(i)
#endif
        for (i = 0; i <= next; i++) {
            int is_bad;
            cpl_error_code myerror;
            cpl_image * myimage = cpl_image_new(1, 1, CPL_TYPE_INT);
            cpl_propertylist * plist = cpl_propertylist_new();

            myerror = cpl_propertylist_append_int(plist, "MYINT", i + base);
            cpl_test_eq_error(myerror, CPL_ERROR_NONE);

            myerror = cpl_image_add_scalar(myimage, i + base);
            cpl_test_eq_error(myerror, CPL_ERROR_NONE);
            cpl_test_eq(i, i);

#ifdef _OPENMP
#pragma omp ordered
#endif
            {
                myerror = cpl_image_save(myimage, filename, CPL_TYPE_INT, plist,
                                         i ? CPL_IO_EXTEND : CPL_IO_CREATE);
                cpl_test_eq_error(myerror, CPL_ERROR_NONE);
                cpl_image_delete(myimage);

                /* Make sure to jump around between the extensions */
                cpl_test_eq(cpl_fits_count_extensions(filename), i);

                myimage = cpl_image_load(filename, CPL_TYPE_INT, 0, i);
                cpl_test_error(CPL_ERROR_NONE);

                cpl_propertylist_delete(plist);
                plist = cpl_propertylist_load(filename, i);
                cpl_test_error(CPL_ERROR_NONE);
            }
            cpl_test_nonnull(myimage);
            cpl_test_eq(cpl_image_get(myimage, 1, 1, &is_bad), i + base);
            cpl_test_zero(is_bad);
            cpl_test_nonnull(plist);
            cpl_test_eq(cpl_propertylist_get_int(plist, "MYINT"), i + base);
            cpl_image_delete(myimage);
            cpl_propertylist_delete(plist);
        }

        cpl_test_eq(cpl_fits_count_extensions(filename), next);

        base += next;

        /* Creation + append from one thread, reading from from another:
           even i writes extension i/2, odd i re-reads the previous one */
#ifdef _OPENMP
        /* Multi-threaded writing is limited to one thread writing at a time */
#pragma omp parallel for ordered private(i)
#endif
        for (i = 0; i <= 2 * next + 1; i++)
#ifdef _OPENMP
#pragma omp ordered
#endif
        {
            if (i & 1) {
                if (i > 2) {
                    /* Read the previously written extension */
                    const int i2 = i / 2 - 1;
                    int is_bad;
                    cpl_propertylist * plist;
                    cpl_image * myimage = cpl_image_load(filename,
                                                         CPL_TYPE_INT,
                                                         0, i2);
                    cpl_test_error(CPL_ERROR_NONE);
                    cpl_test_nonnull(myimage);
                    cpl_test_eq(cpl_image_get(myimage, 1, 1, &is_bad),
                                i2 + base);
                    cpl_test_zero(is_bad);
                    cpl_image_delete(myimage);

                    plist = cpl_propertylist_load(filename, i2);
                    cpl_test_error(CPL_ERROR_NONE);
                    cpl_test_nonnull(plist);
                    cpl_test_eq(cpl_propertylist_get_int(plist, "MYINT"),
                                i2 + base);
                    cpl_propertylist_delete(plist);
                }
            } else {
                cpl_error_code myerror;
                cpl_image * myimage = cpl_image_new(1, 1, CPL_TYPE_INT);
                cpl_propertylist * plist = cpl_propertylist_new();

                myerror = cpl_propertylist_append_int(plist, "MYINT",
                                                      i/2 + base);
                cpl_test_eq_error(myerror, CPL_ERROR_NONE);

                myerror = cpl_image_add_scalar(myimage, i/2 + base);
                cpl_test_eq_error(myerror, CPL_ERROR_NONE);
                cpl_test_eq(i, i);

                myerror = cpl_image_save(myimage, filename,
                                         CPL_TYPE_INT, plist,
                                         i ? CPL_IO_EXTEND : CPL_IO_CREATE);
                cpl_test_eq_error(myerror, CPL_ERROR_NONE);
                cpl_image_delete(myimage);
                cpl_propertylist_delete(plist);
            }
        }

        cpl_test_eq(cpl_fits_count_extensions(filename), next);

        base += next;

        /* Alternating creation + append and reading */
#ifdef _OPENMP
        /* Multi-threaded writing is limited to one thread writing at a time */
#pragma omp parallel for ordered private(i)
#endif
        for (i = 0; i <= next; i++) {
            int is_bad;
            cpl_error_code myerror;
            cpl_image * myimage = cpl_image_new(1, 1, CPL_TYPE_INT);
            cpl_propertylist * plist = cpl_propertylist_new();

            myerror = cpl_propertylist_append_int(plist, "MYINT", i + base);
            cpl_test_eq_error(myerror, CPL_ERROR_NONE);

            myerror = cpl_image_add_scalar(myimage, i + base);
            cpl_test_eq_error(myerror, CPL_ERROR_NONE);

#ifdef _OPENMP
#pragma omp ordered
#endif
            {
                myerror = cpl_image_save(myimage, filename, CPL_TYPE_INT, plist,
                                         (i & 1) ? CPL_IO_EXTEND
                                         : CPL_IO_CREATE);
                cpl_test_eq_error(myerror, CPL_ERROR_NONE);

                /* Make sure to jump around between the extensions */
                cpl_test_eq(cpl_fits_count_extensions(filename), i & 1);

                cpl_image_delete(myimage);
                myimage = cpl_image_load(filename, CPL_TYPE_INT, 0, i & 1);
                cpl_test_error(CPL_ERROR_NONE);

                cpl_propertylist_delete(plist);
                plist = cpl_propertylist_load(filename, i & 1);
                cpl_test_error(CPL_ERROR_NONE);
            }
            cpl_test_nonnull(myimage);
            cpl_test_eq(cpl_image_get(myimage, 1, 1, &is_bad), i + base);
            cpl_test_zero(is_bad);
            cpl_image_delete(myimage);
            cpl_test_nonnull(plist);
            cpl_test_eq(cpl_propertylist_get_int(plist, "MYINT"), i + base);
            cpl_propertylist_delete(plist);
        }

        cpl_test_eq(cpl_fits_count_extensions(filename), next & 1);

        cpl_free((void*)filename);
    }

    /* FIX: was '|' (always true); '&' tests whether caching is enabled */
    if (cpl_fits_get_mode() & CPL_FITS_START_CACHING) {
        cpl_error_code myerror;
        myerror = cpl_fits_set_mode(CPL_FITS_STOP_CACHING);
        cpl_test_eq_error(myerror, CPL_ERROR_NONE);
    }

    /* Remove the temporary test files */
    for (j = 0; j < NFILE; j++) {
        char * filename = cpl_sprintf(BASENAME "-%d.fits", j + 1);
        cpl_test_zero(remove(filename));
        cpl_free(filename);
    }

    /* End of tests */
    return cpl_test_end(0);
}
/*----------------------------------------------------------------------------*/
/**
@brief Open multiple files
@param n The number of files to process
@return void
*/
/*----------------------------------------------------------------------------*/
static void cpl_io_fits_test_many(int n)
{
    /* Create n small FITS files (optionally more than the handle cache can
       hold open), verifying each write by reading the keyword back; then
       disable caching, delete the files and restore the original I/O mode.

       FIX: the caching test used 'iomode | CPL_FITS_START_CACHING', which is
       non-zero for every mode; the intended bitmask test is '&'. */
    cpl_propertylist * plist = cpl_propertylist_new();
    const cpl_fits_mode iomode = cpl_fits_get_mode();

    cpl_msg_info(cpl_func, "Testing I/O-mode %d <=> %d with %d file(s)",
                 (int)iomode, (int)CPL_FITS_START_CACHING, n);

    for (int i = 0; i < n; i++) {
        char * filename = cpl_sprintf(BASENAME "-%d.fits", i + 1);
        cpl_error_code myerror;

        myerror = cpl_propertylist_update_int(plist, "MYINT", i);
        cpl_test_eq_error(myerror, CPL_ERROR_NONE);

        myerror = cpl_propertylist_save(plist, filename, CPL_IO_CREATE);
        cpl_test_eq_error(myerror, CPL_ERROR_NONE);

        /* Reload the list just written and verify the keyword round-trips */
        cpl_propertylist_delete(plist);
        plist = cpl_propertylist_load(filename, 0);
        cpl_test_error(CPL_ERROR_NONE);
        cpl_test_nonnull(plist);
        cpl_test_eq(cpl_propertylist_get_int(plist, "MYINT"), i);
        cpl_free(filename);
    }
    cpl_propertylist_delete(plist);

    /* Close any cached handles before deleting the files */
    if (iomode & CPL_FITS_START_CACHING) {
        cpl_error_code myerror;
        myerror = cpl_fits_set_mode(CPL_FITS_STOP_CACHING);
        cpl_test_eq_error(myerror, CPL_ERROR_NONE);
    }

    for (int i = 0; i < n; i++) {
        char * filename = cpl_sprintf(BASENAME "-%d.fits", i + 1);
        cpl_test_zero(remove(filename));
        cpl_free(filename);
    }

    /* Restore the caller's I/O mode */
    if (iomode & CPL_FITS_START_CACHING) {
        cpl_error_code myerror;
        myerror = cpl_fits_set_mode(iomode);
        cpl_test_eq_error(myerror, CPL_ERROR_NONE);
    }
}
/*----------------------------------------------------------------------------*/
/**
@brief Test I/O on a user specified file system (of limited size)
@param root The name of the file system to fill up
@return void
*/
/*----------------------------------------------------------------------------*/
static void cpl_io_fits_test_fulldisk_table(const char* root)
{
    /* Append table extensions to a file on the given (small) file system
       until the disk fills up, and verify that the failure is reported as
       CPL_ERROR_FILE_NOT_CREATED rather than silently ignored.

       FIX: the caching tests used 'iomode | CPL_FITS_START_CACHING'
       (always true); the intended bitmask test is '&'.  Also fixed the
       doubled colon in the warning message. */
    cpl_table* mytab = cpl_table_new(1000);
    char* filename = cpl_sprintf("%s/" BASENAME ".fits", root);
    const cpl_fits_mode iomode = cpl_fits_get_mode();
    cpl_error_code code;

    cpl_test_nonnull(mytab);
    cpl_test_nonnull(filename);

    code = cpl_table_new_column(mytab, "COL1", CPL_TYPE_DOUBLE);
    cpl_test_eq_error(code, CPL_ERROR_NONE);

    code = cpl_table_fill_invalid_double(mytab, "COL1", 42.0);
    cpl_test_eq_error(code, CPL_ERROR_NONE);

    code = cpl_table_save(mytab, NULL, NULL, filename, CPL_IO_CREATE);
    cpl_test_eq_error(code, CPL_ERROR_NONE);
    cpl_test_fits(filename);

    /* Keep appending until the file system is full */
    do {
        code = cpl_table_save(mytab, NULL, NULL, filename, CPL_IO_EXTEND);
    } while (code == CPL_ERROR_NONE);

    cpl_test_eq_error(code, CPL_ERROR_FILE_NOT_CREATED);

    if (cpl_test_get_failed() > 0) {
        cpl_msg_warning(cpl_func, "Test failed, not deleting file: %s",
                        filename);
    } else if (remove(filename) != 0) {
        cpl_msg_info(cpl_func, "The failure-test file has already been deleted: "
                     "%s", filename);
    }

    cpl_table_delete(mytab);

    /* Close any cached handle on the file before the final removal */
    if (iomode & CPL_FITS_START_CACHING) {
        cpl_error_code myerror;
        myerror = cpl_fits_set_mode(CPL_FITS_STOP_CACHING);
        cpl_test_eq_error(myerror, CPL_ERROR_NONE);
    }

    /* NOTE(review): when the earlier remove() in the success branch already
       deleted the file, this second remove() will fail and flag a test
       failure -- confirm whether the first removal is expected to fail while
       caching still holds the file open. */
    cpl_test_zero(remove(filename));

    /* Restore the caller's I/O mode */
    if (iomode & CPL_FITS_START_CACHING) {
        cpl_error_code myerror;
        myerror = cpl_fits_set_mode(iomode);
        cpl_test_eq_error(myerror, CPL_ERROR_NONE);
    }

    cpl_free(filename);
}
|
trsm_x_csr_n_lo_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>
/* Triangular solve with multiple right-hand sides, row-major layout:
 * solves L * y = alpha * x column-by-column, where L is the lower triangle
 * of CSR matrix A (non-unit diagonal taken from A's stored diagonal).
 * x and y are dense with 'columns' RHS vectors and leading dims ldx / ldy.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    ALPHA_INT m = A->rows;
    /* NOTE(review): VLA on the stack -- may overflow for large m; a row with
       no stored diagonal entry keeps the zero from memset and the final
       alpha_div divides by zero.  Confirm callers guarantee a stored,
       non-zero diagonal. */
    ALPHA_Number diag[m];
    memset(diag, '\0', m * sizeof(ALPHA_Number));
    int num_thread = alpha_get_thread_num();

    /* Gather the diagonal of each row; rows are independent, so this loop
       parallelizes with disjoint writes. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT r = 0; r < m; r++)
    {
        for (ALPHA_INT ai = A->rows_start[r]; ai < A->rows_end[r]; ai++)
        {
            ALPHA_INT ac = A->col_indx[ai];
            if (ac == r)
            {
                diag[r] = A->values[ai];
            }
        }
    }

    /* Forward substitution: each RHS column is independent (parallel), but
       within a column rows must be processed in increasing order because
       row r reads y of rows < r. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
    {
        for (ALPHA_INT r = 0; r < m; r++)
        {
            /* temp = sum of strictly-lower entries times already-solved y */
            ALPHA_Number temp;
            alpha_setzero(temp);
            for (ALPHA_INT ai = A->rows_start[r]; ai < A->rows_end[r]; ai++)
            {
                ALPHA_INT ac = A->col_indx[ai];
                if (ac < r)
                {
                    alpha_madde(temp, A->values[ai], y[ac * ldy + out_y_col]);
                }
            }
            /* y[r] = (alpha * x[r] - temp) / diag[r] */
            ALPHA_Number t;
            alpha_setzero(t);
            alpha_mul(t, alpha, x[r * ldx + out_y_col]);
            alpha_sube(t, temp);
            alpha_div(y[r * ldy + out_y_col], t, diag[r]);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
DRB049-fprintf-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
/*
Example use of fprintf
*/
#include <stdio.h>
/* Fill A[0..999] on the target device, then write the values to a temporary
 * file with fprintf and clean the file up again.
 *
 * FIX: on fopen() failure the original printed an error but then continued
 * and called fprintf(pfile, ...) with pfile == NULL, which is undefined
 * behavior; now the program bails out with a non-zero exit status instead.
 */
int main(int argc, char *argv[]) {
  int i;
  int ret;
  FILE *pfile;
  int len = 1000;
  int A[1000];

#pragma omp target data map(tofrom: A[0:1000])
  {
#pragma omp target parallel for
    for (i = 0; i < len; i++)
      A[i] = i;
  }

  pfile = fopen("mytempfile.txt", "a+");
  if (pfile == NULL) {
    fprintf(stderr, "Error in fopen()\n");
    return 1;
  }
  for (i = 0; i < len; ++i) {
    fprintf(pfile, "%d\n", A[i]);
  }
  fclose(pfile);

  ret = remove("mytempfile.txt");
  if (ret != 0) {
    fprintf(stderr, "Error: unable to delete mytempfile.txt\n");
  }
  return 0;
}
|
GB_binop__eq_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__eq_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__eq_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__eq_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__eq_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_int8)
// A*D function (colscale): GB (_AxD__eq_int8)
// D*A function (rowscale): GB (_DxB__eq_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__eq_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__eq_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_int8)
// C=scalar+B GB (_bind1st__eq_int8)
// C=scalar+B' GB (_bind1st_tran__eq_int8)
// C=A+scalar GB (_bind2nd__eq_int8)
// C=A'+scalar GB (_bind2nd_tran__eq_int8)
// C type: bool
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_INT8 || GxB_NO_EQ_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B, all three matrices dense.  This operator (EQ) is not one of the
// accumulable ops, so this variant is compiled out (enclosing '#if 0').
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with all three matrices dense; the loop body comes from the
// included template specialized by the GB_* macros above.  Returns
// GrB_NO_VALUE when this operator is disabled at compile time (GB_DISABLE).
GrB_Info GB (_Cdense_ewise3_noaccum__eq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.  EQ is not an
// accumulable op for this kernel, so the template include is compiled out
// ('#if 0') and the function is a successful no-op when enabled.
GrB_Info GB (_Cdense_accumB__eq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.  As with _Cdense_accumB,
// the EQ operator is not accumulable here, so the template body is compiled
// out ('#if 0') and the call succeeds without doing work.
GrB_Info GB (_Cdense_accumb__eq_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.  C's values are
// bool (the EQ result type); the numeric work is in the included template.
GrB_Info GB (_AxD__eq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.  C's values are
// bool (the EQ result type); the numeric work is in the included template.
GrB_Info GB (_DxB__eq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B with the EQ operator.
// The workspace declarations are consumed (and freed via GB_FREE_WORK) by
// the included add template.
GrB_Info GB (_AaddB__eq_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (pattern intersection): C=A.*B, C<M>=A.*B, or C<!M>=A.*B where
// C is sparse/hypersparse, using the EQ_INT8 operator.
GrB_Info GB (_AemultB_08__eq_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult, method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full, using the EQ_INT8 operator.  The GB_FLIPPED macro selects
// whether the template evaluates fmult(x,y) or fmult(y,x).
GrB_Info GB (_AemultB_02__eq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult, method 04: C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full, using the EQ_INT8 operator.
GrB_Info GB (_AemultB_04__eq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult when C is bitmap: C=A.*B, C<M>=A.*B, or C<!M>=A.*B, using the
// EQ_INT8 operator.
GrB_Info GB (_AemultB_bitmap__eq_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply Cx[p] = (x == Bx[p]) over all bnz entries of B,
// with the scalar bound as the FIRST operand.  Cx and Bx may be aliased.
GrB_Info GB (_bind1st__eq_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (presumably NULL when B is not bitmap)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from the bitmap
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply Cx[p] = (Ax[p] == y) over all anz entries of A,
// with the scalar bound as the SECOND operand.  Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__eq_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A (presumably NULL when A is not bitmap)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// C = op (x, A'): transpose A and apply cij = (x == aij), scalar bound first.
// GB_CAST_OP is the per-entry kernel consumed by GB_unop_transpose.c.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x == aij) ; \
}
GrB_Info GB (_bind1st_tran__eq_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this generated kernel
    #undef GB_ATYPE
    #define GB_ATYPE \
        int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
GrB_Info GB (_bind2nd_tran__eq_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
conduct.h | float __conduct = 0;
int32_t *G_member;
// Print the conductance sum accumulated by conduct() into the global
// __conduct.  The graph argument G is not used by this function.
void outputConduct(graph *G) {
    printf("sum C = %lf\n", __conduct);
}
// Accumulate the sum of conductances of communities 0..3 into the global
// __conduct.  For each community i (membership given by G_member[u]):
//   __S2 = total degree of nodes inside i,
//   __S3 = total degree of nodes outside i,
//   __S4 = number of edges leaving i (cut size),
//   conductance(i) = __S4 / min(__S2, __S3), or just __S4 when the min is 0.
// Each sum is a parallel reduction: per-thread partials combined with an
// atomic add.  NOTE(review): the community count is hard-coded to 4 —
// confirm this matches the partitioning that fills G_member.
void conduct(graph *G) {
    inittracking("conduct.csv");
    __conduct = 0;
    for (int i = 0; i < 4; i++) {
        float m = 0.0 ;
        int32_t __S2 = 0 ;
        int32_t __S3 = 0 ;
        int32_t __S4 = 0 ;
        // __S2: degrees (begin[u+1]-begin[u]) of members of community i
        #pragma omp parallel
        {
            int32_t __S2_prv = 0 ;
            __S2_prv = 0 ;
            #if defined(PARFOR_GUIDED)
            #pragma omp for schedule(guided, PAR_CHUNKSIZE)
            #elif defined(PARFOR_DYNAMIC)
            #pragma omp for schedule(dynamic, PAR_CHUNKSIZE)
            #elif defined(TASKLOOP_DEFINED)
            #pragma omp taskloop num_tasks(NUM_TASKS)
            #else
            #pragma omp for schedule(static)
            #endif
            for (node_t u = 0; u < G->numNodes; u ++)
                if ((G_member[u] == i))
                {
                    __S2_prv = __S2_prv + (G->begin[u+1] - G->begin[u]) ;
                }
            #pragma omp atomic
            __S2 += __S2_prv;
        }
        // __S3: degrees of all nodes NOT in community i
        #pragma omp parallel
        {
            int32_t __S3_prv = 0 ;
            __S3_prv = 0 ;
            #if defined(PARFOR_GUIDED)
            #pragma omp for schedule(guided, PAR_CHUNKSIZE)
            #elif defined(PARFOR_DYNAMIC)
            #pragma omp for schedule(dynamic, PAR_CHUNKSIZE)
            #elif defined(TASKLOOP_DEFINED)
            #pragma omp taskloop num_tasks(NUM_TASKS)
            #else
            #pragma omp for schedule(static)
            #endif
            for (node_t u0 = 0; u0 < G->numNodes; u0 ++)
                if ((G_member[u0] != i))
                {
                    __S3_prv = __S3_prv + (G->begin[u0+1] - G->begin[u0]) ;
                }
            #pragma omp atomic
            __S3 += __S3_prv;
        }
        // __S4: edges from a member of i to a non-member (cut edges)
        #pragma omp parallel
        {
            int32_t __S4_prv = 0 ;
            __S4_prv = 0 ;
            #if defined(PARFOR_GUIDED)
            #pragma omp for schedule(guided, PAR_CHUNKSIZE)
            #elif defined(PARFOR_DYNAMIC)
            #pragma omp for schedule(dynamic, PAR_CHUNKSIZE)
            #elif defined(TASKLOOP_DEFINED)
            #pragma omp taskloop num_tasks(NUM_TASKS)
            #else
            #pragma omp for schedule(static)
            #endif
            for (node_t u1 = 0; u1 < G->numNodes; u1 ++)
                if ((G_member[u1] == i))
                {
                    for (edge_t j_idx = G->begin[u1];j_idx < G->begin[u1+1] ; j_idx ++)
                    {
                        node_t j = G->node_idx [j_idx];
                        if ((G_member[j] != i))
                        {
                            __S4_prv = __S4_prv + 1 ;
                        }
                    }
                }
            #pragma omp atomic
            __S4 += __S4_prv;
        }
        // conductance of community i; guard against an empty side of the cut
        m = (float)((__S2 < __S3)?__S2:__S3) ;
        if(m == 0) {
            __conduct += __S4;
        } else {
            __conduct += __S4/m;
        }
    }
    endtracking();
}
|
spmv.h | /*
* Modifications to this file:
* Copyright (c) 2014-2015, The University of Queensland
* Licensed under the Apache License, Version 2.0.
*
*/
#pragma once
#include <thrust/functional.h>
#include <cusp/detail/functional.h>
#ifndef DIA_CHUNKSIZE
#define DIA_CHUNKSIZE 1024
#endif
//MW: add some OpenMP pragmas
namespace cusp
{
namespace detail
{
namespace host
{
//////////////
// COO SpMV //
//////////////
// COO sparse matrix-vector product:
//   y[i] = initialize(y[i]), then for every stored entry (i, j, Aij)
//   y[i] = reduce(y[i], combine(Aij, x[j])), in storage order.
template <typename Matrix,
          typename Vector1,
          typename Vector2,
          typename UnaryFunction,
          typename BinaryFunction1,
          typename BinaryFunction2>
void spmv_coo(const Matrix& A,
              const Vector1& x,
              Vector2& y,
              UnaryFunction initialize,
              BinaryFunction1 combine,
              BinaryFunction2 reduce)
{
    typedef typename Matrix::index_type IndexType;
    typedef typename Vector2::value_type ValueType;

    // Pass 1: seed every output entry.
    for (size_t row = 0; row < A.num_rows; row++)
        y[row] = initialize(y[row]);

    // Pass 2: scatter each nonzero into its row accumulator.
    for (size_t nz = 0; nz < A.num_entries; nz++)
    {
        const IndexType r = A.row_indices[nz];
        const IndexType c = A.column_indices[nz];
        const ValueType prod = combine(A.values[nz], x[c]);
        y[r] = reduce(y[r], prod);
    }
}
// Convenience overload: plain y = A*x (zero-initialize, multiply entries,
// accumulate with +).
template <typename Matrix,
          typename Vector1,
          typename Vector2>
void spmv_coo(const Matrix& A,
              const Vector1& x,
              Vector2& y)
{
    typedef typename Vector2::value_type ValueType;
    spmv_coo(A, x, y,
             cusp::detail::zero_function<ValueType>(),
             thrust::multiplies<ValueType>(),
             thrust::plus<ValueType>());
}
//////////////
// CSR SpMV //
//////////////
// CSR sparse matrix-vector product:
//   y[i] = reduce over row i of combine(A(i,j), x[j]),
//   seeded with initialize(y[i]).
// Rows are independent, so the outer loop is parallelized with OpenMP.
template <typename Matrix,
          typename Vector1,
          typename Vector2,
          typename UnaryFunction,
          typename BinaryFunction1,
          typename BinaryFunction2>
void spmv_csr(const Matrix& A,
              const Vector1& x,
              Vector2& y,
              UnaryFunction initialize,
              BinaryFunction1 combine,
              BinaryFunction2 reduce)
{
    typedef typename Matrix::index_type IndexType;
    typedef typename Vector2::value_type ValueType;

    #pragma omp parallel for
    for (size_t row = 0; row < A.num_rows; row++)
    {
        // Accumulate the row locally, then write back once.
        ValueType acc = initialize(y[row]);
        const IndexType first = A.row_offsets[row];
        const IndexType last  = A.row_offsets[row + 1];
        for (IndexType nz = first; nz < last; nz++)
        {
            acc = reduce(acc, combine(A.values[nz], x[A.column_indices[nz]]));
        }
        y[row] = acc;
    }
}
// Convenience overload: plain y = A*x (zero-initialize, multiply entries,
// accumulate with +).
template <typename Matrix,
          typename Vector1,
          typename Vector2>
void spmv_csr(const Matrix& A,
              const Vector1& x,
              Vector2& y)
{
    typedef typename Vector2::value_type ValueType;
    spmv_csr(A, x, y,
             cusp::detail::zero_function<ValueType>(),
             thrust::multiplies<ValueType>(),
             thrust::plus<ValueType>());
}
//////////////
// DIA SpMV //
//////////////
// DIA sparse matrix-vector product, processed in DIA_CHUNKSIZE-row strips so
// each OpenMP iteration works on a contiguous slice of y.
// For symmetric storage only the main/upper diagonals are stored: the first
// pass applies each stored diagonal mirrored (the implicit subdiagonals),
// the second pass applies them as stored.
template <typename Matrix,
          typename Vector1,
          typename Vector2,
          typename UnaryFunction,
          typename BinaryFunction1,
          typename BinaryFunction2>
void spmv_dia(const Matrix& A,
              const Vector1& x,
              Vector2& y,
              UnaryFunction initialize,
              BinaryFunction1 combine,
              BinaryFunction2 reduce)
{
    typedef typename Matrix::index_type IndexType;
    //typedef typename Vector2::value_type ValueType;
    const size_t num_diagonals = A.values.num_cols;
    if (A.symmetric) {
        // if matrix has a main diagonal it is the first in offsets and should
        // be skipped in the subdiagonal loop below. The main diagonal is
        // processed by the second loop
        const size_t d0 = (A.diagonal_offsets[0] == 0 ? 1 : 0);
        #pragma omp parallel for
        for (size_t ch = 0; ch < A.num_rows; ch += DIA_CHUNKSIZE) {
            // initialize chunk
            for (size_t row = ch; row < std::min(ch+DIA_CHUNKSIZE,A.num_rows); row++)
            {
                y[row] = initialize(y[row]);
            }
            // process subdiagonals (mirror of the stored upper diagonals)
            for (size_t d = 0; d < num_diagonals-d0; d++)
            {
                const size_t diag = num_diagonals-d-1;
                for (size_t row = ch; row < std::min(ch+DIA_CHUNKSIZE,A.num_rows); row++)
                {
                    // NOTE(review): col >= 0 assumes IndexType is signed — confirm
                    const IndexType col = row - A.diagonal_offsets[diag];
                    if (col >= 0 && col < A.num_rows)
                    {
                        y[row] = reduce(y[row], combine(A.values(col, diag), x[col]));
                    }
                }
            }
            // process main and upper diagonals
            for (size_t d = 0; d < num_diagonals; d++)
            {
                for (size_t row = ch; row < std::min(ch+DIA_CHUNKSIZE,A.num_rows); row++)
                {
                    const IndexType col = row + A.diagonal_offsets[d];
                    if (col >= 0 && col < A.num_cols)
                    {
                        y[row] = reduce(y[row], combine(A.values(row, d), x[col]));
                    }
                }
            }
        }
    } else { // !A.symmetric
        #pragma omp parallel for
        for (size_t ch = 0; ch < A.num_rows; ch += DIA_CHUNKSIZE) {
            // initialize chunk
            for (size_t row = ch; row < std::min(ch+DIA_CHUNKSIZE,A.num_rows); row++)
            {
                y[row] = initialize(y[row]);
            }
            // for each diagonal
            for (size_t d = 0; d < num_diagonals; d++)
            {
                for (IndexType row=ch; row<std::min(ch+DIA_CHUNKSIZE,A.num_rows); row++)
                {
                    const IndexType col = row + A.diagonal_offsets[d];
                    if (col >= 0 && col < A.num_cols)
                    {
                        y[row] = reduce(y[row], combine(A.values(row, d), x[col]));
                    }
                }
            }
        }
    }
}
// Convenience overload: plain y = A*x (zero-initialize, multiply entries,
// accumulate with +).
template <typename Matrix,
          typename Vector1,
          typename Vector2>
void spmv_dia(const Matrix& A,
              const Vector1& x,
              Vector2& y)
{
    typedef typename Vector2::value_type ValueType;
    spmv_dia(A, x, y,
             cusp::detail::zero_function<ValueType>(),
             thrust::multiplies<ValueType>(),
             thrust::plus<ValueType>());
}
//////////////
// CDS SpMV //
//////////////
// CDS (blocked-diagonal) sparse matrix-vector product.  Rows are processed
// in chunks whose size is rounded down to a multiple of block_size.  The
// block_size == 2 case is hand-unrolled; the general case indexes the raw
// value array directly.  Symmetric storage keeps only main/upper diagonal
// blocks: a first pass applies each stored block transposed (the implicit
// subdiagonal blocks), a second pass applies them as stored.
template <typename Matrix,
          typename Vector1,
          typename Vector2,
          typename UnaryFunction,
          typename BinaryFunction1,
          typename BinaryFunction2>
void spmv_cds(const Matrix& A,
              const Vector1& x,
              Vector2& y,
              UnaryFunction initialize,
              BinaryFunction1 combine,
              BinaryFunction2 reduce)
{
    typedef typename Matrix::index_type IndexType;
    typedef typename Vector2::value_type ValueType;
    const IndexType num_diagonals = A.diagonal_offsets.size();
    const IndexType block_size = (IndexType)A.block_size;
    const IndexType num_rows = (IndexType)A.num_rows;
    // make chunksize a multiple of block_size
    const IndexType chunksize = block_size*(DIA_CHUNKSIZE/block_size);
    // optimization for special case
    if (block_size == 2) {
        if (A.symmetric) {
            // if there is a main diagonal block, it is the first in offsets
            // and should be skipped in the first loop below since the main
            // diagonal is processed in the second loop
            const IndexType d0 = (A.diagonal_offsets[0] == 0 ? 1 : 0);
            #pragma omp parallel for
            for (IndexType ch = 0; ch < num_rows; ch+=chunksize)
            {
                for (IndexType row = ch; row<std::min(ch+chunksize,num_rows); row++)
                {
                    y[row] = initialize(y[row]);
                }
                // process subdiagonal blocks (stored blocks applied transposed)
                for (IndexType d = 0; d < num_diagonals-d0; d++)
                {
                    const IndexType diag = num_diagonals-d-1;
                    const IndexType k = -2*A.diagonal_offsets[diag];
                    for (IndexType row = ch; row<std::min(ch+chunksize,num_rows); row+=2)
                    {
                        const IndexType col = row + k;
                        if (col >= 0 && col <= num_rows-2)
                        {
                            // 2x2 block fully unrolled
                            y[row] = reduce(y[row], combine(A.values(col, 2*diag), x[col]));
                            y[row] = reduce(y[row], combine(A.values(col+1,2*diag), x[col+1]));
                            y[row+1] = reduce(y[row+1],combine(A.values(col, 2*diag+1),x[col]));
                            y[row+1] = reduce(y[row+1],combine(A.values(col+1,2*diag+1),x[col+1]));
                        }
                    }
                }
                // process main and upper diagonal blocks
                for (IndexType d = 0; d < num_diagonals; d++)
                {
                    const IndexType k = 2*A.diagonal_offsets[d];
                    for (IndexType row = ch; row<std::min(ch+chunksize,num_rows); row+=2)
                    {
                        const IndexType col = row + k;
                        if (col >= 0 && col <= num_rows-2)
                        {
                            y[row] = reduce(y[row], combine(A.values(row, 2*d), x[col]));
                            y[row+1] = reduce(y[row+1],combine(A.values(row+1,2*d), x[col]));
                            y[row] = reduce(y[row], combine(A.values(row, 2*d+1),x[col+1]));
                            y[row+1] = reduce(y[row+1],combine(A.values(row+1,2*d+1),x[col+1]));
                        }
                    }
                }
            }
        } else { // !A.symmetric
            #pragma omp parallel for
            for (IndexType ch = 0; ch < num_rows; ch+=chunksize)
            {
                for (IndexType row = ch; row<std::min(ch+chunksize,num_rows); row+=2)
                {
                    // accumulate the 2-row block in registers, write back once
                    ValueType sum1 = initialize(y[row]);
                    ValueType sum2 = initialize(y[row+1]);
                    // for each diagonal block
                    for (IndexType d = 0; d < num_diagonals; d++)
                    {
                        const IndexType col = row + A.diagonal_offsets[d]*2;
                        if (col >= 0 && col <= num_rows-2)
                        {
                            sum1 = reduce(sum1,combine(A.values(row, 2*d), x[col]));
                            sum2 = reduce(sum2,combine(A.values(row+1,2*d), x[col]));
                            sum1 = reduce(sum1,combine(A.values(row, 2*d+1),x[col+1]));
                            sum2 = reduce(sum2,combine(A.values(row+1,2*d+1),x[col+1]));
                        }
                    }
                    y[row] = sum1;
                    y[row+1] = sum2;
                }
            }
        } // A.symmetric
    } else { // block size
        if (A.symmetric) {
            // if there is a main diagonal block, it is the first in offsets
            // and should be skipped in the first loop below since the main
            // diagonal is processed in the second loop
            const IndexType d0 = (A.diagonal_offsets[0] == 0 ? 1 : 0);
            // raw pointer into the value array; blocks addressed via `pitch`
            const ValueType* values = thrust::raw_pointer_cast(&A.values.values[0]);
            const IndexType pitch = A.values.pitch;
            #pragma omp parallel for
            for (IndexType ch = 0; ch < num_rows; ch+=chunksize)
            {
                for (IndexType row = ch; row<std::min(ch+chunksize,num_rows); row++)
                {
                    y[row] = initialize(y[row]);
                }
                // idx starts at the last stored diagonal and steps backwards
                IndexType idx = pitch*block_size*(num_diagonals-1);
                // process subdiagonal blocks
                for (IndexType d = 0; d < num_diagonals-d0; d++)
                {
                    const IndexType diag = num_diagonals-d-1;
                    const IndexType k = -block_size*A.diagonal_offsets[diag];
                    for (IndexType row = ch; row<std::min(ch+chunksize,num_rows); row+=block_size)
                    {
                        const IndexType col = row + k;
                        if (col >= 0 && col <= num_rows-block_size)
                        {
                            // for each row in block
                            for (IndexType j = 0; j < block_size; j++)
                            {
                                // for each column in block
                                for (IndexType i = 0; i < block_size; i++)
                                {
                                    const ValueType& Aij = values[idx+col+i+j*pitch];
                                    const ValueType& xj = x[col + i];
                                    y[row+j] = reduce(y[row+j], combine(Aij, xj));
                                }
                            }
                        }
                    }
                    idx -= block_size*pitch;
                }
                // process main and upper diagonal blocks
                for (IndexType d = 0; d < num_diagonals; d++)
                {
                    const IndexType k = A.diagonal_offsets[d]*block_size;
                    for (IndexType row = ch; row<std::min(ch+chunksize,num_rows); row+=block_size)
                    {
                        const IndexType col = row + k;
                        if (col >= 0 && col <= num_rows-block_size)
                        {
                            // for each column in block
                            for (IndexType i = 0; i < block_size; i++)
                            {
                                // for each row in block
                                for (IndexType j = 0; j < block_size; j++)
                                {
                                    const ValueType& Aij = values[row+j+(d*block_size+i)*pitch];
                                    const ValueType& xj = x[col + i];
                                    y[row+j] = reduce(y[row+j], combine(Aij, xj));
                                }
                            }
                        }
                    }
                } // diagonals
            }
        } else { // !A.symmetric
            #pragma omp parallel for
            for (IndexType ch = 0; ch < num_rows; ch+=chunksize)
            {
                for (IndexType row = ch; row<std::min(ch+chunksize,num_rows); row++)
                {
                    y[row] = initialize(y[row]);
                }
                // for each diagonal block
                for (IndexType d = 0; d < num_diagonals; d++)
                {
                    const IndexType k = A.diagonal_offsets[d]*block_size;
                    for (IndexType row=ch; row<std::min(ch+chunksize,num_rows); row+=block_size)
                    {
                        const IndexType col = row + k;
                        if (col >= 0 && col <= num_rows-block_size)
                        {
                            // for each column in block
                            for (IndexType i = 0; i < block_size; i++)
                            {
                                // for each row in block
                                for (IndexType j = 0; j < block_size; j++)
                                {
                                    const ValueType& Aij = A.values(row+j, d*block_size+i);
                                    const ValueType& xj = x[col + i];
                                    y[row+j] = reduce(y[row+j], combine(Aij, xj));
                                }
                            }
                        }
                    }
                } // diagonals
            } // row chunks
        } // A.symmetric
    } // block size
}
// Convenience overload: plain y = A*x.  A block size of 1 makes the CDS
// layout equivalent to DIA, so that case is delegated to spmv_dia.
template <typename Matrix,
          typename Vector1,
          typename Vector2>
void spmv_cds(const Matrix& A,
              const Vector1& x,
              Vector2& y)
{
    typedef typename Vector2::value_type ValueType;
    if (A.block_size == 1) {
        spmv_dia(A, x, y,
                 cusp::detail::zero_function<ValueType>(),
                 thrust::multiplies<ValueType>(),
                 thrust::plus<ValueType>());
    } else {
        spmv_cds(A, x, y,
                 cusp::detail::zero_function<ValueType>(),
                 thrust::multiplies<ValueType>(),
                 thrust::plus<ValueType>());
    }
}
//////////////
// ELL SpMV //
//////////////
// ELL sparse matrix-vector product.  The matrix stores a fixed number of
// entry slots per row; unused slots carry Matrix::invalid_index.  Slots are
// walked in the outer loop and rows in the inner loop (column-major over the
// slot table), exactly as the original — this keeps the reduce order, and
// therefore any floating-point result, identical.
template <typename Matrix,
          typename Vector1,
          typename Vector2,
          typename UnaryFunction,
          typename BinaryFunction1,
          typename BinaryFunction2>
void spmv_ell(const Matrix& A,
              const Vector1& x,
              Vector2& y,
              UnaryFunction initialize,
              BinaryFunction1 combine,
              BinaryFunction2 reduce)
{
    typedef typename Matrix::index_type IndexType;
    typedef typename Vector2::value_type ValueType;

    const size_t slots_per_row = A.column_indices.num_cols;
    const IndexType invalid = Matrix::invalid_index;

    // Seed every output entry.
    for (size_t row = 0; row < A.num_rows; row++)
        y[row] = initialize(y[row]);

    for (size_t slot = 0; slot < slots_per_row; slot++)
    {
        for (size_t row = 0; row < A.num_rows; row++)
        {
            const IndexType col = A.column_indices(row, slot);
            if (col == invalid)
                continue;   // padding slot — no entry stored here
            y[row] = reduce(y[row], combine(A.values(row, slot), x[col]));
        }
    }
}
// Convenience overload: plain y = A*x (zero-initialize, multiply entries,
// accumulate with +).
template <typename Matrix,
          typename Vector1,
          typename Vector2>
void spmv_ell(const Matrix& A,
              const Vector1& x,
              Vector2& y)
{
    typedef typename Vector2::value_type ValueType;
    spmv_ell(A, x, y,
             cusp::detail::zero_function<ValueType>(),
             thrust::multiplies<ValueType>(),
             thrust::plus<ValueType>());
}
} // end namespace host
} // end namespace detail
} // end namespace cusp
|
reversi.c | #include <stdlib.h>
#include <memory.h>
#include <time.h>
#include <omp.h>
#include "reversi.h"
// --- constant ---
const uint8_t EMPTY = 0;   // cell states for chess_board / avl_board
const uint8_t BLACK = 1;
const uint8_t WHITE = 2;
const uint8_t BOTH = 3;    // square available to both players; relies on BLACK + WHITE == BOTH
const float INF = 1/0.0;   // IEEE +infinity; NOTE(review): prefer INFINITY from <math.h>
const uint8_t DEFAULT_MOVE = 3 * 8 + 3; // continue the game when one has 0 avl position
// ----------------
// --- REVERSI related funtions ---
// Reset the game to the standard Reversi opening: an empty 8x8 board with
// two black and two white pieces crossed in the center, zeroed counters, and
// availability info recomputed by check_status().
// Assumes the boards are 8x8 with one byte per cell (64 bytes) — matches the
// memset sizes used throughout this file.
void init(REVERSI *env) {
    memset(env->chess_board, EMPTY, 64);
    memset(env->avl_board, EMPTY, 64);
    env->black_count = 0;
    env->black_avl_count = 0;
    env->white_count = 0;
    env->white_avl_count = 0;
    env->round_count = 0;
    // standard opening position
    env->chess_board[3][3] = WHITE;
    env->chess_board[3][4] = BLACK;
    env->chess_board[4][3] = BLACK;
    env->chess_board[4][4] = WHITE;
    check_status(env);
}
// Attempt to place a piece for `player` at (x, y).
// Returns true when the move is consumed — either a legal placement (the
// enclosed pieces are flipped and the round advances) or a forced pass when
// the player has no available square at all.  Returns false only when the
// player has moves available but (x, y) is not one of them.
bool put_chess(REVERSI *env, uint8_t x, uint8_t y, uint8_t player) {
    uint8_t avl_count;
    if (player == BLACK)
        avl_count = env->black_avl_count;
    else
        avl_count = env->white_avl_count;

    // No legal square anywhere: the player passes, the round still advances.
    if (avl_count == 0) {
        env->round_count += 1;
        return true;
    }

    // Reject squares not marked available for this player.
    if (env->avl_board[x][y] != player && env->avl_board[x][y] != BOTH)
        return false;

    env->chess_board[x][y] = player;
    flip(env, x, y, player, false);   // capture the enclosed runs
    env->round_count += 1;
    return true;
}
// Scan one ray from (x, y) in direction (dx, dy).
// A run of one or more non-`player` pieces terminated by a `player` piece is
// a capture.  In check mode, report whether such a capture exists without
// touching the board; otherwise flip the enclosed run in place.
static bool flip_dir(REVERSI *env, int x, int y, int dx, int dy,
                     uint8_t player, bool check) {
    for (int i = 1; ; i++) {
        int cx = x + i * dx;
        int cy = y + i * dy;
        if (cx < 0 || cx >= 8 || cy < 0 || cy >= 8)
            return false;                              // ran off the board: open ray
        if (env->chess_board[cx][cy] == EMPTY)
            return false;                              // gap: nothing enclosed
        if (env->chess_board[cx][cy] == player) {
            if (i == 1)
                return false;                          // adjacent own piece: empty run
            if (check)
                return true;                           // capture exists; board untouched
            for (int j = 1; j < i; j++)
                env->chess_board[x + j * dx][y + j * dy] = player;
            return false;                              // flips applied
        }
    }
}

// Examine all eight rays from (x, y) for `player`.
//   check == true : return true iff placing at (x, y) would flip at least
//                   one enclosed run; the board is never modified.
//   check == false: flip every enclosed run in every direction (the board is
//                   modified); always returns false, exactly like the
//                   original hand-unrolled implementation.
// The eight per-direction copies of the original are collapsed into one
// helper driven by a direction table — same traversal, same flips.
bool flip(REVERSI *env, uint8_t x, uint8_t y, uint8_t player, bool check) {
    static const int dirs[8][2] = {
        {-1,  0}, { 1,  0},   // vertical up / down
        { 0, -1}, { 0,  1},   // horizontal left / right
        {-1, -1}, { 1,  1},   // diagonal back / forward
        {-1,  1}, { 1, -1}    // anti-diagonal back / forward
    };
    for (int d = 0; d < 8; d++) {
        // flip_dir only returns true in check mode, so a true here means
        // "a capture exists" and we can stop immediately.
        if (flip_dir(env, x, y, dirs[d][0], dirs[d][1], player, check))
            return true;
    }
    return false;
}
// Recompute all derived board state from chess_board:
//  * piece counts for both colors,
//  * for every empty square, which player(s) could legally move there.
// avl_board holds EMPTY / BLACK / WHITE / BOTH, relying on the encoding
// BLACK + WHITE == BOTH.
void check_status(REVERSI *env) {
    // wipe previous results before recounting
    memset(env->avl_board, EMPTY, 64);
    env->black_count = 0;
    env->black_avl_count = 0;
    env->white_count = 0;
    env->white_avl_count = 0;

    for (int row = 0; row < 8; row++) {
        for (int col = 0; col < 8; col++) {
            uint8_t cell = env->chess_board[row][col];
            if (cell == BLACK) {
                env->black_count += 1;
            } else if (cell == WHITE) {
                env->white_count += 1;
            } else {
                // empty square: probe both colors without mutating the board
                if (flip(env, row, col, BLACK, true)) {
                    env->avl_board[row][col] += BLACK;
                    env->black_avl_count += 1;
                }
                if (flip(env, row, col, WHITE, true)) {
                    env->avl_board[row][col] += WHITE;
                    env->white_avl_count += 1;
                }
            }
        }
    }
}
// The game is over exactly when neither player has a legal move left.
bool is_end(REVERSI env) {
    return (env.black_avl_count == 0) && (env.white_avl_count == 0);
}
// naive evaluation
// Naive static evaluation from `player`'s point of view:
// 1.5 * piece differential + own mobility (available-move count).
float score(REVERSI env, uint8_t player) {
    int own, opp, mobility;
    if (player == BLACK) {
        own = env.black_count;
        opp = env.white_count;
        mobility = env.black_avl_count;
    } else {
        own = env.white_count;
        opp = env.black_count;
        mobility = env.white_avl_count;
    }
    return 1.5 * (own - opp) + mobility;
}
// Collect every square available to `player` into dst (as {row, col}
// pairs), then shuffle them so the search explores moves in random order.
// dst must have room for the player's avl count (callers allocate
// avl_count or avl_count+1 rows).
//
// Fixes vs. the original:
//  * srand(time(NULL)) was called on EVERY invocation, so all shuffles
//    produced within the same second were identical — defeating the
//    randomization during a deep search.  The PRNG is now seeded once.
//  * The old shuffle swapped each slot with a fully random index, which is
//    a biased permutation; this is now an unbiased Fisher-Yates shuffle.
// NOTE(review): rand() is not thread-safe and this function is reached from
// inside an OpenMP parallel region via minimax() — consider rand_r() or a
// per-thread generator.
void possible_move(REVERSI env, uint8_t player, uint8_t (*dst)[2]) {
    uint8_t count = 0;
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++) {
            if (env.avl_board[i][j] == player || env.avl_board[i][j] == BOTH) {
                dst[count][0] = i;
                dst[count][1] = j;
                count += 1;
            }
        }
    }
    // seed once per process, not once per call
    static bool seeded = false;
    if (!seeded) {
        srand((unsigned)time(NULL));
        seeded = true;
    }
    // Fisher-Yates shuffle (loop body never runs when count <= 1)
    for (int i = count - 1; i > 0; i--) {
        int k = rand() % (i + 1);
        uint8_t tmp0 = dst[i][0], tmp1 = dst[i][1];
        dst[i][0] = dst[k][0];
        dst[i][1] = dst[k][1];
        dst[k][0] = tmp0;
        dst[k][1] = tmp1;
    }
}
// Depth-limited minimax with alpha-beta pruning, evaluating positions from
// the perspective of whichever player started the search (see comment below).
// `env` is passed and copied by value, so exploration never mutates the
// caller's game state.
float minimax(REVERSI env, uint8_t depth, uint8_t player, float alpha, float beta, bool maximizing) {
    // termination condition
    if (depth == 0 || is_end(env)) {
        /* always return the score relevent to the original player (first pass)
           if minimax(env, depth, WHITE, alpha, beta, true), then no matter what depth it is at,
           WHITE is with maximizing==true, BLACK is with false.
           if minimax(env, depth, BLACK, alpha, beta, true), then no matter what depth it is at,
           BLACK is with maximizing==true, WHITE is with false.
           In this way, we can determine whose optimal move we are trying to find without passing any additional param */
        return maximizing ? score(env, player) : score(env, OPPONENT(player));
    }
    // get avl_count for current player
    uint8_t avl_count = player == BLACK ? env.black_avl_count : env.white_avl_count;
    if (maximizing) {
        // initialze max_score to store the max eval
        float max_score = -INF;
        // initalize moves to store possible moves (avoid memory control)
        uint8_t moves[avl_count+1][2];
        possible_move(env, player, moves);
        /* length of moves is avl_count+1, the last one is DEFAULT_MOVE
           when avl_count > 0, DEFAULT_MOVE won't be evaluated,
           thanks to the `continue` in the for loop.
           when avl_count = 0, DEFAULT_MOVE is the only one,
           it will be used to continue the game. */
        moves[avl_count][0] = DEFAULT_MOVE / 8;
        moves[avl_count][1] = DEFAULT_MOVE % 8;
        // explore possible moves, with length = avl_count+1
        for (int i = 0; i < avl_count+1; i++) {
            // copy env so the child position is explored in isolation
            REVERSI child_env;
            memcpy(&child_env, &env, sizeof(REVERSI));
            // put chess at avl position
            if (!put_chess(&child_env, moves[i][0], moves[i][1], player)) continue;
            // check status, update avl_board
            check_status(&child_env);
            // eval current situation with depth-1
            // recursion
            float new_score = minimax(child_env, depth-1, OPPONENT(player), alpha, beta, false);
            // update the max_score
            max_score = MAX(max_score, new_score);
            // alpha-beta pruning
            alpha = MAX(alpha, new_score);
            if (beta <= alpha) break;
        }
        return max_score;
    } else {
        // minimizing branch: mirror of the above with min/beta
        float min_score = +INF;
        uint8_t moves[avl_count+1][2];
        possible_move(env, player, moves);
        moves[avl_count][0] = DEFAULT_MOVE / 8;
        moves[avl_count][1] = DEFAULT_MOVE % 8;
        for (int i = 0; i < avl_count+1; i++) {
            REVERSI child_env;
            memcpy(&child_env, &env, sizeof(REVERSI));
            if (!put_chess(&child_env, moves[i][0], moves[i][1], player)) continue;
            check_status(&child_env);
            float new_score = minimax(child_env, depth-1, OPPONENT(player), alpha, beta, true);
            min_score = MIN(min_score, new_score);
            beta = MIN(beta, new_score);
            if (beta <= alpha) break;
        }
        return min_score;
    }
}
// Choose the best move for `player` by scoring each available move in
// parallel with depth-limited alpha-beta minimax.  Returns the move encoded
// as row*8 + col, or DEFAULT_MOVE when the player must pass.
// depth should be at least 1: depth == 0 would only evaluate the CURRENT
// position, which cannot rank the candidate moves.
uint8_t minimax_parallel(REVERSI env, uint8_t depth, uint8_t player) {
    uint8_t avl_count = player == BLACK ? env.black_avl_count : env.white_avl_count;
    // no available position: forced pass
    if (avl_count == 0) return DEFAULT_MOVE;
    uint8_t moves[avl_count][2];
    possible_move(env, player, moves);
    float scores[avl_count];
    // Fix: pre-fill with -INF so a move skipped by the `continue` below can
    // never win the argmax with an indeterminate stack value (the original
    // read scores[i] without guaranteeing it was written).
    for (int i = 0; i < avl_count; i++) scores[i] = -INF;
    int num_threads = omp_get_max_threads();
    omp_set_num_threads(MIN(num_threads, avl_count));
    // one independent search per candidate move
    #pragma omp parallel for
    for (int i = 0; i < avl_count; i++) {
        REVERSI child_env;
        memcpy(&child_env, &env, sizeof(REVERSI));
        if (!put_chess(&child_env, moves[i][0], moves[i][1], player)) continue;
        check_status(&child_env);
        scores[i] = minimax(child_env, depth-1, OPPONENT(player), -INF, +INF, false);
    }
    // argmax over the scored moves
    // NOTE(review): if every score is -INF (all lines lost) this still
    // returns DEFAULT_MOVE, matching the original behavior.
    uint8_t best_move = DEFAULT_MOVE;
    float max_score = -INF;
    for (int i = 0; i < avl_count; i++) {
        if (scores[i] > max_score) {
            max_score = scores[i];
            best_move = moves[i][0] * 8 + moves[i][1];
        }
    }
    return best_move;
}
// --------------------------------
|
upwind_flux.c | #include "conv2d.h"
/* @brief Upwind numerical flux for scalar advection across a face.
 *
 * The normal velocity unM = uM*nx + vM*ny selects the upwind side:
 * outflow (unM > 0) carries the local value f_M, otherwise the
 * adjacent value f_P is advected in.
 *
 * @param[in]  f_M      field value on the local side of the face
 * @param[in]  f_P      field value on the adjacent side of the face
 * @param[in]  uM, vM   velocity components on the local side
 * @param[in]  nx, ny   outward unit normal of the local element
 * @param[out] numflux  resulting flux, f_upwind * unM
 */
void upwind_flux(double f_M, double f_P, double uM, double vM,
                 double nx, double ny, double *numflux)
{
    const double unM = uM * nx + vM * ny;
    const double f_upwind = (unM > 0) ? f_M : f_P;
    *numflux = f_upwind * unM;
}
/* @brief calculate the surface flux deviation for strong form.
*
* Usages:
* [dflux] = upwind_flux(h, h_ext, u, v, nx, ny, eidM, eidP, eidtype);
*/
/* MEX gateway: [dflux] = upwind_flux(h, h_ext, u, v, nx, ny, eidM, eidP, eidtype)
 * Computes the strong-form surface flux deviation for each face node:
 *   dflux = -numflux + nx*E + ny*G.
 * eidM/eidP carry 1-based MATLAB indices (converted to 0-based below);
 * dimensions Nfp x K are taken from eidP (prhs[7]). */
void mexFunction(int nlhs, mxArray *plhs[],
                 int nrhs, const mxArray *prhs[])
{
    /* check input & output */
    if (nrhs != 9)
        mexErrMsgTxt("Wrong number of input arguments.");
    if (nlhs != 1)
        mexErrMsgTxt("Wrong number of output arguments.");
    /* get inputs */
    double *h = mxGetPr(prhs[0]);
    double *h_ext = mxGetPr(prhs[1]);
    double *u = mxGetPr(prhs[2]);
    double *v = mxGetPr(prhs[3]);
    double *nx = mxGetPr(prhs[4]);
    double *ny = mxGetPr(prhs[5]);
    double *eidM = mxGetPr(prhs[6]);
    double *eidP = mxGetPr(prhs[7]);
    signed char *eidtype = (signed char *)mxGetData(prhs[8]); // int8 type
    /* get dimensions */
    size_t Nfp = mxGetM(prhs[7]);
    size_t K = mxGetN(prhs[7]);
    /* allocate output array */
    plhs[0] = mxCreateDoubleMatrix((mwSize)Nfp, (mwSize)K, mxREAL);
    double *dflux = mxGetPr(plhs[0]);
    /* set number of threads; one element (column) per iteration */
    #ifdef _OPENMP
    #pragma omp parallel for num_threads(DG_THREADS)
    #endif
    for (int i = 0; i < K; i++)
    {
        /* NOTE(review): `int ind = i * Nfp` can overflow int for very large
         * meshes, and `i < K` mixes int with size_t — confirm mesh sizes. */
        int ind = i * Nfp;
        for (int j = 0; j < Nfp; j++)
        {
            int iM = (int)eidM[ind] - 1; // change index to C type
            int iP = (int)eidP[ind] - 1;
            double f_M = h[iM]; // local and adjacent node values
            double varP = h[iP];
            double uM = u[iM], vM = v[iM];
            // double uP = u[iP], vP = v[iP];
            // outward normal vector of local element
            double nx_ = nx[ind];
            double ny_ = ny[ind];
            double f_ext; // external values on local nodes
            f_ext = h_ext[iM];
            bc_type type = (bc_type)eidtype[ind];
            // get adjacent values hP, qxP, qyP, considering
            // various boudnary conditions
            double f_P;
            int info = bound_cond(f_M, varP, f_ext, nx_, ny_, type, &f_P);
            // if(info) mexErrMsgTxt("Unknown boundary conditions.");
            double numflux, E, G;
            upwind_flux(f_M, f_P, uM, vM, nx_, ny_, &numflux);
            nodal_flux(f_M, uM, vM, &E, &G);
            dflux[ind] = -numflux + nx_ * E + ny_ * G;
            ind++;
        }
    }
    return;
}
|
enhance.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE N N H H AAA N N CCCC EEEEE %
% E NN N H H A A NN N C E %
% EEE N N N HHHHH AAAAA N N N C EEE %
% E N NN H H A A N NN C E %
% EEEEE N N H H A A N N CCCC EEEEE %
% %
% %
% MagickCore Image Enhancement Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoGammaImage() extracts the 'mean' from the image and adjusts the image
% to try to set its gamma appropriately.
%
% The format of the AutoGammaImage method is:
%
% MagickBooleanType AutoGammaImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image,
  ExceptionInfo *exception)
{
  const double
    mid_point = log(0.5);  /* gamma target: a mean of 50% gray */

  double
    gamma,
    mean,
    sans;

  MagickStatusType
    status;

  register ssize_t
    i;

  if (image->channel_mask == DefaultChannels)
    {
      /*
        Single pass: apply one gamma correction across all given channels.
      */
      (void) GetImageMean(image,&mean,&sans,exception);
      gamma=log(mean*QuantumScale)/mid_point;
      return(LevelImage(image,0.0,(double) QuantumRange,gamma,exception));
    }
  /*
    Otherwise auto-gamma each updatable channel on its own, temporarily
    restricting the channel mask to the channel being corrected.
  */
  status=MagickTrue;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    ChannelType
      channel_mask;

    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits & UpdatePixelTrait) == 0)
      continue;
    channel_mask=SetImageChannelMask(image,(ChannelType) (1 << i));
    status=GetImageMean(image,&mean,&sans,exception);
    gamma=log(mean*QuantumScale)/mid_point;
    status&=LevelImage(image,0.0,(double) QuantumRange,gamma,exception);
    (void) SetImageChannelMask(image,channel_mask);
    if (status == MagickFalse)
      break;
  }
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoLevelImage() adjusts the levels of a particular image channel by
% scaling the minimum and maximum values to the full quantum range.
%
% The format of the LevelImage method is:
%
% MagickBooleanType AutoLevelImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Auto-level is a min-max stretch with no clipping (0% black point and
    0% white point) and unit gamma.
  */
  status=MinMaxStretchImage(image,0.0,0.0,1.0,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B r i g h t n e s s C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BrightnessContrastImage() changes the brightness and/or contrast of an
% image. It converts the brightness and contrast parameters into slope and
% intercept and calls a polynomial function to apply to the image.
%
% The format of the BrightnessContrastImage method is:
%
% MagickBooleanType BrightnessContrastImage(Image *image,
% const double brightness,const double contrast,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o brightness: the brightness percent (-100 .. 100).
%
% o contrast: the contrast percent (-100 .. 100).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast,ExceptionInfo *exception)
{
#define BrightnessContastImageTag "BrightnessContast/Image"

  double
    coefficients[2],
    intercept,
    slope;

  MagickBooleanType
    status;

  /*
    Convert the brightness/contrast percentages into the slope and
    intercept of a linear transfer function, then delegate the pixel
    work to a first-degree polynomial evaluation.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* contrast of -100..100 maps onto a tangent curve; clamp negatives */
  slope=tan((double) (MagickPI*(contrast/100.0+1.0)/4.0));
  if (slope < 0.0)
    slope=0.0;
  intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
  coefficients[0]=slope;
  coefficients[1]=intercept;
  status=FunctionImage(image,PolynomialFunction,2,coefficients,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClutImage() replaces each color value in the given image, by using it as an
% index to lookup a replacement color value in a Color Look UP Table in the
% form of an image. The values are extracted along a diagonal of the CLUT
% image so either a horizontal or vertical gradient image can be used.
%
% Typically this is used to either re-color a gray-scale image according to a
% color gradient in the CLUT image, or to perform a freeform histogram
% (level) adjustment according to the (typically gray-scale) gradient in the
% CLUT image.
%
% When the 'channel' mask includes the matte/alpha transparency channel but
% one image has no such channel it is assumed that that image is a simple
% gray-scale image that will affect the alpha channel values, either for
% gray-scale coloring (with transparent or semi-transparent colors), or
% a histogram adjustment of existing alpha channel values. If both images
% have matte channels, direct and normal indexing is applied, which is rarely
% used.
%
% The format of the ClutImage method is:
%
% MagickBooleanType ClutImage(Image *image,Image *clut_image,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o clut_image: the color lookup table image for replacement color values.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ClutImageTag "Clut/Image"

  CacheView
    *clut_view,
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    *clut_map;  /* MaxMap+1 lookup table sampled from the CLUT's diagonal */

  register ssize_t
    i;

  ssize_t
    adjust,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clut_image != (Image *) NULL);
  assert(clut_image->signature == MagickCoreSignature);
  /* pixels are rewritten individually, so the image must be DirectClass */
  if( SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* a color CLUT applied to a gray image requires a color target space */
  if( (IsGrayColorspace(image->colorspace) != MagickFalse) &&
  (IsGrayColorspace(clut_image->colorspace) == MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  clut_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*clut_map));
  if (clut_map == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Clut image: sample the CLUT along its diagonal into clut_map, one entry
    per map level, using the requested interpolation method.
  */
  status=MagickTrue;
  progress=0;
  /* non-integer interpolation samples up to (columns-1, rows-1) */
  adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1);
  clut_view=AcquireVirtualCacheView(clut_image,exception);
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    GetPixelInfo(clut_image,clut_map+i);
    (void) InterpolatePixelInfo(clut_image,clut_view,method,
      (double) i*(clut_image->columns-adjust)/MaxMap,(double) i*
      (clut_image->rows-adjust)/MaxMap,clut_map+i,exception);
  }
  clut_view=DestroyCacheView(clut_view);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelTrait
        traits;

      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      /* replace each updatable channel with its CLUT lookup value */
      GetPixelInfoPixel(image,q,&pixel);
      traits=GetPixelChannelTraits(image,RedPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.red=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.red))].red;
      traits=GetPixelChannelTraits(image,GreenPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.green=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.green))].green;
      traits=GetPixelChannelTraits(image,BluePixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.blue=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.blue))].blue;
      traits=GetPixelChannelTraits(image,BlackPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.black=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.black))].black;
      traits=GetPixelChannelTraits(image,AlphaPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.alpha=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.alpha))].alpha;
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ClutImage)
#endif
        proceed=SetImageProgress(image,ClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  clut_map=(PixelInfo *) RelinquishMagickMemory(clut_map);
  /* if the CLUT carries alpha and alpha is updatable, enable it on the image */
  if ((clut_image->alpha_trait != UndefinedPixelTrait) &&
      ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0))
    (void) SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r D e c i s i o n L i s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorDecisionListImage() accepts a lightweight Color Correction Collection
% (CCC) file which solely contains one or more color corrections and applies
% the correction to the image. Here is a sample CCC file:
%
% <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
% <ColorCorrection id="cc03345">
% <SOPNode>
% <Slope> 0.9 1.2 0.5 </Slope>
% <Offset> 0.4 -0.5 0.6 </Offset>
% <Power> 1.0 0.8 1.5 </Power>
% </SOPNode>
% <SATNode>
% <Saturation> 0.85 </Saturation>
% </SATNode>
% </ColorCorrection>
% </ColorCorrectionCollection>
%
% which includes the slope, offset, and power for each of the RGB channels
% as well as the saturation.
%
% The format of the ColorDecisionListImage method is:
%
% MagickBooleanType ColorDecisionListImage(Image *image,
% const char *color_correction_collection,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_correction_collection: the color correction collection in XML.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ColorDecisionListImage(Image *image,
  const char *color_correction_collection,ExceptionInfo *exception)
{
#define ColorDecisionListCorrectImageTag "ColorDecisionList/Image"

  /* one ASC CDL slope/offset/power triple for a single channel */
  typedef struct _Correction
  {
    double
      slope,
      offset,
      power;
  } Correction;

  /* full CDL: per-channel SOP corrections plus a global saturation */
  typedef struct _ColorCorrection
  {
    Correction
      red,
      green,
      blue;

    double
      saturation;
  } ColorCorrection;

  CacheView
    *image_view;

  char
    token[MagickPathExtent];

  ColorCorrection
    color_correction;

  const char
    *content,
    *p;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    *cdl_map;  /* precomputed per-level SOP transfer function */

  register ssize_t
    i;

  ssize_t
    y;

  XMLTreeInfo
    *cc,
    *ccc,
    *sat,
    *sop;

  /*
    Parse the CCC XML and initialize the cdl maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (color_correction_collection == (const char *) NULL)
    return(MagickFalse);
  ccc=NewXMLTree((const char *) color_correction_collection,exception);
  if (ccc == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  cc=GetXMLTreeChild(ccc,"ColorCorrection");
  if (cc == (XMLTreeInfo *) NULL)
    {
      ccc=DestroyXMLTree(ccc);
      return(MagickFalse);
    }
  /*
    Defaults: identity slope/power, zero offset.
    NOTE(review): the ASC CDL specification defines the default saturation
    as 1.0 (identity); the 0.0 default here collapses the image to luma when
    no <SATNode> is present -- confirm this is intended before changing it.
  */
  color_correction.red.slope=1.0;
  color_correction.red.offset=0.0;
  color_correction.red.power=1.0;
  color_correction.green.slope=1.0;
  color_correction.green.offset=0.0;
  color_correction.green.power=1.0;
  color_correction.blue.slope=1.0;
  color_correction.blue.offset=0.0;
  color_correction.blue.power=1.0;
  color_correction.saturation=0.0;
  sop=GetXMLTreeChild(cc,"SOPNode");
  if (sop != (XMLTreeInfo *) NULL)
    {
      XMLTreeInfo
        *offset,
        *power,
        *slope;

      /* <Slope> holds up to three whitespace/comma separated values (R G B) */
      slope=GetXMLTreeChild(sop,"Slope");
      if (slope != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(slope);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.slope=StringToDouble(token,(char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.slope=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.slope=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
      offset=GetXMLTreeChild(sop,"Offset");
      if (offset != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(offset);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
      power=GetXMLTreeChild(sop,"Power");
      if (power != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(power);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.power=StringToDouble(token,(char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.power=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.power=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
    }
  sat=GetXMLTreeChild(cc,"SATNode");
  if (sat != (XMLTreeInfo *) NULL)
    {
      XMLTreeInfo
        *saturation;

      saturation=GetXMLTreeChild(sat,"Saturation");
      if (saturation != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(saturation);
          p=(const char *) content;
          GetNextToken(p,&p,MagickPathExtent,token);
          color_correction.saturation=StringToDouble(token,(char **) NULL);
        }
    }
  ccc=DestroyXMLTree(ccc);
  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  Color Correction Collection:");
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.slope: %g",color_correction.red.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.offset: %g",color_correction.red.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.power: %g",color_correction.red.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.slope: %g",color_correction.green.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.offset: %g",color_correction.green.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.power: %g",color_correction.green.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.slope: %g",color_correction.blue.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.offset: %g",color_correction.blue.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.power: %g",color_correction.blue.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.saturation: %g",color_correction.saturation);
    }
  /*
    Precompute the SOP transfer function for every map level:
    out = (slope*in + offset)^power.
  */
  cdl_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
  if (cdl_map == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    cdl_map[i].red=(double) ScaleMapToQuantum((double)
      (MaxMap*(pow(color_correction.red.slope*i/MaxMap+
      color_correction.red.offset,color_correction.red.power))));
    cdl_map[i].green=(double) ScaleMapToQuantum((double)
      (MaxMap*(pow(color_correction.green.slope*i/MaxMap+
      color_correction.green.offset,color_correction.green.power))));
    cdl_map[i].blue=(double) ScaleMapToQuantum((double)
      (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+
      color_correction.blue.offset,color_correction.blue.power))));
  }
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Apply transfer function to colormap.  The saturation blend is
        luma + saturation*(corrected - luma), matching the per-pixel path
        below (the previous unparenthesized form let the luma terms cancel).
      */
      double
        luma;

      luma=0.21267f*image->colormap[i].red+0.71526*image->colormap[i].green+
        0.07217f*image->colormap[i].blue;
      image->colormap[i].red=luma+color_correction.saturation*(cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red))].red-luma);
      image->colormap[i].green=luma+color_correction.saturation*(cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green))].green-
        luma);
      image->colormap[i].blue=luma+color_correction.saturation*(cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue))].blue-luma);
    }
  /*
    Apply transfer function to image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      luma;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      luma=0.21267f*GetPixelRed(image,q)+0.71526*GetPixelGreen(image,q)+
        0.07217f*GetPixelBlue(image,q);
      SetPixelRed(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelRed(image,q))].red-luma)),q);
      SetPixelGreen(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelGreen(image,q))].green-luma)),q);
      SetPixelBlue(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelBlue(image,q))].blue-luma)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorDecisionListImageChannel)
#endif
        proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
          progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  cdl_map=(PixelInfo *) RelinquishMagickMemory(cdl_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastImage() enhances the intensity differences between the lighter and
% darker elements of the image. Set sharpen to MagickTrue to increase the
% image contrast otherwise the contrast is reduced.
%
% The format of the ContrastImage method is:
%
% MagickBooleanType ContrastImage(Image *image,
% const MagickBooleanType sharpen,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Sigmoidal brightness adjustment for one RGB sample, in place.  The
  color is taken through HSB space; sign=+1 pushes brightness toward the
  extremes (more contrast), sign=-1 pulls it toward the middle (less).
*/
static void Contrast(const int sign,double *red,double *green,double *blue)
{
  double
    brightness = 0.0,
    hue = 0.0,
    saturation = 0.0;

  assert(red != (double *) NULL);
  assert(green != (double *) NULL);
  assert(blue != (double *) NULL);
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  /* move brightness toward the sinusoidal S-curve value */
  brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)-
    brightness);
  /* clamp to the valid [0,1] brightness range */
  if (brightness > 1.0)
    brightness=1.0;
  if (brightness < 0.0)
    brightness=0.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
MagickExport MagickBooleanType ContrastImage(Image *image,
  const MagickBooleanType sharpen,ExceptionInfo *exception)
{
#define ContrastImageTag "Contrast/Image"

  CacheView
    *image_view;

  int
    sign;  /* +1 increases contrast, -1 decreases it */

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* use the OpenCL accelerated path when available */
  if (AccelerateContrastImage(image,sharpen,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sign=sharpen != MagickFalse ? 1 : -1;
  if (image->storage_class == PseudoClass)
    {
      /*
        Contrast enhance colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        double
          blue,
          green,
          red;

        red=(double) image->colormap[i].red;
        green=(double) image->colormap[i].green;
        blue=(double) image->colormap[i].blue;
        Contrast(sign,&red,&green,&blue);
        image->colormap[i].red=(MagickRealType) red;
        image->colormap[i].green=(MagickRealType) green;
        image->colormap[i].blue=(MagickRealType) blue;
      }
    }
  /*
    Contrast enhance image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      blue,
      green,
      red;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* a failure on any row aborts the remaining (parallel) rows */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      red=(double) GetPixelRed(image,q);
      green=(double) GetPixelGreen(image,q);
      blue=(double) GetPixelBlue(image,q);
      Contrast(sign,&red,&green,&blue);
      SetPixelRed(image,ClampToQuantum(red),q);
      SetPixelGreen(image,ClampToQuantum(green),q);
      SetPixelBlue(image,ClampToQuantum(blue),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ContrastImage)
#endif
        proceed=SetImageProgress(image,ContrastImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastStretchImage() is a simple image enhancement technique that attempts
% to improve the contrast in an image by 'stretching' the range of intensity
% values it contains to span a desired range of values. It differs from the
% more sophisticated histogram equalization in that it can only apply a
% linear scaling function to the image pixel values. As a result the
% 'enhancement' is less harsh.
%
% The format of the ContrastStretchImage method is:
%
% MagickBooleanType ContrastStretchImage(Image *image,
% const char *levels,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
% o levels: Specify the levels where the black and white points have the
% range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define MaxRange(color) ((double) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag "ContrastStretch/Image"

  CacheView
    *image_view;

  double
    *black,        /* per-channel black level, as a map index */
    *histogram,    /* (MaxMap+1) x channels histogram */
    *stretch_map,  /* per-channel transfer function: map index -> quantum */
    *white;        /* per-channel white level, as a map index */

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate histogram and stretch map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageGray(image,exception) != MagickFalse)
    (void) SetImageColorspace(image,GRAYColorspace,exception);
  black=(double *) AcquireQuantumMemory(GetPixelChannels(image),sizeof(*black));
  white=(double *) AcquireQuantumMemory(GetPixelChannels(image),sizeof(*white));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)*
    sizeof(*histogram));
  stretch_map=(double *) AcquireQuantumMemory(MaxMap+1UL,
    GetPixelChannels(image)*sizeof(*stretch_map));
  if ((black == (double *) NULL) || (white == (double *) NULL) ||
      (histogram == (double *) NULL) || (stretch_map == (double *) NULL))
    {
      /* release whatever was allocated before throwing */
      if (stretch_map != (double *) NULL)
        stretch_map=(double *) RelinquishMagickMemory(stretch_map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (white != (double *) NULL)
        white=(double *) RelinquishMagickMemory(white);
      if (black != (double *) NULL)
        black=(double *) RelinquishMagickMemory(black);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.  With the default channel mask every channel shares the
    pixel intensity; otherwise each channel is histogrammed on its own value.
  */
  status=MagickTrue;
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      pixel=GetPixelIntensity(image,p);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        if (image->channel_mask != DefaultChannels)
          pixel=(double) p[i];
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(pixel))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black/white levels:
    accumulate from each end until the requested pixel count is exceeded.
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;

    register ssize_t
      j;

    black[i]=0.0;
    white[i]=MaxRange(QuantumRange);
    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > black_point)
        break;
    }
    black[i]=(double) j;
    intensity=0.0;
    for (j=(ssize_t) MaxMap; j != 0; j--)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > ((double) image->columns*image->rows-white_point))
        break;
    }
    white[i]=(double) j;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /*
    Stretch the histogram to create the stretched image mapping: values
    below black map to 0, above white to QuantumRange, and the interior
    is scaled linearly between the two.
  */
  (void) ResetMagickMemory(stretch_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*stretch_map));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    register ssize_t
      j;

    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      double
        gamma;

      gamma=PerceptibleReciprocal(white[i]-black[i]);
      if (j < (ssize_t) black[i])
        stretch_map[GetPixelChannels(image)*j+i]=0.0;
      else
        if (j > (ssize_t) white[i])
          stretch_map[GetPixelChannels(image)*j+i]=(double) QuantumRange;
        else
          stretch_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum(
            (double) (MaxMap*gamma*(j-black[i])));
    }
  }
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        j;

      /*
        Stretch-contrast colormap.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,RedPixelChannel);
            image->colormap[j].red=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+i];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,GreenPixelChannel);
            image->colormap[j].green=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+i];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,BluePixelChannel);
            image->colormap[j].blue=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+i];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,AlphaPixelChannel);
            image->colormap[j].alpha=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+i];
          }
      }
    }
  /*
    Stretch-contrast image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      /* remap every updatable channel through its stretch transfer map */
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,j);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=ClampToQuantum(stretch_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ContrastStretchImage)
#endif
        proceed=SetImageProgress(image,ContrastStretchImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  stretch_map=(double *) RelinquishMagickMemory(stretch_map);
  white=(double *) RelinquishMagickMemory(white);
  black=(double *) RelinquishMagickMemory(black);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n h a n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EnhanceImage() applies a digital filter that improves the quality of a
% noisy image.
%
% The format of the EnhanceImage method is:
%
% Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
#define EnhanceImageTag  "Enhance/Image"
/*
  EnhancePixel(weight): fold the neighborhood pixel at r into `aggregate`
  when its color is close to the center pixel.  A channel-weighted squared
  color distance between *r and the center pixel is computed; only
  neighbors with distance_squared below the 0.069 threshold contribute,
  which is what smooths noise without blurring across edges.  Always
  advances r to the next pixel in the row.
*/
#define EnhancePixel(weight) \
  mean=QuantumScale*((double) GetPixelRed(image,r)+pixel.red)/2.0; \
  distance=QuantumScale*((double) GetPixelRed(image,r)-pixel.red); \
  distance_squared=(4.0+mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelGreen(image,r)+pixel.green)/2.0; \
  distance=QuantumScale*((double) GetPixelGreen(image,r)-pixel.green); \
  distance_squared+=(7.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlue(image,r)+pixel.blue)/2.0; \
  distance=QuantumScale*((double) GetPixelBlue(image,r)-pixel.blue); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlack(image,r)+pixel.black)/2.0; \
  distance=QuantumScale*((double) GetPixelBlack(image,r)-pixel.black); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelAlpha(image,r)+pixel.alpha)/2.0; \
  distance=QuantumScale*((double) GetPixelAlpha(image,r)-pixel.alpha); \
  distance_squared+=(5.0-mean)*distance*distance; \
  if (distance_squared < 0.069) \
    { \
      aggregate.red+=(weight)*GetPixelRed(image,r); \
      aggregate.green+=(weight)*GetPixelGreen(image,r); \
      aggregate.blue+=(weight)*GetPixelBlue(image,r); \
      aggregate.black+=(weight)*GetPixelBlack(image,r); \
      aggregate.alpha+=(weight)*GetPixelAlpha(image,r); \
      total_weight+=(weight); \
    } \
  r+=GetPixelChannels(image);

  CacheView
    *enhance_view,
    *image_view;

  Image
    *enhance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize enhanced image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Results are written to a clone; the input image is never modified. */
  enhance_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (enhance_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(enhance_image,DirectClass,exception) == MagickFalse)
    {
      enhance_image=DestroyImage(enhance_image);
      return((Image *) NULL);
    }
  /*
    Enhance image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  enhance_view=AcquireAuthenticCacheView(enhance_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,enhance_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    if (status == MagickFalse)
      continue;
    /*
      Fetch a 5-row window (2 pixels of padding on each side) so every
      output pixel can see its full 5x5 neighborhood via the virtual
      cache view.
    */
    p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
    q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Offset (in Quantum units) of the window's center pixel: two rows
      down and two columns in from the top-left of the 5-row window.
    */
    center=(ssize_t) GetPixelChannels(image)*(2*(image->columns+4)+2);
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        distance,
        distance_squared,
        mean,
        total_weight;

      PixelInfo
        aggregate;

      register const Quantum
        *magick_restrict r;

      if (GetPixelWriteMask(image,p) == 0)
        {
          /*
            Masked pixels are not enhanced; emit the background color.
            (SetPixelBackgoundColor is the project's spelling of this
            accessor.)
          */
          SetPixelBackgoundColor(enhance_image,q);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(enhance_image);
          continue;
        }
      GetPixelInfo(image,&aggregate);
      total_weight=0.0;
      /* `pixel` holds the center pixel every neighbor is compared to. */
      GetPixelInfoPixel(image,p+center,&pixel);
      /*
        Walk the 5x5 neighborhood row by row with a center-weighted
        (pyramid) kernel; EnhancePixel() accumulates only neighbors
        whose color is similar to the center pixel.
      */
      r=p;
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
        EnhancePixel(8.0); EnhancePixel(5.0);
      r=p+GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
        EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+2*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0);
        EnhancePixel(40.0); EnhancePixel(10.0);
      r=p+3*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
        EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+4*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
        EnhancePixel(8.0); EnhancePixel(5.0);
      /*
        Weighted mean of the accepted neighbors; the +total_weight/2.0
        term adds half a quantum, presumably to round when the value is
        later quantized.  The center pixel always matches itself (zero
        distance), so total_weight is never zero here.
      */
      pixel.red=((aggregate.red+total_weight/2.0)/total_weight);
      pixel.green=((aggregate.green+total_weight/2.0)/total_weight);
      pixel.blue=((aggregate.blue+total_weight/2.0)/total_weight);
      pixel.black=((aggregate.black+total_weight/2.0)/total_weight);
      pixel.alpha=((aggregate.alpha+total_weight/2.0)/total_weight);
      SetPixelViaPixelInfo(image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(enhance_image);
    }
    if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* progress is shared across threads; serialize the update. */
        #pragma omp critical (MagickCore_EnhanceImage)
#endif
        proceed=SetImageProgress(image,EnhanceImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  enhance_view=DestroyCacheView(enhance_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    enhance_image=DestroyImage(enhance_image);
  return(enhance_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E q u a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EqualizeImage() applies a histogram equalization to the image.
%
% The format of the EqualizeImage method is:
%
% MagickBooleanType EqualizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  EqualizeImage() equalizes the histogram of every channel that has the
  update trait set: each channel's cumulative histogram is rescaled so
  intensities are redistributed across the full quantum range.

    image: the image (modified in place).
    exception: return any errors or warnings in this structure.

  Returns MagickTrue on success, MagickFalse (with exception set) on
  failure.
*/
MagickExport MagickBooleanType EqualizeImage(Image *image,
  ExceptionInfo *exception)
{
#define EqualizeImageTag  "Equalize/Image"

  CacheView
    *image_view;

  double
    black[CompositePixelChannel+1],
    *equalize_map,
    *histogram,
    *map,
    white[CompositePixelChannel+1];

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize histogram arrays.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Fast path: delegate to the OpenCL implementation when available. */
  if (AccelerateEqualizeImage(image,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  equalize_map=(double *) AcquireQuantumMemory(MaxMap+1UL,
    GetPixelChannels(image)*sizeof(*equalize_map));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)*
    sizeof(*histogram));
  map=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)*
    sizeof(*map));
  if ((equalize_map == (double *) NULL) || (histogram == (double *) NULL) ||
      (map == (double *) NULL))
    {
      /* Release whichever buffers were acquired before failing. */
      if (map != (double *) NULL)
        map=(double *) RelinquishMagickMemory(map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (equalize_map != (double *) NULL)
        equalize_map=(double *) RelinquishMagickMemory(equalize_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          intensity;

        intensity=p[i];
        /*
          When channels are synced, bin every channel by the pixel's
          overall intensity so all channels are equalized together.
        */
        if ((image->channel_mask & SyncChannels) != 0)
          intensity=GetPixelIntensity(image,p);
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(intensity)+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Integrate the histogram to get the equalization map.
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;

    register ssize_t
      j;

    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      map[GetPixelChannels(image)*j+i]=intensity;
    }
  }
  (void) ResetMagickMemory(equalize_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*equalize_map));
  /*
    Zero the entire endpoint arrays.  The previous code used
    sizeof(*black)/sizeof(*white), which cleared only the first double of
    each array and left the remaining channel slots uninitialized.
  */
  (void) ResetMagickMemory(black,0,sizeof(black));
  (void) ResetMagickMemory(white,0,sizeof(white));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    register ssize_t
      j;

    /*
      black/white hold the cumulative counts at the histogram extremes;
      equal values mean the channel is constant and must not be mapped
      (the divisor below would be zero).
    */
    black[i]=map[i];
    white[i]=map[GetPixelChannels(image)*MaxMap+i];
    if (black[i] != white[i])
      for (j=0; j <= (ssize_t) MaxMap; j++)
        equalize_map[GetPixelChannels(image)*j+i]=(double)
          ScaleMapToQuantum((double) ((MaxMap*(map[
          GetPixelChannels(image)*j+i]-black[i]))/(white[i]-black[i])));
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  map=(double *) RelinquishMagickMemory(map);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        j;

      /*
        Equalize colormap.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel=GetPixelChannelChannel(image,RedPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].red=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+
                channel];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel=GetPixelChannelChannel(image,
              GreenPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].green=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+
                channel];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel=GetPixelChannelChannel(image,BluePixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].blue=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+
                channel];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel=GetPixelChannelChannel(image,
              AlphaPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].alpha=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+
                channel];
          }
      }
    }
  /*
    Equalize image.
  */
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      if (GetPixelWriteMask(image,q) == 0)
        {
          /* Masked pixels are left untouched. */
          q+=GetPixelChannels(image);
          continue;
        }
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,j);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        /* Skip non-update channels and constant (unmappable) channels. */
        if (((traits & UpdatePixelTrait) == 0) || (black[j] == white[j]))
          continue;
        q[j]=ClampToQuantum(equalize_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* progress is shared across threads; serialize the update. */
        #pragma omp critical (MagickCore_EqualizeImage)
#endif
        proceed=SetImageProgress(image,EqualizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  equalize_map=(double *) RelinquishMagickMemory(equalize_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GammaImage() gamma-corrects a particular image channel. The same
% image viewed on different devices will have perceptual differences in the
% way the image's intensities are represented on the screen. Specify
% individual gamma levels for the red, green, and blue channels, or adjust
% all three with the gamma parameter. Values typically range from 0.8 to 2.3.
%
% You can also reduce the influence of a particular channel with a gamma
% value of 0.
%
% The format of the GammaImage method is:
%
% MagickBooleanType GammaImage(Image *image,const double gamma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o gamma: the image gamma.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static inline double gamma_pow(const double value,const double gamma)
{
return(value < 0.0 ? value : pow(value,gamma));
}
MagickExport MagickBooleanType GammaImage(Image *image,const double gamma,
  ExceptionInfo *exception)
{
#define GammaCorrectImageTag  "GammaCorrect/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (gamma == 1.0)
    return(MagickTrue);  /* gamma 1.0 is the identity: nothing to do */
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  /*
    Build the lookup table gamma_map[i]=(i/MaxMap)^(1/gamma).  When gamma
    is 0.0 the table stays all zeros, mapping every affected channel to
    black.
  */
  if (gamma != 0.0)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ScaleMapToQuantum((double) (MaxMap*pow((double) i/
        MaxMap,1.0/gamma)));
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Gamma-correct colormap.
      */
#if !defined(MAGICKCORE_HDRI_SUPPORT)
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].red))];
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].green))];
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].blue))];
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].alpha))];
#else
      /*
        HDRI build: quanta may fall outside [0,QuantumRange], so compute
        the correction directly instead of through the clamped lookup
        table; gamma_pow() passes negative values through unchanged.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=QuantumRange*gamma_pow(QuantumScale*
          image->colormap[i].red,1.0/gamma);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=QuantumRange*gamma_pow(QuantumScale*
          image->colormap[i].green,1.0/gamma);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=QuantumRange*gamma_pow(QuantumScale*
          image->colormap[i].blue,1.0/gamma);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=QuantumRange*gamma_pow(QuantumScale*
          image->colormap[i].alpha,1.0/gamma);
#endif
    }
  /*
    Gamma-correct image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      if (GetPixelWriteMask(image,q) == 0)
        {
          /* Masked pixels are left untouched. */
          q+=GetPixelChannels(image);
          continue;
        }
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,j);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
#if !defined(MAGICKCORE_HDRI_SUPPORT)
        q[j]=gamma_map[ScaleQuantumToMap(q[j])];
#else
        /* HDRI build: correct directly (see colormap note above). */
        q[j]=QuantumRange*gamma_pow(QuantumScale*q[j],1.0/gamma);
#endif
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* progress is shared across threads; serialize the update. */
        #pragma omp critical (MagickCore_GammaImage)
#endif
        proceed=SetImageProgress(image,GammaCorrectImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  /* Record the applied correction in the image's gamma property. */
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GrayscaleImage() converts the image to grayscale.
%
% The format of the GrayscaleImage method is:
%
% MagickBooleanType GrayscaleImage(Image *image,
% const PixelIntensityMethod method ,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the pixel intensity method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method,ExceptionInfo *exception)
{
#define GrayscaleImageTag  "Grayscale/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        Flatten the colormap into the pixels; grayscaling writes
        per-pixel values, so the image must be DirectClass.
      */
      if (SyncImage(image,exception) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
        return(MagickFalse);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Fast path: delegate to the OpenCL kernel when available. */
  if (AccelerateGrayscaleImage(image,method,exception) != MagickFalse)
    {
      image->intensity=method;
      image->type=GrayscaleType;
      return(SetImageColorspace(image,GRAYColorspace,exception));
    }
#endif
  /*
    Grayscale image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        blue,
        green,
        red,
        intensity;

      if (GetPixelWriteMask(image,q) == 0)
        {
          /* Masked pixels are left untouched. */
          q+=GetPixelChannels(image);
          continue;
        }
      red=(MagickRealType) GetPixelRed(image,q);
      green=(MagickRealType) GetPixelGreen(image,q);
      blue=(MagickRealType) GetPixelBlue(image,q);
      intensity=0.0;
      switch (method)
      {
        case AveragePixelIntensityMethod:
        {
          /* Arithmetic mean of the RGB components. */
          intensity=(red+green+blue)/3.0;
          break;
        }
        case BrightnessPixelIntensityMethod:
        {
          /* Maximum component (HSB brightness). */
          intensity=MagickMax(MagickMax(red,green),blue);
          break;
        }
        case LightnessPixelIntensityMethod:
        {
          /* Mean of the min and max components (HSL lightness). */
          intensity=(MagickMin(MagickMin(red,green),blue)+
            MagickMax(MagickMax(red,green),blue))/2.0;
          break;
        }
        case MSPixelIntensityMethod:
        {
          /* Mean of the squared components. */
          intensity=(MagickRealType) (((double) red*red+green*green+
            blue*blue)/3.0);
          break;
        }
        case Rec601LumaPixelIntensityMethod:
        {
          /*
            Rec. 601 luma; computed on gamma-encoded components, so
            linear RGB is encoded first.
          */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec601LuminancePixelIntensityMethod:
        {
          /*
            Rec. 601 luminance; computed on linear components, so sRGB
            is decoded first.
          */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec709LumaPixelIntensityMethod:
        default:  /* Rec. 709 luma is the default intensity method. */
        {
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case Rec709LuminancePixelIntensityMethod:
        {
          /* Rec. 709 luminance on linear components. */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case RMSPixelIntensityMethod:
        {
          /* Root mean square of the components. */
          intensity=(MagickRealType) (sqrt((double) red*red+green*green+
            blue*blue)/sqrt(3.0));
          break;
        }
      }
      SetPixelGray(image,ClampToQuantum(intensity),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* progress is shared across threads; serialize the update. */
        #pragma omp critical (MagickCore_GrayscaleImage)
#endif
        proceed=SetImageProgress(image,GrayscaleImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /* Record the method used and switch the image to grayscale. */
  image->intensity=method;
  image->type=GrayscaleType;
  return(SetImageColorspace(image,GRAYColorspace,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H a l d C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HaldClutImage() applies a Hald color lookup table to the image. A Hald
% color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
% Create it with the HALD coder. You can apply any color transformation to
% the Hald image and then use this method to apply the transform to the
% image.
%
% The format of the HaldClutImage method is:
%
%      MagickBooleanType HaldClutImage(Image *image,const Image *hald_image,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o hald_image: the color lookup table image for replacement color values.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType HaldClutImage(Image *image,
  const Image *hald_image,ExceptionInfo *exception)
{
#define HaldClutImageTag  "Clut/Image"

  /* A point inside the Hald color cube (r,g,b mapped to x,y,z). */
  typedef struct _HaldInfo
  {
    double
      x,
      y,
      z;
  } HaldInfo;

  CacheView
    *hald_view,
    *image_view;

  double
    width;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    zero;

  size_t
    cube_size,
    length,
    level;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(hald_image != (Image *) NULL);
  assert(hald_image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Hald clut image.
  */
  status=MagickTrue;
  progress=0;
  length=(size_t) MagickMin((MagickRealType) hald_image->columns,
    (MagickRealType) hald_image->rows);
  /*
    Find the Hald order: the smallest level >= 2 whose cube reaches the
    CLUT's smaller dimension.  After squaring, `level` is the number of
    samples per color axis and cube_size indexes one z-plane of the cube
    as laid out in the 2-D Hald image.
  */
  for (level=2; (level*level*level) < length; level++) ;
  level*=level;
  cube_size=level*level;
  width=(double) hald_image->columns;
  GetPixelInfo(hald_image,&zero);
  hald_view=AcquireVirtualCacheView(hald_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        offset;

      HaldInfo
        point;

      PixelInfo
        pixel,
        pixel1,
        pixel2,
        pixel3,
        pixel4;

      /*
        Map the pixel's r,g,b onto cube coordinates in [0,level-1], then
        split into an integer lattice offset plus fractional parts used
        for interpolation (the x fraction is carried inside `offset`).
      */
      point.x=QuantumScale*(level-1.0)*GetPixelRed(image,q);
      point.y=QuantumScale*(level-1.0)*GetPixelGreen(image,q);
      point.z=QuantumScale*(level-1.0)*GetPixelBlue(image,q);
      offset=point.x+level*floor(point.y)+cube_size*floor(point.z);
      point.x-=floor(point.x);
      point.y-=floor(point.y);
      point.z-=floor(point.z);
      /*
        Trilinear interpolation: sample two adjacent y-rows of the z
        plane, blend by the y fraction (pixel3); repeat on the next z
        plane (pixel4); blend the two by the z fraction.
      */
      pixel1=zero;
      (void) InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      pixel2=zero;
      (void) InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      pixel3=zero;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        point.y,&pixel3);
      offset+=cube_size;
      (void) InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      (void) InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      pixel4=zero;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        point.y,&pixel4);
      pixel=zero;
      CompositePixelInfoAreaBlend(&pixel3,pixel3.alpha,&pixel4,pixel4.alpha,
        point.z,&pixel);
      /* Write back only the channels flagged for update. */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,ClampToQuantum(pixel.red),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,ClampToQuantum(pixel.green),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,ClampToQuantum(pixel.black),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* progress is shared across threads; serialize the update. */
        #pragma omp critical (MagickCore_HaldClutImage)
#endif
        proceed=SetImageProgress(image,HaldClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  hald_view=DestroyCacheView(hald_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImage() adjusts the levels of a particular image channel by
% scaling the colors falling between specified white and black points to
% the full available quantum range.
%
% The parameters provided represent the black, and white points. The black
% point specifies the darkest color in the image. Colors darker than the
% black point are set to zero. White point specifies the lightest color in
% the image. Colors brighter than the white point are set to the maximum
% quantum value.
%
% If a '!' flag is given, map black and white colors to the given levels
% rather than mapping those levels to black and white. See
% LevelizeImage() below.
%
% Gamma specifies a gamma correction to apply to the image.
%
% The format of the LevelImage method is:
%
% MagickBooleanType LevelImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: The level to map zero (black) to.
%
%    o white_point: The level to map QuantumRange (white) to.
%
%    o gamma: the gamma correction to apply to the scaled levels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  LevelPixel() maps an intensity onto the full quantum range: black_point
  maps to 0 and white_point to QuantumRange, with a 1/gamma power-law
  correction applied to the normalized value.  When the two levels
  (nearly) coincide the pixel is returned unchanged to avoid dividing by
  zero.
*/
static inline double LevelPixel(const double black_point,
  const double white_point,const double gamma,const double pixel)
{
  double
    range,
    scale;

  range=white_point-black_point;
  if (fabs(range) < MagickEpsilon)
    return(pixel);
  scale=1.0/range;
  return(QuantumRange*gamma_pow(scale*((double) pixel-black_point),
    1.0/gamma));
}
MagickExport MagickBooleanType LevelImage(Image *image,const double black_point,
  const double white_point,const double gamma,ExceptionInfo *exception)
{
#define LevelImageTag  "Level/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap: apply the same mapping to each colormap entry's
        update-enabled channels.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].red));
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].green));
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].blue));
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].alpha));
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      if (GetPixelWriteMask(image,q) == 0)
        {
          /* Masked pixels are left untouched. */
          q+=GetPixelChannels(image);
          continue;
        }
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,j);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* Stretch [black_point,white_point] to the full quantum range. */
        q[j]=ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (double) q[j]));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* progress is shared across threads; serialize the update. */
        #pragma omp critical (MagickCore_LevelImage)
#endif
        proceed=SetImageProgress(image,LevelImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  (void) ClampImage(image,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelizeImage() applies the reversed LevelImage() operation to just
% the specific channels specified. It compresses the full range of color
% values, so that they lie between the given black and white points. Gamma is
% applied before the values are mapped.
%
% LevelizeImage() can be invoked by using a +level command line
% API option, or by using a '!' on a -level or LevelImage() geometry string.
%
% It can be used to de-contrast a greyscale image to the exact levels
% specified. Or by using specific levels for each channel of an image you
% can convert a gray-scale image to any linear color gradient, according to
% those levels.
%
% The format of the LevelizeImage method is:
%
% MagickBooleanType LevelizeImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: adjust gamma by this factor before mapping values.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma,
  ExceptionInfo *exception)
{
#define LevelizeImageTag "Levelize/Image"
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  register ssize_t
    i;
  ssize_t
    y;
  /*
    Reverse of LevelImage(): apply gamma, then compress the full quantum
    range into [black_point,white_point].  The LevelizeValue macro captures
    black_point, white_point and gamma from this scope.  Colormap entries
    are mapped first for palette images.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Levelize colormap: only channels whose traits request updates change.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) LevelizeValue(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) LevelizeValue(
          image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) LevelizeValue(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) LevelizeValue(
          image->colormap[i].alpha);
    }
  /*
    Levelize image: rows are independent, so the row loop may run in
    parallel under OpenMP.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;
      /* Skip pixels excluded by the image's write mask. */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,j);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=LevelizeValue(q[j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /* Serialize progress updates across OpenMP threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_LevelizeImage)
#endif
        proceed=SetImageProgress(image,LevelizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImageColors() maps the given color to "black" and "white" values,
% linearly spreading out the colors, and level values on a channel by channel
% bases, as per LevelImage(). The given colors allows you to specify
% different level ranges for each of the color channels separately.
%
% If the boolean 'invert' is set true the image values will be modified in the
% reverse direction. That is, any existing "black" and "white" colors in the
% image will become the color values given, with all other values compressed
% appropriately. This effectively maps a greyscale gradient into the given
% color gradient.
%
% The format of the LevelImageColors method is:
%
% MagickBooleanType LevelImageColors(Image *image,
% const PixelInfo *black_color,const PixelInfo *white_color,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_color: The color to map black to/from
%
%    o white_color: The color to map white to/from
%
% o invert: if true map the colors (levelize), rather than from (level)
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelImageColors(Image *image,
  const PixelInfo *black_color,const PixelInfo *white_color,
  const MagickBooleanType invert,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;
  MagickBooleanType
    (*level_method)(Image *,const double,const double,const double,
      ExceptionInfo *);
  MagickStatusType
    status;
  /*
    Map each updatable channel between the corresponding components of the
    given black and white colors.  The 'invert' flag selects between the
    forward (LevelImage) and reverse (LevelizeImage) operation; both share
    the same signature, so the choice is made once via a function pointer
    and a single per-channel sequence follows.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
       (IsGrayColorspace(white_color->colorspace) == MagickFalse)))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  level_method=(invert == MagickFalse) ? LevelImage : LevelizeImage;
  status=MagickTrue;
  if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
    {
      channel_mask=SetImageChannelMask(image,RedChannel);
      status&=level_method(image,black_color->red,white_color->red,1.0,
        exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
    {
      channel_mask=SetImageChannelMask(image,GreenChannel);
      status&=level_method(image,black_color->green,white_color->green,1.0,
        exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
    {
      channel_mask=SetImageChannelMask(image,BlueChannel);
      status&=level_method(image,black_color->blue,white_color->blue,1.0,
        exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  /* Black channel is only meaningful for CMYK images. */
  if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
      (image->colorspace == CMYKColorspace))
    {
      channel_mask=SetImageChannelMask(image,BlackChannel);
      status&=level_method(image,black_color->black,white_color->black,1.0,
        exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  /* Alpha is leveled only when the image actually carries an alpha trait. */
  if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
      (image->alpha_trait != UndefinedPixelTrait))
    {
      channel_mask=SetImageChannelMask(image,AlphaChannel);
      status&=level_method(image,black_color->alpha,white_color->alpha,1.0,
        exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i n e a r S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LinearStretchImage() discards any pixels below the black point and above
% the white point and levels the remaining pixels.
%
% The format of the LinearStretchImage method is:
%
% MagickBooleanType LinearStretchImage(Image *image,
% const double black_point,const double white_point,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define LinearStretchImageTag "LinearStretch/Image"
  CacheView
    *image_view;
  double
    *histogram,
    sum;
  MagickBooleanType
    status;
  ssize_t
    highlight,
    shadow,
    y;
  /*
    Build an intensity histogram, walk it inward from both ends until the
    requested pixel counts are consumed, then level the image between the
    two intensity levels found.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      histogram[ScaleQuantumToMap(ClampToQuantum(GetPixelIntensity(image,
        p)))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Black point: accumulate counts from the dark end of the histogram until
    the black_point threshold is reached.
  */
  sum=0.0;
  shadow=0;
  while (shadow < (ssize_t) MaxMap)
  {
    sum+=histogram[shadow];
    if (sum >= black_point)
      break;
    shadow++;
  }
  /*
    White point: accumulate counts from the bright end until the
    white_point threshold is reached.
  */
  sum=0.0;
  highlight=(ssize_t) MaxMap;
  while (highlight != 0)
  {
    sum+=histogram[highlight];
    if (sum >= white_point)
      break;
    highlight--;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  status=LevelImage(image,(double) ScaleMapToQuantum((MagickRealType) shadow),
    (double) ScaleMapToQuantum((MagickRealType) highlight),1.0,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d u l a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModulateImage() lets you control the brightness, saturation, and hue
% of an image. Modulate represents the brightness, saturation, and hue
% as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the
% modulation is lightness, saturation, and hue. For HWB, use blackness,
% whiteness, and hue. And for HCL, use chroma, luma, and hue.
%
% The format of the ModulateImage method is:
%
% MagickBooleanType ModulateImage(Image *image,const char *modulate,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulate: Define the percent change in brightness, saturation, and hue.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void ModulateHCL(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    chroma,
    hue,
    luma;

  /*
    Modulate in HCL space: scale luma and chroma by their percentages and
    offset the normalized hue, then convert back to RGB in place.
  */
  ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma);
  luma*=0.01*percent_luma;
  chroma*=0.01*percent_chroma;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHCLToRGB(hue,chroma,luma,red,green,blue);
}
static inline void ModulateHCLp(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    chroma,
    hue,
    luma;

  /*
    Modulate in HCLp space: scale luma and chroma by their percentages and
    offset the normalized hue, then convert back to RGB in place.
  */
  ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma);
  luma*=0.01*percent_luma;
  chroma*=0.01*percent_chroma;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHCLpToRGB(hue,chroma,luma,red,green,blue);
}
static inline void ModulateHSB(const double percent_hue,
  const double percent_saturation,const double percent_brightness,double *red,
  double *green,double *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Modulate in HSB space: scale brightness and saturation by their
    percentages and offset the normalized hue, then convert back to RGB.
  */
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  brightness*=0.01*percent_brightness;
  saturation*=0.01*percent_saturation;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
static inline void ModulateHSI(const double percent_hue,
  const double percent_saturation,const double percent_intensity,double *red,
  double *green,double *blue)
{
  double
    hue,
    intensity,
    saturation;

  /*
    Modulate in HSI space: scale intensity and saturation by their
    percentages and offset the normalized hue, then convert back to RGB.
  */
  ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity);
  intensity*=0.01*percent_intensity;
  saturation*=0.01*percent_saturation;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHSIToRGB(hue,saturation,intensity,red,green,blue);
}
static inline void ModulateHSL(const double percent_hue,
  const double percent_saturation,const double percent_lightness,double *red,
  double *green,double *blue)
{
  double
    hue,
    lightness,
    saturation;

  /*
    Modulate in HSL space: scale lightness and saturation by their
    percentages and offset the normalized hue, then convert back to RGB.
  */
  ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness);
  lightness*=0.01*percent_lightness;
  saturation*=0.01*percent_saturation;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHSLToRGB(hue,saturation,lightness,red,green,blue);
}
static inline void ModulateHSV(const double percent_hue,
  const double percent_saturation,const double percent_value,double *red,
  double *green,double *blue)
{
  double
    hue,
    saturation,
    value;

  /*
    Modulate in HSV space: scale value and saturation by their percentages
    and offset the normalized hue, then convert back to RGB.
  */
  ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value);
  value*=0.01*percent_value;
  saturation*=0.01*percent_saturation;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHSVToRGB(hue,saturation,value,red,green,blue);
}
static inline void ModulateHWB(const double percent_hue,
  const double percent_whiteness,const double percent_blackness,double *red,
  double *green,double *blue)
{
  double
    blackness,
    hue,
    whiteness;

  /*
    Modulate in HWB space: scale whiteness and blackness by their
    percentages and offset the normalized hue, then convert back to RGB.
  */
  ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness);
  whiteness*=0.01*percent_whiteness;
  blackness*=0.01*percent_blackness;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue);
}
static inline void ModulateLCHab(const double percent_luma,
  const double percent_chroma,const double percent_hue,double *red,
  double *green,double *blue)
{
  double
    chroma,
    hue,
    luma;

  /*
    Modulate in LCHab space: scale luma and chroma by their percentages and
    offset the normalized hue, then convert back to RGB in place.
  */
  ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  ConvertLCHabToRGB(luma,chroma,hue,red,green,blue);
}
static inline void ModulateLCHuv(const double percent_luma,
  const double percent_chroma,const double percent_hue,double *red,
  double *green,double *blue)
{
  double
    chroma,
    hue,
    luma;

  /*
    Modulate in LCHuv space: scale luma and chroma by their percentages and
    offset the normalized hue, then convert back to RGB in place.
  */
  ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue);
}
MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate,
  ExceptionInfo *exception)
{
#define ModulateImageTag "Modulate/Image"
  CacheView
    *image_view;
  ColorspaceType
    colorspace;
  const char
    *artifact;
  double
    percent_brightness,
    percent_hue,
    percent_saturation;
  GeometryInfo
    geometry_info;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  MagickStatusType
    flags;
  register ssize_t
    i;
  ssize_t
    y;
  /*
    Parse the modulate geometry: brightness[,saturation[,hue]] percentages;
    omitted values default to 100 (no change).  The working colorspace may
    be overridden with the "modulate:colorspace" artifact.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (modulate == (char *) NULL)
    return(MagickFalse);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  flags=ParseGeometry(modulate,&geometry_info);
  percent_brightness=geometry_info.rho;
  percent_saturation=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    percent_saturation=100.0;
  percent_hue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    percent_hue=100.0;
  colorspace=UndefinedColorspace;
  artifact=GetImageArtifact(image,"modulate:colorspace");
  if (artifact != (const char *) NULL)
    colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions,
      MagickFalse,artifact);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      double
        blue,
        green,
        red;
      /*
        Modulate image colormap.
      */
      red=(double) image->colormap[i].red;
      green=(double) image->colormap[i].green;
      blue=(double) image->colormap[i].blue;
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHColorspace:
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
      }
      image->colormap[i].red=red;
      image->colormap[i].green=green;
      image->colormap[i].blue=blue;
    }
  /*
    Modulate image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateModulateImage(image,percent_brightness,percent_hue,
      percent_saturation,colorspace,exception) != MagickFalse)
    return(MagickTrue);
#endif
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        blue,
        green,
        red;
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      red=(double) GetPixelRed(image,q);
      green=(double) GetPixelGreen(image,q);
      blue=(double) GetPixelBlue(image,q);
      /*
        Keep this switch in sync with the colormap switch above: a given
        colorspace must select the same modulation in both code paths.
        (Previously HSIColorspace was missing here and LCHColorspace was
        grouped with LCHuv instead of LCHab, so colormap and direct-class
        images were modulated differently.)
      */
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHColorspace:
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
      }
      SetPixelRed(image,ClampToQuantum(red),q);
      SetPixelGreen(image,ClampToQuantum(green),q);
      SetPixelBlue(image,ClampToQuantum(blue),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ModulateImage)
#endif
        proceed=SetImageProgress(image,ModulateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e g a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NegateImage() negates the colors in the reference image. The grayscale
% option means that only grayscale values within the image are negated.
%
% The format of the NegateImage method is:
%
% MagickBooleanType NegateImage(Image *image,
% const MagickBooleanType grayscale,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o grayscale: If MagickTrue, only negate grayscale pixels within the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType NegateImage(Image *image,
  const MagickBooleanType grayscale,ExceptionInfo *exception)
{
#define NegateImageTag "Negate/Image"
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  register ssize_t
    i;
  ssize_t
    y;
  /*
    Negate the image: each updatable channel value v becomes
    QuantumRange-v.  In grayscale mode only gray pixels (and gray colormap
    entries) are negated.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Negate colormap: in grayscale mode, skip entries that are not gray.
      */
      if( grayscale != MagickFalse )
        if ((image->colormap[i].red != image->colormap[i].green) ||
            (image->colormap[i].green != image->colormap[i].blue))
          continue;
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=QuantumRange-image->colormap[i].red;
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=QuantumRange-image->colormap[i].green;
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
    }
  /*
    Negate image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
  if( grayscale != MagickFalse )
    {
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;
        register Quantum
          *magick_restrict q;
        register ssize_t
          x;
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            j;
          /* Skip masked pixels and, in this mode, non-gray pixels. */
          if ((GetPixelWriteMask(image,q) == 0) ||
              IsPixelGray(image,q) != MagickFalse)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
          {
            PixelChannel channel=GetPixelChannelChannel(image,j);
            PixelTrait traits=GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[j]=QuantumRange-q[j];
          }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_NegateImage)
#endif
            proceed=SetImageProgress(image,NegateImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      /*
        Propagate any pixel-access or sync failure to the caller; this
        branch previously returned MagickTrue unconditionally, hiding
        errors recorded in 'status'.
      */
      return(status);
    }
  /*
    Negate image.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,j);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=QuantumRange-q[j];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_NegateImage)
#endif
        proceed=SetImageProgress(image,NegateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N o r m a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The NormalizeImage() method enhances the contrast of a color image by
% mapping the darkest 0.15 percent of all pixels to black and the brightest
% 0.05 percent to white.
%
% The format of the NormalizeImage method is:
%
% MagickBooleanType NormalizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType NormalizeImage(Image *image,
  ExceptionInfo *exception)
{
  double
    number_pixels;

  /*
    Auto-stretch contrast with fixed clip fractions: the black threshold is
    0.15% of the pixel count and the white threshold 99.95%, delegating the
    actual work to ContrastStretchImage().
  */
  number_pixels=(double) image->columns*image->rows;
  return(ContrastStretchImage(image,number_pixels*0.0015,
    number_pixels*0.9995,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i g m o i d a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
% sigmoidal contrast algorithm. Increase the contrast of the image using a
% sigmoidal transfer function without saturating highlights or shadows.
% Contrast indicates how much to increase the contrast (0 is none; 3 is
% typical; 20 is pushing it); mid-point indicates where midtones fall in the
% resultant image (0 is white; 50% is middle-gray; 100% is black). Set
% sharpen to MagickTrue to increase the image contrast otherwise the contrast
% is reduced.
%
% The format of the SigmoidalContrastImage method is:
%
% MagickBooleanType SigmoidalContrastImage(Image *image,
% const MagickBooleanType sharpen,const char *levels,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o contrast: strength of the contrast, the larger the number the more
% 'threshold-like' it becomes.
%
% o midpoint: midpoint of the function as a color value 0 to QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
ImageMagick 6 has a version of this function which uses LUTs.
*/
/*
Sigmoidal function Sigmoidal with inflexion point moved to b and "slope
constant" set to a.
  The first version, based on the hyperbolic tangent tanh, when combined with
  the scaling step, is an exact arithmetic clone of the sigmoid function
based on the logistic curve. The equivalence is based on the identity
1/(1+exp(-t)) = (1+tanh(t/2))/2
(http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the
scaled sigmoidal derivation is invariant under affine transformations of
the ordinate.
The tanh version is almost certainly more accurate and cheaper. The 0.5
factor in the argument is to clone the legacy ImageMagick behavior. The
reason for making the define depend on atanh even though it only uses tanh
has to do with the construction of the inverse of the scaled sigmoidal.
*/
#if defined(MAGICKCORE_HAVE_ATANH)
#define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) )
#else
#define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) )
#endif
/*
Scaled sigmoidal function:
( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) /
( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) )
See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and
http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit
of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by
zero. This is fixed below by exiting immediately when contrast is small,
leaving the image (or colormap) unmodified. This appears to be safe because
the series expansion of the logistic sigmoidal function around x=b is
1/2-a*(b-x)/4+...
so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh).
*/
#define ScaledSigmoidal(a,b,x) ( \
(Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \
(Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) )
/*
Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b
may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic
sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even
when creating a LUT from in gamut values, hence the branching. In
addition, HDRI may have out of gamut values.
InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal:
It is only a right inverse. This is unavoidable.
*/
/*
  Right inverse of ScaledSigmoidal (used for +sigmoidal-contrast).  Because
  b may be 0 or 1, and HDRI pixels may be out of gamut, the value fed to
  atanh (resp. the logit) is clamped away from the ends of its domain.
  This is only a right inverse of ScaledSigmoidal, not a two-sided one.
*/
static inline double InverseScaledSigmoidal(const double a,const double b,
  const double x)
{
  const double sig0=Sigmoidal(a,b,0.0);
  const double sig1=Sigmoidal(a,b,1.0);
  const double argument=(sig1-sig0)*x+sig0;
#if defined(MAGICKCORE_HAVE_ATANH)
  /* tanh-based sigmoidal: clamp argument into (-1,1) before atanh. */
  const double clamped=
    (argument < -1+MagickEpsilon) ? (-1+MagickEpsilon) :
    ((argument > 1-MagickEpsilon) ? (1-MagickEpsilon) : argument);
  return(b+(2.0/a)*atanh(clamped));
#else
  /* logistic sigmoidal: clamp argument into (0,1) before the logit. */
  const double clamped=
    (argument < MagickEpsilon) ? MagickEpsilon :
    ((argument > 1-MagickEpsilon) ? (1-MagickEpsilon) : argument);
  return(b-log(1.0/clamped-1.0)/a);
#endif
}
/*
  Apply a sigmoidal contrast curve to every updatable channel of `image`.
  sharpen != MagickFalse applies ScaledSigmoidal (increases contrast);
  otherwise the inverse curve is applied.  `contrast` is the slope constant,
  `midpoint` the inflection point in quantum units.  Returns MagickTrue on
  success; errors are reported through `exception` and `status`.
*/
MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
const MagickBooleanType sharpen,const double contrast,const double midpoint,
ExceptionInfo *exception)
{
#define SigmoidalContrastImageTag "SigmoidalContrast/Image"
#define ScaledSig(x) ( ClampToQuantum(QuantumRange* \
ScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )
#define InverseScaledSig(x) ( ClampToQuantum(QuantumRange* \
InverseScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Convenience macros.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/*
Side effect: may clamp values unless contrast<MagickEpsilon, in which
case nothing is done.
*/
if (contrast < MagickEpsilon)
return(MagickTrue);
/*
Sigmoidal-contrast enhance colormap.
*/
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
/* PseudoClass: transform the palette entries instead of every pixel. */
if( sharpen != MagickFalse )
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].red=(MagickRealType) ScaledSig(
image->colormap[i].red);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].green=(MagickRealType) ScaledSig(
image->colormap[i].green);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].blue=(MagickRealType) ScaledSig(
image->colormap[i].blue);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].alpha=(MagickRealType) ScaledSig(
image->colormap[i].alpha);
}
else
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].red=(MagickRealType) InverseScaledSig(
image->colormap[i].red);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].green=(MagickRealType) InverseScaledSig(
image->colormap[i].green);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].blue=(MagickRealType) InverseScaledSig(
image->colormap[i].blue);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].alpha=(MagickRealType) InverseScaledSig(
image->colormap[i].alpha);
}
}
/*
Sigmoidal-contrast enhance image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
/* One row per iteration; rows are independent, so this parallelizes. */
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
/* Write mask 0 means this pixel is protected: skip it. */
if (GetPixelWriteMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if( sharpen != MagickFalse )
q[i]=ScaledSig(q[i]);
else
q[i]=InverseScaledSig(q[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SigmoidalContrastImage)
#endif
proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
|
blas_server_omp.c | /*********************************************************************/
/* Copyright 2009, 2010 The University of Texas at Austin. */
/* All rights reserved. */
/* */
/* Redistribution and use in source and binary forms, with or */
/* without modification, are permitted provided that the following */
/* conditions are met: */
/* */
/* 1. Redistributions of source code must retain the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer. */
/* */
/* 2. Redistributions in binary form must reproduce the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer in the documentation and/or other materials */
/* provided with the distribution. */
/* */
/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */
/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */
/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */
/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */
/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */
/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */
/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */
/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */
/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */
/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
/* */
/* The views and conclusions contained in the software and */
/* documentation are those of the authors and should not be */
/* interpreted as representing official policies, either expressed */
/* or implied, of The University of Texas at Austin. */
/*********************************************************************/
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
//#include <sys/mman.h>
#include "common.h"
#ifndef USE_OPENMP
#include "blas_server.c"
#else
#ifndef OMP_SCHED
#define OMP_SCHED static
#endif
/* Nonzero once blas_thread_init() has run; cleared by shutdown. */
int blas_server_avail = 0;
/* Per-(parallel slot, thread) scratch buffers, lazily (re)allocated. */
static void * blas_thread_buffer[MAX_PARALLEL_NUMBER][MAX_CPU_NUMBER];
/* One in-use flag per parallel slot; atomic when C11 atomics exist. */
#ifdef HAVE_C11
static atomic_bool blas_buffer_inuse[MAX_PARALLEL_NUMBER];
#else
static _Bool blas_buffer_inuse[MAX_PARALLEL_NUMBER];
#endif
/* Set the number of OpenMP threads used by BLAS and keep the per-thread
   work buffers in sync: slots for active threads are allocated on demand,
   slots beyond the new count are released. */
void goto_set_num_threads(int num_threads) {

  int slot, cpu;

  /* Non-positive means "keep the current count"; cap at MAX_CPU_NUMBER. */
  if (num_threads < 1)
    num_threads = blas_num_threads;
  if (num_threads > MAX_CPU_NUMBER)
    num_threads = MAX_CPU_NUMBER;

  if (num_threads > blas_num_threads)
    blas_num_threads = num_threads;

  blas_cpu_number = num_threads;
  omp_set_num_threads(blas_cpu_number);

  /* Reconcile every buffer slot with the new thread count. */
  for (slot = 0; slot < MAX_PARALLEL_NUMBER; slot++) {
    for (cpu = 0; cpu < MAX_CPU_NUMBER; cpu++) {
      void **buf = &blas_thread_buffer[slot][cpu];
      if (cpu < blas_cpu_number) {
        if (*buf == NULL)
          *buf = blas_memory_alloc(2);
      } else if (*buf != NULL) {
        blas_memory_free(*buf);
        *buf = NULL;
      }
    }
  }

#if defined(ARCH_MIPS64)
  /* MIPS64 retunes blocking parameters per thread count. */
  blas_set_parameter();
#endif
}
/* Public OpenBLAS API name; forwards to the GotoBLAS-era entry point. */
void openblas_set_num_threads(int num_threads) {
goto_set_num_threads(num_threads);
}
/* One-time server setup: detect the CPU count, mark the server available
   and pre-allocate one scratch buffer per (parallel slot, thread).  Slots
   past the detected thread count stay NULL.  Always returns 0. */
int blas_thread_init(void){

  int slot, cpu;

  blas_get_cpu_number();
  blas_server_avail = 1;

  for (slot = 0; slot < MAX_PARALLEL_NUMBER; slot++)
    for (cpu = 0; cpu < MAX_CPU_NUMBER; cpu++)
      blas_thread_buffer[slot][cpu] =
        (cpu < blas_num_threads) ? blas_memory_alloc(2) : NULL;

  return 0;
}
/* Tear down the server: mark it unavailable and free every scratch buffer
   allocated by init/set_num_threads.  Always returns 0. */
int BLASFUNC(blas_thread_shutdown)(void){

  int slot, cpu;

  blas_server_avail = 0;

  for (slot = 0; slot < MAX_PARALLEL_NUMBER; slot++) {
    for (cpu = 0; cpu < MAX_CPU_NUMBER; cpu++) {
      void *buf = blas_thread_buffer[slot][cpu];
      if (buf != NULL) {
        blas_memory_free(buf);
        blas_thread_buffer[slot][cpu] = NULL;
      }
    }
  }

  return 0;
}
/*
  Dispatch a legacy-interface BLAS kernel.  `mode` selects the scalar type
  (real/complex x single/double/extended); `func` is cast to the matching
  GEMM-style signature and called with the arguments unpacked from `args`.
  `sb` is the scratch buffer passed through to the kernel.
  Complex kernels receive alpha as two scalars (real, imaginary).
*/
static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){
if (!(mode & BLAS_COMPLEX)){
#ifdef EXPRECISION
if (mode & BLAS_XDOUBLE){
/* REAL / Extended Double */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble,
xdouble *, BLASLONG, xdouble *, BLASLONG,
xdouble *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((xdouble *)args -> alpha)[0],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
} else
#endif
if (mode & BLAS_DOUBLE){
/* REAL / Double */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double,
double *, BLASLONG, double *, BLASLONG,
double *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((double *)args -> alpha)[0],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
} else {
/* REAL / Single */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float,
float *, BLASLONG, float *, BLASLONG,
float *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((float *)args -> alpha)[0],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
}
} else {
#ifdef EXPRECISION
if (mode & BLAS_XDOUBLE){
/* COMPLEX / Extended Double */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble,
xdouble *, BLASLONG, xdouble *, BLASLONG,
xdouble *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((xdouble *)args -> alpha)[0],
((xdouble *)args -> alpha)[1],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
} else
#endif
if (mode & BLAS_DOUBLE){
/* COMPLEX / Double */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, double,
double *, BLASLONG, double *, BLASLONG,
double *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((double *)args -> alpha)[0],
((double *)args -> alpha)[1],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
} else {
/* COMPLEX / Single */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, float,
float *, BLASLONG, float *, BLASLONG,
float *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((float *)args -> alpha)[0],
((float *)args -> alpha)[1],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
}
}
}
/*
  Run one queued BLAS task on the current OpenMP thread.  If the task did
  not bring its own sa/sb work areas (and is not a pthread-compat task),
  the thread's pre-allocated buffer for `buf_index` is carved into sa and
  sb, with sb placed after the largest panel the selected precision needs.
  Falls back to a one-off allocation (freed on exit) if the slot is empty.
*/
static void exec_threads(blas_queue_t *queue, int buf_index){
void *buffer, *sa, *sb;
int pos=0, release_flag=0;
buffer = NULL;
sa = queue -> sa;
sb = queue -> sb;
#ifdef CONSISTENT_FPCSR
/* Propagate the caller's x86 FP control/status words to this thread. */
__asm__ __volatile__ ("ldmxcsr %0" : : "m" (queue -> sse_mode));
__asm__ __volatile__ ("fldcw %0" : : "m" (queue -> x87_mode));
#endif
if ((sa == NULL) && (sb == NULL) && ((queue -> mode & BLAS_PTHREAD) == 0)) {
pos = omp_get_thread_num();
buffer = blas_thread_buffer[buf_index][pos];
//fallback
if(buffer==NULL) {
buffer = blas_memory_alloc(2);
release_flag=1;
}
if (sa == NULL) {
sa = (void *)((BLASLONG)buffer + GEMM_OFFSET_A);
queue->sa=sa;
}
if (sb == NULL) {
/* sb sits after sa's panel, rounded with GEMM_ALIGN; the panel size
   depends on precision (Q/D/S, X/Z/C) and doubles for complex. */
if (!(queue -> mode & BLAS_COMPLEX)){
#ifdef EXPRECISION
if (queue -> mode & BLAS_XDOUBLE){
sb = (void *)(((BLASLONG)sa + ((QGEMM_P * QGEMM_Q * sizeof(xdouble)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
} else
#endif
if (queue -> mode & BLAS_DOUBLE){
sb = (void *)(((BLASLONG)sa + ((DGEMM_P * DGEMM_Q * sizeof(double)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
} else {
sb = (void *)(((BLASLONG)sa + ((SGEMM_P * SGEMM_Q * sizeof(float)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
}
} else {
#ifdef EXPRECISION
if (queue -> mode & BLAS_XDOUBLE){
sb = (void *)(((BLASLONG)sa + ((XGEMM_P * XGEMM_Q * 2 * sizeof(xdouble)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
} else
#endif
if (queue -> mode & BLAS_DOUBLE){
sb = (void *)(((BLASLONG)sa + ((ZGEMM_P * ZGEMM_Q * 2 * sizeof(double)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
} else {
sb = (void *)(((BLASLONG)sa + ((CGEMM_P * CGEMM_Q * 2 * sizeof(float)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
}
}
queue->sb=sb;
}
}
/* Dispatch: legacy GEMM signature, pthread-compat callback, or the
   modern routine(args, range_m, range_n, sa, sb, position) form. */
if (queue -> mode & BLAS_LEGACY) {
legacy_exec(queue -> routine, queue -> mode, queue -> args, sb);
} else
if (queue -> mode & BLAS_PTHREAD) {
void (*pthreadcompat)(void *) = queue -> routine;
(pthreadcompat)(queue -> args);
} else {
int (*routine)(blas_arg_t *, void *, void *, void *, void *, BLASLONG) = queue -> routine;
(routine)(queue -> args, queue -> range_m, queue -> range_n, sa, sb, queue -> position);
}
if (release_flag) blas_memory_free(buffer);
}
/*
  Execute `num` queued BLAS tasks in parallel.  Claims one of the
  MAX_PARALLEL_NUMBER buffer slots (spinning until one frees up), runs all
  tasks under an OpenMP parallel-for, then releases the slot.
  Returns 0 always (including for empty input).
*/
int exec_blas(BLASLONG num, blas_queue_t *queue){
BLASLONG i, buf_index;
if ((num <= 0) || (queue == NULL)) return 0;
#ifdef CONSISTENT_FPCSR
/* Snapshot the caller's FP control words so workers can replay them. */
for (i = 0; i < num; i ++) {
__asm__ __volatile__ ("fnstcw %0" : "=m" (queue[i].x87_mode));
__asm__ __volatile__ ("stmxcsr %0" : "=m" (queue[i].sse_mode));
}
#endif
/* Busy-wait for a free buffer slot.  NOTE(review): the non-C11 path is a
   plain check-then-set, not atomic -- apparently relies on callers not
   racing here; confirm before relying on it.  No yield in the spin. */
while(true) {
for(i=0; i < MAX_PARALLEL_NUMBER; i++) {
#ifdef HAVE_C11
_Bool inuse = false;
if(atomic_compare_exchange_weak(&blas_buffer_inuse[i], &inuse, true)) {
#else
if(blas_buffer_inuse[i] == false) {
blas_buffer_inuse[i] = true;
#endif
buf_index = i;
break;
}
}
if(i != MAX_PARALLEL_NUMBER)
break;
}
#pragma omp parallel for num_threads(num) schedule(OMP_SCHED)
for (i = 0; i < num; i ++) {
#ifndef USE_SIMPLE_THREADED_LEVEL3
queue[i].position = i;
#endif
exec_threads(&queue[i], buf_index);
}
/* Release the slot for the next exec_blas() call. */
#ifdef HAVE_C11
atomic_store(&blas_buffer_inuse[buf_index], false);
#else
blas_buffer_inuse[buf_index] = false;
#endif
return 0;
}
#endif
|
fftw_parallel.c | #include "libraries.h"
/* Single-precision complex value (Re + j*Im).  NOTE(review): the name
   'complex' collides with the C99 macro from <complex.h>; assumed safe
   only if libraries.h never includes that header -- confirm. */
typedef struct comp_comp {
float Re;
float Im;
} complex;
//Appropriate exponential power of omega calculation: w^ki = e^(2*PI*k*i/n)
/* Twiddle factor w^(k*i) = e^(j*2*PI*k*i/n), returned as a {cos, sin} pair. */
complex omega(int n, int i, int k) {
    const double angle = k*i*2*PI/n;
    complex w;
    w.Re = cos(angle);
    w.Im = sin(angle);
    return w;
}
// addition of 2 complex numbers c1,c2
/* Componentwise complex addition: returns c1 + c2. */
complex csum(complex c1, complex c2){
    complex out = { c1.Re + c2.Re, c1.Im + c2.Im };
    return out;
}
// multiplication of 2 complex numbers c1,c2
/* Complex multiplication: (a+jb)(c+jd) = (ac-bd) + j(ad+bc). */
complex cmul(complex c1, complex c2){
    complex out = {
        c1.Re * c2.Re - c1.Im * c2.Im,
        c1.Re * c2.Im + c1.Im * c2.Re
    };
    return out;
}
// subtraction of 2 complex numbers c1,c2
/* Componentwise complex subtraction: returns c1 - c2. */
complex csub(complex c1, complex c2){
    complex out = { c1.Re - c2.Re, c1.Im - c2.Im };
    return out;
}
/* Reverse the low `numbits` bits of `inp` (bit 0 becomes bit numbits-1).
   Bits above numbits are discarded. */
int bitrev(int inp, int numbits)
{
    int out = 0;
    int bit;
    for (bit = 0; bit < numbits; bit++)
        out = (out << 1) | ((inp >> bit) & 1);
    return out;
}
/*
  Iterative radix-2 FFT of X (length n, a power of two) into Y using
  num_thr OpenMP threads; num_thr must divide n.  Returns the wall-clock
  time spent inside this call.  Output is written in natural order (a
  final bit-reversal pass runs on the last iteration).
*/
double parallel_FFT(complex *X, complex *Y, long n,int num_thr) {
//long ii, shift,prev_shift, l;
long r, temp,m, i,w; //j, k, w, y, t, i2;
//complex temp2, temp3, omeg;
//complex t1;
complex *R,*S,omeg;
int start,end,element,border,tid,block,j,k;
double en,bg,ext;
/* R holds the current iteration's values, S the previous ones. */
R = (complex *) malloc(n*sizeof(complex));
S = (complex *) malloc(n*sizeof(complex));
/* Calculate r=logn with n=2^r */
ext=0;
bg=omp_get_wtime();
omp_set_num_threads(num_thr);
r=0;
temp=n;
while ( (n /= 2 ) != 0 ){
r++;}
n=temp;
//Calculate number of iterations without communication
for (i=0; i<n; i++){
R[i].Re = X[i].Re;
R[i].Im = X[i].Im;
}
border=n/2;
//r-d iterations with communication but since
for (m=0; m<r; m++){
//mb=pow(2,m);
/* Snapshot R into S so all threads read a consistent previous stage. */
for (i=0; i<n; i++){
S[i].Re = R[i].Re;
S[i].Im = R[i].Im;
//printf("step %ld : S has %fl \n",m,S[i].Re);
}
block=n/num_thr;
#pragma omp parallel shared(S,border,block,m,r,n) private (element,start,end,tid,j,k,w,omeg)
{
/* Each thread owns a contiguous block of `block` elements. */
tid=omp_get_thread_num();
start=(tid)*block;
end=start + block;
for(element=start; element<end; element++){
/* j/k are the butterfly partners: element with bit (r-m-1) forced
   to 0 and 1 respectively. */
j=(element & (~(1 << (r-m-1)))) | (0 << (r-m-1));
k=(element & (~(1 << (r-m-1)))) | (1 << (r-m-1));
//Appropriate omega for each butterfly group
w=bitrev(element,r);
w =w << (r-1-m);
omeg=omega(n,-1,w);
if (element<k){
//R[element].Re=S[j].Re +2*S[k].Re;
R[element]=csum(S[j],cmul(omeg,S[k]));
//printf("%lf + 2* %lf \n",S[j].Re,S[k].Re);
//printf("Thread %d at end of step %ld local R %fl %fl, j: %d k: %d \n",tid,m,R[element].Re,R[element].Im,j,k);
}
else {
//R[element].Re=S[k].Re +2*S[j].Re;
R[element]=csum(S[k],cmul(omeg,S[j]));
//printf("%lf + 2* %lf \n",S[k].Re,S[j].Re);
//printf("Thread %d at end of step %ld local R %fl %fl , k: %d j: %d \n",tid,m,R[element].Re,R[element].Im,k,j);
}
}
{
#pragma omp barrier
}
//At the end of the last step reverse indices
if(m==r-1){
for(element=start; element<end; element++){
Y[element]=R[bitrev(element,r)];
}
}
}
border=border/2;
}
en=omp_get_wtime();
ext=en-bg;
//printf("Mean Time: %lf",en-bg);
free(R);
free(S);
return ext;
}
//After the process is done reverse indices in parallel
/*
 * Driver: builds a random complex signal of 2^argv[1] elements, runs
 * parallel_FFT 100 times with 2^argv[2] threads and prints the mean
 * wall-clock time of one transform.
 *
 * Returns 0 on success; -1 on missing arguments, bad partitioning
 * (size < threads, or a single thread) or allocation failure.
 */
int main(int argc, char** argv) {
    int size, i, num_thr;
    complex *X, *Y;
    double sum, mean;

    /* Fix: the original dereferenced argv[1]/argv[2] without checking
       argc and never checked the malloc results. */
    if (argc < 3) {
        printf("Usage: %s <log2(size)> <log2(num_threads)>\n", argv[0]);
        return -1;
    }

    /* Both inputs are given as powers of two. */
    size = pow(2, atoi(argv[1]));
    num_thr = pow(2, atoi(argv[2]));
    if (size < num_thr || num_thr == 1) {
        printf("Non optimal partitioning.Exiting\n");
        return -1;
    }

    Y = (complex *) malloc(size * sizeof(complex));
    X = (complex *) malloc(size * sizeof(complex));
    if (X == NULL || Y == NULL) {
        printf("Allocation failure.Exiting\n");
        free(X);
        free(Y);
        return -1;
    }

    /* Random (deliberately unseeded, hence reproducible) test signal. */
    for (i = 0; i < size; i++) {
        X[i].Re = (float)rand();
        X[i].Im = (float)rand();
    }

    /* Average the time reported by parallel_FFT over 100 runs. */
    sum = 0;
    for (i = 0; i < 100; i++)
        sum += parallel_FFT(X, Y, size, num_thr);
    mean = sum / (double) 100;

    printf("Time spent in parallel_FFT for %d elements with %d threads: %lf\n",
           size, num_thr, mean);

    free(X);
    free(Y);
    return 0;
}
ZQ_CNN_MTCNN.h | #ifndef _ZQ_CNN_MTCNN_H_
#define _ZQ_CNN_MTCNN_H_
#pragma once
#include "ZQ_CNN_Net.h"
#include "ZQ_CNN_BBoxUtils.h"
#include <omp.h>
namespace ZQ
{
class ZQ_CNN_MTCNN
{
public:
using string = std::string;
ZQ_CNN_MTCNN()
{
min_size = 60;
thresh[0] = 0.6;
thresh[1] = 0.7;
thresh[2] = 0.7;
nms_thresh[0] = 0.6;
nms_thresh[1] = 0.7;
nms_thresh[2] = 0.7;
width = 0;
height = 0;
factor = 0.709;
pnet_overlap_thresh_count = 4;
pnet_size = 12;
pnet_stride = 2;
special_handle_very_big_face = false;
force_run_pnet_multithread = false;
show_debug_info = false;
}
/* Trivial destructor: the net vectors and tensors release their own
   resources via their destructors. */
~ZQ_CNN_MTCNN()
{
}
private:
// One network instance per worker thread for each MTCNN stage.
std::vector<ZQ_CNN_Net> pnet, rnet, onet, lnet;
bool has_lnet;                   // true if a landmark (L) net was loaded
int thread_num;                  // worker threads (0 until Init succeeds)
float thresh[3], nms_thresh[3];  // per-stage score / NMS thresholds (P,R,O)
int min_size;                    // smallest face to detect, in pixels
int width, height;               // image size SetPara() was configured for
float factor;                    // image-pyramid scale factor
int pnet_overlap_thresh_count;
int pnet_size;                   // Pnet receptive field (usually 12)
int pnet_stride;
int rnet_size;                   // input sizes read from the loaded nets
int onet_size;
int lnet_size;
bool special_handle_very_big_face;
bool do_landmark;
float early_accept_thresh;
float nms_thresh_per_scale;
bool force_run_pnet_multithread;
std::vector<float> scales;       // pyramid scales derived in SetPara()
// Per-scale resized inputs plus the stage crop buffers.
std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> pnet_images;
ZQ_CNN_Tensor4D_NHW_C_Align128bit input, rnet_image, onet_image;
bool show_debug_info;
public:
// Toggle verbose timing/size logging on stdout.
void TurnOnShowDebugInfo() { show_debug_info = true; }
void TurnOffShowDebugInfo() { show_debug_info = false; }
/*
  Load the P/R/O (and optionally L) networks from parameter/model files,
  one copy per worker thread.  thread_num < 1 requests the "run Pnet
  multithreaded" mode and is clamped to 1.  On any load failure all nets
  are cleared and thread_num is reset to 0.  Records each net's input
  height as rnet_size/onet_size/lnet_size.  Returns true on success.
*/
bool Init(const string& pnet_param, const string& pnet_model, const string& rnet_param, const string& rnet_model,
const string& onet_param, const string& onet_model, int thread_num = 1,
bool has_lnet = false, const string& lnet_param = "", const std::string& lnet_model = "")
{
if (thread_num < 1)
force_run_pnet_multithread = true;
else
force_run_pnet_multithread = false;
thread_num = __max(1, thread_num);
pnet.resize(thread_num);
rnet.resize(thread_num);
onet.resize(thread_num);
this->has_lnet = has_lnet;
if (has_lnet)
{
lnet.resize(thread_num);
}
bool ret = true;
/* Load one instance of every stage per thread; stop at first failure. */
for (int i = 0; i < thread_num; i++)
{
ret = pnet[i].LoadFrom(pnet_param, pnet_model) && rnet[i].LoadFrom(rnet_param, rnet_model) && onet[i].LoadFrom(onet_param, onet_model);
if (has_lnet && ret)
ret = lnet[i].LoadFrom(lnet_param, lnet_model);
if (!ret)
break;
}
if (!ret)
{
/* Roll back to an unusable-but-consistent state. */
pnet.clear();
rnet.clear();
onet.clear();
if (has_lnet)
lnet.clear();
this->thread_num = 0;
}
else
this->thread_num = thread_num;
if (show_debug_info)
{
printf("rnet = %.1f M, onet = %.1f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0),
onet[0].GetNumOfMulAdd() / (1024.0*1024.0));
if (has_lnet)
printf("lnet = %.1f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0));
}
/* Cache the square input sizes the crop stages must resize to. */
int C, H, W;
rnet[0].GetInputDim(C, H, W);
rnet_size = H;
onet[0].GetInputDim(C, H, W);
onet_size = H;
if (has_lnet)
{
lnet[0].GetInputDim(C, H, W);
lnet_size = H;
}
return ret;
}
bool InitFromBuffer(
const char* pnet_param, __int64 pnet_param_len, const char* pnet_model, __int64 pnet_model_len,
const char* rnet_param, __int64 rnet_param_len, const char* rnet_model, __int64 rnet_model_len,
const char* onet_param, __int64 onet_param_len, const char* onet_model, __int64 onet_model_len,
int thread_num = 1, bool has_lnet = false,
const char* lnet_param = 0, __int64 lnet_param_len = 0, const char* lnet_model = 0, __int64 lnet_model_len = 0)
{
if (thread_num < 1)
force_run_pnet_multithread = true;
else
force_run_pnet_multithread = false;
thread_num = __max(1, thread_num);
pnet.resize(thread_num);
rnet.resize(thread_num);
onet.resize(thread_num);
this->has_lnet = has_lnet;
if(has_lnet)
lnet.resize(thread_num);
bool ret = true;
for (int i = 0; i < thread_num; i++)
{
ret = pnet[i].LoadFromBuffer(pnet_param, pnet_param_len,pnet_model,pnet_model_len)
&& rnet[i].LoadFromBuffer(rnet_param, rnet_param_len, rnet_model, rnet_model_len)
&& onet[i].LoadFromBuffer(onet_param, onet_param_len, onet_model, onet_model_len);
if (has_lnet && ret)
ret = lnet[i].LoadFromBuffer(lnet_param, lnet_param_len, lnet_model, lnet_model_len);
if (!ret)
break;
}
if (!ret)
{
pnet.clear();
rnet.clear();
onet.clear();
if (has_lnet)
lnet.clear();
this->thread_num = 0;
}
else
this->thread_num = thread_num;
if (show_debug_info)
{
printf("rnet = %.1f M, onet = %.1f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0),
onet[0].GetNumOfMulAdd() / (1024.0*1024.0));
if (has_lnet)
printf("lnet = %.1f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0));
}
int C, H, W;
rnet[0].GetInputDim(C, H, W);
rnet_size = H;
onet[0].GetInputDim(C, H, W);
onet_size = H;
return ret;
}
/*
  Configure detection parameters and (re)build the image pyramid.
  Thresholds are clamped to >= 0.1 and the scale factor to [0.5, 0.97].
  The scale list / per-scale buffers are rebuilt only when the image size
  or the scale factor changes.
*/
void SetPara(int w, int h, int min_face_size = 60, float pthresh = 0.6, float rthresh = 0.7, float othresh = 0.7,
    float nms_pthresh = 0.6, float nms_rthresh = 0.7, float nms_othresh = 0.7, float scale_factor = 0.709,
    int pnet_overlap_thresh_count = 4, int pnet_size = 12, int pnet_stride = 2, bool special_handle_very_big_face = false,
    bool do_landmark = true, float early_accept_thresh = 1.00)
{
    min_size = __max(pnet_size, min_face_size);
    thresh[0] = __max(0.1, pthresh); thresh[1] = __max(0.1, rthresh); thresh[2] = __max(0.1, othresh);
    nms_thresh[0] = __max(0.1, nms_pthresh); nms_thresh[1] = __max(0.1, nms_rthresh); nms_thresh[2] = __max(0.1, nms_othresh);
    scale_factor = __max(0.5, __min(0.97, scale_factor));
    this->pnet_overlap_thresh_count = __max(0, pnet_overlap_thresh_count);
    this->pnet_size = pnet_size;
    this->pnet_stride = pnet_stride;
    this->special_handle_very_big_face = special_handle_very_big_face;
    this->do_landmark = do_landmark;
    this->early_accept_thresh = early_accept_thresh;
    if (pnet_size == 20 && pnet_stride == 4)
        nms_thresh_per_scale = 0.45;
    else
        nms_thresh_per_scale = 0.495;
    if (width != w || height != h || factor != scale_factor)
    {
        scales.clear();
        pnet_images.clear();
        width = w; height = h;
        /* Fix: `factor` was never updated, so a changed scale_factor made
           this branch run on every call and the pyramid below kept using
           the stale factor. */
        factor = scale_factor;
        float minside = __min(width, height);
        int MIN_DET_SIZE = pnet_size;
        /* Scale so the smallest face (min_size) maps onto the Pnet window. */
        float m = (float)MIN_DET_SIZE / min_size;
        minside *= m;
        while (minside > MIN_DET_SIZE)
        {
            scales.push_back(m);
            minside *= factor;
            m *= factor;
        }
        minside = __min(width, height);
        int count = scales.size();
        /* Drop trailing scales whose resized short side fits in one window. */
        for (int i = scales.size() - 1; i >= 0; i--)
        {
            if (ceil(scales[i] * minside) <= pnet_size)
            {
                count--;
            }
        }
        if (special_handle_very_big_face)
        {
            if (count > 2)
                count--;
            scales.resize(count);
            if (count > 0)
            {
                /* Densify the coarse end of the pyramid for huge faces. */
                float last_size = ceil(scales[count - 1] * minside);
                for (int tmp_size = last_size - 1; tmp_size >= pnet_size + 1; tmp_size -= 2)
                {
                    scales.push_back((float)tmp_size / minside);
                    count++;
                }
            }
            scales.push_back((float)pnet_size / minside);
            count++;
        }
        else
        {
            scales.push_back((float)pnet_size / minside);
            count++;
        }
        pnet_images.resize(count);
    }
}
/*
  Full MTCNN pipeline on a BGR image: Pnet -> Rnet -> Onet, plus the Lnet
  landmark refinement stage when it was loaded and do_landmark is set.
  Returns false if any stage fails; detections are returned in `results`.
*/
bool Find(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& results)
{
    double t1 = omp_get_wtime();
    std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox;
    if (!_Pnet_stage(bgr_img, _width, _height, _widthStep, firstBbox))
        return false;
    double t2 = omp_get_wtime();
    if (!_Rnet_stage(firstBbox, secondBbox))
        return false;
    if (!has_lnet || !do_landmark)
    {
        double t3 = omp_get_wtime();
        if (!_Onet_stage(secondBbox, results))
            return false;
        double t4 = omp_get_wtime();
        if (show_debug_info)
        {
            /* Fix: size() is size_t; passing it to %d is undefined —
               cast explicitly. */
            printf("final found num: %d\n", (int)results.size());
            printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms)\n",
                1000 * (t4 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3));
        }
    }
    else
    {
        double t3 = omp_get_wtime();
        if (!_Onet_stage(secondBbox, thirdBbox))
            return false;
        double t4 = omp_get_wtime();
        if (!_Lnet_stage(thirdBbox, results))
            return false;
        double t5 = omp_get_wtime();
        if (show_debug_info)
        {
            printf("final found num: %d\n", (int)results.size());
            printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n",
                1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4));
        }
    }
    return true;
}
/*
  Like Find() but produces 106-point landmarks, which requires the Lnet
  stage: returns false when no Lnet was loaded or do_landmark is off.
*/
bool Find106(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox106>& results)
{
    double t1 = omp_get_wtime();
    std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox;
    if (!_Pnet_stage(bgr_img, _width, _height, _widthStep, firstBbox))
        return false;
    double t2 = omp_get_wtime();
    if (!_Rnet_stage(firstBbox, secondBbox))
        return false;
    /* The 106-landmark output comes from Lnet; without it we cannot
       fulfill the contract. */
    if (!has_lnet || !do_landmark)
    {
        return false;
    }
    double t3 = omp_get_wtime();
    if (!_Onet_stage(secondBbox, thirdBbox))
        return false;
    double t4 = omp_get_wtime();
    if (!_Lnet106_stage(thirdBbox, results))
        return false;
    double t5 = omp_get_wtime();
    if (show_debug_info)
    {
        /* Fix: size() is size_t; passing it to %d is undefined — cast. */
        printf("final found num: %d\n", (int)results.size());
        printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n",
            1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4));
    }
    return true;
}
private:
/*
  Run Pnet over every pyramid scale on one thread.  For each usable scale
  it records the score-map dimensions in mapH/mapW and fills maps[i] with
  the face-probability channel of Pnet's "prob1" output.
  NOTE(review): mapH/mapW are appended only for scales that pass the size
  check, while the second loop indexes scales[i] directly -- the two stay
  aligned only if no intermediate scale is skipped; confirm.
*/
void _compute_Pnet_single_thread(std::vector<std::vector<float> >& maps,
std::vector<int>& mapH, std::vector<int>& mapW)
{
int scale_num = 0;
for (int i = 0; i < scales.size(); i++)
{
int changedH = (int)ceil(height*scales[i]);
int changedW = (int)ceil(width*scales[i]);
if (changedH < pnet_size || changedW < pnet_size)
continue;
scale_num++;
mapH.push_back((changedH - pnet_size) / pnet_stride + 1);
mapW.push_back((changedW - pnet_size) / pnet_stride + 1);
}
maps.resize(scale_num);
for (int i = 0; i < scale_num; i++)
{
maps[i].resize(mapH[i] * mapW[i]);
}
for (int i = 0; i < scale_num; i++)
{
int changedH = (int)ceil(height*scales[i]);
int changedW = (int)ceil(width*scales[i]);
float cur_scale_x = (float)width / changedW;
float cur_scale_y = (float)height / changedH;
double t10 = omp_get_wtime();
/* Scale 1 feeds `input` directly; other scales go through a resized copy. */
if (scales[i] != 1)
{
input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
}
double t11 = omp_get_wtime();
if (scales[i] != 1)
pnet[0].Forward(pnet_images[i]);
else
pnet[0].Forward(input);
double t12 = omp_get_wtime();
if (show_debug_info)
printf("Pnet [%d]: resolution [%dx%d], resize:%.3f ms, cost:%.3f ms\n",
i, changedW, changedH, 1000 * (t11 - t10), 1000 * (t12 - t11));
const ZQ_CNN_Tensor4D* score = pnet[0].GetBlobByName("prob1");
//score p
int scoreH = score->GetH();
int scoreW = score->GetW();
int scorePixStep = score->GetPixelStep();
/* +1 selects the "face" probability channel of the 2-channel softmax. */
const float *p = score->GetFirstPixelPtr() + 1;
for (int row = 0; row < scoreH; row++)
{
for (int col = 0; col < scoreW; col++)
{
if(row < mapH[i] && col < mapW[i])
maps[i][row*mapW[i] + col] = *p;
p += scorePixStep;
}
}
}
}
/*
  Run Pnet over the pyramid by tiling each scaled image into overlapping
  blocks (block_size with a cellsize-stride border so windows on block
  seams are not lost) and forwarding each block independently, optionally
  across threads.  The per-block score maps are stitched back into the
  per-scale maps at offsets derived from the block origin.
  NOTE(review): the thread_num<=1 and >1 paths are intentionally duplicated
  except for the omp pragma.
*/
void _compute_Pnet_multi_thread(std::vector<std::vector<float> >& maps,
std::vector<int>& mapH, std::vector<int>& mapW)
{
/* Pass 1: materialize every scaled copy of the input. */
if (thread_num <= 1)
{
for (int i = 0; i < scales.size(); i++)
{
int changedH = (int)ceil(height*scales[i]);
int changedW = (int)ceil(width*scales[i]);
if (changedH < pnet_size || changedW < pnet_size)
continue;
if (scales[i] != 1)
{
input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
}
}
}
else
{
#pragma omp parallel for num_threads(thread_num)
for (int i = 0; i < scales.size(); i++)
{
int changedH = (int)ceil(height*scales[i]);
int changedW = (int)ceil(width*scales[i]);
if (changedH < pnet_size || changedW < pnet_size)
continue;
if (scales[i] != 1)
{
input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
}
}
}
/* Pass 2: size the per-scale output score maps. */
int scale_num = 0;
for (int i = 0; i < scales.size(); i++)
{
int changedH = (int)ceil(height*scales[i]);
int changedW = (int)ceil(width*scales[i]);
if (changedH < pnet_size || changedW < pnet_size)
continue;
scale_num++;
mapH.push_back((changedH - pnet_size) / pnet_stride + 1);
mapW.push_back((changedW - pnet_size) / pnet_stride + 1);
}
maps.resize(scale_num);
for (int i = 0; i < scale_num; i++)
{
maps[i].resize(mapH[i] * mapW[i]);
}
/* Pass 3: build the flat task list of (scale, block rectangle). */
std::vector<int> task_rect_off_x;
std::vector<int> task_rect_off_y;
std::vector<int> task_rect_width;
std::vector<int> task_rect_height;
std::vector<float> task_scale;
std::vector<int> task_scale_id;
int stride = pnet_stride;
const int block_size = 64 * stride;
int cellsize = pnet_size;
/* Blocks overlap by border_size so windows straddling seams are kept. */
int border_size = cellsize - stride;
int overlap_border_size = cellsize / stride;
int jump_size = block_size - border_size;
for (int i = 0; i < scales.size(); i++)
{
int changeH = (int)ceil(height*scales[i]);
int changeW = (int)ceil(width*scales[i]);
if (changeH < pnet_size || changeW < pnet_size)
continue;
int block_H_num = 0;
int block_W_num = 0;
int start = 0;
while (start < changeH)
{
block_H_num++;
if (start + block_size >= changeH)
break;
start += jump_size;
}
start = 0;
while (start < changeW)
{
block_W_num++;
if (start + block_size >= changeW)
break;
start += jump_size;
}
for (int s = 0; s < block_H_num; s++)
{
for (int t = 0; t < block_W_num; t++)
{
int rect_off_x = t * jump_size;
int rect_off_y = s * jump_size;
int rect_width = __min(changeW, rect_off_x + block_size) - rect_off_x;
int rect_height = __min(changeH, rect_off_y + block_size) - rect_off_y;
if (rect_width >= cellsize && rect_height >= cellsize)
{
task_rect_off_x.push_back(rect_off_x);
task_rect_off_y.push_back(rect_off_y);
task_rect_width.push_back(rect_width);
task_rect_height.push_back(rect_height);
task_scale.push_back(scales[i]);
task_scale_id.push_back(i);
}
}
}
}
//
/* Pass 4: forward each block and scatter its scores into the scale map. */
int task_num = task_scale.size();
std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_pnet_images(thread_num);
if (thread_num <= 1)
{
for (int i = 0; i < task_num; i++)
{
int thread_id = omp_get_thread_num();
int scale_id = task_scale_id[i];
float cur_scale = task_scale[i];
int i_rect_off_x = task_rect_off_x[i];
int i_rect_off_y = task_rect_off_y[i];
int i_rect_width = task_rect_width[i];
int i_rect_height = task_rect_height[i];
if (scale_id == 0 && scales[0] == 1)
{
if (!input.ROI(task_pnet_images[thread_id],
i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
continue;
}
else
{
if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id],
i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
continue;
}
if (!pnet[thread_id].Forward(task_pnet_images[thread_id]))
continue;
const ZQ_CNN_Tensor4D* score = pnet[thread_id].GetBlobByName("prob1");
int task_count = 0;
//score p
int scoreH = score->GetH();
int scoreW = score->GetW();
int scorePixStep = score->GetPixelStep();
const float *p = score->GetFirstPixelPtr() + 1;
ZQ_CNN_BBox bbox;
ZQ_CNN_OrderScore order;
for (int row = 0; row < scoreH; row++)
{
for (int col = 0; col < scoreW; col++)
{
int real_row = row + i_rect_off_y / stride;
int real_col = col + i_rect_off_x / stride;
if (real_row < mapH[scale_id] && real_col < mapW[scale_id])
maps[scale_id][real_row*mapW[scale_id] + real_col] = *p;
p += scorePixStep;
}
}
}
}
else
{
#pragma omp parallel for num_threads(thread_num)
for (int i = 0; i < task_num; i++)
{
int thread_id = omp_get_thread_num();
int scale_id = task_scale_id[i];
float cur_scale = task_scale[i];
int i_rect_off_x = task_rect_off_x[i];
int i_rect_off_y = task_rect_off_y[i];
int i_rect_width = task_rect_width[i];
int i_rect_height = task_rect_height[i];
if (scale_id == 0 && scales[0] == 1)
{
if (!input.ROI(task_pnet_images[thread_id],
i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
continue;
}
else
{
if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id],
i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
continue;
}
if (!pnet[thread_id].Forward(task_pnet_images[thread_id]))
continue;
const ZQ_CNN_Tensor4D* score = pnet[thread_id].GetBlobByName("prob1");
int task_count = 0;
//score p
int scoreH = score->GetH();
int scoreW = score->GetW();
int scorePixStep = score->GetPixelStep();
const float *p = score->GetFirstPixelPtr() + 1;
ZQ_CNN_BBox bbox;
ZQ_CNN_OrderScore order;
for (int row = 0; row < scoreH; row++)
{
for (int col = 0; col < scoreW; col++)
{
int real_row = row + i_rect_off_y / stride;
int real_col = col + i_rect_off_x / stride;
if (real_row < mapH[scale_id] && real_col < mapW[scale_id])
maps[scale_id][real_row*mapW[scale_id] + real_col] = *p;
p += scorePixStep;
}
}
}
}
}
}
// Stage 1 (Pnet / proposal network).
// Converts the BGR frame, runs the pnet score maps over every pyramid scale
// (single- or multi-threaded), thresholds the maps into candidate boxes,
// applies per-scale NMS (block-wise for large maps), rescales survivors back
// to original-image coordinates and gathers them into firstBbox.
// Returns false when the frame size mismatches, conversion fails, or no
// candidate survives the final cross-scale NMS.
bool _Pnet_stage(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& firstBbox)
{
	if (thread_num <= 0)
		return false;
	double t1 = omp_get_wtime();
	firstBbox.clear();
	if (width != _width || height != _height)
		return false;
	if (!input.ConvertFromBGR(bgr_img, width, height, width * 3))
		return false;
	double t2 = omp_get_wtime();
	if (show_debug_info)
		printf("convert cost: %.3f ms\n", 1000 * (t2 - t1));
	// Per-scale probability maps produced by the pnet forward passes.
	std::vector<std::vector<float> > maps;
	std::vector<int> mapH;
	std::vector<int> mapW;
	if (thread_num == 1 && !force_run_pnet_multithread)
	{
		pnet[0].TurnOffShowDebugInfo();
		//pnet[0].TurnOnShowDebugInfo();
		_compute_Pnet_single_thread(maps, mapH, mapW);
	}
	else
	{
		_compute_Pnet_multi_thread(maps, mapH, mapW);
	}
	ZQ_CNN_OrderScore order;
	std::vector<std::vector<ZQ_CNN_BBox> > bounding_boxes(scales.size());
	std::vector<std::vector<ZQ_CNN_OrderScore> > bounding_scores(scales.size());
	const int block_size = 32;
	int stride = pnet_stride;
	int cellsize = pnet_size;
	int border_size = cellsize / stride;
	for (int i = 0; i < maps.size(); i++)
	{
		double t13 = omp_get_wtime();
		int changedH = (int)ceil(height*scales[i]);
		int changedW = (int)ceil(width*scales[i]);
		if (changedH < pnet_size || changedW < pnet_size)
			continue;
		// Map score-map coordinates back to original-image coordinates.
		float cur_scale_x = (float)width / changedW;
		float cur_scale_y = (float)height / changedH;
		int count = 0;
		//score p
		int scoreH = mapH[i];
		int scoreW = mapW[i];
		const float *p = &maps[i][0];
		// NOTE(review): mixed <= / < here looks unintentional but is kept —
		// both branches produce valid results, only the NMS strategy differs.
		if (scoreW <= block_size && scoreH < block_size)
		{
			// Small map: threshold + NMS over the whole map at once.
			ZQ_CNN_BBox bbox;
			ZQ_CNN_OrderScore order;
			for (int row = 0; row < scoreH; row++)
			{
				for (int col = 0; col < scoreW; col++)
				{
					if (*p > thresh[0])
					{
						bbox.score = *p;
						order.score = *p;
						order.oriOrder = count;
						bbox.row1 = stride*row;
						bbox.col1 = stride*col;
						bbox.row2 = stride*row + cellsize;
						bbox.col2 = stride*col + cellsize;
						bbox.exist = true;
						bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
						// Cells inside the border can count overlaps reliably.
						bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size)
							&& (col >= border_size && col < scoreW - border_size);
						bounding_boxes[i].push_back(bbox);
						bounding_scores[i].push_back(order);
						count++;
					}
					p ++;
				}
			}
			int before_count = bounding_boxes[i].size();
			ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count);
			int after_count = bounding_boxes[i].size();
			for (int j = 0; j < after_count; j++)
			{
				ZQ_CNN_BBox& bbox = bounding_boxes[i][j];
				bbox.row1 = round(bbox.row1 *cur_scale_y);
				bbox.col1 = round(bbox.col1 *cur_scale_x);
				bbox.row2 = round(bbox.row2 *cur_scale_y);
				bbox.col2 = round(bbox.col2 *cur_scale_x);
				bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
			}
			double t14 = omp_get_wtime();
			if (show_debug_info)
				printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count);
		}
		else
		{
			// Large map: partition into a block_H_num x block_W_num grid, do
			// threshold + NMS per block (optionally in parallel), then merge.
			int before_count = 0, after_count = 0;
			int block_H_num = __max(1, scoreH / block_size);
			int block_W_num = __max(1, scoreW / block_size);
			int block_num = block_H_num*block_W_num;
			int width_per_block = scoreW / block_W_num;
			int height_per_block = scoreH / block_H_num;
			std::vector<std::vector<ZQ_CNN_BBox> > tmp_bounding_boxes(block_num);
			std::vector<std::vector<ZQ_CNN_OrderScore> > tmp_bounding_scores(block_num);
			std::vector<int> block_start_w(block_num), block_end_w(block_num);
			std::vector<int> block_start_h(block_num), block_end_h(block_num);
			for (int bh = 0; bh < block_H_num; bh++)
			{
				for (int bw = 0; bw < block_W_num; bw++)
				{
					int bb = bh * block_W_num + bw;
					block_start_w[bb] = (bw == 0) ? 0 : (bw*width_per_block - border_size);
					// BUGFIX: was (bw == block_num - 1) / (bh == block_num - 1),
					// so whenever the block grid was 2-D the last column/row of
					// blocks never extended to the score-map edge and border
					// detections were silently dropped.
					block_end_w[bb] = (bw == block_W_num - 1) ? scoreW : ((bw + 1)*width_per_block);
					block_start_h[bb] = (bh == 0) ? 0 : (bh*height_per_block - border_size);
					block_end_h[bb] = (bh == block_H_num - 1) ? scoreH : ((bh + 1)*height_per_block);
				}
			}
			int chunk_size = ceil((float)block_num / thread_num);
			if (thread_num <= 1)
			{
				for (int bb = 0; bb < block_num; bb++)
				{
					ZQ_CNN_BBox bbox;
					ZQ_CNN_OrderScore order;
					int count = 0;
					for (int row = block_start_h[bb]; row < block_end_h[bb]; row++)
					{
						p = &maps[i][0] + row*scoreW + block_start_w[bb];
						for (int col = block_start_w[bb]; col < block_end_w[bb]; col++)
						{
							if (*p > thresh[0])
							{
								bbox.score = *p;
								order.score = *p;
								order.oriOrder = count;
								bbox.row1 = stride*row;
								bbox.col1 = stride*col;
								bbox.row2 = stride*row + cellsize;
								bbox.col2 = stride*col + cellsize;
								bbox.exist = true;
								bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size)
									&& (col >= border_size && col < scoreW - border_size);
								bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
								tmp_bounding_boxes[bb].push_back(bbox);
								tmp_bounding_scores[bb].push_back(order);
								count++;
							}
							p++;
						}
					}
					int tmp_before_count = tmp_bounding_boxes[bb].size();
					ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count);
					int tmp_after_count = tmp_bounding_boxes[bb].size();
					before_count += tmp_before_count;
					after_count += tmp_after_count;
				}
			}
			else
			{
				// BUGFIX: before_count/after_count were accumulated by all
				// threads without synchronization (data race on the debug
				// counters); use an OpenMP reduction instead.
#pragma omp parallel for schedule(static, chunk_size) num_threads(thread_num) reduction(+:before_count,after_count)
				for (int bb = 0; bb < block_num; bb++)
				{
					ZQ_CNN_BBox bbox;
					ZQ_CNN_OrderScore order;
					int count = 0;
					for (int row = block_start_h[bb]; row < block_end_h[bb]; row++)
					{
						const float* p = &maps[i][0] + row*scoreW + block_start_w[bb];
						for (int col = block_start_w[bb]; col < block_end_w[bb]; col++)
						{
							if (*p > thresh[0])
							{
								bbox.score = *p;
								order.score = *p;
								order.oriOrder = count;
								bbox.row1 = stride*row;
								bbox.col1 = stride*col;
								bbox.row2 = stride*row + cellsize;
								bbox.col2 = stride*col + cellsize;
								bbox.exist = true;
								bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size)
									&& (col >= border_size && col < scoreW - border_size);
								bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
								tmp_bounding_boxes[bb].push_back(bbox);
								tmp_bounding_scores[bb].push_back(order);
								count++;
							}
							p++;
						}
					}
					int tmp_before_count = tmp_bounding_boxes[bb].size();
					ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count);
					int tmp_after_count = tmp_bounding_boxes[bb].size();
					before_count += tmp_before_count;
					after_count += tmp_after_count;
				}
			}
			// Merge the per-block survivors into the per-scale list.
			count = 0;
			for (int bb = 0; bb < block_num; bb++)
			{
				std::vector<ZQ_CNN_BBox>::iterator it = tmp_bounding_boxes[bb].begin();
				for (; it != tmp_bounding_boxes[bb].end(); it++)
				{
					if ((*it).exist)
					{
						bounding_boxes[i].push_back(*it);
						order.score = (*it).score;
						order.oriOrder = count;
						bounding_scores[i].push_back(order);
						count++;
					}
				}
			}
			//ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", 0);
			after_count = bounding_boxes[i].size();
			for (int j = 0; j < after_count; j++)
			{
				ZQ_CNN_BBox& bbox = bounding_boxes[i][j];
				bbox.row1 = round(bbox.row1 *cur_scale_y);
				bbox.col1 = round(bbox.col1 *cur_scale_x);
				bbox.row2 = round(bbox.row2 *cur_scale_y);
				bbox.col2 = round(bbox.col2 *cur_scale_x);
				bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
			}
			double t14 = omp_get_wtime();
			if (show_debug_info)
				printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count);
		}
	}
	// Collect surviving candidates from all scales for the cross-scale NMS.
	std::vector<ZQ_CNN_OrderScore> firstOrderScore;
	int count = 0;
	for (int i = 0; i < scales.size(); i++)
	{
		std::vector<ZQ_CNN_BBox>::iterator it = bounding_boxes[i].begin();
		for (; it != bounding_boxes[i].end(); it++)
		{
			if ((*it).exist)
			{
				firstBbox.push_back(*it);
				order.score = (*it).score;
				order.oriOrder = count;
				firstOrderScore.push_back(order);
				count++;
			}
		}
	}
	//the first stage's nms
	if (count < 1) return false;
	double t15 = omp_get_wtime();
	ZQ_CNN_BBoxUtils::_nms(firstBbox, firstOrderScore, nms_thresh[0], "Union", 0, 1);
	ZQ_CNN_BBoxUtils::_refine_and_square_bbox(firstBbox, width, height,true);
	double t16 = omp_get_wtime();
	if (show_debug_info)
		printf("nms cost: %.3f ms\n", 1000 * (t16 - t15));
	if (show_debug_info)
		printf("first stage candidate count: %d\n", count);
	double t3 = omp_get_wtime();
	if (show_debug_info)
		printf("stage 1: cost %.3f ms\n", 1000 * (t3 - t2));
	return true;
}
bool _Rnet_stage(std::vector<ZQ_CNN_BBox>& firstBbox, std::vector<ZQ_CNN_BBox>& secondBbox)
{
double t3 = omp_get_wtime();
secondBbox.clear();
std::vector<ZQ_CNN_BBox>::iterator it = firstBbox.begin();
std::vector<ZQ_CNN_OrderScore> secondScore;
std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
int r_count = 0;
for (; it != firstBbox.end(); it++)
{
if ((*it).exist)
{
int off_x = it->col1;
int off_y = it->row1;
int rect_w = it->col2 - off_x;
int rect_h = it->row2 - off_y;
if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
{
(*it).exist = false;
continue;
}
else
{
src_off_x.push_back(off_x);
src_off_y.push_back(off_y);
src_rect_w.push_back(rect_w);
src_rect_h.push_back(rect_h);
r_count++;
secondBbox.push_back(*it);
}
}
}
int batch_size = 64;
int per_num = ceil((float)r_count / thread_num);
int need_thread_num = thread_num;
if (per_num > batch_size)
{
need_thread_num = ceil((float)r_count / batch_size);
per_num = batch_size;
}
std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_rnet_images(need_thread_num);
std::vector<std::vector<int> > task_src_off_x(need_thread_num);
std::vector<std::vector<int> > task_src_off_y(need_thread_num);
std::vector<std::vector<int> > task_src_rect_w(need_thread_num);
std::vector<std::vector<int> > task_src_rect_h(need_thread_num);
std::vector<std::vector<ZQ_CNN_BBox> > task_secondBbox(need_thread_num);
for (int i = 0; i < need_thread_num; i++)
{
int st_id = per_num*i;
int end_id = __min(r_count, per_num*(i + 1));
int cur_num = end_id - st_id;
if (cur_num > 0)
{
task_src_off_x[i].resize(cur_num);
task_src_off_y[i].resize(cur_num);
task_src_rect_w[i].resize(cur_num);
task_src_rect_h[i].resize(cur_num);
task_secondBbox[i].resize(cur_num);
for (int j = 0; j < cur_num; j++)
{
task_src_off_x[i][j] = src_off_x[st_id + j];
task_src_off_y[i][j] = src_off_y[st_id + j];
task_src_rect_w[i][j] = src_rect_w[st_id + j];
task_src_rect_h[i][j] = src_rect_h[st_id + j];
task_secondBbox[i][j] = secondBbox[st_id + j];
}
}
}
if (thread_num <= 1)
{
for (int pp = 0; pp < need_thread_num; pp++)
{
if (task_src_off_x.size() == 0)
continue;
if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0,
task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
{
continue;
}
rnet[0].Forward(task_rnet_images[pp]);
const ZQ_CNN_Tensor4D* score = rnet[0].GetBlobByName("prob1");
const ZQ_CNN_Tensor4D* location = rnet[0].GetBlobByName("conv5-2");
const float* score_ptr = score->GetFirstPixelPtr();
const float* location_ptr = location->GetFirstPixelPtr();
int score_sliceStep = score->GetSliceStep();
int location_sliceStep = location->GetSliceStep();
int task_count = 0;
for (int i = 0; i < task_secondBbox[pp].size(); i++)
{
if (score_ptr[i*score_sliceStep + 1] > thresh[1])
{
for (int j = 0; j < 4; j++)
task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
task_count++;
}
else
{
task_secondBbox[pp][i].exist = false;
}
}
if (task_count < 1)
{
task_secondBbox[pp].clear();
continue;
}
for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--)
{
if (!task_secondBbox[pp][i].exist)
task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i);
}
}
}
else
{
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
for (int pp = 0; pp < need_thread_num; pp++)
{
int thread_id = omp_get_thread_num();
if (task_src_off_x.size() == 0)
continue;
if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0,
task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
{
continue;
}
rnet[thread_id].Forward(task_rnet_images[pp]);
const ZQ_CNN_Tensor4D* score = rnet[thread_id].GetBlobByName("prob1");
const ZQ_CNN_Tensor4D* location = rnet[thread_id].GetBlobByName("conv5-2");
const float* score_ptr = score->GetFirstPixelPtr();
const float* location_ptr = location->GetFirstPixelPtr();
int score_sliceStep = score->GetSliceStep();
int location_sliceStep = location->GetSliceStep();
int task_count = 0;
for (int i = 0; i < task_secondBbox[pp].size(); i++)
{
if (score_ptr[i*score_sliceStep + 1] > thresh[1])
{
for (int j = 0; j < 4; j++)
task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
task_count++;
}
else
{
task_secondBbox[pp][i].exist = false;
}
}
if (task_count < 1)
{
task_secondBbox[pp].clear();
continue;
}
for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--)
{
if (!task_secondBbox[pp][i].exist)
task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i);
}
}
}
int count = 0;
for (int i = 0; i < need_thread_num; i++)
{
count += task_secondBbox[i].size();
}
secondBbox.resize(count);
secondScore.resize(count);
int id = 0;
for (int i = 0; i < need_thread_num; i++)
{
for (int j = 0; j < task_secondBbox[i].size(); j++)
{
secondBbox[id] = task_secondBbox[i][j];
secondScore[id].score = secondBbox[id].score;
secondScore[id].oriOrder = id;
id++;
}
}
//ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Union");
ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Min");
ZQ_CNN_BBoxUtils::_refine_and_square_bbox(secondBbox, width, height, true);
count = secondBbox.size();
double t4 = omp_get_wtime();
if (show_debug_info)
printf("run Rnet [%d] times, candidate after nms: %d \n", r_count, count);
if (show_debug_info)
printf("stage 2: cost %.3f ms\n", 1000 * (t4 - t3));
return true;
}
// Stage 3 (Onet / output network).
// Filters stage-2 boxes by size; boxes above early_accept_thresh skip Onet
// entirely when landmarks are not requested. Remaining boxes are cropped,
// batched and run through onet; survivors get regression coords and (when the
// "conv6-3" blob exists) 5-point landmarks, then square-refine + "Min" NMS.
bool _Onet_stage(std::vector<ZQ_CNN_BBox>& secondBbox, std::vector<ZQ_CNN_BBox>& thirdBbox)
{
	double t4 = omp_get_wtime();
	thirdBbox.clear();
	std::vector<ZQ_CNN_BBox>::iterator it = secondBbox.begin();
	std::vector<ZQ_CNN_OrderScore> thirdScore;
	std::vector<ZQ_CNN_BBox> early_accept_thirdBbox;
	std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
	int o_count = 0;
	for (; it != secondBbox.end(); it++)
	{
		if ((*it).exist)
		{
			int off_x = it->col1;
			int off_y = it->row1;
			int rect_w = it->col2 - off_x;
			int rect_h = it->row2 - off_y;
			if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
			{
				(*it).exist = false;
				continue;
			}
			else
			{
				// High-confidence boxes bypass Onet when landmarks are off.
				if (!do_landmark && it->score > early_accept_thresh)
				{
					early_accept_thirdBbox.push_back(*it);
				}
				else
				{
					src_off_x.push_back(off_x);
					src_off_y.push_back(off_y);
					src_rect_w.push_back(rect_w);
					src_rect_h.push_back(rect_h);
					o_count++;
					thirdBbox.push_back(*it);
				}
			}
		}
	}
	// Split the candidates into per-thread batches of at most batch_size.
	int batch_size = 64;
	int per_num = ceil((float)o_count / thread_num);
	int need_thread_num = thread_num;
	if (per_num > batch_size)
	{
		need_thread_num = ceil((float)o_count / batch_size);
		per_num = batch_size;
	}
	std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_onet_images(need_thread_num);
	std::vector<std::vector<int> > task_src_off_x(need_thread_num);
	std::vector<std::vector<int> > task_src_off_y(need_thread_num);
	std::vector<std::vector<int> > task_src_rect_w(need_thread_num);
	std::vector<std::vector<int> > task_src_rect_h(need_thread_num);
	std::vector<std::vector<ZQ_CNN_BBox> > task_thirdBbox(need_thread_num);
	for (int i = 0; i < need_thread_num; i++)
	{
		int st_id = per_num*i;
		int end_id = __min(o_count, per_num*(i + 1));
		int cur_num = end_id - st_id;
		if (cur_num > 0)
		{
			task_src_off_x[i].resize(cur_num);
			task_src_off_y[i].resize(cur_num);
			task_src_rect_w[i].resize(cur_num);
			task_src_rect_h[i].resize(cur_num);
			task_thirdBbox[i].resize(cur_num);
			for (int j = 0; j < cur_num; j++)
			{
				task_src_off_x[i][j] = src_off_x[st_id + j];
				task_src_off_y[i][j] = src_off_y[st_id + j];
				task_src_rect_w[i][j] = src_rect_w[st_id + j];
				task_src_rect_h[i][j] = src_rect_h[st_id + j];
				task_thirdBbox[i][j] = thirdBbox[st_id + j];
			}
		}
	}
	if (thread_num <= 1)
	{
		for (int pp = 0; pp < need_thread_num; pp++)
		{
			// BUGFIX: was task_src_off_x.size() == 0 (outer vector, always
			// >= 1) — must skip when THIS task's batch is empty.
			if (task_src_off_x[pp].size() == 0)
				continue;
			if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0,
				task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
			{
				continue;
			}
			double t31 = omp_get_wtime();
			onet[0].Forward(task_onet_images[pp]);
			double t32 = omp_get_wtime();
			const ZQ_CNN_Tensor4D* score = onet[0].GetBlobByName("prob1");
			const ZQ_CNN_Tensor4D* location = onet[0].GetBlobByName("conv6-2");
			const ZQ_CNN_Tensor4D* keyPoint = onet[0].GetBlobByName("conv6-3");
			const float* score_ptr = score->GetFirstPixelPtr();
			const float* location_ptr = location->GetFirstPixelPtr();
			const float* keyPoint_ptr = 0;
			if (keyPoint != 0)
				keyPoint_ptr = keyPoint->GetFirstPixelPtr();
			int score_sliceStep = score->GetSliceStep();
			int location_sliceStep = location->GetSliceStep();
			int keyPoint_sliceStep = 0;
			if (keyPoint != 0)
				keyPoint_sliceStep = keyPoint->GetSliceStep();
			int task_count = 0;
			ZQ_CNN_OrderScore order;
			for (int i = 0; i < task_thirdBbox[pp].size(); i++)
			{
				if (score_ptr[i*score_sliceStep + 1] > thresh[2])
				{
					for (int j = 0; j < 4; j++)
						task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
					if (keyPoint != 0)
					{
						// 5 landmark points: x in [0..4], y in [5..9], both
						// normalized to the box and mapped to image coords.
						for (int num = 0; num < 5; num++)
						{
							task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 +
								(task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num];
							task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 +
								(task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5];
						}
					}
					task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
					task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
					task_count++;
				}
				else
				{
					task_thirdBbox[pp][i].exist = false;
				}
			}
			if (task_count < 1)
			{
				task_thirdBbox[pp].clear();
				continue;
			}
			for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--)
			{
				if (!task_thirdBbox[pp][i].exist)
					task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i);
			}
		}
	}
	else
	{
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
		for (int pp = 0; pp < need_thread_num; pp++)
		{
			int thread_id = omp_get_thread_num();
			// BUGFIX: same per-task emptiness check as the serial path.
			if (task_src_off_x[pp].size() == 0)
				continue;
			if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0,
				task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
			{
				continue;
			}
			double t31 = omp_get_wtime();
			onet[thread_id].Forward(task_onet_images[pp]);
			double t32 = omp_get_wtime();
			const ZQ_CNN_Tensor4D* score = onet[thread_id].GetBlobByName("prob1");
			const ZQ_CNN_Tensor4D* location = onet[thread_id].GetBlobByName("conv6-2");
			const ZQ_CNN_Tensor4D* keyPoint = onet[thread_id].GetBlobByName("conv6-3");
			const float* score_ptr = score->GetFirstPixelPtr();
			const float* location_ptr = location->GetFirstPixelPtr();
			const float* keyPoint_ptr = 0;
			if (keyPoint != 0)
				keyPoint_ptr = keyPoint->GetFirstPixelPtr();
			int score_sliceStep = score->GetSliceStep();
			int location_sliceStep = location->GetSliceStep();
			int keyPoint_sliceStep = 0;
			if (keyPoint != 0)
				keyPoint_sliceStep = keyPoint->GetSliceStep();
			int task_count = 0;
			ZQ_CNN_OrderScore order;
			for (int i = 0; i < task_thirdBbox[pp].size(); i++)
			{
				if (score_ptr[i*score_sliceStep + 1] > thresh[2])
				{
					for (int j = 0; j < 4; j++)
						task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
					if (keyPoint != 0)
					{
						for (int num = 0; num < 5; num++)
						{
							task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 +
								(task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num];
							task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 +
								(task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5];
						}
					}
					task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
					task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
					task_count++;
				}
				else
				{
					task_thirdBbox[pp][i].exist = false;
				}
			}
			if (task_count < 1)
			{
				task_thirdBbox[pp].clear();
				continue;
			}
			for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--)
			{
				if (!task_thirdBbox[pp][i].exist)
					task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i);
			}
		}
	}
	// Merge per-task survivors, then append the early-accepted boxes.
	int count = 0;
	for (int i = 0; i < need_thread_num; i++)
	{
		count += task_thirdBbox[i].size();
	}
	thirdBbox.resize(count);
	thirdScore.resize(count);
	int id = 0;
	for (int i = 0; i < need_thread_num; i++)
	{
		for (int j = 0; j < task_thirdBbox[i].size(); j++)
		{
			thirdBbox[id] = task_thirdBbox[i][j];
			thirdScore[id].score = task_thirdBbox[i][j].score;
			thirdScore[id].oriOrder = id;
			id++;
		}
	}
	ZQ_CNN_OrderScore order;
	for (int i = 0; i < early_accept_thirdBbox.size(); i++)
	{
		order.score = early_accept_thirdBbox[i].score;
		order.oriOrder = count++;
		thirdScore.push_back(order);
		thirdBbox.push_back(early_accept_thirdBbox[i]);
	}
	ZQ_CNN_BBoxUtils::_refine_and_square_bbox(thirdBbox, width, height, false);
	ZQ_CNN_BBoxUtils::_nms(thirdBbox, thirdScore, nms_thresh[2], "Min");
	double t5 = omp_get_wtime();
	if (show_debug_info)
		printf("run Onet [%d] times, candidate before nms: %d \n", o_count, count);
	if (show_debug_info)
		printf("stage 3: cost %.3f ms\n", 1000 * (t5 - t4));
	return true;
}
// Stage 4 (Lnet, 5-point landmark network).
// Filters stage-3 boxes by size, squares a COPY of them for cropping (the
// returned fourthBbox keeps the un-squared geometry), batches the crops,
// runs lnet and writes the 5 landmark points (x in ppoint[0..4],
// y in ppoint[5..9]) back into fourthBbox.
bool _Lnet_stage(std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox>& fourthBbox)
{
	double t4 = omp_get_wtime();
	fourthBbox.clear();
	std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin();
	std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
	int l_count = 0;
	for (; it != thirdBbox.end(); it++)
	{
		if ((*it).exist)
		{
			int off_x = it->col1;
			int off_y = it->row1;
			int rect_w = it->col2 - off_x;
			int rect_h = it->row2 - off_y;
			if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
			{
				(*it).exist = false;
				continue;
			}
			else
			{
				l_count++;
				fourthBbox.push_back(*it);
			}
		}
	}
	// Square a copy so the lnet crops are square; fourthBbox keeps its shape.
	std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox;
	ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height);
	for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it)
	{
		int off_x = it->col1;
		int off_y = it->row1;
		int rect_w = it->col2 - off_x;
		int rect_h = it->row2 - off_y;
		src_off_x.push_back(off_x);
		src_off_y.push_back(off_y);
		src_rect_w.push_back(rect_w);
		src_rect_h.push_back(rect_h);
	}
	// Split the candidates into per-thread batches of at most batch_size.
	int batch_size = 64;
	int per_num = ceil((float)l_count / thread_num);
	int need_thread_num = thread_num;
	if (per_num > batch_size)
	{
		need_thread_num = ceil((float)l_count / batch_size);
		per_num = batch_size;
	}
	std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_lnet_images(need_thread_num);
	std::vector<std::vector<int> > task_src_off_x(need_thread_num);
	std::vector<std::vector<int> > task_src_off_y(need_thread_num);
	std::vector<std::vector<int> > task_src_rect_w(need_thread_num);
	std::vector<std::vector<int> > task_src_rect_h(need_thread_num);
	std::vector<std::vector<ZQ_CNN_BBox> > task_fourthBbox(need_thread_num);
	for (int i = 0; i < need_thread_num; i++)
	{
		int st_id = per_num*i;
		int end_id = __min(l_count, per_num*(i + 1));
		int cur_num = end_id - st_id;
		if (cur_num > 0)
		{
			task_src_off_x[i].resize(cur_num);
			task_src_off_y[i].resize(cur_num);
			task_src_rect_w[i].resize(cur_num);
			task_src_rect_h[i].resize(cur_num);
			task_fourthBbox[i].resize(cur_num);
			for (int j = 0; j < cur_num; j++)
			{
				task_src_off_x[i][j] = src_off_x[st_id + j];
				task_src_off_y[i][j] = src_off_y[st_id + j];
				task_src_rect_w[i][j] = src_rect_w[st_id + j];
				task_src_rect_h[i][j] = src_rect_h[st_id + j];
				task_fourthBbox[i][j] = copy_fourthBbox[st_id + j];
			}
		}
	}
	if (thread_num <= 1)
	{
		for (int pp = 0; pp < need_thread_num; pp++)
		{
			// BUGFIX: was task_src_off_x.size() == 0 (outer vector, always
			// >= 1) — must skip when THIS task's batch is empty.
			if (task_src_off_x[pp].size() == 0)
				continue;
			if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0,
				task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
			{
				continue;
			}
			double t31 = omp_get_wtime();
			lnet[0].Forward(task_lnet_images[pp]);
			double t32 = omp_get_wtime();
			const ZQ_CNN_Tensor4D* keyPoint = lnet[0].GetBlobByName("conv6-3");
			const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr();
			int keyPoint_sliceStep = keyPoint->GetSliceStep();
			for (int i = 0; i < task_fourthBbox[pp].size(); i++)
			{
				// Map box-normalized landmark outputs to image coordinates.
				for (int num = 0; num < 5; num++)
				{
					task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 +
						(task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num];
					task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 +
						(task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5];
				}
			}
		}
	}
	else
	{
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
		for (int pp = 0; pp < need_thread_num; pp++)
		{
			int thread_id = omp_get_thread_num();
			// BUGFIX: same per-task emptiness check as the serial path.
			if (task_src_off_x[pp].size() == 0)
				continue;
			if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0,
				task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
			{
				continue;
			}
			double t31 = omp_get_wtime();
			lnet[thread_id].Forward(task_lnet_images[pp]);
			double t32 = omp_get_wtime();
			const ZQ_CNN_Tensor4D* keyPoint = lnet[thread_id].GetBlobByName("conv6-3");
			const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr();
			int keyPoint_sliceStep = keyPoint->GetSliceStep();
			for (int i = 0; i < task_fourthBbox[pp].size(); i++)
			{
				for (int num = 0; num < 5; num++)
				{
					task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 +
						(task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num];
					task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 +
						(task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5];
				}
			}
		}
	}
	// Copy only the landmark points back; fourthBbox keeps its un-squared
	// geometry from the filtering pass above.
	int count = 0;
	for (int i = 0; i < need_thread_num; i++)
	{
		count += task_fourthBbox[i].size();
	}
	fourthBbox.resize(count);
	int id = 0;
	for (int i = 0; i < need_thread_num; i++)
	{
		for (int j = 0; j < task_fourthBbox[i].size(); j++)
		{
			memcpy(fourthBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 10);
			id++;
		}
	}
	double t5 = omp_get_wtime();
	if (show_debug_info)
		printf("run Lnet [%d] times \n", l_count);
	if (show_debug_info)
		printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4));
	return true;
}
bool _Lnet106_stage(std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox106>& resultBbox)
{
double t4 = omp_get_wtime();
std::vector<ZQ_CNN_BBox> fourthBbox;
std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin();
std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
int l_count = 0;
for (; it != thirdBbox.end(); it++)
{
if ((*it).exist)
{
int off_x = it->col1;
int off_y = it->row1;
int rect_w = it->col2 - off_x;
int rect_h = it->row2 - off_y;
if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
{
(*it).exist = false;
continue;
}
else
{
l_count++;
fourthBbox.push_back(*it);
}
}
}
std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox;
ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height);
for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it)
{
int off_x = it->col1;
int off_y = it->row1;
int rect_w = it->col2 - off_x;
int rect_h = it->row2 - off_y;
src_off_x.push_back(off_x);
src_off_y.push_back(off_y);
src_rect_w.push_back(rect_w);
src_rect_h.push_back(rect_h);
}
int batch_size = 64;
int per_num = ceil((float)l_count / thread_num);
int need_thread_num = thread_num;
if (per_num > batch_size)
{
need_thread_num = ceil((float)l_count / batch_size);
per_num = batch_size;
}
std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_lnet_images(need_thread_num);
std::vector<std::vector<int> > task_src_off_x(need_thread_num);
std::vector<std::vector<int> > task_src_off_y(need_thread_num);
std::vector<std::vector<int> > task_src_rect_w(need_thread_num);
std::vector<std::vector<int> > task_src_rect_h(need_thread_num);
std::vector<std::vector<ZQ_CNN_BBox106> > task_fourthBbox(need_thread_num);
for (int i = 0; i < need_thread_num; i++)
{
int st_id = per_num*i;
int end_id = __min(l_count, per_num*(i + 1));
int cur_num = end_id - st_id;
if (cur_num > 0)
{
task_src_off_x[i].resize(cur_num);
task_src_off_y[i].resize(cur_num);
task_src_rect_w[i].resize(cur_num);
task_src_rect_h[i].resize(cur_num);
task_fourthBbox[i].resize(cur_num);
for (int j = 0; j < cur_num; j++)
{
task_src_off_x[i][j] = src_off_x[st_id + j];
task_src_off_y[i][j] = src_off_y[st_id + j];
task_src_rect_w[i][j] = src_rect_w[st_id + j];
task_src_rect_h[i][j] = src_rect_h[st_id + j];
task_fourthBbox[i][j].col1 = copy_fourthBbox[st_id + j].col1;
task_fourthBbox[i][j].col2 = copy_fourthBbox[st_id + j].col2;
task_fourthBbox[i][j].row1 = copy_fourthBbox[st_id + j].row1;
task_fourthBbox[i][j].row2 = copy_fourthBbox[st_id + j].row2;
task_fourthBbox[i][j].area = copy_fourthBbox[st_id + j].area;
task_fourthBbox[i][j].score = copy_fourthBbox[st_id + j].score;
task_fourthBbox[i][j].exist = copy_fourthBbox[st_id + j].exist;
}
}
}
resultBbox.resize(l_count);
for (int i = 0; i < l_count; i++)
{
resultBbox[i].col1 = fourthBbox[i].col1;
resultBbox[i].col2 = fourthBbox[i].col2;
resultBbox[i].row1 = fourthBbox[i].row1;
resultBbox[i].row2 = fourthBbox[i].row2;
resultBbox[i].score = fourthBbox[i].score;
resultBbox[i].exist = fourthBbox[i].exist;
resultBbox[i].area = fourthBbox[i].area;
}
if (thread_num <= 1)
{
for (int pp = 0; pp < need_thread_num; pp++)
{
if (task_src_off_x[pp].size() == 0)
continue;
if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0,
task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
{
continue;
}
double t31 = omp_get_wtime();
lnet[0].Forward(task_lnet_images[pp]);
double t32 = omp_get_wtime();
const ZQ_CNN_Tensor4D* keyPoint = lnet[0].GetBlobByName("conv6-3");
const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr();
int keypoint_num = keyPoint->GetC() / 2;
int keyPoint_sliceStep = keyPoint->GetSliceStep();
for (int i = 0; i < task_fourthBbox[pp].size(); i++)
{
for (int num = 0; num < keypoint_num; num++)
{
task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 +
(task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2];
task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 +
(task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1];
}
}
}
}
else
{
#pragma omp parallel for num_threads(thread_num)
for (int pp = 0; pp < need_thread_num; pp++)
{
int thread_id = omp_get_thread_num();
if (task_src_off_x.size() == 0)
continue;
if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0,
task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
{
continue;
}
double t31 = omp_get_wtime();
lnet[thread_id].Forward(task_lnet_images[pp]);
double t32 = omp_get_wtime();
const ZQ_CNN_Tensor4D* keyPoint = lnet[thread_id].GetBlobByName("conv6-3");
const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr();
int keypoint_num = keyPoint->GetC() / 2;
int keyPoint_sliceStep = keyPoint->GetSliceStep();
for (int i = 0; i < task_fourthBbox[pp].size(); i++)
{
for (int num = 0; num < keypoint_num; num++)
{
task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 +
(task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2];
task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 +
(task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1];
}
}
}
}
int count = 0;
for (int i = 0; i < need_thread_num; i++)
{
count += task_fourthBbox[i].size();
}
resultBbox.resize(count);
int id = 0;
for (int i = 0; i < need_thread_num; i++)
{
for (int j = 0; j < task_fourthBbox[i].size(); j++)
{
memcpy(resultBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 212);
id++;
}
}
double t5 = omp_get_wtime();
if (show_debug_info)
printf("run Lnet [%d] times \n", l_count);
if (show_debug_info)
printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4));
return true;
}
};
}
#endif
|
GB_unop__identity_uint16_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint16_bool
// op(A') function: GB_unop_tran__identity_uint16_bool
// C type: uint16_t
// A type: bool
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = (uint16_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_uint16_bool
(
uint16_t *Cx, // Cx and Ax may be aliased
const bool *Ax, // input array, bool
int64_t anz, // number of entries in Ax
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time;
// the caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// Cx [p] = (uint16_t) Ax [p], entry-wise, parallel with a static schedule
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
bool aij = Ax [p] ;
uint16_t z = (uint16_t) aij ;
Cx [p] = z ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_uint16_bool
(
GrB_Matrix C, // output matrix: C = identity_uint16 (cast (A'))
const GrB_Matrix A, // input matrix, entries of type bool
int64_t *GB_RESTRICT *Rowcounts, // per-slice row counts from phase 1
GBI_single_iterator Iter, // iterator over the vectors of A
const int64_t *GB_RESTRICT A_slice, // how A is partitioned across slices
int naslice // number of slices of A
)
{
#if GB_DISABLE
// disabled at compile time; caller uses the generic transpose
return (GrB_NO_VALUE) ;
#else
// the loop body lives in GB_unop_transpose.c and is specialized via the
// GB_* macros defined above (GB_PHASE_2_OF_2 selects the fill phase)
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
FRICP.h | #ifndef FRICP_H
#define FRICP_H
#include "ICP.h"
#include <AndersonAcceleration.h>
#include <eigen/unsupported/Eigen/MatrixFunctions>
#include "median.h"
#include <limits>
#define SAME_THRESHOLD 1e-6
#include <type_traits>
template<class T>
typename std::enable_if<!std::numeric_limits<T>::is_integer, bool>::type
almost_equal(T x, T y, int ulp)
{
// Two floating-point values are considered equal when their difference is
// within `ulp` units-in-the-last-place of their combined magnitude, or when
// the difference is subnormal (smaller than the least normal value of T).
const T diff = std::fabs(x - y);
if (diff < std::numeric_limits<T>::min())
return true; // subnormal difference: treat as equal
const T scale = std::fabs(x + y);
return diff <= std::numeric_limits<T>::epsilon() * scale * ulp;
}
template<int N>
class FRICP
{
public:
typedef double Scalar;
typedef Eigen::Matrix<Scalar, N, Eigen::Dynamic> MatrixNX;
typedef Eigen::Matrix<Scalar, N, N> MatrixNN;
typedef Eigen::Matrix<Scalar, N+1, N+1> AffineMatrixN;
typedef Eigen::Transform<Scalar, N, Eigen::Affine> AffineNd;
typedef Eigen::Matrix<Scalar, N, 1> VectorN;
typedef nanoflann::KDTreeAdaptor<MatrixNX, N, nanoflann::metric_L2_Simple> KDtree;
typedef Eigen::Matrix<Scalar, 6, 1> Vector6;
double test_total_construct_time=.0;
double test_total_solve_time=.0;
int test_total_iters=0;
FRICP(){};
~FRICP(){};
private:
// Matrix logarithm of a rigid transform via the real Schur form T = U R U^T.
// Rotation blocks of R are converted to skew-symmetric angle blocks (mat_B);
// the translation column is mapped through mat_V (the series correction for
// the translation part of the log). Result is U * log(R) * U^T.
AffineMatrixN LogMatrix(const AffineMatrixN& T)
{
Eigen::RealSchur<AffineMatrixN> schur(T);
AffineMatrixN U = schur.matrixU();
AffineMatrixN R = schur.matrixT();
std::vector<bool> selected(N, true);
MatrixNN mat_B = MatrixNN::Zero(N, N);
MatrixNN mat_V = MatrixNN::Identity(N, N);
for (int i = 0; i < N; i++)
{
// skip diagonal entries equal to 1 (no rotation in that subspace)
if (selected[i] && fabs(R(i, i) - 1)> SAME_THRESHOLD)
{
// find the 2x2 rotation block partner: equal diagonal entries
int pair_second = -1;
for (int j = i + 1; j <N; j++)
{
if (fabs(R(j, j) - R(i, i)) < SAME_THRESHOLD)
{
pair_second = j;
selected[j] = false;
break;
}
}
if (pair_second > 0)
{
selected[i] = false;
// clamp before acos to guard against numerical drift below -1
R(i, i) = R(i, i) < -1 ? -1 : R(i, i);
double theta = acos(R(i, i));
// sign of the off-diagonal entry fixes the rotation direction
if (R(i, pair_second) < 0)
{
theta = -theta;
}
mat_B(i, pair_second) += theta;
mat_B(pair_second, i) += -theta;
mat_V(i, pair_second) += -theta / 2;
mat_V(pair_second, i) += theta / 2;
double coeff = 1 - (theta * R(i, pair_second)) / (2 * (1 - R(i, i)));
mat_V(i, i) += -coeff;
mat_V(pair_second, pair_second) += -coeff;
}
}
}
// assemble log in Schur coordinates, then rotate back with U
AffineMatrixN LogTrim = AffineMatrixN::Zero();
LogTrim.block(0, 0, N, N) = mat_B;
LogTrim.block(0, N, N, 1) = mat_V * R.block(0, N, N, 1);
AffineMatrixN res = U * LogTrim * U.transpose();
return res;
}
// Pack a rigid transform into a 6-vector: XYZ Euler angles followed by
// the translation. NOTE(review): only meaningful for N == 3 — confirm.
inline Vector6 RotToEuler(const AffineNd& T)
{
Vector6 res;
res.head(3) = T.rotation().eulerAngles(0,1,2);
res.tail(3) = T.translation();
return res;
}
// Inverse of RotToEuler: build a homogeneous transform from XYZ Euler
// angles (v[0..2]) and a translation (v[3..5]).
// NOTE(review): hard-codes 3x3 blocks and uses `Vector3` (declared outside
// this class, presumably in ICP.h) — only valid when N == 3; confirm.
inline AffineMatrixN EulerToRot(const Vector6& v)
{
MatrixNN s (Eigen::AngleAxis<Scalar>(v(0), Vector3::UnitX())
* Eigen::AngleAxis<Scalar>(v(1), Vector3::UnitY())
* Eigen::AngleAxis<Scalar>(v(2), Vector3::UnitZ()));
AffineMatrixN m = AffineMatrixN::Zero();
m.block(0,0,3,3) = s;
m(3,3) = 1;
m.col(3).head(3) = v.tail(3);
return m;
}
// Extract the 6 free coordinates of a 4x4 twist matrix (an element of se(3)):
// the 3 skew-symmetric rotation entries followed by the translation column.
inline Vector6 LogToVec(const Eigen::Matrix4d& LogT)
{
Vector6 res;
res[0] = -LogT(1, 2);
res[1] = LogT(0, 2);
res[2] = -LogT(0, 1);
res[3] = LogT(0, 3);
res[4] = LogT(1, 3);
res[5] = LogT(2, 3);
return res;
}
// Inverse of LogToVec: rebuild the 4x4 twist matrix (skew-symmetric rotation
// part plus translation column; bottom row is all zeros).
inline AffineMatrixN VecToLog(const Vector6& v)
{
AffineMatrixN m = AffineMatrixN::Zero();
m << 0, -v[2], v[1], v[3],
v[2], 0, -v[0], v[4],
-v[1], v[0], 0, v[5],
0, 0, 0, 0;
return m;
}
// For every point of X, query its nk nearest neighbours in the kd-tree and
// take the median of the nk-1 non-self squared distances; return the square
// root of the median of those per-point medians. Used to pick the dynamic
// Welsch parameter scale.
// @param kdtree kd-tree built over the target cloud
// @param X query points (one per column)
// @param nk number of neighbours to query (must be >= 2)
double FindKnearestMed(const KDtree& kdtree,
const MatrixNX& X, int nk)
{
Eigen::VectorXd X_nearest(X.cols());
#pragma omp parallel for
for(int i = 0; i<X.cols(); i++)
{
// thread-private buffers; std::vector gives RAII cleanup
// (the previous raw new[]/delete[] leaked on any exception)
std::vector<int> id(nk);
std::vector<double> dist(nk);
kdtree.query(X.col(i).data(), nk, id.data(), dist.data());
Eigen::VectorXd k_dist = Eigen::Map<Eigen::VectorXd>(dist.data(), nk);
// skip entry 0: it is the query point itself (distance ~0)
igl::median(k_dist.tail(nk-1), X_nearest[i]);
}
double med;
igl::median(X_nearest, med);
return sqrt(med);
}
/// Find the median point-to-plane distance of each point's nk-1 nearest
/// neighbours (projected onto the nearest point's normal), then the median
/// of those medians over the whole cloud. Used to scale the dynamic Welsch
/// parameter for the point-to-plane metric.
/// @param kdtree kd-tree built over X
/// @param X point cloud (one 3D point per column)
/// @param nk number of neighbours to query (must be >= 2)
/// @param norm_x per-point normals of X (one per column)
double FindKnearestNormMed(const KDtree& kdtree, const Eigen::Matrix3Xd & X, int nk, const Eigen::Matrix3Xd & norm_x)
{
Eigen::VectorXd X_nearest(X.cols());
#pragma omp parallel for
for(int i = 0; i<X.cols(); i++)
{
// thread-private buffers; std::vector gives RAII cleanup
// (the previous raw new[]/delete[] leaked on any exception)
std::vector<int> id(nk);
std::vector<double> dist(nk);
kdtree.query(X.col(i).data(), nk, id.data(), dist.data());
Eigen::VectorXd k_dist = Eigen::Map<Eigen::VectorXd>(dist.data(), nk);
// replace squared distances by |(p_s - p_0) . n_0|, the distance of each
// neighbour to the tangent plane at the nearest point
for(int s = 1; s<nk; s++)
{
k_dist[s] = std::abs((X.col(id[s]) - X.col(id[0])).dot(norm_x.col(id[0])));
}
igl::median(k_dist.tail(nk-1), X_nearest[i]);
}
double med;
igl::median(X_nearest, med);
return med;
}
// Weighted closed-form rigid alignment (Kabsch/Umeyama): returns the rigid
// transform T minimizing sum_i w_i ||T x_i - y_i||^2.
// X and Y are temporarily de-meaned in place and restored before returning.
// @param X source points (one per column) — modified and restored
// @param Y matched target points (one per column) — modified and restored
// @param w per-correspondence weights (non-negative)
template <typename Derived1, typename Derived2, typename Derived3>
AffineNd point_to_point(Eigen::MatrixBase<Derived1>& X,
Eigen::MatrixBase<Derived2>& Y,
const Eigen::MatrixBase<Derived3>& w) {
int dim = X.rows();
/// Normalize weight vector
Eigen::VectorXd w_normalized = w / w.sum();
/// De-mean
Eigen::VectorXd X_mean(dim), Y_mean(dim);
for (int i = 0; i<dim; ++i) {
X_mean(i) = (X.row(i).array()*w_normalized.transpose().array()).sum();
Y_mean(i) = (Y.row(i).array()*w_normalized.transpose().array()).sum();
}
X.colwise() -= X_mean;
Y.colwise() -= Y_mean;
/// Compute transformation
AffineNd transformation;
// weighted cross-covariance; SVD gives the optimal rotation
MatrixXX sigma = X * w_normalized.asDiagonal() * Y.transpose();
Eigen::JacobiSVD<MatrixXX> svd(sigma, Eigen::ComputeFullU | Eigen::ComputeFullV);
if (svd.matrixU().determinant()*svd.matrixV().determinant() < 0.0) {
// det(UV) < 0 would yield a reflection; flip the last singular vector
VectorN S = VectorN::Ones(dim); S(dim-1) = -1.0;
transformation.linear() = svd.matrixV()*S.asDiagonal()*svd.matrixU().transpose();
}
else {
transformation.linear() = svd.matrixV()*svd.matrixU().transpose();
}
transformation.translation() = Y_mean - transformation.linear()*X_mean;
/// Re-apply mean
X.colwise() += X_mean;
Y.colwise() += Y_mean;
/// Return transformation
return transformation;
}
// Weighted point-to-plane rigid alignment, linearized about the identity:
// minimizes sum_i w_i ((x_i - y_i) . n_i - u_i)^2 over small rotations and
// translations via a single 6x6 normal-equation solve.
// X and Y are temporarily de-meaned (by X's weighted mean) and restored.
// @param X source points — modified and restored
// @param Y matched target points — modified and restored
// @param Norm target normals, one per correspondence
// @param w per-correspondence weights
// @param u per-correspondence plane offsets (zero in the callers here)
template <typename Derived1, typename Derived2, typename Derived3, typename Derived4, typename Derived5>
Eigen::Affine3d point_to_plane(Eigen::MatrixBase<Derived1>& X,
Eigen::MatrixBase<Derived2>& Y,
const Eigen::MatrixBase<Derived3>& Norm,
const Eigen::MatrixBase<Derived4>& w,
const Eigen::MatrixBase<Derived5>& u) {
typedef Eigen::Matrix<double, 6, 6> Matrix66;
typedef Eigen::Matrix<double, 6, 1> Vector6;
typedef Eigen::Block<Matrix66, 3, 3> Block33;
/// Normalize weight vector
Eigen::VectorXd w_normalized = w / w.sum();
/// De-mean
Eigen::Vector3d X_mean;
for (int i = 0; i<3; ++i)
X_mean(i) = (X.row(i).array()*w_normalized.transpose().array()).sum();
X.colwise() -= X_mean;
Y.colwise() -= X_mean;
/// Prepare LHS and RHS
Matrix66 LHS = Matrix66::Zero();
Vector6 RHS = Vector6::Zero();
Block33 TL = LHS.topLeftCorner<3, 3>();
Block33 TR = LHS.topRightCorner<3, 3>();
Block33 BR = LHS.bottomRightCorner<3, 3>();
// C.col(i) = x_i x n_i : the rotational part of the Jacobian row
Eigen::MatrixXd C = Eigen::MatrixXd::Zero(3, X.cols());
#pragma omp parallel
{
#pragma omp for
for (int i = 0; i<X.cols(); i++) {
C.col(i) = X.col(i).cross(Norm.col(i));
}
// the four sections touch disjoint blocks of LHS/RHS, so they can run
// concurrently without synchronization
#pragma omp sections nowait
{
#pragma omp section
for (int i = 0; i<X.cols(); i++) TL.selfadjointView<Eigen::Upper>().rankUpdate(C.col(i), w(i));
#pragma omp section
for (int i = 0; i<X.cols(); i++) TR += (C.col(i)*Norm.col(i).transpose())*w(i);
#pragma omp section
for (int i = 0; i<X.cols(); i++) BR.selfadjointView<Eigen::Upper>().rankUpdate(Norm.col(i), w(i));
#pragma omp section
for (int i = 0; i<C.cols(); i++) {
double dist_to_plane = -((X.col(i) - Y.col(i)).dot(Norm.col(i)) - u(i))*w(i);
RHS.head<3>() += C.col(i)*dist_to_plane;
RHS.tail<3>() += Norm.col(i)*dist_to_plane;
}
}
}
// only the upper triangle was accumulated; mirror it
LHS = LHS.selfadjointView<Eigen::Upper>();
/// Compute transformation
Eigen::Affine3d transformation;
Eigen::LDLT<Matrix66> ldlt(LHS);
RHS = ldlt.solve(RHS);
// solution is (rx, ry, rz, tx, ty, tz); rebuild the rotation from the
// small Euler angles
transformation = Eigen::AngleAxisd(RHS(0), Eigen::Vector3d::UnitX()) *
Eigen::AngleAxisd(RHS(1), Eigen::Vector3d::UnitY()) *
Eigen::AngleAxisd(RHS(2), Eigen::Vector3d::UnitZ());
transformation.translation() = RHS.tail<3>();
/// Apply transformation
/// Re-apply mean
X.colwise() += X_mean;
Y.colwise() += X_mean;
// undo the de-mean shift in the returned transform
transformation.translation() += X_mean - transformation.linear()*X_mean;
/// Return transformation
return transformation;
}
// One Gauss-Newton step of the point-to-plane energy, parameterized in the
// Lie algebra se(3). Builds the Jacobian of each residual
// r_i = sqrt(w_i) n_i . (R x_i + t - y_i) with respect to the 6 twist
// coordinates of Tk (using closed-form derivatives of the exponential map),
// solves the normal equations, and writes the step into `dir`.
// @param X source points (untransformed)
// @param Y matched target points
// @param norm_y target normals
// @param w per-correspondence weights
// @param Tk current transform (4x4)
// @param dir [out] Gauss-Newton step in twist coordinates
// @return g^T d, the directional derivative along the step (negative for a
// descent direction)
template <typename Derived1, typename Derived2, typename Derived3, typename Derived4>
double point_to_plane_gaussnewton(const Eigen::MatrixBase<Derived1>& X,
const Eigen::MatrixBase<Derived2>& Y,
const Eigen::MatrixBase<Derived3>& norm_y,
const Eigen::MatrixBase<Derived4>& w,
Matrix44 Tk, Vector6& dir) {
typedef Eigen::Matrix<double, 6, 6> Matrix66;
typedef Eigen::Matrix<double, 12, 6> Matrix126;
typedef Eigen::Matrix<double, 9, 3> Matrix93;
typedef Eigen::Block<Matrix126, 9, 3> Block93;
typedef Eigen::Block<Matrix126, 3, 3> Block33;
typedef Eigen::Matrix<double, 12, 1> Vector12;
typedef Eigen::Matrix<double, 9, 1> Vector9;
typedef Eigen::Matrix<double, 4, 2> Matrix42;
/// Normalize weight vector
Eigen::VectorXd w_normalized = w / w.sum();
/// Prepare LHS and RHS
Matrix66 LHS = Matrix66::Zero();
Vector6 RHS = Vector6::Zero();
// twist coordinates of Tk: (a,b,c) rotation, u translation
Vector6 log_T = LogToVec(LogMatrix(Tk));
Matrix33 B = VecToLog(log_T).block(0, 0, 3, 3);
double a = log_T[0];
double b = log_T[1];
double c = log_T[2];
Matrix33 R = Tk.block(0, 0, 3, 3);
Vector3 t = Tk.block(0, 3, 3, 1);
Vector3 u = log_T.tail(3);
// dbdw / db2dw: stacked derivatives of B and B^2 w.r.t. (a,b,c)
Matrix93 dbdw = Matrix93::Zero();
dbdw(1, 2) = dbdw(5, 0) = dbdw(6, 1) = -1;
dbdw(2, 1) = dbdw(3, 2) = dbdw(7, 0) = 1;
Matrix93 db2dw = Matrix93::Zero();
db2dw(3, 1) = db2dw(4, 0) = db2dw(6, 2) = db2dw(8, 0) = a;
db2dw(0, 1) = db2dw(1, 0) = db2dw(7, 2) = db2dw(8, 1) = b;
db2dw(0, 2) = db2dw(2, 0) = db2dw(4, 2) = db2dw(5, 1) = c;
db2dw(1, 1) = db2dw(2, 2) = -2 * a;
db2dw(3, 0) = db2dw(5, 2) = -2 * b;
db2dw(6, 0) = db2dw(7, 1) = -2 * c;
double theta = std::sqrt(a*a + b*b + c*c);
double st = sin(theta), ct = cos(theta);
// Rodrigues coefficients and their derivatives w.r.t. theta; the
// small-angle branch avoids division by ~0
Matrix42 coeff = Matrix42::Zero();
if (theta>SAME_THRESHOLD)
{
coeff << st / theta, (1 - ct) / (theta*theta),
(theta*ct - st) / (theta*theta*theta), (theta*st - 2 * (1 - ct)) / pow(theta, 4),
(1 - ct) / (theta*theta), (theta - st) / pow(theta, 3),
(theta*st - 2 * (1 - ct)) / pow(theta, 4), (theta*(1 - ct) - 3 * (theta - st)) / pow(theta, 5);
}
else
coeff(0, 0) = 1;
Matrix93 tempB3;
tempB3.block<3, 3>(0, 0) = a*B;
tempB3.block<3, 3>(3, 0) = b*B;
tempB3.block<3, 3>(6, 0) = c*B;
Matrix33 B2 = B*B;
Matrix93 temp2B3;
temp2B3.block<3, 3>(0, 0) = a*B2;
temp2B3.block<3, 3>(3, 0) = b*B2;
temp2B3.block<3, 3>(6, 0) = c*B2;
// chain rule: derivatives of R and t w.r.t. the twist coordinates
Matrix93 dRdw = coeff(0, 0)*dbdw + coeff(1, 0)*tempB3
+ coeff(2, 0)*db2dw + coeff(3, 0)*temp2B3;
Vector9 dtdw = coeff(0, 1) * dbdw*u + coeff(1, 1) * tempB3*u
+ coeff(2, 1) * db2dw*u + coeff(3, 1)*temp2B3*u;
Matrix33 dtdu = Matrix33::Identity() + coeff(2, 0)*B + coeff(2, 1) * B2;
Eigen::VectorXd rk(X.cols());
Eigen::MatrixXd Jk(X.cols(), 6);
// NOTE(review): orphaned `omp for` — runs sequentially unless called from
// inside a parallel region; confirm this is intentional
#pragma omp for
for (int i = 0; i < X.cols(); i++)
{
Vector3 xi = X.col(i);
Vector3 yi = Y.col(i);
Vector3 ni = norm_y.col(i);
double wi = sqrt(w_normalized[i]);
Matrix33 dedR = wi*ni * xi.transpose();
Vector3 dedt = wi*ni;
Vector6 dedx;
dedx(0) = (dedR.cwiseProduct(dRdw.block(0, 0, 3, 3))).sum()
+ dedt.dot(dtdw.head<3>());
dedx(1) = (dedR.cwiseProduct(dRdw.block(3, 0, 3, 3))).sum()
+ dedt.dot(dtdw.segment<3>(3));
dedx(2) = (dedR.cwiseProduct(dRdw.block(6, 0, 3, 3))).sum()
+ dedt.dot(dtdw.tail<3>());
dedx(3) = dedt.dot(dtdu.col(0));
dedx(4) = dedt.dot(dtdu.col(1));
dedx(5) = dedt.dot(dtdu.col(2));
Jk.row(i) = dedx.transpose();
rk[i] = wi * ni.dot(R*xi-yi+t);
}
// normal equations J^T J d = -J^T r
LHS = Jk.transpose() * Jk;
RHS = -Jk.transpose() * rk;
Eigen::CompleteOrthogonalDecomposition<Matrix66> cod_(LHS);
dir = cod_.solve(RHS);
double gTd = -RHS.dot(dir);
return gTd;
}
public:
// Robust point-to-point ICP with optional Anderson acceleration (the
// "Fast and Robust ICP" outer loop). X is aligned to Y in place; the final
// transform (expressed for the un-centered clouds via source/target means)
// and convergence statistics are written into `par`.
// @param X source cloud (centered); overwritten with the aligned result
// @param Y target cloud (centered)
// @param source_mean / target_mean the means removed from X / Y by the caller
// @param par ICP parameters in; results (res_trans, convergence_*) out
void point_to_point(MatrixNX& X, MatrixNX& Y, VectorN& source_mean,
VectorN& target_mean, ICP::Parameters& par){
/// Build kd-tree
KDtree kdtree(Y);
/// Buffers
MatrixNX Q = MatrixNX::Zero(N, X.cols());
VectorX W = VectorX::Zero(X.cols());
AffineNd T;
if (par.use_init) T.matrix() = par.init_trans;
else T = AffineNd::Identity();
MatrixXX To1 = T.matrix();
MatrixXX To2 = T.matrix();
int nPoints = X.cols();
//Anderson Acc para
AndersonAcceleration accelerator_;
AffineNd SVD_T = T;
double energy = .0, last_energy = std::numeric_limits<double>::max();
//ground truth point clouds
MatrixNX X_gt = X;
if(par.has_groundtruth)
{
// express the ground-truth-aligned source in the target's centered frame
VectorN temp_trans = par.gt_trans.col(N).head(N);
X_gt.colwise() += source_mean;
X_gt = par.gt_trans.block(0, 0, N, N) * X_gt;
X_gt.colwise() += temp_trans - target_mean;
}
//output para
std::string file_out = par.out_path;
std::vector<double> times, energys, gt_mses;
double begin_time, end_time, run_time;
double gt_mse = 0.0;
// dynamic welsch paras
double nu1 = 1, nu2 = 1;
double begin_init = omp_get_wtime();
//Find initial closest point
#pragma omp parallel for
for (int i = 0; i<nPoints; ++i) {
VectorN cur_p = T * X.col(i);
Q.col(i) = Y.col(kdtree.closest(cur_p.data()));
W[i] = (cur_p - Q.col(i)).norm();
}
if(par.f == ICP::WELSCH)
{
//dynamic welsch, calc k-nearest points with itself;
// nu starts large (nu1) and is annealed down to nu2 in the outer loop
nu2 = par.nu_end_k * FindKnearestMed(kdtree, Y, 7);
double med1;
igl::median(W, med1);
nu1 = par.nu_begin_k * med1;
nu1 = nu1>nu2? nu1:nu2;
}
double end_init = omp_get_wtime();
double init_time = end_init - begin_init;
//AA init
accelerator_.init(par.anderson_m, (N + 1) * (N + 1), LogMatrix(T.matrix()).data());
begin_time = omp_get_wtime();
bool stop1 = false;
// outer loop: anneal the Welsch parameter nu1 toward nu2
while(!stop1)
{
/// run ICP
int icp = 0;
for (; icp<par.max_icp; ++icp)
{
bool accept_aa = false;
energy = get_energy(par.f, W, nu1);
if (par.use_AA)
{
// accept the accelerated iterate only if it decreased the energy;
// otherwise fall back to the plain SVD step (SVD_T)
if (energy < last_energy) {
last_energy = energy;
accept_aa = true;
}
else{
accelerator_.replace(LogMatrix(SVD_T.matrix()).data());
//Re-find the closest point
#pragma omp parallel for
for (int i = 0; i<nPoints; ++i) {
VectorN cur_p = SVD_T * X.col(i);
Q.col(i) = Y.col(kdtree.closest(cur_p.data()));
W[i] = (cur_p - Q.col(i)).norm();
}
last_energy = get_energy(par.f, W, nu1);
}
}
else
last_energy = energy;
end_time = omp_get_wtime();
run_time = end_time - begin_time;
if(par.has_groundtruth)
{
gt_mse = (T*X - X_gt).squaredNorm()/nPoints;
}
// save results
energys.push_back(last_energy);
times.push_back(run_time);
gt_mses.push_back(gt_mse);
if (par.print_energy)
std::cout << "icp iter = " << icp << ", Energy = " << last_energy
<< ", time = " << run_time << std::endl;
robust_weight(par.f, W, nu1);
// Rotation and translation update
T = point_to_point(X, Q, W);
//Anderson Acc
SVD_T = T;
if (par.use_AA)
{
// accelerate in the Lie algebra, then map back with the matrix exp
AffineMatrixN Trans = (Eigen::Map<const AffineMatrixN>(accelerator_.compute(LogMatrix(T.matrix()).data()).data(), N+1, N+1)).exp();
T.linear() = Trans.block(0,0,N,N);
T.translation() = Trans.block(0,N,N,1);
}
// Find closest point
#pragma omp parallel for
for (int i = 0; i<nPoints; ++i) {
VectorN cur_p = T * X.col(i) ;
Q.col(i) = Y.col(kdtree.closest(cur_p.data()));
W[i] = (cur_p - Q.col(i)).norm();
}
/// Stopping criteria
double stop2 = (T.matrix() - To2).norm();
To2 = T.matrix();
if(stop2 < par.stop)
{
break;
}
}
if(par.f!= ICP::WELSCH)
stop1 = true;
else
{
// stop once nu1 has been annealed all the way down to nu2
stop1 = fabs(nu1 - nu2)<SAME_THRESHOLD? true: false;
nu1 = nu1*par.nu_alpha > nu2? nu1*par.nu_alpha : nu2;
if(par.use_AA)
{
accelerator_.reset(LogMatrix(T.matrix()).data());
last_energy = std::numeric_limits<double>::max();
}
}
}
///calc convergence energy
last_energy = get_energy(par.f, W, nu1);
X = T * X;
// note: without ground truth X_gt is the untransformed X, so this value is
// only meaningful when par.has_groundtruth is set
gt_mse = (X-X_gt).squaredNorm()/nPoints;
// convert T back to the original (un-centered) coordinate frames
T.translation() += - T.rotation() * source_mean + target_mean;
X.colwise() += target_mean;
///save convergence result
par.convergence_energy = last_energy;
par.convergence_gt_mse = gt_mse;
par.res_trans = T.matrix();
///output
if (par.print_output)
{
std::ofstream out_res(par.out_path);
if (!out_res.is_open())
{
std::cout << "Can't open out file " << par.out_path << std::endl;
}
//output time and energy
out_res.precision(16);
for (int i = 0; i<times.size(); i++)
{
out_res << times[i] << " "<< energys[i] << " " << gt_mses[i] << std::endl;
}
out_res.close();
std::cout << " write res to " << par.out_path << std::endl;
}
}
/// Reweighted ICP with point to plane
/// @param Source (one 3D point per column)
/// @param Target (one 3D point per column)
/// @param Target normals (one 3D normal per column)
/// @param Parameters
// template <typename Derived1, typename Derived2, typename Derived3>
// Robust point-to-plane ICP (single outer pass, no Welsch annealing here:
// the robust weight uses the fixed par.p). X is aligned to Y in place and
// the source normals norm_x are rotated accordingly; the resulting transform
// and convergence statistics are written into `par`.
// @param X source cloud (centered); overwritten with the aligned result
// @param Y target cloud (centered)
// @param norm_x source normals (rotated in place at the end)
// @param norm_y target normals (used for the plane distances)
// @param source_mean / target_mean means removed from X / Y by the caller
// @param par ICP parameters in; results out
void point_to_plane(Eigen::Matrix3Xd& X,
Eigen::Matrix3Xd& Y, Eigen::Matrix3Xd& norm_x, Eigen::Matrix3Xd& norm_y,
Eigen::Vector3d& source_mean, Eigen::Vector3d& target_mean,
ICP::Parameters &par) {
/// Build kd-tree
KDtree kdtree(Y);
/// Buffers
Eigen::Matrix3Xd Qp = Eigen::Matrix3Xd::Zero(3, X.cols());
Eigen::Matrix3Xd Qn = Eigen::Matrix3Xd::Zero(3, X.cols());
Eigen::VectorXd W = Eigen::VectorXd::Zero(X.cols());
Eigen::Matrix3Xd ori_X = X;
AffineNd T;
if (par.use_init) T.matrix() = par.init_trans;
else T = AffineNd::Identity();
AffineMatrixN To1 = T.matrix();
X = T*X;
Eigen::Matrix3Xd X_gt = X;
if(par.has_groundtruth)
{
// express the ground-truth-aligned source in the target's centered frame
Eigen::Vector3d temp_trans = par.gt_trans.block(0, 3, 3, 1);
X_gt = ori_X;
X_gt.colwise() += source_mean;
X_gt = par.gt_trans.block(0, 0, 3, 3) * X_gt;
X_gt.colwise() += temp_trans - target_mean;
}
std::vector<double> times, energys, gt_mses;
double begin_time, end_time, run_time;
double gt_mse = 0.0;
///dynamic welsch, calc k-nearest points with itself;
double begin_init = omp_get_wtime();
//Anderson Acc para
AndersonAcceleration accelerator_;
AffineNd LG_T = T;
double energy = 0.0, prev_res = std::numeric_limits<double>::max(), res = 0.0;
// Find closest point
#pragma omp parallel for
for (int i = 0; i<X.cols(); ++i) {
int id = kdtree.closest(X.col(i).data());
Qp.col(i) = Y.col(id);
Qn.col(i) = norm_y.col(id);
// W[i] = |n . (x - q)|, the point-to-plane residual
W[i] = std::abs(Qn.col(i).transpose() * (X.col(i) - Qp.col(i)));
}
double end_init = omp_get_wtime();
double init_time = end_init - begin_init;
begin_time = omp_get_wtime();
int total_iter = 0;
double test_total_time = 0.0;
bool stop1 = false;
while(!stop1)
{
/// ICP
for(int icp=0; icp<par.max_icp; ++icp) {
total_iter++;
bool accept_aa = false;
energy = get_energy(par.f, W, par.p);
end_time = omp_get_wtime();
run_time = end_time - begin_time;
energys.push_back(energy);
times.push_back(run_time);
Eigen::VectorXd test_w = (X-Qp).colwise().norm();
if(par.has_groundtruth)
{
gt_mse = (X - X_gt).squaredNorm()/X.cols();
}
gt_mses.push_back(gt_mse);
/// Compute weights
robust_weight(par.f, W, par.p);
/// Rotation and translation update
// incremental: solve the linearized step and compose it onto T
T = point_to_plane(X, Qp, Qn, W, Eigen::VectorXd::Zero(X.cols()))*T;
/// Find closest point
#pragma omp parallel for
for(int i=0; i<X.cols(); i++) {
X.col(i) = T * ori_X.col(i);
int id = kdtree.closest(X.col(i).data());
Qp.col(i) = Y.col(id);
Qn.col(i) = norm_y.col(id);
W[i] = std::abs(Qn.col(i).transpose() * (X.col(i) - Qp.col(i)));
}
if(par.print_energy)
std::cout << "icp iter = " << total_iter << ", gt_mse = " << gt_mse
<< ", energy = " << energy << std::endl;
/// Stopping criteria
double stop2 = (T.matrix() - To1).norm();
To1 = T.matrix();
if(stop2 < par.stop) break;
}
// single outer pass (no Welsch annealing in this variant)
stop1 = true;
}
par.res_trans = T.matrix();
///calc convergence energy
W = (Qn.array()*(X - Qp).array()).colwise().sum().abs().transpose();
energy = get_energy(par.f, W, par.p);
// note: only meaningful when par.has_groundtruth is set
gt_mse = (X - X_gt).squaredNorm() / X.cols();
// convert T back to the original (un-centered) coordinate frames
T.translation().noalias() += -T.rotation()*source_mean + target_mean;
X.colwise() += target_mean;
norm_x = T.rotation()*norm_x;
///save convergence result
par.convergence_energy = energy;
par.convergence_gt_mse = gt_mse;
par.res_trans = T.matrix();
///output
if (par.print_output)
{
std::ofstream out_res(par.out_path);
if (!out_res.is_open())
{
std::cout << "Can't open out file " << par.out_path << std::endl;
}
///output time and energy
out_res.precision(16);
for (int i = 0; i<total_iter; i++)
{
out_res << times[i] << " "<< energys[i] << " " << gt_mses[i] << std::endl;
}
out_res.close();
std::cout << " write res to " << par.out_path << std::endl;
}
}
/// Reweighted ICP with point to plane
/// @param Source (one 3D point per column)
/// @param Target (one 3D point per column)
/// @param Target normals (one 3D normal per column)
/// @param Parameters
// template <typename Derived1, typename Derived2, typename Derived3>
// Robust point-to-plane ICP using Gauss-Newton steps in the Lie algebra,
// with optional Anderson acceleration and dynamic Welsch annealing.
// X is aligned to Y in place; norm_x is rotated accordingly; the resulting
// transform and convergence statistics are written into `par`.
// @param X source cloud (centered); overwritten with the aligned result
// @param Y target cloud (centered)
// @param norm_x source normals (rotated in place at the end)
// @param norm_y target normals (used for the plane distances)
// @param source_mean / target_mean means removed from X / Y by the caller
// @param par ICP parameters in; results out
void point_to_plane_GN(Eigen::Matrix3Xd& X,
Eigen::Matrix3Xd& Y, Eigen::Matrix3Xd& norm_x, Eigen::Matrix3Xd& norm_y,
Eigen::Vector3d& source_mean, Eigen::Vector3d& target_mean,
ICP::Parameters &par) {
/// Build kd-tree
KDtree kdtree(Y);
/// Buffers
Eigen::Matrix3Xd Qp = Eigen::Matrix3Xd::Zero(3, X.cols());
Eigen::Matrix3Xd Qn = Eigen::Matrix3Xd::Zero(3, X.cols());
Eigen::VectorXd W = Eigen::VectorXd::Zero(X.cols());
Eigen::Matrix3Xd ori_X = X;
AffineNd T;
if (par.use_init) T.matrix() = par.init_trans;
else T = AffineNd::Identity();
AffineMatrixN To1 = T.matrix();
X = T*X;
Eigen::Matrix3Xd X_gt = X;
if(par.has_groundtruth)
{
// express the ground-truth-aligned source in the target's centered frame
Eigen::Vector3d temp_trans = par.gt_trans.block(0, 3, 3, 1);
X_gt = ori_X;
X_gt.colwise() += source_mean;
X_gt = par.gt_trans.block(0, 0, 3, 3) * X_gt;
X_gt.colwise() += temp_trans - target_mean;
}
std::vector<double> times, energys, gt_mses;
double begin_time, end_time, run_time;
// BUG FIX: was uninitialized; it is pushed into gt_mses (and printed) even
// when par.has_groundtruth is false. Initialize to 0.0 like the other
// point_to_* variants do.
double gt_mse = 0.0;
///dynamic welsch, calc k-nearest points with itself;
double nu1 = 1, nu2 = 1;
double begin_init = omp_get_wtime();
//Anderson Acc para
AndersonAcceleration accelerator_;
Vector6 LG_T;
Vector6 Dir;
//add time test
double energy = 0.0, prev_energy = std::numeric_limits<double>::max();
if(par.use_AA)
{
// acceleration operates on the 6 twist coordinates of T
Eigen::Matrix4d log_T = LogMatrix(T.matrix());
LG_T = LogToVec(log_T);
accelerator_.init(par.anderson_m, 6, LG_T.data());
}
// Find closest point
#pragma omp parallel for
for (int i = 0; i<X.cols(); ++i) {
int id = kdtree.closest(X.col(i).data());
Qp.col(i) = Y.col(id);
Qn.col(i) = norm_y.col(id);
// W[i] = |n . (x - q)|, the point-to-plane residual
W[i] = std::abs(Qn.col(i).transpose() * (X.col(i) - Qp.col(i)));
}
if(par.f == ICP::WELSCH)
{
// nu starts large (nu1) and is annealed down to nu2 in the outer loop
double med1;
igl::median(W, med1);
nu1 =par.nu_begin_k * med1;
nu2 = par.nu_end_k * FindKnearestNormMed(kdtree, Y, 7, norm_y);
nu1 = nu1>nu2? nu1:nu2;
}
double end_init = omp_get_wtime();
double init_time = end_init - begin_init;
begin_time = omp_get_wtime();
int total_iter = 0;
double test_total_time = 0.0;
bool stop1 = false;
par.max_icp = 6;
// outer loop: anneal the Welsch parameter nu1 toward nu2
while(!stop1)
{
par.max_icp = std::min(par.max_icp+1, 10);
/// ICP
for(int icp=0; icp<par.max_icp; ++icp) {
total_iter++;
int n_linsearch = 0;
energy = get_energy(par.f, W, nu1);
if(par.use_AA)
{
// accept the accelerated iterate only if the energy decreased;
// otherwise retry one plain Gauss-Newton step (alpha = 1) and keep
// whichever state is better
if(energy < prev_energy)
{
prev_energy = energy;
}
else
{
// line search
double alpha = 0.0;
Vector6 new_t = LG_T;
Eigen::VectorXd lowest_W = W;
Eigen::Matrix3Xd lowest_Qp = Qp;
Eigen::Matrix3Xd lowest_Qn = Qn;
Eigen::Affine3d lowest_T = T;
n_linsearch++;
alpha = 1;
new_t = LG_T + alpha * Dir;
T.matrix() = VecToLog(new_t).exp();
/// Find closest point
#pragma omp parallel for
for(int i=0; i<X.cols(); i++) {
X.col(i) = T * ori_X.col(i);
int id = kdtree.closest(X.col(i).data());
Qp.col(i) = Y.col(id);
Qn.col(i) = norm_y.col(id);
W[i] = std::abs(Qn.col(i).transpose() * (X.col(i) - Qp.col(i)));
}
double test_energy = get_energy(par.f, W, nu1);
if(test_energy < energy)
{
accelerator_.reset(new_t.data());
energy = test_energy;
}
else
{
// the retried step was worse: restore the saved state
Qp = lowest_Qp;
Qn = lowest_Qn;
W = lowest_W;
T = lowest_T;
}
prev_energy = energy;
}
}
else
{
prev_energy = energy;
}
end_time = omp_get_wtime();
run_time = end_time - begin_time;
energys.push_back(prev_energy);
times.push_back(run_time);
if(par.has_groundtruth)
{
gt_mse = (X - X_gt).squaredNorm()/X.cols();
}
gt_mses.push_back(gt_mse);
/// Compute weights
robust_weight(par.f, W, nu1);
/// Rotation and translation update
// Gauss-Newton step in twist coordinates, applied additively in se(3)
point_to_plane_gaussnewton(ori_X, Qp, Qn, W, T.matrix(), Dir);
LG_T = LogToVec(LogMatrix(T.matrix()));
LG_T += Dir;
T.matrix() = VecToLog(LG_T).exp();
// Anderson acc
if(par.use_AA)
{
Vector6 AA_t;
AA_t = accelerator_.compute(LG_T.data());
T.matrix() = VecToLog(AA_t).exp();
}
if(par.print_energy)
std::cout << "icp iter = " << total_iter << ", gt_mse = " << gt_mse
<< ", nu1 = " << nu1 << ", acept_aa= " << n_linsearch
<< ", energy = " << prev_energy << std::endl;
/// Find closest point
#pragma omp parallel for
for(int i=0; i<X.cols(); i++) {
X.col(i) = T * ori_X.col(i);
int id = kdtree.closest(X.col(i).data());
Qp.col(i) = Y.col(id);
Qn.col(i) = norm_y.col(id);
W[i] = std::abs(Qn.col(i).transpose() * (X.col(i) - Qp.col(i)));
}
/// Stopping criteria
double stop2 = (T.matrix() - To1).norm();
To1 = T.matrix();
if(stop2 < par.stop) break;
}
if(par.f == ICP::WELSCH)
{
// stop once nu1 has been annealed all the way down to nu2
stop1 = fabs(nu1 - nu2)<SAME_THRESHOLD? true: false;
nu1 = nu1*par.nu_alpha > nu2 ? nu1*par.nu_alpha : nu2;
if(par.use_AA)
{
accelerator_.reset(LogToVec(LogMatrix(T.matrix())).data());
prev_energy = std::numeric_limits<double>::max();
}
}
else
stop1 = true;
}
par.res_trans = T.matrix();
///calc convergence energy
W = (Qn.array()*(X - Qp).array()).colwise().sum().abs().transpose();
energy = get_energy(par.f, W, nu1);
// note: only meaningful when par.has_groundtruth is set
gt_mse = (X - X_gt).squaredNorm() / X.cols();
// convert T back to the original (un-centered) coordinate frames
T.translation().noalias() += -T.rotation()*source_mean + target_mean;
X.colwise() += target_mean;
norm_x = T.rotation()*norm_x;
///save convergence result
par.convergence_energy = energy;
par.convergence_gt_mse = gt_mse;
par.res_trans = T.matrix();
///output
if (par.print_output)
{
std::ofstream out_res(par.out_path);
if (!out_res.is_open())
{
std::cout << "Can't open out file " << par.out_path << std::endl;
}
///output time and energy
out_res.precision(16);
for (int i = 0; i<total_iter; i++)
{
out_res << times[i] << " "<< energys[i] << " " << gt_mses[i] << std::endl;
}
out_res.close();
std::cout << " write res to " << par.out_path << std::endl;
}
}
};
#endif
|
parallel_execution_omp.h | /*
* Copyright 2018 Universidad Carlos III de Madrid
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef GRPPI_OMP_PARALLEL_EXECUTION_OMP_H
#define GRPPI_OMP_PARALLEL_EXECUTION_OMP_H
#ifdef GRPPI_OMP
#include "../common/mpmc_queue.h"
#include "../common/iterator.h"
#include "../common/execution_traits.h"
#include "../common/configuration.h"
#include "grppi/seq/sequential_execution.h"
#include <type_traits>
#include <tuple>
#if __cplusplus < 201703L
#include <experimental/optional>
#else
#include <optional>
#endif
#include <omp.h>
namespace grppi {
/**
\brief OpenMP parallel execution policy.
This policy uses OpenMP as implementation back-end.
*/
class parallel_execution_omp {

public:

  /**
  \brief Default construct an OpenMP parallel execution policy.

  Creates an OpenMP parallel execution object.
  The concurrency degree is determined by the platform according to OpenMP
  rules.
  */
  parallel_execution_omp() noexcept {}

  /**
  \brief Constructs an OpenMP parallel execution policy.

  Creates an OpenMP parallel execution object selecting the concurrency
  degree. Ordering keeps its configuration default.

  \param concurrency_degree Number of threads used for parallel algorithms.
  */
  parallel_execution_omp(int concurrency_degree) noexcept :
      concurrency_degree_{concurrency_degree}
  {
    omp_set_num_threads(concurrency_degree_);
  }

  /**
  \brief Constructs an OpenMP parallel execution policy.

  Creates an OpenMP parallel execution object selecting the concurrency degree
  and ordering.

  \param concurrency_degree Number of threads used for parallel algorithms.
  \param order Whether ordered executions is enabled or disabled.
  */
  parallel_execution_omp(int concurrency_degree, bool order) noexcept :
      concurrency_degree_{concurrency_degree},
      ordering_{order}
  {
    omp_set_num_threads(concurrency_degree_);
  }

  /**
  \brief Set number of grppi threads.
  Also updates the OpenMP runtime thread count.
  */
  void set_concurrency_degree(int degree) noexcept {
    concurrency_degree_ = degree;
    omp_set_num_threads(concurrency_degree_);
  }

  /**
  \brief Get number of grppi threads.
  */
  int concurrency_degree() const noexcept {
    return concurrency_degree_;
  }

  /**
  \brief Enable ordering.
  */
  void enable_ordering() noexcept { ordering_=true; }

  /**
  \brief Disable ordering.
  */
  void disable_ordering() noexcept { ordering_=false; }

  /**
  \brief Is execution ordered.
  */
  bool is_ordered() const noexcept { return ordering_; }

  /**
  \brief Sets the attributes for the queues built through make_queue<T>()
  \param size Maximum number of elements held by a queue.
  \param mode Queue synchronization mode (blocking or lock-free).
  */
  void set_queue_attributes(int size, queue_mode mode) noexcept {
    queue_size_ = size;
    queue_mode_ = mode;
  }

  /**
  \brief Makes a communication queue for elements of type T.
  Constructs a queue using the attributes that can be set via
  set_queue_attributes(). The value is returned via move semantics.
  */
  template <typename T>
  mpmc_queue<T> make_queue() const {
    return {queue_size_, queue_mode_};
  }

  /**
  \brief Returns the reference of a communication queue for elements of type T
  if the queue has been created in an outer pattern.
  Returns the reference of the queue received as argument.
  \tparam T Element type for the queue.
  \tparam Transformers List of the next transformers.
  \param queue Reference of a queue of type T
  */
  template <typename T, typename ... Transformers>
  mpmc_queue<T>& get_output_queue(mpmc_queue<T> & queue, Transformers &&...) const {
    return queue;
  }

  /**
  \brief Makes a communication queue for elements of type T
  if the queue has not been created in an outer pattern.
  Calls make_queue() and returns the queue by value.
  \tparam T Element type for the queue.
  \tparam Transformers List of the next transformers.
  */
  template <typename T, typename ... Transformers>
  mpmc_queue<T> get_output_queue(Transformers &&... ) const{
    // Plain return enables copy elision; the previous
    // `return std::move(make_queue<T>())` pessimized by blocking it.
    return make_queue<T>();
  }

  /**
  \brief Get index of current thread in the thread table
  \note Every thread of the parallel region writes `result` without
  synchronization, so the value observed is that of the last writer.
  */
  [[deprecated("Thread ids are deprecated.\n"
               "If you have a specific use case file a bug")]]
  int get_thread_id() const noexcept {
    int result;
    #pragma omp parallel
    {
      result = omp_get_thread_num();
    }
    return result;
  }

  /**
  \brief Applies a transformation to multiple sequences leaving the result in
  another sequence using available OpenMP parallelism
  \tparam InputIterators Iterator types for input sequences.
  \tparam OutputIterator Iterator type for the output sequence.
  \tparam Transformer Callable object type for the transformation.
  \param firsts Tuple of iterators to input sequences.
  \param first_out Iterator to the output sequence.
  \param sequence_size Size of the input sequences.
  \param transform_op Transformation callable object.
  \pre For every I iterators in the range
       `[get<I>(firsts), next(get<I>(firsts),sequence_size))` are valid.
  \pre Iterators in the range `[first_out, next(first_out,sequence_size)]` are valid.
  */
  template <typename ... InputIterators, typename OutputIterator,
            typename Transformer>
  void map(std::tuple<InputIterators...> firsts,
      OutputIterator first_out,
      std::size_t sequence_size, Transformer transform_op) const;

  /**
  \brief Applies a reduction to a sequence of data items.
  \tparam InputIterator Iterator type for the input sequence.
  \tparam Identity Type for the identity value.
  \tparam Combiner Callable object type for the combination.
  \param first Iterator to the first element of the sequence.
  \param sequence_size Size of the input sequence.
  \param identity Identity value for the reduction.
  \param combine_op Combination callable object.
  \pre Iterators in the range `[first,last)` are valid.
  \return The reduction result
  */
  template <typename InputIterator, typename Identity, typename Combiner>
  auto reduce(InputIterator first, std::size_t sequence_size,
              Identity && identity, Combiner && combine_op) const;

  /**
  \brief Applies a map/reduce operation to a sequence of data items.
  \tparam InputIterators Iterator types for the input sequences.
  \tparam Identity Type for the identity value.
  \tparam Transformer Callable object type for the transformation.
  \tparam Combiner Callable object type for the combination.
  \param firsts Tuple of iterators to input sequences.
  \param sequence_size Size of the input sequence.
  \param identity Identity value for the reduction.
  \param transform_op Transformation callable object.
  \param combine_op Combination callable object.
  \pre Iterators in the range `[first,last)` are valid.
  \return The map/reduce result.
  */
  template <typename ... InputIterators, typename Identity,
            typename Transformer, typename Combiner>
  auto map_reduce(std::tuple<InputIterators...> firsts,
                  std::size_t sequence_size,
                  Identity && identity,
                  Transformer && transform_op, Combiner && combine_op) const;

  /**
  \brief Applies a stencil to multiple sequences leaving the result in
  another sequence.
  \tparam InputIterators Iterator types for input sequences.
  \tparam OutputIterator Iterator type for the output sequence.
  \tparam StencilTransformer Callable object type for the stencil transformation.
  \tparam Neighbourhood Callable object for generating neighbourhoods.
  \param firsts Tuple of iterators to input sequences.
  \param first_out Iterator to the output sequence.
  \param sequence_size Size of the input sequences.
  \param transform_op Stencil transformation callable object.
  \param neighbour_op Neighbourhood callable object.
  \pre For every I iterators in the range
       `[get<I>(firsts), next(get<I>(firsts),sequence_size))` are valid.
  \pre Iterators in the range `[first_out, next(first_out,sequence_size)]` are valid.
  */
  template <typename ... InputIterators, typename OutputIterator,
            typename StencilTransformer, typename Neighbourhood>
  void stencil(std::tuple<InputIterators...> firsts, OutputIterator first_out,
               std::size_t sequence_size,
               StencilTransformer && transform_op,
               Neighbourhood && neighbour_op) const;

  /**
  \brief Invoke \ref md_divide-conquer.
  \tparam Input Type used for the input problem.
  \tparam Divider Callable type for the divider operation.
  \tparam Solver Callable type for the solver operation.
  \tparam Combiner Callable type for the combiner operation.
  \param input Input problem to be solved.
  \param divider_op Divider operation.
  \param solver_op Solver operation.
  \param combine_op Combiner operation.
  */
  template <typename Input, typename Divider, typename Solver, typename Combiner>
  [[deprecated("Use new interface with predicate argument")]]
  auto divide_conquer(Input && input,
                      Divider && divide_op,
                      Solver && solve_op,
                      Combiner && combine_op) const;

  /**
  \brief Invoke \ref md_divide-conquer.
  \tparam Input Type used for the input problem.
  \tparam Divider Callable type for the divider operation.
  \tparam Predicate Callable type for the stop condition predicate.
  \tparam Solver Callable type for the solver operation.
  \tparam Combiner Callable type for the combiner operation.
  \param input Input problem to be solved.
  \param divider_op Divider operation.
  \param predicate_op Predicate operation.
  \param solver_op Solver operation.
  \param combine_op Combiner operation.
  */
  template <typename Input, typename Divider, typename Predicate, typename Solver, typename Combiner>
  auto divide_conquer(Input && input,
                      Divider && divide_op,
                      Predicate && predicate_op,
                      Solver && solve_op,
                      Combiner && combine_op) const;

  /**
  \brief Invoke \ref md_pipeline.
  \tparam Generator Callable type for the generator operation.
  \tparam Transformers Callable types for the transformers in the pipeline.
  \param generate_op Generator operation.
  \param transform_ops Transformer operations.
  */
  template <typename Generator, typename ... Transformers>
  void pipeline(Generator && generate_op,
                Transformers && ... transform_op) const;

  /**
  \brief Invoke \ref md_pipeline coming from another context
  that uses mpmc_queues as communication channels.
  \tparam InputType Type of the input stream.
  \tparam Transformer Callable type for the transformer in the pipeline.
  \tparam OutputType Type of the output stream.
  \param input_queue Input stream communicator.
  \param transform_op Transformer operation.
  \param output_queue Output stream communicator.
  */
  template <typename InputType, typename Transformer, typename OutputType>
  void pipeline(mpmc_queue<InputType> & input_queue, Transformer && transform_op,
                mpmc_queue<OutputType> &output_queue) const
  {
    do_pipeline(input_queue, std::forward<Transformer>(transform_op), output_queue);
  }

private:

  // Recursive implementation; num_threads is the remaining thread budget
  // shared across the whole recursion.
  template <typename Input, typename Divider, typename Solver, typename Combiner>
  auto divide_conquer(Input && input,
                      Divider && divide_op,
                      Solver && solve_op,
                      Combiner && combine_op,
                      std::atomic<int> & num_threads) const;

  template <typename Input, typename Divider, typename Predicate, typename Solver, typename Combiner>
  auto divide_conquer(Input && input,
                      Divider && divide_op,
                      Predicate && predicate_op,
                      Solver && solve_op,
                      Combiner && combine_op,
                      std::atomic<int> & num_threads) const;

  // do_pipeline overload set: each overload consumes the head stage of the
  // pipeline and recurses on the remaining stages.
  template <typename Queue, typename Consumer,
            requires_no_pattern<Consumer> = 0>
  void do_pipeline(Queue & input_queue, Consumer && consume_op) const;

  template <typename Inqueue, typename Transformer, typename output_type,
            requires_no_pattern<Transformer> = 0>
  void do_pipeline(Inqueue & input_queue, Transformer && transform_op,
      mpmc_queue<output_type> & output_queue) const;

  template <typename T, typename ... Others>
  void do_pipeline(mpmc_queue<T> & in_q, mpmc_queue<T> & same_queue, Others &&... ops) const;

  template <typename T>
  void do_pipeline(mpmc_queue<T> &) const {}

  template <typename Queue, typename Transformer, typename ... OtherTransformers,
            requires_no_pattern<Transformer> = 0>
  void do_pipeline(Queue & input_queue, Transformer && transform_op,
      OtherTransformers && ... other_ops) const;

  template <typename Queue, typename Execution, typename Transformer,
            template <typename, typename> class Context,
            typename ... OtherTransformers,
            requires_context<Context<Execution,Transformer>> = 0>
  void do_pipeline(Queue & input_queue, Context<Execution,Transformer> && context_op,
      OtherTransformers &&... other_ops) const;

  template <typename Queue, typename Execution, typename Transformer,
            template <typename, typename> class Context,
            typename ... OtherTransformers,
            requires_context<Context<Execution,Transformer>> = 0>
  void do_pipeline(Queue & input_queue, Context<Execution,Transformer> & context_op,
      OtherTransformers &&... other_ops) const
  {
    do_pipeline(input_queue, std::move(context_op),
        std::forward<OtherTransformers>(other_ops)...);
  }

  template <typename Queue, typename FarmTransformer,
            template <typename> class Farm,
            requires_farm<Farm<FarmTransformer>> = 0>
  void do_pipeline(Queue & input_queue,
      Farm<FarmTransformer> & farm_obj) const
  {
    do_pipeline(input_queue, std::move(farm_obj));
  }

  template <typename Queue, typename FarmTransformer,
            template <typename> class Farm,
            requires_farm<Farm<FarmTransformer>> = 0>
  void do_pipeline(Queue & input_queue,
      Farm<FarmTransformer> && farm_obj) const;

  template <typename Queue, typename FarmTransformer,
            template <typename> class Farm,
            typename ... OtherTransformers,
            requires_farm<Farm<FarmTransformer>> =0>
  void do_pipeline(Queue & input_queue,
      Farm<FarmTransformer> & farm_obj,
      OtherTransformers && ... other_transform_ops) const
  {
    do_pipeline(input_queue, std::move(farm_obj),
        std::forward<OtherTransformers>(other_transform_ops)...);
  }

  template <typename Queue, typename FarmTransformer,
            template <typename> class Farm,
            typename ... OtherTransformers,
            requires_farm<Farm<FarmTransformer>> =0>
  void do_pipeline(Queue & input_queue,
      Farm<FarmTransformer> && farm_obj,
      OtherTransformers && ... other_transform_ops) const;

  template <typename Queue, typename Predicate,
            template <typename> class Filter,
            requires_filter<Filter<Predicate>> = 0>
  void do_pipeline(Queue & input_queue,
      Filter<Predicate> & filter_obj) const
  {
    do_pipeline(input_queue, std::move(filter_obj));
  }

  template <typename Queue, typename Predicate,
            template <typename> class Filter,
            requires_filter<Filter<Predicate>> = 0>
  void do_pipeline(Queue & input_queue,
      Filter<Predicate> && filter_obj) const;

  template <typename Queue, typename Predicate,
            template <typename> class Filter,
            typename ... OtherTransformers,
            requires_filter<Filter<Predicate>> =0>
  void do_pipeline(Queue & input_queue,
      Filter<Predicate> & filter_obj,
      OtherTransformers && ... other_transform_ops) const
  {
    do_pipeline(input_queue, std::move(filter_obj),
        std::forward<OtherTransformers>(other_transform_ops)...);
  }

  template <typename Queue, typename Predicate,
            template <typename> class Filter,
            typename ... OtherTransformers,
            requires_filter<Filter<Predicate>> =0>
  void do_pipeline(Queue & input_queue,
      Filter<Predicate> && filter_obj,
      OtherTransformers && ... other_transform_ops) const;

  template <typename Queue, typename Combiner, typename Identity,
            template <typename C, typename I> class Reduce,
            typename ... OtherTransformers,
            requires_reduce<Reduce<Combiner,Identity>> = 0>
  void do_pipeline(Queue && input_queue, Reduce<Combiner,Identity> & reduce_obj,
                   OtherTransformers && ... other_transform_ops) const
  {
    do_pipeline(input_queue, std::move(reduce_obj),
        std::forward<OtherTransformers>(other_transform_ops)...);
  }

  template <typename Queue, typename Combiner, typename Identity,
            template <typename C, typename I> class Reduce,
            typename ... OtherTransformers,
            requires_reduce<Reduce<Combiner,Identity>> = 0>
  void do_pipeline(Queue && input_queue, Reduce<Combiner,Identity> && reduce_obj,
                   OtherTransformers && ... other_transform_ops) const;

  template <typename Queue, typename Transformer, typename Predicate,
            template <typename T, typename P> class Iteration,
            typename ... OtherTransformers,
            requires_iteration<Iteration<Transformer,Predicate>> =0,
            requires_no_pattern<Transformer> =0>
  void do_pipeline(Queue & input_queue, Iteration<Transformer,Predicate> & iteration_obj,
                   OtherTransformers && ... other_transform_ops) const
  {
    do_pipeline(input_queue, std::move(iteration_obj),
        std::forward<OtherTransformers>(other_transform_ops)...);
  }

  template <typename Queue, typename Transformer, typename Predicate,
            template <typename T, typename P> class Iteration,
            typename ... OtherTransformers,
            requires_iteration<Iteration<Transformer,Predicate>> =0,
            requires_no_pattern<Transformer> =0>
  void do_pipeline(Queue & input_queue, Iteration<Transformer,Predicate> && iteration_obj,
                   OtherTransformers && ... other_transform_ops) const;

  template <typename Queue, typename Transformer, typename Predicate,
            template <typename T, typename P> class Iteration,
            typename ... OtherTransformers,
            requires_iteration<Iteration<Transformer,Predicate>> =0,
            requires_pipeline<Transformer> =0>
  void do_pipeline(Queue & input_queue, Iteration<Transformer,Predicate> && iteration_obj,
                   OtherTransformers && ... other_transform_ops) const;

  template <typename Queue, typename ... Transformers,
            template <typename...> class Pipeline,
            typename ... OtherTransformers,
            requires_pipeline<Pipeline<Transformers...>> = 0>
  void do_pipeline(Queue & input_queue,
      Pipeline<Transformers...> & pipeline_obj,
      OtherTransformers && ... other_transform_ops) const
  {
    do_pipeline(input_queue, std::move(pipeline_obj),
        std::forward<OtherTransformers>(other_transform_ops)...);
  }

  template <typename Queue, typename ... Transformers,
            template <typename...> class Pipeline,
            typename ... OtherTransformers,
            requires_pipeline<Pipeline<Transformers...>> = 0>
  void do_pipeline(Queue & input_queue,
      Pipeline<Transformers...> && pipeline_obj,
      OtherTransformers && ... other_transform_ops) const;

  template <typename Queue, typename ... Transformers,
            std::size_t ... I>
  void do_pipeline_nested(
      Queue & input_queue,
      std::tuple<Transformers...> && transform_ops,
      std::index_sequence<I...>) const;

private:

  /**
  \brief Obtain OpenMP platform number of threads.
  Queries the current OpenMP number of threads so that it can be used in
  initialization of data members.
  \return The current OpenMP number of threads.
  \note The determination is performed inside a parallel region.
  */
  static int impl_concurrency_degree() {
    int result;
    #pragma omp parallel
    {
      result = omp_get_num_threads();
    }
    return result;
  }

private:

  configuration<> config_{};

  int concurrency_degree_= config_.concurrency_degree();

  bool ordering_ = config_.ordering();

  int queue_size_ = config_.queue_size();

  queue_mode queue_mode_ = config_.mode();
};
/**
\brief Metafunction that determines if type E is parallel_execution_omp
\tparam E Execution policy type (compared exactly; cv/reference qualifiers
are not stripped before the comparison).
*/
template <typename E>
constexpr bool is_parallel_execution_omp() {
  return std::is_same<E, parallel_execution_omp>::value;
}

/**
\brief Determines if an execution policy is supported in the current compilation.
\note Specialization for parallel_execution_omp when GRPPI_OMP is enabled.
*/
template <>
constexpr bool is_supported<parallel_execution_omp>() { return true; }

/**
\brief Determines if an execution policy supports the map pattern.
\note Specialization for parallel_execution_omp when GRPPI_OMP is enabled.
*/
template <>
constexpr bool supports_map<parallel_execution_omp>() { return true; }

/**
\brief Determines if an execution policy supports the reduce pattern.
\note Specialization for parallel_execution_omp when GRPPI_OMP is enabled.
*/
template <>
constexpr bool supports_reduce<parallel_execution_omp>() { return true; }

/**
\brief Determines if an execution policy supports the map-reduce pattern.
\note Specialization for parallel_execution_omp when GRPPI_OMP is enabled.
*/
template <>
constexpr bool supports_map_reduce<parallel_execution_omp>() { return true; }

/**
\brief Determines if an execution policy supports the stencil pattern.
\note Specialization for parallel_execution_omp when GRPPI_OMP is enabled.
*/
template <>
constexpr bool supports_stencil<parallel_execution_omp>() { return true; }

/**
\brief Determines if an execution policy supports the divide/conquer pattern.
\note Specialization for parallel_execution_omp when GRPPI_OMP is enabled.
*/
template <>
constexpr bool supports_divide_conquer<parallel_execution_omp>() { return true; }

/**
\brief Determines if an execution policy supports the pipeline pattern.
\note Specialization for parallel_execution_omp when GRPPI_OMP is enabled.
*/
template <>
constexpr bool supports_pipeline<parallel_execution_omp>() { return true; }
template <typename ... InputIterators, typename OutputIterator,
          typename Transformer>
void parallel_execution_omp::map(
    std::tuple<InputIterators...> firsts,
    OutputIterator first_out,
    std::size_t sequence_size, Transformer transform_op) const
{
  // Every index is independent, so the whole index range is simply
  // shared out across the OpenMP worker threads.
  #pragma omp parallel for
  for (std::size_t index=0; index<sequence_size; ++index) {
    first_out[index] = apply_iterators_indexed(transform_op, firsts, index);
  }
}
// Parallel reduction: the sequence is split into one chunk per configured
// thread; each chunk is reduced sequentially into a per-thread slot and the
// slots are combined at the end.
template <typename InputIterator, typename Identity, typename Combiner>
auto parallel_execution_omp::reduce(
    InputIterator first, std::size_t sequence_size,
    Identity && identity,
    Combiner && combine_op) const
{
  constexpr sequential_execution seq;
  using result_type = std::decay_t<Identity>;
  // One result slot per thread; no synchronization needed since each task
  // writes a distinct index.
  std::vector<result_type> partial_results(concurrency_degree_);
  auto process_chunk = [&](InputIterator f, std::size_t sz, std::size_t id) {
    partial_results[id] = seq.reduce(f, sz, std::forward<Identity>(identity),
        std::forward<Combiner>(combine_op));
  };
  const auto chunk_size = sequence_size/concurrency_degree_;

  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      // Spawn one task for each chunk except the last.
      for (int i=0 ;i<concurrency_degree_-1; ++i) {
        const auto delta = chunk_size * i;
        const auto chunk_first = std::next(first,delta);

        #pragma omp task firstprivate (chunk_first, chunk_size, i)
        {
          process_chunk(chunk_first, chunk_size, i);
        }
      }

      //Main thread
      // The last chunk is processed by the spawning thread itself and also
      // absorbs the remainder of the integer division.
      const auto delta = chunk_size * (concurrency_degree_ - 1);
      const auto chunk_first= std::next(first,delta);
      const auto chunk_sz = sequence_size - delta;
      process_chunk(chunk_first, chunk_sz, concurrency_degree_-1);
      #pragma omp taskwait
    }
  }

  // Combine the partials; the first partial seeds the final reduction so the
  // identity is not injected a second time.
  return seq.reduce(std::next(partial_results.begin()),
      partial_results.size()-1,
      partial_results[0], std::forward<Combiner>(combine_op));
}
// Parallel map/reduce: chunks are map-reduced sequentially into per-thread
// partials, which are then reduced (seeded with the identity) on the caller.
template <typename ... InputIterators, typename Identity,
          typename Transformer, typename Combiner>
auto parallel_execution_omp::map_reduce(
    std::tuple<InputIterators...> firsts,
    std::size_t sequence_size,
    Identity && identity,
    Transformer && transform_op, Combiner && combine_op) const
{
  constexpr sequential_execution seq;
  using result_type = std::decay_t<Identity>;
  // One partial per thread; each task writes a distinct index.
  std::vector<result_type> partial_results(concurrency_degree_);

  auto process_chunk = [&](auto f, std::size_t sz, std::size_t i) {
    partial_results[i] = seq.map_reduce(
        f, sz,
        std::forward<Identity>(identity),
        std::forward<Transformer>(transform_op),
        std::forward<Combiner>(combine_op));
  };

  const auto chunk_size = sequence_size / concurrency_degree_;

  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      // One task per chunk for all chunks but the last.
      for (int i=0;i<concurrency_degree_-1;++i) {
        #pragma omp task firstprivate(i)
        {
          const auto delta = chunk_size * i;
          const auto chunk_firsts = iterators_next(firsts,delta);
          process_chunk(chunk_firsts, chunk_size, i);
        }
      }

      // The spawning thread handles the last chunk, which also takes the
      // remainder of the integer division.
      const auto delta = chunk_size * (concurrency_degree_ - 1);
      auto chunk_firsts = iterators_next(firsts,delta);
      auto chunk_last = std::next(std::get<0>(firsts), sequence_size);

      process_chunk(chunk_firsts,
          std::distance(std::get<0>(chunk_firsts), chunk_last),
          concurrency_degree_ - 1);
      #pragma omp taskwait
    }
  }

  // Final sequential reduction of all partials, seeded with the identity.
  return seq.reduce(partial_results.begin(),
     partial_results.size(), std::forward<Identity>(identity),
     std::forward<Combiner>(combine_op));
}
// Parallel stencil: the index space is split into one chunk per thread and
// each chunk is processed by the sequential stencil implementation.
template <typename ... InputIterators, typename OutputIterator,
          typename StencilTransformer, typename Neighbourhood>
void parallel_execution_omp::stencil(
    std::tuple<InputIterators...> firsts, OutputIterator first_out,
    std::size_t sequence_size,
    StencilTransformer && transform_op,
    Neighbourhood && neighbour_op) const
{
  constexpr sequential_execution seq;
  const auto chunk_size = sequence_size / concurrency_degree_;
  // delta is both the input offset and the output offset of the chunk.
  auto process_chunk = [&](auto f, std::size_t sz, std::size_t delta) {
    seq.stencil(f, std::next(first_out,delta), sz,
      std::forward<StencilTransformer>(transform_op),
      std::forward<Neighbourhood>(neighbour_op));
  };

  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      // One task per chunk for all chunks but the last.
      for (int i=0; i<concurrency_degree_-1; ++i) {
        #pragma omp task firstprivate(i)
        {
          const auto delta = chunk_size * i;
          const auto chunk_firsts = iterators_next(firsts,delta);
          process_chunk(chunk_firsts, chunk_size, delta);
        }
      }

      // The spawning thread processes the last chunk, including the
      // remainder of the integer division.
      const auto delta = chunk_size * (concurrency_degree_ - 1);
      const auto chunk_firsts = iterators_next(firsts,delta);
      const auto chunk_last = std::next(std::get<0>(firsts), sequence_size);
      process_chunk(chunk_firsts,
          std::distance(std::get<0>(chunk_firsts), chunk_last), delta);

      #pragma omp taskwait
    }
  }
}
/// Public divide/conquer entry point (predicate interface). Sets up the
/// shared thread budget and delegates to the recursive private overload.
template <typename Input, typename Divider,typename Predicate, typename Solver, typename Combiner>
auto parallel_execution_omp::divide_conquer(
    Input && input,
    Divider && divide_op,
    Predicate && predicate_op,
    Solver && solve_op,
    Combiner && combine_op) const
{
  // Budget of additional workers: every configured thread except the caller.
  std::atomic<int> remaining_workers{concurrency_degree_-1};
  return divide_conquer(
      std::forward<Input>(input),
      std::forward<Divider>(divide_op),
      std::forward<Predicate>(predicate_op),
      std::forward<Solver>(solve_op),
      std::forward<Combiner>(combine_op),
      remaining_workers);
}
/// Public divide/conquer entry point (deprecated, predicate-less interface).
/// Sets up the shared thread budget and delegates to the recursive private
/// overload.
template <typename Input, typename Divider, typename Solver, typename Combiner>
auto parallel_execution_omp::divide_conquer(
    Input && input,
    Divider && divide_op,
    Solver && solve_op,
    Combiner && combine_op) const
{
  // Budget of additional workers: every configured thread except the caller.
  std::atomic<int> remaining_workers{concurrency_degree_-1};
  return divide_conquer(
      std::forward<Input>(input),
      std::forward<Divider>(divide_op),
      std::forward<Solver>(solve_op),
      std::forward<Combiner>(combine_op),
      remaining_workers);
}
// Pipeline driver: runs the generator in its own OpenMP task, feeding a
// queue of (optional value, sequence ticket) pairs consumed by the
// remaining stages via do_pipeline.
template <typename Generator, typename ... Transformers>
void parallel_execution_omp::pipeline(
    Generator && generate_op,
    Transformers && ... transform_ops) const
{
  using namespace std;
  using result_type = decay_t<typename result_of<Generator()>::type>;
  auto output_queue = make_queue<pair<result_type,long>>();

  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      #pragma omp task shared(generate_op,output_queue)
      {
        long order = 0;
        for (;;) {
          auto item = generate_op();
          // The item is pushed *before* the emptiness check: the final empty
          // item acts as the end-of-stream sentinel for downstream stages.
          output_queue.push(make_pair(item,order++)) ;
          if (!item) break;
        }
      }
      do_pipeline(output_queue,
          forward<Transformers>(transform_ops)...);
      #pragma omp taskwait
    }
  }
}
// PRIVATE MEMBERS
// Recursive divide/conquer (predicate interface). num_threads is the shared
// remaining-worker budget: once exhausted, recursion degrades to the
// sequential policy.
template <typename Input, typename Divider,typename Predicate, typename Solver, typename Combiner>
auto parallel_execution_omp::divide_conquer(
    Input && input,
    Divider && divide_op,
    Predicate && predicate_op,
    Solver && solve_op,
    Combiner && combine_op,
    std::atomic<int> & num_threads) const
{
  constexpr sequential_execution seq;
  // No workers left: solve the whole subproblem sequentially.
  if (num_threads.load()<=0) {
    return seq.divide_conquer(std::forward<Input>(input),
        std::forward<Divider>(divide_op),std::forward<Predicate>(predicate_op),
        std::forward<Solver>(solve_op),
        std::forward<Combiner>(combine_op));
  }

  // Base case of the recursion.
  if (predicate_op(input)) { return solve_op(std::forward<Input>(input)); }
  auto subproblems = divide_op(std::forward<Input>(input));

  using subresult_type =
      std::decay_t<typename std::result_of<Solver(Input)>::type>;
  // Partials for all subproblems except the first, which this thread solves
  // itself. NOTE(review): assumes divide_op yields at least one subproblem
  // and that subresult_type is default constructible — confirm.
  std::vector<subresult_type> partials(subproblems.size()-1);

  auto process_subproblems = [&,this](auto it, std::size_t div) {
    partials[div] = this->divide_conquer(std::forward<Input>(*it),
        std::forward<Divider>(divide_op), std::forward<Predicate>(predicate_op),
        std::forward<Solver>(solve_op),
        std::forward<Combiner>(combine_op), num_threads);
  };

  int division = 0;
  subresult_type subresult;

  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      // Spawn a task per subproblem while worker budget remains; the budget
      // is decremented after each spawn.
      auto i = subproblems.begin() + 1;
      while (i!=subproblems.end() && num_threads.load()>0) {
        #pragma omp task firstprivate(i,division) \
                shared(partials,divide_op,solve_op,combine_op,num_threads)
        {
          process_subproblems(i, division);
        }
        num_threads --;
        i++;
        division++;
      }

      // Budget exhausted: solve the remaining subproblems sequentially.
      while (i!=subproblems.end()) {
        partials[division] = seq.divide_conquer(std::forward<Input>(*i++),
            std::forward<Divider>(divide_op), std::forward<Predicate>(predicate_op),
            std::forward<Solver>(solve_op),
            std::forward<Combiner>(combine_op));
      }

      //Main thread works on the first subproblem.
      if (num_threads.load()>0) {
        subresult = divide_conquer(std::forward<Input>(*subproblems.begin()),
            std::forward<Divider>(divide_op),std::forward<Predicate>(predicate_op),
            std::forward<Solver>(solve_op),
            std::forward<Combiner>(combine_op), num_threads);
      }
      else {
        subresult = seq.divide_conquer(std::forward<Input>(*subproblems.begin()),
            std::forward<Divider>(divide_op), std::forward<Predicate>(predicate_op),
            std::forward<Solver>(solve_op),
            std::forward<Combiner>(combine_op));
      }
      // Wait for all spawned subproblem tasks before combining.
      #pragma omp taskwait
    }
  }
  // Combine the first subresult with the partials of the remaining ones.
  return seq.reduce(partials.begin(), partials.size(),
      std::forward<subresult_type>(subresult), combine_op);
}
// Recursive divide/conquer (deprecated predicate-less interface). The base
// case is detected after dividing: a single subproblem means "solve here".
template <typename Input, typename Divider, typename Solver, typename Combiner>
auto parallel_execution_omp::divide_conquer(
    Input && input,
    Divider && divide_op,
    Solver && solve_op,
    Combiner && combine_op,
    std::atomic<int> & num_threads) const
{
  constexpr sequential_execution seq;
  // No workers left: solve the whole subproblem sequentially.
  if (num_threads.load()<=0) {
    return seq.divide_conquer(std::forward<Input>(input),
        std::forward<Divider>(divide_op), std::forward<Solver>(solve_op),
        std::forward<Combiner>(combine_op));
  }

  auto subproblems = divide_op(std::forward<Input>(input));
  // Base case: the problem could not be divided any further.
  if (subproblems.size()<=1) { return solve_op(std::forward<Input>(input)); }

  using subresult_type =
      std::decay_t<typename std::result_of<Solver(Input)>::type>;
  // Partials for all subproblems except the first, which this thread solves
  // itself. NOTE(review): assumes subresult_type is default constructible.
  std::vector<subresult_type> partials(subproblems.size()-1);

  auto process_subproblems = [&,this](auto it, std::size_t div) {
    partials[div] = this->divide_conquer(std::forward<Input>(*it),
        std::forward<Divider>(divide_op), std::forward<Solver>(solve_op),
        std::forward<Combiner>(combine_op), num_threads);
  };

  int division = 0;
  subresult_type subresult;

  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      // Spawn a task per subproblem while worker budget remains; the budget
      // is decremented after each spawn.
      auto i = subproblems.begin() + 1;
      while (i!=subproblems.end() && num_threads.load()>0) {
        #pragma omp task firstprivate(i,division) \
                shared(partials,divide_op,solve_op,combine_op,num_threads)
        {
          process_subproblems(i, division);
        }
        num_threads --;
        i++;
        division++;
      }

      // Budget exhausted: solve the remaining subproblems sequentially.
      while (i!=subproblems.end()) {
        partials[division] = seq.divide_conquer(std::forward<Input>(*i++),
            std::forward<Divider>(divide_op), std::forward<Solver>(solve_op),
            std::forward<Combiner>(combine_op));
      }

      //Main thread works on the first subproblem.
      if (num_threads.load()>0) {
        subresult = divide_conquer(std::forward<Input>(*subproblems.begin()),
            std::forward<Divider>(divide_op), std::forward<Solver>(solve_op),
            std::forward<Combiner>(combine_op), num_threads);
      }
      else {
        subresult = seq.divide_conquer(std::forward<Input>(*subproblems.begin()),
           std::forward<Divider>(divide_op), std::forward<Solver>(solve_op),
           std::forward<Combiner>(combine_op));
      }
      // Wait for all spawned subproblem tasks before combining.
      #pragma omp taskwait
    }
  }
  // Combine the first subresult with the partials of the remaining ones.
  return seq.reduce(partials.begin(), partials.size(),
      std::forward<subresult_type>(subresult), combine_op);
}
// Terminal pipeline stage: consumes every item from the queue until the
// end-of-stream sentinel (empty optional). In ordered mode, items carry a
// sequence ticket and are replayed in ticket order, buffering any that
// arrive early.
template <typename Queue, typename Consumer,
          requires_no_pattern<Consumer>>
void parallel_execution_omp::do_pipeline(Queue & input_queue, Consumer && consume_op) const
{
  using namespace std;
  using input_type = typename Queue::value_type;

  if (!is_ordered()) {
    // Unordered mode: consume items as they arrive.
    for (;;) {
      auto item = input_queue.pop();
      if (!item.first) break;
      consume_op(*item.first);
    }
    return;
  }

  // Ordered mode: `current` is the next expected ticket; out-of-order items
  // are buffered in `elements` until their turn comes.
  vector<input_type> elements;
  long current = 0;
  auto item = input_queue.pop( );
  while (item.first) {
    if (current == item.second) {
      consume_op(*item.first);
      current ++;
    }
    else {
      elements.push_back(item);
    }
    // FIX: predicate previously took `auto x` by value, copying each
    // buffered (optional,ticket) pair on every comparison.
    auto it = find_if(elements.begin(), elements.end(),
       [&](const auto & x) { return x.second == current; });
    if (it != elements.end()) {
      consume_op(*it->first);
      elements.erase(it);
      current++;
    }
    item = input_queue.pop( );
  }
  // Drain remaining buffered items in ticket order.
  // NOTE(review): assumes every ticket below the largest buffered one was
  // produced; a missing ticket would make this loop spin forever — confirm
  // upstream stages never drop tickets.
  while (elements.size()>0) {
    auto it = find_if(elements.begin(), elements.end(),
       [&](const auto & x) { return x.second == current; });
    if (it != elements.end()) {
      consume_op(*it->first);
      elements.erase(it);
      current++;
    }
  }
}
/// Middle pipeline stage bridging two explicit queues: transforms each
/// popped value and forwards it, keeping its sequence ticket, until the
/// end-of-stream sentinel (empty optional) is seen. The sentinel itself is
/// not forwarded here.
template <typename Inqueue, typename Transformer, typename output_type,
            requires_no_pattern<Transformer>>
void parallel_execution_omp::do_pipeline(Inqueue & input_queue, Transformer && transform_op,
      mpmc_queue<output_type> & output_queue) const
{
  using namespace std;
#if __cplusplus < 201703L
  using namespace experimental;
#endif
  using wrapped_value_type = typename output_type::first_type::value_type;

  auto item = input_queue.pop();
  while (item.first) {
    wrapped_value_type result{transform_op(*item.first)};
    output_queue.push(make_pair(result, item.second));
    item = input_queue.pop();
  }
}
// Pipeline stage that delegates a sub-pipeline to another execution context:
// the context's own policy runs its transformer between this stage's input
// queue and the (possibly shared) output queue.
template <typename Queue, typename Execution, typename Transformer,
          template <typename, typename> class Context,
          typename ... OtherTransformers,
          requires_context<Context<Execution,Transformer>>>
void parallel_execution_omp::do_pipeline(Queue & input_queue,
    Context<Execution,Transformer> && context_op,
    OtherTransformers &&... other_ops) const
{
  using namespace std;
#if __cplusplus < 201703L
  using namespace experimental;
#endif

  using input_item_type = typename Queue::value_type;
  using input_item_value_type = typename input_item_type::first_type::value_type;

  using output_type = typename stage_return_type<input_item_value_type, Transformer>::type;
  using output_optional_type = optional<output_type>;
  using output_item_type = pair <output_optional_type, long> ;

  // Reuse an outer queue when available, otherwise build a fresh one.
  decltype(auto) output_queue =
    get_output_queue<output_item_type>(other_ops...);

  #pragma omp task shared(input_queue,context_op,output_queue)
  {
    // Run the nested context's pipeline, then publish the end-of-stream
    // sentinel for the downstream stages.
    context_op.execution_policy().pipeline(input_queue, context_op.transformer(), output_queue);
    output_queue.push(make_pair(output_optional_type{},-1));
  }

  do_pipeline(output_queue,
      forward<OtherTransformers>(other_ops)... );
  #pragma omp taskwait
}
// Middle pipeline stage for a plain callable: a task transforms each item,
// preserving its sequence ticket, and the calling thread recurses on the
// remaining stages.
template <typename Queue, typename Transformer, typename ... OtherTransformers,
          requires_no_pattern<Transformer>>
void parallel_execution_omp::do_pipeline(
    Queue & input_queue,
    Transformer && transform_op,
    OtherTransformers && ... other_ops) const
{
  using namespace std;
#if __cplusplus < 201703L
  using namespace experimental;
#endif

  using input_type = typename Queue::value_type;
  using input_value_type = typename input_type::first_type::value_type;
  using result_type = typename result_of<Transformer(input_value_type)>::type;
  using output_value_type = optional<result_type>;
  using output_type = pair<output_value_type,long>;

  // Reuse an outer queue when available, otherwise build a fresh one.
  decltype(auto) output_queue =
    get_output_queue<output_type>(other_ops...);

  #pragma omp task shared(transform_op, input_queue, output_queue)
  {
    for (;;) {
      auto item = input_queue.pop();
      if (!item.first) break;
      auto out = output_value_type{transform_op(*item.first)};
      output_queue.push(make_pair(out, item.second));
    }
    // Propagate the end-of-stream sentinel to the next stage.
    output_queue.push(make_pair(output_value_type{}, -1));
  }

  do_pipeline(output_queue,
      forward<OtherTransformers>(other_ops)...);
}
/**
\brief Terminal pipeline stage running a farm of side-effecting consumers.
Launches cardinality() OMP tasks; each drains the shared input queue and
applies the farm transformer. On seeing the sentinel, a worker re-queues it
so every sibling task also observes end-of-stream.
*/
template <typename Queue, typename FarmTransformer,
          template <typename> class Farm,
          requires_farm<Farm<FarmTransformer>>>
void parallel_execution_omp::do_pipeline(
    Queue & input_queue,
    Farm<FarmTransformer> && farm_obj) const
{
  using namespace std;
#if __cplusplus < 201703L
  using namespace experimental;
#endif
  const int workers = farm_obj.cardinality();
  for (int w=0; w<workers; ++w) {
    #pragma omp task shared(farm_obj,input_queue)
    {
      for (;;) {
        auto current = input_queue.pop();
        if (!current.first) {
          // Put the sentinel back for the remaining workers, then stop.
          input_queue.push(current);
          break;
        }
        farm_obj(*current.first);
      }
    }
  }
  #pragma omp taskwait
}
// Pipeline stage running a farm (replicated transformer) followed by more
// stages. cardinality() tasks consume the shared input queue concurrently;
// the last worker to finish emits the downstream sentinel.
template <typename Queue, typename FarmTransformer,
template <typename> class Farm,
typename ... OtherTransformers,
requires_farm<Farm<FarmTransformer>>>
void parallel_execution_omp::do_pipeline(
Queue & input_queue,
Farm<FarmTransformer> && farm_obj,
OtherTransformers && ... other_transform_ops) const
{
using namespace std;
#if __cplusplus < 201703L
using namespace experimental;
#endif
using input_type = typename Queue::value_type;
using input_value_type = typename input_type::first_type::value_type;
using result_type = typename stage_return_type<input_value_type, FarmTransformer>::type;
using output_optional_type = optional<result_type>;
using output_type = pair<output_optional_type,long>;
decltype(auto) output_queue =
get_output_queue<output_type>(other_transform_ops...);
atomic<int> done_threads{0};
int ntask = farm_obj.cardinality();
for (int i=0; i<farm_obj.cardinality(); ++i) {
#pragma omp task shared(done_threads,output_queue,farm_obj,input_queue,ntask)
{
// Each worker runs the farm transformer as its own single-stage pipeline.
do_pipeline(input_queue, farm_obj.transformer(), output_queue);
done_threads++;
if (done_threads == ntask){
// Last worker to see end-of-stream emits the downstream sentinel.
output_queue.push(make_pair(output_optional_type{}, -1));
}else{
// Re-inject an empty item so a sibling worker also sees end-of-stream.
input_queue.push(input_type{});
}
}
}
do_pipeline(output_queue, forward<OtherTransformers>(other_transform_ops)...);
#pragma omp taskwait
}
// Terminal overload for a filter with no downstream stage: nothing could
// consume the surviving items, so the stage is deliberately a no-op.
template <typename Queue, typename Predicate,
template <typename> class Filter,
requires_filter<Filter<Predicate>>>
void parallel_execution_omp::do_pipeline(
Queue &,
Filter<Predicate> &&) const
{
}
/**
\brief Pipeline stage for a filter followed by more stages.
In ordered mode a filter task forwards every sequence index (rejected
positions travel as empty optionals so no index is ever missing) and a
reorder task restores the original ordering, renumbering the survivors.
In unordered mode rejected items are simply dropped.

Fix: the drain loop of the reorder task no longer pops from filter_queue.
The drain only runs after the sentinel has been received and the upstream
filter task pushes nothing after its sentinel, so the old unconditional
pop would block forever whenever out-of-order items were still buffered.
*/
template <typename Queue, typename Predicate,
          template <typename> class Filter,
          typename ... OtherTransformers,
          requires_filter<Filter<Predicate>>>
void parallel_execution_omp::do_pipeline(
    Queue & input_queue,
    Filter<Predicate> && filter_obj,
    OtherTransformers && ... other_transform_ops) const
{
  using namespace std;
  using input_type = typename Queue::value_type;
  using input_value_type = typename input_type::first_type;
  auto filter_queue = make_queue<input_type>();
  if (is_ordered()) {
    auto filter_task = [&]() {
      {
        auto item{input_queue.pop()};
        while (item.first) {
          if(filter_obj(*item.first)) {
            filter_queue.push(item);
          }
          else {
            // Keep the index alive as an empty optional so the reorder
            // task can still advance past this position.
            filter_queue.push(make_pair(input_value_type{} ,item.second));
          }
          item = input_queue.pop();
        }
        filter_queue.push (make_pair(input_value_type{}, -1));
      }
    };
    decltype(auto) output_queue =
        get_output_queue<input_type>(other_transform_ops...);
    auto reorder_task = [&]() {
      vector<input_type> elements;  // items that arrived ahead of their turn
      int current = 0;              // next input index expected
      long order = 0;               // next output index to assign
      auto item = filter_queue.pop();
      for (;;) {
        if (!item.first && item.second == -1) break;
        if (item.second == current) {
          if (item.first) {
            output_queue.push(make_pair(item.first, order++));
          }
          current++;
        }
        else {
          elements.push_back(item);
        }
        // Release at most one buffered item that has become current.
        auto it = find_if(elements.begin(), elements.end(),
            [&](auto x) { return x.second == current; });
        if (it != elements.end()) {
          if (it->first) {
            output_queue.push(make_pair(it->first, order));
            order++;
          }
          elements.erase(it);
          current++;
        }
        item = filter_queue.pop();
      }
      // Sentinel received: every index has arrived, so flush the buffer
      // without touching filter_queue again (popping here would deadlock).
      while (elements.size() > 0) {
        auto it = find_if(elements.begin(), elements.end(),
            [&](auto x) { return x.second == current; });
        if (it != elements.end()) {
          if (it->first) {
            output_queue.push(make_pair(it->first, order));
            order++;
          }
          elements.erase(it);
          current++;
        }
      }
      output_queue.push(item);  // forward the sentinel downstream
    };
    #pragma omp task shared(filter_queue,filter_obj,input_queue)
    {
      filter_task();
    }
    #pragma omp task shared (output_queue,filter_queue)
    {
      reorder_task();
    }
    do_pipeline(output_queue,
        forward<OtherTransformers>(other_transform_ops)...);
    #pragma omp taskwait
  }
  else {
    auto filter_task = [&]() {
      auto item = input_queue.pop();
      while (item.first) {
        if (filter_obj(*item.first)) {
          filter_queue.push(item);
        }
        item = input_queue.pop();
      }
      filter_queue.push(make_pair(input_value_type{}, -1));
    };
    #pragma omp task shared(filter_queue,filter_obj,input_queue)
    {
      filter_task();
    }
    do_pipeline(filter_queue,
        std::forward<OtherTransformers>(other_transform_ops)...);
    #pragma omp taskwait
  }
}
// Pipeline stage for a windowed reduction followed by more stages.
// Items accumulate into the reduction window; each full window is reduced
// with a sequential policy and its value forwarded with a growing index.
template <typename Queue, typename Combiner, typename Identity,
template <typename C, typename I> class Reduce,
typename ... OtherTransformers,
requires_reduce<Reduce<Combiner,Identity>>>
void parallel_execution_omp::do_pipeline(
Queue && input_queue,
Reduce<Combiner,Identity> && reduce_obj,
OtherTransformers && ... other_transform_ops) const
{
using namespace std;
#if __cplusplus < 201703L
using namespace experimental;
#endif
using output_item_value_type = optional<decay_t<Identity>>;
using output_item_type = pair<output_item_value_type,long>;
decltype(auto) output_queue =
get_output_queue<output_item_type>(other_transform_ops...);
auto reduce_task = [&]() {
auto item{input_queue.pop()};
int order = 0;
while (item.first) {
reduce_obj.add_item(std::forward<Identity>(*item.first));
item = input_queue.pop();
if (reduce_obj.reduction_needed()) {
// Window is full: reduce it sequentially and forward the result.
constexpr sequential_execution seq;
auto red = reduce_obj.reduce_window(seq);
output_queue.push(make_pair(red, order++));
}
}
// NOTE(review): a final partially-filled window is not flushed here --
// items still buffered when the sentinel arrives are dropped; confirm
// this matches the behavior of the other back ends.
output_queue.push(make_pair(output_item_value_type{}, -1));
};
#pragma omp task shared(reduce_obj,input_queue, output_queue)
{
reduce_task();
}
do_pipeline(output_queue,
std::forward<OtherTransformers>(other_transform_ops)...);
#pragma omp taskwait
}
// Pipeline stage for an iteration pattern with a plain transformer body.
// Each item is transformed; if the predicate accepts the value it moves
// downstream, otherwise it is re-queued onto the input queue for another
// round.
template <typename Queue, typename Transformer, typename Predicate,
template <typename T, typename P> class Iteration,
typename ... OtherTransformers,
requires_iteration<Iteration<Transformer,Predicate>>,
requires_no_pattern<Transformer>>
void parallel_execution_omp::do_pipeline(
Queue & input_queue,
Iteration<Transformer,Predicate> && iteration_obj,
OtherTransformers && ... other_transform_ops) const
{
using namespace std;
#if __cplusplus < 201703L
using namespace experimental;
#endif
using input_item_type = typename decay_t<Queue>::value_type;
decltype(auto) output_queue =
get_output_queue<input_item_type>(other_transform_ops...);
auto iteration_task = [&]() {
for (;;) {
auto item = input_queue.pop();
if (!item.first) break;
auto value = iteration_obj.transform(*item.first);
auto new_item = input_item_type{value,item.second};
if (iteration_obj.predicate(value)) {
output_queue.push(new_item);
}
else {
// Not converged yet: send it around for another iteration.
input_queue.push(new_item);
}
}
// Sentinel seen: keep iterating items still circulating on the input
// queue until all of them satisfy the predicate.
// NOTE(review): this drain relies on being the only remaining consumer
// of input_queue at this point -- confirm.
while (!input_queue.empty()) {
auto item = input_queue.pop();
auto value = iteration_obj.transform(*item.first);
auto new_item = input_item_type{value,item.second};
if (iteration_obj.predicate(value)) {
output_queue.push(new_item);
}
else {
input_queue.push(new_item);
}
}
output_queue.push(input_item_type{{},-1});
};
#pragma omp task shared(iteration_obj,input_queue,output_queue)
{
iteration_task();
}
do_pipeline(output_queue,
std::forward<OtherTransformers>(other_transform_ops)...);
#pragma omp taskwait
}
// Pipeline stage for an iteration whose body is itself a pipeline.
// Not supported by the OpenMP back end: the dependent static_assert fires
// whenever this overload is instantiated.
template <typename Queue, typename Transformer, typename Predicate,
template <typename T, typename P> class Iteration,
typename ... OtherTransformers,
requires_iteration<Iteration<Transformer,Predicate>>,
requires_pipeline<Transformer>>
void parallel_execution_omp::do_pipeline(
Queue &,
Iteration<Transformer,Predicate> &&,
OtherTransformers && ...) const
{
static_assert(!is_pipeline<Transformer>, "Not implemented");
}
/**
\brief Pipeline stage that is itself a pipeline.
Flattens the nested pipeline's stages into the current stage list and
dispatches the combined sequence through do_pipeline_nested.
*/
template <typename Queue, typename ... Transformers,
          template <typename...> class Pipeline,
          typename ... OtherTransformers,
          requires_pipeline<Pipeline<Transformers...>>>
void parallel_execution_omp::do_pipeline(
    Queue & input_queue,
    Pipeline<Transformers...> && pipeline_obj,
    OtherTransformers && ... other_transform_ops) const
{
  // Total number of stages after splicing the nested pipeline in place.
  constexpr std::size_t stage_count =
      sizeof...(Transformers) + sizeof...(OtherTransformers);
  do_pipeline_nested(
      input_queue,
      std::tuple_cat(pipeline_obj.transformers(),
          std::forward_as_tuple(other_transform_ops...)),
      std::make_index_sequence<stage_count>());
}
// Expands a flattened tuple of pipeline stages back into a parameter pack
// and dispatches to the regular do_pipeline overloads.
template <typename Queue, typename ... Transformers,
std::size_t ... I>
void parallel_execution_omp::do_pipeline_nested(
Queue & input_queue,
std::tuple<Transformers...> && transform_ops,
std::index_sequence<I...>) const
{
do_pipeline(input_queue,
std::forward<Transformers>(std::get<I>(transform_ops))...);
}
// Recursion terminator: when only queues remain there are no stages left
// to build, so nothing is done.
template<typename T, typename... Others>
void parallel_execution_omp::do_pipeline(mpmc_queue <T> &, mpmc_queue <T> &, Others &&...) const
{ }
} // end namespace grppi
#else // GRPPI_OMP undefined
namespace grppi {
/// Parallel execution policy.
/// Empty type if GRPPI_OMP disabled.
/// Stand-in so code can still name the type when the OpenMP back end is
/// compiled out; it provides no operations.
struct parallel_execution_omp {};
/**
\brief Metafunction that determines if type E is parallel_execution_omp
This metafunction evaluates to false if GRPPI_OMP is disabled.
\tparam Execution policy type.
*/
template <typename E>
constexpr bool is_parallel_execution_omp() {
return false;
}
}
#endif // GRPPI_OMP
#endif
|
loop_alignment_par.c | #include <omp.h>
/*
 * Aligned-loop version of the a/b/c/d stencil (loop-alignment demo).
 * S1/S2/S3 are peeled and realigned so that, within one inner iteration,
 * every c element is read before it is written and no value flows between
 * inner iterations, allowing the inner loop to run in parallel.
 * NOTE(review): assumes every row holds at least N+1 columns, since
 * b[i][N] and c[i][N-1] are touched - confirm against the allocator.
 */
void compute(unsigned long **a, unsigned long **b, unsigned long **c, unsigned long **d, int N)
{
    int row, col;
    #pragma omp parallel for
    for (row = 1; row < N; row++)
    {
        a[row][1] = 3*b[row][1];                          /* S1 (peeled head) */
        c[row][0] = a[row][1] * d[row][1];                /* S3 (peeled head) */
        #pragma omp parallel for
        for (col = 2; col < N; col++)
        {
            b[row][col]   = c[row][col-1]*c[row][col-1];  /* S2 */
            a[row][col]   = 3*b[row][col];                /* S1 */
            c[row][col-1] = a[row][col] * d[row][col];    /* S3 */
        }
        b[row][N] = c[row][N-1]*c[row][N-1];              /* S2 (peeled tail) */
    }
}
|
fx.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/fx-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Typedef declarations.
*/
/*
  Single-byte tokens that AcquireFxInfo() substitutes for multi-character
  operators, so the expression evaluator only ever sees one-character
  operators.  The first value (0xd9) is chosen above the printable ASCII
  range; subsequent enumerators continue implicitly (0xda, 0xdb, ...).
*/
typedef enum
{
BitwiseAndAssignmentOperator = 0xd9U,  /* &= */
BitwiseOrAssignmentOperator,  /* |= */
LeftShiftAssignmentOperator,  /* <<= */
RightShiftAssignmentOperator,  /* >>= */
PowerAssignmentOperator,  /* ^= */
ModuloAssignmentOperator,  /* %= */
PlusAssignmentOperator,  /* += */
SubtractAssignmentOperator,  /* -= */
MultiplyAssignmentOperator,  /* *= */
DivideAssignmentOperator,  /* /= */
IncrementAssignmentOperator,  /* ++ */
DecrementAssignmentOperator,  /* -- */
LeftShiftOperator,  /* << */
RightShiftOperator,  /* >> */
LessThanEqualOperator,  /* <= */
GreaterThanEqualOperator,  /* >= */
EqualOperator,  /* == */
NotEqualOperator,  /* != */
LogicalAndOperator,  /* && */
LogicalOrOperator,  /* || */
ExponentialNotation  /* ** */
} FxOperator;
struct _FxInfo
{
const Image
*images;  /* image list the expression is evaluated against (not owned) */
char
*expression;  /* pre-processed expression text (owned) */
FILE
*file;  /* output stream; set to stderr at acquire time */
SplayTreeInfo
*colors,  /* cache of color names resolved to PixelInfo values */
*symbols;  /* symbol table mapping name -> double (owned values) */
CacheView
**view;  /* one virtual cache view per image in the list */
RandomInfo
*random_info;  /* random number source */
ExceptionInfo
*exception;  /* private exception sink for internal operations */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireFxInfo() allocates the FxInfo structure.
%
% The format of the AcquireFxInfo method is:
%
% FxInfo *AcquireFxInfo(Image *images,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o expression: the expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression,
ExceptionInfo *exception)
{
char
fx_op[2];
const Image
*next;
FxInfo
*fx_info;
register ssize_t
i;
fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info));
(void) memset(fx_info,0,sizeof(*fx_info));
fx_info->exception=AcquireExceptionInfo();
fx_info->images=images;
fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
RelinquishMagickMemory);
fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
RelinquishMagickMemory);
/*
  One virtual cache view per image in the sequence, indexed by list
  position (see FxGetSymbol).
*/
fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
fx_info->images),sizeof(*fx_info->view));
if (fx_info->view == (CacheView **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
i=0;
next=GetFirstImageInList(fx_info->images);
for ( ; next != (Image *) NULL; next=next->next)
{
fx_info->view[i]=AcquireVirtualCacheView(next,exception);
i++;
}
fx_info->random_info=AcquireRandomInfo();
fx_info->expression=ConstantString(expression);
fx_info->file=stderr;
(void) SubstituteString(&fx_info->expression," ",""); /* compact string */
/*
  Convert compound to simple operators.
  Each multi-character operator is collapsed into the single-byte token
  defined by the FxOperator enum so the parser handles one byte at a time.
  The order matters: three-character forms (e.g. "<<=") must be rewritten
  before their two-character prefixes (e.g. "<<").
*/
fx_op[1]='\0';
*fx_op=(char) BitwiseAndAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"&=",fx_op);
*fx_op=(char) BitwiseOrAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"|=",fx_op);
*fx_op=(char) LeftShiftAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"<<=",fx_op);
*fx_op=(char) RightShiftAssignmentOperator;
(void) SubstituteString(&fx_info->expression,">>=",fx_op);
*fx_op=(char) PowerAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"^=",fx_op);
*fx_op=(char) ModuloAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"%=",fx_op);
*fx_op=(char) PlusAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"+=",fx_op);
*fx_op=(char) SubtractAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"-=",fx_op);
*fx_op=(char) MultiplyAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"*=",fx_op);
*fx_op=(char) DivideAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"/=",fx_op);
*fx_op=(char) IncrementAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"++",fx_op);
*fx_op=(char) DecrementAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"--",fx_op);
*fx_op=(char) LeftShiftOperator;
(void) SubstituteString(&fx_info->expression,"<<",fx_op);
*fx_op=(char) RightShiftOperator;
(void) SubstituteString(&fx_info->expression,">>",fx_op);
*fx_op=(char) LessThanEqualOperator;
(void) SubstituteString(&fx_info->expression,"<=",fx_op);
*fx_op=(char) GreaterThanEqualOperator;
(void) SubstituteString(&fx_info->expression,">=",fx_op);
*fx_op=(char) EqualOperator;
(void) SubstituteString(&fx_info->expression,"==",fx_op);
*fx_op=(char) NotEqualOperator;
(void) SubstituteString(&fx_info->expression,"!=",fx_op);
*fx_op=(char) LogicalAndOperator;
(void) SubstituteString(&fx_info->expression,"&&",fx_op);
*fx_op=(char) LogicalOrOperator;
(void) SubstituteString(&fx_info->expression,"||",fx_op);
*fx_op=(char) ExponentialNotation;
(void) SubstituteString(&fx_info->expression,"**",fx_op);
/*
  Force right-to-left associativity for unary negation:
  rewrite "-x" as "-1.0*x", then undo the rewrite where it would corrupt
  exponent forms ("^-" and scientific notation "E-"/"e-").
*/
(void) SubstituteString(&fx_info->expression,"-","-1.0*");
(void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
(void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
(void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
% The format of the DestroyFxInfo method is:
%
% ImageInfo *DestroyFxInfo(ImageInfo *fx_info)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
*/
MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  register ssize_t
    i;

  /*
    Release every resource acquired by AcquireFxInfo(), then the FxInfo
    structure itself.  Always returns NULL so callers can reset their
    pointer in one statement.
  */
  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  i=(ssize_t) GetImageListLength(fx_info->images);
  while (--i >= 0)
    fx_info->view[i]=DestroyCacheView(fx_info->view[i]);
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F x E v a l u a t e C h a n n e l E x p r e s s i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxEvaluateChannelExpression() evaluates an expression and returns the
% results.
%
% The format of the FxEvaluateExpression method is:
%
% double FxEvaluateChannelExpression(FxInfo *fx_info,
% const PixelChannel channel,const ssize_t x,const ssize_t y,
% double *alpha,Exceptioninfo *exception)
% double FxEvaluateExpression(FxInfo *fx_info,
% double *alpha,Exceptioninfo *exception)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
% o channel: the channel.
%
% o x,y: the pixel position.
%
% o alpha: the result.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline const double *GetFxSymbolValue(FxInfo *magick_restrict fx_info,
  const char *symbol)
{
  /*
    Look up a numeric symbol in the symbol table; NULL when undefined.
  */
  void
    *node;

  node=GetValueFromSplayTree(fx_info->symbols,symbol);
  return((const double *) node);
}
static inline MagickBooleanType SetFxSymbolValue(
  FxInfo *magick_restrict fx_info,const char *magick_restrict symbol,
  double const value)
{
  double
    *slot;

  /*
    Update the symbol in place when it already exists; otherwise allocate
    a fresh entry (owned by the splay tree) and insert it.
  */
  slot=(double *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (slot == (double *) NULL)
    {
      slot=(double *) AcquireQuantumMemory(1,sizeof(*slot));
      if (slot == (double *) NULL)
        {
          (void) ThrowMagickException(fx_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            fx_info->images->filename);
          return(MagickFalse);
        }
      *slot=value;
      return(AddValueToSplayTree(fx_info->symbols,ConstantString(symbol),
        slot));
    }
  *slot=value;
  return(MagickTrue);
}
/*
  FxChannelStatistics() returns an image statistic (depth, kurtosis,
  maxima, mean, minima, skewness, standard_deviation) scaled by
  QuantumScale.  The symbol may carry a ".channel" suffix to select a
  specific channel.  Results are cached in fx_info->symbols under a key
  that encodes the image address, the channel and the symbol, so each
  statistic is computed at most once per image/channel.
*/
static double FxChannelStatistics(FxInfo *fx_info,Image *image,
PixelChannel channel,const char *symbol,ExceptionInfo *exception)
{
ChannelType
channel_mask;
char
key[MagickPathExtent];
const double
*value;
double
statistic;
register const char
*p;
channel_mask=UndefinedChannel;
/* A ".channel" suffix narrows the statistic to that pixel channel. */
for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
if (*p == '.')
{
ssize_t
option;
option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1);
if (option >= 0)
{
channel=(PixelChannel) option;
/* Temporarily restrict the image to the requested channel. */
channel_mask=SetPixelChannelMask(image,(ChannelType)
(1UL << channel));
}
}
/* Cache key: image address + channel + symbol text. */
(void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image,
(double) channel,symbol);
value=GetFxSymbolValue(fx_info,key);
if (value != (const double *) NULL)
{
/* Cache hit: restore the channel mask and return the scaled value. */
if (channel_mask != UndefinedChannel)
(void) SetPixelChannelMask(image,channel_mask);
return(QuantumScale*(*value));
}
statistic=0.0;
if (LocaleNCompare(symbol,"depth",5) == 0)
{
size_t
depth;
depth=GetImageDepth(image,exception);
statistic=(double) depth;
}
if (LocaleNCompare(symbol,"kurtosis",8) == 0)
{
double
kurtosis,
skewness;
(void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
statistic=kurtosis;
}
if (LocaleNCompare(symbol,"maxima",6) == 0)
{
double
maxima,
minima;
(void) GetImageRange(image,&minima,&maxima,exception);
statistic=maxima;
}
if (LocaleNCompare(symbol,"mean",4) == 0)
{
double
mean,
standard_deviation;
(void) GetImageMean(image,&mean,&standard_deviation,exception);
statistic=mean;
}
if (LocaleNCompare(symbol,"minima",6) == 0)
{
double
maxima,
minima;
(void) GetImageRange(image,&minima,&maxima,exception);
statistic=minima;
}
if (LocaleNCompare(symbol,"skewness",8) == 0)
{
double
kurtosis,
skewness;
(void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
statistic=skewness;
}
if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
{
double
mean,
standard_deviation;
(void) GetImageMean(image,&mean,&standard_deviation,exception);
statistic=standard_deviation;
}
if (channel_mask != UndefinedChannel)
(void) SetPixelChannelMask(image,channel_mask);
/* The cached value is unscaled; scaling happens on every return. */
if (SetFxSymbolValue(fx_info,key,statistic) == MagickFalse)
return(0.0);
return(QuantumScale*statistic);
}
static double
FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t,
const ssize_t,const char *,const size_t,double *,ExceptionInfo *);
/*
  IsFxFunction() returns MagickTrue when expression begins with the
  function identifier name (length characters) and at least one character
  follows it.  The trailing character must not be whitespace unless it is
  '('.

  Fix: the trailing character is widened through unsigned char before the
  ctype call.  Substituted fx expressions contain operator bytes >= 0xd9
  (see FxOperator), which are negative as plain char; passing a negative
  value other than EOF to isspace() is undefined behavior.  This matches
  the idiom used elsewhere in this file, e.g. isalpha((int) ((unsigned
  char) ...)).
*/
static inline MagickBooleanType IsFxFunction(const char *expression,
  const char *name,const size_t length)
{
  int
    c;

  register size_t
    i;

  for (i=0; i <= length; i++)
    if (expression[i] == '\0')
      return(MagickFalse);
  c=expression[length];
  if ((LocaleNCompare(expression,name,length) == 0) &&
      ((isspace((int) ((unsigned char) c)) == 0) || (c == '(')))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  Greatest common divisor by the iterative Euclidean algorithm.
*/
static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta)
{
  while (beta != 0)
  {
    MagickOffsetType
      residue;

    residue=alpha % beta;
    alpha=beta;
    beta=residue;
  }
  return(alpha);
}
/*
  FxSubexpression() scans past a parenthesized subexpression and returns a
  pointer to its closing ')'; an exception is raised when the parentheses
  are unbalanced (the pointer to the terminating '\0' is returned).
*/
static inline const char *FxSubexpression(const char *expression,
  ExceptionInfo *exception)
{
  const char
    *p;

  register ssize_t
    depth;

  depth=0;
  p=expression;
  while (*p != '\0')
  {
    if ((depth == 1) && (*p == ')'))
      break;
    if (*p == '(')
      depth++;
    else
      if (*p == ')')
        depth--;
    p++;
  }
  if (*p == '\0')
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "UnbalancedParenthesis","`%s'",expression);
  return(p);
}
/*
  FxGetSymbol() resolves one symbol of an fx expression at pixel (x,y):
  image selectors (s, u, v, optionally with an [index] subexpression),
  pixel addressing (p{x,y} absolute, p[dx,dy] relative), color-name
  constants, per-channel letters (r, g, b, a, c, m, y, k, o), image
  attributes (w, h, n, t, z, page.*, resolution.*, printsize.*, quality,
  extent), colorimetric terms (intensity, hue, saturation, lightness,
  luma, luminance) and channel statistics.  Unknown names fall back to the
  user symbol table; an undefined symbol raises an exception and yields 0.
*/
static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel,
const ssize_t x,const ssize_t y,const char *expression,const size_t depth,
ExceptionInfo *exception)
{
char
*q,
symbol[MagickPathExtent];
const char
*p;
const double
*value;
double
alpha,
beta;
Image
*image;
MagickBooleanType
status;
PixelInfo
pixel;
PointInfo
point;
register ssize_t
i;
size_t
level;
p=expression;
i=GetImageIndexInList(fx_info->images);
level=0;
/* Default sampling point: the current pixel. */
point.x=(double) x;
point.y=(double) y;
if (isalpha((int) ((unsigned char) *(p+1))) == 0)
{
char
*subexpression;
subexpression=AcquireString(expression);
/* Image selector: s = current image, u = first, v = second; an optional
   [expr] subexpression evaluates to an explicit list index. */
if (strchr("suv",(int) *p) != (char *) NULL)
{
switch (*p)
{
case 's':
default:
{
i=GetImageIndexInList(fx_info->images);
break;
}
case 'u': i=0; break;
case 'v': i=1; break;
}
p++;
if (*p == '[')
{
level++;
q=subexpression;
for (p++; *p != '\0'; )
{
if (*p == '[')
level++;
else
if (*p == ']')
{
level--;
if (level == 0)
break;
}
*q++=(*p++);
}
*q='\0';
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
depth,&beta,exception);
i=(ssize_t) alpha;
if (*p != '\0')
p++;
}
if (*p == '.')
p++;
}
/* Pixel addressing: p{x,y} is absolute, p[dx,dy] is relative to (x,y). */
if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
{
p++;
if (*p == '{')
{
level++;
q=subexpression;
for (p++; *p != '\0'; )
{
if (*p == '{')
level++;
else
if (*p == '}')
{
level--;
if (level == 0)
break;
}
*q++=(*p++);
}
*q='\0';
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
depth,&beta,exception);
point.x=alpha;
point.y=beta;
if (*p != '\0')
p++;
}
else
if (*p == '[')
{
level++;
q=subexpression;
for (p++; *p != '\0'; )
{
if (*p == '[')
level++;
else
if (*p == ']')
{
level--;
if (level == 0)
break;
}
*q++=(*p++);
}
*q='\0';
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
depth,&beta,exception);
point.x+=alpha;
point.y+=beta;
if (*p != '\0')
p++;
}
if (*p == '.')
p++;
}
subexpression=DestroyString(subexpression);
}
image=GetImageFromList(fx_info->images,i);
if (image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"NoSuchImage","`%s'",expression);
return(0.0);
}
i=GetImageIndexInList(image);
GetPixelInfo(image,&pixel);
/* Sample the selected image at the (possibly fractional) point. */
status=InterpolatePixelInfo(image,fx_info->view[i],image->interpolate,
point.x,point.y,&pixel,exception);
(void) status;
/* A remaining name of 3+ characters (other than the colorimetric
   keywords) may be a color constant, e.g. p[1,1].red or "violet". */
if ((*p != '\0') && (*(p+1) != '\0') && (*(p+2) != '\0') &&
(LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) &&
(LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) &&
(LocaleCompare(p,"saturation") != 0) &&
(LocaleCompare(p,"lightness") != 0))
{
char
name[MagickPathExtent];
size_t
length;
(void) CopyMagickString(name,p,MagickPathExtent);
length=strlen(name);
for (q=name+length-1; q > name; q--)
{
if (*q == ')')
break;
if (*q == '.')
{
/* Trim a trailing channel suffix such as ".red". */
*q='\0';
break;
}
}
q=name;
if ((*q != '\0') && (*(q+1) != '\0') && (*(q+2) != '\0') &&
(GetFxSymbolValue(fx_info,name) == (const double *) NULL))
{
PixelInfo
*color;
/* Resolved color names are cached in fx_info->colors. */
color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name);
if (color != (PixelInfo *) NULL)
{
pixel=(*color);
p+=length;
}
else
{
MagickBooleanType
status;
status=QueryColorCompliance(name,AllCompliance,&pixel,
fx_info->exception);
if (status != MagickFalse)
{
(void) AddValueToSplayTree(fx_info->colors,
ConstantString(name),ClonePixelInfo(&pixel));
p+=length;
}
}
}
}
(void) CopyMagickString(symbol,p,MagickPathExtent);
StripString(symbol);
/* Bare pixel reference (no symbol left): return the current channel. */
if (*symbol == '\0')
{
switch (channel)
{
case RedPixelChannel: return(QuantumScale*pixel.red);
case GreenPixelChannel: return(QuantumScale*pixel.green);
case BluePixelChannel: return(QuantumScale*pixel.blue);
case BlackPixelChannel:
{
if (image->colorspace != CMYKColorspace)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ImageError,"ColorSeparatedImageRequired","`%s'",
image->filename);
return(0.0);
}
return(QuantumScale*pixel.black);
}
case AlphaPixelChannel:
{
if (pixel.alpha_trait == UndefinedPixelTrait)
return(1.0);
alpha=(double) (QuantumScale*pixel.alpha);
return(alpha);
}
case CompositePixelChannel:
{
Quantum
quantum_pixel[MaxPixelChannels];
SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
}
case IndexPixelChannel:
return(0.0);
default:
break;
}
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnableToParseExpression","`%s'",p);
return(0.0);
}
/* Named symbols, dispatched on the first character. */
switch (*symbol)
{
case 'A':
case 'a':
{
if (LocaleCompare(symbol,"a") == 0)
return((QuantumScale*pixel.alpha));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(symbol,"b") == 0)
return(QuantumScale*pixel.blue);
break;
}
case 'C':
case 'c':
{
if (IsFxFunction(symbol,"channel",7) != MagickFalse)
{
GeometryInfo
channel_info;
MagickStatusType
flags;
/* channel(r,g,b,k,a): pick the geometry field matching the
   current channel; CMYK images use a different field order. */
flags=ParseGeometry(symbol+7,&channel_info);
if (image->colorspace == CMYKColorspace)
switch (channel)
{
case CyanPixelChannel:
{
if ((flags & RhoValue) == 0)
return(0.0);
return(channel_info.rho);
}
case MagentaPixelChannel:
{
if ((flags & SigmaValue) == 0)
return(0.0);
return(channel_info.sigma);
}
case YellowPixelChannel:
{
if ((flags & XiValue) == 0)
return(0.0);
return(channel_info.xi);
}
case BlackPixelChannel:
{
if ((flags & PsiValue) == 0)
return(0.0);
return(channel_info.psi);
}
case AlphaPixelChannel:
{
if ((flags & ChiValue) == 0)
return(0.0);
return(channel_info.chi);
}
default:
return(0.0);
}
switch (channel)
{
case RedPixelChannel:
{
if ((flags & RhoValue) == 0)
return(0.0);
return(channel_info.rho);
}
case GreenPixelChannel:
{
if ((flags & SigmaValue) == 0)
return(0.0);
return(channel_info.sigma);
}
case BluePixelChannel:
{
if ((flags & XiValue) == 0)
return(0.0);
return(channel_info.xi);
}
case BlackPixelChannel:
{
if ((flags & ChiValue) == 0)
return(0.0);
return(channel_info.chi);
}
case AlphaPixelChannel:
{
if ((flags & PsiValue) == 0)
return(0.0);
return(channel_info.psi);
}
default:
return(0.0);
}
}
if (LocaleCompare(symbol,"c") == 0)
return(QuantumScale*pixel.red);
break;
}
case 'D':
case 'd':
{
if (LocaleNCompare(symbol,"depth",5) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(symbol,"extent") == 0)
{
if (image->extent != 0)
return((double) image->extent);
return((double) GetBlobSize(image));
}
break;
}
case 'G':
case 'g':
{
if (LocaleCompare(symbol,"g") == 0)
return(QuantumScale*pixel.green);
break;
}
case 'K':
case 'k':
{
if (LocaleNCompare(symbol,"kurtosis",8) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleCompare(symbol,"k") == 0)
{
if (image->colorspace != CMYKColorspace)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ColorSeparatedImageRequired","`%s'",
image->filename);
return(0.0);
}
return(QuantumScale*pixel.black);
}
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(symbol,"h") == 0)
return((double) image->rows);
if (LocaleCompare(symbol,"hue") == 0)
{
double
hue,
lightness,
saturation;
ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
&lightness);
return(hue);
}
break;
}
case 'I':
case 'i':
{
if ((LocaleCompare(symbol,"image.depth") == 0) ||
(LocaleCompare(symbol,"image.minima") == 0) ||
(LocaleCompare(symbol,"image.maxima") == 0) ||
(LocaleCompare(symbol,"image.mean") == 0) ||
(LocaleCompare(symbol,"image.kurtosis") == 0) ||
(LocaleCompare(symbol,"image.skewness") == 0) ||
(LocaleCompare(symbol,"image.standard_deviation") == 0))
return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
if (LocaleCompare(symbol,"image.resolution.x") == 0)
return(image->resolution.x);
if (LocaleCompare(symbol,"image.resolution.y") == 0)
return(image->resolution.y);
if (LocaleCompare(symbol,"intensity") == 0)
{
Quantum
quantum_pixel[MaxPixelChannels];
SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
}
if (LocaleCompare(symbol,"i") == 0)
return((double) x);
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(symbol,"j") == 0)
return((double) y);
break;
}
case 'L':
case 'l':
{
if (LocaleCompare(symbol,"lightness") == 0)
{
double
hue,
lightness,
saturation;
ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
&lightness);
return(lightness);
}
if (LocaleCompare(symbol,"luma") == 0)
{
double
luma;
luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
return(QuantumScale*luma);
}
if (LocaleCompare(symbol,"luminance") == 0)
{
double
luminence;
luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
return(QuantumScale*luminence);
}
break;
}
case 'M':
case 'm':
{
if (LocaleNCompare(symbol,"maxima",6) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"mean",4) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"minima",6) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleCompare(symbol,"m") == 0)
return(QuantumScale*pixel.green);
break;
}
case 'N':
case 'n':
{
if (LocaleCompare(symbol,"n") == 0)
return((double) GetImageListLength(fx_info->images));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(symbol,"o") == 0)
return(QuantumScale*pixel.alpha);
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(symbol,"page.height") == 0)
return((double) image->page.height);
if (LocaleCompare(symbol,"page.width") == 0)
return((double) image->page.width);
if (LocaleCompare(symbol,"page.x") == 0)
return((double) image->page.x);
if (LocaleCompare(symbol,"page.y") == 0)
return((double) image->page.y);
if (LocaleCompare(symbol,"printsize.x") == 0)
return(PerceptibleReciprocal(image->resolution.x)*image->columns);
if (LocaleCompare(symbol,"printsize.y") == 0)
return(PerceptibleReciprocal(image->resolution.y)*image->rows);
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(symbol,"quality") == 0)
return((double) image->quality);
break;
}
case 'R':
case 'r':
{
if (LocaleCompare(symbol,"resolution.x") == 0)
return(image->resolution.x);
if (LocaleCompare(symbol,"resolution.y") == 0)
return(image->resolution.y);
if (LocaleCompare(symbol,"r") == 0)
return(QuantumScale*pixel.red);
break;
}
case 'S':
case 's':
{
if (LocaleCompare(symbol,"saturation") == 0)
{
double
hue,
lightness,
saturation;
ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
&lightness);
return(saturation);
}
if (LocaleNCompare(symbol,"skewness",8) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
break;
}
case 'T':
case 't':
{
if (LocaleCompare(symbol,"t") == 0)
return((double) GetImageIndexInList(fx_info->images));
break;
}
case 'W':
case 'w':
{
if (LocaleCompare(symbol,"w") == 0)
return((double) image->columns);
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(symbol,"y") == 0)
return(QuantumScale*pixel.blue);
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(symbol,"z") == 0)
return((double) GetImageDepth(image,fx_info->exception));
break;
}
default:
break;
}
/* Fall back to the user-defined symbol table; undefined symbols raise
   an exception and are defined as 0 to suppress repeated errors. */
value=GetFxSymbolValue(fx_info,symbol);
if (value != (const double *) NULL)
return(*value);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UndefinedVariable","`%s'",symbol);
(void) SetFxSymbolValue(fx_info,symbol,0.0);
return(0.0);
}
/*
  FxOperatorPrecedence() scans an FX expression and returns a pointer to the
  operator at which the expression should be split for recursive evaluation,
  or NULL if no binary/ternary operator is found at nesting level 0.

  The FxPrecedence enum is ordered from tightest-binding (low enumerator) to
  loosest-binding (high enumerator); the scan keeps the loosest operator seen
  so far in 'target'.  Left-to-right associative operators use '>=' so the
  LAST occurrence wins; right-to-left associative operators (unary complement,
  ternary, assignment) use '>' so the FIRST occurrence wins.

  Parameters:
    expression - the NUL-terminated expression to scan.
    exception  - error sink, passed through to FxSubexpression() when a
                 parenthesized subexpression is skipped over.

  'c' always holds the PREVIOUS character scanned (-1 before the first one);
  it is consulted to disambiguate context-sensitive tokens such as unary vs
  binary '+'/'-' and scientific-notation exponents.
*/
static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    ExponentialNotationPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  } FxPrecedence;

  FxPrecedence
    precedence,
    target;

  register const char
    *subexpression;  /* best split point found so far (loosest operator) */

  register int
    c;  /* previous character scanned; -1 before the first iteration */

  size_t
    level;  /* '{'/'['..'}'/']' nesting depth; operators only count at 0 */

  c=(-1);
  level=0;
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while ((c != '\0') && (*expression != '\0'))
  {
    precedence=UndefinedPrecedence;
    /*
      Skip whitespace.  NOTE(review): a previous character of '@' is also
      treated as skippable here — presumably so a single-character '@'
      (exponent-variant) operator does not re-trigger; confirm intent.
    */
    if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /*
      First switch: step over multi-character function names whose spelling
      would otherwise be misread as operators below (e.g. the '2' in "atan2",
      the sign in "1.0E+2"), so the second switch only ever sees true
      operator characters.
    */
    switch (*expression)
    {
      case 'A':
      case 'a':
      {
#if defined(MAGICKCORE_HAVE_ACOSH)
        if (IsFxFunction(expression,"acosh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
        if (IsFxFunction(expression,"asinh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
        if (IsFxFunction(expression,"atanh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
        if (IsFxFunction(expression,"atan2",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
        break;
      }
      case 'E':
      case 'e':
      {
        /*
          Scientific notation: only when the previous character was a digit
          (e.g. "1E+6"), so the 'E' is part of a numeric literal, not a
          symbol.
        */
        if ((isdigit(c) != 0) &&
            ((LocaleNCompare(expression,"E+",2) == 0) ||
             (LocaleNCompare(expression,"E-",2) == 0)))
          {
            expression+=2;  /* scientific notation */
            break;
          }
      }
      /*
        NOTE(review): no break above — a non-exponent 'E'/'e' falls through
        into the 'J' case.  Benign (the "j0"/"j1" tests cannot match an
        expression starting with 'e'), but confirm it is intentional.
      */
      case 'J':
      case 'j':
      {
        if ((IsFxFunction(expression,"j0",2) != MagickFalse) ||
            (IsFxFunction(expression,"j1",2) != MagickFalse))
          {
            expression+=2;
            break;
          }
        break;
      }
      case '#':
      {
        /* hex color literal (#rrggbb...): skip its hex digits wholesale */
        while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
          expression++;
        break;
      }
      default:
        break;
    }
    /*
      Track brace/bracket nesting from the PREVIOUS character; operators are
      only considered when we are back at level 0.
    */
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    if (level == 0)
      switch ((unsigned char) *expression)
      {
        case '~':
        case '!':
        {
          precedence=BitwiseComplementPrecedence;
          break;
        }
        case '^':
        case '@':
        {
          precedence=ExponentPrecedence;
          break;
        }
        /*
          The default: label sits mid-switch here; legal C — case-label
          position does not affect dispatch.  It detects IMPLICIT
          multiplication (e.g. "2x" or ")(..."): previous char ends a value
          (digit or ')') and the current char begins one (lowercase symbol,
          '(' or — after a non-digit — a digit), excluding 'x'/'y' which are
          coordinate symbols.
        */
        default:
        {
          if (((c != 0) && ((isdigit(c) != 0) ||
               (strchr(")",c) != (char *) NULL))) &&
              (((islower((int) ((unsigned char) *expression)) != 0) ||
               (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) ||
               ((isdigit(c) == 0) &&
                (isdigit((int) ((unsigned char) *expression)) != 0))) &&
              (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL))
            precedence=MultiplyPrecedence;
          break;
        }
        case '*':
        case '/':
        case '%':
        {
          precedence=MultiplyPrecedence;
          break;
        }
        case '+':
        case '-':
        {
          /*
            Binary +/- only when the previous character is not itself an
            operator or '(' (otherwise this is a unary sign and is left for
            the evaluator to consume).
          */
          if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
              (isalpha(c) != 0))
            precedence=AdditionPrecedence;
          break;
        }
        case BitwiseAndAssignmentOperator:
        case BitwiseOrAssignmentOperator:
        case LeftShiftAssignmentOperator:
        case RightShiftAssignmentOperator:
        case PowerAssignmentOperator:
        case ModuloAssignmentOperator:
        case PlusAssignmentOperator:
        case SubtractAssignmentOperator:
        case MultiplyAssignmentOperator:
        case DivideAssignmentOperator:
        case IncrementAssignmentOperator:
        case DecrementAssignmentOperator:
        {
          /* compound-assignment tokens are pre-lexed into single char codes */
          precedence=AssignmentPrecedence;
          break;
        }
        case LeftShiftOperator:
        case RightShiftOperator:
        {
          precedence=ShiftPrecedence;
          break;
        }
        case '<':
        case LessThanEqualOperator:
        case GreaterThanEqualOperator:
        case '>':
        {
          precedence=RelationalPrecedence;
          break;
        }
        case EqualOperator:
        case NotEqualOperator:
        {
          precedence=EquivalencyPrecedence;
          break;
        }
        case '&':
        {
          precedence=BitwiseAndPrecedence;
          break;
        }
        case '|':
        {
          precedence=BitwiseOrPrecedence;
          break;
        }
        case LogicalAndOperator:
        {
          precedence=LogicalAndPrecedence;
          break;
        }
        case LogicalOrOperator:
        {
          precedence=LogicalOrPrecedence;
          break;
        }
        case ExponentialNotation:
        {
          precedence=ExponentialNotationPrecedence;
          break;
        }
        case ':':
        case '?':
        {
          precedence=TernaryPrecedence;
          break;
        }
        case '=':
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case ',':
        {
          precedence=CommaPrecedence;
          break;
        }
        case ';':
        {
          precedence=SeparatorPrecedence;
          break;
        }
      }
    if ((precedence == BitwiseComplementPrecedence) ||
        (precedence == TernaryPrecedence) ||
        (precedence == AssignmentPrecedence))
      {
        if (precedence > target)
          {
            /*
              Right-to-left associativity.
            */
            target=precedence;
            subexpression=expression;
          }
      }
    else
      if (precedence >= target)
        {
          /*
            Left-to-right associativity.
          */
          target=precedence;
          subexpression=expression;
        }
    /* skip over an entire parenthesized subexpression in one step */
    if (strchr("(",(int) *expression) != (char *) NULL)
      expression=FxSubexpression(expression,exception);
    c=(int) (*expression++);
  }
  return(subexpression);
}
static double FxEvaluateSubexpression(FxInfo *fx_info,
const PixelChannel channel,const ssize_t x,const ssize_t y,
const char *expression,const size_t depth,double *beta,
ExceptionInfo *exception)
{
#define FxMaxParenthesisDepth 58
#define FxMaxSubexpressionDepth 200
#define FxReturn(value) \
{ \
subexpression=DestroyString(subexpression); \
return(value); \
}
#define FxParseSubscription(subexpression,sentinal,p,q) \
{ \
p=subexpression; \
for (q=(char *) p; (*q != (sentinal)) && (*q != '\0'); q++) \
if (*q == '(') \
{ \
for (q++; (*q != ')') && (*q != '\0'); q++); \
if (*q == '\0') \
break; \
} \
if (*q == '\0') \
{ \
(void) ThrowMagickException(exception,GetMagickModule(), \
OptionError,"UnableToParseExpression","`%s'",subexpression); \
FxReturn(0.0); \
} \
*q='\0'; \
}
char
*q,
*subexpression;
double
alpha,
gamma,
sans,
value;
register const char
*p;
*beta=0.0;
sans=0.0;
subexpression=AcquireString(expression);
*subexpression='\0';
if (depth > FxMaxSubexpressionDepth)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnableToParseExpression","`%s'",expression);
FxReturn(0.0);
}
if (exception->severity >= ErrorException)
FxReturn(0.0);
while (isspace((int) ((unsigned char) *expression)) != 0)
expression++;
if (*expression == '\0')
FxReturn(0.0);
p=FxOperatorPrecedence(expression,exception);
if (p != (const char *) NULL)
{
(void) CopyMagickString(subexpression,expression,(size_t)
(p-expression+1));
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
switch ((unsigned char) *p)
{
case '~':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) (~(size_t) *beta);
FxReturn(*beta);
}
case '!':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta == 0.0 ? 1.0 : 0.0);
}
case '^':
{
*beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p,
depth+1,beta,exception));
FxReturn(*beta);
}
case '*':
case ExponentialNotation:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha*(*beta));
}
case '/':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(PerceptibleReciprocal(*beta)*alpha);
}
case '%':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fmod(alpha,*beta));
}
case '+':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha+(*beta));
}
case '-':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha-(*beta));
}
case BitwiseAndAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(double) ((size_t) (alpha+0.5) & (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case BitwiseOrAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(double) ((size_t) (alpha+0.5) | (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case LeftShiftAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (*beta+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
value=(double) ((size_t) (alpha+0.5) << (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case RightShiftAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (*beta+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
value=(double) ((size_t) (alpha+0.5) >> (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case PowerAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=pow(alpha,*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case ModuloAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=fmod(alpha,*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case PlusAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha+(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case SubtractAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha-(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case MultiplyAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha*(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case DivideAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha*PerceptibleReciprocal(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case IncrementAssignmentOperator:
{
if (*subexpression == '\0')
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha+1.0;
if (*subexpression == '\0')
{
if (SetFxSymbolValue(fx_info,p,value) == MagickFalse)
return(0.0);
}
else
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case DecrementAssignmentOperator:
{
if (*subexpression == '\0')
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha-1.0;
if (*subexpression == '\0')
{
if (SetFxSymbolValue(fx_info,p,value) == MagickFalse)
return(0.0);
}
else
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case LeftShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5));
FxReturn(*beta);
}
case RightShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '<':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha < *beta ? 1.0 : 0.0);
}
case LessThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha <= *beta ? 1.0 : 0.0);
}
case '>':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha > *beta ? 1.0 : 0.0);
}
case GreaterThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha >= *beta ? 1.0 : 0.0);
}
case EqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0);
}
case NotEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0);
}
case '&':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '|':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5));
FxReturn(*beta);
}
case LogicalAndOperator:
{
p++;
if (alpha <= 0.0)
{
*beta=0.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case LogicalOrOperator:
{
p++;
if (alpha > 0.0)
{
*beta=1.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case '?':
{
(void) CopyMagickString(subexpression,++p,MagickPathExtent);
FxParseSubscription(subexpression,':',p,q);
if (fabs(alpha) >= MagickEpsilon)
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
else
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
FxReturn(gamma);
}
case '=':
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case ',':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha);
}
case ';':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta);
}
default:
{
gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,
beta,exception);
FxReturn(gamma);
}
}
}
if (strchr("(",(int) *expression) != (char *) NULL)
{
size_t
length;
if (depth >= FxMaxParenthesisDepth)
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"ParenthesisNestedTooDeeply","`%s'",expression);
length=CopyMagickString(subexpression,expression+1,MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
FxReturn(gamma);
}
switch (*expression)
{
case '+':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(1.0*gamma);
}
case '-':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(-1.0*gamma);
}
case '~':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn((double) (~(size_t) (gamma+0.5)));
}
case 'A':
case 'a':
{
if (IsFxFunction(expression,"abs",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(fabs(alpha));
}
#if defined(MAGICKCORE_HAVE_ACOSH)
if (IsFxFunction(expression,"acosh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(acosh(alpha));
}
#endif
if (IsFxFunction(expression,"acos",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(acos(alpha));
}
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"airy",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha);
FxReturn(gamma*gamma);
}
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
if (IsFxFunction(expression,"asinh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(asinh(alpha));
}
#endif
if (IsFxFunction(expression,"asin",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(asin(alpha));
}
if (IsFxFunction(expression,"alt",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0);
}
if (IsFxFunction(expression,"atan2",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atan2(alpha,*beta));
}
#if defined(MAGICKCORE_HAVE_ATANH)
if (IsFxFunction(expression,"atanh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atanh(alpha));
}
#endif
if (IsFxFunction(expression,"atan",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(atan(alpha));
}
if (LocaleCompare(expression,"a") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(expression,"b") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'C':
case 'c':
{
if (IsFxFunction(expression,"ceil",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(ceil(alpha));
}
if (IsFxFunction(expression,"clamp",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha < 0.0)
FxReturn(0.0);
if (alpha > 1.0)
FxReturn(1.0);
FxReturn(alpha);
}
if (IsFxFunction(expression,"cosh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(cosh(alpha));
}
if (IsFxFunction(expression,"cos",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(cos(alpha));
}
if (LocaleCompare(expression,"c") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'D':
case 'd':
{
if (IsFxFunction(expression,"debug",5) != MagickFalse)
{
const char
*type;
size_t
length;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
switch (fx_info->images->colorspace)
{
case CMYKColorspace:
{
switch (channel)
{
case CyanPixelChannel: type="cyan"; break;
case MagentaPixelChannel: type="magenta"; break;
case YellowPixelChannel: type="yellow"; break;
case AlphaPixelChannel: type="alpha"; break;
case BlackPixelChannel: type="black"; break;
default: type="unknown"; break;
}
break;
}
case GRAYColorspace:
{
switch (channel)
{
case RedPixelChannel: type="gray"; break;
case AlphaPixelChannel: type="alpha"; break;
default: type="unknown"; break;
}
break;
}
default:
{
switch (channel)
{
case RedPixelChannel: type="red"; break;
case GreenPixelChannel: type="green"; break;
case BluePixelChannel: type="blue"; break;
case AlphaPixelChannel: type="alpha"; break;
default: type="unknown"; break;
}
break;
}
}
*subexpression='\0';
length=1;
if (strlen(expression) > 6)
length=CopyMagickString(subexpression,expression+6,
MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
if (fx_info->file != (FILE *) NULL)
(void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: "
"%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type,
subexpression,GetMagickPrecision(),alpha);
FxReturn(alpha);
}
if (IsFxFunction(expression,"do",2) != MagickFalse)
{
size_t
length;
/*
Parse do(expression,condition test).
*/
length=CopyMagickString(subexpression,expression+3,MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
FxParseSubscription(subexpression,',',p,q);
for ( ; ; )
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
}
FxReturn(alpha);
}
if (IsFxFunction(expression,"drc",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((alpha/(*beta*(alpha-1.0)+1.0)));
}
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(expression,"epsilon") == 0)
FxReturn(MagickEpsilon);
#if defined(MAGICKCORE_HAVE_ERF)
if (IsFxFunction(expression,"erf",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(erf(alpha));
}
#endif
if (IsFxFunction(expression,"exp",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(exp(alpha));
}
if (LocaleCompare(expression,"e") == 0)
FxReturn(2.7182818284590452354);
break;
}
case 'F':
case 'f':
{
if (IsFxFunction(expression,"floor",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
if (IsFxFunction(expression,"for",3) != MagickFalse)
{
double
sans = 0.0;
size_t
length;
/*
Parse for(initialization, condition test, expression).
*/
length=CopyMagickString(subexpression,expression+4,MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
FxParseSubscription(subexpression,',',p,q);
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
(void) CopyMagickString(subexpression,q+1,MagickPathExtent);
FxParseSubscription(subexpression,',',p,q);
for ( ; ; )
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
}
FxReturn(alpha);
}
break;
}
case 'G':
case 'g':
{
if (IsFxFunction(expression,"gauss",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI));
}
if (IsFxFunction(expression,"gcd",3) != MagickFalse)
{
MagickOffsetType
gcd;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+
0.5));
FxReturn((double) gcd);
}
if (LocaleCompare(expression,"g") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(expression,"h") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (LocaleCompare(expression,"hue") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"hypot",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(hypot(alpha,*beta));
}
break;
}
case 'K':
case 'k':
{
if (LocaleCompare(expression,"k") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'I':
case 'i':
{
if (IsFxFunction(expression,"if",2) != MagickFalse)
{
double
sans = 0.0;
size_t
length;
length=CopyMagickString(subexpression,expression+3,MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
FxParseSubscription(subexpression,',',p,q);
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
(void) CopyMagickString(subexpression,q+1,MagickPathExtent);
FxParseSubscription(subexpression,',',p,q);
if (fabs(alpha) >= MagickEpsilon)
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
else
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
FxReturn(alpha);
}
if (LocaleCompare(expression,"intensity") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"int",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
if (IsFxFunction(expression,"isnan",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn((double) !!IsNaN(alpha));
}
if (LocaleCompare(expression,"i") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(expression,"j") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
#if defined(MAGICKCORE_HAVE_J0)
if (IsFxFunction(expression,"j0",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j0(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"j1",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j1(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"jinc",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
FxReturn((2.0*j1((MagickPI*alpha))/(MagickPI*alpha)));
}
#endif
break;
}
case 'L':
case 'l':
{
if (IsFxFunction(expression,"ln",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(log(alpha));
}
if (IsFxFunction(expression,"logtwo",6) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn(log10(alpha)/log10(2.0));
}
if (IsFxFunction(expression,"log",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(log10(alpha));
}
if (LocaleCompare(expression,"lightness") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'M':
case 'm':
{
if (LocaleCompare(expression,"MaxRGB") == 0)
FxReturn(QuantumRange);
if (LocaleNCompare(expression,"maxima",6) == 0)
break;
if (IsFxFunction(expression,"max",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha > *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"minima",6) == 0)
break;
if (IsFxFunction(expression,"min",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha < *beta ? alpha : *beta);
}
if (IsFxFunction(expression,"mod",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta));
}
if (LocaleCompare(expression,"m") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'N':
case 'n':
{
if (IsFxFunction(expression,"not",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((double) (alpha < MagickEpsilon));
}
if (LocaleCompare(expression,"n") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(expression,"Opaque") == 0)
FxReturn(1.0);
if (LocaleCompare(expression,"o") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(expression,"phi") == 0)
FxReturn(MagickPHI);
if (LocaleCompare(expression,"pi") == 0)
FxReturn(MagickPI);
if (IsFxFunction(expression,"pow",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(pow(alpha,*beta));
}
if (LocaleCompare(expression,"p") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(expression,"QuantumRange") == 0)
FxReturn(QuantumRange);
if (LocaleCompare(expression,"QuantumScale") == 0)
FxReturn(QuantumScale);
break;
}
case 'R':
case 'r':
{
if (IsFxFunction(expression,"rand",4) != MagickFalse)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FxEvaluateSubexpression)
#endif
alpha=GetPseudoRandomValue(fx_info->random_info);
FxReturn(alpha);
}
if (IsFxFunction(expression,"round",5) != MagickFalse)
{
/*
Round the fraction to nearest integer.
*/
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if ((alpha-floor(alpha)) < (ceil(alpha)-alpha))
FxReturn(floor(alpha));
FxReturn(ceil(alpha));
}
if (LocaleCompare(expression,"r") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'S':
case 's':
{
if (LocaleCompare(expression,"saturation") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"sign",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(alpha < 0.0 ? -1.0 : 1.0);
}
if (IsFxFunction(expression,"sinc",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0)
FxReturn(1.0);
FxReturn(sin((MagickPI*alpha))/(MagickPI*alpha));
}
if (IsFxFunction(expression,"sinh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sinh(alpha));
}
if (IsFxFunction(expression,"sin",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(sin(alpha));
}
if (IsFxFunction(expression,"sqrt",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sqrt(alpha));
}
if (IsFxFunction(expression,"squish",6) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn((1.0/(1.0+exp(-alpha))));
}
if (LocaleCompare(expression,"s") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'T':
case 't':
{
if (IsFxFunction(expression,"tanh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(tanh(alpha));
}
if (IsFxFunction(expression,"tan",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(tan(alpha));
}
if (LocaleCompare(expression,"Transparent") == 0)
FxReturn(0.0);
if (IsFxFunction(expression,"trunc",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha >= 0.0)
FxReturn(floor(alpha));
FxReturn(ceil(alpha));
}
if (LocaleCompare(expression,"t") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'U':
case 'u':
{
if (LocaleCompare(expression,"u") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'V':
case 'v':
{
if (LocaleCompare(expression,"v") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'W':
case 'w':
{
if (IsFxFunction(expression,"while",5) != MagickFalse)
{
size_t
length;
/*
Parse while(condition test, expression).
*/
length=CopyMagickString(subexpression,expression+6,MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
FxParseSubscription(subexpression,',',p,q);
for ( ; ; )
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
}
FxReturn(alpha);
}
if (LocaleCompare(expression,"w") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(expression,"y") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(expression,"z") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
default:
break;
}
subexpression=DestroyString(subexpression);
q=(char *) expression;
alpha=InterpretSiPrefixValue(expression,&q);
if (q == expression)
alpha=FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception);
FxReturn(alpha);
}
MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  /*
    Evaluate the fx expression for the gray channel of pixel (0,0) and
    store the result in *alpha.
  */
  return(FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception));
}
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  FILE
    *saved_file;

  MagickBooleanType
    status;

  /*
    Dry-run the expression once with fx_info->file detached (restored on
    exit), so the validation pass emits nothing to that stream.
    NOTE(review): assumes fx_info->file is only an output sink during
    evaluation -- confirm against FxEvaluateSubexpression.
  */
  saved_file=fx_info->file;
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  fx_info->file=saved_file;
  return(status);
}
MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const PixelChannel channel,const ssize_t x,const ssize_t y,
  double *alpha,ExceptionInfo *exception)
{
  double
    beta = 0.0;

  /*
    Evaluate the full expression tree for one channel of pixel (x,y);
    beta receives the secondary operand of two-argument functions.
  */
  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0,
    &beta,exception);
  if (exception->severity == OptionError)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxImage() applies a mathematical expression to the specified image.
%
% The format of the FxImage method is:
%
% Image *FxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: A mathematical expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  register ssize_t
    i;

  ssize_t
    number_threads;

  /*
    Destroy each per-thread FxInfo structure, then the array itself.
    Returns NULL so callers can write fx_info=DestroyFxThreadSet(fx_info).
  */
  assert(fx_info != (FxInfo **) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);  /* hoisted: was re-queried on every iteration */
  for (i=0; i < number_threads; i++)
    if (fx_info[i] != (FxInfo *) NULL)
      fx_info[i]=DestroyFxInfo(fx_info[i]);
  fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;

  double
    alpha;

  FxInfo
    **fx_info;

  register ssize_t
    i;

  size_t
    number_threads;

  /*
    Allocate one pre-parsed FxInfo per worker thread.  An expression of the
    form '@filename' is read from that file first.  Returns NULL (with the
    whole set destroyed) if any allocation or expression preprocessing fails.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return((FxInfo **) NULL);
    }
  (void) memset(fx_info,0,number_threads*sizeof(*fx_info));
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0UL,exception);
  if (fx_expression == (char *) NULL)
    {
      /*
        FileToString() returns NULL when the '@filename' cannot be read;
        the original code would have passed NULL on to AcquireFxInfo().
      */
      fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
      return((FxInfo **) NULL);
    }
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    MagickBooleanType
      status;

    fx_info[i]=AcquireFxInfo(image,fx_expression,exception);
    if (fx_info[i] == (FxInfo *) NULL)
      break;
    /* validate the expression once per thread before the pixel loop runs */
    status=FxPreprocessExpression(fx_info[i],&alpha,exception);
    if (status == MagickFalse)
      break;
  }
  fx_expression=DestroyString(fx_expression);
  if (i < (ssize_t) number_threads)
    fx_info=DestroyFxThreadSet(fx_info);
  return(fx_info);
}
MagickExport Image *FxImage(const Image *image,const char *expression,
ExceptionInfo *exception)
{
#define FxImageTag "Fx/Image"
CacheView
*fx_view,
*image_view;
FxInfo
**magick_restrict fx_info;
Image
*fx_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
  Apply the fx expression to every channel of every pixel of a clone of
  image; the source image itself is never modified.  A NULL expression
  degenerates to a plain clone.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (expression == (const char *) NULL)
return(CloneImage(image,0,0,MagickTrue,exception));
/* one pre-parsed FxInfo per OpenMP thread (indexed by thread id below) */
fx_info=AcquireFxThreadSet(image,expression,exception);
if (fx_info == (FxInfo **) NULL)
return((Image *) NULL);
fx_image=CloneImage(image,0,0,MagickTrue,exception);
if (fx_image == (Image *) NULL)
{
fx_info=DestroyFxThreadSet(fx_info);
return((Image *) NULL);
}
if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse)
{
fx_info=DestroyFxThreadSet(fx_info);
fx_image=DestroyImage(fx_image);
return((Image *) NULL);
}
/*
Fx image: evaluate the expression row by row, in parallel when OpenMP is
available.  The source is read through a virtual view, the clone written
through an authentic view.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic) shared(progress,status) \
magick_number_threads(image,fx_image,fx_image->rows,1)
#endif
for (y=0; y < (ssize_t) fx_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
/* once any row fails, remaining rows are skipped (no early return in omp) */
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) fx_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
alpha;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel);
if ((traits == UndefinedPixelTrait) ||
(fx_traits == UndefinedPixelTrait))
continue;
/* channels flagged copy-only bypass expression evaluation */
if ((fx_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(fx_image,channel,p[i],q);
continue;
}
alpha=0.0;
/* each thread evaluates with its own FxInfo to avoid shared state */
(void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha,
exception);
/* expression results are normalized [0..1]; scale and clamp to quantum */
q[i]=ClampToQuantum(QuantumRange*alpha);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(fx_image);
}
if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* progress is shared across threads; increment must be atomic */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,FxImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
fx_view=DestroyCacheView(fx_view);
image_view=DestroyCacheView(image_view);
fx_info=DestroyFxThreadSet(fx_info);
if (status == MagickFalse)
fx_image=DestroyImage(fx_image);
return(fx_image);
}
|
omp_barrier.c | #include <stdio.h>
#include <unistd.h>
#include <omp.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
int
check_omp_barrier (FILE * logFile)
{
  int result1 = 0;
  int result2 = 0;

  /*
    Thread 1 sleeps and then publishes result2 = 3.  The explicit barrier
    forces thread 0 to wait, so the value it copies must be the published
    one.  Success <=> thread 0 observed 3.
  */
#pragma omp parallel
  {
    int thread_id = omp_get_thread_num ();

    if (thread_id == 1)
      {
        my_sleep (1.);
        result2 = 3;
      }
#pragma omp barrier
    if (thread_id == 0)
      result1 = result2;
  }
  return (result1 == 3);
}
int
crosscheck_omp_barrier (FILE * logFile)
{
  int result1 = 0;
  int result2 = 0;

  /*
    Same setup as check_omp_barrier but WITHOUT the barrier -- this is the
    deliberate negative control: thread 0 is expected to read result2
    before thread 1 (still sleeping) has written it.
  */
#pragma omp parallel
  {
    int thread_id = omp_get_thread_num ();

    if (thread_id == 1)
      {
        my_sleep (1.);
        result2 = 3;
      }
    if (thread_id == 0)
      result1 = result2;
  }
  return (result1 == 3);
}
|
GB_binop__plus_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__plus_uint16
// A.*B function (eWiseMult): GB_AemultB__plus_uint16
// A*D function (colscale): GB_AxD__plus_uint16
// D*A function (rowscale): GB_DxB__plus_uint16
// C+=B function (dense accum): GB_Cdense_accumB__plus_uint16
// C+=b function (dense accum): GB_Cdense_accumb__plus_uint16
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__plus_uint16
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__plus_uint16
// C=scalar+B GB_bind1st__plus_uint16
// C=scalar+B' GB_bind1st_tran__plus_uint16
// C=A+scalar GB_bind2nd__plus_uint16
// C=A'+scalar GB_bind2nd_tran__plus_uint16
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij + bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x + y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_UINT16 || GxB_NO_PLUS_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; the work is done by the
// shared template.  No status return: the all-dense case cannot fail.
void GB_Cdense_ewise3_accum__plus_uint16
(
GrB_Matrix C,                  // input/output matrix, accumulated in place
const GrB_Matrix A,            // first input matrix (dense)
const GrB_Matrix B,            // second input matrix (dense)
const int nthreads             // number of OpenMP threads to use
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulation into C).
GrB_Info GB_Cdense_ewise3_noaccum__plus_uint16
(
GrB_Matrix C,                  // output matrix (dense)
const GrB_Matrix A,            // first input matrix (dense)
const GrB_Matrix B,            // second input matrix (dense)
const int nthreads             // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;        // operator disabled at compile time
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// task slicing (kfirst/klast/pstart) precomputed by the caller.
GrB_Info GB_Cdense_accumB__plus_uint16
(
GrB_Matrix C,                  // input/output dense matrix
const GrB_Matrix B,            // sparse matrix to accumulate into C
const int64_t *GB_RESTRICT kfirst_slice,   // first vector of each task
const int64_t *GB_RESTRICT klast_slice,    // last vector of each task
const int64_t *GB_RESTRICT pstart_slice,   // start index of each task
const int ntasks,              // number of tasks in the slices above
const int nthreads             // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;        // operator disabled at compile time
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b into every entry of a dense matrix C.
GrB_Info GB_Cdense_accumb__plus_uint16
(
GrB_Matrix C,                  // input/output dense matrix
const GB_void *p_bwork,        // pointer to the scalar b (type uint16_t)
const int nthreads             // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;        // operator disabled at compile time
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the return inside the braces above always
// fires first; left intact because this file is auto-generated.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB_AxD__plus_uint16
(
GrB_Matrix C,                  // output matrix (same pattern as A)
const GrB_Matrix A, bool A_is_pattern,     // input; pattern-only if flagged
const GrB_Matrix D, bool D_is_pattern,     // diagonal scaling matrix
const int64_t *GB_RESTRICT kfirst_slice,   // first vector of each task
const int64_t *GB_RESTRICT klast_slice,    // last vector of each task
const int64_t *GB_RESTRICT pstart_slice,   // start index of each task
const int ntasks,              // number of tasks in the slices above
const int nthreads             // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;        // operator disabled at compile time
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB_DxB__plus_uint16
(
GrB_Matrix C,                  // output matrix (same pattern as B)
const GrB_Matrix D, bool D_is_pattern,     // diagonal scaling matrix
const GrB_Matrix B, bool B_is_pattern,     // input; pattern-only if flagged
int nthreads                   // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;        // operator disabled at compile time
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of the patterns of A and B.
GrB_Info GB_AaddB__plus_uint16
(
GrB_Matrix C,                  // output matrix
const GrB_Matrix M,            // optional mask (may be NULL)
const bool Mask_struct,        // if true, use only the structure of M
const GrB_Matrix A,            // first input matrix
const GrB_Matrix B,            // second input matrix
const bool Ch_is_Mh,           // if true, C->h is a copy of M->h
const int64_t *GB_RESTRICT C_to_M,         // mapping of C vectors to M
const int64_t *GB_RESTRICT C_to_A,         // mapping of C vectors to A
const int64_t *GB_RESTRICT C_to_B,         // mapping of C vectors to B
const GB_task_struct *GB_RESTRICT TaskList,  // parallel task schedule
const int ntasks,              // number of tasks in TaskList
const int nthreads             // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;        // operator disabled at compile time
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the intersection of the patterns.
GrB_Info GB_AemultB__plus_uint16
(
GrB_Matrix C,                  // output matrix
const GrB_Matrix M,            // optional mask (may be NULL)
const bool Mask_struct,        // if true, use only the structure of M
const GrB_Matrix A,            // first input matrix
const GrB_Matrix B,            // second input matrix
const int64_t *GB_RESTRICT C_to_M,         // mapping of C vectors to M
const int64_t *GB_RESTRICT C_to_A,         // mapping of C vectors to A
const int64_t *GB_RESTRICT C_to_B,         // mapping of C vectors to B
const GB_task_struct *GB_RESTRICT TaskList,  // parallel task schedule
const int ntasks,              // number of tasks in TaskList
const int nthreads             // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;        // operator disabled at compile time
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = x + Bx [k] for all k: apply the PLUS operator with the scalar
// bound to the first argument.
GrB_Info GB_bind1st__plus_uint16
(
GB_void *Cx_output,            // Cx and Bx may be aliased
const GB_void *x_input,        // pointer to the bound scalar x (uint16_t)
const GB_void *Bx_input,       // input array of uint16_t
int64_t anz,                   // number of entries
int nthreads                   // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
const uint16_t *Bx = (const uint16_t *) Bx_input ;
const uint16_t scalar = (*((const uint16_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
    Cx [k] = (uint16_t) (scalar + Bx [k]) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = Ax [k] + y for all k: apply the PLUS operator with the scalar
// bound to the second argument.
GrB_Info GB_bind2nd__plus_uint16
(
GB_void *Cx_output,            // Cx and Ax may be aliased
const GB_void *Ax_input,       // input array of uint16_t
const GB_void *y_input,        // pointer to the bound scalar y (uint16_t)
int64_t anz,                   // number of entries
int nthreads                   // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
const uint16_t *Ax = (const uint16_t *) Ax_input ;
const uint16_t scalar = (*((const uint16_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
    Cx [k] = (uint16_t) (Ax [k] + scalar) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (x + aij) ; \
}
// C = op (x, A'): transpose A and apply cij = x + aij (via GB_CAST_OP above).
GrB_Info GB_bind1st_tran__plus_uint16
(
GrB_Matrix C,                  // output matrix, transpose of A
const GB_void *x_input,        // pointer to the bound scalar x (uint16_t)
const GrB_Matrix A,            // input matrix to transpose
int64_t *GB_RESTRICT *Rowcounts,           // per-slice row counts workspace
GBI_single_iterator Iter,      // iterator over the vectors of A
const int64_t *GB_RESTRICT A_slice,        // slicing of A for parallelism
int naslice                    // number of slices of A
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;        // operator disabled at compile time
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows in this translation unit
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (aij + y) ; \
}
// C = op (A', y): transpose A and apply cij = aij + y (via GB_CAST_OP above).
GrB_Info GB_bind2nd_tran__plus_uint16
(
GrB_Matrix C,                  // output matrix, transpose of A
const GrB_Matrix A,            // input matrix to transpose
const GB_void *y_input,        // pointer to the bound scalar y (uint16_t)
int64_t *GB_RESTRICT *Rowcounts,           // per-slice row counts workspace
GBI_single_iterator Iter,      // iterator over the vectors of A
const int64_t *GB_RESTRICT A_slice,        // slicing of A for parallelism
int naslice                    // number of slices of A
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;        // operator disabled at compile time
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
target_array_extension.c | // --------------------------------------------------
// Check extends before
// --------------------------------------------------
// RUN: %libomptarget-compile-generic \
// RUN: -fopenmp-version=51 -DEXTENDS=BEFORE
// RUN: %libomptarget-run-fail-generic 2>&1 \
// RUN: | %fcheck-generic
// --------------------------------------------------
// Check extends after
// --------------------------------------------------
// RUN: %libomptarget-compile-generic \
// RUN: -fopenmp-version=51 -DEXTENDS=AFTER
// RUN: %libomptarget-run-fail-generic 2>&1 \
// RUN: | %fcheck-generic
// END.
#include <stdio.h>
#define BEFORE 0
#define AFTER 1
#define SIZE 100
#if EXTENDS == BEFORE
# define SMALL_BEG (SIZE-2)
# define SMALL_END SIZE
# define LARGE_BEG 0
# define LARGE_END SIZE
#elif EXTENDS == AFTER
# define SMALL_BEG 0
# define SMALL_END 2
# define LARGE_BEG 0
# define LARGE_END SIZE
#else
# error EXTENDS undefined
#endif
#define SMALL_SIZE (SMALL_END-SMALL_BEG)
#define LARGE_SIZE (LARGE_END-LARGE_BEG)
#define SMALL SMALL_BEG:SMALL_SIZE
#define LARGE LARGE_BEG:LARGE_SIZE
int main() {
  int arr[SIZE];

  /*
    Emit the host address and byte size of each mapped region so FileCheck
    can capture them for the diagnostic matching below.  The size operands
    are size_t (product with sizeof), so %zu is the correct conversion
    (the original %ld is undefined behavior on LLP64 targets); %p requires
    a void* argument.
  */
  // CHECK: addr=0x[[#%x,SMALL_ADDR:]], size=[[#%u,SMALL_BYTES:]]
  fprintf(stderr, "addr=%p, size=%zu\n", (void *)&arr[SMALL_BEG],
          SMALL_SIZE * sizeof arr[0]);
  // CHECK: addr=0x[[#%x,LARGE_ADDR:]], size=[[#%u,LARGE_BYTES:]]
  fprintf(stderr, "addr=%p, size=%zu\n", (void *)&arr[LARGE_BEG],
          LARGE_SIZE * sizeof arr[0]);

  // Mapping a sub-array and then targeting a super-set of it is legal:
  // the 'present' check on the smaller region succeeds.
  // CHECK-NOT: Libomptarget
#pragma omp target data map(alloc: arr[LARGE])
  {
#pragma omp target map(present, tofrom: arr[SMALL])
    ;
  }
  // CHECK: arr is present
  fprintf(stderr, "arr is present\n");

  // Mapping the small region and then requiring presence of the larger one
  // must fail: the device allocation cannot be extended.
  // CHECK: Libomptarget message: explicit extension not allowed: host address specified is 0x{{0*}}[[#LARGE_ADDR]] ([[#LARGE_BYTES]] bytes), but device allocation maps to host at 0x{{0*}}[[#SMALL_ADDR]] ([[#SMALL_BYTES]] bytes)
  // CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#LARGE_ADDR]] ([[#LARGE_BYTES]] bytes)
  // CHECK: Libomptarget error: Call to getOrAllocTgtPtr returned null pointer ('present' map type modifier).
  // CHECK: Libomptarget error: Call to targetDataBegin failed, abort target.
  // CHECK: Libomptarget error: Failed to process data before launching the kernel.
  // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory
#pragma omp target data map(alloc: arr[SMALL])
  {
#pragma omp target map(present, tofrom: arr[LARGE])
    ;
  }
  // CHECK-NOT: arr is present
  fprintf(stderr, "arr is present\n");
  return 0;
}
|
GB_unaryop__abs_fp64_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_fp64_bool
// op(A') function: GB_tran__abs_fp64_bool
// C type: double
// A type: bool
// cast: double cij = (double) aij
// unaryop: cij = fabs (aij)
#define GB_ATYPE \
bool
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = fabs (x) ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply z = fabs ((double) aij) to every entry of the bool array Ax,
// writing the results to the pre-allocated double array Cx.
GrB_Info GB_unop__abs_fp64_bool
(
double *restrict Cx,           // output array with anz entries
const bool *restrict Ax,       // input array with anz entries
int64_t anz,                   // number of entries in Ax and Cx
int nthreads                   // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;        // operator disabled at compile time
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;            // Cx [p] = fabs ((double) Ax [p])
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, casting bool->double and applying fabs,
// via the shared transpose template (phase 2: numerical work).
GrB_Info GB_tran__abs_fp64_bool
(
GrB_Matrix C,                  // output matrix, transpose of A
const GrB_Matrix A,            // input matrix to transpose
int64_t **Rowcounts,           // per-slice row counts workspace
GBI_single_iterator Iter,      // iterator over the vectors of A
const int64_t *restrict A_slice,           // slicing of A for parallelism
int naslice                    // number of slices of A
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;        // operator disabled at compile time
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
constitute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO N N SSSSS TTTTT IIIII TTTTT U U TTTTT EEEEE %
% C O O NN N SS T I T U U T E %
% C O O N N N ESSS T I T U U T EEE %
% C O O N NN SS T I T U U T E %
% CCCC OOO N N SSSSS T IIIII T UUU T EEEEE %
% %
% %
% MagickCore Methods to Constitute an Image %
% %
% Software Design %
% Cristy %
% October 1998 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/client.h"
#include "MagickCore/coder-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/constitute-private.h"
#include "MagickCore/delegate.h"
#include "MagickCore/geometry.h"
#include "MagickCore/identify.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/statistic.h"
#include "MagickCore/stream.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n s t i t u t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConstituteImage() returns an image from the pixel data you supply.
% The pixel data must be in scanline order top-to-bottom. The data can be
% char, short int, int, float, or double. Float and double require the
% pixels to be normalized [0..1], otherwise [0..QuantumRange]. For example, to
% create a 640x480 image from unsigned red-green-blue character data, use:
%
% image = ConstituteImage(640,480,"RGB",CharPixel,pixels,&exception);
%
% The format of the ConstituteImage method is:
%
% Image *ConstituteImage(const size_t columns,const size_t rows,
% const char *map,const StorageType storage,const void *pixels,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: width in pixels of the image.
%
% o rows: height in pixels of the image.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose
% from these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel,
% LongPixel, QuantumPixel, or ShortPixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConstituteImage(const size_t columns,const size_t rows,
  const char *map,const StorageType storage,const void *pixels,
  ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    length;

  /*
    Allocate image structure, then import the caller-supplied scanline
    pixel buffer described by the channel map (e.g. "RGB", "CMYK", "I").
  */
  assert(map != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",map);
  assert(pixels != (void *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage((ImageInfo *) NULL,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Scan the channel map once to configure the traits the import depends on
    (alpha, CMYK, grayscale).  strlen() is hoisted out of the loop condition
    so the scan is O(n) rather than O(n^2).
  */
  length=strlen(map);
  for (i=0; i < (ssize_t) length; i++)
  {
    switch (map[i])
    {
      case 'a':
      case 'A':
      case 'O':
      case 'o':
      {
        image->alpha_trait=BlendPixelTrait;
        break;
      }
      case 'C':
      case 'c':
      case 'm':
      case 'M':
      case 'Y':
      case 'y':
      case 'K':
      case 'k':
      {
        image->colorspace=CMYKColorspace;
        break;
      }
      case 'I':
      case 'i':
      {
        image->colorspace=GRAYColorspace;
        break;
      }
      default:
      {
        /* a one-character map of any other letter implies grayscale */
        if (length == 1)
          image->colorspace=GRAYColorspace;
        break;
      }
    }
  }
  status=SetImageExtent(image,columns,rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ImportImagePixels(image,0,0,columns,rows,map,storage,pixels,exception);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i n g I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PingImage() returns all the properties of an image or image sequence
% except for the pixels. It is much faster and consumes far less memory
% than ReadImage(). On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the PingImage method is:
%
% Image *PingImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Ping the image defined by the file or filename members of
% this structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  PingStream() is the stream handler installed by PingImage(): it discards
  the pixel data and simply reports the row width back to ReadStream(), so
  the image is parsed (geometry, properties) without retaining pixels.
*/
static size_t PingStream(const Image *magick_unused(image),
  const void *magick_unused(pixels),const size_t columns)
{
  magick_unreferenced(image);
  magick_unreferenced(pixels);
  return(columns);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
MagickExport Image *PingImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *ping_info;

  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /*
    Clone image_info so the caller's structure is untouched, then set the
    ping flag and read through the pixel-discarding PingStream handler.
  */
  ping_info=CloneImageInfo(image_info);
  ping_info->ping=MagickTrue;
  image=ReadStream(ping_info,&PingStream,exception);
  if (image != (Image *) NULL)
    {
      /* Exclude the time spent reading from the image's timer. */
      ResetTimer(&image->timer);
      if (ping_info->verbose != MagickFalse)
        (void) IdentifyImage(image,stdout,MagickFalse,exception);
    }
  ping_info=DestroyImageInfo(ping_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i n g I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PingImages() pings one or more images and returns them as an image list.
%
% The format of the PingImages method is:
%
% Image *PingImages(ImageInfo *image_info,const char *filename,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PingImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    ping_filename[MagickPathExtent];

  Image
    *image,
    *images;

  ImageInfo
    *read_info;

  /*
    Ping image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  (void) SetImageOption(image_info,"filename",filename);
  (void) CopyMagickString(image_info->filename,filename,MagickPathExtent);
  /*
    Expand any printf-style scene template (e.g. image-%d.png).  If the
    expansion differs from the filename, the caller supplied a template.
  */
  (void) InterpretImageFilename(image_info,(Image *) NULL,image_info->filename,
    (int) image_info->scene,ping_filename,exception);
  if (LocaleCompare(ping_filename,image_info->filename) != 0)
    {
      ExceptionInfo
        *sans;

      ssize_t
        extent,
        scene;

      /*
        Images of the form image-%d.png[1-5].
      */
      read_info=CloneImageInfo(image_info);
      /* Probe for a scene range ([1-5]) without polluting the caller's
         exception. */
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes == 0)
        {
          /* No explicit scene range: fall back to a single ping. */
          read_info=DestroyImageInfo(read_info);
          return(PingImage(image_info,exception));
        }
      (void) CopyMagickString(ping_filename,read_info->filename,
        MagickPathExtent);
      images=NewImageList();
      extent=(ssize_t) (read_info->scene+read_info->number_scenes);
      for (scene=(ssize_t) read_info->scene; scene < (ssize_t) extent; scene++)
      {
        /* Substitute each scene number into the template and ping it;
           unreadable scenes are silently skipped. */
        (void) InterpretImageFilename(image_info,(Image *) NULL,ping_filename,
          (int) scene,read_info->filename,exception);
        image=PingImage(read_info,exception);
        if (image == (Image *) NULL)
          continue;
        AppendImageToList(&images,image);
      }
      read_info=DestroyImageInfo(read_info);
      return(images);
    }
  return(PingImage(image_info,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadImage() reads an image or image sequence from a file or file handle.
% The method returns a NULL if there is a memory shortage or if the image
% cannot be read. On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the ReadImage method is:
%
% Image *ReadImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Read the image defined by the file or filename members of
% this structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  IsCoderAuthorized() consults the security policy for the coder domain and
  returns MagickTrue when the named coder is permitted the requested rights
  (read/write).  On denial it sets errno to EPERM, records a PolicyError
  exception, and returns MagickFalse.
*/
static MagickBooleanType IsCoderAuthorized(const char *coder,
  const PolicyRights rights,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,rights,coder) != MagickFalse)
    return(MagickTrue);
  errno=EPERM;
  (void) ThrowMagickException(exception,GetMagickModule(),PolicyError,
    "NotAuthorized","`%s'",coder);
  return(MagickFalse);
}
MagickExport Image *ReadImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent],
    magick[MagickPathExtent],
    magick_filename[MagickPathExtent];

  const char
    *value;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  DecodeImageHandler
    *decoder;

  ExceptionInfo
    *sans_exception;

  GeometryInfo
    geometry_info;

  Image
    *image,
    *next;

  ImageInfo
    *read_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image_info->filename != (char *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /* Work on a clone so the caller's image_info is never mutated; keep
     copies of the original filename/magick since SetImageInfo() rewrites
     read_info in place. */
  read_info=CloneImageInfo(image_info);
  (void) CopyMagickString(magick_filename,read_info->filename,MagickPathExtent);
  (void) SetImageInfo(read_info,0,exception);
  (void) CopyMagickString(filename,read_info->filename,MagickPathExtent);
  (void) CopyMagickString(magick,read_info->magick,MagickPathExtent);
  /*
    Call appropriate image reader based on image type.
  */
  sans_exception=AcquireExceptionInfo();
  magick_info=GetMagickInfo(read_info->magick,sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        read_info->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            /* Detect host byte order at run time: raw coders default to
               the machine's native endianness. */
            lsb_first=1;
            read_info->endian=(*(char *) &lsb_first) == 1 ? LSBEndian :
              MSBEndian;
          }
    }
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickDecoderSeekableStream(magick_info) != MagickFalse))
    {
      /*
        The coder needs random access; if the blob is not seekable (e.g. a
        pipe), spool it to a temporary file first.
      */
      image=AcquireImage(read_info,exception);
      (void) CopyMagickString(image->filename,read_info->filename,
        MagickPathExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          read_info=DestroyImageInfo(read_info);
          image=DestroyImage(image);
          return((Image *) NULL);
        }
      if (IsBlobSeekable(image) == MagickFalse)
        {
          /*
            Coder requires a seekable stream.
          */
          *read_info->filename='\0';
          status=ImageToFile(image,read_info->filename,exception);
          if (status == MagickFalse)
            {
              (void) CloseBlob(image);
              read_info=DestroyImageInfo(read_info);
              image=DestroyImage(image);
              return((Image *) NULL);
            }
          /* Remember to delete the spool file when done. */
          read_info->temporary=MagickTrue;
        }
      (void) CloseBlob(image);
      image=DestroyImage(image);
    }
  image=NewImageList();
  decoder=GetImageDecoder(magick_info);
  if (decoder == (DecodeImageHandler *) NULL)
    {
      /* No built-in decoder and no delegate: re-probe the file content to
         pick a format by magic bytes rather than by extension. */
      delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        {
          (void) SetImageInfo(read_info,0,exception);
          (void) CopyMagickString(read_info->filename,filename,
            MagickPathExtent);
          magick_info=GetMagickInfo(read_info->magick,exception);
          decoder=GetImageDecoder(magick_info);
        }
    }
  if (decoder != (DecodeImageHandler *) NULL)
    {
      /*
        Call appropriate image reader based on image type.
      */
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      /* Enforce the security policy before invoking the coder. */
      status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception);
      image=(Image *) NULL;
      if (status != MagickFalse)
        image=decoder(read_info,exception);
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
            read_info->magick);
          if (read_info->temporary != MagickFalse)
            (void) RelinquishUniqueFileResource(read_info->filename);
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      /*
        Let our decoding delegate process the image.
      */
      image=AcquireImage(read_info,exception);
      if (image == (Image *) NULL)
        {
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      (void) CopyMagickString(image->filename,read_info->filename,
        MagickPathExtent);
      /* The delegate writes its output to a fresh temporary filename. */
      *read_info->filename='\0';
      if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
        LockSemaphoreInfo(delegate_info->semaphore);
      status=InvokeDelegate(read_info,image,read_info->magick,(char *) NULL,
        exception);
      if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
        UnlockSemaphoreInfo(delegate_info->semaphore);
      image=DestroyImageList(image);
      read_info->temporary=MagickTrue;
      if (status != MagickFalse)
        (void) SetImageInfo(read_info,0,exception);
      /* Now decode the delegate's output with a built-in coder. */
      magick_info=GetMagickInfo(read_info->magick,exception);
      decoder=GetImageDecoder(magick_info);
      if (decoder == (DecodeImageHandler *) NULL)
        {
          if (IsPathAccessible(read_info->filename) != MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
              read_info->magick);
          else
            ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
              read_info->filename);
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      /*
        Call appropriate image reader based on image type.
      */
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception);
      image=(Image *) NULL;
      if (status != MagickFalse)
        image=(decoder)(read_info,exception);
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  if (read_info->temporary != MagickFalse)
    {
      /* Remove the spool/delegate temporary and restore the user-visible
         filename on the result. */
      (void) RelinquishUniqueFileResource(read_info->filename);
      read_info->temporary=MagickFalse;
      if (image != (Image *) NULL)
        (void) CopyMagickString(image->filename,filename,MagickPathExtent);
    }
  if (image == (Image *) NULL)
    {
      read_info=DestroyImageInfo(read_info);
      return(image);
    }
  if (exception->severity >= ErrorException)
    (void) LogMagickEvent(ExceptionEvent,GetMagickModule(),
      "Coder (%s) generated an image despite an error (%d), "
      "notify the developers",image->magick,exception->severity);
  if (IsBlobTemporary(image) != MagickFalse)
    (void) RelinquishUniqueFileResource(read_info->filename);
  if (IsSceneGeometry(read_info->scenes,MagickFalse) != MagickFalse)
    {
      /* A subimage specification (e.g. [2-4]) selects scenes from the
         decoded sequence. */
      Image
        *clones;

      clones=CloneImages(image,read_info->scenes,exception);
      if (clones == (Image *) NULL)
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "SubimageSpecificationReturnsNoImages","`%s'",read_info->filename);
      else
        {
          image=DestroyImageList(image);
          image=GetFirstImageInList(clones);
        }
    }
  /*
    Post-process every frame: normalize metadata, apply EXIF/TIFF hints and
    reader options.
  */
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    char
      magick_path[MagickPathExtent],
      *property,
      timestamp[MagickPathExtent];

    const char
      *option;

    const StringInfo
      *profile;

    ssize_t
      option_type;

    next->taint=MagickFalse;
    GetPathComponent(magick_filename,MagickPath,magick_path);
    if (*magick_path == '\0' && *next->magick == '\0')
      (void) CopyMagickString(next->magick,magick,MagickPathExtent);
    (void) CopyMagickString(next->magick_filename,magick_filename,
      MagickPathExtent);
    if (IsBlobTemporary(image) != MagickFalse)
      (void) CopyMagickString(next->filename,filename,MagickPathExtent);
    if (next->magick_columns == 0)
      next->magick_columns=next->columns;
    if (next->magick_rows == 0)
      next->magick_rows=next->rows;
    /* Promote TIFF/EXIF orientation into the image structure, then drop
       the properties so they are not applied twice. */
    value=GetImageProperty(next,"tiff:Orientation",exception);
    if (value == (char *) NULL)
      value=GetImageProperty(next,"exif:Orientation",exception);
    if (value != (char *) NULL)
      {
        next->orientation=(OrientationType) StringToLong(value);
        (void) DeleteImageProperty(next,"tiff:Orientation");
        (void) DeleteImageProperty(next,"exif:Orientation");
      }
    /* EXIF resolution is a rational (rho/sigma); "rho,sigma" denotes a
       decimal-encoded value instead. */
    value=GetImageProperty(next,"exif:XResolution",exception);
    if (value != (char *) NULL)
      {
        geometry_info.rho=next->resolution.x;
        geometry_info.sigma=1.0;
        flags=ParseGeometry(value,&geometry_info);
        if (geometry_info.sigma != 0)
          next->resolution.x=geometry_info.rho/geometry_info.sigma;
        if (strchr(value,',') != (char *) NULL)
          next->resolution.x=geometry_info.rho+geometry_info.sigma/1000.0;
        (void) DeleteImageProperty(next,"exif:XResolution");
      }
    value=GetImageProperty(next,"exif:YResolution",exception);
    if (value != (char *) NULL)
      {
        geometry_info.rho=next->resolution.y;
        geometry_info.sigma=1.0;
        flags=ParseGeometry(value,&geometry_info);
        if (geometry_info.sigma != 0)
          next->resolution.y=geometry_info.rho/geometry_info.sigma;
        if (strchr(value,',') != (char *) NULL)
          next->resolution.y=geometry_info.rho+geometry_info.sigma/1000.0;
        (void) DeleteImageProperty(next,"exif:YResolution");
      }
    value=GetImageProperty(next,"tiff:ResolutionUnit",exception);
    if (value == (char *) NULL)
      value=GetImageProperty(next,"exif:ResolutionUnit",exception);
    if (value != (char *) NULL)
      {
        option_type=ParseCommandOption(MagickResolutionOptions,MagickFalse,
          value);
        if (option_type >= 0)
          next->units=(ResolutionType) option_type;
        (void) DeleteImageProperty(next,"exif:ResolutionUnit");
        (void) DeleteImageProperty(next,"tiff:ResolutionUnit");
      }
    if (next->page.width == 0)
      next->page.width=next->columns;
    if (next->page.height == 0)
      next->page.height=next->rows;
    /* Apply user-supplied caption/comment/label options, expanding any
       embedded property escapes. */
    option=GetImageOption(read_info,"caption");
    if (option != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,option,exception);
        (void) SetImageProperty(next,"caption",property,exception);
        property=DestroyString(property);
      }
    option=GetImageOption(read_info,"comment");
    if (option != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,option,exception);
        (void) SetImageProperty(next,"comment",property,exception);
        property=DestroyString(property);
      }
    option=GetImageOption(read_info,"label");
    if (option != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,option,exception);
        (void) SetImageProperty(next,"label",property,exception);
        property=DestroyString(property);
      }
    if (LocaleCompare(next->magick,"TEXT") == 0)
      (void) ParseAbsoluteGeometry("0x0+0+0",&next->page);
    if ((read_info->extract != (char *) NULL) &&
        (read_info->stream == (StreamHandler) NULL))
      {
        /*
          An extract geometry with an offset crops; one with only a size
          resizes.
        */
        RectangleInfo
          geometry;

        SetGeometry(next,&geometry);
        flags=ParseAbsoluteGeometry(read_info->extract,&geometry);
        if ((next->columns != geometry.width) ||
            (next->rows != geometry.height))
          {
            if (((flags & XValue) != 0) || ((flags & YValue) != 0))
              {
                Image
                  *crop_image;

                crop_image=CropImage(next,&geometry,exception);
                if (crop_image != (Image *) NULL)
                  ReplaceImageInList(&next,crop_image);
              }
            else
              if (((flags & WidthValue) != 0) || ((flags & HeightValue) != 0))
                {
                  Image
                    *size_image;

                  flags=ParseRegionGeometry(next,read_info->extract,&geometry,
                    exception);
                  size_image=ResizeImage(next,geometry.width,geometry.height,
                    next->filter,exception);
                  if (size_image != (Image *) NULL)
                    ReplaceImageInList(&next,size_image);
                }
          }
      }
    /* NOTE(review): the icc/icm lookup result is immediately overwritten
       by the iptc lookup, and `profile` is never used afterwards — this
       looks like dead code left from an older color-profile sync; confirm
       against upstream before removing. */
    profile=GetImageProfile(next,"icc");
    if (profile == (const StringInfo *) NULL)
      profile=GetImageProfile(next,"icm");
    profile=GetImageProfile(next,"iptc");
    if (profile == (const StringInfo *) NULL)
      profile=GetImageProfile(next,"8bim");
    /* Record the file's modify/create times as image properties. */
    (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_mtime,
      MagickPathExtent,timestamp);
    (void) SetImageProperty(next,"date:modify",timestamp,exception);
    (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_ctime,
      MagickPathExtent,timestamp);
    (void) SetImageProperty(next,"date:create",timestamp,exception);
    option=GetImageOption(image_info,"delay");
    if (option != (const char *) NULL)
      {
        /* delay option: ">" caps the delay, "<" raises it, otherwise it is
           set unconditionally; sigma sets ticks-per-second. */
        flags=ParseGeometry(option,&geometry_info);
        if ((flags & GreaterValue) != 0)
          {
            if (next->delay > (size_t) floor(geometry_info.rho+0.5))
              next->delay=(size_t) floor(geometry_info.rho+0.5);
          }
        else
          if ((flags & LessValue) != 0)
            {
              /* NOTE(review): this branch assigns ticks_per_second from
                 sigma instead of raising next->delay — looks inconsistent
                 with the other branches; verify against upstream. */
              if (next->delay < (size_t) floor(geometry_info.rho+0.5))
                next->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
            }
          else
            next->delay=(size_t) floor(geometry_info.rho+0.5);
        if ((flags & SigmaValue) != 0)
          next->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
      }
    option=GetImageOption(image_info,"dispose");
    if (option != (const char *) NULL)
      {
        option_type=ParseCommandOption(MagickDisposeOptions,MagickFalse,
          option);
        if (option_type >= 0)
          next->dispose=(DisposeType) option_type;
      }
    if (read_info->verbose != MagickFalse)
      (void) IdentifyImage(next,stderr,MagickFalse,exception);
    image=next;
  }
  read_info=DestroyImageInfo(read_info);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadImages() reads one or more images and returns them as an image list.
%
% The format of the ReadImages method is:
%
% Image *ReadImages(ImageInfo *image_info,const char *filename,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    read_filename[MagickPathExtent];

  Image
    *image,
    *images;

  ImageInfo
    *read_info;

  /*
    Read image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  read_info=CloneImageInfo(image_info);
  *read_info->magick='\0';
  (void) SetImageOption(read_info,"filename",filename);
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  /*
    Expand any printf-style scene template (e.g. image-%d.png); if the
    expansion differs, the caller supplied a template plus scene range.
  */
  (void) InterpretImageFilename(read_info,(Image *) NULL,filename,
    (int) read_info->scene,read_filename,exception);
  if (LocaleCompare(read_filename,read_info->filename) != 0)
    {
      ExceptionInfo
        *sans;

      ssize_t
        extent,
        scene;

      /*
        Images of the form image-%d.png[1-5].
      */
      /* Probe for a scene range without polluting the caller's exception. */
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes != 0)
        {
          (void) CopyMagickString(read_filename,read_info->filename,
            MagickPathExtent);
          images=NewImageList();
          extent=(ssize_t) (read_info->scene+read_info->number_scenes);
          scene=(ssize_t) read_info->scene;
          for ( ; scene < (ssize_t) extent; scene++)
          {
            /* Read each scene of the template; unreadable scenes are
               silently skipped. */
            (void) InterpretImageFilename(image_info,(Image *) NULL,
              read_filename,(int) scene,read_info->filename,exception);
            image=ReadImage(read_info,exception);
            if (image == (Image *) NULL)
              continue;
            AppendImageToList(&images,image);
          }
          read_info=DestroyImageInfo(read_info);
          return(images);
        }
    }
  /* No template/scene range: read the file directly. */
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  image=ReadImage(read_info,exception);
  read_info=DestroyImageInfo(read_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d I n l i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadInlineImage() reads a Base64-encoded inline image or image sequence.
% The method returns a NULL if there is a memory shortage or if the image
% cannot be read. On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the ReadInlineImage method is:
%
% Image *ReadInlineImage(const ImageInfo *image_info,const char *content,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o content: the image encoded in Base64.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadInlineImage(const ImageInfo *image_info,
  const char *content,ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *read_info;

  unsigned char
    *blob;

  size_t
    length;

  register const char
    *p;

  /*
    Skip over header (e.g. data:image/gif;base64,).
  */
  image=NewImageList();
  /* Base64 payload starts after the first ','; a missing comma means the
     data URI is malformed. */
  for (p=content; (*p != ',') && (*p != '\0'); p++) ;
  if (*p == '\0')
    ThrowReaderException(CorruptImageError,"CorruptImage");
  p++;
  length=0;
  blob=Base64Decode(p,&length);
  if (length == 0)
    {
      /* Decode failed or produced no bytes: free the buffer and bail. */
      blob=(unsigned char *) RelinquishMagickMemory(blob);
      ThrowReaderException(CorruptImageError,"CorruptImage");
    }
  read_info=CloneImageInfo(image_info);
  /* No progress callbacks for in-memory reads. */
  (void) SetImageInfoProgressMonitor(read_info,(MagickProgressMonitor) NULL,
    (void *) NULL);
  /* Clear filename/magick so the format is sniffed from the blob itself. */
  *read_info->filename='\0';
  *read_info->magick='\0';
  image=BlobToImage(read_info,blob,length,exception);
  blob=(unsigned char *) RelinquishMagickMemory(blob);
  read_info=DestroyImageInfo(read_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteImage() writes an image or an image sequence to a file or file handle.
% If writing to a file on disk, the name is defined by the filename member
% of the image structure. WriteImage() returns MagickFalse if there is a
% memory shortage or if the image cannot be written. Check the exception
% member of image to determine the cause for any failure.
%
% The format of the WriteImage method is:
%
% MagickBooleanType WriteImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent];

  const char
    *option;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  EncodeImageHandler
    *encoder;

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    status,
    temporary;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  sans_exception=AcquireExceptionInfo();
  /* Clone image_info so the caller's structure is never mutated; keep the
     original filename so it can be restored after encoding. */
  write_info=CloneImageInfo(image_info);
  (void) CopyMagickString(write_info->filename,image->filename,
    MagickPathExtent);
  (void) SetImageInfo(write_info,1,sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,image->magick,MagickPathExtent);
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  (void) CopyMagickString(image->filename,write_info->filename,
    MagickPathExtent);
  /*
    Call appropriate image writer based on image type.
  */
  magick_info=GetMagickInfo(write_info->magick,sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        image->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            /* Detect host byte order at run time: raw coders default to
               the machine's native endianness. */
            lsb_first=1;
            image->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian;
          }
    }
  (void) SyncImageProfiles(image);
  DisassociateImageStream(image);
  /*
    Bi-modal delegate shortcut: for a single untainted frame whose source
    file is still accessible, a delegate may convert it directly.
  */
  option=GetImageOption(image_info,"delegate:bimodal");
  if ((IsStringTrue(option) != MagickFalse) &&
      (write_info->page == (char *) NULL) &&
      (GetPreviousImageInList(image) == (Image *) NULL) &&
      (GetNextImageInList(image) == (Image *) NULL) &&
      (IsTaintImage(image) == MagickFalse) )
    {
      delegate_info=GetDelegateInfo(image->magick,write_info->magick,exception);
      if ((delegate_info != (const DelegateInfo *) NULL) &&
          (GetDelegateMode(delegate_info) == 0) &&
          (IsPathAccessible(image->magick_filename) != MagickFalse))
        {
          /*
            Process image with bi-modal delegate.
          */
          (void) CopyMagickString(image->filename,image->magick_filename,
            MagickPathExtent);
          status=InvokeDelegate(write_info,image,image->magick,
            write_info->magick,exception);
          write_info=DestroyImageInfo(write_info);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
          return(status);
        }
    }
  status=MagickFalse;
  temporary=MagickFalse;
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickEncoderSeekableStream(magick_info) != MagickFalse))
    {
      char
        image_filename[MagickPathExtent];

      /* The encoder needs random access; probe the output blob and, if it
         is not seekable, divert the write to a unique temporary file. */
      (void) CopyMagickString(image_filename,image->filename,MagickPathExtent);
      status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
      (void) CopyMagickString(image->filename, image_filename,MagickPathExtent);
      if (status != MagickFalse)
        {
          if (IsBlobSeekable(image) == MagickFalse)
            {
              /*
                A seekable stream is required by the encoder.
              */
              write_info->adjoin=MagickTrue;
              (void) CopyMagickString(write_info->filename,image->filename,
                MagickPathExtent);
              (void) AcquireUniqueFilename(image->filename);
              temporary=MagickTrue;
            }
          (void) CloseBlob(image);
        }
    }
  encoder=GetImageEncoder(magick_info);
  if (encoder != (EncodeImageHandler *) NULL)
    {
      /*
        Call appropriate image writer based on image type.
      */
      if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      /* Enforce the security policy before invoking the coder. */
      status=IsCoderAuthorized(write_info->magick,WritePolicyRights,exception);
      if (status != MagickFalse)
        status=encoder(write_info,image,exception);
      if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo((char *) NULL,write_info->magick,exception);
      if (delegate_info != (DelegateInfo *) NULL)
        {
          /*
            Process the image with delegate.
          */
          *write_info->filename='\0';
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            LockSemaphoreInfo(delegate_info->semaphore);
          status=InvokeDelegate(write_info,image,(char *) NULL,
            write_info->magick,exception);
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            UnlockSemaphoreInfo(delegate_info->semaphore);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
        }
      else
        {
          /*
            No encoder and no delegate: fall back through the image's own
            format, then the filename extension, before giving up.
          */
          sans_exception=AcquireExceptionInfo();
          magick_info=GetMagickInfo(write_info->magick,sans_exception);
          sans_exception=DestroyExceptionInfo(sans_exception);
          if ((write_info->affirm == MagickFalse) &&
              (magick_info == (const MagickInfo *) NULL))
            {
              (void) CopyMagickString(write_info->magick,image->magick,
                MagickPathExtent);
              magick_info=GetMagickInfo(write_info->magick,exception);
            }
          encoder=GetImageEncoder(magick_info);
          if (encoder == (EncodeImageHandler *) NULL)
            {
              char
                extension[MagickPathExtent];

              GetPathComponent(image->filename,ExtensionPath,extension);
              if (*extension != '\0')
                magick_info=GetMagickInfo(extension,exception);
              else
                magick_info=GetMagickInfo(image->magick,exception);
              (void) CopyMagickString(image->filename,filename,
                MagickPathExtent);
              encoder=GetImageEncoder(magick_info);
            }
          if (encoder == (EncodeImageHandler *) NULL)
            {
              magick_info=GetMagickInfo(image->magick,exception);
              encoder=GetImageEncoder(magick_info);
              if (encoder == (EncodeImageHandler *) NULL)
                (void) ThrowMagickException(exception,GetMagickModule(),
                  MissingDelegateError,"NoEncodeDelegateForThisImageFormat",
                  "`%s'",write_info->magick);
            }
          if (encoder != (EncodeImageHandler *) NULL)
            {
              /*
                Call appropriate image writer based on image type.
              */
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                LockSemaphoreInfo(magick_info->semaphore);
              status=IsCoderAuthorized(write_info->magick,WritePolicyRights,
                exception);
              if (status != MagickFalse)
                status=encoder(write_info,image,exception);
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                UnlockSemaphoreInfo(magick_info->semaphore);
            }
        }
    }
  if (temporary != MagickFalse)
    {
      /*
        Copy temporary image file to permanent.
      */
      status=OpenBlob(write_info,image,ReadBinaryBlobMode,exception);
      if (status != MagickFalse)
        {
          (void) RelinquishUniqueFileResource(write_info->filename);
          status=ImageToFile(image,write_info->filename,exception);
        }
      (void) CloseBlob(image);
      (void) RelinquishUniqueFileResource(image->filename);
      (void) CopyMagickString(image->filename,write_info->filename,
        MagickPathExtent);
    }
  if ((LocaleCompare(write_info->magick,"info") != 0) &&
      (write_info->verbose != MagickFalse))
    (void) IdentifyImage(image,stdout,MagickFalse,exception);
  write_info=DestroyImageInfo(write_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteImages() writes an image sequence into one or more files. While
% WriteImage() can write an image sequence, it is limited to writing
% the sequence into a single file using a format which supports multiple
% frames. WriteImages(), however, does not have this limitation, instead it
% generates multiple output files if necessary (or when requested). When
% ImageInfo's adjoin flag is set to MagickFalse, the file name is expected
% to include a printf-style formatting string for the frame number (e.g.
% "image%02d.png").
%
% The format of the WriteImages method is:
%
% MagickBooleanType WriteImages(const ImageInfo *image_info,Image *images,
% const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o images: the image list.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImages(const ImageInfo *image_info,
  Image *images,const char *filename,ExceptionInfo *exception)
{
#define WriteImageTag "Write/Image"

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    proceed;

  MagickOffsetType
    progress;

  MagickProgressMonitor
    progress_monitor;

  MagickSizeType
    number_images;

  MagickStatusType
    status;

  register Image
    *p;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  write_info=CloneImageInfo(image_info);
  *write_info->magick='\0';
  images=GetFirstImageInList(images);
  /* An explicit filename overrides the per-frame filenames. */
  if (filename != (const char *) NULL)
    for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
      (void) CopyMagickString(p->filename,filename,MagickPathExtent);
  (void) CopyMagickString(write_info->filename,images->filename,
    MagickPathExtent);
  sans_exception=AcquireExceptionInfo();
  (void) SetImageInfo(write_info,(unsigned int) GetImageListLength(images),
    sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,images->magick,MagickPathExtent);
  /*
    If any frame's scene number does not strictly increase, renumber the
    whole list sequentially from the first frame's scene.
  */
  p=images;
  for ( ; GetNextImageInList(p) != (Image *) NULL; p=GetNextImageInList(p))
  {
    register Image
      *next;

    next=GetNextImageInList(p);
    if (next == (Image *) NULL)
      break;
    if (p->scene >= next->scene)
      {
        register ssize_t
          i;

        /*
          Generate consistent scene numbers.
        */
        i=(ssize_t) images->scene;
        for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
          p->scene=(size_t) i++;
        break;
      }
  }
  /*
    Write images.
  */
  status=MagickTrue;
  progress_monitor=(MagickProgressMonitor) NULL;
  progress=0;
  number_images=GetImageListLength(images);
  for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    /* For multi-frame output, silence the per-frame monitor and report
       list-level progress instead. */
    if (number_images != 1)
      progress_monitor=SetImageProgressMonitor(p,(MagickProgressMonitor) NULL,
        p->client_data);
    status&=WriteImage(write_info,p,exception);
    if (number_images != 1)
      (void) SetImageProgressMonitor(p,progress_monitor,p->client_data);
    /* With adjoin, WriteImage() emitted the whole sequence in one file. */
    if (write_info->adjoin != MagickFalse)
      break;
    if (number_images != 1)
      {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(p,WriteImageTag,progress,number_images);
        if (proceed == MagickFalse)
          break;
      }
  }
  write_info=DestroyImageInfo(write_info);
  /* status accumulated per-frame booleans with &=; nonzero means all
     writes succeeded. */
  return(status != 0 ? MagickTrue : MagickFalse);
}
|
Quadtree.h | /*
* Quadtree.h
*
* Created on: 21.05.2014
* Author: Moritz v. Looz (moritz.looz-corswarem@kit.edu)
*/
#ifndef QUADTREE_H_
#define QUADTREE_H_
#include <vector>
#include <memory>
#include <cmath>
#include <omp.h>
#include <functional>
#include "QuadNode.h"
#include "../../geometric/HyperbolicSpace.h"
#include "../../auxiliary/Parallel.h"
namespace NetworKit {
template <class T>
class Quadtree {
	friend class QuadTreeGTest;
public:
	/**
	 * Default constructor: empty tree over a disk of radius 1.
	 */
	Quadtree() {
		root = QuadNode<T>();
		this->maxRadius = 1;
	}

	/**
	 * @param maxR Radius of the managed area. Must be smaller than 1.
	 * @param theoreticalSplit If true, split cells to get the same area in each child cell. Default is false
	 * @param alpha dispersion parameter of the point distribution. Only has an effect if theoretical split is true
	 * @param capacity How many points can inhabit a leaf cell before it is split up?
	 * @param balance radial split balance, forwarded to QuadNode
	 */
	Quadtree(double maxR, bool theoreticalSplit=false, double alpha=1, count capacity=1000, double balance = 0.5) {
		root = QuadNode<T>(0, 0, 2*M_PI, maxR, capacity, 0, theoreticalSplit, alpha, balance);
		this->maxRadius = maxR;
	}

	/**
	 * Recursively generate l random points in the subtree rooted at currentNode,
	 * assigning element ids starting at offset.  While l exceeds seqThreshold,
	 * the point count is partitioned over the children by sampling binomial
	 * splits proportional to each child's effective area; below the threshold
	 * the points are generated inside an OpenMP task.
	 *
	 * (The reference parameter was previously garbled to "¤tNode" by an
	 * HTML-entity corruption of "&currentNode"; restored here.)
	 *
	 * @return the id offset after the subtree has been filled
	 */
	count fillInParallel(count l, double alpha, count seqThreshold, count offset, QuadNode<T> &currentNode) {
		if (l > seqThreshold) {
			if (currentNode.height() == 1) currentNode.split();
			double treeArea = HyperbolicSpace::effectiveAreaInCell(currentNode.getLeftAngle(), currentNode.getRightAngle(), currentNode.getMinR(), currentNode.getMaxR(), alpha);
			count coveredPoints = 0;
			double coveredArea = 0;
			for (index i = 0; i < currentNode.children.size(); i++) {
				count pointsInSubtree;
				if (i < currentNode.children.size()-1) {
					double subTreeArea = HyperbolicSpace::effectiveAreaInCell(currentNode.children[i].getLeftAngle(), currentNode.children[i].getRightAngle(), currentNode.children[i].getMinR(), currentNode.children[i].getMaxR(), alpha);
					assert(treeArea-coveredArea >= subTreeArea);
					std::binomial_distribution<int> distribution(l-coveredPoints,subTreeArea/(treeArea-coveredArea));
					pointsInSubtree = distribution(Aux::Random::getURNG());
					coveredArea += subTreeArea;
				} else {
					// the last child receives the remainder so the counts always sum to l
					pointsInSubtree = l-coveredPoints;
				}
				coveredPoints += pointsInSubtree;
				offset = fillInParallel(pointsInSubtree, alpha, seqThreshold, offset, currentNode.children[i]);
			}
		} else {
			#pragma omp task shared(currentNode) firstprivate(offset)
			{
				vector<double> angles(l);
				vector<double> radii(l);
				HyperbolicSpace::fillPoints(angles, radii, currentNode.getLeftAngle(), currentNode.getRightAngle(),
						HyperbolicSpace::EuclideanRadiusToHyperbolic(currentNode.getMinR()),
						HyperbolicSpace::EuclideanRadiusToHyperbolic(currentNode.getMaxR()), alpha);
				for (index i = 0; i < l; i++) {
					currentNode.addContent(i+offset, angles[i], radii[i]);
				}
			}
			offset += l;
		}
		return offset;
	}

	/**
	 * Construct a tree filled with n randomly distributed points; the points
	 * are generated in parallel and receive ids 0..n-1.
	 *
	 * @param n number of points to generate
	 * @param stretch factor applied to the natural hyperbolic radius for n points
	 * @param theoreticalSplit If true, split cells to get the same area in each child cell. Default is false
	 * @param alpha dispersion parameter of the point distribution
	 * @param capacity How many points can inhabit a leaf cell before it is split up?
	 * @param balance radial split balance, forwarded to QuadNode
	 */
	Quadtree(count n, double stretch, bool theoreticalSplit=false, double alpha=1, count capacity=1000, double balance = 0.5) {
		double R = stretch*HyperbolicSpace::hyperbolicAreaToRadius(n);
		double r = HyperbolicSpace::hyperbolicRadiusToEuclidean(R);
		count numberOfThreads = omp_get_max_threads();
		root = QuadNode<T>(0, 0, 2*M_PI, r, capacity, 0, theoreticalSplit, alpha, balance);
		maxRadius = r;
		count result = 0; // written once in the single region, read after the parallel barrier
		#pragma omp parallel
		{
			#pragma omp single nowait
			{
				result = fillInParallel(n, alpha, n/numberOfThreads, 0, root);
			}
		}
		assert(result == n);
		root.recount();
		assert(root.size() == n);
	}

	/**
	 * Construct a tree from given polar coordinates and content.
	 *
	 * @param angles angular coordinates, one per element
	 * @param radii radial coordinates, one per element
	 * @param content payload; content[i] must be < n
	 * @param stretch factor applied to the natural hyperbolic radius for n points
	 */
	Quadtree(const vector<double> &angles, const vector<double> &radii, const vector<T> &content, double stretch, bool theoreticalSplit=false, double alpha=1, count capacity=1000, double balance = 0.5) {
		const count n = angles.size();
		assert(angles.size() == radii.size());
		assert(radii.size() == content.size());
		double R = stretch*HyperbolicSpace::hyperbolicAreaToRadius(n);
		double r = HyperbolicSpace::hyperbolicRadiusToEuclidean(R);
		root = QuadNode<T>(0, 0, 2*M_PI, r, capacity, 0, theoreticalSplit, alpha, balance);
		maxRadius = r;
		for (index i = 0; i < n; i++) {
			assert(content[i] < n);
			root.addContent(content[i], angles[i], radii[i]);
		}
	}

	/**
	 * @param newcomer content to be added at point x
	 * @param angle angular coordinate of x
	 * @param r radial coordinate of x
	 */
	void addContent(T newcomer, double angle, double r) {
		root.addContent(newcomer, angle, r);
	}

	/**
	 * @param toRemove content to be removed at point x
	 * @param angle angular coordinate of x
	 * @param r radial coordinate of x
	 * @return true iff the element was found and removed
	 */
	bool removeContent(T toRemove, double angle, double r) {
		return root.removeContent(toRemove, angle, r);
	}

	/**
	 * Get all elements, regardless of position
	 *
	 * @return vector<T> of elements
	 */
	vector<T> getElements() const {
		return root.getElements();
	}

	/**
	 * Append the polar coordinates of all stored points to the given containers.
	 */
	void extractCoordinates(vector<double> &anglesContainer, vector<double> &radiiContainer) const {
		root.getCoordinates(anglesContainer, radiiContainer);
	}

	/**
	 * Get elements whose hyperbolic distance to the query point is less than
	 * hyperbolicRadius.
	 *
	 * @param circleCenter Cartesian coordinates of the query circle's center
	 * @param hyperbolicRadius Radius of the query circle
	 */
	vector<T> getElementsInHyperbolicCircle(Point2D<double> circleCenter, double hyperbolicRadius) const {
		vector<T> circleDenizens;
		getElementsInHyperbolicCircle(circleCenter, hyperbolicRadius, circleDenizens);
		return circleDenizens;
	}

	/**
	 * Collect elements within hyperbolic distance hyperbolicRadius of
	 * circleCenter into circleDenizens.  The hyperbolic query circle is mapped
	 * to a Euclidean one, clipped to the managed disk, and split into up to
	 * three angular ranges when it wraps around the 2*pi line; duplicates from
	 * the wrap-around are removed at the end.
	 *
	 * @param suppressLeft if true, only the angular half-range to the right of
	 *        the center is queried
	 */
	void getElementsInHyperbolicCircle(const Point2D<double> circleCenter, const double hyperbolicRadius, const bool suppressLeft, vector<T> &circleDenizens) const {
		double cc_phi, cc_r;
		HyperbolicSpace::cartesianToPolar(circleCenter, cc_phi, cc_r);
		// transform hyperbolic circle into Euclidean circle
		double minPhi, maxPhi, radius, r_e;
		HyperbolicSpace::getEuclideanCircle(cc_r, hyperbolicRadius, r_e, radius);
		Point2D<double> center = HyperbolicSpace::polarToCartesian(cc_phi, r_e);
		double minR = r_e - radius;
		double maxR = r_e + radius;
		if (maxR > 1) maxR = 1; // clip to the unit disk
		if (minR < 0) {
			// circle contains the origin: all angles are possible
			maxR = std::max(std::abs(minR), maxR); // std::abs: plain abs(double) may resolve to the int overload
			minR = 0;
			minPhi = 0;
			maxPhi = 2*M_PI;
		} else {
			double spread = asin(radius / r_e);
			minPhi = cc_phi - spread;
			maxPhi = cc_phi + spread;
			/**
			 * If the circle overlaps the 2\pi line, we have to make two separate calls and collect
			 */
		}
		if (suppressLeft) minPhi = cc_phi;
		/**
		 * get Elements in Euclidean circle
		 */
		bool wraparound = false;
		root.getElementsInEuclideanCircle(center, radius, circleDenizens, minPhi, maxPhi, minR, maxR);
		if (minPhi < 0) {
			root.getElementsInEuclideanCircle(center, radius, circleDenizens, 2*M_PI+minPhi, 2*M_PI, minR, maxR);
			wraparound = true;
		}
		if (maxPhi > 2*M_PI) {
			root.getElementsInEuclideanCircle(center, radius, circleDenizens, 0, maxPhi - 2*M_PI, minR, maxR);
			wraparound = true;
		}
		// we have sort(deg(v)) here! This is not good, but does not make the asymptotical complexity of O(deg(v) log n) worse.
		if (wraparound) {
			Aux::Parallel::sort(circleDenizens.begin(), circleDenizens.end());
			auto newend = unique(circleDenizens.begin(), circleDenizens.end());
			circleDenizens.resize(newend - circleDenizens.begin());
		}
	}

	void getElementsInHyperbolicCircle(const Point2D<double> circleCenter, const double hyperbolicRadius, vector<T> &circleDenizens) const {
		getElementsInHyperbolicCircle(circleCenter, hyperbolicRadius, false, circleDenizens);
	}

	/** @return number of stored elements */
	count size() const {
		return root.size();
	}

	/** @return height of the tree */
	count height() const {
		return root.height();
	}

	/** @return number of leaf cells */
	count countLeaves() const {
		return root.countLeaves();
	}

	/** Assign consecutive ids to all cells, starting at nextID; returns the next free id. */
	index indexSubtree(index nextID) {
		return root.indexSubtree(nextID);
	}

	/** @return id of the leaf cell containing the point (phi, r) */
	index getCellID(double phi, double r) const {
		return root.getCellID(phi, r);
	}

	/** Sort the points inside each leaf cell, in parallel. */
	void sortPointsInLeaves() {
		#pragma omp parallel
		{
			#pragma omp single nowait
			{
				root.sortPointsInLeaves();
			}
		}
	}

	/** Reassign element ids over the whole tree, in parallel. */
	void reindex() {
		#pragma omp parallel
		{
			#pragma omp single nowait
			{
				root.reindex(0);
			}
		}
	}

	/**
	 * trims the vectors used to hold the content in the leaf cells. Reduces memory usage, makes changes slower
	 */
	void trim() {
		root.trim();
	}

private:
	QuadNode<T> root;
	double maxRadius;
};
}
#endif /* QUADTREE_H_ */
|
lapmg_mex.c | #include <inttypes.h>
#include <omp.h>
#include "mex.h"
#include "lapmg_mex.h"
void lapmgf(float *du,
const float *u, const uint8_t *G,
const double *h, const size_t *sz);
void lapmgd(double *du,
const double *u, const uint8_t *G,
const double *h, const size_t *sz);
#ifdef LAPMG_MEX
void
mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    /*
     * MEX gateway: du = prhs[0] is updated in place with the masked
     * Laplacian of u = prhs[1].  Exactly four inputs and at most one
     * output are accepted.
     */
    if ((nrhs != 4) || (nlhs > 1)) {
        mexErrMsgTxt("Usage: lapmg_mex(d2u, u, G, h);");
    }
    const uint8_t *mask = (const uint8_t *)mxGetData(prhs[2]);
    const double *spacing = (const double *)mxGetData(prhs[3]);
    const size_t *dims = (const size_t *)mxGetDimensions(prhs[0]);
    /* dispatch on the numeric class of the output array */
    if (mxIsSingle(prhs[0])) {
        lapmgf((float *)mxGetData(prhs[0]),
               (const float *)mxGetData(prhs[1]), mask, spacing, dims);
    } else {
        lapmgd((double *)mxGetData(prhs[0]),
               (const double *)mxGetData(prhs[1]), mask, spacing, dims);
    }
    if (nlhs == 1) {
        plhs[0] = mxCreateDoubleScalar(1.0);
    }
}
#endif
void
mx_lapmg(mxArray *mxdu,
    const mxArray *mxu, const mxArray *mxG, const mxArray *mxh)
{
    /*
     * Library entry point: dispatch to the single- or double-precision
     * Laplacian kernel based on the numeric class of mxdu, writing the
     * result in place into mxdu's data.
     */
    const uint8_t *mask = (const uint8_t *)mxGetData(mxG);
    const double *spacing = (const double *)mxGetData(mxh);
    const size_t *dims = (const size_t *)mxGetDimensions(mxdu);

    if (!mxIsSingle(mxdu)) {
        lapmgd((double *)mxGetData(mxdu),
               (const double *)mxGetData(mxu), mask, spacing, dims);
    } else {
        lapmgf((float *)mxGetData(mxdu),
               (const float *)mxGetData(mxu), mask, spacing, dims);
    }
}
void
lapmgf(float *du,
    const float *u, const uint8_t *G, const double *h, const size_t *sz)
{
    /*
     * Masked 7-point Laplacian, single precision.  For every interior
     * voxel with a nonzero mask entry G[l], du[l] receives the second
     * central difference of u with grid spacings h[0..2]; boundary and
     * masked-out voxels are left untouched.  Arrays are column-major
     * with dimensions sz[0] x sz[1] x sz[2].
     */
    const size_t nx = sz[0];
    const size_t ny = sz[1];
    const size_t nz = sz[2];
    const size_t slice = nx*ny;
    const size_t xend = nx - 1;
    const size_t yend = nx*(ny - 1);
    const size_t zend = slice*(nz - 1);
    const float wx = (float)(1.0/(h[0]*h[0]));
    const float wy = (float)(1.0/(h[1]*h[1]));
    const float wz = (float)(1.0/(h[2]*h[2]));
    const float wc = (float)(-2.0*(wx + wy + wz));
    size_t x, y, z, idx;

    /* parallelize over z-slices; skip threading for small volumes */
#pragma omp parallel for private(x,y,z,idx) schedule(static) \
    if(slice*nz > 32*32*32)
    for (z = slice; z < zend; z += slice) {
        for (y = nx; y < yend; y += nx) {
            idx = 1 + y + z;
            for (x = 1; x < xend; ++x, ++idx) {
                if (!G[idx])
                    continue;
                du[idx] = wc*u[idx]
                    + wx*(u[idx-1] + u[idx+1])
                    + wy*(u[idx-nx] + u[idx+nx])
                    + wz*(u[idx-slice] + u[idx+slice]);
            }
        }
    }
}
void
lapmgd(double *du,
    const double *u, const uint8_t *G, const double *h, const size_t *sz)
{
    /*
     * Masked 7-point Laplacian, double precision.  For every interior
     * voxel with a nonzero mask entry G[l], du[l] receives the second
     * central difference of u with grid spacings h[0..2]; boundary and
     * masked-out voxels are left untouched.  Arrays are column-major
     * with dimensions sz[0] x sz[1] x sz[2].
     */
    const size_t nx = sz[0];
    const size_t ny = sz[1];
    const size_t nz = sz[2];
    const size_t slice = nx*ny;
    const size_t xend = nx - 1;
    const size_t yend = nx*(ny - 1);
    const size_t zend = slice*(nz - 1);
    const double wx = 1.0/(h[0]*h[0]);
    const double wy = 1.0/(h[1]*h[1]);
    const double wz = 1.0/(h[2]*h[2]);
    const double wc = -2.0*(wx + wy + wz);
    size_t x, y, z, idx;

    /* parallelize over z-slices; skip threading for small volumes */
#pragma omp parallel for private(x,y,z,idx) schedule(static) \
    if(slice*nz > 32*32*32)
    for (z = slice; z < zend; z += slice) {
        for (y = nx; y < yend; y += nx) {
            idx = 1 + y + z;
            for (x = 1; x < xend; ++x, ++idx) {
                if (!G[idx])
                    continue;
                du[idx] = wc*u[idx]
                    + wx*(u[idx-1] + u[idx+1])
                    + wy*(u[idx-nx] + u[idx+nx])
                    + wz*(u[idx-slice] + u[idx+slice]);
            }
        }
    }
}
|
shear.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS H H EEEEE AAA RRRR %
% SS H H E A A R R %
% SSS HHHHH EEE AAAAA RRRR %
% SS H H E A A R R %
% SSSSS H H EEEEE A A R R %
% %
% %
% MagickCore Methods to Shear or Rotate an Image by an Arbitrary Angle %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2009 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The RotateImage, XShearImage, and YShearImage methods are based on the
% paper "A Fast Algorithm for General Raster Rotatation" by Alan W. Paeth,
% Graphics Interface '86 (Vancouver). RotateImage is adapted from a similar
% method based on the Paeth paper written by Michael Halle of the Spatial
% Imaging Group, MIT Media Lab.
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/blob-private.h"
#include "magick/cache-private.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/decorate.h"
#include "magick/distort.h"
#include "magick/draw.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/list.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/shear.h"
#include "magick/statistic.h"
#include "magick/threshold.h"
#include "magick/transform.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n e T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffineTransformImage() transforms an image as dictated by the affine matrix.
% It allocates the memory necessary for the new Image structure and returns
% a pointer to the new image.
%
% The format of the AffineTransformImage method is:
%
% Image *AffineTransformImage(const Image *image,
% AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o affine_matrix: the affine matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
  const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
  double
    distort[6];

  Image
    *affine_image;

  /*
    Affine transform image.
  */
  assert(image != (Image *) NULL);  /* assert non-null before dereferencing */
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(affine_matrix != (AffineMatrix *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Repack the affine matrix into DistortImage's six-coefficient order
    (sx, rx, ry, sy, tx, ty) and delegate the transform.
  */
  distort[0]=affine_matrix->sx;
  distort[1]=affine_matrix->rx;
  distort[2]=affine_matrix->ry;
  distort[3]=affine_matrix->sy;
  distort[4]=affine_matrix->tx;
  distort[5]=affine_matrix->ty;
  affine_image=DistortImage(image,AffineProjectionDistortion,6,distort,
    MagickTrue,exception);
  return(affine_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C r o p T o F i t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropToFitImage() crops the sheared image as determined by the bounding box
% as defined by width and height and shearing angles.
%
% The format of the CropToFitImage method is:
%
% Image *CropToFitImage(Image **image,const MagickRealType x_shear,
% const MagickRealType x_shear,const MagickRealType width,
% const MagickRealType height,const MagickBooleanType rotate,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear, width, height: Defines a region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void CropToFitImage(Image **image,const MagickRealType x_shear,
  const MagickRealType y_shear,const MagickRealType width,
  const MagickRealType height,const MagickBooleanType rotate,
  ExceptionInfo *exception)
{
  Image
    *crop_image;

  PointInfo
    extent[4],
    min,
    max;

  RectangleInfo
    geometry,
    page;

  register long
    i;

  /*
    Calculate the rotated image size.  Start from the four corners of the
    original width x height box, centered on the origin.
  */
  extent[0].x=(double) (-width/2.0);
  extent[0].y=(double) (-height/2.0);
  extent[1].x=(double) width/2.0;
  extent[1].y=(double) (-height/2.0);
  extent[2].x=(double) (-width/2.0);
  extent[2].y=(double) height/2.0;
  extent[3].x=(double) width/2.0;
  extent[3].y=(double) height/2.0;
  for (i=0; i < 4; i++)
  {
    /*
      Shear each corner: X shear, then Y shear of the already X-sheared x,
      then a second X shear when a full rotation is performed -- matching
      the three-shear rotation decomposition (Paeth) cited in the file
      header.  The order of these updates is significant.
    */
    extent[i].x+=x_shear*extent[i].y;
    extent[i].y+=y_shear*extent[i].x;
    if (rotate != MagickFalse)
      extent[i].x+=x_shear*extent[i].y;
    /* translate back into the sheared image's coordinate system */
    extent[i].x+=(double) (*image)->columns/2.0;
    extent[i].y+=(double) (*image)->rows/2.0;
  }
  /*
    Axis-aligned bounding box of the four sheared corners.
  */
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  geometry.x=(long) (min.x+0.5);
  geometry.y=(long) (min.y+0.5);
  geometry.width=(unsigned long) ((long) (max.x+0.5)-(long) (min.x+0.5));
  geometry.height=(unsigned long) ((long) (max.y+0.5)-(long) (min.y+0.5));
  /*
    Crop with a zeroed page so offsets are interpreted relative to the
    sheared canvas; the original page geometry is restored afterwards.
  */
  page=(*image)->page;
  (void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page);
  crop_image=CropImage(*image,&geometry,exception);
  (*image)->page=page;
  if (crop_image != (Image *) NULL)
    {
      crop_image->page=page;
      /* on success, replace the caller's image with the cropped one */
      *image=DestroyImage(*image);
      *image=crop_image;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s k e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeskewImage() removes skew from the image. Skew is an artifact that
% occurs in scanned images because of the camera being misaligned,
% imperfections in the scanning or surface, or simply because the paper was
% not placed completely flat when scanned.
%
% The format of the DeskewImage method is:
%
% Image *DeskewImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: separate background from foreground.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Backing store for a Radon accumulator: a width x height grid of 16-bit
  cells kept in heap memory or an anonymous mapping (MemoryCache), a
  memory-mapped temporary file (MapCache), or a plain temporary file
  accessed with read/write (DiskCache), depending on available resources.
*/
typedef struct _RadonInfo
{
  CacheType
    type;                 /* MemoryCache, MapCache, or DiskCache */

  unsigned long
    width,                /* cells per row */
    height;               /* cells per column */

  MagickSizeType
    length;               /* total size of the cell store in bytes */

  MagickBooleanType
    mapped;               /* MemoryCache only: cells came from MapBlob() */

  char
    path[MaxTextExtent];  /* backing file path (DiskCache/MapCache) */

  int
    file;                 /* backing file descriptor, or -1 */

  unsigned short
    *cells;               /* cell data; NULL when type == DiskCache */
} RadonInfo;
/*
  Release the cell storage and any associated resources, then free the
  RadonInfo structure itself.  Always returns NULL for convenient
  assignment back to the caller's pointer.
*/
static RadonInfo *DestroyRadonInfo(RadonInfo *radon_info)
{
  assert(radon_info != (RadonInfo *) NULL);
  switch (radon_info->type)
  {
    case MemoryCache:
    {
      if (radon_info->mapped == MagickFalse)
        radon_info->cells=(unsigned short *) RelinquishMagickMemory(
          radon_info->cells);
      else
        radon_info->cells=(unsigned short *) UnmapBlob(radon_info->cells,
          (size_t) radon_info->length);
      RelinquishMagickResource(MemoryResource,radon_info->length);
      break;
    }
    case MapCache:
    {
      radon_info->cells=(unsigned short *) UnmapBlob(radon_info->cells,(size_t)
        radon_info->length);
      RelinquishMagickResource(MapResource,radon_info->length);
    }
    /*
      No break: a map cache is backed by a disk file, so fall through to
      the DiskCache case to close and remove it.
    */
    case DiskCache:
    {
      if (radon_info->file != -1)
        (void) close(radon_info->file);
      (void) RelinquishUniqueFileResource(radon_info->path);
      RelinquishMagickResource(DiskResource,radon_info->length);
      break;
    }
    default:
      break;
  }
  return((RadonInfo *) RelinquishMagickMemory(radon_info));
}
/*
  Zero every cell of the Radon accumulator.  Returns MagickTrue on
  success, MagickFalse if a disk-backed write fails.
*/
static MagickBooleanType ResetRadonCells(RadonInfo *radon_info)
{
  long
    y;

  register long
    x;

  ssize_t
    count;

  unsigned short
    value;

  if (radon_info->type != DiskCache)
    {
      /* in-core caches can be cleared with a single memset */
      (void) ResetMagickMemory(radon_info->cells,0,(size_t) radon_info->length);
      return(MagickTrue);
    }
  /*
    Disk-backed cache: rewind and write one zero cell at a time, stopping
    at the first short write.
  */
  value=0;
  (void) MagickSeek(radon_info->file,0,SEEK_SET);
  for (y=0; y < (long) radon_info->height; y++)
    for (x=0; x < (long) radon_info->width; x++)
    {
      count=write(radon_info->file,&value,sizeof(*radon_info->cells));
      if (count != (ssize_t) sizeof(*radon_info->cells))
        return(MagickFalse);
    }
  return(MagickTrue);
}
/*
  Allocate a width x height grid of 16-bit cells.  Storage is tried in
  order: heap memory, anonymous map (both MemoryCache), then a unique
  temporary file (DiskCache), optionally memory-mapped (MapCache).
  Returns NULL on allocation failure; other failures raise an exception
  and return NULL via DestroyRadonInfo().
*/
static RadonInfo *AcquireRadonInfo(const Image *image,const unsigned long width,
  const unsigned long height,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  RadonInfo
    *radon_info;

  radon_info=(RadonInfo *) AcquireMagickMemory(sizeof(*radon_info));
  if (radon_info == (RadonInfo *) NULL)
    return((RadonInfo *) NULL);
  (void) ResetMagickMemory(radon_info,0,sizeof(*radon_info));
  radon_info->width=width;
  radon_info->height=height;
  radon_info->length=(MagickSizeType) width*height*sizeof(*radon_info->cells);
  radon_info->type=MemoryCache;
  status=AcquireMagickResource(AreaResource,radon_info->length);
  /* the second clause rejects lengths that do not fit in a size_t */
  if ((status != MagickFalse) &&
      (radon_info->length == (MagickSizeType) ((size_t) radon_info->length)))
    {
      status=AcquireMagickResource(MemoryResource,radon_info->length);
      if (status != MagickFalse)
        {
          radon_info->mapped=MagickFalse;
          radon_info->cells=(unsigned short *) AcquireMagickMemory((size_t)
            radon_info->length);
          if (radon_info->cells == (unsigned short *) NULL)
            {
              /* heap allocation failed; try an anonymous mapping instead */
              radon_info->mapped=MagickTrue;
              radon_info->cells=(unsigned short *) MapBlob(-1,IOMode,0,(size_t)
                radon_info->length);
            }
          if (radon_info->cells == (unsigned short *) NULL)
            RelinquishMagickResource(MemoryResource,radon_info->length);
        }
    }
  radon_info->file=(-1);
  if (radon_info->cells == (unsigned short *) NULL)
    {
      /*
        Fall back to a disk-backed cache.
      */
      status=AcquireMagickResource(DiskResource,radon_info->length);
      if (status == MagickFalse)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
            "CacheResourcesExhausted","`%s'",image->filename);
          return(DestroyRadonInfo(radon_info));
        }
      radon_info->type=DiskCache;
      /*
        NOTE(review): MemoryResource is acquired here on the disk path even
        though no heap cells are allocated; looks inconsistent with the
        accounting above -- confirm against the resource bookkeeping.
      */
      (void) AcquireMagickResource(MemoryResource,radon_info->length);
      radon_info->file=AcquireUniqueFileResource(radon_info->path);
      if (radon_info->file == -1)
        return(DestroyRadonInfo(radon_info));
      status=AcquireMagickResource(MapResource,radon_info->length);
      if (status != MagickFalse)
        {
          /* zero-fill the file, then try to upgrade to a memory map */
          status=ResetRadonCells(radon_info);
          if (status != MagickFalse)
            {
              radon_info->cells=(unsigned short *) MapBlob(radon_info->file,
                IOMode,0,(size_t) radon_info->length);
              if (radon_info->cells != (unsigned short *) NULL)
                radon_info->type=MapCache;
              else
                RelinquishMagickResource(MapResource,radon_info->length);
            }
        }
    }
  return(radon_info);
}
/* Return the smaller of two size_t values. */
static inline size_t MagickMin(const size_t x,const size_t y)
{
  return(x < y ? x : y);
}
/*
  Read length bytes at the given file offset of the disk-backed cell
  store into buffer, retrying on EINTR.  Returns the number of bytes
  read, or -1 on failure.  Without pread() the seek+read pair is not
  atomic, so it is serialized across OpenMP threads; note the braces of
  that critical block intentionally span the preprocessor regions below.
*/
static inline ssize_t ReadRadonCell(const RadonInfo *radon_info,
  const off_t offset,const size_t length,unsigned char *buffer)
{
  register ssize_t
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PPREAD)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
  {
    i=(-1);  /* defined error return if the seek below fails */
    if (MagickSeek(radon_info->file,offset,SEEK_SET) >= 0)
      {
#endif
        count=0;
        for (i=0; i < (ssize_t) length; i+=count)
        {
#if !defined(MAGICKCORE_HAVE_PPREAD)
          count=read(radon_info->file,buffer+i,MagickMin(length-i,(size_t)
            SSIZE_MAX));
#else
          count=pread(radon_info->file,buffer+i,MagickMin(length-i,(size_t)
            SSIZE_MAX),(off_t) (offset+i));
#endif
          if (count > 0)
            continue;
          count=0;
          if (errno != EINTR)
            {
              /* hard error: abort and signal failure to the caller */
              i=(-1);
              break;
            }
        }
#if !defined(MAGICKCORE_HAVE_PPREAD)
      }
  }
#endif
  return(i);
}
/*
  Write length bytes from buffer at the given file offset of the
  disk-backed cell store, retrying on EINTR.  Returns the number of
  bytes written, or -1 on failure.  Without pwrite() the seek+write
  pair is serialized across OpenMP threads; the braces of that critical
  block intentionally span the preprocessor regions below.
*/
static inline ssize_t WriteRadonCell(const RadonInfo *radon_info,
  const off_t offset,const size_t length,const unsigned char *buffer)
{
  register ssize_t
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
  {
    i=(-1);  /* fix: defined error return if the seek fails (mirrors ReadRadonCell) */
    if (MagickSeek(radon_info->file,offset,SEEK_SET) >= 0)
      {
#endif
        count=0;
        for (i=0; i < (ssize_t) length; i+=count)
        {
#if !defined(MAGICKCORE_HAVE_PWRITE)
          count=write(radon_info->file,buffer+i,MagickMin(length-i,(size_t)
            SSIZE_MAX));
#else
          count=pwrite(radon_info->file,buffer+i,MagickMin(length-i,(size_t)
            SSIZE_MAX),(off_t) (offset+i));
#endif
          if (count > 0)
            continue;
          count=0;
          if (errno != EINTR)
            {
              /* hard error: abort and signal failure to the caller */
              i=(-1);
              break;
            }
        }
#if !defined(MAGICKCORE_HAVE_PWRITE)
      }
  }
#endif
  return(i);
}
/*
  Fetch one accumulator cell; out-of-range coordinates read as zero.
*/
static inline unsigned short GetRadonCell(const RadonInfo *radon_info,
  const long x,const long y)
{
  off_t
    cell;

  unsigned short
    value;

  /* cells are stored column-major: index = height*x + y */
  cell=(off_t) radon_info->height*x+y;
  if (cell < 0)
    return(0);
  if ((MagickSizeType) (cell*sizeof(*radon_info->cells)) >= radon_info->length)
    return(0);
  if (radon_info->type != DiskCache)
    return(radon_info->cells[cell]);
  /* disk-backed cache: read the cell from the backing file */
  value=0;
  (void) ReadRadonCell(radon_info,cell*sizeof(*radon_info->cells),
    sizeof(*radon_info->cells),(unsigned char *) &value);
  return(value);
}
/*
  Store one accumulator cell; out-of-range coordinates are ignored and
  reported as MagickFalse.
*/
static inline MagickBooleanType SetRadonCell(const RadonInfo *radon_info,
  const long x,const long y,const unsigned short value)
{
  off_t
    cell;

  ssize_t
    count;

  /* cells are stored column-major: index = height*x + y */
  cell=(off_t) radon_info->height*x+y;
  if (cell < 0)
    return(MagickFalse);
  if ((MagickSizeType) (cell*sizeof(*radon_info->cells)) >= radon_info->length)
    return(MagickFalse);
  if (radon_info->type != DiskCache)
    {
      radon_info->cells[cell]=value;
      return(MagickTrue);
    }
  /* disk-backed cache: write the cell to the backing file */
  count=WriteRadonCell(radon_info,cell*sizeof(*radon_info->cells),
    sizeof(*radon_info->cells),(unsigned char *) &value);
  return(count == (ssize_t) sizeof(*radon_info->cells) ? MagickTrue :
    MagickFalse);
}
/*
  Accumulate the projection profile for one shear direction.  The grid is
  combined in log2(width) butterfly passes (ping-ponging between the two
  cell buffers); afterwards, each column's "strength" -- the sum of
  squared differences between vertically adjacent cells -- is written
  into projection[] at an index selected by sign (-1 or +1), so the two
  directions fill complementary halves of the 2*width-1 array.
*/
static void RadonProjection(RadonInfo *source_cells,
  RadonInfo *destination_cells,const long sign,unsigned long *projection)
{
  RadonInfo
    *swap;

  register long
    x;

  register RadonInfo
    *p,
    *q;

  unsigned long
    step;

  p=source_cells;
  q=destination_cells;
  for (step=1; step < p->width; step*=2)
  {
    for (x=0; x < (long) p->width; x+=2*step)
    {
      long
        y;

      register long
        i;

      unsigned short
        cell;

      /*
        Combine column pairs (x+i, x+i+step) at progressively larger
        vertical shifts; the three y-loops handle the interior, the
        one-row overlap, and the rows past the shifted partner.
      */
      for (i=0; i < (long) step; i++)
      {
        for (y=0; y < (long) (p->height-i-1); y++)
        {
          cell=GetRadonCell(p,x+i,y);
          (void) SetRadonCell(q,x+2*i,y,cell+GetRadonCell(p,x+i+step,y+i));
          (void) SetRadonCell(q,x+2*i+1,y,cell+GetRadonCell(p,x+i+step,y+i+1));
        }
        for ( ; y < (long) (p->height-i); y++)
        {
          cell=GetRadonCell(p,x+i,y);
          (void) SetRadonCell(q,x+2*i,y,cell+GetRadonCell(p,x+i+step,y+i));
          (void) SetRadonCell(q,x+2*i+1,y,cell);
        }
        for ( ; y < (long) p->height; y++)
        {
          cell=GetRadonCell(p,x+i,y);
          (void) SetRadonCell(q,x+2*i,y,cell);
          (void) SetRadonCell(q,x+2*i+1,y,cell);
        }
      }
    }
    /* ping-pong: the destination of this pass feeds the next one */
    swap=p;
    p=q;
    q=swap;
  }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for
#endif
  for (x=0; x < (long) p->width; x++)
  {
    register long
      y;

    unsigned long
      sum;

    /* projection strength: sum of squared vertical differences */
    sum=0;
    for (y=0; y < (long) (p->height-1); y++)
    {
      long
        delta;

      delta=GetRadonCell(p,x,y)-(long) GetRadonCell(p,x,y+1);
      sum+=delta*delta;
    }
    projection[p->width+sign*x-1]=sum;
  }
}
/*
  Compute the discrete Radon-style projection profile of a thresholded
  bilevel version of the image, for both shear directions, into
  projection[] (2*width-1 entries).  Each image row is packed into bytes
  whose population counts become the initial accumulator cells; the
  second pass packs the bytes mirrored so RadonProjection covers the
  opposite direction.  Returns MagickFalse on allocation or pixel-read
  failure.
*/
static MagickBooleanType RadonTransform(const Image *image,
  const double threshold,unsigned long *projection,ExceptionInfo *exception)
{
  long
    y;

  MagickBooleanType
    status;

  RadonInfo
    *destination_cells,
    *source_cells;

  register long
    i;

  unsigned char
    byte;

  unsigned long
    count,
    width;

  unsigned short
    bits[256];

  ViewInfo
    *image_view;

  /* width = smallest power of two >= number of bytes per bilevel row */
  for (width=1; width < ((image->columns+7)/8); width<<=1) ;
  source_cells=AcquireRadonInfo(image,width,image->rows,exception);
  destination_cells=AcquireRadonInfo(image,width,image->rows,exception);
  if ((source_cells == (RadonInfo *) NULL) ||
      (destination_cells == (RadonInfo *) NULL))
    {
      if (destination_cells != (RadonInfo *) NULL)
        destination_cells=DestroyRadonInfo(destination_cells);
      if (source_cells != (RadonInfo *) NULL)
        source_cells=DestroyRadonInfo(source_cells);
      return(MagickFalse);
    }
  if (ResetRadonCells(source_cells) == MagickFalse)
    {
      destination_cells=DestroyRadonInfo(destination_cells);
      source_cells=DestroyRadonInfo(source_cells);
      return(MagickFalse);
    }
  /* bits[v] = population count (number of set bits) of byte value v */
  for (i=0; i < 256; i++)
  {
    byte=(unsigned char) i;
    for (count=0; byte != 0; byte>>=1)
      count+=byte & 0x01;
    bits[i]=(unsigned short) count;
  }
  status=MagickTrue;
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (long) image->rows; y++)
  {
    register const PixelPacket
      *p;

    register long
      i,
      x;

    unsigned long
      bit,
      byte;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      First pass: pack the thresholded row into bytes right-to-left
      (cells are filled with --i from the end of the row).
    */
    bit=0;
    byte=0;
    i=(long) (image->columns+7)/8;
    for (x=0; x < (long) image->columns; x++)
    {
      byte<<=1;
      if (((MagickRealType) p->red < threshold) ||
          ((MagickRealType) p->green < threshold) ||
          ((MagickRealType) p->blue < threshold))
        byte|=0x01;
      bit++;
      if (bit == 8)
        {
          (void) SetRadonCell(source_cells,--i,y,bits[byte]);
          bit=0;
          byte=0;
        }
      p++;
    }
    if (bit != 0)
      {
        /* flush the final partial byte, left-aligned */
        byte<<=(8-bit);
        (void) SetRadonCell(source_cells,--i,y,bits[byte]);
      }
  }
  RadonProjection(source_cells,destination_cells,-1,projection);
  (void) ResetRadonCells(source_cells);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (long) image->rows; y++)
  {
    register const PixelPacket
      *p;

    register long
      i,
      x;

    unsigned long
      bit,
      byte;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Second pass: pack the same row left-to-right (cells filled with
      i++), the mirrored layout for the opposite shear direction.
    */
    bit=0;
    byte=0;
    i=0;
    for (x=0; x < (long) image->columns; x++)
    {
      byte<<=1;
      if (((MagickRealType) p->red < threshold) ||
          ((MagickRealType) p->green < threshold) ||
          ((MagickRealType) p->blue < threshold))
        byte|=0x01;
      bit++;
      if (bit == 8)
        {
          (void) SetRadonCell(source_cells,i++,y,bits[byte]);
          bit=0;
          byte=0;
        }
      p++;
    }
    if (bit != 0)
      {
        /* flush the final partial byte, left-aligned */
        byte<<=(8-bit);
        (void) SetRadonCell(source_cells,i++,y,bits[byte]);
      }
  }
  RadonProjection(source_cells,destination_cells,1,projection);
  image_view=DestroyCacheView(image_view);
  destination_cells=DestroyRadonInfo(destination_cells);
  source_cells=DestroyRadonInfo(source_cells);
  return(MagickTrue);
}
/*
  Set image->background_color to the average color of the offset-wide
  border of the image.  A non-positive offset, or a border from which no
  pixels could be read, leaves the background color unchanged (the
  latter previously divided by zero).
*/
static void GetImageBackgroundColor(Image *image,const long offset,
  ExceptionInfo *exception)
{
  long
    y;

  MagickPixelPacket
    background;

  MagickRealType
    count;

  ViewInfo
    *image_view;

  /*
    Compute average background color.
  */
  if (offset <= 0)
    return;
  GetMagickPixelPacket(image,&background);
  count=0.0;
  image_view=AcquireCacheView(image);
  for (y=0; y < (long) image->rows; y++)
  {
    register const PixelPacket
      *p;

    register long
      x;

    /* only border rows/columns within offset of an edge contribute */
    if ((y >= offset) && (y < ((long) image->rows-offset)))
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      continue;
    for (x=0; x < (long) image->columns; x++)
    {
      if ((x >= offset) && (x < ((long) image->columns-offset)))
        {
          p++;
          continue;
        }
      background.red+=QuantumScale*p->red;
      background.green+=QuantumScale*p->green;
      background.blue+=QuantumScale*p->blue;
      background.opacity+=QuantumScale*p->opacity;
      count++;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (count == 0.0)
    return;  /* no border pixels readable; avoid division by zero */
  image->background_color.red=RoundToQuantum((MagickRealType) QuantumRange*
    background.red/count);
  image->background_color.green=RoundToQuantum((MagickRealType) QuantumRange*
    background.green/count);
  image->background_color.blue=RoundToQuantum((MagickRealType) QuantumRange*
    background.blue/count);
  image->background_color.opacity=RoundToQuantum((MagickRealType) QuantumRange*
    background.opacity/count);
}
/*
  DeskewImage() straightens a (scanned) image: it estimates the skew angle
  with a Radon transform over the thresholded image, rotates the image by
  the negated angle, and — when the "deskew:auto-crop" artifact is set —
  additionally crops the rotated image to its bounding box using the
  border-averaged background color.

  Returns the deskewed image or NULL (with 'exception' set) on failure.

  Change: the local 'sum' accumulator of the original was computed but
  never read; the dead work has been removed.
*/
MagickExport Image *DeskewImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
  const char
    *artifact;

  double
    degrees;

  Image
    *clone_image,
    *crop_image,
    *median_image,
    *rotate_image;

  long
    skew;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  register long
    i;

  unsigned long
    max_projection,
    *projection,
    width;

  /*
    Compute deskew angle.
  */
  /* Round the byte-width of a row up to the next power of two. */
  for (width=1; width < ((image->columns+7)/8); width<<=1) ;
  projection=(unsigned long *) AcquireQuantumMemory((size_t) (2*width-1),
    sizeof(*projection));
  if (projection == (unsigned long *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  status=RadonTransform(image,threshold,projection,exception);
  if (status == MagickFalse)
    {
      projection=(unsigned long *) RelinquishMagickMemory(projection);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* The projection bin with the largest response gives the skew. */
  max_projection=0;
  skew=0;
  for (i=0; i < (long) (2*width-1); i++)
  {
    if (projection[i] > max_projection)
      {
        skew=i-(long) width+1;
        max_projection=projection[i];
      }
  }
  projection=(unsigned long *) RelinquishMagickMemory(projection);
  /*
    Deskew image.
  */
  degrees=RadiansToDegrees(-atan((double) skew/width/8));
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule()," Deskew angle: %g",
      degrees);
  artifact=GetImageArtifact(image,"deskew:auto-crop");
  if (artifact == (const char *) NULL)
    return(RotateImage(image,degrees,exception));
  /*
    Auto-crop image.
  */
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  /* Fill the triangles exposed by the rotation with the border average. */
  GetImageBackgroundColor(clone_image,atol(artifact),exception);
  rotate_image=RotateImage(clone_image,degrees,exception);
  clone_image=DestroyImage(clone_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  /* Median-filter only to compute a noise-robust bounding box. */
  median_image=MedianFilterImage(rotate_image,0.0,exception);
  if (median_image == (Image *) NULL)
    {
      rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  geometry=GetImageBoundingBox(median_image,exception);
  median_image=DestroyImage(median_image);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule()," Deskew geometry: "
      "%lux%lu%+ld%+ld",geometry.width,geometry.height,geometry.x,geometry.y);
  crop_image=CropImage(rotate_image,&geometry,exception);
  rotate_image=DestroyImage(rotate_image);
  return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n t e g r a l R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IntegralRotateImage() rotates the image an integral of 90 degrees. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the rotated image.
%
% The format of the IntegralRotateImage method is:
%
% Image *IntegralRotateImage(const Image *image,unsigned long rotations,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o rotations: Specifies the number of 90 degree rotations.
%
*/
static Image *IntegralRotateImage(const Image *image,unsigned long rotations,
  ExceptionInfo *exception)
{
#define TileHeight 128
#define TileWidth 128
#define RotateImageTag "Rotate/Image"

  Image
    *rotate_image;

  long
    progress,
    y;

  MagickBooleanType
    status;

  RectangleInfo
    page;

  ViewInfo
    *image_view,
    *rotate_view;

  /*
    Initialize rotated image attributes.
  */
  assert(image != (Image *) NULL);
  page=image->page;
  rotations%=4;
  /* 90/270 degree rotations swap the image dimensions. */
  if ((rotations == 1) || (rotations == 3))
    rotate_image=CloneImage(image,image->rows,image->columns,MagickTrue,
      exception);
  else
    rotate_image=CloneImage(image,image->columns,image->rows,MagickTrue,
      exception);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Integral rotate the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  rotate_view=AcquireCacheView(rotate_image);
  switch (rotations)
  {
    case 0:
    {
      /*
        Rotate 0 degrees: straight row-by-row copy of pixels and any
        colormap/CMYK indexes.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
      for (y=0; y < (long) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register const IndexPacket
          *indexes;

        register const PixelPacket
          *p;

        register IndexPacket
          *rotate_indexes;

        register PixelPacket
          *q;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
          exception);
        q=QueueCacheViewAuthenticPixels(rotate_view,0,y,
          rotate_image->columns,1,exception);
        if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewVirtualIndexQueue(image_view);
        (void) CopyMagickMemory(q,p,image->columns*sizeof(*p));
        if (indexes != (IndexPacket *) NULL)
          {
            rotate_indexes=GetCacheViewAuthenticIndexQueue(rotate_view);
            (void) CopyMagickMemory(rotate_indexes,indexes,image->columns*
              sizeof(*indexes));
          }
        sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
            proceed=SetImageProgress(image,RotateImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      break;
    }
    case 1:
    {
      long
        tile_y;

      /*
        Rotate 90 degrees: process the source in TileWidth x TileHeight
        tiles for cache locality; each source tile column becomes one
        destination row.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
      for (tile_y=0; tile_y < (long) image->rows; tile_y+=TileHeight)
      {
        register long
          tile_x;

        if (status == MagickFalse)
          continue;
        for (tile_x=0; tile_x < (long) image->columns; tile_x+=TileWidth)
        {
          MagickBooleanType
            sync;

          register const IndexPacket
            *indexes;

          register const PixelPacket
            *p;

          register IndexPacket
            *rotate_indexes;

          register long
            y;

          register PixelPacket
            *q;

          unsigned long
            tile_height,
            tile_width;

          /* Clip the tile at the right and bottom image edges. */
          tile_width=TileWidth;
          if ((tile_x+TileWidth) > (long) image->columns)
            tile_width=(unsigned long) (TileWidth-(tile_x+TileWidth-
              image->columns));
          tile_height=TileHeight;
          if ((tile_y+TileHeight) > (long) image->rows)
            tile_height=(unsigned long) (TileHeight-(tile_y+TileHeight-
              image->rows));
          p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,tile_width,
            tile_height,exception);
          if (p == (const PixelPacket *) NULL)
            {
              status=MagickFalse;
              break;
            }
          indexes=GetCacheViewVirtualIndexQueue(image_view);
          for (y=0; y < (long) tile_width; y++)
          {
            register const PixelPacket
              *tile_pixels;

            register long
              x;

            q=QueueCacheViewAuthenticPixels(rotate_view,
              rotate_image->columns-(tile_y+tile_height),y+tile_x,tile_height,
              1,exception);
            if (q == (PixelPacket *) NULL)
              {
                status=MagickFalse;
                break;
              }
            /* Walk the source column bottom-to-top to fill one row. */
            tile_pixels=p+(tile_height-1)*tile_width+y;
            for (x=0; x < (long) tile_height; x++)
            {
              *q++=(*tile_pixels);
              tile_pixels-=tile_width;
            }
            rotate_indexes=GetCacheViewAuthenticIndexQueue(rotate_view);
            if ((indexes != (IndexPacket *) NULL) &&
                (rotate_indexes != (IndexPacket *) NULL))
              {
                register const IndexPacket
                  *tile_indexes;

                /* Mirror the pixel walk for the colormap/CMYK indexes. */
                tile_indexes=indexes+(tile_height-1)*tile_width+y;
                for (x=0; x < (long) tile_height; x++)
                {
                  *rotate_indexes++=(*tile_indexes);
                  tile_indexes-=tile_width;
                }
              }
            sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
            if (sync == MagickFalse)
              status=MagickFalse;
          }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
            proceed=SetImageProgress(image,RotateImageTag,progress+=TileHeight,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      /* Adjust the virtual canvas (page) for the swapped geometry. */
      Swap(page.width,page.height);
      Swap(page.x,page.y);
      if (page.width != 0)
        page.x=(long) (page.width-rotate_image->columns-page.x);
      break;
    }
    case 2:
    {
      /*
        Rotate 180 degrees: reverse each row and write it to the
        vertically mirrored destination row.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
      for (y=0; y < (long) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register const IndexPacket
          *indexes;

        register const PixelPacket
          *p;

        register IndexPacket
          *rotate_indexes;

        register long
          x;

        register PixelPacket
          *q;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
          exception);
        q=QueueCacheViewAuthenticPixels(rotate_view,0,(long) (image->rows-
          y-1),image->columns,1,exception);
        if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewVirtualIndexQueue(image_view);
        q+=image->columns;
        for (x=0; x < (long) image->columns; x++)
          *--q=(*p++);
        if (indexes != (IndexPacket *) NULL)
          {
            rotate_indexes=GetCacheViewAuthenticIndexQueue(rotate_view);
            for (x=0; x < (long) image->columns; x++)
              rotate_indexes[image->columns-x-1]=indexes[x];
          }
        sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
            proceed=SetImageProgress(image,RotateImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      if (page.width != 0)
        page.x=(long) (page.width-rotate_image->columns-page.x);
      if (page.height != 0)
        page.y=(long) (page.height-rotate_image->rows-page.y);
      break;
    }
    case 3:
    {
      long
        tile_y;

      /*
        Rotate 270 degrees: tiled like the 90-degree case, but each
        source tile column is walked top-to-bottom.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
      for (tile_y=0; tile_y < (long) image->rows; tile_y+=TileHeight)
      {
        register long
          tile_x;

        if (status == MagickFalse)
          continue;
        for (tile_x=0; tile_x < (long) image->columns; tile_x+=TileWidth)
        {
          MagickBooleanType
            sync;

          register const IndexPacket
            *indexes;

          register const PixelPacket
            *p;

          register IndexPacket
            *rotate_indexes;

          register long
            y;

          register PixelPacket
            *q;

          unsigned long
            tile_height,
            tile_width;

          /* Clip the tile at the right and bottom image edges. */
          tile_width=TileWidth;
          if ((tile_x+TileWidth) > (long) image->columns)
            tile_width=(unsigned long) (TileWidth-(tile_x+TileWidth-
              image->columns));
          tile_height=TileHeight;
          if ((tile_y+TileHeight) > (long) image->rows)
            tile_height=(unsigned long) (TileHeight-(tile_y+TileHeight-
              image->rows));
          p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,tile_width,
            tile_height,exception);
          if (p == (const PixelPacket *) NULL)
            {
              status=MagickFalse;
              break;
            }
          indexes=GetCacheViewVirtualIndexQueue(image_view);
          for (y=0; y < (long) tile_width; y++)
          {
            register const PixelPacket
              *tile_pixels;

            register long
              x;

            q=QueueCacheViewAuthenticPixels(rotate_view,tile_y,(long)
              y+rotate_image->rows-(tile_x+tile_width),tile_height,1,exception);
            if (q == (PixelPacket *) NULL)
              {
                status=MagickFalse;
                break;
              }
            /* Walk the source column top-to-bottom to fill one row. */
            tile_pixels=p+(tile_width-1)-y;
            for (x=0; x < (long) tile_height; x++)
            {
              *q++=(*tile_pixels);
              tile_pixels+=tile_width;
            }
            rotate_indexes=GetCacheViewAuthenticIndexQueue(rotate_view);
            if ((indexes != (IndexPacket *) NULL) &&
                (rotate_indexes != (IndexPacket *) NULL))
              {
                register const IndexPacket
                  *tile_indexes;

                /* Mirror the pixel walk for the colormap/CMYK indexes. */
                tile_indexes=indexes+(tile_width-1)-y;
                for (x=0; x < (long) tile_height; x++)
                {
                  *rotate_indexes++=(*tile_indexes);
                  tile_indexes+=tile_width;
                }
              }
            sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
            if (sync == MagickFalse)
              status=MagickFalse;
          }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
            proceed=SetImageProgress(image,RotateImageTag,progress+=TileHeight,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      /* Adjust the virtual canvas (page) for the swapped geometry. */
      Swap(page.width,page.height);
      Swap(page.x,page.y);
      if (page.height != 0)
        page.y=(long) (page.height-rotate_image->rows-page.y);
      break;
    }
  }
  rotate_view=DestroyCacheView(rotate_view);
  image_view=DestroyCacheView(image_view);
  rotate_image->type=image->type;
  rotate_image->page=page;
  /* On any per-row/tile failure, free the partial result and return NULL. */
  if (status == MagickFalse)
    rotate_image=DestroyImage(rotate_image);
  return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ X S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% XShearImage() shears the image in the X direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a vertical
% Y-axis. X shears will widen an image creating 'empty' triangles on the left
% and right sides of the source image.
%
% The format of the XShearImage method is:
%
% MagickBooleanType XShearImage(Image *image,const MagickRealType degrees,
% const unsigned long width,const unsigned long height,
% const long x_offset,const long y_offset)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A MagickRealType representing the shearing angle along the X
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
*/
/*
  Blend_() returns the sum of the two channel values p and q, each weighted
  by its normalized coverage term (1 - QuantumScale*alpha) respectively
  (1 - QuantumScale*beta).
*/
static inline MagickRealType Blend_(const MagickRealType p,
  const MagickRealType alpha,const MagickRealType q,const MagickRealType beta)
{
  MagickRealType
    p_weight,
    q_weight;

  p_weight=1.0-QuantumScale*alpha;
  q_weight=1.0-QuantumScale*beta;
  return(p_weight*p+q_weight*q);
}
/*
  MagickCompositeBlend() blends pixel 'p' (opacity 'alpha') with pixel 'q'
  (opacity 'beta') according to the fractional coverage 'area' and stores
  the result in 'composite'.  Used by the shear routines to anti-alias the
  sub-pixel shift of each row/column.
*/
static inline void MagickCompositeBlend(const MagickPixelPacket *p,
  const MagickRealType alpha,const MagickPixelPacket *q,
  const MagickRealType beta,const MagickRealType area,
  MagickPixelPacket *composite)
{
  MagickRealType
    gamma;

  /* Both inputs fully transparent: nothing to blend, keep 'p' as-is. */
  if ((alpha == TransparentOpacity) && (beta == TransparentOpacity))
    {
      *composite=(*p);
      return;
    }
  /* Combined normalized coverage of the two contributions, clamped to 1. */
  gamma=RoundToUnity((1.0-QuantumScale*(QuantumRange-(1.0-area)*
    (QuantumRange-alpha)))+(1.0-QuantumScale*(QuantumRange-area*
    (QuantumRange-beta))));
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  /* Guard against division by a (near-)zero coverage. */
  gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
  composite->red=gamma*Blend_((MagickRealType) p->red,
    (MagickRealType) QuantumRange-(1.0-area)*(QuantumRange-alpha),
    (MagickRealType) q->red,(MagickRealType) (QuantumRange-area*(QuantumRange-
    beta)));
  composite->green=gamma*Blend_((MagickRealType) p->green,
    (MagickRealType) QuantumRange-(1.0-area)*(QuantumRange-alpha),
    (MagickRealType) q->green,(MagickRealType) (QuantumRange-area*(QuantumRange-
    beta)));
  composite->blue=gamma*Blend_((MagickRealType) p->blue,
    (MagickRealType) QuantumRange-(1.0-area)*(QuantumRange-alpha),
    (MagickRealType) q->blue,(MagickRealType) (QuantumRange-area*(QuantumRange-
    beta)));
  /* The index channel carries black (K) only for CMYK pixels. */
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*Blend_((MagickRealType) p->index,
      (MagickRealType) QuantumRange-(1.0-area)*(QuantumRange-alpha),
      (MagickRealType) q->index,(MagickRealType) (QuantumRange-area*
      (QuantumRange-beta)));
}
static MagickBooleanType XShearImage(Image *image,const MagickRealType degrees,
  const unsigned long width,const unsigned long height,const long x_offset,
  const long y_offset)
{
#define XShearImageTag "XShear/Image"

  typedef enum
  {
    LEFT,
    RIGHT
  } ShearDirection;

  ExceptionInfo
    *exception;

  long
    progress,
    y;

  MagickBooleanType
    status;

  MagickPixelPacket
    background;

  ViewInfo
    *image_view;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Background color fills the triangles exposed by the shear. */
  GetMagickPixelPacket(image,&background);
  SetMagickPixelPacket(image,&image->background_color,(IndexPacket *) NULL,
    &background);
  if (image->colorspace == CMYKColorspace)
    ConvertRGBToCMYK(&background);
  /*
    XShear image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress, status)
#endif
  for (y=0; y < (long) height; y++)
  {
    IndexPacket
      *indexes,
      *shear_indexes;

    long
      step;

    MagickPixelPacket
      pixel,
      source,
      destination;

    MagickRealType
      area,
      displacement;

    register long
      i;

    register PixelPacket
      *p,
      *q;

    ShearDirection
      direction;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewAuthenticPixels(image_view,0,y_offset+y,image->columns,1,
      exception);
    if (p == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    p+=x_offset;
    indexes+=x_offset;
    /* Each row shifts in proportion to its distance from the center row. */
    displacement=degrees*(MagickRealType) (y-height/2.0);
    if (displacement == 0.0)
      continue;
    if (displacement > 0.0)
      direction=RIGHT;
    else
      {
        displacement*=(-1.0);
        direction=LEFT;
      }
    /* Split the shift into a whole-pixel step and a fractional 'area'
       used to blend adjacent pixels (anti-aliasing). */
    step=(long) floor((double) displacement);
    area=(MagickRealType) (displacement-step);
    step++;
    pixel=background;
    GetMagickPixelPacket(image,&source);
    GetMagickPixelPacket(image,&destination);
    switch (direction)
    {
      case LEFT:
      {
        /*
          Transfer pixels left-to-right.
        */
        if (step > x_offset)
          break;
        q=p-step;
        shear_indexes=indexes-step;
        for (i=0; i < (long) width; i++)
        {
          if ((x_offset+i) < step)
            {
              /* Destination would fall outside the region; just prime
                 'pixel' with the current source pixel and advance. */
              SetMagickPixelPacket(image,++p,++indexes,&pixel);
              q++;
              shear_indexes++;
              continue;
            }
          SetMagickPixelPacket(image,p,indexes,&source);
          MagickCompositeBlend(&pixel,(MagickRealType) pixel.opacity,&source,
            (MagickRealType) p->opacity,area,&destination);
          SetPixelPacket(image,&destination,q++,shear_indexes++);
          SetMagickPixelPacket(image,p++,indexes++,&pixel);
        }
        /* Blend the trailing edge into the background, then pad. */
        MagickCompositeBlend(&pixel,(MagickRealType) pixel.opacity,&background,
          (MagickRealType) background.opacity,area,&destination);
        SetPixelPacket(image,&destination,q++,shear_indexes++);
        for (i=0; i < (step-1); i++)
          SetPixelPacket(image,&background,q++,shear_indexes++);
        break;
      }
      case RIGHT:
      {
        /*
          Transfer pixels right-to-left.
        */
        p+=width;
        indexes+=width;
        q=p+step;
        shear_indexes=indexes+step;
        for (i=0; i < (long) width; i++)
        {
          p--;
          indexes--;
          q--;
          shear_indexes--;
          /* Skip destinations that fall outside the image. */
          if ((unsigned long) (x_offset+width+step-i) >= image->columns)
            continue;
          SetMagickPixelPacket(image,p,indexes,&source);
          MagickCompositeBlend(&pixel,(MagickRealType) pixel.opacity,&source,
            (MagickRealType) p->opacity,area,&destination);
          SetPixelPacket(image,&destination,q,shear_indexes);
          SetMagickPixelPacket(image,p,indexes,&pixel);
        }
        /* Blend the leading edge into the background, then pad. */
        MagickCompositeBlend(&pixel,(MagickRealType) pixel.opacity,&background,
          (MagickRealType) background.opacity,area,&destination);
        SetPixelPacket(image,&destination,--q,--shear_indexes);
        for (i=0; i < (step-1); i++)
          SetPixelPacket(image,&background,--q,--shear_indexes);
        break;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,XShearImageTag,progress++,height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Y S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% YShearImage shears the image in the Y direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a
% horizontal X-axis. Y shears will increase the height of an image creating
% 'empty' triangles on the top and bottom of the source image.
%
% The format of the YShearImage method is:
%
% MagickBooleanType YShearImage(Image *image,const MagickRealType degrees,
% const unsigned long width,const unsigned long height,
% const long x_offset,const long y_offset)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A MagickRealType representing the shearing angle along the Y
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
*/
static MagickBooleanType YShearImage(Image *image,const MagickRealType degrees,
  const unsigned long width,const unsigned long height,const long x_offset,
  const long y_offset)
{
#define YShearImageTag "YShear/Image"

  typedef enum
  {
    UP,
    DOWN
  } ShearDirection;

  ExceptionInfo
    *exception;

  long
    progress,
    x;

  MagickBooleanType
    status;

  MagickPixelPacket
    background;

  ViewInfo
    *image_view;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Background color fills the triangles exposed by the shear. */
  GetMagickPixelPacket(image,&background);
  SetMagickPixelPacket(image,&image->background_color,(IndexPacket *) NULL,
    &background);
  if (image->colorspace == CMYKColorspace)
    ConvertRGBToCMYK(&background);
  /*
    Y Shear image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress, status)
#endif
  for (x=0; x < (long) width; x++)
  {
    IndexPacket
      *indexes,
      *shear_indexes;

    long
      step;

    MagickPixelPacket
      pixel,
      source,
      destination;

    MagickRealType
      area,
      displacement;

    register long
      i;

    register PixelPacket
      *p,
      *q;

    ShearDirection
      direction;

    if (status == MagickFalse)
      continue;
    /* Operate on one full column of pixels at a time. */
    p=GetCacheViewAuthenticPixels(image_view,x_offset+x,0,1,image->rows,
      exception);
    if (p == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    p+=y_offset;
    indexes+=y_offset;
    /* Each column shifts in proportion to its distance from the center. */
    displacement=degrees*(MagickRealType) (x-width/2.0);
    if (displacement == 0.0)
      continue;
    if (displacement > 0.0)
      direction=DOWN;
    else
      {
        displacement*=(-1.0);
        direction=UP;
      }
    /* Split the shift into a whole-pixel step and a fractional 'area'
       used to blend adjacent pixels (anti-aliasing). */
    step=(long) floor((double) displacement);
    area=(MagickRealType) (displacement-step);
    step++;
    pixel=background;
    GetMagickPixelPacket(image,&source);
    GetMagickPixelPacket(image,&destination);
    switch (direction)
    {
      case UP:
      {
        /*
          Transfer pixels top-to-bottom.
        */
        if (step > y_offset)
          break;
        q=p-step;
        shear_indexes=indexes-step;
        for (i=0; i < (long) height; i++)
        {
          if ((y_offset+i) < step)
            {
              /* Destination would fall outside the region; just prime
                 'pixel' with the current source pixel and advance. */
              SetMagickPixelPacket(image,++p,++indexes,&pixel);
              q++;
              shear_indexes++;
              continue;
            }
          SetMagickPixelPacket(image,p,indexes,&source);
          MagickCompositeBlend(&pixel,(MagickRealType) pixel.opacity,&source,
            (MagickRealType) p->opacity,area,&destination);
          SetPixelPacket(image,&destination,q++,shear_indexes++);
          SetMagickPixelPacket(image,p++,indexes++,&pixel);
        }
        /* Blend the trailing edge into the background, then pad. */
        MagickCompositeBlend(&pixel,(MagickRealType) pixel.opacity,&background,
          (MagickRealType) background.opacity,area,&destination);
        SetPixelPacket(image,&destination,q++,shear_indexes++);
        for (i=0; i < (step-1); i++)
          SetPixelPacket(image,&background,q++,shear_indexes++);
        break;
      }
      case DOWN:
      {
        /*
          Transfer pixels bottom-to-top.
        */
        p+=height;
        indexes+=height;
        q=p+step;
        shear_indexes=indexes+step;
        for (i=0; i < (long) height; i++)
        {
          p--;
          indexes--;
          q--;
          shear_indexes--;
          /* Skip destinations that fall outside the image. */
          if ((unsigned long) (y_offset+height+step-i) >= image->rows)
            continue;
          SetMagickPixelPacket(image,p,indexes,&source);
          MagickCompositeBlend(&pixel,(MagickRealType) pixel.opacity,&source,
            (MagickRealType) p->opacity,area,&destination);
          SetPixelPacket(image,&destination,q,shear_indexes);
          SetMagickPixelPacket(image,p,indexes,&pixel);
        }
        /* Blend the leading edge into the background, then pad. */
        MagickCompositeBlend(&pixel,(MagickRealType) pixel.opacity,&background,
          (MagickRealType) background.opacity,area,&destination);
        SetPixelPacket(image,&destination,--q,--shear_indexes);
        for (i=0; i < (step-1); i++)
          SetPixelPacket(image,&background,--q,--shear_indexes);
        break;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,YShearImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners. X axis. Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. RotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% RotateImage() is based on the paper "A Fast Algorithm for General
% Raster Rotation" by Alan W. Paeth. RotateImage is adapted from a similar
% method based on the Paeth paper written by Michael Halle of the Spatial
% Imaging Group, MIT Media Lab.
%
% The format of the RotateImage method is:
%
% Image *RotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  Image
    *integral_image,
    *rotate_image;

  long
    x_offset,
    y_offset;

  MagickRealType
    angle;

  PointInfo
    shear;

  RectangleInfo
    border_info;

  unsigned long
    height,
    rotations,
    width,
    y_width;

  /*
    Adjust rotation angle.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* Reduce the angle to (-45, 45] degrees plus N integral 90-degree
     turns; the integral part is handled losslessly below. */
  angle=degrees;
  while (angle < -45.0)
    angle+=360.0;
  for (rotations=0; angle > 45.0; rotations++)
    angle-=90.0;
  rotations%=4;
  /*
    Calculate shear equations.
  */
  integral_image=IntegralRotateImage(image,rotations,exception);
  if (integral_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /* Paeth decomposition: rotation = X shear, Y shear, X shear. */
  shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
  shear.y=sin((double) DegreesToRadians(angle));
  if ((shear.x == 0.0) && (shear.y == 0.0))
    return(integral_image);
  if (SetImageStorageClass(integral_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&integral_image->exception);
      integral_image=DestroyImage(integral_image);
      return(integral_image);  /* NULL after DestroyImage() */
    }
  if (integral_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel);
  /*
    Compute image size.
  */
  width=image->columns;
  height=image->rows;
  if ((rotations == 1) || (rotations == 3))
    {
      width=image->rows;
      height=image->columns;
    }
  y_width=width+(long) (fabs(shear.x)*height+0.5);
  x_offset=(long) (width+((fabs(shear.y)*height+0.5)-width)/2.0+0.5);
  y_offset=(long) (height+((fabs(shear.y)*y_width+0.5)-height)/2.0+0.5);
  /*
    Surround image with a border large enough to hold the sheared result.
  */
  integral_image->border_color=integral_image->background_color;
  integral_image->compose=CopyCompositeOp;
  border_info.width=(unsigned long) x_offset;
  border_info.height=(unsigned long) y_offset;
  rotate_image=BorderImage(integral_image,&border_info,exception);
  integral_image=DestroyImage(integral_image);
  if (rotate_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Rotate the image: three shear passes, then crop to the final frame.
  */
  (void) XShearImage(rotate_image,shear.x,width,height,x_offset,
    ((long) rotate_image->rows-height)/2);
  (void) YShearImage(rotate_image,shear.y,y_width,height,
    ((long) rotate_image->columns-y_width)/2,y_offset);
  (void) XShearImage(rotate_image,shear.x,y_width,rotate_image->rows,
    ((long) rotate_image->columns-y_width)/2,0);
  CropToFitImage(&rotate_image,shear.x,shear.y,(MagickRealType) width,
    (MagickRealType) height,MagickTrue,exception);
  rotate_image->compose=image->compose;
  rotate_image->page.width=0;
  rotate_image->page.height=0;
  return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShearImage() creates a new image that is a shear_image copy of an existing
% one. Shearing slides one edge of an image along the X or Y axis, creating
% a parallelogram. An X direction shear slides an edge along the X axis,
% while a Y direction shear slides an edge along the Y axis. The amount of
% the shear is controlled by a shear angle. For X direction shears, x_shear
% is measured relative to the Y axis, and similarly, for Y direction shears
% y_shear is measured relative to the X axis. Empty triangles left over from
% shearing the image are filled with the background color defined by member
% 'background_color' of the image. ShearImage() allocates the memory
% necessary for the new Image structure and returns a pointer to the new image.
%
% ShearImage() is based on the paper "A Fast Algorithm for General Raster
% Rotation" by Alan W. Paeth.
%
% The format of the ShearImage method is:
%
% Image *ShearImage(const Image *image,const double x_shear,
% const double y_shear,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear: Specifies the number of degrees to shear the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Bug fix relative to the original: integral_image was destroyed only
  AFTER the NULL check on BorderImage()'s result, so a BorderImage()
  failure leaked the clone.  The destroy now precedes the check, matching
  the pattern used in RotateImage().
*/
MagickExport Image *ShearImage(const Image *image,const double x_shear,
  const double y_shear,ExceptionInfo *exception)
{
  Image
    *integral_image,
    *shear_image;

  long
    x_offset,
    y_offset;

  PointInfo
    shear;

  RectangleInfo
    border_info;

  unsigned long
    y_width;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* Multiples of 90 degrees have no finite shear factor. */
  if ((x_shear != 0.0) && (fmod(x_shear,90.0) == 0.0))
    ThrowImageException(ImageError,"AngleIsDiscontinuous");
  if ((y_shear != 0.0) && (fmod(y_shear,90.0) == 0.0))
    ThrowImageException(ImageError,"AngleIsDiscontinuous");
  /*
    Initialize shear angle.
  */
  integral_image=CloneImage(image,0,0,MagickTrue,exception);
  if (integral_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  shear.x=(-tan(DegreesToRadians(x_shear)/2.0));
  shear.y=sin(DegreesToRadians(y_shear));
  if ((shear.x == 0.0) && (shear.y == 0.0))
    return(integral_image);
  if (SetImageStorageClass(integral_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&integral_image->exception);
      integral_image=DestroyImage(integral_image);
      return(integral_image);  /* NULL after DestroyImage() */
    }
  if (integral_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel);
  /*
    Compute image size.
  */
  y_width=(unsigned long) floor(fabs(image->rows*shear.x)+image->columns+0.5);
  x_offset=(long) ceil(fabs(2.0*image->rows*shear.x)-0.5);
  y_offset=(long) ceil(fabs(y_width*shear.y)-0.5);
  /*
    Surround image with border large enough to hold the sheared result.
  */
  integral_image->border_color=integral_image->background_color;
  integral_image->compose=CopyCompositeOp;
  border_info.width=(unsigned long) x_offset;
  border_info.height=(unsigned long) y_offset;
  shear_image=BorderImage(integral_image,&border_info,exception);
  integral_image=DestroyImage(integral_image);  /* before the check: no leak */
  if (shear_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Shear the image.
  */
  if (shear_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(shear_image,OpaqueAlphaChannel);
  (void) XShearImage(shear_image,shear.x,image->columns,image->rows,x_offset,
    ((long) shear_image->rows-image->rows)/2);
  (void) YShearImage(shear_image,shear.y,y_width,image->rows,
    ((long) shear_image->columns-y_width)/2,y_offset);
  CropToFitImage(&shear_image,shear.x,shear.y,(MagickRealType) image->columns,
    (MagickRealType) image->rows,MagickFalse,exception);
  shear_image->compose=image->compose;
  shear_image->page.width=0;
  shear_image->page.height=0;
  return(shear_image);
}
|
tpi_openmp.c | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* */
/* This file is part of the program and library */
/* SCIP --- Solving Constraint Integer Programs */
/* */
/* Copyright (C) 2002-2018 Konrad-Zuse-Zentrum */
/* fuer Informationstechnik Berlin */
/* */
/* SCIP is distributed under the terms of the ZIB Academic License. */
/* */
/* You should have received a copy of the ZIB Academic License */
/* along with SCIP; see the file COPYING. If not email to scip@zib.de. */
/* */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/**@file tpi_openmp.c
* @ingroup TASKINTERFACE
* @brief the interface functions for openmp
* @author Stephen J. Maher
* @author Robert Lion Gottwald
*/
/*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/
#include "tpi/tpi.h"
#include "blockmemshell/memory.h"
/** A job added to the queue; jobs submitted together share a jobid and are
 *  kept in singly linked lists (queued, running, finished) via nextjob */
struct SCIP_Job
{
   int jobid; /**< id to identify jobs from a common process */
   struct SCIP_Job* nextjob; /**< pointer to the next job in the queue */
   SCIP_RETCODE (*jobfunc)(void* args);/**< pointer to the job function */
   void* args; /**< pointer to the function arguments */
   SCIP_RETCODE retcode; /**< return code of the job */
};
/** the thread pool job queue: a singly linked list of jobs with head/tail
 *  pointers for O(1) append and pop-front */
struct SCIP_JobQueue
{
   SCIP_JOB* firstjob; /**< pointer to the first job in the queue */
   SCIP_JOB* lastjob; /**< pointer to the last job in the queue */
   int njobs; /**< number of jobs in the queue */
};
typedef struct SCIP_JobQueue SCIP_JOBQUEUE;
/** global state of the OpenMP task-pool interface: the queue of waiting jobs,
 *  the job currently running on each thread, and the queue of finished jobs;
 *  all fields except nthreads are protected by the embedded lock */
struct SCIP_JobQueues
{
   SCIP_JOBQUEUE jobqueue; /**< queue of unprocessed jobs */
   SCIP_JOB** currentjobs; /**< array with slot for each thread to store the currently running job */
   int ncurrentjobs; /**< number of currently running jobs */
   int nthreads; /**< number of threads */
   SCIP_JOBQUEUE finishedjobs; /**< jobqueue containing the finished jobs */
   SCIP_LOCK lock; /**< lock to protect this structure from concurrent access */
   SCIP_CONDITION jobfinished; /**< condition to signal if a job was finished */
};
typedef struct SCIP_JobQueues SCIP_JOBQUEUES;
static SCIP_JOBQUEUES* _jobqueues = NULL;
/** allocates and initializes the global job-queue structure (_jobqueues) */
static
SCIP_RETCODE createJobQueue(
   int nthreads, /**< the number of threads */
   int qsize, /**< the queue size */
   SCIP_Bool blockwhenfull /**< should the queue be blocked from new jobs when full */
   )
{
   int t;

   assert(nthreads >= 0);
   assert(qsize >= 0);
   SCIP_UNUSED( blockwhenfull );

   /* allocate memory for the global job-queue structure */
   SCIP_ALLOC( BMSallocMemory(&_jobqueues) );

   /* the queue of waiting jobs and the queue of finished jobs both start empty */
   _jobqueues->jobqueue.firstjob = NULL;
   _jobqueues->jobqueue.lastjob = NULL;
   _jobqueues->jobqueue.njobs = 0;
   _jobqueues->finishedjobs.firstjob = NULL;
   _jobqueues->finishedjobs.lastjob = NULL;
   _jobqueues->finishedjobs.njobs = 0;

   /* no job is running yet */
   _jobqueues->nthreads = nthreads;
   _jobqueues->ncurrentjobs = 0;

   /* one slot per thread records the job currently running on that thread */
   SCIP_ALLOC( BMSallocMemoryArray(&_jobqueues->currentjobs, nthreads) );
   for( t = 0; t < nthreads; ++t )
      _jobqueues->currentjobs[t] = NULL;

   SCIP_CALL( SCIPtpiInitLock(&_jobqueues->lock) );
   SCIP_CALL( SCIPtpiInitCondition(&_jobqueues->jobfinished) );

   return SCIP_OKAY;
}
/** releases all resources held by the global job-queue structure */
static
SCIP_RETCODE freeJobQueue(
   void
   )
{
   assert(_jobqueues != NULL);

   /* tear down the synchronization primitives, then release the memory */
   SCIPtpiDestroyCondition(&_jobqueues->jobfinished);
   SCIPtpiDestroyLock(&_jobqueues->lock);

   BMSfreeMemoryArray(&_jobqueues->currentjobs);
   BMSfreeMemory(&_jobqueues);

   return SCIP_OKAY;
}
/** executes the given job on the calling thread and moves it to the
 *  finished-jobs queue.
 *
 *  The job is recorded in the per-thread slot so isJobRunning() can find it;
 *  after completion the thread slot (ncurrentjobs) is released, the job is
 *  appended to the finished list, and all waiters are woken up. */
static
void executeJob(
   SCIP_JOB* job /**< the job to be executed in parallel */
   )
{
   int threadnum;

   threadnum = SCIPtpiGetThreadNum();

   /* mark this job as the one running on the current thread */
   SCIP_CALL_ABORT( SCIPtpiAcquireLock(&_jobqueues->lock) );

   _jobqueues->currentjobs[threadnum] = job;
   SCIP_CALL_ABORT( SCIPtpiReleaseLock(&_jobqueues->lock) );

   /* run the user function outside the lock; its return code is stored on the job */
   job->retcode = (*(job->jobfunc))(job->args);

   SCIP_CALL_ABORT( SCIPtpiAcquireLock(&_jobqueues->lock) );

   /* free the thread slot claimed for this job */
   _jobqueues->ncurrentjobs--;
   _jobqueues->currentjobs[threadnum] = NULL;

   /* insert job into finished jobs */
   if( _jobqueues->finishedjobs.njobs == 0 )
   {
      _jobqueues->finishedjobs.firstjob = job;
      _jobqueues->finishedjobs.lastjob = job;
   }
   else
   {
      _jobqueues->finishedjobs.lastjob->nextjob = job;
      _jobqueues->finishedjobs.lastjob = job;
   }

   ++_jobqueues->finishedjobs.njobs;

   /* wake up threads waiting in SCIPtpiCollectJobs() or jobQueueProcessJob() */
   SCIP_CALL_ABORT( SCIPtpiBroadcastCondition(&_jobqueues->jobfinished) );
   SCIP_CALL_ABORT( SCIPtpiReleaseLock(&_jobqueues->lock) );
}
/** this is a job that will be executed to process the job queue.
 *
 *  Such a task is only spawned when the number of active jobs equals the
 *  number of threads, so there will always be (number of threads + 1) tasks
 *  available for the scheduler to run. It waits for a free thread slot,
 *  pops the first waiting job (if any) and executes it. */
static
void jobQueueProcessJob(
   void
   )
{
   SCIP_JOB* job;

   SCIP_CALL_ABORT( SCIPtpiAcquireLock(&_jobqueues->lock) );

   /* wait until one of the thread slots becomes free */
   while( _jobqueues->ncurrentjobs == SCIPtpiGetNumThreads() )
   {
      SCIP_CALL_ABORT( SCIPtpiWaitCondition(&_jobqueues->jobfinished, &_jobqueues->lock) );
   }

   /* pop the first job from the waiting queue, if one is present */
   if( _jobqueues->jobqueue.njobs == 1 )
   {
      job = _jobqueues->jobqueue.firstjob;
      _jobqueues->jobqueue.firstjob = NULL;
      _jobqueues->jobqueue.lastjob = NULL;
      --_jobqueues->jobqueue.njobs;
   }
   else if( _jobqueues->jobqueue.njobs > 1 )
   {
      job = _jobqueues->jobqueue.firstjob;
      _jobqueues->jobqueue.firstjob = job->nextjob;
      --_jobqueues->jobqueue.njobs;
   }
   else
   {
      job = NULL;
   }

   /* BUGFIX: only claim a thread slot when a job was actually dequeued.
    * Previously ncurrentjobs was incremented unconditionally; since only
    * executeJob() decrements it, hitting the empty-queue branch leaked a
    * slot forever and could deadlock the wait loop above. */
   if( job != NULL )
      ++_jobqueues->ncurrentjobs;

   SCIP_CALL_ABORT( SCIPtpiReleaseLock(&_jobqueues->lock) );

   if( job )
   {
      executeJob(job);
   }
}
/** adds a job to the job queue.
 *  This gives some more flexibility in the handling of new jobs: if all
 *  threads are busy the job is appended to the waiting queue and a queue
 *  processing task is spawned; otherwise a thread slot is claimed at once
 *  and the job runs as its own OpenMP task.
 *  IMPORTANT: This function MUST be called from within a mutex. */
static
SCIP_RETCODE jobQueueAddJob(
   SCIP_JOB* newjob /**< the job to add to the queue */
   )
{
   /* @todo we want to work out what to do with a full job queue. Is there a problem if the limit is hit? */
   /* @note it is important to have a queuesize. This will stop the code submitting infinitely many jobs. */
   assert(newjob != NULL);

   newjob->nextjob = NULL;

   /* this function queries the current job list. This could change by other threads writing to the list. So a lock is
    * required to ensure that the current joblist remains static. */
   SCIP_CALL( SCIPtpiAcquireLock(&_jobqueues->lock) );

   /* checking the status of the job queue */
   if( _jobqueues->ncurrentjobs == SCIPtpiGetNumThreads() )
   {
      /* all threads busy: append the new job to the waiting queue */
      if( _jobqueues->jobqueue.njobs == 0 )
      {
         _jobqueues->jobqueue.firstjob = newjob;
         _jobqueues->jobqueue.lastjob = newjob;
      }
      else /* it is assumed that the jobqueue is not full */
      {
         _jobqueues->jobqueue.lastjob->nextjob = newjob;
         _jobqueues->jobqueue.lastjob = newjob;
      }
      _jobqueues->jobqueue.njobs++;

      SCIP_CALL( SCIPtpiReleaseLock(&_jobqueues->lock) );

      /* spawn a task that picks up a queued job once a thread slot frees up */
      #pragma omp task
      jobQueueProcessJob();
   }
   else
   {
      assert(_jobqueues->ncurrentjobs < SCIPtpiGetNumThreads());

      /* claim a thread slot for the new job before releasing the lock */
      _jobqueues->ncurrentjobs++;

      SCIP_CALL( SCIPtpiReleaseLock(&_jobqueues->lock) );

      /* running the new job; firstprivate gives the task its own copy of the pointer */
      #pragma omp task firstprivate(newjob)
      executeJob(newjob);
   }

   return SCIP_OKAY;
}
/** signals a single thread waiting on the condition.
 *
 *  OpenMP has no native condition variables, so the condition is emulated
 *  with counters guarded by the condition's own lock: a waiter with ticket
 *  number w proceeds once _signals >= w (see SCIPtpiWaitCondition()). */
SCIP_RETCODE SCIPtpiSignalCondition(
   SCIP_CONDITION* condition /**< the condition to signal */
   )
{
   SCIP_CALL( SCIPtpiAcquireLock(&condition->_lock) );

   /* only issue a signal if there is a waiter that has not been signaled yet */
   if( condition->_waitnum > condition->_signals )
      ++condition->_signals;

   SCIP_CALL( SCIPtpiReleaseLock(&condition->_lock) );

   return SCIP_OKAY;
}
/** wakes up all threads currently waiting on the condition by raising the
 *  signal count to the number of registered waiters */
SCIP_RETCODE SCIPtpiBroadcastCondition(
   SCIP_CONDITION* condition /**< the condition to broadcast */
   )
{
   SCIP_CALL( SCIPtpiAcquireLock(&condition->_lock) );
   condition->_signals = condition->_waitnum;
   SCIP_CALL( SCIPtpiReleaseLock(&condition->_lock) );

   return SCIP_OKAY;
}
/** waits on the condition, releasing the given lock for the duration of the
 *  wait and reacquiring it before returning.
 *
 *  Implemented as a polling loop that repeatedly yields to other OpenMP
 *  tasks (taskyield) and re-checks the signal counter: each waiter takes a
 *  ticket (waitnum) and may proceed once _signals reaches it. The last
 *  waiter to leave resets the counters for the next round. */
SCIP_RETCODE SCIPtpiWaitCondition(
   SCIP_CONDITION* condition, /**< the condition to wait on */
   SCIP_LOCK* lock /**< caller-held lock, released while waiting */
   )
{
   int waitnum;

   /* release the caller's lock so the signaling thread can make progress */
   SCIP_CALL( SCIPtpiReleaseLock(lock) );

   SCIP_CALL( SCIPtpiAcquireLock(&condition->_lock) );

   /* register as a waiter; our ticket is the new waiter count */
   waitnum = ++condition->_waitnum;

   ++condition->_waiters;

   do
   {
      SCIP_CALL( SCIPtpiReleaseLock(&condition->_lock) );
      /* yield so that the task which will signal us can be scheduled */
      #pragma omp taskyield
      SCIP_CALL( SCIPtpiAcquireLock(&condition->_lock) );
   }
   while( condition->_signals < waitnum );

   --condition->_waiters;

   /* the last waiter to leave resets the condition state */
   if( condition->_waiters == 0 )
   {
      condition->_signals = 0;
      condition->_waitnum = 0;
   }

   SCIP_CALL( SCIPtpiReleaseLock(&condition->_lock) );

   /* reacquire the caller's lock before returning */
   SCIP_CALL( SCIPtpiAcquireLock(lock) );

   return SCIP_OKAY;
}
/** Returns the number of threads.
 *
 *  NOTE(review): omp_get_num_threads() reports the team size of the
 *  innermost enclosing parallel region and returns 1 in serial code;
 *  callers presumably invoke this from inside the parallel region -
 *  confirm against call sites. */
int SCIPtpiGetNumThreads(
   )
{
   return omp_get_num_threads();
}
/** Returns the thread number (the 0-based id of the calling thread within
 *  the current OpenMP team) */
int SCIPtpiGetThreadNum(
   )
{
   return omp_get_thread_num();
}
/** creates a job for parallel processing; the job is allocated and fully
 *  initialized but not yet linked into any queue */
SCIP_RETCODE SCIPtpiCreateJob(
   SCIP_JOB** job, /**< pointer to the job that will be created */
   int jobid, /**< the id for the current job */
   SCIP_RETCODE (*jobfunc)(void* args),/**< pointer to the job function */
   void* jobarg /**< the job's argument */
   )
{
   SCIP_JOB* newjob;

   SCIP_ALLOC( BMSallocMemory(job) );

   /* fill in the job's fields; nextjob stays NULL until the job is queued */
   newjob = *job;
   newjob->nextjob = NULL;
   newjob->args = jobarg;
   newjob->jobfunc = jobfunc;
   newjob->jobid = jobid;

   return SCIP_OKAY;
}
/** get a new job id for the new set of submitted jobs */
int SCIPtpiGetNewJobID(
   void
   )
{
   /* monotonically increasing counter shared by all threads */
   static int currentjobid = 0;
   int jobid;

   /* atomically increment and read so concurrent callers obtain unique ids;
    * the pragma must immediately precede the captured statement */
   #pragma omp atomic capture
   jobid = ++currentjobid;

   return jobid;
}
/** submit a job for parallel processing */
/* the return is a globally defined status */
/* NOTE(review): "Sumbit" is a typo, but it is the public TPI interface name,
 * so it must not be renamed here */
SCIP_RETCODE SCIPtpiSumbitJob(
   SCIP_JOB* job, /**< pointer to the job to be submitted */
   SCIP_SUBMITSTATUS* status /**< pointer to store the submit status */
   )
{
   assert(_jobqueues != NULL);

   /* this OpenMP implementation always accepts the job; the queue grows as needed */
   *status = SCIP_SUBMIT_SUCCESS;
   SCIP_CALL( jobQueueAddJob(job) );

   return SCIP_OKAY;
}
static
SCIP_Bool isJobRunning(
int jobid
)
{
int i;
if( _jobqueues->ncurrentjobs > 0 )
{
for( i = 0; i < _jobqueues->nthreads; ++i )
{
if( _jobqueues->currentjobs[i] != NULL && _jobqueues->currentjobs[i]->jobid == jobid )
return TRUE;
}
}
return FALSE;
}
static
SCIP_Bool isJobWaiting(
int jobid
)
{
if( _jobqueues->jobqueue.njobs > 0 )
{
SCIP_JOB* currjob;
currjob = _jobqueues->jobqueue.firstjob;
do
{
if( currjob->jobid == jobid )
return TRUE;
if( currjob == _jobqueues->jobqueue.lastjob )
break;
currjob = currjob->nextjob;
}
while( TRUE ); /*lint !e506*/
}
return FALSE;
}
/** Blocks until all jobs of the given jobid have finished
* and then returns the smallest SCIP_RETCODE of all the jobs */
SCIP_RETCODE SCIPtpiCollectJobs(
int jobid
)
{
SCIP_RETCODE retcode;
retcode = SCIP_OKAY;
SCIP_CALL( SCIPtpiAcquireLock(&_jobqueues->lock) );
while( isJobRunning(jobid) || isJobWaiting(jobid) )
{
SCIP_CALL( SCIPtpiWaitCondition(&_jobqueues->jobfinished, &_jobqueues->lock) );
}
if( _jobqueues->finishedjobs.njobs > 0 )
{
SCIP_JOB* currjob = _jobqueues->finishedjobs.firstjob;
SCIP_JOB* prevjob = NULL;
/* finding the location of the processed job in the currentjobs queue */
do
{
if( currjob->jobid == jobid )
{
SCIP_JOB* nextjob;
/** if the job has the right jobid collect its retcode,
* remove it from the finished job list, and free it */
retcode = MIN(retcode, currjob->retcode);
/* removing the finished job from finished jobs list */
if( currjob == _jobqueues->finishedjobs.firstjob )
_jobqueues->finishedjobs.firstjob = currjob->nextjob;
else
prevjob->nextjob = currjob->nextjob; /*lint !e613*/
if( currjob == _jobqueues->finishedjobs.lastjob )
_jobqueues->finishedjobs.lastjob = prevjob;
_jobqueues->finishedjobs.njobs--;
/* update currjob and free finished job; prevjob stays the same */
nextjob = currjob->nextjob;
BMSfreeMemory(&currjob);
currjob = nextjob;
}
else
{
prevjob = currjob;
currjob = prevjob->nextjob;
}
}
while( prevjob != _jobqueues->finishedjobs.lastjob );
}
else
{
/* given jobid was not submitted */
printf("err1");
retcode = SCIP_ERROR;
}
SCIP_CALL_ABORT( SCIPtpiReleaseLock(&_jobqueues->lock) );
return retcode;
}
/** initializes tpi: fixes the OpenMP thread count and creates the global
 *  job-queue structure; must be called exactly once before any submission */
SCIP_RETCODE SCIPtpiInit(
   int nthreads, /**< the number of threads to use */
   int queuesize, /**< the size of the job queue */
   SCIP_Bool blockwhenfull /**< should submissions block when the queue is full */
   )
{
   assert(_jobqueues == NULL);

   /* fix the thread count before any parallel region is opened */
   omp_set_num_threads(nthreads);

   SCIP_CALL( createJobQueue(nthreads, queuesize, blockwhenfull) );

   return SCIP_OKAY;
}
/** deinitializes tpi; every submitted job must have been collected and no
 *  job may be queued or running anymore */
SCIP_RETCODE SCIPtpiExit(
   void
   )
{
   assert(_jobqueues != NULL);
   assert(_jobqueues->ncurrentjobs == 0);
   assert(_jobqueues->jobqueue.njobs == 0);
   assert(_jobqueues->finishedjobs.njobs == 0);

   SCIP_CALL( freeJobQueue() );

   return SCIP_OKAY;
}
|
DRB033-truedeplinear-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A linear expression is used as array subscription.
Data race pair: a[2*i+1]@64:5 vs. a[i]@64:14
*/
#include <stdlib.h>
#include <stdio.h>
int main(int argc, char* argv[])
{
  int i;
  int a[2000];

  /* initialize a[i] = i */
  for (i=0; i<2000; i++)
    a[i]=i;

  /* INTENTIONAL DATA RACE (DataRaceBench "yes" case - do not fix):
   * iteration i writes a[2*i+1] while reading a[i]; for odd i, a[i] is
   * written by iteration (i-1)/2, so the loop carries a true dependence
   * across iterations and must not be parallelized. */
  #pragma omp parallel for
  for (i=0;i<1000;i++)
    a[2*i+1]=a[i]+1;

  printf("a[1001]=%d\n", a[1001]);
  return 0;
}
|
GB_unop__abs_fp64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__abs_fp64_fc64
// op(A') function: GB_unop_tran__abs_fp64_fc64
// C type: double
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = (aij)
// unaryop: cij = cabs (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cabs (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = (aij) ; \
Cx [pC] = cabs (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the complex-magnitude operator to every entry
GrB_Info GB_unop_apply__abs_fp64_fc64
(
    double *Cx,                 // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // Cx [k] = cabs (Ax [k]) : magnitude of the complex entry
        GxB_FC64_t zk = Ax [k] ;
        Cx [k] = cabs (zk) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__abs_fp64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose loop lives in the shared template, specialized via the
    // GB_* macros (GB_CAST_OP etc.) defined earlier in this file
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__bclr_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bclr_int32)
// A.*B function (eWiseMult): GB (_AemultB_01__bclr_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__bclr_int32)
// A.*B function (eWiseMult): GB (_AemultB_03__bclr_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_int32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bclr_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__bclr_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_int32)
// C=scalar+B GB (_bind1st__bclr_int32)
// C=scalar+B' GB (_bind1st_tran__bclr_int32)
// C=A+scalar GB (_bind2nd__bclr_int32)
// C=A'+scalar GB (_bind2nd_tran__bclr_int32)
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = GB_BITCLR (aij, bij, int32_t, 32)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITCLR (x, y, int32_t, 32) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BCLR || GxB_NO_INT32 || GxB_NO_BCLR_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__bclr_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // C = A+B where all three matrices are dense; the loop itself lives in
    // the shared template, specialized by the GB_* macros defined above
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bclr_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    // C += B: accumulate the sparse matrix B into the dense matrix C,
    // using the precomputed slicing of B for parallelism
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bclr_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    // C += b: accumulate a scalar into the dense matrix C
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): the generator emitted a second, unreachable
    // "return (GrB_SUCCESS) ;" here; the block above always returns,
    // so the dead statement has been removed
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bclr_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseAdd C = A+B (optionally masked by M); the template may slice
    // M, A and B, so per-call workspaces are declared here and released
    // via GB_FREE_WORK before returning
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__bclr_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult C = A.*B (optionally masked); the loops live in the shared
    // meta template, specialized by the GB_* macros defined above
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bclr_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // eWiseMult C<#> = A.*B where A is sparse/hyper and B is bitmap/full
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__bclr_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    // eWiseMult C<M> = A.*B where M is sparse/hyper and A, B are bitmap/full
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bclr_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult where the result C is held as a bitmap (C=A.*B, C<M>=A.*B,
    // or C<!M>=A.*B); the loops live in the shared bitmap template
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): bind the scalar x as the first operand
GrB_Info GB (_bind1st__bclr_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int32_t x = (*((int32_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // only entries present in the bitmap Bb are computed
        if (GBB (Bb, k))
        {
            int32_t bij = GBX (Bx, k, false) ;
            Cx [k] = GB_BITCLR (x, bij, int32_t, 32) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): bind the scalar y as the second operand
GrB_Info GB (_bind2nd__bclr_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only entries present in the bitmap Ab are computed
        if (GBB (Ab, k))
        {
            int32_t aij = GBX (Ax, k, false) ;
            Cx [k] = GB_BITCLR (aij, y, int32_t, 32) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITCLR (x, aij, int32_t, 32) ; \
}
GrB_Info GB (_bind1st_tran__bclr_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // re-establish the file-wide GB_ATYPE for code after this function
    // (identical here, since A's type is also int32_t for this operator)
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITCLR (aij, y, int32_t, 32) ; \
}
GrB_Info GB (_bind2nd_tran__bclr_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = op (A', y): the transpose loop lives in the shared template and
    // applies GB_CAST_OP (redefined just above to bind y) to each entry
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
printwhileon.c | #include <stdio.h>
#include <unistd.h>
#ifdef HAVE_MPI
#include <mpi.h>
#endif
#ifdef THREADED_OMP
#include <omp.h>
#endif
#include "../gptl.h"
/* Test driver: exercises GPTLpr()/GPTLpr_summary() when some timers are
 * deliberately left ON (unbalanced start/stop) on the first and last MPI
 * ranks and OpenMP threads; the generated timing.* files should flag the
 * imbalance as an error. */
int main (int argc, char **argv)
{
  int nthreads = 1; /* Value is 1 if no threading */
  int iam = 0; /* Value is 0 if no MPI */
  int commsize = 1; /* Value is 1 if no MPI */
  int provided = -1; /* level of threading support in this MPI lib */
  int n;
  int ret;
#ifdef HAVE_MPI
  int resultlen; /* returned length of string from MPI routine */
  char string[MPI_MAX_ERROR_STRING]; /* character string returned from MPI routine */

  /* Initialize MPI by using MPI_Init_thread: report back level of MPI support */
  if ((ret = MPI_Init_thread (&argc, &argv, MPI_THREAD_SINGLE, &provided)) != 0) {
    MPI_Error_string (ret, string, &resultlen);
    printf ("%s: error from MPI_Init_thread: %s\n", argv[0], string);
    MPI_Abort (MPI_COMM_WORLD, -1);
  }
  ret = MPI_Comm_rank (MPI_COMM_WORLD, &iam); /* Get my rank */
  ret = MPI_Comm_size (MPI_COMM_WORLD, &commsize); /* Get communicator size */
#endif

  /* only rank 0 prints the test banner and the provided MPI support level */
  if (iam == 0) {
    printf ("%s: testing GPTLpr() and GPTLpr_summary() with some timers ON\n", argv[0]);
    printf ("Check timing.* files: 1st and last ranks, 1st and last threads should print error\n");
#ifdef HAVE_MPI
    switch (provided) {
    case MPI_THREAD_SINGLE:
      printf ("MPI support level is MPI_THREAD_SINGLE\n");
      break;
    case MPI_THREAD_SERIALIZED:
      printf ("MPI support level is MPI_THREAD_SERIALIZED\n");
      break;
    case MPI_THREAD_MULTIPLE:
      printf ("MPI support level is MPI_THREAD_MULTIPLE\n");
      break;
    default:
      printf ("MPI support level is not known\n");
      MPI_Abort (MPI_COMM_WORLD, -1);
    }
#endif
  }

  ret = GPTLsetoption (GPTLoverhead, 0); /* Don't print overhead stats */
  ret = GPTLsetoption (GPTLpercent, 0); /* Don't print percentage stats */
  ret = GPTLinitialize (); /* Initialize GPTL */
  ret = GPTLstart ("total");

  /* Everyone starts "sub", but 1st and last ranks erroneously start it twice */
  ret = GPTLstart ("sub");
  if (iam == 0 || iam == commsize-1)
    ret = GPTLstart ("sub");
#ifdef THREADED_OMP
  nthreads = omp_get_max_threads ();
#endif
  if (iam == 0)
    printf ("nthreads=%d ntasks=%d\n", nthreads, commsize);

  /* each thread runs one iteration; ret is private so threads don't race on it */
#pragma omp parallel for private (ret)
  for (n = 0; n < nthreads; ++n) {
    ret = GPTLstart ("threaded_region");
    ret = GPTLstart ("threaded_region_sub");

    /* sleep a short time so timings are meaningful */
    ret = sleep (iam+n);

    /* Everyone starts "threaded_region_sub", but 1st and last threads erroneously start it twice */
    if (n == 0 || n == nthreads-1)
      ret = GPTLstart ("threaded_region_sub");
    ret = GPTLstop ("threaded_region_sub");
    ret = GPTLstop ("threaded_region");
  }
  ret = GPTLstop ("sub");
  ret = GPTLstop ("total");

  ret = GPTLpr (iam); /* per-rank timing report */
#ifdef HAVE_MPI
  ret = GPTLpr_summary (MPI_COMM_WORLD); /* cross-rank summary report */
  ret = MPI_Finalize ();
#else
  ret = GPTLpr_summary ();
#endif
  return 0;
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute *result = *x - *y for two `struct timeval` values.
 *
 * Returns 1 if the difference is negative, 0 otherwise.
 * NOTE: *y is used as scratch space for the borrow/carry normalization
 * and is modified by this call.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow seconds into y's microseconds so x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_sec += borrow;
    y->tv_usec -= 1000000 * borrow;
  }
  /* Carry excess microseconds back into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_sec -= carry;
    y->tv_usec += 1000000 * carry;
  }
  /* After normalization tv_usec is non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver: allocates the double-buffered grid A[2][Nz][Ny][Nx] and
 * the coefficient field roc2[Nz][Ny][Nx], then runs the time-tiled 25-point
 * stencil TESTS times, reporting per-run and minimum wall time.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  /* Requested interior size plus 8 halo points (4 per side).
     NOTE(review): Nx/Ny/Nz (and Nt) stay uninitialized when fewer than
     4 (resp. 5) arguments are passed -- confirm the harness always
     supplies all of them. */
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  /* A[0]/A[1] are the two time planes of the double buffer. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**));
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  /* NOTE(review): this overwrites the single-element malloc assigned to
     roc2 above, leaking it. */
  roc2 = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  /* Tile sizes for the four tiled loops; -1 terminates the list. */
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 16;
  tile_size[3] = 128;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables
  //
  /* Fixed seed for reproducible input data.
     NOTE(review): index 0 of each dimension is never initialized here,
     while the -4 stencil offsets can reach it -- verify this is intended. */
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  /* Order-8 central-difference coefficients of the 25-point stencil. */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
       This file is part of the GNU C Library.
       The GNU C Library is free software; you can redistribute it and/or
       modify it under the terms of the GNU Lesser General Public
       License as published by the Free Software Foundation; either
       version 2.1 of the License, or (at your option) any later version.
       The GNU C Library is distributed in the hope that it will be useful,
       but WITHOUT ANY WARRANTY; without even the implied warranty of
       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
       Lesser General Public License for more details.
       You should have received a copy of the GNU Lesser General Public
       License along with the GNU C Library; if not, see
       <http://www.gnu.org/licenses/>. */
    /* This header is separate from features.h so that the compiler can
       include it implicitly at the start of every compilation. It must
       not itself include <features.h> or any other header that includes
       <features.h> because the implicit include comes before any feature
       test macros that may be defined in a source file before it first
       explicitly includes a system header. GCC knows the name of this
       header in order to preinclude it. */
    /* glibc's intent is to support the IEC 559 math functionality, real
       and complex. If the GCC (4.9 and later) predefined macros
       specifying compiler intent are available, use them to determine
       whether the overall intent is to support these features; otherwise,
       presume an older compiler has intent to support these features and
       define these macros by default. */
    /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
       Unicode 6.0. */
    /* We do not support C11 <threads.h>. */
    /* Tiled loop iterators generated by PLUTO/CLooG; do not hand-edit. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    /* Skewed time/space tiling: t1 walks time tiles, t2 (parallel) walks
       z-tiles, t3/t4 the y/x tiles, t5..t8 the intra-tile points. */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=Nt-1;t1++) {
        lbp=ceild(t1+1,2);
        ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(ceild(t1-2,4),ceild(8*t2-Nz-3,16));t3<=min(floord(4*Nt+Ny-9,16),floord(4*t1+Ny-1,16));t3++) {
            for (t4=max(max(ceild(t1-30,32),ceild(8*t2-Nz-115,128)),ceild(16*t3-Ny-115,128));t4<=min(min(floord(4*Nt+Nx-9,128),floord(4*t1+Nx-1,128)),floord(16*t3+Nx+3,128));t4++) {
              for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(128*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),4*t3+2),32*t4+30);t5++) {
                for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) {
                    lbv=max(128*t4,4*t5+4);
                    ubv=min(128*t4+127,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  return 0;
}
|
DRB055-jacobi2d-parallel-no.c | /**
* jacobi-2d-imper.c: This file is part of the PolyBench/C 3.2 test suite.
* Jacobi with array copying, no reduction.
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
* License: /LICENSE.OSU.txt
*/
#include "omprace.h"
#include <omp.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include "polybench/polybench.h"
/* Include benchmark-specific header. */
/* Default data type is double, default size is 20x1000. */
#include "polybench/jacobi-2d-imper.h"
/* Array initialization. */
static void init_array(int n,double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
//int i;
//int j;
{
int c2;
int c1;
if (n >= 1) {
#pragma omp parallel for private(c2)
for (c1 = 0; c1 <= n + -1; c1++) {
for (c2 = 0; c2 <= n + -1; c2++) {
A[c1][c2] = (((double )c1) * (c2 + 2) + 2) / n;
B[c1][c2] = (((double )c1) * (c2 + 3) + 3) / n;
}
}
}
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump the n x n corner of A to stderr (dead-code-elimination guard /
 * correctness check), inserting a newline every 20 values. */
static void print_array(int n, double A[500 + 0][500 + 0])
{
  int r, c;

  for (r = 0; r < n; r++) {
    for (c = 0; c < n; c++) {
      fprintf(stderr, "%0.2lf ", A[r][c]);
      if ((r * n + c) % 20 == 0)
        fprintf(stderr, "\n");
    }
  }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/*
 * Time-tiled Jacobi 2-D kernel (PLUTO/CLooG-generated schedule): each time
 * step writes the 5-point average of A into B, then copies B back into A,
 * pipelined across skewed time/space tiles.
 * NOTE(review): the loop bounds are specialized for n == 500 and a fixed
 * step count; the tsteps and n parameters are not used inside -- confirm
 * callers only ever pass 500/10.
 */
static void kernel_jacobi_2d_imper(int tsteps,int n,double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
  //int t;
  //int i;
  //int j;
  //#pragma scop
  {
    int c2;
    int c1;
    int c0;
    /* Prologue: first time step's update of row 1. */
    for (c2 = 1; c2 <= 498; c2++) {
      B[1][c2] = 0.2 * (A[1][c2] + A[1][c2 - 1] + A[1][1 + c2] + A[1 + 1][c2] + A[1 - 1][c2]);
    }
    /* Skewed wavefront over time and the first space dimension; the inner
       c1 loop (parallel) handles independent diagonals of the wavefront. */
    for (c0 = 2; c0 <= 525; c0++) {
      /* Pipeline fill: remaining first-time-step updates of row 1. */
      if (c0 <= 28) {
        if ((2 * c0 + 1) % 3 == 0) {
          for (c2 = ((2 * c0 + 1) * 3 < 0?-(-(2 * c0 + 1) / 3) : ((3 < 0?(-(2 * c0 + 1) + - 3 - 1) / - 3 : (2 * c0 + 1 + 3 - 1) / 3))); c2 <= (((2 * c0 + 1492) * 3 < 0?((3 < 0?-((-(2 * c0 + 1492) + 3 + 1) / 3) : -((-(2 * c0 + 1492) + 3 - 1) / 3))) : (2 * c0 + 1492) / 3)); c2++) {
            B[1][(-2 * c0 + 3 * c2 + 2) / 3] = 0.2 * (A[1][(-2 * c0 + 3 * c2 + 2) / 3] + A[1][(-2 * c0 + 3 * c2 + 2) / 3 - 1] + A[1][1 + (-2 * c0 + 3 * c2 + 2) / 3] + A[1 + 1][(-2 * c0 + 3 * c2 + 2) / 3] + A[1 - 1][(-2 * c0 + 3 * c2 + 2) / 3]);
          }
        }
      }
      /* Steady state: interleaved B-update and A<-B copy-back. */
#pragma omp parallel for private(c2)
      for (c1 = ((((2 * c0 + 2) * 3 < 0?-(-(2 * c0 + 2) / 3) : ((3 < 0?(-(2 * c0 + 2) + - 3 - 1) / - 3 : (2 * c0 + 2 + 3 - 1) / 3)))) > c0 + -9?(((2 * c0 + 2) * 3 < 0?-(-(2 * c0 + 2) / 3) : ((3 < 0?(-(2 * c0 + 2) + - 3 - 1) / - 3 : (2 * c0 + 2 + 3 - 1) / 3)))) : c0 + -9); c1 <= (((((2 * c0 + 498) * 3 < 0?((3 < 0?-((-(2 * c0 + 498) + 3 + 1) / 3) : -((-(2 * c0 + 498) + 3 - 1) / 3))) : (2 * c0 + 498) / 3)) < c0?(((2 * c0 + 498) * 3 < 0?((3 < 0?-((-(2 * c0 + 498) + 3 + 1) / 3) : -((-(2 * c0 + 498) + 3 - 1) / 3))) : (2 * c0 + 498) / 3)) : c0)); c1++) {
        B[-2 * c0 + 3 * c1][1] = 0.2 * (A[-2 * c0 + 3 * c1][1] + A[-2 * c0 + 3 * c1][1 - 1] + A[-2 * c0 + 3 * c1][1 + 1] + A[1 + (-2 * c0 + 3 * c1)][1] + A[-2 * c0 + 3 * c1 - 1][1]);
        for (c2 = 2 * c0 + -2 * c1 + 2; c2 <= 2 * c0 + -2 * c1 + 498; c2++) {
          A[-2 * c0 + 3 * c1 + -1][-2 * c0 + 2 * c1 + c2 + -1] = B[-2 * c0 + 3 * c1 + -1][-2 * c0 + 2 * c1 + c2 + -1];
          B[-2 * c0 + 3 * c1][-2 * c0 + 2 * c1 + c2] = 0.2 * (A[-2 * c0 + 3 * c1][-2 * c0 + 2 * c1 + c2] + A[-2 * c0 + 3 * c1][-2 * c0 + 2 * c1 + c2 - 1] + A[-2 * c0 + 3 * c1][1 + (-2 * c0 + 2 * c1 + c2)] + A[1 + (-2 * c0 + 3 * c1)][-2 * c0 + 2 * c1 + c2] + A[-2 * c0 + 3 * c1 - 1][-2 * c0 + 2 * c1 + c2]);
        }
        A[-2 * c0 + 3 * c1 + -1][498] = B[-2 * c0 + 3 * c1 + -1][498];
      }
      /* Pipeline drain: trailing copy-backs of the last row. */
      if (c0 >= 499) {
        if ((2 * c0 + 1) % 3 == 0) {
          for (c2 = ((2 * c0 + -992) * 3 < 0?-(-(2 * c0 + -992) / 3) : ((3 < 0?(-(2 * c0 + -992) + - 3 - 1) / - 3 : (2 * c0 + -992 + 3 - 1) / 3))); c2 <= (((2 * c0 + 499) * 3 < 0?((3 < 0?-((-(2 * c0 + 499) + 3 + 1) / 3) : -((-(2 * c0 + 499) + 3 - 1) / 3))) : (2 * c0 + 499) / 3)); c2++) {
            A[498][(-2 * c0 + 3 * c2 + 995) / 3] = B[498][(-2 * c0 + 3 * c2 + 995) / 3];
          }
        }
      }
    }
    /* Epilogue: final copy-back of row 498. */
    for (c2 = 20; c2 <= 517; c2++) {
      A[498][c2 + -19] = B[498][c2 + -19];
    }
  }
  //#pragma endscop
}
/* Driver: allocate the two 500x500 grids, run the Jacobi kernel under the
 * polybench timer, optionally print the result, and tear everything down. */
int main(int argc, char **argv)
{
  omprace_init();

  /* Problem size and time steps (fixed for this variant). */
  const int n = 500;
  const int tsteps = 10;

  /* Heap-allocated 500x500 grids, held as pointers to whole 2-D arrays. */
  double (*A)[500 + 0][500 + 0] =
      (double (*)[500 + 0][500 + 0]) polybench_alloc_data((500 + 0) * (500 + 0), sizeof(double));
  double (*B)[500 + 0][500 + 0] =
      (double (*)[500 + 0][500 + 0]) polybench_alloc_data((500 + 0) * (500 + 0), sizeof(double));

  init_array(n, *A, *B);

  /* Time the whole kernel call. */
  polybench_timer_start();
  kernel_jacobi_2d_imper(tsteps, n, *A, *B);
  polybench_timer_stop();
  polybench_timer_print();

  /* Dead-code-elimination guard: the condition is never true in practice,
   * but it forces the compiler to treat A as live output. */
  if (argc > 42 && !strcmp(argv[0], ""))
    print_array(n, *A);

  free((void *) A);
  free((void *) B);
  omprace_fini();
  return 0;
}
|
o10glogon_fmt_plug.c | /*
* This software was written by JimF jfoug AT cox dot net
* in 2016. No copyright is claimed, and the software is hereby
* placed in the public domain. In case this attempt to disclaim
* copyright and place the software in the public domain is deemed
* null and void, then the software is Copyright (c) 2016 JimF
* and it is hereby released to the general public under the following
* terms:
*
* This software may be modified, redistributed, and used for any
* purpose, in source and binary forms, with or without modification.
*
* This is oracle O10g-logon format. NOTE, if the hashes came from a
* Oracle 10g, and the hash data can be sniffed from network traffic
* TNS records.
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_o10glogon;
#elif FMT_REGISTERS_H
john_register_one(&fmt_o10glogon);
#else
#include <string.h>
#include <openssl/des.h>
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "aes.h"
#include "md5.h"
#include "unicode.h"
#include "base64_convert.h"
#include "memdbg.h"
#define FORMAT_LABEL "o10glogon"
#define FORMAT_NAME "Oracle 10g-logon protocol"
#define FORMAT_TAG "$o10glogon$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "DES-AES128-MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 32
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define MAX_USERNAME_LEN 30
#define SALT_SIZE (sizeof(ora10g_salt))
#define SALT_ALIGN (sizeof(unsigned int))
#define CIPHERTEXT_LENGTH 16
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define MAX_HASH_LEN (FORMAT_TAG_LEN+MAX_USERNAME_LEN+1+64+1+64+1+160)
//#define DEBUG_ORACLE
//
// The keys are $o10glogon$oracle-user-name$auth_sess_key$auth_sess_key_c$auth_password
// These can be found in sniffed network traffic.
/* Self-test vectors: sniffed 10g logon exchanges with their known plaintext
 * passwords (username, AUTH_SESSKEY, AUTH_SESSKEY client, AUTH_PASSWORD). */
static struct fmt_tests tests[] = {
	{"$o10glogon$jimf$6DA8BE6D9713B7F9190DC0F87F1BB1BDFFE44EB1892E40915592980ECCE60AA3$1C08586339E5806DD45CF8E6D83CC6EA2B8CDCDE7CC9F00ADF43DA0F07309090$E2F3D778138213BF01FD743F2092FC976FD60AB2C9F4A1B1D9B08439325421B1", "JimF"},
	{"$o10glogon$SESA218390$3B16F14C3DC6048C993000E2BF543BAB489DF7BD8D6061B7274CC9E1DB743E08$1695D5255EDF15CA6B1F14C5CB39C72C98E2CC2B62FB3224ECA5A6A6790511D4$F0F64E384E567F44E9DF8D7F4C029AA59770FA75094F1C26A66C45AFA9913987", "jimf"},
	{"$o10glogon$TESTUSER$EEABE812530C6D4432F781DFC14A7C7F81EAE1804F340D3289732477FD351FCC$7B244D7A1DB5ABE553FB9B7325110024911FCBE95EF99E7965A754BC41CF31C0$4C5E28E66B6382117F9D41B08957A3B9E363B42760C33B44CA5D53EA90204ABE", "TESTPASS"},
	{NULL}
};
/* Parsed form of one hash line: uppercased UTF-16 user plus the three
 * binary fields decoded from hex. */
typedef struct ora10g_salt_t {
	int userlen, auth_pass_len;               /* byte lengths */
	UTF16 user[MAX_USERNAME_LEN+1];           /* UTF-16BE username */
	unsigned char auth_sesskey[32];           /* server AUTH_SESSKEY */
	unsigned char auth_sesskey_c[32];         /* client AUTH_SESSKEY */
	unsigned char auth_pass[80];              /* encrypted AUTH_PASSWORD */
} ora10g_salt;
/* Per-crypt working state (allocated in init, sized max_keys_per_crypt). */
static ora10g_salt *cur_salt;
static UTF16 (*cur_key)[PLAINTEXT_LENGTH + 1];   /* uppercased UTF-16BE keys */
static char (*plain_key)[PLAINTEXT_LENGTH + 1];  /* original key strings */
static int *cur_key_len;                          /* key lengths in bytes */
static int *cracked, any_cracked;                 /* per-index hit flags */
static DES_key_schedule desschedule1;	// key 0x0123456789abcdef
/* One-time format setup: build the fixed DES key schedule used by the
 * Oracle hash step and allocate the per-candidate working buffers. */
static void init(struct fmt_main *self)
{
	DES_set_key((DES_cblock *)"\x01\x23\x45\x67\x89\xab\xcd\xef", &desschedule1);
#ifdef _OPENMP
	/* Scale the candidate batch by thread count times OMP_SCALE. */
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	{
		const int n = self->params.max_keys_per_crypt;

		cur_key = mem_calloc(n, sizeof(*cur_key));
		plain_key = mem_calloc(n, sizeof(*plain_key));
		cur_key_len = mem_calloc(n, sizeof(*cur_key_len));
		cracked = mem_calloc(n, sizeof(*cracked));
	}
}
/* Release the buffers allocated in init() (reverse order of allocation). */
static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(cur_key_len);
	MEM_FREE(plain_key);
	MEM_FREE(cur_key);
}
/*
 * Validate one hash line of the form
 *   $o10glogon$user$<64 hex>$<64 hex>$<hex, multiple of 16>
 * Returns 1 if the line is well-formed, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *cp;
	char tmp[32*5+1];
	UTF16 cur_key_mixedcase[MAX_USERNAME_LEN+2];
	int len, extra;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	ciphertext += FORMAT_TAG_LEN;
	cp = strchr(ciphertext, '$');
	if (!cp)
		return 0;
	// make sure username fits in MAX_USERNAME_LEN UTF16
	if (cp-ciphertext > sizeof(tmp)-1)
		return 0;
	memcpy(tmp, ciphertext, cp-ciphertext);
	tmp[cp-ciphertext] = 0;
	/* Convert to UTF-16 both to count characters and to catch bad input
	   encoding; len is in UTF-16 units, negative on conversion failure. */
	len = enc_to_utf16((UTF16 *)cur_key_mixedcase, MAX_USERNAME_LEN+1, (unsigned char*)tmp, strlen(tmp));
	if (len < 0 || (len == 0 && cp-ciphertext)) {
		static int error_shown = 0;
#ifdef HAVE_FUZZ
		if (options.flags & (FLG_FUZZ_CHK | FLG_FUZZ_DUMP_CHK))
			return 0;
#endif
		/* Warn once, not once per bad line. */
		if (!error_shown)
			fprintf(stderr, "%s: Input file is not UTF-8. Please use --input-enc to specify a codepage.\n", self->params.label);
		error_shown = 1;
		return 0;
	}
	if (len > MAX_USERNAME_LEN)
		return 0;
	/* Two session-key fields: exactly 64 hex digits each. */
	ciphertext = cp+1;
	cp = strchr(ciphertext, '$');
	if (!cp || cp-ciphertext != 64 || hexlenu(ciphertext, 0) != 64)
		return 0;
	ciphertext = cp+1;
	cp = strchr(ciphertext, '$');
	if (!cp || cp-ciphertext != 64 || hexlenu(ciphertext, 0) != 64)
		return 0;
	/* Final field: non-empty hex, whole AES blocks (len % 16 == 0),
	   no further '$' and no trailing garbage (extra). */
	ciphertext = cp+1;
	len = strlen(ciphertext);
	cp = strchr(ciphertext, '$');
	if (!len || cp || len%16 || hexlenu(ciphertext, &extra) != len || extra)
		return 0;
	return 1;
}
/* Canonicalize a hash line: copy (bounded) and upper-case everything after
 * the tag so equivalent hashes compare equal (FMT_SPLIT_UNIFIES_CASE). */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[MAX_HASH_LEN*5+1];

	strnzcpy(out, ciphertext, MAX_HASH_LEN+1);
	enc_strupper(&out[FORMAT_TAG_LEN]);
	return out;
}
/* Install the salt produced by get_salt() for the next crypt_all() run. */
static void set_salt(void *salt) {
	cur_salt = (ora10g_salt *)salt;
}
/* Store candidate password `key` at slot `index`: keep the original string
 * for later comparison, and also keep an upper-cased UTF-16BE copy, which is
 * what Oracle's legacy DES hash operates on. */
static void oracle_set_key(char *key, int index) {
	UTF16 cur_key_mixedcase[PLAINTEXT_LENGTH+1];
	UTF16 *c;
	int key_length;

	strcpy(plain_key[index], key);
	// Can't use enc_to_utf16_be() because we need to do utf16_uc later
	key_length = enc_to_utf16(cur_key_mixedcase, PLAINTEXT_LENGTH, (unsigned char*)key, strlen(key));

	if (key_length < 0)
		key_length = strlen16(cur_key_mixedcase);

	// We convert and uppercase in one shot
	key_length = utf16_uc(cur_key[index], PLAINTEXT_LENGTH, cur_key_mixedcase, key_length);
	// we have no way to 'undo' here, since the expansion is due to single-2-multi expansion in the upcase,
	// and we can not 'fix' our password.  We simply have to 'not' properly decrypt this one, but protect ourselves.
	if (key_length < 0)
		key_length *= -1;
	/* Length is stored in bytes, not UTF-16 units. */
	cur_key_len[index] = key_length * sizeof(UTF16);
	// Now byte-swap to UTF16-BE
	c = cur_key[index];
	while((*c = *c << 8 | *c >> 8))
		c++;

#ifdef DEBUG_ORACLE
	dump_stuff_msg("cur_key    ", (unsigned char*)cur_key[index], cur_key_len[index]);
#endif
}
/* Return the original (un-upcased) candidate stored at slot `index`. */
static char *get_key(int index) {
	return plain_key[index];
}
/* Decrypt input_len bytes with AES-128 in CBC mode using an all-zero IV,
 * as the Oracle 10g TNS logon exchange does. */
static void ORACLE_TNS_Decrypt_AES128_CBC (unsigned char aes_key_bytes[16], unsigned char* input, int input_len, unsigned char* output)
{
	unsigned char iv[16] = { 0 };
	AES_KEY dec_key;

	AES_set_decrypt_key(aes_key_bytes, 128, &dec_key);
	AES_cbc_encrypt(input, output, input_len, &dec_key, iv, AES_DECRYPT);
}
/*
 * Trim TNS padding from a decrypted password buffer in place.
 *
 * Scans the printable-ASCII prefix (32..126), then requires every remaining
 * byte up to `len` to repeat the first non-printable byte (the padding
 * byte). On success the prefix is NUL-terminated and its length returned;
 * on any mismatch -1 is returned (wrong decryption).
 *
 * Fix: the original scan loop had no upper bound, so a buffer consisting
 * entirely of printable bytes was read past its end (undefined behavior).
 * Such a buffer contains no padding byte to validate, so it is now
 * rejected with -1 instead.
 */
static int terminate_ascii_string (char* ascii_string_not_terminated, int len)
{
	int ascii_len = 0;
	unsigned char padding_byte;
	int pos;

	/* Find the end of the printable prefix, never reading past len. */
	for (pos = 0; pos < len; pos++) {
		if ((ascii_string_not_terminated[pos] < 32) ||
		    (ascii_string_not_terminated[pos] > 126))
			break;
	}
	if (pos == len)
		return -1;	/* no padding byte found -- cannot validate */
	ascii_len = pos;
	padding_byte = ascii_string_not_terminated[pos];
	/* All trailing bytes must repeat the padding byte. */
	for (; pos < len; pos++) {
		if (ascii_string_not_terminated[pos] != padding_byte)
			return -1;
	}
	/* NUL-terminate in place over the first padding byte. */
	ascii_string_not_terminated[ascii_len] = 0;
	return ascii_len;
}
/* Derive the shared password-decryption key: MD5 of the XOR of the
 * decrypted server and client session-key halves. */
static void ORACLE_TNS_Combine_SessKeys (unsigned char server_sesskey[16], unsigned char client_sesskey[16], unsigned char* output)
{
	unsigned char xored[16];
	MD5_CTX md5;
	int idx;

	for (idx = 0; idx < 16; idx++)
		xored[idx] = server_sesskey[idx] ^ client_sesskey[idx];

	MD5_Init (&md5);
	MD5_Update (&md5, xored, 16);
	MD5_Final (output, &md5);
}
/*
 * Recover the plaintext password from a sniffed 10g logon exchange, given
 * the candidate's Oracle DES hash. Returns the password length, or -1 if
 * the padding check fails (i.e. wrong candidate).
 * NOTE(review): `decrypted` receives passlen bytes via strncpy without a
 * NUL terminator -- callers compare with strncmp, so verify any new caller
 * does the same.
 */
static int ORACLE_TNS_Decrypt_Password_10g (unsigned char OracleHash[8], unsigned char *auth_sesskey, unsigned char *auth_sesskey_c, unsigned char *auth_password, int auth_passwordlen, unsigned char *decrypted)
{
	int passlen = 0;
	unsigned char aes_key_bytes[32];
	unsigned char decrypted_server_sesskey[32];
	unsigned char decrypted_client_sesskey[32];
	unsigned char combined_sesskeys[16];
	char decrypted_password[64];

	/* AES key = Oracle hash (8 bytes) zero-padded. */
	memset (aes_key_bytes,0,sizeof(aes_key_bytes));
	memcpy (aes_key_bytes,OracleHash,8);

	// Decrypt server and client session keys
	ORACLE_TNS_Decrypt_AES128_CBC (aes_key_bytes, auth_sesskey, 32, decrypted_server_sesskey);
	ORACLE_TNS_Decrypt_AES128_CBC (aes_key_bytes, auth_sesskey_c, 32, decrypted_client_sesskey);

	// Combine server and client session keys
	ORACLE_TNS_Combine_SessKeys (&decrypted_server_sesskey[16], &decrypted_client_sesskey[16], combined_sesskeys);

	// Decrypt auth password with combined session key
	ORACLE_TNS_Decrypt_AES128_CBC (combined_sesskeys, auth_password, auth_passwordlen, (unsigned char*) decrypted_password);

	// terminate decrypted password with NULL
	/* The first 16 bytes are a header; the password starts at offset 16. */
	passlen = terminate_ascii_string (&decrypted_password[16], auth_passwordlen-16);
	if (passlen != -1)
		strncpy ((char*)decrypted, &decrypted_password[16], passlen);

	return passlen;
}
/*
 * For each candidate: compute Oracle's legacy DES-CBC hash of
 * user||password (two CBC passes; the second is keyed by the first pass's
 * final IV), use that hash to decrypt the sniffed password field, and mark
 * the slot cracked when the decryption yields the candidate itself.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int idx = 0;

	/* Reset hit flags from the previous batch. */
	if (any_cracked) {
		memset(cracked, 0, sizeof(*cracked) * count);
		any_cracked = 0;
	}

#ifdef DEBUG_ORACLE
	/* NOTE(review): buf/key_length are not in scope here; this dump will
	   not compile if DEBUG_ORACLE is defined. */
	dump_stuff_msg("cur_salt    ", buf, cur_salt->userlen+key_length);
#endif

#ifdef _OPENMP
#pragma omp parallel for
	for (idx = 0; idx < count; idx++)
#endif
	{
		unsigned char buf[256], buf1[256];
		unsigned int l;
		uint32_t iv[2];
		DES_key_schedule desschedule2;

		/* buf = UTF-16BE user || UTF-16BE uppercased password. */
		l = cur_salt->userlen + cur_key_len[idx];
		memcpy(buf, cur_salt->user, cur_salt->userlen);
		memcpy(buf + cur_salt->userlen, cur_key[idx], cur_key_len[idx]);
		iv[0] = iv[1] = 0;
		/* Pass 1 with the fixed key; the final CBC IV becomes the key... */
		DES_ncbc_encrypt((unsigned char *)buf, buf1, l, &desschedule1, (DES_cblock *) iv, DES_ENCRYPT);
		DES_set_key((DES_cblock *)iv, &desschedule2);
		iv[0] = iv[1] = 0;
		/* ...for pass 2, whose final IV is the Oracle hash. */
		DES_ncbc_encrypt((unsigned char *)buf, buf1, l, &desschedule2, (DES_cblock *) iv, DES_ENCRYPT);

#ifdef DEBUG_ORACLE
		dump_stuff_msg("  iv (the hash key) ", (unsigned char*)&iv[0], 8);
#endif

		ORACLE_TNS_Decrypt_Password_10g ((unsigned char*)iv, cur_salt->auth_sesskey, cur_salt->auth_sesskey_c, cur_salt->auth_pass, cur_salt->auth_pass_len, buf);
		if (!strncmp((char*)buf, plain_key[idx], strlen(plain_key[idx])))
		{
			cracked[idx] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	}
	return count;
}
/*
 * Parse a validated hash line into an ora10g_salt: UTF-16BE username plus
 * the two 32-byte session keys and the encrypted password, hex-decoded.
 * Returns a pointer to static storage (copied by the loader).
 */
static void *get_salt(char *ciphertext)
{
	static ora10g_salt salt;
	UTF8 tmp[MAX_USERNAME_LEN*5+1];
	char *cp;

	memset(&salt, 0, sizeof(salt));
	ciphertext += FORMAT_TAG_LEN;
	cp = strchr(ciphertext, '$');
	/* Username up to the first '$' (lengths already checked by valid()). */
	strncpy((char*)tmp, ciphertext, cp-ciphertext);
	tmp[cp-ciphertext] = 0;
	salt.userlen = enc_to_utf16_be(salt.user, MAX_USERNAME_LEN, tmp, cp-ciphertext);
	if (salt.userlen < 0)
		salt.userlen = strlen16(salt.user);
	/* Convert UTF-16 units to bytes. */
	salt.userlen *= 2;
	/* Hex-decode the two 64-digit session keys... */
	base64_convert(cp+1,e_b64_hex,64,salt.auth_sesskey,e_b64_raw,32,0,0);
	cp = strchr(cp+1, '$');
	base64_convert(cp+1,e_b64_hex,64,salt.auth_sesskey_c,e_b64_raw,32,0,0);
	cp = strchr(cp+1, '$') + 1;
	/* ...and the variable-length encrypted password field. */
	salt.auth_pass_len = strlen(cp)/2;
	base64_convert(cp,e_b64_hex,salt.auth_pass_len*2,salt.auth_pass,e_b64_raw,salt.auth_pass_len,0,0);
	return &salt;
}
/* DJB-style (djb2/xor) hash over the NUL-terminated UTF-16 data following
 * the first UTF-16 unit of the salt; masked to the salt hash table size. */
static int salt_hash(void *salt)
{
	unsigned int h = 5381;
	UTF16 *p = ((UTF16*)salt) + 1;

	for (; *p; p++)
		h = ((h << 5) + h) ^ *p;
	return h & (SALT_HASH_SIZE - 1);
}
/* Any hit in this batch? crypt_all() already recorded exact matches. */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}
/* Was slot `count` (an index here, per john's calling convention) a hit? */
static int cmp_one(void *binary, int count)
{
	return cracked[count];
}
/* crypt_all() compares full plaintexts already, so this is always true. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* john format descriptor wiring the o10glogon callbacks into the core. */
struct fmt_main fmt_o10glogon = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_SPLIT_UNIFIES_CASE | FMT_CASE | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		/* No stored binary: cracking works by decrypting the sniffed
		   password field, so binary/get_hash are all defaults. */
		fmt_default_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		salt_hash,
		NULL,
		set_salt,
		oracle_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
block.h | /**
* @file block.h
* @author Drahomír Dlabaja (xdlaba02)
* @date 12. 5. 2019
* @copyright 2019 Drahomír Dlabaja
* @brief Functions for block extraction and insertion.
*/
#pragma once
#include "stack_allocator.h"
#include <cmath>
#include <numeric>
#include <array>
/**
 * A D-dimensional block of T with run-time extents, backed by the project's
 * StackAllocator (allocated in the constructor, released in the destructor).
 * Heap construction is forbidden (operator new is declared but private),
 * presumably because the allocator is stack-ordered.
 *
 * NOTE(review): copy/move operations are not deleted, so copying a
 * DynamicBlock would make two owners of m_data and double-free it via
 * StackAllocator::free -- confirm no caller copies, or delete them.
 */
template<typename T, size_t D>
class DynamicBlock {
  T *m_data;                      // owned storage, size = product of m_size
  std::array<size_t, D> m_size;   // extent of each dimension

public:
  DynamicBlock(const std::array<size_t, D> &size): DynamicBlock(size.data()) {}

  /// Allocate product(size) elements and record the extents.
  /// NOTE(review): std::accumulate is seeded with int 1, so the byte count
  /// is computed in int before widening -- could overflow for huge blocks.
#pragma omp declare simd
  DynamicBlock(const size_t size[D]) {
    size_t bytes = std::accumulate(size, size + D, 1, std::multiplies<size_t>()) * sizeof(T);
    m_data = static_cast<T *>(StackAllocator::allocate(bytes));
    std::copy(size, size + D, std::begin(m_size));
  }

#pragma omp declare simd
  ~DynamicBlock() {
    StackAllocator::free(m_data);
  }

  /// Flat (already linearized) element access.
#pragma omp declare simd
  T &operator[](size_t index) {
    return m_data[index];
  }

#pragma omp declare simd
  const T &operator[](size_t index) const {
    return m_data[index];
  }

  /// Multi-dimensional access: pos[0] is the fastest-varying index
  /// (row-major over reversed dimensions).
#pragma omp declare simd
  T &operator[](const std::array<size_t, D> &pos) {
    size_t index = 0;
    for (size_t i = 1; i <= D; i++) {
      index *= m_size[D - i];
      index += pos[D - i];
    }
    return m_data[index];
  }

#pragma omp declare simd
  const T &operator[](const std::array<size_t, D> &pos) const {
    size_t index = 0;
    for (size_t i = 1; i <= D; i++) {
      index *= m_size[D - i];
      index += pos[D - i];
    }
    return m_data[index];
  }

  /// All extents.
#pragma omp declare simd
  const std::array<size_t, D> &size() const {
    return m_size;
  }

  /// Number of elements spanned by one step of dimension `depth`
  /// (product of the extents below it); stride(0) == 1.
#pragma omp declare simd
  size_t stride(size_t depth = D) const {
    return depth ? m_size[depth - 1] * stride(depth - 1) : 1;
  }

  /// Extent of dimension i.
#pragma omp declare simd
  size_t size(size_t i) const {
    return m_size[i];
  }

  /// Set every element to `value`.
#pragma omp declare simd
  void fill(T value) {
    size_t num_values = std::accumulate(std::begin(m_size), std::end(m_size), 1, std::multiplies<size_t>());
    std::fill(m_data, m_data + num_values, value);
  }

protected:
  // Heap allocation is intentionally unavailable (declared, never defined).
  static void *operator new(size_t);
  static void *operator new[](size_t);
};
/**
 * Copy a D-dimensional sub-block from `input` to `output`, recursing one
 * dimension at a time (highest dimension here, D-1 handled by the next
 * instantiation). `input`/`output` are callables indexed by a position
 * array; sizes/offsets describe each side's extent and where the block
 * starts. Positions are clamped to each side's extent; once the input side
 * is exhausted, the remaining output positions are filled by re-reading the
 * input at its last visited position (edge replication).
 */
template<size_t D>
struct moveBlock {
  template <typename IF, typename OF>
  moveBlock(
      IF &&input,  const std::array<size_t, D> &input_size,  const std::array<size_t, D> &input_offset,
      OF &&output, const std::array<size_t, D> &output_size, const std::array<size_t, D> &output_offset,
      const std::array<size_t, D> &size) {

    /* Strip the highest dimension to build the (D-1)-dimensional
       sub-problem passed to the recursive call. */
    std::array<size_t, D - 1> input_subsize   {};
    std::array<size_t, D - 1> input_suboffset {};
    std::array<size_t, D - 1> output_subsize  {};
    std::array<size_t, D - 1> output_suboffset{};
    std::array<size_t, D - 1> subsize         {};

    for (size_t i = 0; i < D - 1; i++) {
      input_subsize[i]    = input_size[i];
      input_suboffset[i]  = input_offset[i];
      output_subsize[i]   = output_size[i];
      output_suboffset[i] = output_offset[i];
      subsize[i]          = size[i];
    }

    size_t input_pos  = input_offset[D - 1];
    size_t output_pos = output_offset[D - 1];

    /* Clamp the walk to both sides' extents in this dimension. */
    const size_t input_end  = std::min(input_offset[D - 1] + size[D - 1],  input_size[D - 1]);
    const size_t output_end = std::min(output_offset[D - 1] + size[D - 1], output_size[D - 1]);

    std::array<size_t, D> full_input_pos  {};
    std::array<size_t, D> full_output_pos {};

    /* Main walk: advance both sides in lockstep. */
    while (input_pos < input_end && output_pos < output_end) {
      full_input_pos[D - 1]  = input_pos;
      full_output_pos[D - 1] = output_pos;

      /* Adapters that prepend the fixed highest-dimension coordinate. */
      auto inputF = [&](const std::array<size_t, D - 1> &pos) {
        std::copy(std::begin(pos), std::end(pos), std::begin(full_input_pos));
        return input(full_input_pos);
      };

      auto outputF = [&](const std::array<size_t, D - 1> &pos, const auto &value) {
        std::copy(std::begin(pos), std::end(pos), std::begin(full_output_pos));
        output(full_output_pos, value);
      };

      moveBlock<D - 1>(inputF, input_subsize, input_suboffset, outputF, output_subsize, output_suboffset, subsize);

      input_pos++;
      output_pos++;
    }

    /* Input exhausted first: replicate the input edge. full_input_pos keeps
       the last coordinate visited above (or zeros if the main walk never
       ran). */
    while (output_pos < output_end) {
      full_output_pos[D - 1] = output_pos;

      auto inputF = [&](const std::array<size_t, D - 1> &pos) {
        std::copy(std::begin(pos), std::end(pos), std::begin(full_input_pos));
        return input(full_input_pos);
      };

      auto outputF = [&](const std::array<size_t, D - 1> &pos, const auto &value) {
        std::copy(std::begin(pos), std::end(pos), std::begin(full_output_pos));
        output(full_output_pos, value);
      };

      moveBlock<D - 1>(inputF, input_subsize, input_suboffset, outputF, output_subsize, output_suboffset, subsize);

      output_pos++;
    }
  }
};
/**
 * Base case of the recursion: one-dimensional copy.
 * Copies samples while both sides are in range; if the input range ends
 * first, the remaining output positions receive the sample at the last
 * visited input position (edge replication -- or position 0 if the main
 * loop never ran).
 */
template<>
struct moveBlock<1> {
  /**
   * @brief The parital specialization for getting one sample.
   * @see getBlock<BS, D>::getBlock
   */
  template <typename IF, typename OF>
  moveBlock(
      IF &&input,  const std::array<size_t, 1> &input_size,  const std::array<size_t, 1> &input_offset,
      OF &&output, const std::array<size_t, 1> &output_size, const std::array<size_t, 1> &output_offset,
      const std::array<size_t, 1> &size) {

    size_t input_pos  = input_offset[0];
    size_t output_pos = output_offset[0];

    /* Clamp the copy to both sides' extents. */
    const size_t input_end  = std::min(input_offset[0] + size[0],  input_size[0]);
    const size_t output_end = std::min(output_offset[0] + size[0], output_size[0]);

    std::array<size_t, 1> full_input_pos  {};
    std::array<size_t, 1> full_output_pos {};

    /* Direct element-by-element copy. */
    while (input_pos < input_end && output_pos < output_end) {
      full_input_pos[0]  = input_pos;
      full_output_pos[0] = output_pos;

      output(full_output_pos, input(full_input_pos));

      input_pos++;
      output_pos++;
    }

    /* Input exhausted: replicate the last read sample. */
    while (output_pos < output_end) {
      full_output_pos[0] = output_pos;

      output(full_output_pos, input(full_input_pos));

      output_pos++;
    }
  }
};
|
gemm.c | #include "gemm.h"
#include "utils.h"
#include "cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include <omp.h>
#ifdef OPENBLAS
#include <cblas.h>
#endif
#ifdef OPENGEMM
#include "gemm_kernels.h"
#endif
/* Binary-weight GEMM: A is an MxK char matrix of boolean flags; for each
 * nonzero A[i][k] the k-th row of B is added to row i of C, otherwise it is
 * subtracted. ALPHA is accepted for signature compatibility but unused. */
void gemm_bin(int M, int N, int K, real_t ALPHA, char *A, int lda, real_t *B,
		int ldb, real_t *C, int ldc) {
	int row, col, inner;

	for (row = 0; row < M; ++row) {
		for (inner = 0; inner < K; ++inner) {
			const char flag = A[row * lda + inner];

			for (col = 0; col < N; ++col) {
				if (flag)
					C[row * ldc + col] += B[inner * ldb + col];
				else
					C[row * ldc + col] -= B[inner * ldb + col];
			}
		}
	}
}
/* Allocate a rows x cols matrix (flat, row-major) filled with uniform
 * pseudo-random values in [0, 1]. Caller frees with free(). */
real_t *random_matrix(int rows, int cols) {
	const int total = rows * cols;
	real_t *mat = calloc(total, sizeof(real_t));
	int idx;

	for (idx = 0; idx < total; ++idx) {
		mat[idx] = (real_t) rand() / RAND_MAX;
	}
	return mat;
}
/*
 * Benchmark gemm_cpu on random matrices: runs 10 multiplications of an
 * (m x k) by (k x n) problem with the requested transpose flags and prints
 * the total elapsed CPU time.
 *
 * Fix: the elapsed value was computed in seconds ((end-start)/CLOCKS_PER_SEC)
 * but printed with an "ms" label; it is now converted to milliseconds.
 */
void time_random_matrix(int TA, int TB, int m, int k, int n) {
	real_t *a;
	if (!TA)
		a = random_matrix(m, k);
	else
		a = random_matrix(k, m);
	int lda = (!TA) ? k : m;
	real_t *b;
	if (!TB)
		b = random_matrix(k, n);
	else
		b = random_matrix(n, k);
	int ldb = (!TB) ? n : k;

	real_t *c = random_matrix(m, n);
	int i;
	clock_t start = clock(), end;
	for (i = 0; i < 10; ++i) {
		gemm_cpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c, n);
	}
	end = clock();
	/* Total CPU time for all 10 iterations, in milliseconds. */
	printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n", m, k,
			k, n, TA, TB, (real_t)(end - start) * 1000 / CLOCKS_PER_SEC);
	free(a);
	free(b);
	free(c);
}
/*
 * Public GEMM entry point: C = ALPHA * op(A) * op(B) + BETA * C,
 * where op() transposes when TA/TB are nonzero.
 * Currently just forwards to the CPU implementation.
 */
void gemm(int TA, int TB, int M, int N, int K, real_t ALPHA, real_t *A, int lda,
        real_t *B, int ldb, real_t BETA, real_t *C, int ldc) {
    gemm_cpu(TA, TB, M, N, K, ALPHA, A, lda, B, ldb, BETA, C, ldc);
}
/*
 * C += ALPHA * A * B (no transposes). Inner loops are ordered i-k-j so
 * B and C rows are walked sequentially (cache-friendly).
 *
 * Fix: j and k are declared outside the parallel region, so under the
 * bare `#pragma omp parallel for` they were SHARED between threads —
 * a data race that silently corrupts results with >1 thread. They are
 * now listed private. The obsolete `register` qualifier was dropped
 * (removed in modern C++, a no-op hint in C).
 */
void gemm_nn(int M, int N, int K, real_t ALPHA, real_t *A, int lda, real_t *B,
        int ldb, real_t *C, int ldc) {
    int i, j, k;
#pragma omp parallel for private(j, k)
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            real_t A_PART = ALPHA * A[i * lda + k];
            for (j = 0; j < N; ++j) {
                C[i * ldc + j] += A_PART * B[k * ldb + j];
            }
        }
    }
}
/*
 * C += ALPHA * A * B^T. Each C element is a dot product of a row of A
 * with a row of B.
 *
 * Fix: j and k were shared across OpenMP threads (declared outside the
 * parallel loop with no data-sharing clause) — a data race. They are
 * now private. `register` dropped (obsolete).
 */
void gemm_nt(int M, int N, int K, real_t ALPHA, real_t *A, int lda, real_t *B,
        int ldb, real_t *C, int ldc) {
    int i, j, k;
#pragma omp parallel for private(j, k)
    for (i = 0; i < M; ++i) {
        for (j = 0; j < N; ++j) {
            real_t sum = 0;
            for (k = 0; k < K; ++k) {
                sum += ALPHA * A[i * lda + k] * B[j * ldb + k];
            }
            C[i * ldc + j] += sum;
        }
    }
}
/*
 * C += ALPHA * A^T * B. A is accessed column-wise (A[k*lda + i]).
 *
 * Fix: j and k were shared across OpenMP threads (declared outside the
 * parallel loop with no data-sharing clause) — a data race. They are
 * now private. `register` dropped (obsolete).
 */
void gemm_tn(int M, int N, int K, real_t ALPHA, real_t *A, int lda, real_t *B,
        int ldb, real_t *C, int ldc) {
    int i, j, k;
#pragma omp parallel for private(j, k)
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            real_t A_PART = ALPHA * A[k * lda + i];
            for (j = 0; j < N; ++j) {
                C[i * ldc + j] += A_PART * B[k * ldb + j];
            }
        }
    }
}
/*
 * C += ALPHA * A^T * B^T. Both operands are traversed with transposed
 * indexing (A[i + k*lda], B[k + j*ldb]).
 *
 * Fix: j and k were shared across OpenMP threads (declared outside the
 * parallel loop with no data-sharing clause) — a data race. They are
 * now private. `register` dropped (obsolete).
 */
void gemm_tt(int M, int N, int K, real_t ALPHA, real_t *A, int lda, real_t *B,
        int ldb, real_t *C, int ldc) {
    int i, j, k;
#pragma omp parallel for private(j, k)
    for (i = 0; i < M; ++i) {
        for (j = 0; j < N; ++j) {
            real_t sum = 0;
            for (k = 0; k < K; ++k) {
                sum += ALPHA * A[i + k * lda] * B[k + j * ldb];
            }
            C[i * ldc + j] += sum;
        }
    }
}
/*
 * CPU GEMM dispatcher: C = ALPHA * op(A) * op(B) + BETA * C.
 *
 * With OPENBLAS defined the whole operation is handed to
 * cblas_sgemm (row-major); otherwise C is first scaled by BETA and the
 * product is accumulated by one of the four hand-written kernels
 * selected from the TA/TB transpose flags.
 *
 * NOTE(review): the OPENBLAS path calls cblas_sgemm (float) regardless
 * of what real_t is — confirm real_t == float when building with
 * OPENBLAS.
 */
void gemm_cpu(int TA, int TB, int M, int N, int K, real_t ALPHA, real_t *A,
        int lda, real_t *B, int ldb, real_t BETA, real_t *C, int ldc) {
    //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
#ifdef OPENBLAS
    /*
     void cblas_sgemm ( const CBLAS_LAYOUT layout,
     const CBLAS_TRANSPOSE TransA,
     const CBLAS_TRANSPOSE TransB,
     const int M,
     const int N,
     const int K,
     const float alpha,
     const float * A,
     const int lda,
     const float * B,
     const int ldb,
     const float beta,
     float * C,
     const int ldc
     )
     */
    /* Map the 0/1 transpose flags onto the CBLAS enum pair. */
    CBLAS_TRANSPOSE transa, transb;
    if (!TA && !TB){
        transa = CblasNoTrans;
        transb = CblasNoTrans;
    }else if (TA && !TB){
        transa = CblasTrans;
        transb = CblasNoTrans;
    }else if (!TA && TB){
        transa = CblasNoTrans;
        transb = CblasTrans;
    }else{
        transa = CblasTrans;
        transb = CblasTrans;
    }
    //           0              1       2       3  4  5  6      7  8    9  10   11    12 13
    cblas_sgemm(CblasRowMajor, transa, transb, M, N, K, ALPHA, A, lda, B, ldb, BETA, C, ldc);
#else
    /* Pre-scale C by BETA, then accumulate ALPHA * op(A) * op(B). */
    int i, j;
    for (i = 0; i < M; ++i) {
        for (j = 0; j < N; ++j) {
            C[i * ldc + j] *= BETA;
        }
    }
    if (!TA && !TB)
        gemm_nn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    else if (TA && !TB)
        gemm_tn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    else if (!TA && TB)
        gemm_nt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    else
        gemm_tt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
#endif
}
#ifdef GPU
/*
 * GPU GEMM on the given CUDA stream: C = ALPHA * op(A) * op(B) + BETA * C,
 * with A_gpu/B_gpu/C_gpu already resident on the device.
 *
 * Row-major data is fed to column-major cuBLAS by computing the
 * transposed product B^T * A^T (note the swapped operands and the
 * swapped N/M extents) — the standard row-major/cuBLAS trick.
 *
 * The backend is chosen at compile time: without OPENGEMM, cuBLAS
 * (cublasSgemm/cublasDgemm, or a half-precision helper); with
 * OPENGEMM, the project's own sgemm/dgemm kernels from gemm_kernels.h.
 * use_tensor_cores selects the cuBLAS handle via blas_handle().
 */
void gemm_gpu(int TA, int TB, int M, int N, int K, real_t ALPHA, real_t *A_gpu,
        int lda, real_t *B_gpu, int ldb, real_t BETA, real_t *C_gpu, int ldc,
        unsigned char use_tensor_cores, cudaStream_t st) {
    cublasHandle_t handle = blas_handle(use_tensor_cores);
    cublasSetStream(handle, st);
    //	if(TB || TA)
    //	{
    //		printf("Matrix need to be transposed %d %d\n", TB, TA);
    //	}
#ifndef OPENGEMM
#if REAL_TYPE == HALF
    //run_cuda_gemm_half(int TA, int TB, int M, int N, int K, real_t ALPHA, real_t *A_gpu,
    //		int lda, real_t *B_gpu, int ldb, real_t BETA, real_t *C_gpu, int ldc)
    run_cuda_gemm_half(handle, TA, TB, M, N, K, ALPHA, A_gpu, lda, B_gpu, ldb, BETA, C_gpu, ldc, st);
#elif REAL_TYPE == FLOAT
    cudaError_t status = (cudaError_t) cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
            (TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb,
            A_gpu, lda, &BETA, C_gpu, ldc);
    check_error(status);
#elif REAL_TYPE == DOUBLE
    cudaError_t status = (cudaError_t) cublasDgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
            (TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb,
            A_gpu, lda, &BETA, C_gpu, ldc);
    check_error(status);
#endif
#else
#if REAL_TYPE == HALF
    run_cuda_gemm_half(handle, TA, TB, M, N, K, ALPHA, A_gpu, lda, B_gpu, ldb, BETA, C_gpu, ldc, st);
#elif REAL_TYPE == FLOAT
    sgemm((TB ? CUBLAS_OP_T : CUBLAS_OP_N),
            (TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb,
            A_gpu, lda, &BETA, C_gpu, ldc);
#elif REAL_TYPE == DOUBLE
    dgemm((TB ? CUBLAS_OP_T : CUBLAS_OP_N),
            (TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb,
            A_gpu, lda, &BETA, C_gpu, ldc);
#endif
#endif
    //	cublasHandle_t handle = blas_handle();
    //	cudaError_t status = cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
    //			(TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb,
    //			A_gpu, lda, &BETA, C_gpu, ldc);
}
/*
 * Benchmark gemm_gpu on random HOST matrices: 32 iterations of an
 * (m x k) by (k x n) product, printing elapsed clock() time in seconds.
 *
 * NOTE(review): a, b, c are host buffers passed directly to gemm_gpu,
 * and there is no cudaDeviceSynchronize() before end = clock() —
 * confirm this is intentional (kernel launches are asynchronous, so
 * the timing may only cover launch overhead).
 */
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n) {
    real_t *a;
    if (!TA)
        a = random_matrix(m, k);
    else
        a = random_matrix(k, m);
    int lda = (!TA) ? k : m;
    real_t *b;
    if (!TB)
        b = random_matrix(k, n);
    else
        b = random_matrix(n, k);
    int ldb = (!TB) ? n : k;
    real_t *c = random_matrix(m, n);
    int i;
    clock_t start = clock(), end;
    for (i = 0; i < 32; ++i) {
        gemm_gpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c, n, 0, 0x0);
    }
    end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n", m, k,
            k, n, TA, TB, (real_t)(end - start) / CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}
/*
 * Benchmark gemm_gpu with device-resident matrices: uploads random
 * (m x k) and (k x n) operands, runs `iter` synchronized multiplies,
 * and reports elapsed seconds plus achieved GFLOPS
 * (flop count m*n*(2k+2) per multiply).
 */
void time_gpu(int TA, int TB, int m, int k, int n) {
    int iter = 10;
    real_t *a = random_matrix(m, k);
    real_t *b = random_matrix(k, n);
    int lda = (!TA) ? k : m;
    int ldb = (!TB) ? n : k;
    real_t *c = random_matrix(m, n);
    /* device copies of the operands */
    real_t *a_cl = cuda_make_array(a, m * k);
    real_t *b_cl = cuda_make_array(b, k * n);
    real_t *c_cl = cuda_make_array(c, m * n);
    int i;
    clock_t start = clock(), end;
    for (i = 0; i < iter; ++i) {
        gemm_gpu(TA, TB, m, n, k, 1, a_cl, lda, b_cl, ldb, 1, c_cl, n, 0, 0x0);
        cudaDeviceSynchronize(); /* make each iteration's time observable */
    }
    double flop = ((double) m) * n * (2. * k + 2.) * iter;
    double gflop = flop / pow(10., 9);
    end = clock();
    double seconds = sec(end - start);
    printf(
            "Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",
            m, k, k, n, TA, TB, seconds, gflop / seconds);
    cuda_free(a_cl);
    cuda_free(b_cl);
    cuda_free(c_cl);
    free(a);
    free(b);
    free(c);
}
/*
 * Compare GPU vs CPU GEMM on identical random inputs (fixed seed) and
 * print the mean squared difference (labelled SSE, divided by m*n).
 *
 * NOTE(review): a and b are host pointers handed to gemm_gpu — confirm
 * gemm_gpu (or the build used here) accepts host memory in this test
 * configuration.
 */
void test_gpu_accuracy(int TA, int TB, int m, int k, int n) {
    srand(0); /* deterministic inputs so runs are reproducible */
    real_t *a;
    if (!TA)
        a = random_matrix(m, k);
    else
        a = random_matrix(k, m);
    int lda = (!TA) ? k : m;
    real_t *b;
    if (!TB)
        b = random_matrix(k, n);
    else
        b = random_matrix(n, k);
    int ldb = (!TB) ? n : k;
    real_t *c = random_matrix(m, n);
    real_t *c_gpu = random_matrix(m, n);
    /* zero both accumulators so only the product is compared */
    memset(c, 0, m * n * sizeof(real_t));
    memset(c_gpu, 0, m * n * sizeof(real_t));
    int i;
    //pm(m,k,b);
    gemm_gpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c_gpu, n, 0, 0x0);
    //printf("GPU\n");
    //pm(m, n, c_gpu);
    gemm_cpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c, n);
    //printf("\n\nCPU\n");
    //pm(m, n, c);
    double sse = 0;
    for (i = 0; i < m * n; ++i) {
        //printf("%f %f\n", c[i], c_gpu[i]);
        sse += pow(c[i] - c_gpu[i], 2);
    }
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n", m, k,
            k, n, TA, TB, sse / (m * n));
    free(a);
    free(b);
    free(c);
    free(c_gpu);
}
/*
 * Run a fixed suite of GPU GEMM timing cases (sizes taken from common
 * convolution workloads). Always returns 0; the commented-out section
 * preserves an older, larger suite.
 */
int test_gpu_blas() {
    /*
     test_gpu_accuracy(0,0,10,576,75);

     test_gpu_accuracy(0,0,17,10,10);
     test_gpu_accuracy(1,0,17,10,10);
     test_gpu_accuracy(0,1,17,10,10);
     test_gpu_accuracy(1,1,17,10,10);
     test_gpu_accuracy(0,0,1000,10,100);
     test_gpu_accuracy(1,0,1000,10,100);
     test_gpu_accuracy(0,1,1000,10,100);
     test_gpu_accuracy(1,1,1000,10,100);

     test_gpu_accuracy(0,0,10,10,10);

     time_gpu(0,0,64,2916,363);
     time_gpu(0,0,64,2916,363);
     time_gpu(0,0,64,2916,363);
     time_gpu(0,0,192,729,1600);
     time_gpu(0,0,384,196,1728);
     time_gpu(0,0,256,196,3456);
     time_gpu(0,0,256,196,2304);
     time_gpu(0,0,128,4096,12544);
     time_gpu(0,0,128,4096,4096);
     */
    time_gpu(0, 0, 64, 75, 12544);
    time_gpu(0, 0, 64, 75, 12544);
    time_gpu(0, 0, 64, 75, 12544);
    time_gpu(0, 0, 64, 576, 12544);
    time_gpu(0, 0, 256, 2304, 784);
    time_gpu(1, 1, 2304, 256, 784);
    time_gpu(0, 0, 512, 4608, 196);
    time_gpu(1, 1, 4608, 512, 196);
    return 0;
}
#endif
|
direct.h | #ifndef DIRECT_H
#define DIRECT_H
#include "../integrator.h"
// Direct-lighting integrator: estimates one-bounce illumination by
// combining light sampling and BRDF sampling with multiple importance
// sampling (MIS, power heuristic with exponent 2).
class Direct : public Integrator {
public:
    int N; // samples per pixel, laid out on a sqrt(N) x sqrt(N) stratified grid in render()

    // _N: samples per pixel; camera and sampler are forwarded to Integrator.
    Direct(const std::shared_ptr<Camera>& _camera, const std::shared_ptr<Sampler>& _sampler, int _N) : Integrator(_camera, _sampler), N(_N) {};

    // Light-sampling half of the MIS estimator.
    // ray/res: the camera ray and its hit; wo_local: outgoing direction in
    // the shading frame; (n, s, t): shading-frame basis; light: the light
    // being sampled. Returns the (unweighted-by-light-count) contribution.
    RGB sampleLight(const Ray& ray, Scene& scene, Hit& res, const Vec3& wo_local, const Vec3& n, const Vec3& s, const Vec3& t, const std::shared_ptr<Light>& light) const {
        auto hitMaterial = res.hitPrimitive->material;
        //Light Sampling
        Vec3 col_light;
        float light_pdf = 0;
        Vec3 wi_light;
        Vec3 samplePos;
        RGB le = light->sample(res, *this->sampler, wi_light, samplePos, light_pdf);
        // Nothing to gain from a black sample or a degenerate pdf.
        if(isZero(le) || light_pdf == 0) return RGB(0);
        Vec3 wi_light_local = worldToLocal(wi_light, n, s, t);
        //BRDF
        // cos term clamped at 0: directions below the surface contribute nothing.
        RGB k = hitMaterial->f(res, wo_local, wi_light_local) * std::max(cosTheta(wi_light_local), 0.0f);
        float brdf_pdf = hitMaterial->Pdf(wo_local, wi_light_local);
        if(isZero(k)) return RGB(0);
        //MIS Weight (power heuristic, beta = 2)
        float weight = std::pow(light_pdf, 2.0f)/(std::pow(light_pdf, 2.0f) + std::pow(brdf_pdf, 2.0f));
        if(std::isinf(weight) || std::isnan(weight)) return RGB(0);
        //Visibility Test
        Ray shadowRay(res.hitPos, wi_light);
        Hit shadow_res;
        if(light->type == LIGHT_TYPE::AREA) {
            // Area light: the shadow ray must hit the sampled light itself,
            // at (approximately) the sampled point.
            if(scene.intersect(shadowRay, shadow_res)) {
                if(shadow_res.hitPrimitive->light == light && (samplePos - shadow_res.hitPos).length2() < 1e-6) {
                    col_light += weight * k * le/light_pdf;
                }
            }
        }
        else if(light->type == LIGHT_TYPE::SKY) {
            // Sky light: visible iff the shadow ray escapes the scene.
            if(!scene.intersect(shadowRay, shadow_res)) {
                col_light += weight * k * le/light_pdf;
            }
        }
        //Handle Delta Light Case
        else if(light->type == LIGHT_TYPE::POINT) {
            // Delta light: no MIS weight (BRDF sampling can never hit it).
            // NOTE(review): shadow_res.t is read even when intersect() finds
            // nothing — assumes Hit initializes t to a large value; confirm.
            scene.intersect(shadowRay, shadow_res);
            if(shadow_res.t >= (samplePos - shadowRay.origin).length()) {
                col_light += k * le/light_pdf;
            }
        }
        return col_light;
    };

    // BRDF-sampling half of the MIS estimator: samples a direction from the
    // material, traces it, and accumulates emitted radiance if a light is hit
    // (specular materials skip the MIS weight since their pdf is a delta).
    RGB sampleBRDF(const Ray& ray, Scene& scene, Hit& res, const Vec3& wo_local, const Vec3& n, const Vec3& s, const Vec3& t, const std::shared_ptr<Light>& light) const {
        Vec3 col_brdf;
        auto hitMaterial = res.hitPrimitive->material;
        //BRDF Sampling
        Vec3 wi_local;
        float brdf_pdf;
        RGB brdf = hitMaterial->sample(res, wo_local, *this->sampler, wi_local, brdf_pdf);
        if(isZero(brdf) || brdf_pdf == 0) return RGB(0);
        float cos = absCosTheta(wi_local);
        Vec3 wi = localToWorld(wi_local, n, s, t);
        RGB k = brdf * cos/brdf_pdf;
        //Visibility Test
        Ray shadowRay(res.hitPos, wi);
        Hit shadow_res;
        if(light->type != LIGHT_TYPE::SKY) {
            if(scene.intersect(shadowRay, shadow_res)) {
                // NOTE(review): any emissive primitive counts here, not only
                // `light`, yet Le/Pdf are evaluated on `light` — confirm this
                // matches the light-selection scheme in Li().
                if(shadow_res.hitPrimitive->light != nullptr) {
                    //Light
                    RGB le = light->Le(shadow_res, shadowRay);
                    float light_pdf = light->Pdf(res, wi, shadow_res);
                    //MIS Weight (power heuristic, beta = 2)
                    float weight = std::pow(brdf_pdf, 2.0f)/(std::pow(brdf_pdf, 2.0f) + std::pow(light_pdf, 2.0f));
                    if(std::isinf(weight) || std::isnan(weight)) return RGB(0);
                    if(hitMaterial->type != MATERIAL_TYPE::SPECULAR) {
                        col_brdf += weight * k * le;
                    }
                    //Handle Specular Case
                    else {
                        col_brdf += k * le;
                    }
                }
            }
        }
        else {
            // Sky light: contributes only when the sampled ray leaves the scene.
            if(!scene.intersect(shadowRay , shadow_res)) {
                //Light
                RGB le = light->Le(shadow_res, shadowRay);
                float light_pdf = light->Pdf(res, wi, shadow_res);
                //MIS Weight
                float weight = std::pow(brdf_pdf, 2.0f)/(std::pow(brdf_pdf, 2.0f) + std::pow(light_pdf, 2.0f));
                if(std::isinf(weight) || std::isnan(weight)) return RGB(0);
                if(hitMaterial->type != MATERIAL_TYPE::SPECULAR) {
                    col_brdf += weight * k * le;
                }
                //Handle Specular Case
                else {
                    col_brdf += k * le;
                }
            }
        }
        return col_brdf;
    };

    // Radiance along a camera ray: direct hit on an emitter returns its Le;
    // otherwise one light is chosen uniformly and both MIS estimators are
    // summed, scaled by lights.size() to undo the uniform selection pdf.
    RGB Li(const Ray& ray, Scene& scene) const {
        Hit res;
        if(scene.intersect(ray, res)) {
            if(res.hitPrimitive->light != nullptr) {
                return res.hitPrimitive->light->Le(res, ray);
            }
            //Local Coordinate
            Vec3 wo = -ray.direction;
            Vec3 n = res.hitNormal;
            Vec3 s, t;
            orthonormalBasis(n, s, t);
            Vec3 wo_local = worldToLocal(wo, n, s, t);
            //Light Sampling (uniform pick; clamp the edge case getNext() == 1)
            unsigned int light_index = (int)(scene.lights.size() * (*this->sampler).getNext());
            if(light_index == scene.lights.size()) light_index--;
            auto light = scene.lights[light_index];
            return scene.lights.size() * (sampleLight(ray, scene, res, wo_local, n, s, t, light) + sampleBRDF(ray, scene, res, wo_local, n, s, t, light));
        }
        else {
            // Ray escaped: return sky radiance.
            return scene.sky->Le(res, ray);
        }
    };

    // Render the full frame: N stratified samples per pixel, rows of each
    // column processed in parallel, result written to "output.ppm".
    // NOTE(review): this->sampler is shared by all OpenMP threads without
    // synchronization or per-thread state — confirm Sampler is thread-safe,
    // otherwise getNext() races.
    void render(Scene& scene) const {
        const int width = this->camera->film->width;
        const int height = this->camera->film->height;
        const int N_sqrt = std::sqrt(N);
        for(int i = 0; i < width; i++) {
#pragma omp parallel for schedule(dynamic, 1)
            for(int j = 0; j < height; j++) {
                for(int k = 0; k < N; k++) {
                    // Jittered position inside the k-th stratum of the pixel.
                    float rx = 2*sampler->getNext() - 1;
                    float ry = 2*sampler->getNext() - 1;
                    float sx = float(k%N_sqrt)/N_sqrt + rx/(2.0*N_sqrt) + 1/(2.0*N_sqrt);
                    float sy = k/N_sqrt * 1/float(N_sqrt) + ry/(2.0*N_sqrt) + 1/(2.0*N_sqrt);
                    // Normalized screen coordinates (aspect handled via height).
                    float u = (2.0*(i + sx) - width)/height;
                    float v = (2.0*(j + sy) - height)/height;
                    Vec2 uv(u, v);
                    Ray ray;
                    float weight;
                    if(!this->camera->getRay(u, v, *(this->sampler), ray, weight)) {
                        this->camera->film->addSample(uv, RGB(0, 0, 0));
                    }
                    else {
                        RGB li = weight*this->Li(ray, scene);
                        this->camera->film->addSample(uv, li);
                    }
                    // Progress bar from one thread only to keep output readable.
                    if(omp_get_thread_num() == 0) {
                        std::cout << progressbar(i, width) << " " << percentage(i, width) << "\r" << std::flush;
                    }
                }
            }
        }
        this->camera->film->ppm_output("output.ppm");
    };
};
#endif
|
main.h | #pragma once
#include <iostream>
#include <cmath>
#include <limits>
#include <vector>
/*
Return sign of x
*/
/*
  Sign of x: +1 for positive, -1 for negative, 0 for zero.
  (Branch-free form: each comparison contributes 0 or 1.)
*/
template <class T>
int sgn(const T & x) {
  return (x > 0) - (x < 0);
}
/*
Class for storing a root [x,y] representing x + I y in complex plane.
*/
/*
  Class for storing a root [x,y] representing x + I y in the complex plane.
*/
template <typename T>
struct Troot {
  T x[2];  // x[0] = real part, x[1] = imaginary part

  Troot(T y[2]) : x{y[0], y[1]} {}
  Troot(const T & x0, const T & x1) : x{x0, x1} {}

  T * ptr() {return x;}
  T& operator[](const std::size_t & idx) { return x[idx]; }
  const T& operator[](const std::size_t & idx) const { return x[idx]; }
};

/*
  Container of roots that rejects approximate duplicates on insertion.
  A candidate matches an existing root when BOTH components agree within
  the mixed tolerance: relative eps (scaled by the larger magnitude)
  plus absolute min.
*/
template <class T>
struct Troots: public std::vector<Troot<T>> {

  // Add root (x0, x1) unless a matching root is already stored.
  void add(const T &x0, const T &x1, const T & eps = 0, const T & min = 0){
    for (auto && t: *this)
      if (
        std::abs(t[0] - x0) <= eps*std::max(std::abs(x0), std::abs(t[0])) + min &&
        std::abs(t[1] - x1) <= eps*std::max(std::abs(x1), std::abs(t[1])) + min
      )
        return;  // duplicate within tolerance — keep the existing root
    this->emplace_back(x0, x1);
  }

  // Array-form overload; delegates to the scalar version (the original
  // duplicated the whole tolerance test verbatim in both overloads).
  void add(T x[2], const T & eps = 0, const T & min = 0){
    add(x[0], x[1], eps, min);
  }

  // Print all stored roots, one "re im" pair per line.
  void print(){
    for (auto && x: *this) std::cout << x[0] << ' ' << x[1] << '\n';
  }
};
/*
System of two functions f - gradient of the Kopal potential Omega
f := (fx,fy) = grad(Omega)
*/
/*
  Gradient of the Kopal potential Omega: f := (fx, fy) = grad(Omega).
  x: evaluation point; p = {q, b, sin(beta), cos(beta)};
  choice selects which components to write (bit 0 -> f[0], bit 1 -> f[1]).
*/
template <class T>
void fun(T *x, T *f, T *p, unsigned choice = 3){
  const T mass_ratio = p[0];
  const T b_par      = p[1];
  const T sin_b      = p[2];
  const T cos_b      = p[3];

  const T dx1 = x[0] - 1;                           // offset from companion at (1, 0)
  T inv_r1 = 1/(x[0]*x[0] + x[1]*x[1]);             // 1/r1^2 -> becomes 1/r1^3
  T inv_r2 = 1/(dx1*dx1 + x[1]*x[1]);               // 1/r2^2 -> becomes 1/r2^3
  inv_r1 *= std::sqrt(inv_r1);
  inv_r2 *= std::sqrt(inv_r2);

  const T tide = b_par*(x[0]*cos_b - x[1]*sin_b);   // centrifugal/tilt term

  if (choice & 1u)
    f[0] = -inv_r1*x[0] - mass_ratio*(inv_r2*dx1 + 1) + cos_b*tide;
  if (choice & 2u)
    f[1] = -(inv_r1 + mass_ratio*inv_r2)*x[1] - sin_b*tide;
}
/*
Gradient of the two functions -- Hessian of the Kopal potential Omega
grad(f) = Hessian(Omega)
*/
/*
  Hessian of the Kopal potential Omega: H = grad(f) = Hessian(Omega).
  x: evaluation point; p = {q, b, sin(beta), cos(beta)};
  if f is non-null, the gradient is also written into f (same formulas
  as fun()).
  f1k/f2k are the powers 1/r1^k and 1/r2^k of the distances to the two
  centers at (0,0) and (1,0).
*/
template <class T>
void Dfun(T x[2], T H[2][2], T *p, T *f = 0){
  T
  q = p[0],
  b = p[1],
  s = p[2],
  c = p[3],
  u = x[0] - 1,
  x2 = x[0]*x[0],
  u2 = u*u,
  z2 = x[1]*x[1],
  v = b*(x[0]*c - x[1]*s),
  f12 = 1/(x2 + z2),
  f22 = 1/(u2 + z2),
  f11 = std::sqrt(f12),
  f21 = std::sqrt(f22),
  f13 = f12*f11,
  f23 = f22*f21,
  f15 = f13*f12,
  f25 = f23*f22;
  // optional gradient output (matches fun())
  if (f) {
    f[0] = -f13*x[0] - q*(f23*u + 1) + c*v;
    f[1] = -(f13 + q*f23)*x[1] - s*v;
  }
  // symmetric 2x2 Hessian
  H[0][0] = 3*x2*f15 - f13 + q*(2*u2 - z2)*f25 + b*c*c;
  H[0][1] = H[1][0] = 3*x[1]*(x[0]*f15 + q*u*f25) - b*c*s;
  H[1][1] = -f13 -q*f23 + 3*z2*(f15 + q*f25) + b*s*s;
}
/*
  Classify a critical point from its 2x2 Hessian H via the signs of the
  trace and determinant:
    +2 -> minimum, -2 -> maximum, 0 -> saddle.
  A zero trace is treated as the saddle combination (+1, -1); a zero
  determinant is reported as a singular Hessian on stderr.
*/
template <class T>
int extreme_type(T H[2][2]){
  const T trace = H[0][0] + H[1][1];
  const T det   = H[0][0]*H[1][1] - H[0][1]*H[0][1];
  int s0, s1;
  if (trace != 0) {
    // inline sign: (x>0)-(x<0) is -1/0/+1
    s0 = (trace > 0) - (trace < 0);
    s1 = s0 * ((det > 0) - (det < 0));
  } else {
    s0 = +1;
    s1 = -1;
  }
  if (s1 == 0) std::cerr << "Singular hessian\n";
  return s0 + s1;
}
/*
Regularized system of two functions fR -- without singularities
fR = ((x0-1)^2 + x1^2)^(3/2) (x0^2 + x1^2)^(3/2) f
If could be simpler pre-factor, but it designed to simplify expressions.
*/
/*
  Regularized gradient of the Kopal potential (no singularities):
    fR = ((x0-1)^2 + x1^2)^(3/2) * (x0^2 + x1^2)^(3/2) * f
  Same zero set as fun(), but finite everywhere — used for sign scans.
  choice selects which components to write (bit 0 -> f[0], bit 1 -> f[1]).
*/
template <class T>
void funR(T *x, T *f, T *p, unsigned choice = 3){
  const T q  = p[0];
  const T bb = p[1];
  const T sb = p[2];   // sin(beta)
  const T cb = p[3];   // cos(beta)

  const T w = x[0] - 1;
  T r1c = x[0]*x[0] + x[1]*x[1];   // r1^2, promoted to r1^3 below
  T r2c = w*w + x[1]*x[1];         // r2^2, promoted to r2^3 below
  r1c *= std::sqrt(r1c);
  r2c *= std::sqrt(r2c);

  const T prod = r1c*r2c;
  const T tide = bb*(x[0]*cb - x[1]*sb);

  if (choice & 1u)
    f[0] = -r2c*x[0] - q*(r1c*w + prod) + cb*tide*prod;
  if (choice & 2u)
    f[1] = -(r2c + q*r1c)*x[1] - sb*tide*prod;
}
/*
Solving Eq. A x = b
*/
/*
  Solve the 2x2 linear system A x = b by Cramer's rule.
  Returns false (x untouched) when A is exactly singular.
*/
template <class T> bool solve2D(T A[2][2], T b[2], T x[2]){
  const T det = A[0][0]*A[1][1] - A[1][0]*A[0][1];
  if (det == 0) return false;
  const T num0 = A[1][1]*b[0] - A[0][1]*b[1];
  const T num1 = A[0][0]*b[1] - A[1][0]*b[0];
  x[0] = num0/det;
  x[1] = num1/det;
  return true;
}
/*
Newton-Raphson method for finding roots of the system of equations
around the point x:
x _{n+1} = x_n - Hess(x_n)^-1 f(x_n)
*/
/*
  Newton-Raphson iteration for grad(Omega) = 0 around the point x:
    x_{n+1} = x_n - Hess(x_n)^-1 f(x_n)
  x: in/out root estimate; p = {q, b, sin(beta), cos(beta)};
  max_iter: iteration cap; eps/min: relative/absolute step tolerance.
  Iterates in long double regardless of T for extra headroom.
  Returns false (x unchanged... actually x already converged state is
  not written) when the iteration cap is exhausted.
  NOTE(review): the solve2D return value is ignored — on a singular
  Hessian dx is left stale/uninitialized; confirm this cannot occur for
  the scanned domains.
*/
template <class T>
bool newt(T *x, T *p,
  int max_iter = 100,
  const T &eps = 10*std::numeric_limits<T>::epsilon(),
  const T & min = 10*std::numeric_limits<T>::min()) {
  long double
    y[2] = {x[0], x[1]},
    q[4] = {p[0],p[1],p[2],p[3]},
    t[2], dx[2], f[2], H[2][2];
  do {
    Dfun(y, H, q, f);          // Hessian and gradient at the current iterate
    solve2D(H, f, dx);         // Newton step: H dx = f
    for (int i = 0; i < 2; ++i) y[i] -= dx[i];
    t[0] = std::max(std::abs(dx[0]), std::abs(dx[1]));   // step size
    t[1] = std::max(std::abs(y[0]), std::abs(y[1]));     // iterate magnitude
  } while (t[0] > t[1]*eps + min && --max_iter > 0);
  if (max_iter == 0) {
    std::cerr
      << "Too many iterations\n"
      << "p=" << p[0] << ' ' << p[1] << ' ' << p[2] << ' ' << p[3] << '\n';
    return false;
  }
  x[0] = y[0], x[1] = y[1];
  return true;
}
/* Recursive 2D bisection of the triangle for nr levels on an rectangle:
(x0,y1) ----- (x1,y1) (s01, q01) ----- (s11, q11)
| | | |
| | | |
(x0,y0) ----- (x1,y0) (s00, q00) ----- (s01, q01)
At parameters p.
*/
/*
  Recursive 2D bisection of the rectangle
    (x0,y1) ----- (x1,y1)      signs: (s01,q01) ----- (s11,q11)
       |             |                    |                |
    (x0,y0) ----- (x1,y0)             (s00,q00) ----- (s10,q10)
  for up to nr levels. skl/qkl are the signs of the two components of
  funR at the corners. When the rectangle is exhausted (machine
  precision reached or nr == 0), a corner that is an exact sign-zero,
  a known analytic root ((0,0) or (1,0)), or a Newton-polished midpoint
  is appended to `roots` (which deduplicates).

  Fix: the Newton polish used to call newt(x, p, eps, min) — eps (a
  tolerance ~1e-15) was silently truncated into newt's `int max_iter`
  parameter as 0 and `min` landed in the eps slot, so Newton ran a
  single unchecked iteration. The iteration cap (newt's default, 100)
  is now passed explicitly so eps/min reach the intended parameters.
*/
template <class T>
void bis(
  const T & x0, const T & y0,
  const T & x1, const T & y1,
  const int &s00, const int &q00,
  const int &s01, const int &q01,
  const int &s10, const int &q10,
  const int &s11, const int &q11,
  T *p, const int & nr,
  Troots <T> & roots) {

  const T eps = 10*std::numeric_limits<T>::epsilon();
  const T min = 10*std::numeric_limits<T>::min();
  //
  // End of the road: rectangle collapsed (either side below machine
  // resolution) or recursion budget spent.
  //
  bool
    d0 = std::abs(x0- x1) <= eps*std::max(std::abs(x0), std::abs(x1)) + min,
    d1 = std::abs(y0- y1) <= eps*std::max(std::abs(y0), std::abs(y1)) + min;

  if ( d0 || d1 || nr == 0) {
#if 0
    std::cout
      << "B:"
      << x0 << ' ' << x1 << ' ' << y0 << ' ' << y1 << '|'
      << s00 << ' ' << q00 << '|'
      << s01 << ' ' << q01 << '|'
      << s10 << ' ' << q10 << '|'
      << s11 << ' ' << q11 << '\n';
#endif
    // Discussing corners: an exact (0,0) sign pair IS a root.
    if (s00 == 0 && q00 == 0) {
      roots.add(x0, y0, eps, min);
    } else if (s11 == 0 && q11 == 0) {
      roots.add(x1, y1, eps, min);
    } else if (s01 == 0 && q01 == 0) {
      roots.add(x0, y1, eps, min);
    } else if (s10 == 0 && q10 == 0) {
      roots.add(x1, y0, eps, min);
    } else {
      // discussing special known roots (the two mass centers)
      if ( x0 <= 0 && 0 <= x1 && y0 <= 0 && 0 <= y1) {
        roots.add(0., 0., eps, min);
      } else if ( x0 <= 1 && 1 <= x1 && y0 <= 0 && 0 <= y1) {
        roots.add(1., 0., eps, min);
      } else {
        // Polish the midpoint with Newton before storing.
        T x[2] = {(x0+x1)/2, (y0+y1)/2};
        bool st = newt(x, p, 100, eps, min);
        // std::cout << "N:" << x[0] << ' ' << x[1] << "|" << st << '\n';
        if (st) roots.add(x, eps, min);
      }
    }
    return;
  }
  //
  // Divide rectangle into 4 x Rectangles
  //
  int i, j,
    s[3][3],
    q[3][3];

  T h[3] = {x0, (x0 + x1)/2, x1},
    v[3] = {y0, (y0 + y1)/2, y1},
    x[2], f[2];

  // corners are already known; evaluate the 5 new midpoints
  s[0][0] = s00; q[0][0] = q00;
  s[0][2] = s01; q[0][2] = q01;
  s[2][0] = s10; q[2][0] = q10;
  s[2][2] = s11; q[2][2] = q11;

  i = 0; j = 1;
  x[0] = h[i]; x[1] = v[j]; funR(x, f, p);
  s[i][j] = sgn(f[0]); q[i][j] = sgn(f[1]);

  i = 1; j = 0;
  x[0] = h[i]; x[1] = v[j]; funR(x, f, p);
  s[i][j] = sgn(f[0]); q[i][j] = sgn(f[1]);

  i = 1; j = 1;
  x[0] = h[i]; x[1] = v[j]; funR(x, f, p);
  s[i][j] = sgn(f[0]); q[i][j] = sgn(f[1]);

  i = 1; j = 2;
  x[0] = h[i]; x[1] = v[j]; funR(x, f, p);
  s[i][j] = sgn(f[0]); q[i][j] = sgn(f[1]);

  i = 2; j = 1;
  x[0] = h[i]; x[1] = v[j]; funR(x, f, p);
  s[i][j] = sgn(f[0]); q[i][j] = sgn(f[1]);
  //
  // Bisection on 4 rectangles
  //
  for (i = 0; i < 2; ++i)
    for (j = 0; j < 2; ++j)
      // recurse only where BOTH components change sign somewhere on the
      // sub-rectangle (|sum of 4 corner signs| == 4 means a uniform sign)
      if (
        std::abs(s[i][j]+s[i+1][j]+s[i][j+1]+s[i+1][j+1]) != 4 &&
        std::abs(q[i][j]+q[i+1][j]+q[i][j+1]+q[i+1][j+1]) != 4
      )
        bis(
          h[i], v[j], h[i+1], v[j+1],
          s[i][j], q[i][j], s[i][j+1], q[i][j+1],
          s[i+1][j], q[i+1][j], s[i+1][j+1], q[i+1][j+1],
          p, nr-1, roots);
}
#if defined(GPU_ENABLED)
// CUDA kernel: evaluate the regularized gradient funR along the vertical
// line x = x0 at Ny points y0 + idx*dy and store the component signs
// (-1/0/+1) into sn (first component) and qn (second component).
// (q, b, s, c) are the funR parameters; the body mirrors funR inline
// because device code cannot call the host template.
__global__ void signs_vline_gpu(
  const double q,
  const double b,
  const double s,
  const double c,
  const double x0,
  const double y0,
  const double dy,
  const int Ny,
  int *sn, int *qn){

  int idx = blockIdx.x * blockDim.x + threadIdx.x;

  if (idx < Ny) {
    double
      x[2] = {x0, y0 + idx*dy},
      u = x[0] - 1,
      u2 = u*u,
      x2 = x[0]*x[0],
      z2 = x[1]*x[1],
      v = b*(x[0]*c - x[1]*s),
      f1 = x2 + z2,
      f2 = u2 + z2;

    f1 *= std::sqrt(f1);
    f2 *= std::sqrt(f2);

    double f12 = f1*f2, f[2];

    f[0] = -f2*x[0] - q*(f1*u + f12) + c*v*f12;
    f[1] = -(f2 + q*f1)*x[1] - s*v*f12;

    // write sign of each component to its output array
    int *P[2] = {sn + idx, qn + idx};
    for (int j = 0; j < 2; ++j)
      if (f[j] > 0)
        *(P[j]) = 1;
      else if (f[j] < 0)
        *(P[j]) = -1;
      else
        *(P[j]) = 0;
  }
}
// CUDA kernel: evaluate funR on an Nx x Ny grid and pack the sign pair
// of both components into one byte per point:
//   low nibble  = sign(f[0]) + 1  (0 = negative, 1 = zero, 2 = positive)
//   high nibble = sign(f[1]) + 1
// matching the CPU path in scan_with_image and its `(v & 3) - 1` decode.
//
// Fix: the zero case used to encode 0 (same as negative) instead of 1,
// so an exact sign-zero was decoded as -1 — inconsistent with the CPU
// image builder, which encodes zero as 1.
__global__ void signs_point_gpu(
  const double q,
  const double b,
  const double s,
  const double c,
  const double x0,
  const double y0,
  const double dx,
  const double dy,
  const int Nx,
  const int Ny,
  unsigned char *m){

  int
    idx = blockIdx.x * blockDim.x + threadIdx.x,
    idy = blockIdx.y * blockDim.y + threadIdx.y;

  if (idx < Nx && idy < Ny) {
    double
      x[2] = {x0 + idx*dx, y0 + idy*dy},
      u = x[0] - 1,
      u2 = u*u,
      x2 = x[0]*x[0],
      z2 = x[1]*x[1],
      v = b*(x[0]*c - x[1]*s),
      f1 = x2 + z2,
      f2 = u2 + z2;

    f1 *= std::sqrt(f1);
    f2 *= std::sqrt(f2);

    double f12 = f1*f2, f[2];

    f[0] = -f2*x[0] - q*(f1*u + f12) + c*v*f12;
    f[1] = -(f2 + q*f1)*x[1] - s*v*f12;

    *(m + idx*Ny + idy) =
      (f[0] < 0 ? 0 : (f[0] > 0 ? 2 : 1)) +
      ((f[1] < 0 ? 0 : (f[1] > 0 ? 2 : 1)) << 4);
  }
}
// Host wrapper around signs_vline_gpu: allocates device sign buffers,
// launches one thread per y-sample, and copies the resulting sign
// columns back into sn/qn. p = {q, b, sin(beta), cos(beta)}.
// NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked —
// failures would surface as garbage signs; confirm acceptable here.
template <class T>
void calc_vline_gpu(
  T p[4],
  const T & x0,
  const T & y0,
  const T & dy,
  const int & Ny,
  int * sn,
  int * qn){

  int size = Ny*sizeof(int);

  int *sn_, *qn_;
  cudaMalloc(&sn_, size);
  cudaMalloc(&qn_, size);

  int blockSize = 1000;
  int numBlocks = (Ny + blockSize - 1) / blockSize;

  signs_vline_gpu<<<numBlocks, blockSize>>>(p[0], p[1], p[2], p[3], x0, y0, dy, Ny, sn_, qn_);

  //cudaDeviceSynchronize();   // implicit: the cudaMemcpy below synchronizes

  cudaMemcpy(sn, sn_, size, cudaMemcpyDeviceToHost);
  cudaMemcpy(qn, qn_, size, cudaMemcpyDeviceToHost);

  cudaFree(sn_);
  cudaFree(qn_);
}
#else
/*
  CPU fallback for calc_vline_gpu: evaluate the sign of both components
  of the regularized gradient funR along the vertical line x = x0, at
  the Ny points y0 + k*dy, writing them into sn and qn.
  p = {q, b, sin(beta), cos(beta)}.
*/
template <class T>
void calc_vline(
  T p[4],
  const T & x0,
  const T & y0,
  const T & dy,
  const int & Ny,
  int * sn,
  int * qn){

#pragma omp parallel for
  for (int k = 0; k < Ny; ++k) {
    T pt[2] = {x0, y0 + k*dy}, g[2];
    funR(pt, g, p);
    sn[k] = sgn(g[0]);
    qn[k] = sgn(g[1]);
  }
}
#endif
/*
Scanning the rectangle from left to right:
(x0,y1) ----- (x1,y1)
| |
| |
(x0,y0) ----- (x1,y0)
Nx - number of bins in x direction
Ny - number of bins in y direction
p - parameters of the map
*/
/*
  Scan the rectangle [x0,x1] x [y0,y1] column by column for roots of
  the regularized gradient funR: the domain is split into an Nx x Ny
  grid, corner signs are computed one vertical line at a time, and any
  cell where BOTH components change sign is refined with bis()
  (up to max_levels of recursion), appending results to `roots`.

  Improvements: the sign columns now live in std::vector (RAII — the
  raw new[]/delete[] pair leaked on any exception from bis/newt), and
  advancing to the next column swaps buffers in O(1) instead of copying
  2*Ny ints.
*/
template <class T>
void scan_with_lines(
  const T & x0, const T & y0,
  const T & x1, const T & y1,
  T p[4], Troots <T> & roots,
  int Nx = 1000, int Ny = 1000,
  int max_levels = 40) {

  T dx = (x1 - x0)/Nx,
    dy = (y1 - y0)/Ny;

  // s/q: signs on the current column; sn/qn: signs on the next column.
  std::vector<int> s(Ny), q(Ny), sn(Ny), qn(Ny);

  // calculating signs for the first column
#if defined(GPU_ENABLED)
  calc_vline_gpu(p, x0, y0, dy, Ny, sn.data(), qn.data());
#else
  calc_vline(p, x0, y0, dy, Ny, sn.data(), qn.data());
#endif

  for (int i = 0; i < Nx - 1; ++i) {

    // the previous "next" column becomes the current one (O(1) swap)
    s.swap(sn);
    q.swap(qn);

#if defined(GPU_ENABLED)
    calc_vline_gpu(p, x0 + (i+1)*dx, y0, dy, Ny, sn.data(), qn.data());
#else
    calc_vline(p, x0 + (i+1)*dx, y0, dy, Ny, sn.data(), qn.data());
#endif

    for (int j = 0; j < Ny - 1; ++j)
      // refine only cells where both components change sign
      // (|sum of the 4 corner signs| == 4 means a uniform sign)
      if (
        std::abs(s[j]+s[j+1]+sn[j]+sn[j+1]) != 4 &&
        std::abs(q[j]+q[j+1]+qn[j]+qn[j+1]) != 4
      )
        bis(
          x0 + i*dx, y0 + j*dy,
          x0 + (i+1)*dx, y0 + (j+1)*dy,
          s[j], q[j], s[j+1], q[j+1],
          sn[j], qn[j], sn[j+1], qn[j+1],
          p, max_levels, roots);
  }
}
/*
Scanning the rectangle from left to right:
(x0,y1) ----- (x1,y1)
| |
| |
(x0,y0) ----- (x1,y0)
Nx - number of bins in x direction
Ny - number of bins in y direction
p - parameters of the map
*/
/*
  Scan the rectangle [x0,x1] x [y0,y1] for roots of funR by first
  rendering a full Nx x Ny "sign image" (one byte per grid point:
  low nibble = sign(f[0])+1, high nibble = sign(f[1])+1, using 0/1/2
  for -1/0/+1), then refining with bis() every cell whose four corners
  show a sign change in BOTH components. Results go into `roots`.
  Same contract as scan_with_lines, but trades memory (Nx*Ny bytes) for
  a single bulk sign pass (GPU kernel or OpenMP loop).
*/
template <class T>
void scan_with_image(
  const T & x0, const T & y0,
  const T & x1, const T & y1,
  T p[4], Troots <T> & roots,
  int Nx = 1000, int Ny = 1000,
  int max_levels = 40) {

  T dx = (x1 - x0)/Nx,
    dy = (y1 - y0)/Ny;

  // generate image
#if defined(GPU_ENABLED)
  int
    BLOCKSIZE_x = 16,
    BLOCKSIZE_y = 16;

  unsigned char *r;
  cudaMallocManaged(&r, Nx*Ny);   // managed so host can read it after sync

  dim3 gridSize((Nx + BLOCKSIZE_x - 1)/BLOCKSIZE_x, (Ny + BLOCKSIZE_y - 1)/BLOCKSIZE_y);
  dim3 blockSize(BLOCKSIZE_x, BLOCKSIZE_y);

  signs_point_gpu<<<gridSize, blockSize>>>(p[0], p[1], p[2], p[3], x0, y0, dx, dy, Nx, Ny, r);

  cudaDeviceSynchronize();
#else
  unsigned char *r = new unsigned char [unsigned(Nx)*Ny];

  #pragma omp parallel for
  for (int i = 0; i < Nx; ++i) {
    for (int j = 0; j < Ny; ++j){
      T y[2] = {x0 + i*dx, y0 + j*dy}, g[2];
      funR(y, g, p);
      // encode: sign + 1 in each nibble (0 = neg, 1 = zero, 2 = pos)
      r[i*Ny + j] =
        (g[0] < 0 ? 0 : (g[0] > 0 ? 2 : 1)) +
        ((g[1] < 0 ? 0 : (g[1] > 0 ? 2 : 1)) << 4);
    }
  }
#endif

  // analyse image: o/o_ walk two adjacent columns; decode each cell's
  // corner signs with (nibble) - 1 and refine sign-change cells.
  unsigned char *o = r, *o_ = r + Ny, *v;

  int s[2][2], q[2][2];

  for (int i = 0; i < Nx - 1; ++i, o = o_, o_ += Ny) {
    for (int j = 0; j < Ny - 1; ++j){

      v = o + j;
      s[0][0] = int(*v & 3) - 1;
      q[0][0] = int(*v >> 4) - 1;

      ++v;
      s[0][1] = int(*v & 3) - 1;
      q[0][1] = int(*v >> 4) - 1;

      v = o_ + j;
      s[1][0] = int(*v & 3) - 1;
      q[1][0] = int(*v >> 4) - 1;

      ++v;
      s[1][1] = int(*v & 3) - 1;
      q[1][1] = int(*v >> 4) - 1;

      // try bisection (skip cells with a uniform sign in either component)
      if (
        std::abs(s[0][0]+s[0][1]+s[1][0]+s[1][1]) != 4 &&
        std::abs(q[0][0]+q[0][1]+q[1][0]+q[1][1]) != 4
      )
        bis(
          x0 + i*dx, y0 + j*dy,
          x0 + (i+1)*dx, y0 + (j+1)*dy,
          s[0][0], q[0][0], s[0][1], q[0][1],
          s[1][0], q[1][0], s[1][1], q[1][1],
          p, max_levels, roots);
    }
  }

#if defined(GPU_ENABLED)
  cudaFree(r);
#else
  delete [] r;
#endif
}
|
PoW.c | // Copyright (c) 2017-2018 The Popchain Core Developers
#include "PoW.h"
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#ifndef MAC_OSX
#include <omp.h>
#endif
#include "my_time.h"
#include "common.h"
#include "my_rand48_r.h"
#include "oneWayFunction.h"
// #define SSE_VERSION
/*
* Step 1: Initialize working memory.
*/
/*
 * Step 1: Initialize working memory.
 *
 * Fills the WORK_MEMORY_SIZE-byte buffer Maddr in 32-byte records from
 * the hash chain seeded by `input`. Every K-th record (i % K == 0,
 * including i == 0, so the PRNG state is seeded before first use) is
 * produced by re-hashing the running state `a` with a hash function
 * selected by a 4-bit digest of `a`, and reseeds four 48-bit PRNG
 * streams from the new state; the other records are PRNG output,
 * bit-rotated (rrs) by an amount derived from the record index, and
 * are also XOR-folded back into `a`.
 *
 * NOTE(review): the exact semantics of reduce_bit/rrs/cryptoFunc are
 * defined elsewhere (oneWayFunction.h/common.h) — the comments above
 * describe the call pattern visible here, not their internals.
 */
void initWorkMemory(uint8_t *input, uint32_t inputLen, uint8_t *Maddr, const uint32_t K) {
    uint32_t i, j;
    uint8_t a[OUTPUT_LEN], b[OUTPUT_LEN];

    /* initial state: hash of the input message */
    cryptoFunc[0].func(input, inputLen, a);

    uint64_t randSeed[4] = {0, 0, 0, 0};
#ifndef SSE_VERSION
    struct my_rand48_data randBuffer[4];
#else
    struct vrand48_data randBuffer[2];
#endif

    const uint32_t iterNum = WORK_MEMORY_SIZE >> 5; /* 32-byte records */
    for (i = 0; i < iterNum; ++i) {
        if (i % K) {
            /* PRNG-driven record */
#ifndef SSE_VERSION
            uint64_t num = 0;
            for (j = 0; j < 4; ++j) {
                my_rand64_r(&randBuffer[j], &num);
                memcpy(b + (j << 3), (uint8_t *)&num, 8*sizeof(uint8_t));
            }
#else
            vrand64(b, randBuffer);
#endif
            uint8_t shift_num;
            uint8_t result[OUTPUT_LEN];
            /* rotate by an amount derived from the record index */
            reduce_bit((uint8_t *)&i, 4, (uint8_t *)&shift_num, 8);
            rrs(b, OUTPUT_LEN, result, shift_num);
            memcpy(Maddr + (i << 5), result, OUTPUT_LEN*sizeof(uint8_t));
            /* fold the record back into the running state */
            for (j = 0; j < 32; ++j) {
                a[j] ^= result[j];
            }
        } else {
            /* re-hash the state and reseed the PRNG streams */
            uint8_t t = 0, shift_num = 0;
            reduce_bit(a, 32, (uint8_t *)&t, 8);
            t = (t & 0x0f) ^ (t >> 4); /* 4-bit hash-function selector */
            reduce_bit((uint8_t *)&i, 4, (uint8_t *)&shift_num, 8);
            uint8_t a_rrs[INPUT_LEN];
            rrs(a, OUTPUT_LEN, a_rrs, shift_num);
            cryptoFunc[t].func(a_rrs, 32, a);
            /* four 48-bit seeds, one from each 8-byte quarter of the state */
            reduce_bit(a, 8, (uint8_t *)&randSeed[0], 48);
            reduce_bit(a + 8, 8, (uint8_t *)&randSeed[1], 48);
            reduce_bit(a + 16, 8, (uint8_t *)&randSeed[2], 48);
            reduce_bit(a + 24, 8, (uint8_t *)&randSeed[3], 48);
#ifndef SSE_VERSION
            my_seed48_r(randSeed[0], &randBuffer[0]);
            my_seed48_r(randSeed[1], &randBuffer[1]);
            my_seed48_r(randSeed[2], &randBuffer[2]);
            my_seed48_r(randSeed[3], &randBuffer[3]);
#else
            vseed48(randSeed    , &randBuffer[0]);
            vseed48(randSeed + 2, &randBuffer[1]);
#endif
            memcpy(Maddr + (i << 5), a, 32*sizeof(uint8_t));
        }
    }
}
/*
* Step 2: Modify the working memory contents.
*/
/*
 * Step 2: Modify the working memory contents.
 *
 * Runs C rounds of memory-hard mixing over Maddr. Each round derives a
 * 48-bit PRNG seed from the state `a`, then performs L*64 swap steps:
 * two pseudo-random addresses (base +/- a data-dependent offset) are
 * read, XOR-swapped with a state byte, and their XOR is collected in
 * the 64-byte buffer b; the running accumulator r makes each address
 * depend on all previous reads. After the inner loop the state is
 * re-hashed (hash function chosen by a digest of r) and XORed into
 * `result`, which accumulates across all C rounds.
 *
 * result must hold OUTPUT_LEN bytes; it is seeded from a hash of the
 * last 32 bytes of Maddr.
 */
void modifyWorkMemory(uint8_t *Maddr, const uint32_t L, const uint32_t C,
        uint8_t *result) {
    uint32_t i, j;
    uint8_t a[OUTPUT_LEN], b[64];
    /* initial state: hash of the tail of the working memory */
    cryptoFunc[0].func(Maddr + WORK_MEMORY_SIZE - 32, 32, a);
    memcpy(result, a, OUTPUT_LEN*sizeof(uint8_t));
    uint64_t r = 0;
    reduce_bit(a, 32, (uint8_t *)&r, 64);
    const uint32_t iterNum = L << 6; /* L * 64 swap steps per round */
    for (i = 0; i < C; ++i) {
        uint64_t randSeed = 0;
        reduce_bit(a, 32, (uint8_t *)&randSeed, 48);
        struct my_rand48_data randBuffer;
        my_seed48_r(randSeed, &randBuffer);
        uint8_t t1, t2, s;
        uint64_t randNum = 0, base = 0;
        for (j = 0; j < iterNum; ++j) {
            my_rand48_r(&randBuffer, &randNum);
            base = randNum + r;
            /* data-dependent offset keeps the access pattern unpredictable */
            uint64_t offset = 0;
            reduce_bit((uint8_t *)&r, 8, (uint8_t *)&offset, 8);
            offset = (offset << 8) + 1;
            uint64_t addr1 = (base + WORK_MEMORY_SIZE - offset) % WORK_MEMORY_SIZE;
            uint64_t addr2 = (base + offset) % WORK_MEMORY_SIZE;
            t1 = Maddr[addr1];
            t2 = Maddr[addr2];
            s = a[j & 0x1f];
            /* masked swap of the two memory bytes */
            Maddr[addr1] = t2 ^ s;
            Maddr[addr2] = t1 ^ s;
            b[j & 0x3f] = t1 ^ t2;
            r = r + s + t1 + t2; /* chain: future addresses depend on reads */
        }
        /* re-hash the state; hash function selected by a digest of r */
        uint8_t t = 0;
        reduce_bit((uint8_t *)&r, 8, (uint8_t *)&t, 8);
        t = (t & 0x0f) ^ (t >> 4);
        reduce_bit(b, 64, a, 256);
        uint8_t shift_num = 0;
        uint64_t ir = r + i;
        reduce_bit((uint8_t *)&ir, 8, (uint8_t *)&shift_num, 8);
        uint8_t a_rrs[INPUT_LEN];
        rrs(a, OUTPUT_LEN, a_rrs, shift_num);
        cryptoFunc[t].func(a_rrs, 32, a);
        /* accumulate this round into the output */
        for (j = 0; j < OUTPUT_LEN; ++j) {
            result[j] ^= a[j];
        }
    }
}
/*
* Step 3: Calculate the final result.
*/
/*
 * Step 3: Calculate the final result.
 *
 * Starting from the step-2 digest `c`, repeatedly XORs 32-byte records
 * of the working memory into `result` in variable-length runs (run
 * length d is a D-bit digest of the current result, plus one), then
 * rotates and re-hashes the result between runs. Terminates — with a
 * final rotate + hash — once all (WORK_MEMORY_SIZE/32 - 1) records
 * have been consumed.
 *
 * result must hold OUTPUT_LEN bytes.
 */
void calculateFinalResult(uint8_t *Maddr, uint8_t *c, const uint32_t D, uint8_t *result) {
    uint32_t i = 0, j = 0, k = 0;
    memcpy(result, c, OUTPUT_LEN*sizeof(uint8_t));
    const uint32_t num = (WORK_MEMORY_SIZE >> 5) - 1; /* records to consume */
    uint32_t it = 0;
    uint8_t result_rrs[OUTPUT_LEN];
    while(1) {
        uint8_t t = 0, shift_num = 0;
        uint32_t d = 0;
        reduce_bit(result, 32, (uint8_t *)&t, 8);
        t = (t & 0x0f) ^ (t >> 4); /* hash-function selector for this run */
        reduce_bit(result, 32, (uint8_t *)&d, D);
        ++d; /* run length in [1, 2^D] */
        for (j = 0; j < d; ++j) {
            uint32_t index = i << 5;
            for (k = 0; k < 32; ++k) {
                result[k] ^= Maddr[index + k];
            }
            ++i;
            if (i == num) {
                /* all records consumed: final rotate + hash, then done */
                it = i + t;
                reduce_bit((uint8_t *)&it, 4, (uint8_t *)&shift_num, 8);
                rrs(result, OUTPUT_LEN, result_rrs, shift_num);
                cryptoFunc[0].func(result_rrs, 32, result);
                return;
            }
        }
        /* between runs: rotate by a digest of (t + i), re-hash with func t */
        it = t + i;
        reduce_bit((uint8_t *)&it, 4, (uint8_t *)&shift_num, 8);
        rrs(result, OUTPUT_LEN, result_rrs, shift_num);
        cryptoFunc[t].func(result_rrs, 32, result);
    }
}
/*
* Correctness & Performance test for Proof of work
*/
/*
 * Correctness & Performance test for Proof of work.
 *
 * Hashes `mess` (messLen bytes, zero-padded into an INPUT_LEN buffer)
 * once with powFunction and prints the digest. The multi-threaded
 * performance sweep below is currently disabled (commented out); it
 * allocated 64 per-thread work areas, which is why Maddr is sized
 * 64 * WORK_MEMORY_SIZE even for the single correctness run.
 * iterNum is only used by the disabled benchmark.
 */
void testPowFunction(uint8_t *mess, uint32_t messLen, const int64_t iterNum) {
    int64_t j;
    uint32_t inputLen = messLen;
    uint8_t input[INPUT_LEN], output[OUTPUT_LEN];
    memset(input, 0, INPUT_LEN*sizeof(uint8_t));
    memcpy(input, mess, messLen*sizeof(char));

    // Init all one-way function
    initOneWayFunction();

    /* 64 work areas: one per potential benchmark thread */
    uint8_t *Maddr = (uint8_t *)malloc(64 * WORK_MEMORY_SIZE*sizeof(uint8_t));
    assert(NULL != Maddr);
    memset(Maddr, 0, 64 * WORK_MEMORY_SIZE*sizeof(uint8_t));

    printf("****************************** Correctness test (PoW function) ******************************\n");
    printf("Test message: %s\n", mess);
    powFunction(input, inputLen, Maddr, output);
    view_data_u8("PoW", output, OUTPUT_LEN);
    printf("*********************************************************************************************\n");

    /*
    printf("*************************************************** Performance test (PoW function) ***************************************************\n");
    uint8_t *result = (uint8_t *)malloc(iterNum * OUTPUT_LEN * sizeof(uint8_t));
    assert(NULL != result);
    memset(result, 0, iterNum * OUTPUT_LEN * sizeof(uint8_t));

    uint32_t threadNumArr[] = {1, 4, 8, 12, 16, 20, 24, 32, 48, 64};
    uint32_t threadNumTypes = sizeof(threadNumArr) / sizeof(uint32_t);
    printf("   %-18s", "Algorithm");
    for (uint32_t ix = 0; ix < threadNumTypes; ++ix)
        printf("%12d", threadNumArr[ix]);
    printf("\n");

    printf("00 %-18s\t", "PoW");
    for (uint32_t ix = 0; ix < threadNumTypes; ++ix) {
        omp_set_num_threads(threadNumArr[ix]);
        double startTime = get_wall_time();
        if (threadNumArr[ix] == 1) {
            for (j = 0; j < iterNum; ++j) {
                powFunction(input, inputLen, Maddr, result + j * OUTPUT_LEN);
            }
        } else {
#pragma omp parallel for firstprivate(input), private(j) shared(result)
            for (j = 0; j < iterNum; ++j) {
                powFunction(input, inputLen, Maddr + omp_get_thread_num() * WORK_MEMORY_SIZE, result + j * OUTPUT_LEN);
            }
        }
        double endTime = get_wall_time();
        double costTime = endTime - startTime;
        printf("%5.0f bps     ", iterNum / costTime); fflush(stdout);

        // Check result
        for (j = 0; j < iterNum; j += 1) {
            if (memcmp(output, result + j * OUTPUT_LEN, OUTPUT_LEN)) {
                printf("Thread num: %d, j: %ld\n", threadNumArr[ix], j);
                view_data_u8("output", output, OUTPUT_LEN);
                view_data_u8("result", result + j * OUTPUT_LEN, OUTPUT_LEN);
                abort();
            }
        }
    }
    printf("\n");
    printf("***************************************************************************************************************************************\n");

    if (NULL != result) {
        free(result);
        result = NULL;
    }
    */

    if (NULL != Maddr) {
        free(Maddr);
        Maddr = NULL;
    }
}
#define OUTPUT_BUFFER_SIZE (32 * 1024UL * 1024UL)
/*popchain ghost*/
//140 to 200
#define MAX_TEST_INPUT_LEN 200
/*popchain ghost*/
#define MAX_OUT_FILE_NAME_LEN 25
const char testInputCase[][MAX_TEST_INPUT_LEN] = {
"",
"HelloWorld",
"0123456789"
};
/*
 * NIST-style randomness test driver: for each entry of testInputCase[],
 * iterate the PoW function iterNum times, feeding each OUTPUT_LEN-byte
 * result back in as the next input, and dump the whole output stream to
 * "<outFileName>-<testCaseIx>.txt" for external statistical analysis.
 */
void powNistTest(const char *outFileName) {
	const uint64_t iterNum = 1024UL * 1024UL;
	// const uint64_t iterNum = 1024UL;
	uint8_t *outputBuffer = (uint8_t *)malloc(OUTPUT_BUFFER_SIZE * sizeof(uint8_t));
	assert(NULL != outputBuffer);
	memset(outputBuffer, 0, OUTPUT_BUFFER_SIZE * sizeof(uint8_t));
	uint8_t *Maddr = (uint8_t *)malloc(WORK_MEMORY_SIZE*sizeof(uint8_t));
	assert(NULL != Maddr);
	memset(Maddr, 0, WORK_MEMORY_SIZE*sizeof(uint8_t));
	initOneWayFunction();
	uint32_t testInputCaseNum = sizeof(testInputCase) / sizeof(const char [MAX_TEST_INPUT_LEN]);
	for (uint32_t testCaseIx = 0; testCaseIx < testInputCaseNum; ++testCaseIx) {
		char curOutFileName[MAX_OUT_FILE_NAME_LEN] = "";
		/* snprintf: outFileName is caller-controlled and the name buffer is
		 * only MAX_OUT_FILE_NAME_LEN bytes; the old sprintf could overflow. */
		snprintf(curOutFileName, sizeof(curOutFileName), "%s-%u.txt", outFileName, testCaseIx);
		FILE *fp = NULL;
		if (NULL != (fp = fopen(curOutFileName, "wb"))) {
			const uint32_t testInputCaseLen = strlen((char *)testInputCase[testCaseIx]);
			uint8_t input[MAX_TEST_INPUT_LEN];
			memset(input, 0, MAX_TEST_INPUT_LEN*sizeof(uint8_t));
			memcpy(input, testInputCase[testCaseIx], testInputCaseLen*sizeof(uint8_t));
			double startTime = get_wall_time();
			powFunction(input, testInputCaseLen, Maddr, outputBuffer);
			for (uint64_t i = 1, j = 0; i < iterNum; ++i) {
				/* Chain exactly OUTPUT_LEN bytes of the previous result.
				 * The old `OUTPUT_LEN * sizeof(uint32_t)` copied 4x the
				 * intended amount into the next input. */
				memcpy(input, outputBuffer + j, OUTPUT_LEN * sizeof(uint8_t));
				j += OUTPUT_LEN;
				powFunction(input, OUTPUT_LEN, Maddr, outputBuffer + j);
				/* if (j == OUTPUT_BUFFER_SIZE) {
				fwrite(outputBuffer, sizeof(uint8_t), OUTPUT_BUFFER_SIZE / sizeof(uint8_t), fp);
				j = 0;
				} */
			}
			double endTime = get_wall_time();
			double costTime = endTime - startTime;
			/* %u for the uint32_t index; cast iterNum so %llu is portable. */
			fprintf(stdout, "TestCaseIx: %u, Input: %s, IterNum: %llu, Time: %4.2f, Performance: %5.2f bps\n", testCaseIx, \
			testInputCase[testCaseIx], (unsigned long long)iterNum, costTime, ((double)(iterNum * OUTPUT_LEN)) / costTime); fflush(stdout);
			fwrite(outputBuffer, sizeof(uint8_t), OUTPUT_BUFFER_SIZE / sizeof(uint8_t), fp);
			fclose(fp);
		} else {
			fprintf(stderr, "Error: Open %s failed!\n", curOutFileName);
			abort();
		}
	}
	if (NULL != outputBuffer) {
		free(outputBuffer);
		outputBuffer = NULL;
	}
	if (NULL != Maddr) {
		free(Maddr);
		Maddr = NULL;
	}
}
/*
 * Compute the Proof-of-Work hash of an exactly-INPUT_LEN-byte message into
 * output[OUTPUT_LEN]. Messages of any other length are rejected with a
 * diagnostic and output is left untouched.
 */
void hashpop(const uint8_t *mess, uint32_t messLen, uint8_t output[OUTPUT_LEN]) {
	uint8_t buf[INPUT_LEN];
	uint8_t *workMem = NULL;

	if (messLen != INPUT_LEN)
	{
		//won't get in
		printf("hashpop:Invalid message length %d\n", messLen);
		return;
	}
	/* Stage the message in a zeroed, fixed-size buffer. */
	memset(buf, 0, sizeof(buf));
	memcpy(buf, mess, messLen);
	/* One WORK_MEMORY_SIZE slab of scratch memory, zero-initialized. */
	workMem = (uint8_t *)malloc(WORK_MEMORY_SIZE*sizeof(uint8_t));
	assert(NULL != workMem);
	memset(workMem, 0, WORK_MEMORY_SIZE*sizeof(uint8_t));
	powFunction(buf, messLen, workMem, output);
	free(workMem);
	workMem = NULL;
}
/*
 * Advance the 48-bit LCG state twice and combine both intermediate states
 * into one 64-bit pseudo-random value: first ^ (second << 16).
 * Always returns 0.
 */
int my_rand64_r (struct my_rand48_data *buffer, uint64_t *result)
{
	const uint64_t mask48 = 0xffffffffffffULL;
	const uint64_t first  = (buffer->__x * buffer->__a + buffer->__c) & mask48;
	const uint64_t second = (first       * buffer->__a + buffer->__c) & mask48;

	buffer->__x = second;            /* state after two steps */
	*result = first ^ (second << 16);
	return 0;
}
/*
 * Seed the rand48-style generator: keep the low 48 bits of seedval as the
 * state and install the standard drand48 multiplier/increment constants.
 * Always returns 0.
 */
int my_seed48_r (uint64_t seedval, struct my_rand48_data *buffer)
{
	buffer->__a = 0x5deece66dULL;            /* drand48 multiplier */
	buffer->__c = 0xb;                       /* drand48 increment  */
	buffer->__x = seedval & 0xffffffffffffULL;
	return 0;
}
/*
 * Proof-of-Work hash: input[0..inputLen) -> output[OUTPUT_LEN], using Maddr
 * (at least WORK_MEMORY_SIZE bytes) as scratch working memory.
 * Three fixed phases; the constants (64, 4, WORK_MEMORY_SIZE>>11, 8) are
 * algorithm parameters passed through to the project helpers.
 */
void powFunction(uint8_t *input, uint32_t inputLen, uint8_t *Maddr, uint8_t *output)
{
	uint8_t c[OUTPUT_LEN];
	// Step 1: Initialize working memory.
	initWorkMemory(input, inputLen, Maddr, 64);
	// view_data_u8("Maddr", Maddr, OUTPUT_LEN);
	// Step 2: Modify the working memory contents.
	modifyWorkMemory(Maddr, 4, WORK_MEMORY_SIZE >> 11, c);
	// view_data_u8("c", c, OUTPUT_LEN);
	// Step 3: Calculate the final result.
	calculateFinalResult(Maddr, c, 8, output);
	// view_data_u8("output", output, OUTPUT_LEN);
}
/* Single LCG step: advance the 48-bit state and return it through *result.
 * Always returns 0. */
int my_rand48_r (struct my_rand48_data *buffer, uint64_t *result)
{
	const uint64_t next = (buffer->__x * buffer->__a + buffer->__c) & 0xffffffffffffULL;
	buffer->__x = next;
	*result = next;
	return 0;
}
|
linked_omp3_tasks.c | #include <omp.h>
#include <stdlib.h>
#include <stdio.h>
#ifndef N
#define N 5
#endif
#ifndef FS
#define FS 38
#endif
/* Singly-linked list node: `data` is the fib() argument, `fibdata` the
 * computed result (filled in by processwork). */
struct node {
int data;
int fibdata;
struct node* next;
};
/* Build a list of N+1 nodes with data values FS .. FS+N; returns the head. */
struct node* init_list(struct node* p);
/* Compute fib(p->data) and store it in p->fibdata. */
void processwork(struct node* p);
/* Naive doubly-recursive Fibonacci (deliberately expensive CPU load). */
int fib(int n);
/* Naive doubly-recursive Fibonacci: exponential time by design, used as a
 * per-node CPU workload for the tasking demo. */
int fib(int n)
{
	if (n < 2)
		return n;
	return fib(n - 1) + fib(n - 2);
}
/* Fill in the Fibonacci result for one list node: p->fibdata = fib(p->data). */
void processwork(struct node* p)
{
	p->fibdata = fib(p->data);
}
/*
 * Build a list of N+1 nodes: the head carries data=FS, followed by N nodes
 * with data FS+1 .. FS+N. The incoming `p` is only used as a cursor.
 * Returns the head, or NULL on allocation failure (the original dereferenced
 * unchecked malloc results).
 */
struct node* init_list(struct node* p)
{
	int i;
	struct node* head = NULL;
	struct node* temp = NULL;

	head = (struct node*) malloc(sizeof(struct node));
	if (head == NULL)
		return NULL;                   /* out of memory */
	p = head;
	p->data = FS;
	p->fibdata = 0;
	for (i = 0; i < N; i++) {
		temp = (struct node*) malloc(sizeof(struct node));
		if (temp == NULL) {
			/* release the partial list before failing */
			p->next = NULL;
			while (head != NULL) {
				temp = head->next;
				free(head);
				head = temp;
			}
			return NULL;
		}
		p->next = temp;
		p = temp;
		p->data = FS + i + 1;
		p->fibdata = i + 1;
	}
	p->next = NULL;
	return head;
}
/* Demo driver: build the list, process every node as an OpenMP task, then
 * print the results and the elapsed wall time. */
int main()
{
double start, end;
struct node *p=NULL;
struct node *temp=NULL;
struct node *head=NULL;
printf("Process linked list\n");
printf("  Each linked list node will be processed by function 'processwork()'\n");
printf("  Each ll node will compute %d fibonacci numbers beginning with %d\n",N,FS);
p = init_list(p);
head = p;
start = omp_get_wtime();
#pragma omp parallel
{
#pragma omp master
printf("Threads:      %d\n", omp_get_num_threads());
/* One thread walks the list and spawns a task per node; the team executes
 * the tasks concurrently. */
#pragma omp single
{
p=head;
while (p) {
#pragma omp task firstprivate(p) //first private is required
{
processwork(p);
}
/* safe: the task captured its own copy of p */
p = p->next;
}
}
}
end = omp_get_wtime();
p = head;
while (p != NULL) {
printf("%d : %d\n",p->data, p->fibdata);
temp = p->next;
free (p);
p = temp;
}
/* p is NULL here, so this free is a no-op kept from the original. */
free (p);
printf("Compute Time: %f seconds\n", end - start);
return 0;
}
|
imageproc.c | /* ----------------------------------------------------------------------------
* @file imageproc.c
* @brief image processing functions for ppm and pgm types
*
* @author Jake Michael, jami1063@colorado.edu
* @course ECEN 5763: EMVIA, Summer 2021
*---------------------------------------------------------------------------*/
#include<stdlib.h>
#include<stdint.h>
#include<stdio.h>
#include<errno.h>
#include<omp.h>
#include"imageio.h"
#include"imageproc.h"
// see .h for more details
/*
 * Locate the bounding box of all pixels >= thresh and draw a full-width
 * crosshair (row + column at max intensity) through its center. The center
 * (xbar, ybar) is static so frames with no above-threshold pixels keep the
 * previously computed position.
 * Returns 0 on success, -1 if pgm is NULL.
 */
int com_annotate(pgm_img_t *pgm, uint8_t thresh) {
	// indices of the maxima and minima
	int xmax, xmin, ymax, ymin;
	static int xbar=0, ybar=0;
	int i,j;
	/* Validate before touching pgm: the original initialized the extents
	 * from pgm->hres/vres ahead of this check, dereferencing a possible NULL. */
	if (!pgm) return -1;
	xmax = -1;
	xmin = pgm->hres;
	ymax = -1;
	ymin = pgm->vres;
	// scan through all pixels and determine x and y max/min extents
	for (i=0; i<pgm->vres; i++) {
		for (j=0; j<pgm->hres; j++) {
			if (pgm->pixel[i*pgm->hres+j] >= thresh) {
				if (i > ymax) ymax = i;
				if (i < ymin) ymin = i;
				if (j > xmax) xmax = j;
				if (j < xmin) xmin = j;
			}
		}
	}
	/* Only update the center when at least one pixel passed the threshold.
	 * The original tested (xmin>-1 && xmax<hres), which is always true and
	 * fed garbage extents into xbar/ybar on empty frames. */
	if (xmax > -1 && xmin < pgm->hres) {
		xbar = (xmax - xmin)/2 + xmin;
	}
	if (ymax > -1 && ymin < pgm->vres) {
		ybar = (ymax - ymin)/2 + ymin;
	}
	// draw horizontal line at ybar
	for (j=0; j<pgm->hres; j++) {
		//pgm->pixel[(ybar-1)*pgm->hres + j] = 255;
		pgm->pixel[(ybar)*pgm->hres + j] = 255;
		//pgm->pixel[(ybar+1)*pgm->hres + j] = 255;
	}
	// draw vertical line at xbar
	for (i=0; i<pgm->vres; i++) {
		//pgm->pixel[i*pgm->hres + xbar-1] = 255;
		pgm->pixel[i*pgm->hres + xbar] = 255;
		//pgm->pixel[i*pgm->hres + xbar+1] = 255;
	}
	return 0;
}
// see .h for more details
/*
 * Per-channel clamped background subtraction: diff = clamp(frame - bg, 0, 255)
 * for each of r, g, b. All three images must share the bg dimensions.
 * Returns 0 on success, -1 if any argument is NULL.
 */
int rgb_diff(ppm_img_t *const frame, ppm_img_t *const bg, ppm_img_t *diff) {
	int i;
	int temp;
	if (!frame || !bg || !diff) return -1;
	/* use openmp for speedup. With default(none) every referenced variable
	 * must appear in a clause; the original listed only diff, so frame and
	 * bg were unspecified and the pragma failed to compile under OpenMP. */
	#pragma omp parallel for num_threads(5) \
		default(none) private(i, temp) shared(diff, frame, bg)
	for (i=0; i<bg->hres*bg->vres; i++) {
		// rgb subtraction on ea. channel and keep within 0-255 intensity
		temp = frame->pixel[i].r - bg->pixel[i].r;
		if (temp < 0) temp = 0;
		if (temp > 255) temp = 255;
		diff->pixel[i].r = temp;
		temp = frame->pixel[i].g - bg->pixel[i].g;
		if (temp < 0) temp = 0;
		if (temp > 255) temp = 255;
		diff->pixel[i].g = temp;
		temp = frame->pixel[i].b - bg->pixel[i].b;
		if (temp < 0) temp = 0;
		if (temp > 255) temp = 255;
		diff->pixel[i].b = temp;
	}
	return 0;
}
// see .h for more details
/*
 * Gather the 3x3 neighborhood centered at (i0, j0) into P[0..8]
 * (center, then counter-clockwise starting east).
 * Returns 0 on success, -1 on NULL arguments or when the window would
 * leave the image. The original only checked the low bounds, so centers on
 * the bottom or right edge read out of bounds.
 */
int kernel_3x3(int i0, int j0, pgm_img_t *pgm, uint8_t *P) {
	/* NULL checks must come first so the bound checks may use pgm. */
	if (!pgm || !P || i0 < 1 || j0 < 1 || i0 > pgm->vres-2 || j0 > pgm->hres-2) {
		printf("kernel_3x3 error");
		return -1;
	}
	P[0] = pgm->pixel[i0*pgm->hres+j0];
	P[1] = pgm->pixel[i0*pgm->hres+j0+1];
	P[2] = pgm->pixel[(i0-1)*pgm->hres+j0+1];
	P[3] = pgm->pixel[(i0-1)*pgm->hres+j0];
	P[4] = pgm->pixel[(i0-1)*pgm->hres+j0-1];
	P[5] = pgm->pixel[i0*pgm->hres+j0-1];
	P[6] = pgm->pixel[(i0+1)*pgm->hres+j0-1];
	P[7] = pgm->pixel[(i0+1)*pgm->hres+j0];
	P[8] = pgm->pixel[(i0+1)*pgm->hres+j0+1];
	return 0;
}
// see .h for more details
/*
 * 3x3 median filter using a 256-bin histogram per pixel (no sorting: the
 * 5th-smallest of the 9 neighbors is the median). Border pixels are copied
 * through unfiltered. Returns a newly allocated image, or NULL if pgm is NULL.
 */
pgm_img_t* median_filter(pgm_img_t *pgm) {
	uint8_t hist[256];
	uint8_t kernel[9];
	int i, sum, m, r, c;
	if (!pgm) return NULL;
	// zero out hist
	memset(&hist, 0, 256*sizeof(uint8_t));
	// allocate space for the new filtered pixels
	pgm_img_t* pgm_filt = new_pgm(pgm->hres, pgm->vres);
	for (r=0; r < pgm->vres; r++) {
		for (c=0; c < pgm->hres; c++) {
			/* Copy edge pixels through. The last-column test must be
			 * hres-1: the original compared c == hres, which never holds
			 * inside the loop, so kernel_3x3 read past the row end on the
			 * rightmost interior column. */
			if (r == 0 || r == pgm->vres-1 || c == 0 || c == pgm->hres-1) {
				pgm_filt->pixel[r*pgm_filt->hres+c] = pgm->pixel[r*pgm->hres+c];
				continue;
			}
			kernel_3x3(r, c, pgm, &kernel[0]);
			for (m=0; m<9; m++) hist[kernel[m]]++;
			// no need to sort, extracts 5th smallest neighbor in kernel
			i = 0; sum = 0;
			while(sum < 5) {
				sum += hist[i++];
			}
			pgm_filt->pixel[r*pgm_filt->hres+c] = i-1;
			// zero only the histogram entries that were used
			for (m=0; m<9; m++) hist[kernel[m]] = 0;
		}
	}
	return pgm_filt;
}
|
rawSHA224_fmt_plug.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 2010 by Solar Designer
* based on rawMD4_fmt.c code, with trivial changes by groszek.
*
* Rewritten Spring 2013, JimF. SSE code added and released with the following terms:
* No copyright is claimed, and the software is hereby placed in the public domain.
* In case this attempt to disclaim copyright and place the software in the public
* domain is deemed null and void, then the software is Copyright (c) 2011 JimF
* and it is hereby released to the general public under the following
* terms:
*
* This software may be modified, redistributed, and used for any
* purpose, in source and binary forms, with or without modification.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawSHA224;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawSHA224);
#else
#include "arch.h"
#include "sha2.h"
#include "stdint.h"
#include "params.h"
#include "common.h"
#include "johnswap.h"
#include "formats.h"
#include "sse-intrinsics.h"
#ifdef _OPENMP
#ifdef MMX_COEF_SHA256
#define OMP_SCALE 1024
#else
#define OMP_SCALE 2048
#endif
#include <omp.h>
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Raw-SHA224"
#define FORMAT_NAME ""
#define FORMAT_TAG "$SHA224$"
#define TAG_LENGTH 8
#ifdef MMX_COEF_SHA256
#define ALGORITHM_NAME SHA256_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#ifdef MMX_COEF_SHA256
#define PLAINTEXT_LENGTH 55
#else
#define PLAINTEXT_LENGTH 125
#endif
#define CIPHERTEXT_LENGTH 56
#define BINARY_SIZE 28
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_SIZE 0
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#ifdef MMX_COEF_SHA256
#define MAX_KEYS_PER_CRYPT MMX_COEF_SHA256
#else
#define MAX_KEYS_PER_CRYPT 1
#endif
/* Self-test vectors: raw hex digests, with and without the $SHA224$ tag
 * (split() canonicalizes untagged input). */
static struct fmt_tests tests[] = {
{"d63dc919e201d7bc4c825630d2cf25fdc93d4b2f0d46706d29038d01", "password"},
{"$SHA224$d63dc919e201d7bc4c825630d2cf25fdc93d4b2f0d46706d29038d01", "password"},
{"$SHA224$7e6a4309ddf6e8866679f61ace4f621b0e3455ebac2e831a60f13cd1", "12345678"},
{"$SHA224$d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f", ""},
{NULL}
};
#ifdef MMX_COEF_SHA256
/* Byte offset of plaintext byte (i) of candidate (index) inside the
 * interleaved, big-endian SSE key buffer. */
#define GETPOS(i, index) ( (index&(MMX_COEF_SHA256-1))*4 + ((i)&(0xffffffff-3))*MMX_COEF_SHA256 + (3-((i)&3)) + (index>>(MMX_COEF_SHA256>>1))*SHA256_BUF_SIZ*MMX_COEF_SHA256*4 )
/* Interleaved SIMD buffers: MMX_COEF_SHA256 candidates per bundle. */
static uint32_t (*saved_key)[SHA256_BUF_SIZ*MMX_COEF_SHA256];
static uint32_t (*crypt_out)[8*MMX_COEF_SHA256];
#else
/* Scalar buffers: one entry per candidate. */
static int (*saved_key_length);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)
[(BINARY_SIZE + sizeof(ARCH_WORD_32) - 1) / sizeof(ARCH_WORD_32)];
#endif
/*
 * One-time format init: scale the keys-per-crypt limits by the OpenMP
 * thread count (times OMP_SCALE), then allocate the key/result arrays --
 * SIMD-aligned bundles for SSE builds, word-aligned scalars otherwise.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt = omp_t * MIN_KEYS_PER_CRYPT;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt = omp_t * MAX_KEYS_PER_CRYPT;
#endif
#ifndef MMX_COEF_SHA256
saved_key_length = mem_calloc_tiny(sizeof(*saved_key_length) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
#else
/* one interleaved bundle holds MMX_COEF_SHA256 keys */
saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt/MMX_COEF_SHA256, MEM_ALIGN_SIMD);
crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt/MMX_COEF_SHA256, MEM_ALIGN_SIMD);
#endif
}
/*
 * A ciphertext is valid when, after an optional $SHA224$ tag, it consists of
 * exactly CIPHERTEXT_LENGTH hex digits and nothing else.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *start = ciphertext;
	char *cur;

	if (!strncmp(start, FORMAT_TAG, TAG_LENGTH))
		start += TAG_LENGTH;
	/* scan over the hex digits; atoi16 maps non-hex chars to 0x7F */
	for (cur = start; atoi16[ARCH_INDEX(*cur)] != 0x7F; cur++)
		;
	return !*cur && cur - start == CIPHERTEXT_LENGTH;
}
/*
 * Canonicalize a hash for storage: prefix the $SHA224$ tag when missing and
 * lower-case the hex digits. Already-tagged input is returned unchanged.
 * Returns a pointer into a static buffer (not thread-safe, per format API).
 */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1];
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
return ciphertext;
memcpy(out, FORMAT_TAG, TAG_LENGTH);
/* CIPHERTEXT_LENGTH + 1 copies the trailing NUL too */
memcpy(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + 1);
strlwr(out + TAG_LENGTH);
return out;
}
/*
 * Decode the hex digest (after the tag) into a lazily allocated static
 * BINARY_SIZE byte buffer. SIMD builds flip the endianness to match the
 * layout produced by the SSE crypt code.
 */
static void *binary(char *ciphertext)
{
static unsigned char *out;
int i;
if (!out)
out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);
ciphertext += TAG_LENGTH;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] = atoi16[ARCH_INDEX(ciphertext[i*2])] * 16 +
atoi16[ARCH_INDEX(ciphertext[i*2 + 1])];
}
#ifdef MMX_COEF_SHA256
alter_endianity (out, BINARY_SIZE);
#endif
return out;
}
/* Bucketing hashes for the cracker's hash tables: return the low 4..27 bits
 * of the first output word. The SIMD variants first locate the candidate's
 * lane inside the interleaved crypt_out bundles. */
#ifdef MMX_COEF_SHA256
static int get_hash_0 (int index) { return crypt_out[index>>(MMX_COEF_SHA256>>1)][index&(MMX_COEF_SHA256-1)] & 0xf; }
static int get_hash_1 (int index) { return crypt_out[index>>(MMX_COEF_SHA256>>1)][index&(MMX_COEF_SHA256-1)] & 0xff; }
static int get_hash_2 (int index) { return crypt_out[index>>(MMX_COEF_SHA256>>1)][index&(MMX_COEF_SHA256-1)] & 0xfff; }
static int get_hash_3 (int index) { return crypt_out[index>>(MMX_COEF_SHA256>>1)][index&(MMX_COEF_SHA256-1)] & 0xffff; }
static int get_hash_4 (int index) { return crypt_out[index>>(MMX_COEF_SHA256>>1)][index&(MMX_COEF_SHA256-1)] & 0xfffff; }
static int get_hash_5 (int index) { return crypt_out[index>>(MMX_COEF_SHA256>>1)][index&(MMX_COEF_SHA256-1)] & 0xffffff; }
static int get_hash_6 (int index) { return crypt_out[index>>(MMX_COEF_SHA256>>1)][index&(MMX_COEF_SHA256-1)] & 0x7ffffff; }
#else
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }
#endif
#ifdef MMX_COEF_SHA256
/*
 * SIMD set_key: copy the NUL-terminated candidate into this index's lane of
 * the interleaved SSE buffer, big-endian, appending the 0x80 SHA-2 padding
 * byte, zeroing stale bytes left by a previous (longer) key, and storing the
 * bit length in word 15 of the block.
 */
static void set_key(char *key, int index) {
const ARCH_WORD_32 *wkey = (ARCH_WORD_32*)key;
/* first word of this candidate's lane in the interleaved bundle */
ARCH_WORD_32 *keybuffer = &((ARCH_WORD_32 *)saved_key)[(index&(MMX_COEF_SHA256-1)) + (index>>(MMX_COEF_SHA256>>1))*SHA256_BUF_SIZ*MMX_COEF_SHA256];
ARCH_WORD_32 *keybuf_word = keybuffer;
unsigned int len;
ARCH_WORD_32 temp;
len = 0;
/* consume 4 key bytes per iteration until a NUL byte appears; each
 * early-exit branch merges the 0x80 padding byte into the same word */
while((unsigned char)(temp = *wkey++)) {
if (!(temp & 0xff00))
{
*keybuf_word = JOHNSWAP((temp & 0xff) | (0x80 << 8));
len++;
goto key_cleaning;
}
if (!(temp & 0xff0000))
{
*keybuf_word = JOHNSWAP((temp & 0xffff) | (0x80 << 16));
len+=2;
goto key_cleaning;
}
if (!(temp & 0xff000000))
{
*keybuf_word = JOHNSWAP(temp | (0x80 << 24));
len+=3;
goto key_cleaning;
}
*keybuf_word = JOHNSWAP(temp);
len += 4;
keybuf_word += MMX_COEF_SHA256;
}
/* key length was a multiple of 4: padding starts a fresh word */
*keybuf_word = 0x80000000;
key_cleaning:
/* zero the rest of the block left over from a previous, longer key */
keybuf_word += MMX_COEF_SHA256;
while(*keybuf_word) {
*keybuf_word = 0;
keybuf_word += MMX_COEF_SHA256;
}
/* SHA-2 message length field, in bits */
keybuffer[15*MMX_COEF_SHA256] = len << 3;
}
#else
/* Scalar set_key: store the candidate and its length, truncating to
 * PLAINTEXT_LENGTH. */
static void set_key(char *key, int index)
{
int len = strlen(key);
saved_key_length[index] = len;
if (len > PLAINTEXT_LENGTH)
len = saved_key_length[index] = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, len);
}
#endif
#ifdef MMX_COEF_SHA256
/*
 * SIMD get_key: recover the plaintext by reading the stored bit length from
 * word 15 of the lane and gathering the bytes back out of the interleaved
 * buffer via GETPOS. Returns a static buffer.
 */
static char *get_key(int index) {
unsigned int i,s;
static char out[PLAINTEXT_LENGTH+1];
unsigned char *wucp = (unsigned char*)saved_key;
/* stored value is in bits; >>3 converts to bytes */
s = ((ARCH_WORD_32 *)saved_key)[15*MMX_COEF_SHA256 + (index&(MMX_COEF_SHA256-1)) + (index>>(MMX_COEF_SHA256>>1))*SHA256_BUF_SIZ*MMX_COEF_SHA256] >> 3;
for(i=0;i<s;i++)
out[i] = wucp[ GETPOS(i, index) ];
out[i] = 0;
return (char*) out;
}
#else
/* Scalar get_key: NUL-terminate the stored key in place and return it. */
static char *get_key(int index)
{
saved_key[index][saved_key_length[index]] = 0;
return saved_key[index];
}
#endif
/*
 * Hash every queued candidate. With OpenMP the loop runs in parallel,
 * stepping by one SIMD bundle (or one scalar key). Without OpenMP the braces
 * execute exactly once with index 0, which suffices because
 * max_keys_per_crypt is then a single scalar key / a single SIMD bundle.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#ifdef MMX_COEF_SHA256
int inc = MMX_COEF_SHA256;
#else
int inc = 1;
#endif
#pragma omp parallel for
for (index = 0; index < count; index += inc)
#endif
{
#ifdef MMX_COEF_SHA256
/* SSEi_CRYPT_SHA224: SHA-256 body with SHA-224 IV/truncation */
SSESHA256body(&saved_key[index/MMX_COEF_SHA256], crypt_out[index/MMX_COEF_SHA256], NULL, SSEi_MIXED_IN|SSEi_CRYPT_SHA224);
#else
SHA256_CTX ctx;
SHA224_Init(&ctx);
SHA224_Update(&ctx, saved_key[index], saved_key_length[index]);
SHA224_Final((unsigned char *)crypt_out[index], &ctx);
#endif
}
return count;
}
/* Fast reject: compare only the first 32 bits of the target binary against
 * every computed hash; full verification happens in cmp_one. */
static int cmp_all(void *binary, int count)
{
int index;
for (index = 0; index < count; index++)
#ifdef MMX_COEF_SHA256
if (((uint32_t *) binary)[0] == crypt_out[index>>(MMX_COEF_SHA256>>1)][index&(MMX_COEF_SHA256-1)])
#else
if ( ((ARCH_WORD_32*)binary)[0] == crypt_out[index][0] )
#endif
return 1;
return 0;
}
/* Full BINARY_SIZE comparison for one candidate. The SIMD variant walks the
 * candidate's lane word by word through the interleaved output bundle. */
static int cmp_one(void *binary, int index)
{
#ifdef MMX_COEF_SHA256
int i;
for (i = 0; i < BINARY_SIZE/4; i++)
if (((uint32_t *) binary)[i] != crypt_out[index>>(MMX_COEF_SHA256>>1)][(index&(MMX_COEF_SHA256-1))+i*MMX_COEF_SHA256])
return 0;
return 1;
#else
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
#endif
}
/* The full binary was already compared in cmp_one; nothing more to verify. */
static int cmp_exact(char *source, int index)
{
	(void)source;
	(void)index;
	return 1;
}
/* Format registration record: static parameters followed by the method
 * table; fmt_default_* stubs are used wherever raw SHA-224 needs no
 * special handling (saltless format). */
struct fmt_main fmt_rawSHA224 = {
{
/* params */
FORMAT_LABEL,
FORMAT_NAME,
"SHA224 " ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
tests
}, {
/* methods */
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
binary,
fmt_default_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
fmt_default_set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
omp-private.c | #include <stdio.h>
#include <omp.h>
/* Educational demo contrasting OpenMP private / lastprivate / firstprivate.
 * The "odd" values printed (indeterminate x, unchanged x after the region)
 * are the intended teaching points -- do not "fix" them. */
int main(void){
omp_set_dynamic(0);
omp_set_num_threads(4);
/* prints 1: outside a parallel region the team size is one */
printf("%d\n\n", omp_get_num_threads());
int i, x = 42;
printf("----------------------------------\n");
printf("private\n\n");
printf("start x is %d\n", x);
/* private: each thread's x starts indeterminate; reading it is the demo */
#pragma omp parallel for private(x)
for(i = 0; i < 4; i++){
x += i;
printf("Thread number: %d x: %d\n", omp_get_thread_num(), x);
}
/* still 42: thread-private copies are discarded */
printf("end x is %d\n\n", x);
printf("----------------------------------\n");
printf("lastprivate\n\n");
int y = 42;
printf("start y is %d\n", y);
/* lastprivate: the copy from the sequentially-last iteration is kept */
#pragma omp parallel for lastprivate(y)
for(i = 0; i < 4; i++){
y += i;
printf("Thread number: %d y: %d\n", omp_get_thread_num(), y);
}
printf("end y is %d\n\n", y);
printf("----------------------------------\n");
printf("firstprivate\n\n");
int z = 42;
printf("start z is %d\n", z);
/* firstprivate: each thread's z starts at 42; updates are discarded */
#pragma omp parallel for firstprivate(z)
for(i = 0; i < 4; i++){
z += i;
printf("Thread number: %d x: %d\n", omp_get_thread_num(), z);
}
printf("end z is %d\n", z);
return 0;
}
// private: each thread gets an uninitialised copy (indeterminate value); modifications are discarded after the region
// firstprivate: each thread's copy is initialised from the value before the region; modifications are discarded
// lastprivate: the value from the (sequentially) last iteration is copied back after the region
|
dcache.c | #include "papi.h"
#include "caches.h"
#include "prepareArray.h"
#include "timing_kernels.h"
#include "dcache.h"
/* Bundle of per-experiment arrays and settings handed to experiment_main. */
typedef struct {
int *values;          /* buffer sizes (bytes) tested, one per guess */
double **rslts;       /* [iteration][guess] latency results */
double **counter;     /* [iteration][guess] PAPI counter readings */
char *event_name;     /* PAPI event to measure (counter mode) */
int latency_only;     /* nonzero: measure latency, skip PAPI */
int mode;             /* CACHE_READ_WRITE or read-only */
} data_t;
extern int _papi_eventset;
/* Experiment parameters passed via globals (see note in d_cache_test). */
int global_max_iter, global_line_size_in_bytes, global_pattern;
float global_pages_per_block;
int line_size;        /* cache line size in uintptr_t units */
int guessCount, min_size, max_size;  /* buffer-size search space */
/*
 * Run the data-cache experiment over all parameter variants (access pattern
 * 3 and 4, line sizes 64/128, and two pages-per-block values for the
 * non-sequential pattern), writing results for papi_event_name into
 * "<outdir><event><suffix>". Errors release what was allocated via the
 * goto-cleanup chain at the bottom.
 */
void d_cache_driver(char* papi_event_name, int max_iter, char* outdir, int latency_only, int mode, int show_progress)
{
int pattern = 3;
int ls = 64;
int test_cnt = 0;
float ppb = 16;
FILE *ofp_papi;
char *sufx, *papiFileName;
// Open file (pass handle to d_cache_test()).
if(CACHE_READ_WRITE == mode){
sufx = strdup(".data.writes");
}else{
sufx = strdup(".data.reads");
}
int l = strlen(outdir)+strlen(papi_event_name)+strlen(sufx);
papiFileName = (char *)calloc( 1+l, sizeof(char) );
if (!papiFileName) {
fprintf(stderr, "Unable to allocate memory. Skipping event %s.\n", papi_event_name);
goto error0;
}
/* sprintf returns the length written; anything else means truncation/error */
if (l != (sprintf(papiFileName, "%s%s%s", outdir, papi_event_name, sufx))) {
fprintf(stderr, "sprintf error. Skipping event %s.\n", papi_event_name);
goto error1;
}
if (NULL == (ofp_papi = fopen(papiFileName,"w"))) {
fprintf(stderr, "Unable to open file %s. Skipping event %s.\n", papiFileName, papi_event_name);
goto error1;
}
// Go through each parameter variant (6 tests in total).
for(pattern = 3; pattern <= 4; ++pattern)
{
for(ls = 64; ls <= 128; ls *= 2)
{
// PPB variation only makes sense if the pattern is not sequential.
if(pattern != 4)
{
for(ppb = 64; ppb >= 16; ppb -= 48)
{
if( show_progress )
{
printf("%3d%%\b\b\b\b",(100*test_cnt++)/6);
fflush(stdout);
}
d_cache_test(pattern, max_iter, ls, ppb, papi_event_name, latency_only, mode, ofp_papi);
}
}
else
{
if( show_progress )
{
printf("%3d%%\b\b\b\b",(100*test_cnt++)/6);
fflush(stdout);
}
d_cache_test(pattern, max_iter, ls, ppb, papi_event_name, latency_only, mode, ofp_papi);
}
}
}
if( show_progress )
{
size_t i;
printf("100%%");
/* erase the progress indicator */
for(i=0; i<strlen("Total:100%  Current test:100%"); i++) putchar('\b');
fflush(stdout);
}
// Close files and free memory.
fclose(ofp_papi);
error1:
free(papiFileName);
error0:
free(sufx);
return;
}
/*
 * Run one cache experiment variant: allocate per-iteration result arrays,
 * execute the pointer chases (experiment_main), and write either latency or
 * counter minima per buffer size to ofp. All allocations are released on
 * both the success and the failure path (the original leaked everything
 * when experiment_main failed).
 */
void d_cache_test(int pattern, int max_iter, int line_size_in_bytes, float pages_per_block, char* papi_event_name, int latency_only, int mode, FILE* ofp){
	int i,j;
	int *values;
	double **rslts, *sorted_rslts, *latencies;
	double **counter, *sorted_counter;
	int status;
	// Replace this by modifying function header and global vars.
	global_pattern = pattern;
	global_max_iter = max_iter;
	global_line_size_in_bytes = line_size_in_bytes;
	global_pages_per_block = pages_per_block;
	line_size = line_size_in_bytes/sizeof(uintptr_t);
	min_size = 2*1024/sizeof(uintptr_t);        // 2KB
	max_size = 1024*1024*1024/sizeof(uintptr_t);// 1GB
	// The number of different sizes we will guess, trying to find the right size.
	guessCount = 0;
	for(i=min_size; i<max_size; i*=2){
		// += 4 for i, i*1.25, i*1.5, i*1.75
		guessCount += 4;
	}
	rslts = (double **)malloc(max_iter*sizeof(double *));
	for(i=0; i<max_iter; ++i){
		rslts[i] = (double *)malloc(guessCount*sizeof(double));
	}
	sorted_rslts = (double *)malloc(max_iter*sizeof(double));
	counter = (double **)malloc(max_iter*sizeof(double *));
	for(i=0; i<max_iter; ++i){
		counter[i] = (double *)malloc(guessCount*sizeof(double));
	}
	sorted_counter = (double *)malloc(max_iter*sizeof(double));
	latencies = (double *)malloc(guessCount*sizeof(double));
	values = (int *)malloc(guessCount*sizeof(int));
	data_t data;
	data.values = values;
	data.rslts = rslts;
	data.counter = counter;
	data.event_name = papi_event_name;
	data.latency_only = latency_only;
	data.mode = mode;
	// Run the pointer chases.
	status = experiment_main(&data);
	if( 0 != status ){
		/* the original returned here, leaking every buffer above */
		goto cleanup;
	}
	for(j=0; j<guessCount; ++j){
		for(i=0; i<max_iter; ++i){
			sorted_rslts[i] = rslts[i][j];
		}
		/* report the minimum over iterations (least-noisy measurement) */
		qsort(sorted_rslts, max_iter, sizeof(double), compar_lf);
		if(latency_only)
		{
			fprintf(ofp, "%d %.4lf\n", values[j], sorted_rslts[0]);
		}
		latencies[j] = sorted_rslts[0];
		for(i=0; i<max_iter; ++i){
			sorted_counter[i] = counter[i][j];
		}
		qsort(sorted_counter, max_iter, sizeof(double), compar_lf);
		if(!latency_only)
		{
			fprintf(ofp, "%d %lf\n", values[j], sorted_counter[0]);
		}
	}
cleanup:
	// Free dynamically allocated memory (shared by success and error paths).
	for(i=0; i<max_iter; ++i){
		free(rslts[i]);
		free(counter[i]);
	}
	free(rslts);
	free(counter);
	free(sorted_rslts);
	free(sorted_counter);
	free(latencies);
	free(values);
	return;
}
/*
 * Set up the PAPI event set (unless latency_only), run varyBufferSizes for
 * each iteration, then tear the event set down.
 * Returns 0 on success, -1 on any PAPI failure or the last iteration's
 * status otherwise. arg is a data_t* (void* for thread-entry compatibility).
 */
int experiment_main(void *arg){
int i, latency_only, mode;
int native, ret_val;
int *values;
double **rslts;
double **counter;
data_t *data;
int status = 0;
data = (data_t *)arg;
values = data->values;
rslts = data->rslts;
counter = data->counter;
latency_only = data->latency_only;
mode = data->mode;
if( !latency_only ){
_papi_eventset = PAPI_NULL;
/* Set the event */
ret_val = PAPI_create_eventset( &_papi_eventset );
if (ret_val != PAPI_OK ){
return -1;
}
ret_val = PAPI_event_name_to_code( data->event_name, &native );
if (ret_val != PAPI_OK ){
return -1;
}
ret_val = PAPI_add_event( _papi_eventset, native );
if (ret_val != PAPI_OK ){
return -1;
}
/* Done setting the event. */
}
/* status keeps the result of the last iteration only */
for(i=0; i<global_max_iter; ++i){
status = varyBufferSizes(values, rslts[i], counter[i], global_line_size_in_bytes, global_pages_per_block, latency_only, mode);
}
if( !latency_only ){
ret_val = PAPI_cleanup_eventset(_papi_eventset);
if (ret_val != PAPI_OK ){
fprintf(stderr, "PAPI_cleanup_eventset() returned %d\n",ret_val);
return -1;
}
ret_val = PAPI_destroy_eventset(&_papi_eventset);
if (ret_val != PAPI_OK ){
fprintf(stderr, "PAPI_destroy_eventset() returned %d\n",ret_val);
return -1;
}
}
return status;
}
/*
 * For buffer sizes min_size..max_size (at x1, x1.25, x1.5, x1.75 steps),
 * have every OpenMP thread traverse its own buffer and record the latency /
 * counter result per size. Returns 0 on success, -1 on allocation or probe
 * failure (per-thread buffers are freed on every exit path).
 */
int varyBufferSizes(int *values, double *rslts, double *counter, int line_size_in_bytes, float pages_per_block, int latency_only, int mode){
	int i, j, active_buf_len;
	int ONT = 1;
	int allocErr = 0;
	run_output_t out;
	// Get the number of threads.
	#pragma omp parallel
	{
		if(!omp_get_thread_num()) {
			ONT = omp_get_num_threads();
		}
	}
	uintptr_t rslt=42, *v[ONT], *ptr[ONT];
	// Allocate memory for each thread to traverse.
	#pragma omp parallel
	{
		int idx = omp_get_thread_num();
		/* thread-private loop counter: the original reused the shared
		 * function-scope `i` here, a data race across threads */
		long k;
		ptr[idx] = (uintptr_t *)malloc( (2*max_size+line_size)*sizeof(uintptr_t) );
		if( !ptr[idx] ){
			fprintf(stderr, "Error: cannot allocate space for experiment.\n");
			#pragma omp critical
			{
				allocErr = -1;
			}
		} else {
			/* only touch the buffer when allocation succeeded; the original
			 * fell through and dereferenced a NULL-derived pointer */
			// align v to the line size
			v[idx] = (uintptr_t *)(line_size_in_bytes*(((uintptr_t)ptr[idx]+line_size_in_bytes)/line_size_in_bytes));
			// touch every page at least a few times
			for(k=0; k<2*max_size; k+=512){
				rslt += v[idx][k];
			}
		}
	}
	if(allocErr != 0)
	{
		/* free what the other threads did allocate (free(NULL) is a no-op) */
		for(j=0; j<ONT; ++j){
			free(ptr[j]);
		}
		return -1;
	}
	// Make a couple of cold runs
	out = probeBufferSize(16*line_size, line_size, pages_per_block, ONT, v, &rslt, latency_only, mode);
	out = probeBufferSize(2*16*line_size, line_size, pages_per_block, ONT, v, &rslt, latency_only, mode);
	if(out.status != 0)
	{
		/* don't leak the per-thread buffers on probe failure */
		for(j=0; j<ONT; ++j){
			free(ptr[j]);
		}
		return -1;
	}
	// run the actual experiment
	i = 0;
	for(active_buf_len=min_size; active_buf_len<max_size; active_buf_len*=2){
		usleep(1000);
		out = probeBufferSize(active_buf_len, line_size, pages_per_block, ONT, v, &rslt, latency_only, mode);
		rslts[i] = out.dt;
		counter[i] = out.counter;
		values[i++] = sizeof(uintptr_t)*active_buf_len;
		usleep(1000);
		out = probeBufferSize((int)((double)active_buf_len*1.25), line_size, pages_per_block, ONT, v, &rslt, latency_only, mode);
		rslts[i] = out.dt;
		counter[i] = out.counter;
		values[i++] = sizeof(uintptr_t)*((int)((double)active_buf_len*1.25));
		usleep(1000);
		out = probeBufferSize((int)((double)active_buf_len*1.5), line_size, pages_per_block, ONT, v, &rslt, latency_only, mode);
		rslts[i] = out.dt;
		counter[i] = out.counter;
		values[i++] = sizeof(uintptr_t)*((int)((double)active_buf_len*1.5));
		usleep(1000);
		out = probeBufferSize((int)((double)active_buf_len*1.75), line_size, pages_per_block, ONT, v, &rslt, latency_only, mode);
		rslts[i] = out.dt;
		counter[i] = out.counter;
		values[i++] = sizeof(uintptr_t)*((int)((double)active_buf_len*1.75));
	}
	// Free each thread's memory.
	for(j=0; j<ONT; ++j){
		free(ptr[j]);
	}
	return 0;
}
|
return.c | // return.c
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <omp.h>
#include "fractal.h"
#include "init.h"
//#include "iteration.h"
#include "return.h"
#include "MT.h"
#define LOOP 50000000
#define LIMIT 10000000
#define STEPS 1000000
/*
 * Random-walk return-time experiment on the fractal lattice: run LOOP walks
 * of up to LIMIT steps each (capped at STEPS counted moves), tally how often
 * the walker is back at the origin after exactly n moves, print selected
 * return probabilities, and log wall-clock timing.
 * fractal.init[y][x] == 0 marks blocked sites.
 */
void FRACTAL_return (int FRACTAL_stage_num, int num, FRACTAL fractal) {
	const int origin=(num/3)-1;
	int m, n;
	int diff_x=origin, diff_y=origin;
	int return_times[STEPS];
	int r, iteration, max=10;
	FILE *time_log;
	time_t time_s, time_g;
	(void)FRACTAL_stage_num;  /* kept for API compatibility; unused here */
	time(&time_s);
	init_genrand((unsigned)time(NULL));
	for (n=0; n<STEPS; n++) return_times[n] = 0;
	//#pragma omp parallel for private(n)
	for (m=0; m<LOOP; m++) {
		diff_x = origin;
		diff_y = origin;
		iteration = 0;
		// random walk: each step tries one of 4 directions; a move only
		// counts (iteration++) when it stays on the lattice and the target
		// site is not blocked
		// #pragma vector always
		for (n=0; n<LIMIT; n++) {
			r = genrand_int31();
			switch (r%4) {
			case 0:
				// right_0
				if (diff_x!=(num-1) && fractal.init[diff_y][diff_x+1]!=0) {
					diff_x += 1; iteration++;
					if (diff_x==origin && diff_y==origin) return_times[iteration-1]++;
				}
				break;
			case 1:
				// up_1
				if (diff_y!=0 && fractal.init[diff_y-1][diff_x]!=0) {
					diff_y -= 1; iteration++;
					if (diff_x==origin && diff_y==origin) return_times[iteration-1]++;
				}
				break;
			case 2:
				// left_2
				if (diff_x!=0 && fractal.init[diff_y][diff_x-1]!=0) {
					diff_x -= 1; iteration++;
					if (diff_x==origin && diff_y==origin) return_times[iteration-1]++;
				}
				break;
			case 3:
				// down_3
				if (diff_y!=(num-1) && fractal.init[diff_y+1][diff_x]!=0) {
					diff_y += 1; iteration++;
					if (diff_x==origin && diff_y==origin) return_times[iteration-1]++;
				}
				break;
			}
			if (iteration==STEPS) break;
			// if (n==0) printf("int = %3d, x = %d, y = %d\n", r%4, diff_x, diff_y);
		}
	}
	// print return probability at n = 10, 50, 250, ... (max *= 5)
	for (n=0; n<STEPS; n++) {
		if ((n+1)%max == 0) {
			printf("%10d  %15.10f %%  %10d\n", n+1, 100*(double)(return_times[n])/LOOP, return_times[n]);
			max *= 5;
		}
	}
	// time
	time(&time_g);
	time_log = fopen(".//bin//debug//time.log", "w");
	/* the original wrote through the handle unchecked and crashed when the
	 * log directory was missing */
	if (time_log != NULL) {
		fprintf(time_log, "start: %.24s\n", ctime(&time_s));
		fprintf(time_log, "goal : %.24s\n", ctime(&time_g));
		fprintf(time_log, "diff : %.6f\n" , difftime(time_g, time_s));
		fclose(time_log);
	} else {
		fprintf(stderr, "FRACTAL_return: cannot open time log\n");
	}
	return;
}
|
BitMap.h | #pragma once
#include <unistd.h>
#include <cstdlib>
#include <cstdio>
#include <string>
#include <string_view>
#include <cstdarg>
#include <cassert>
#include <memory>
// debug
#include <iostream>
#include <vector>
#include "PRNG.h"
// Sampling distributions available to the random-generation utilities.
enum DistrName{
UNIFORM,
NORMAL
};
struct png_dims{
png_dims(size_t h, size_t w, size_t c, size_t bd, png_byte ct) :
height{ h }, width{ w }, channels{ c }, bit_depth{ bd },
color_type(ct){}
size_t height, width, channels, bit_depth;
png_byte color_type;
};
// Utility lookup tables mapping a channel index (0=r, 1=g, 2=b, 3=a) to the
// mask and bit shift of that channel inside a packed 0xAABBGGRR uint32_t.
struct RGBA_utils{
    static constexpr uint32_t rgba[4] = {0x000000ff,
                                         0x0000ff00,
                                         0x00ff0000,
                                         0xff000000};
    static constexpr unsigned char shift[4] = {0, 8, 16, 24};
};
// BUGFIX: the instance must be 'inline' (C++17). A plain namespace-scope
// variable defined in a header violates the ODR (duplicate definition at
// link time) as soon as the header is included from more than one TU.
inline RGBA_utils rgba_utils;
// Clamp p into [inf, sup] in place (macro so it works for any numeric lvalue).
#define CLIP(p, inf, sup) if(p < inf) { p = inf; } else if(p > sup) { p = sup; }
// Forward declarations: PixelMap offers converting constructors from both.
class BitMapRGBA;
class BitMapRGB;
// Planar pixel buffer: _channels samples of type T per pixel, stored
// row-major in one contiguous array (index: c + (j + i*_width)*_channels).
// Convertible from/to the packed 32-bit BitMapRGBA/BitMapRGB classes below.
template<typename T>
class PixelMap{
public:
    explicit
    PixelMap();
    PixelMap(size_t height, size_t width, size_t channels);
    PixelMap(png_dims&& dims);
    PixelMap(PixelMap<T>&& other) noexcept;
    PixelMap(const PixelMap<T>& other);
    // Unpack a packed bitmap into 4 (RGBA) or 3 (RGB) channels.
    PixelMap(const BitMapRGBA& bitmap);
    PixelMap(const BitMapRGB& bitmap);
    // Geometry accessors.
    inline size_t getHeight() const;
    inline size_t getWidth() const;
    inline size_t getChannels() const;
    inline bool has_same_dims_as(const PixelMap<T>& other) const;
    PixelMap<T> transpose() const;
    // Normalized per-channel difference (this - other); modifies *this.
    PixelMap<T>& subtract(const PixelMap<T>& other);
    // Sample access by (row, col, channel).
    inline T& operator()(size_t i, size_t j, size_t c);
    inline const T& operator()(size_t i, size_t j, size_t c) const;
    // Whole-buffer, per-row and per-pixel iterator ranges.
    inline T* begin();
    inline T* end();
    inline const T* begin() const;
    inline const T* end() const;
    inline T* rowBegin(size_t row);
    inline T* rowEnd(size_t row);
    inline const T* rowBegin(size_t row) const;
    inline const T* rowEnd(size_t row) const;
    inline T* pixBegin(size_t row, size_t col);
    inline T* pixEnd(size_t row, size_t col);
    inline const T* pixBegin(size_t row, size_t col) const;
    inline const T* pixEnd(size_t row, size_t col) const;
    PixelMap<T>& operator=(const PixelMap<T>& other);
    bool operator==(const PixelMap<T>& other) const;
    void print() const;
private:
    size_t _height = 0;
    size_t _width = 0;
    size_t _channels = 0;
    std::unique_ptr<T[]> _pixel_map;
};
// Packed RGBA bitmap: one uint32_t per pixel laid out as 0xAABBGGRR,
// stored row-major.  Can be filled with random noise from PRNG.h
// distributions or converted from a PixelMap<png_byte>.
class BitMapRGBA{
public:
    explicit
    BitMapRGBA();
    BitMapRGBA(size_t height, size_t width);
    BitMapRGBA(size_t height, size_t width, DistrName d_name);
    template<typename U, typename V>
    BitMapRGBA(size_t height, size_t width, DistrName d_name, U param1, V param2);
    BitMapRGBA(const BitMapRGBA& other);
    BitMapRGBA(BitMapRGBA&& other) noexcept;
    BitMapRGBA(const PixelMap<png_byte>& pixelmap);
    inline size_t getHeight() const;
    inline size_t getWidth() const;
    // Extract channel c (0=r, 1=g, 2=b, 3=a) of pixel (i, j).
    inline unsigned char
    get(size_t i, size_t j, size_t c) const;
    // Pack one interleaved row pointed to by rowit into row 'row'.
    template<typename rowIt>
    void copy_row(rowIt rowit, size_t row, size_t channels);
    BitMapRGBA transpose() const;
    // Normalized per-channel difference (this - other); modifies *this.
    BitMapRGBA& subtract_rgb(const BitMapRGBA& other);
    BitMapRGBA& subtract_rgba(const BitMapRGBA& other);
    // Packed-pixel access and iterator ranges.
    inline uint32_t& operator()(size_t i, size_t j);
    inline const uint32_t& operator()(size_t i, size_t j) const;
    inline uint32_t* begin();
    inline uint32_t* end();
    inline const uint32_t* begin() const;
    inline const uint32_t* end() const;
    inline uint32_t* rowBegin(size_t row);
    inline uint32_t* rowEnd(size_t row);
    inline const uint32_t* rowBegin(size_t row) const;
    inline const uint32_t* rowEnd(size_t row) const;
    BitMapRGBA& operator=(const BitMapRGBA& other);
    //BitMapRGBA& operator=(const PixelMap<png_byte>& pixelmap);
    void print() const;
    void print_bitmap() const;
private:
    size_t _height = 0;
    size_t _width = 0;
    std::unique_ptr<uint32_t[]> _data;
};
// Packed RGB bitmap: one uint32_t per pixel laid out as 0x00BBGGRR (high
// byte unused), stored row-major.  Interface mirrors BitMapRGBA.
class BitMapRGB{
public:
    explicit
    BitMapRGB();
    BitMapRGB(size_t height, size_t width);
    BitMapRGB(size_t height, size_t width, DistrName d_name);
    template<typename U, typename V>
    BitMapRGB(size_t height, size_t width, DistrName d_name, U param1, V param2);
    BitMapRGB(const BitMapRGB& other);
    BitMapRGB(BitMapRGB&& other) noexcept;
    BitMapRGB(const PixelMap<png_byte>& pixelmap);
    inline size_t getHeight() const;
    inline size_t getWidth() const;
    // Extract channel c (0=r, 1=g, 2=b) of pixel (i, j).
    inline unsigned char
    get(size_t i, size_t j, size_t c) const;
    // Pack one interleaved row pointed to by rowit into row 'row'.
    template<typename rowIt>
    void copy_row(rowIt rowit, size_t row, size_t channels);
    BitMapRGB transpose() const;
    // Normalized per-channel difference (this - other); modifies *this.
    BitMapRGB& subtract(const BitMapRGB& other);
    // Packed-pixel access and iterator ranges.
    inline uint32_t& operator()(size_t i, size_t j);
    inline const uint32_t& operator()(size_t i, size_t j) const;
    inline uint32_t* begin();
    inline uint32_t* end();
    inline const uint32_t* begin() const;
    inline const uint32_t* end() const;
    inline uint32_t* rowBegin(size_t row);
    inline uint32_t* rowEnd(size_t row);
    inline const uint32_t* rowBegin(size_t row) const;
    inline const uint32_t* rowEnd(size_t row) const;
    BitMapRGB& operator=(const BitMapRGB& other);
    //BitMapRGB& operator=(const PixelMap<png_byte>& pixelmap);
    //BitMapRGB& operator-=(const BitMapRGB other);
    void print() const;
    void print_bitmap() const;
private:
    size_t _height = 0;
    size_t _width = 0;
    std::unique_ptr<uint32_t[]> _data;
};
//////////////////////////////////////////////////////////////////////
// //
// PIXELMAP IMPLEM //
// //
//////////////////////////////////////////////////////////////////////
// Default: empty map, no allocation.
template<typename T>
PixelMap<T>::PixelMap() :
    _height{ 0 }, _width{ 0 }, _channels{ 0 }{}
// Allocate a zero-initialized height*width*channels buffer.
template<typename T>
PixelMap<T>::PixelMap(size_t height, size_t width, size_t channels) :
    _height{ height }, _width{ width }, _channels{ channels },
    _pixel_map{ std::make_unique<T[]>(height*width*channels) }{}
// Size from a png_dims bundle (bit_depth/color_type are not stored here).
template<typename T>
PixelMap<T>::PixelMap(png_dims&& dims) :
    _height{ dims.height }, _width{ dims.width }, _channels{ dims.channels },
    _pixel_map{ std::make_unique<T[]>(dims.height*dims.width*dims.channels) }{}
template<typename T>
PixelMap<T>::PixelMap(PixelMap<T>&& other) noexcept:
    _height{ other._height }, _width{ other._width }, _channels{ other._channels },
    _pixel_map{ std::move(other._pixel_map) }{}
// Copy construction delegates to copy assignment (deep copy).
template<typename T>
PixelMap<T>::PixelMap(const PixelMap<T>& other){
    *this = other;
}
// Unpack a packed RGBA bitmap into 4 planar-interleaved channels
// (channel order r, g, b, a from the low byte up).
template<typename T>
PixelMap<T>::PixelMap(const BitMapRGBA& bitmap) :
    _height{ bitmap.getHeight() }, _width{ bitmap.getWidth() }, _channels{ 4 },
    _pixel_map{ std::make_unique<T[]>(bitmap.getHeight()*bitmap.getWidth()*4) }{
    for(size_t i = 0; i < _height; ++i){
        for(size_t j = 0; j < _width; ++j){
            (*this)(i, j, 0) = ( bitmap(i, j)       & 0xff);
            (*this)(i, j, 1) = ((bitmap(i, j) >> 8 )& 0xff);
            (*this)(i, j, 2) = ((bitmap(i, j) >> 16)& 0xff);
            (*this)(i, j, 3) = ((bitmap(i, j) >> 24)& 0xff);
        }
    }
}
// Same as above for 3-channel RGB bitmaps.
template<typename T>
PixelMap<T>::PixelMap(const BitMapRGB& bitmap) :
    _height{ bitmap.getHeight() }, _width{ bitmap.getWidth() }, _channels{ 3 },
    _pixel_map{ std::make_unique<T[]>(bitmap.getHeight()*bitmap.getWidth()*3) }{
    for(size_t i = 0; i < _height; ++i){
        for(size_t j = 0; j < _width; ++j){
            (*this)(i, j, 0) = ( bitmap(i, j)       & 0xff);
            (*this)(i, j, 1) = ((bitmap(i, j) >> 8 )& 0xff);
            (*this)(i, j, 2) = ((bitmap(i, j) >> 16)& 0xff);
        }
    }
}
// Geometry accessors.
template<typename T>
size_t PixelMap<T>::getHeight() const{ return _height; }
template<typename T>
size_t PixelMap<T>::getWidth() const{ return _width; }
template<typename T>
size_t PixelMap<T>::getChannels() const{ return _channels; }
// Sample access: row-major, channels interleaved per pixel.
template<typename T>
T& PixelMap<T>::operator()(size_t i, size_t j, size_t c){ return _pixel_map[c+(j+i*_width)*_channels]; }
template<typename T>
const T& PixelMap<T>::operator()(size_t i, size_t j, size_t c) const{ return _pixel_map[c+(j+i*_width)*_channels]; }
// Whole-buffer, per-row and per-pixel iterator ranges (half-open).
template<typename T>
T* PixelMap<T>::begin() { return _pixel_map.get(); }
template<typename T>
T* PixelMap<T>::end() { return begin() + _width*_height*_channels; }
template<typename T>
const T* PixelMap<T>::begin() const { return _pixel_map.get(); }
template<typename T>
const T* PixelMap<T>::end() const { return begin() + _width*_height*_channels; }
template<typename T>
T* PixelMap<T>::rowBegin(size_t row) { return begin() + row*_width*_channels; }
template<typename T>
T* PixelMap<T>::rowEnd(size_t row) { return rowBegin(row) + _width*_channels; }
template<typename T>
const T* PixelMap<T>::rowBegin(size_t row) const { return begin() + row*_width*_channels; }
template<typename T>
const T* PixelMap<T>::rowEnd(size_t row) const { return rowBegin(row) + _width*_channels; }
template<typename T>
T* PixelMap<T>::pixBegin(size_t row, size_t col) { return rowBegin(row) + col*_channels; }
template<typename T>
T* PixelMap<T>::pixEnd(size_t row, size_t col) { return pixBegin(row, col) + _channels; }
template<typename T>
const T* PixelMap<T>::pixBegin(size_t row, size_t col) const { return rowBegin(row) + col*_channels; }
template<typename T>
const T* PixelMap<T>::pixEnd(size_t row, size_t col) const { return pixBegin(row, col) + _channels; }
// Deep copy assignment; reallocates the backing buffer only when the
// geometry (height, width, channel count) differs from 'other'.
template<typename T>
PixelMap<T>& PixelMap<T>::operator=(const PixelMap<T>& other){
    const bool same_shape = _height == other.getHeight()
                         && _width == other.getWidth()
                         && _channels == other.getChannels();
    if(!same_shape){
        _height   = other.getHeight();
        _width    = other.getWidth();
        _channels = other.getChannels();
        _pixel_map.reset(nullptr);
        _pixel_map = std::make_unique<T[]>(_height*_width*_channels);
    }
    // Element-wise copy of the full sample buffer.
    const T* src = other.begin();
    for(T* dst = begin(); dst != end(); ++dst, ++src){
        *dst = *src;
    }
    return *this;
}
// Equality: dimensions must match and every sample must compare equal.
template<typename T>
bool PixelMap<T>::operator==(const PixelMap<T>& other) const{
    // BUGFIX: the dimension test was inverted (equal-sized maps returned
    // false immediately), so different-sized maps fell through to the
    // element comparison instead.
    if(!has_same_dims_as(other))
        return false;
    // BUGFIX: std::equal takes (first1, last1, first2); the original passed
    // (begin(), other.begin(), other.end()), i.e. a range bounded by two
    // unrelated buffers -- undefined behavior.
    return std::equal(begin(), end(), other.begin());
}
// True when height, width and channel count all agree with 'other'.
template<typename T>
bool PixelMap<T>::has_same_dims_as(const PixelMap<T>& other) const{
    return _height == other.getHeight()
        && _width == other.getWidth()
        && _channels == other.getChannels();
}
// Return a copy with the spatial axes swapped; channel interleaving per
// pixel is preserved.
template<typename T>
PixelMap<T> PixelMap<T>::transpose() const{
    PixelMap<T> out(_width, _height, _channels);
    for(size_t row = 0; row < _height; ++row)
        for(size_t col = 0; col < _width; ++col)
            for(size_t ch = 0; ch < _channels; ++ch)
                out(col, row, ch) = (*this)(row, col, ch);
    return out;
}
// if min val negative shift all values
/**
 * normalization:
 * x_new = (x - min)*(max_des - min_des)/(max-min)+min_des
 */
// Per-channel difference (this - other) followed by per-channel min/max
// normalization back into [0, 255].  Differences are biased by +255 so the
// intermediate values stay non-negative (range [0, 510]).
// NOTE(review): the omp pragmas below sit outside any parallel region and
// so are effectively no-ops -- confirm that is intended.
template<typename T>
PixelMap<T>& PixelMap<T>::subtract(const PixelMap<T>& other){
    assert(has_same_dims_as(other));
    std::vector<uint16_t> min_val(_channels);
    std::vector<uint16_t> max_val(_channels);
#ifdef _OPENMP
#pragma omp for simd
#endif
    for(size_t c = 0; c < _channels; ++c) min_val[c] = 510; // 2*255
#ifdef _OPENMP
#pragma omp for simd
#endif
    for(size_t c = 0; c < _channels; ++c) max_val[c] = 0;
    // First pass: biased differences plus running per-channel min/max.
    std::vector<uint16_t> buf(_width*_height*_channels);
    for(size_t i = 0; i < _height; ++i){
        for(size_t j = 0; j < _width; ++j){
            for(size_t c = 0; c < _channels; ++c){
                buf[c+(j+i*_width)*_channels] = (255 + (*this)(i, j, c)) - static_cast<uint32_t>(other(i, j, c));
                if(buf[c+(j+i*_width)*_channels] < min_val[c]) min_val[c] = buf[c+(j+i*_width)*_channels];
                else
                if(buf[c+(j+i*_width)*_channels] > max_val[c]) max_val[c] = buf[c+(j+i*_width)*_channels];
            }
        }
    }
    // Second pass: rescale each channel to [0, 255]; the 1e-10f keeps the
    // divisor non-zero when max == min.
    for(size_t i = 0; i < _height; ++i){
        for(size_t j = 0; j < _width; ++j){
            for(size_t c = 0; c < _channels; ++c){
                float fact = 255/(max_val[c]-min_val[c]+1e-10f/* == 0 ? 1e-10f : max_val[c]-min_val[c]*/);
                (*this)(i, j, c) = static_cast<png_byte>((buf[c+(j+i*_width)*_channels] - min_val[c]) * fact);
            }
        }
    }
    return *this;
}
// Dump all samples as "{c0, c1, ...}, " groups, one image row per line.
// NOTE(review): when T is png_byte (unsigned char) operator<< prints the raw
// character rather than its numeric value -- confirm this is intended.
template<typename T>
void PixelMap<T>::print() const{
    const T* it = begin();
    for(size_t y = 0; y < _height; ++y){
        for(size_t x = 0; x < _width; ++x){
            //const T* it = pixBegin(y, x);
            std::cout << "{";
            for(size_t c = 0; c < _channels; ++c){
                std::cout << *static_cast<const T*>(it++) << ", ";
            }
            std::cout << "}, ";
        }
        std::cout << std::endl;
    }
    std::cout << std::endl;
}
//////////////////////////////////////////////////////////////////////
// //
// BITMAP IMPLEM (RGBA) //
// //
//////////////////////////////////////////////////////////////////////
// Default: empty bitmap, no allocation.
BitMapRGBA::BitMapRGBA() :
    _height{ 0 }, _width{ 0 }{}
// Allocate a zero-initialized height x width packed-pixel buffer.
BitMapRGBA::BitMapRGBA(size_t height, size_t width) :
    _height{ height }, _width{ width },
    _data{ std::make_unique<uint32_t[]>(height*width) }{}
/**
 * Constructor with default arguments that may have different types
 * d_name == UNIFORM -> pixel in [0, 255]
 * d_name == NORMAL -> n_trials = height*width*channels | probability = 0.5
 */
// NOTE(review): for NORMAL this forwards (255, 0.5f), not
// height*width*channels as the comment above says -- confirm.
BitMapRGBA::BitMapRGBA(size_t height, size_t width, DistrName d_name) :
    BitMapRGBA(height, width, d_name, ((d_name==NORMAL) ? 255 : 0), ((d_name==NORMAL) ? 0.5f : 255)){}
// Fill every channel of every pixel with an independent random draw from
// the selected PRNG.h distribution.
template<typename U, typename V>
BitMapRGBA::BitMapRGBA(size_t height, size_t width, DistrName d_name, U param1, V param2) :
    _height{ height }, _width{ width },
    _data{ std::make_unique<uint32_t[]>(height*width) }{
    // distribution / uniform_dist / normal_dist come from PRNG.h.
    std::unique_ptr<distribution<uint32_t>> distr;
    switch(d_name){
        case UNIFORM:{
            distr = std::make_unique<uniform_dist<uint32_t>>(param1, param2);
            break;
        }
        case NORMAL:{
            distr = std::make_unique<normal_dist<uint32_t>>(param1, param2);
            break;
        }
        default:{
            printf("default bitmap noise: uniform\n");
            distr = std::make_unique<uniform_dist<uint32_t>>(param1, param2);
            break;
        }
    };
    for(size_t i = 0; i < _height; ++i){
        for(size_t j = 0; j < _width; ++j){
            // One draw per channel, packed as 0xAABBGGRR.
            _data[j+i*_width] = (((*distr)() << 0 )& 0x000000ff) |
                                (((*distr)() << 8 )& 0x0000ff00) |
                                (((*distr)() << 16)& 0x00ff0000) |
                                (((*distr)() << 24)& 0xff000000);
        }
    }
}
// Copy construction delegates to copy assignment (deep copy).
BitMapRGBA::BitMapRGBA(const BitMapRGBA& other){
    *this = other;
}
BitMapRGBA::BitMapRGBA(BitMapRGBA&& other) noexcept:
    _height{ other._height }, _width{ other._width },
    _data{ std::move(other._data) }{}
// Pack a 1-4 channel planar PixelMap into 32-bit words.
BitMapRGBA::BitMapRGBA(const PixelMap<png_byte>& pixelmap) :
    BitMapRGBA(pixelmap.getHeight(), pixelmap.getWidth()){
    size_t channels = pixelmap.getChannels();
    switch(channels){
        case 1:{
            // NOTE(review): grayscale is stored as gray|0xff in the two low
            // bytes (i.e. gray+alpha layout, not the RGBA layout of cases
            // 3/4) -- confirm downstream readers expect this.
            uint32_t *it_bm = begin();
            for(size_t i = 0; i < _height; ++i){
                for(size_t j = 0; j < _width; ++j){
                    const png_byte *pixel_pm = pixelmap.pixBegin(i, j);
                    *(it_bm++) = (                                  0xff00) |
                                 (static_cast<uint32_t>(*pixel_pm) & 0x00ff);
                }
            }
            break;
        }
        case 2:{
            // Gray + alpha: alpha in byte 1, gray in byte 0.
            uint32_t *it_bm = begin();
            for(size_t i = 0; i < _height; ++i){
                for(size_t j = 0; j < _width; ++j){
                    const png_byte *pixel_pm = pixelmap.pixBegin(i, j);
                    *(it_bm++) = (static_cast<uint32_t>(*(pixel_pm+1) << 8) & 0x0000ff00) |
                                 (static_cast<uint32_t>(* pixel_pm    << 0) & 0x000000ff);
                }
            }
            break;
        }
        case 3:{
            // RGB: alpha forced fully opaque.
            uint32_t *it_bm = begin();
            for(size_t i = 0; i < _height; ++i){
                for(size_t j = 0; j < _width; ++j){
                    const png_byte *pixel_pm = pixelmap.pixBegin(i, j);
                    *(it_bm++) = (                                            0xff000000) |
                                 ((static_cast<uint32_t>(*(pixel_pm+2)) << 16) & 0x00ff0000) |
                                 ((static_cast<uint32_t>(*(pixel_pm+1)) << 8 ) & 0x0000ff00) |
                                 ( static_cast<uint32_t>(* pixel_pm   )        & 0x000000ff);
                }
            }
            break;
        }
        case 4:{
            // RGBA: all four channels copied.
            uint32_t *it_bm = begin();
            for(size_t i = 0; i < _height; ++i){
                for(size_t j = 0; j < _width; ++j){
                    const png_byte *pixel_pm = pixelmap.pixBegin(i, j);
                    *(it_bm++) = ((static_cast<uint32_t>(*(pixel_pm+3)) << 24) & 0xff000000) |
                                 ((static_cast<uint32_t>(*(pixel_pm+2)) << 16) & 0x00ff0000) |
                                 ((static_cast<uint32_t>(*(pixel_pm+1)) << 8 ) & 0x0000ff00) |
                                 ( static_cast<uint32_t>(* pixel_pm   )        & 0x000000ff);
                }
            }
            break;
        }
        default:{
            printf("Must provide (1-4) channels for Bitmap construction.\n");
            abort();
        }
    }
}
// Geometry accessors.
size_t BitMapRGBA::getHeight() const{ return _height; }
size_t BitMapRGBA::getWidth() const{ return _width; }
// Extract channel c (0=r, 1=g, 2=b, 3=a) of pixel (i, j).
unsigned char
BitMapRGBA::get(size_t i, size_t j, size_t c) const{ return (_data[j+i*_width] >> (8*c) & 0xff); }
// Packed-pixel element access and whole-buffer iterators.
uint32_t& BitMapRGBA::operator()(size_t i, size_t j){ return _data[j+i*_width]; }
const uint32_t& BitMapRGBA::operator()(size_t i, size_t j) const{ return _data[j+i*_width]; }
uint32_t* BitMapRGBA::begin(){ return _data.get(); }
uint32_t* BitMapRGBA::end(){ return begin() + _height*_width; }
const uint32_t* BitMapRGBA::begin() const{ return _data.get(); }
const uint32_t* BitMapRGBA::end() const{ return begin() + _height*_width; }
// Row iterator range [rowBegin(row), rowEnd(row)).
// BUGFIX: the row offset is row*_width; the previous row*_height*_width
// stepped a whole image per row and pointed far past the buffer for any
// row > 0 (out-of-bounds access).
uint32_t* BitMapRGBA::rowBegin(size_t row){ return begin() + row*_width; }
uint32_t* BitMapRGBA::rowEnd(size_t row){ return rowBegin(row) + _width; }
const uint32_t* BitMapRGBA::rowBegin(size_t row) const{ return begin() + row*_width; }
const uint32_t* BitMapRGBA::rowEnd(size_t row) const{ return rowBegin(row) + _width; }
// Pack one interleaved sample row (channels per pixel = 'channels') into
// bitmap row 'row', ORing each sample into its channel lane.
// NOTE(review): assumes _data[row] starts zeroed (the |= never clears).
template<typename rowIt>
void BitMapRGBA::copy_row(rowIt rowit, size_t row, size_t channels){
    //assert(row < _height && (*rowit)->getWidth() == _width);
    for(size_t j = 0; j < _width; ++j){
        for(size_t c = 0; c < channels; ++c){
            _data[j+row*_width] |= (*(rowit+(c+j*channels)) << rgba_utils.shift[c] & rgba_utils.rgba[c]);
        }
    }
}
// Return a copy with rows and columns swapped.
BitMapRGBA BitMapRGBA::transpose() const{
    BitMapRGBA res(_width, _height);
    for(size_t i = 0; i < _height; ++i){
        for(size_t j = 0; j < _width; ++j){
            res(j, i) = _data[j + i*_width];
        }
    }
    return res;
}
/*
 * shift all values by min_value to avoid negative values
 *
 * normalization:
 * x_new = (x - min)*(max_des - min_des)/(max-min)+min_des
 *
 * each rgba max value "spreads" in uint64_t integer
 * max possible value: 2 * 255 = 510 = 0x01fe
 */
// RGB-only difference (alpha copied through), channels widened into 16-bit
// lanes of a uint64_t (b:32, g:16, r:0) and biased by +255 per lane.
// NOTE(review): the final extraction masks (0xff...) keep only 8 of the 9
// possible lane bits before scaling, and the *_fact divisors can be 0 when
// a channel has zero spread -- confirm both are acceptable here.
BitMapRGBA& BitMapRGBA::subtract_rgb(const BitMapRGBA& other){
    uint64_t min_val = 0x000001fe01fe01fe;
    uint64_t max_val = 0x0;
    std::vector<uint64_t> buf(_width*_height, 0);
    for(size_t n = 0; n < _width*_height; ++n) buf[n] = 0x000000ff00ff00ff;
    const uint32_t *this_it = begin();
    const uint32_t *other_it = other.begin();
    for(size_t n = 0; n < _width*_height; ++this_it, ++other_it, ++n){
        buf[n] += ((static_cast<uint64_t>(*this_it & 0xff0000)) << 16) |
                  ((static_cast<uint64_t>(*this_it & 0xff00)) << 8) |
                  ( static_cast<uint64_t>(*this_it & 0xff));
        buf[n] -= ((static_cast<uint64_t>(*other_it & 0xff0000)) << 16) |
                  ((static_cast<uint64_t>(*other_it & 0xff00)) << 8) |
                  ( static_cast<uint64_t>(*other_it & 0xff));
        // Lane-wise running min / max across all pixels.
        min_val = ((buf[n] & 0x01fe00000000) < (min_val & 0x01fe00000000) ? (buf[n] & 0x01fe00000000) : (min_val & 0x01fe00000000)) |
                  ((buf[n] & 0x01fe0000) < (min_val & 0x01fe0000) ? (buf[n] & 0x01fe0000) : (min_val & 0x01fe0000)) |
                  ((buf[n] & 0x01fe) < (min_val & 0x01fe) ? (buf[n] & 0x01fe) : (min_val & 0x01fe));
        max_val = ((buf[n] & 0x01fe00000000) > (max_val & 0x01fe00000000) ? (buf[n] & 0x01fe00000000) : (max_val & 0x01fe00000000)) |
                  ((buf[n] & 0x01fe0000) > (max_val & 0x01fe0000) ? (buf[n] & 0x01fe0000) : (max_val & 0x01fe0000)) |
                  ((buf[n] & 0x01fe) > (max_val & 0x01fe) ? (buf[n] & 0x01fe) : (max_val & 0x01fe));
    }
    uint64_t max_min = max_val - min_val;
    // norm. factor for each col. channel (diff. can still be 0x1fe at this point)
    float b_fact = 255.0f / ((max_min & 0x1fe00000000) >> 32);
    float g_fact = 255.0f / ((max_min & 0x1fe0000) >> 16);
    float r_fact = 255.0f / ( max_min & 0x1fe);
    uint32_t *this_it_update = begin();
    for(size_t n = 0; n < _width*_height; ++n, ++this_it_update){
        //res = (buf[n] - min_val) * fact;
        buf[n] -= min_val;
        // shifting the bits so that they line up in proper uint32_t position. Alpha channel is copied
        *this_it_update = ((*this_it_update) & 0xff000000) |
                          ((static_cast<uint32_t>(((buf[n] & 0xff00000000) >> 32) * b_fact)/* & 0xff*/) << 16) |
                          ((static_cast<uint32_t>(((buf[n] & 0x0000ff0000) >> 16) * g_fact)/* & 0xff*/) << 8 ) |
                          ((static_cast<uint32_t>(((buf[n] & 0x00000000ff)      ) * r_fact)/* & 0xff*/)      );
    }
    return *this;
}
/*
 * Per-pixel RGBA difference against 'other', then per-channel min/max
 * normalization back into [0, 255].
 *
 * Each 8-bit channel is widened into its own 16-bit lane of a uint64_t
 * (a:48, b:32, g:16, r:0) and biased by +255 so the difference cannot go
 * negative; lane values therefore lie in [0, 0x1fe] (9 bits).
 */
BitMapRGBA& BitMapRGBA::subtract_rgba(const BitMapRGBA& other){
    uint64_t min_val = 0x01fe01fe01fe01fe;
    uint64_t max_val = 0x0;
    std::vector<uint64_t> buf(_width*_height, 0);
    // +255 bias in every lane.
    for(size_t n = 0; n < _width*_height; ++n) buf[n] = 0x00ff00ff00ff00ff;
    const uint32_t *this_it = begin();
    const uint32_t *other_it = other.begin();
    for(size_t n = 0; n < _width*_height; ++n, ++this_it, ++other_it){
        buf[n] += ((static_cast<uint64_t>(*this_it & 0xff000000)) << 24) |
                  ((static_cast<uint64_t>(*this_it & 0xff0000)) << 16) |
                  ((static_cast<uint64_t>(*this_it & 0xff00)) << 8) |
                  ( static_cast<uint64_t>(*this_it & 0xff));
        buf[n] -= ((static_cast<uint64_t>(*other_it & 0xff000000)) << 24) |
                  ((static_cast<uint64_t>(*other_it & 0xff0000)) << 16) |
                  ((static_cast<uint64_t>(*other_it & 0xff00)) << 8) |
                  ( static_cast<uint64_t>(*other_it & 0xff));
        // Lane-wise running min / max across all pixels.
        min_val = ((buf[n] & 0x01fe000000000000) < (min_val & 0x01fe000000000000) ? (buf[n] & 0x01fe000000000000) : (min_val & 0x01fe000000000000)) |
                  ((buf[n] & 0x01fe00000000) < (min_val & 0x01fe00000000) ? (buf[n] & 0x01fe00000000) : (min_val & 0x01fe00000000)) |
                  ((buf[n] & 0x01fe0000) < (min_val & 0x01fe0000) ? (buf[n] & 0x01fe0000) : (min_val & 0x01fe0000)) |
                  ((buf[n] & 0x01fe) < (min_val & 0x01fe) ? (buf[n] & 0x01fe) : (min_val & 0x01fe));
        max_val = ((buf[n] & 0x01fe000000000000) > (max_val & 0x01fe000000000000) ? (buf[n] & 0x01fe000000000000) : (max_val & 0x01fe000000000000)) |
                  ((buf[n] & 0x01fe00000000) > (max_val & 0x01fe00000000) ? (buf[n] & 0x01fe00000000) : (max_val & 0x01fe00000000)) |
                  ((buf[n] & 0x01fe0000) > (max_val & 0x01fe0000) ? (buf[n] & 0x01fe0000) : (max_val & 0x01fe0000)) |
                  ((buf[n] & 0x01fe) > (max_val & 0x01fe) ? (buf[n] & 0x01fe) : (max_val & 0x01fe));
    }
    uint64_t max_min = max_val - min_val;
    // Per-channel normalization factor (spread can still be 0x1fe here).
    // BUGFIX: guard the divisor -- a channel with zero spread produced
    // 255/0 -> inf and then 0*inf -> NaN in the original.
    float a_fact = 255.0f / std::max<uint64_t>((max_min & 0x1fe000000000000) >> 48, 1);
    float b_fact = 255.0f / std::max<uint64_t>((max_min & 0x1fe00000000) >> 32, 1);
    float g_fact = 255.0f / std::max<uint64_t>((max_min & 0x1fe0000) >> 16, 1);
    float r_fact = 255.0f / std::max<uint64_t>( max_min & 0x1fe, 1);
    uint32_t *this_it_update = begin();
    for(size_t n = 0; n < _width*_height; ++n, ++this_it_update){
        buf[n] -= min_val;
        // BUGFIX: lane extraction. The alpha lane sits at bit 48, but the old
        // mask (0xff0000000000) selected bits 40..47 before shifting right by
        // 48, so the stored alpha was always 0.  All masks are also widened
        // to 9 bits (0x1ff) so the maximum lane value 0x1fe is not truncated
        // before scaling, and the scaled result is clamped to 8 bits.
        *this_it_update = ((static_cast<uint32_t>(((buf[n] & 0x01ff000000000000) >> 48) * a_fact) & 0xff) << 24) |
                          ((static_cast<uint32_t>(((buf[n] & 0x000001ff00000000) >> 32) * b_fact) & 0xff) << 16) |
                          ((static_cast<uint32_t>(((buf[n] & 0x0000000001ff0000) >> 16) * g_fact) & 0xff) << 8 ) |
                          ((static_cast<uint32_t>( (buf[n] & 0x00000000000001ff)        * r_fact) & 0xff)      );
    }
    return *this;
}
// Deep copy assignment; reallocates the pixel buffer only when the
// geometry differs from 'other'.
BitMapRGBA& BitMapRGBA::operator=(const BitMapRGBA& other){
    const bool same_shape = _height == other.getHeight()
                         && _width == other.getWidth();
    if(!same_shape){
        _height = other.getHeight();
        _width  = other.getWidth();
        _data.reset(nullptr);
        _data = std::make_unique<uint32_t[]>(_height*_width);
    }
    // Word-wise copy of all packed pixels.
    const uint32_t* src = other.begin();
    for(uint32_t* dst = begin(); dst != end(); ++dst, ++src){
        *dst = *src;
    }
    return *this;
}
/*
BitMapRGBA& BitMapRGBA::operator=(const PixelMap<png_byte>& pixelmap){
*this = BitMapRGBA(pixelmap);
return *this;
}
*/
// Dump each channel as a flat comma-separated list ("red = [...]", etc.),
// all pixels of one channel on a single logical line.
void BitMapRGBA::print() const{
    printf("red = [");
    for(size_t i = 0; i < _height; ++i){
        for(size_t j = 0; j < _width; ++j){
            if(i == _height-1 && j == _width-1)
                printf("%d]", (int)(_data[j+i*_width] >> 0 & 0xff));
            else
                printf("%d,", (int)(_data[j+i*_width] >> 0 & 0xff));
        }
        //printf("\n");
    }
    printf("\n");
    printf("green = [");
    for(size_t i = 0; i < _height; ++i){
        for(size_t j = 0; j < _width; ++j){
            if(i == _height-1 && j == _width-1)
                printf("%d]", (int)(_data[j+i*_width] >> 8 & 0xff));
            else
                printf("%d,", (int)(_data[j+i*_width] >> 8 & 0xff));
        }
        //printf("\n");
    }
    printf("\n");
    printf("blue = [");
    for(size_t i = 0; i < _height; ++i){
        for(size_t j = 0; j < _width; ++j){
            if(i == _height-1 && j == _width-1)
                printf("%d]", (int)(_data[j+i*_width] >> 16 & 0xff));
            else
                printf("%d,", (int)(_data[j+i*_width] >> 16 & 0xff));
        }
        //printf("\n");
    }
    printf("\n");
    printf("alpha = [");
    for(size_t i = 0; i < _height; ++i){
        for(size_t j = 0; j < _width; ++j){
            if(i == _height-1 && j == _width-1)
                printf("%d]", (int)(_data[j+i*_width] >> 24 & 0xff));
            else
                printf("%d,", (int)(_data[j+i*_width] >> 24 & 0xff));
        }
        //printf("\n");
    }
    printf("\n");
}
// Same dump but with a newline after every image row (2-D layout).
// The alpha channel block is currently disabled.
void BitMapRGBA::print_bitmap() const{
    printf("red = [");
    for(size_t i = 0; i < _height; ++i){
        for(size_t j = 0; j < _width; ++j){
            if(i == _height-1 && j == _width-1)
                printf("%d],", (int)((_data[j+i*_width] >> 0) & 0xff));
            else
                printf("%d,", (int)((_data[j+i*_width] >> 0) & 0xff));
        }
        printf("\n");
    }
    printf("\n");
    printf("green = [");
    for(size_t i = 0; i < _height; ++i){
        for(size_t j = 0; j < _width; ++j){
            if(i == _height-1 && j == _width-1)
                printf("%d],", (int)((_data[j+i*_width] >> 8) & 0xff));
            else
                printf("%d,", (int)((_data[j+i*_width] >> 8) & 0xff));
        }
        printf("\n");
    }
    printf("\n");
    printf("blue = [");
    for(size_t i = 0; i < _height; ++i){
        for(size_t j = 0; j < _width; ++j){
            if(i == _height-1 && j == _width-1)
                printf("%d],", (int)((_data[j+i*_width] >> 16) & 0xff));
            else
                printf("%d,", (int)((_data[j+i*_width] >> 16) & 0xff));
        }
        printf("\n");
    }
    printf("\n");
    /*
    printf("alpha = [");
    for(size_t i = 0; i < _height; ++i){
        for(size_t j = 0; j < _width; ++j){
            if(i == _height-1 && j == _width-1)
                printf("%d],", (int)((_data[j+i*_width] >> 24) & 0xff));
            else
                printf("%d,", (int)((_data[j+i*_width] >> 24) & 0xff));
        }
        printf("\n");
    }
    printf("\n");
    */
}
//////////////////////////////////////////////////////////////////////
// //
// BITMAP IMPLEM (RGB) //
// //
//////////////////////////////////////////////////////////////////////
// Default: empty bitmap, no allocation.
BitMapRGB::BitMapRGB() :
    _height{ 0 }, _width{ 0 }{}
// Allocate a zero-initialized height x width packed-pixel buffer.
BitMapRGB::BitMapRGB(size_t height, size_t width) :
    _height{ height }, _width{ width },
    _data{ std::make_unique<uint32_t[]>(height*width) }{}
/**
 * Constructor with default arguments that may have different types
 * d_name == UNIFORM -> pixel in [0, 255]
 * d_name == NORMAL -> n_trials = height*width*channels | probability = 0.5
 */
// NOTE(review): for NORMAL this forwards (255, 0.5f), not
// height*width*channels as the comment above says -- confirm.
BitMapRGB::BitMapRGB(size_t height, size_t width, DistrName d_name) :
    BitMapRGB(height, width, d_name, ((d_name==NORMAL) ? 255 : 0), ((d_name==NORMAL) ? 0.5f : 255)){}
// Fill every channel of every pixel with an independent random draw from
// the selected PRNG.h distribution (high byte left 0 -- no alpha).
template<typename U, typename V>
BitMapRGB::BitMapRGB(size_t height, size_t width, DistrName d_name, U param1, V param2) :
    _height{ height }, _width{ width },
    _data{ std::make_unique<uint32_t[]>(height*width) }{
    // distribution / uniform_dist / normal_dist come from PRNG.h.
    std::unique_ptr<distribution<uint32_t>> distr;
    switch(d_name){
        case UNIFORM:{
            distr = std::make_unique<uniform_dist<uint32_t>>(param1, param2);
            break;
        }
        case NORMAL:{
            distr = std::make_unique<normal_dist<uint32_t>>(param1, param2);
            break;
        }
        default:{
            printf("default bitmap noise: uniform\n");
            distr = std::make_unique<uniform_dist<uint32_t>>(param1, param2);
            break;
        }
    };
    for(size_t i = 0; i < _height; ++i){
        for(size_t j = 0; j < _width; ++j){
            // One draw per channel, packed as 0x00BBGGRR.
            _data[j+i*_width] = (( *distr)()        & 0xff) |
                                (((*distr)() << 8 )& 0xff00) |
                                (((*distr)() << 16)& 0xff0000);
        }
    }
}
// Copy construction delegates to copy assignment (deep copy).
BitMapRGB::BitMapRGB(const BitMapRGB& other){
    *this = other;
}
BitMapRGB::BitMapRGB(BitMapRGB&& other) noexcept:
    _height{ other._height }, _width{ other._width },
    _data{ std::move(other._data) }{}
// Pack a 1-4 channel planar PixelMap into 32-bit words (alpha dropped).
BitMapRGB::BitMapRGB(const PixelMap<png_byte>& pixelmap) :
    BitMapRGB(pixelmap.getHeight(), pixelmap.getWidth()){
    size_t channels = pixelmap.getChannels();
    switch(channels){
        case 1:{
            uint32_t *it_bm = begin();
            for(size_t i = 0; i < _height; ++i){
                for(size_t j = 0; j < _width; ++j){
                    const png_byte *pixel_pm = pixelmap.pixBegin(i, j); // first channel
                    *(it_bm++) = (static_cast<uint32_t>(*pixel_pm) & 0xff);
                }
            }
            break;
        }
        case 2:{
            uint32_t *it_bm = begin();
            for(size_t i = 0; i < _height; ++i){
                for(size_t j = 0; j < _width; ++j){
                    const png_byte *pixel_pm = pixelmap.pixBegin(i, j); // first and alpha channels
                    *(it_bm++) = (static_cast<uint32_t>(*(pixel_pm+1) << 8) & 0xff00) |
                                 (static_cast<uint32_t>(* pixel_pm        ) & 0x00ff);
                }
            }
            break;
        }
        case 3:{
            uint32_t *it_bm = begin();
            for(size_t i = 0; i < _height; ++i){
                for(size_t j = 0; j < _width; ++j){
                    const png_byte *pixel_pm = pixelmap.pixBegin(i, j); // rgb channels
                    *(it_bm++) = ((static_cast<uint32_t>(*(pixel_pm+2)) << 16) & 0xff0000) |
                                 ((static_cast<uint32_t>(*(pixel_pm+1)) << 8 ) & 0x00ff00) |
                                 ( static_cast<uint32_t>(* pixel_pm   )        & 0x0000ff);
                }
            }
            break;
        }
        case 4:{
            uint32_t *it_bm = begin();
            for(size_t i = 0; i < _height; ++i){
                for(size_t j = 0; j < _width; ++j){
                    const png_byte *pixel_pm = pixelmap.pixBegin(i, j); // skip alpha channel
                    *(it_bm++) = ((static_cast<uint32_t>(*(pixel_pm+2)) << 16) & 0xff0000) |
                                 ((static_cast<uint32_t>(*(pixel_pm+1)) << 8 ) & 0x00ff00) |
                                 ( static_cast<uint32_t>(* pixel_pm   )        & 0x0000ff);
                }
            }
            break;
        }
        default:{
            printf("Must provide (1-4) channels for Bitmap construction.\n");
            abort();
        }
    }
}
// Geometry accessors.
size_t BitMapRGB::getHeight() const{ return _height; }
size_t BitMapRGB::getWidth() const{ return _width; }
// Extract channel c (0=r, 1=g, 2=b) of pixel (i, j).
unsigned char
BitMapRGB::get(size_t i, size_t j, size_t c) const{ return (_data[j+i*_width] >> (8*c) & 0xff); }
// Packed-pixel element access and whole-buffer iterators.
uint32_t& BitMapRGB::operator()(size_t i, size_t j){ return _data[j+i*_width]; }
const uint32_t& BitMapRGB::operator()(size_t i, size_t j) const{ return _data[j+i*_width]; }
uint32_t* BitMapRGB::begin(){ return _data.get(); }
uint32_t* BitMapRGB::end(){ return begin() + _height*_width; }
const uint32_t* BitMapRGB::begin() const{ return _data.get(); }
const uint32_t* BitMapRGB::end() const{ return begin() + _height*_width; }
// Row iterator range [rowBegin(row), rowEnd(row)).
// BUGFIX: the row offset is row*_width; the previous row*_height*_width
// stepped a whole image per row and pointed far past the buffer for any
// row > 0 (out-of-bounds access).
uint32_t* BitMapRGB::rowBegin(size_t row){ return begin() + row*_width; }
uint32_t* BitMapRGB::rowEnd(size_t row){ return rowBegin(row) + _width; }
const uint32_t* BitMapRGB::rowBegin(size_t row) const{ return begin() + row*_width; }
const uint32_t* BitMapRGB::rowEnd(size_t row) const{ return rowBegin(row) + _width; }
// Deep copy assignment; reallocates the pixel buffer only when the
// geometry differs from 'other'.
BitMapRGB& BitMapRGB::operator=(const BitMapRGB& other){
    const bool same_shape = _height == other.getHeight()
                         && _width == other.getWidth();
    if(!same_shape){
        _height = other.getHeight();
        _width  = other.getWidth();
        _data.reset(nullptr);
        _data = std::make_unique<uint32_t[]>(_height*_width);
    }
    // Word-wise copy of all packed pixels.
    const uint32_t* src = other.begin();
    for(uint32_t* dst = begin(); dst != end(); ++dst, ++src){
        *dst = *src;
    }
    return *this;
}
/*
BitMapRGB& BitMapRGB::operator=(const PixelMap<png_byte>& pixelmap){
*this = BitMapRGB(pixelmap);
return *this;
}
*/
// Pack one interleaved sample row (channels per pixel = 'channels') into
// bitmap row 'row', ORing each sample into its channel lane.
// NOTE: assumes _data[row] starts zeroed (the |= never clears).
template<typename rowIt>
void BitMapRGB::copy_row(rowIt rowit, size_t row, size_t channels){
    assert(channels < 4);
    //assert(row < _height && (*rowit)->getWidth() == _width);
    for(size_t j = 0; j < _width; ++j){
        // BUGFIX: iterate only over the channels actually present.  The old
        // loop ran c < 3 unconditionally, so for 1- or 2-channel rows it
        // read rowit + (c + j*channels) past the end of the source row
        // (out-of-bounds) and smeared neighboring pixels into the g/b lanes.
        // This also matches BitMapRGBA::copy_row, which loops c < channels.
        for(size_t c = 0; c < channels; ++c){
            _data[j+row*_width] |= (*(rowit+(c+j*channels)) << rgba_utils.shift[c] & rgba_utils.rgba[c]);
        }
    }
}
// Return a copy with rows and columns swapped.
BitMapRGB BitMapRGB::transpose() const{
    BitMapRGB res(_width, _height);
    for(size_t i = 0; i < _height; ++i){
        for(size_t j = 0; j < _width; ++j){
            res(j, i) = _data[j + i*_width];
        }
    }
    return res;
}
// Per-pixel RGB difference (this - other) followed by per-channel min/max
// normalization to [0, 255].  Channels are widened into 16-bit lanes of a
// uint64_t (b:32, g:16, r:0) and biased by +255 so values stay in [0, 0x1fe].
// NOTE(review): the final extraction masks (0xff...) keep only 8 of the 9
// possible lane bits before scaling, and the *_fact divisors can be 0 when
// a channel has zero spread -- confirm both are acceptable here.
BitMapRGB& BitMapRGB::subtract(const BitMapRGB& other){
    uint64_t min_val = 0x01fe01fe01fe;
    uint64_t max_val = 0x0;
    std::vector<uint64_t> buf(_width*_height, 0);
    for(size_t n = 0; n < _width*_height; ++n) buf[n] = 0x00ff00ff00ff;
    const uint32_t *this_it = begin();
    const uint32_t *other_it = other.begin();
    for(size_t n = 0; n < _width*_height; ++this_it, ++other_it, ++n){
        buf[n] += ((static_cast<uint64_t>(*this_it & 0xff0000)) << 16) |
                  ((static_cast<uint64_t>(*this_it & 0xff00)) << 8) |
                  ( static_cast<uint64_t>(*this_it & 0xff));
        buf[n] -= ((static_cast<uint64_t>(*other_it & 0xff0000)) << 16) |
                  ((static_cast<uint64_t>(*other_it & 0xff00)) << 8) |
                  ( static_cast<uint64_t>(*other_it & 0xff));
        // Lane-wise running min / max across all pixels.
        min_val = ((buf[n] & 0x01fe00000000) < (min_val & 0x01fe00000000) ? (buf[n] & 0x01fe00000000) : (min_val & 0x01fe00000000)) |
                  ((buf[n] & 0x01fe0000) < (min_val & 0x01fe0000) ? (buf[n] & 0x01fe0000) : (min_val & 0x01fe0000)) |
                  ((buf[n] & 0x01fe) < (min_val & 0x01fe) ? (buf[n] & 0x01fe) : (min_val & 0x01fe));
        max_val = ((buf[n] & 0x01fe00000000) > (max_val & 0x01fe00000000) ? (buf[n] & 0x01fe00000000) : (max_val & 0x01fe00000000)) |
                  ((buf[n] & 0x01fe0000) > (max_val & 0x01fe0000) ? (buf[n] & 0x01fe0000) : (max_val & 0x01fe0000)) |
                  ((buf[n] & 0x01fe) > (max_val & 0x01fe) ? (buf[n] & 0x01fe) : (max_val & 0x01fe));
    }
    uint64_t max_min = max_val - min_val;
    // norm. factor for each col. channel (diff. can still be 0x1fe at this point)
    float b_fact = 255.0f / ((max_min & 0x1fe00000000) >> 32);
    float g_fact = 255.0f / ((max_min & 0x1fe0000) >> 16);
    float r_fact = 255.0f / ( max_min & 0x1fe);
    uint32_t *this_it_update = begin();
    for(size_t n = 0; n < _width*_height; ++n, ++this_it_update){
        //res = (buf[n] - min_val) * fact;
        buf[n] -= min_val;
        // shifting the bits so that they line up in proper uint32_t position. Alpha channel is copied
        *this_it_update = ((static_cast<uint32_t>(((buf[n] & 0xff00000000) >> 32) * b_fact)/* & 0xff*/) << 16) |
                          ((static_cast<uint32_t>(((buf[n] & 0x0000ff0000) >> 16) * g_fact)/* & 0xff*/) << 8 ) |
                          ((static_cast<uint32_t>(((buf[n] & 0x00000000ff)      ) * r_fact)/* & 0xff*/)      );
    }
    return *this;
}
// Prints the red, green and blue channels of the bitmap as flat,
// comma-separated bracketed lists (one list per channel), e.g.
// "red = [1,2,...,n]". Pixels are emitted in row-major order and the
// alpha channel is not printed.
void BitMapRGB::print() const{
    const size_t total = _width * _height;
    // Emit one colour channel: every pixel value separated by commas,
    // the final value followed by the closing bracket, then a newline.
    auto dump_channel = [&](const char* header, unsigned shift){
        printf("%s", header);
        for(size_t idx = 0; idx < total; ++idx){
            const int component = (int)(_data[idx] >> shift & 0xff);
            if(idx + 1 == total)
                printf("%d]", component);
            else
                printf("%d,", component);
        }
        printf("\n");
    };
    // Channel layout in each uint32_t pixel: red in bits 0-7,
    // green in bits 8-15, blue in bits 16-23.
    dump_channel("red = [", 0);
    dump_channel("green = [", 8);
    dump_channel("blue = [", 16);
}
// Prints the red, green and blue channels of the bitmap with one text
// line per bitmap row (unlike print(), which emits each channel on a
// single line). The very last pixel of a channel is followed by "],".
// The alpha channel is not printed.
void BitMapRGB::print_bitmap() const{
    // Emit one colour channel row by row; `shift` selects the channel
    // byte inside each uint32_t pixel.
    auto dump_rows = [&](const char* header, unsigned shift){
        printf("%s", header);
        for(size_t row = 0; row < _height; ++row){
            for(size_t col = 0; col < _width; ++col){
                const int component = (int)(_data[col + row*_width] >> shift & 0xff);
                const bool last = (row == _height-1 && col == _width-1);
                printf(last ? "%d]," : "%d,", component);
            }
            printf("\n"); // newline after every bitmap row
        }
        printf("\n");
    };
    dump_rows("red = [", 0);
    dump_rows("green = [", 8);
    dump_rows("blue = [", 16);
}
#include "ghost/config.h"
#include "ghost/core.h"
#include "ghost/types.h"
#include "ghost/util.h"
#include "ghost/context.h"
#include "ghost/locality.h"
#include "ghost/bincrs.h"
#include "ghost/matrixmarket.h"
#include "ghost/log.h"
#include "ghost/omp.h"
#include "ghost/machine.h"
#include "ghost/bench.h"
#include "ghost/map.h"
#include <float.h>
#include <math.h>
/**
 * @brief Allocate and initialize a ghost_context.
 *
 * @param context       Where to store the newly allocated context.
 * @param gnrows        Global number of matrix rows (must not be zero).
 * @param gncols        Global number of matrix columns; zero means "same as gnrows".
 * @param context_flags Context flags. If no distribution scheme is set,
 *                      GHOST_CONTEXT_DIST_ROWS is selected.
 * @param comm          The (MPI) communicator spanning this context.
 * @param weight        Relative work weight of this rank; must be >= 0.
 *                      A weight of zero requests automatic weighting via a
 *                      bandwidth benchmark.
 *
 * @return GHOST_SUCCESS on success or an error indication.
 */
ghost_error ghost_context_create(ghost_context **context, ghost_gidx gnrows, ghost_gidx gncols, ghost_context_flags_t context_flags, ghost_mpi_comm comm, double weight)
{
    GHOST_FUNC_ENTER(GHOST_FUNCTYPE_SETUP);
    if (weight < 0) {
        GHOST_ERROR_LOG("Negative weight");
        return GHOST_ERR_INVALID_ARG;
    }
    int nranks, me;
    ghost_error ret = GHOST_SUCCESS;
    GHOST_CALL_GOTO(ghost_nrank(&nranks, comm),err,ret);
    GHOST_CALL_GOTO(ghost_rank(&me, comm),err,ret);

    if (fabs(weight) < DBL_MIN) {
        /* weight == 0: derive the weight automatically from each rank's
         * measured UPDATE memory bandwidth. */
#ifdef GHOST_USE_MPI
        double max_bw=0.0;
        double avgweight;
        int withinavg = 0;
        int totalwithinavg = 0;
        GHOST_CALL_GOTO(ghost_bench_bw(GHOST_BENCH_UPDATE,&weight,&max_bw),err,ret);
        MPI_CALL_GOTO(MPI_Allreduce(&weight,&avgweight,1,MPI_DOUBLE,MPI_SUM,comm),err,ret);
        avgweight /= nranks;
        /* Count how many ranks lie within 10% of the average bandwidth. */
        if (fabs(weight-avgweight)/avgweight < 0.1) {
            withinavg = 1;
        }
        MPI_CALL_GOTO(MPI_Allreduce(&withinavg,&totalwithinavg,1,MPI_INT,MPI_SUM,comm),err,ret);
        /* Fix: a stray "totalwithinavg" token stood here on its own,
         * which does not compile when GHOST_USE_MPI is defined. */
        if (nranks > 1) {
            if (totalwithinavg == nranks) {
                GHOST_INFO_LOG("The bandwidths of all processes differ by less than 10%%, the weights will be fixed to 1.0 to avoid artifacts.");
                weight = 1.0;
            } else {
                GHOST_INFO_LOG("The bandwidths of all processes differ by more than 10%%, automatically setting weight to %.2f according to UPDATE bandwidth!",weight);
            }
        }
#else
        weight = 1.0;
#endif
    }

    if (!gnrows) {
        GHOST_ERROR_LOG("The global number of rows (and columns for non-square matrices) must not be zero!");
        return GHOST_ERR_INVALID_ARG;
    }
    if (!gncols) {
        gncols = gnrows; /* square matrix by default */
    }

    ghost_lidx *target_rows = NULL;
    char *tmpval = NULL;
    ghost_gidx *tmpcol = NULL;

    GHOST_CALL_GOTO(ghost_malloc((void **)context,sizeof(ghost_context)),err,ret);
    /* Initialize all members to sane empty values; the communication
     * data structures get filled later in ghost_context_comm_init(). */
    (*context)->weight=weight;
    (*context)->flags = context_flags;
    (*context)->mpicomm = comm;
    (*context)->mpicomm_parent = MPI_COMM_NULL;
    (*context)->wishes = NULL;
    (*context)->dues = NULL;
    (*context)->hput_pos = NULL;
    (*context)->cu_duelist = NULL;
    (*context)->duelist = NULL;
    (*context)->wishlist = NULL;
    (*context)->duepartners = NULL;
    (*context)->nduepartners = 0;
    (*context)->wishpartners = NULL;
    (*context)->nwishpartners = 0;
    (*context)->entsInCol = NULL;
    (*context)->ncolors = 0;
    (*context)->color_ptr = NULL;
    (*context)->nzones = 0;
    (*context)->zone_ptr = NULL;
    (*context)->kacz_setting.kacz_method = GHOST_KACZ_METHOD_MC;
    (*context)->kacz_setting.active_threads = 0;
    (*context)->bandwidth = 0;
    (*context)->lowerBandwidth = 0;
    (*context)->upperBandwidth = 0;
    (*context)->avg_ptr = NULL;
    (*context)->mapAvg = NULL;
    (*context)->mappedDuelist = NULL;
    (*context)->nrankspresent = NULL;
    (*context)->nmats = 1;

    GHOST_CALL_GOTO(ghost_map_create(&((*context)->row_map),gnrows,comm,GHOST_MAP_ROW,GHOST_MAP_DEFAULT),err,ret);
    GHOST_CALL_GOTO(ghost_map_create(&((*context)->col_map),gncols,comm,GHOST_MAP_COL,GHOST_MAP_DEFAULT),err,ret);

    /* Default to row-wise distribution if nonzero-wise was not requested. */
    if (!((*context)->flags & GHOST_CONTEXT_DIST_NZ)) {
        (*context)->flags |= (ghost_context_flags_t)GHOST_CONTEXT_DIST_ROWS;
    }

    GHOST_DEBUG_LOG(1,"Context created successfully");
    goto out;
err:
    free(*context); *context = NULL;
out:
    free(tmpval); tmpval = NULL;
    free(tmpcol); tmpcol = NULL;
    free(target_rows); target_rows = NULL;
    GHOST_FUNC_EXIT(GHOST_FUNCTYPE_SETUP);
    return ret;
}
/* Render a human-readable summary of the context (process count and
 * work distribution scheme) into a newly allocated string *str. */
ghost_error ghost_context_string(char **str, ghost_context *context)
{
    GHOST_FUNC_ENTER(GHOST_FUNCTYPE_UTIL);
    int nprocs;

    /* Start from an empty, heap-allocated string; the ghost_*_string
     * helpers below append to it. */
    GHOST_CALL_RETURN(ghost_malloc((void **)str,1));
    (*str)[0] = '\0';

    GHOST_CALL_RETURN(ghost_nrank(&nprocs, context->mpicomm));

    ghost_header_string(str,"Context");
    ghost_line_string(str,"MPI processes",NULL,"%d",nprocs);
    //ghost_line_string(str,"Number of rows",NULL,"%"PRGIDX,context->gnrows);
    ghost_line_string(str,"Work distribution scheme",NULL,"%s",ghost_context_workdist_string(context->flags));
    ghost_footer_string(str);

    GHOST_FUNC_EXIT(GHOST_FUNCTYPE_UTIL);
    return GHOST_SUCCESS;
}
/* Tear down a context and free all memory owned by it.
 * Safe to call with context == NULL. */
void ghost_context_destroy(ghost_context *context)
{
GHOST_FUNC_ENTER(GHOST_FUNCTYPE_TEARDOWN);
if (context) {
/* wishlist/duelist are arrays of pointers into ONE contiguous
 * backing allocation (see ghost_context_comm_init), so only the
 * first entry is freed here. */
if (context->wishlist) {
free(context->wishlist[0]);
}
if (context->duelist) {
free(context->duelist[0]);
}
#ifdef GHOST_HAVE_CUDA
if (context->cu_duelist) {
ghost_cu_free(context->cu_duelist[0]);
}
#endif
free(context->wishlist); context->wishlist = NULL;
free(context->duelist); context->duelist = NULL;
free(context->cu_duelist); context->cu_duelist = NULL;
free(context->wishes); context->wishes = NULL;
free(context->dues); context->dues = NULL;
free(context->hput_pos); context->hput_pos = NULL;
//free(context->lnEnts); context->lnEnts = NULL;
//free(context->lfEnt); context->lfEnt = NULL;
free(context->duepartners); context->duepartners = NULL;
free(context->wishpartners); context->wishpartners = NULL;
free(context->entsInCol); context->entsInCol = NULL;
/* Row and column map may share their permutation arrays; the
 * pointer comparisons avoid double frees in that case.
 * NOTE(review): this ghost_cu_free call is not guarded by
 * GHOST_HAVE_CUDA, unlike the one above — presumably a no-op stub
 * exists in non-CUDA builds; confirm. */
if (context->col_map->loc_perm != context->row_map->loc_perm) {
ghost_cu_free(context->col_map->cu_loc_perm); context->col_map->cu_loc_perm = NULL;
free(context->col_map->loc_perm); context->col_map->loc_perm = NULL;
free(context->col_map->loc_perm_inv); context->col_map->loc_perm_inv = NULL;
}
ghost_cu_free(context->row_map->cu_loc_perm); context->row_map->cu_loc_perm = NULL;
free(context->row_map->loc_perm); context->row_map->loc_perm = NULL;
free(context->row_map->loc_perm_inv); context->row_map->loc_perm_inv = NULL;
/* Same sharing logic for the global permutations. */
if (context->col_map->glb_perm != context->row_map->glb_perm) {
free(context->col_map->glb_perm); context->col_map->glb_perm = NULL;
free(context->col_map->glb_perm_inv); context->col_map->glb_perm_inv = NULL;
}
free(context->row_map->glb_perm); context->row_map->glb_perm = NULL;
free(context->row_map->glb_perm_inv); context->row_map->glb_perm_inv = NULL;
ghost_map_destroy(context->row_map);
ghost_map_destroy(context->col_map);
context->row_map = NULL;
context->col_map = NULL;
/* avg_ptr being set indicates the averaging data structures were
 * allocated together, so they are freed together. */
if(context->avg_ptr){
free(context->avg_ptr); context->avg_ptr = NULL;
free(context->mapAvg); context->mapAvg = NULL;
free(context->mappedDuelist); context->mappedDuelist = NULL;
free(context->nrankspresent); context->nrankspresent = NULL;
}
free(context->color_ptr);
free(context->zone_ptr);
/*
if( context->perm_local )
{
free(context->perm_local->perm); context->perm_local->perm = NULL;
free(context->perm_local->invPerm); context->perm_local->invPerm = NULL;
if(context->perm_local->method == GHOST_PERMUTATION_UNSYMMETRIC) {
free(context->perm_local->colPerm);
context->perm_local->colPerm = NULL;
free(context->perm_local->colInvPerm);
context->perm_local->colInvPerm = NULL;
}
free(context->perm_local); context->perm_local = NULL;
}
*/
}
free(context);
GHOST_FUNC_EXIT(GHOST_FUNCTYPE_TEARDOWN);
}
/* Set up the communication data structures of a context from the matrix
 * sparsity pattern:
 *  - classifies every matrix entry by the owning rank of its column,
 *  - builds per-rank "wish" lists (remote columns this rank needs) and,
 *    via MPI one-sided communication, the matching "due" lists,
 *  - rewrites the global column indices col_orig[] into compressed local
 *    indices col[] where halo (remote) columns are appended after the
 *    local columns,
 *  - returns the number of halo elements in *nhalo.
 * NOTE(review): assumes col_orig holds UNpermuted global column indices
 * (see the inline comment below) — confirm against the callers. */
ghost_error ghost_context_comm_init(ghost_context *ctx, ghost_gidx *col_orig, ghost_sparsemat *mat, ghost_lidx *col, ghost_lidx *nhalo)
{
GHOST_FUNC_ENTER(GHOST_FUNCTYPE_INITIALIZATION|GHOST_FUNCTYPE_SETUP);
ghost_error ret = GHOST_SUCCESS;
ghost_gidx j;
ghost_gidx i;
ghost_lidx max_loc_elements;
ghost_lidx *present_values = NULL;
ghost_lidx acc_dues = 0;
ghost_lidx acc_wishes;
ghost_lidx *item_from = NULL;
ghost_lidx *wishlist_counts = NULL;
ghost_lidx **wishlist = NULL;
ghost_lidx **cwishlist = NULL;
ghost_lidx this_pseudo_col;
ghost_lidx *pseudocol = NULL;
ghost_gidx *globcol = NULL;
ghost_lidx *myrevcol = NULL;
ghost_lidx *comm_remotePE = NULL;
ghost_lidx *comm_remoteEl = NULL;
ghost_lidx *wishl_mem = NULL;
ghost_lidx *duel_mem = NULL;
ghost_lidx acc_transfer_wishes, acc_transfer_dues;
size_t size_nint, size_lcol, size_gcol;
size_t size_nptr, size_pval;
size_t size_wish, size_dues;
int nprocs;
int me;
GHOST_CALL_RETURN(ghost_nrank(&nprocs, ctx->mpicomm));
GHOST_CALL_RETURN(ghost_rank(&me, ctx->mpicomm));
#ifdef GHOST_HAVE_MPI
/* NOTE(review): stack VLAs sized by 2*nprocs — may be large for big
 * communicators. */
MPI_Request req[2*nprocs];
MPI_Status stat[2*nprocs];
#endif
GHOST_CALL_GOTO(ghost_malloc((void **)&ctx->entsInCol,ctx->col_map->dim*sizeof(ghost_lidx)),err,ret);
GHOST_CALL_GOTO(ghost_malloc((void **)&ctx->wishlist,nprocs*sizeof(ghost_lidx *)),err,ret);
GHOST_CALL_GOTO(ghost_malloc((void **)&ctx->duelist,nprocs*sizeof(ghost_lidx *)),err,ret);
GHOST_CALL_GOTO(ghost_malloc((void **)&ctx->wishes,nprocs*sizeof(ghost_lidx)),err,ret);
GHOST_CALL_GOTO(ghost_malloc((void **)&ctx->dues,nprocs*sizeof(ghost_lidx)),err,ret);
memset(ctx->entsInCol,0,ctx->col_map->dim*sizeof(ghost_lidx));
/* Count, for every local column, how many matrix entries fall into it.
 * The matrix is stored in SELL-C chunks: entries of a row are strided
 * by the chunk height C. */
ghost_lidx chunk,rowinchunk,entinrow,globalent,globalrow;
for(chunk = 0; chunk < mat->nchunks; chunk++) {
for (rowinchunk=0; rowinchunk<mat->traits.C; rowinchunk++) {
globalrow = chunk*mat->traits.C+rowinchunk;
if (globalrow < ctx->row_map->dim) { // avoid chunk padding rows
for (entinrow=0; entinrow<mat->rowLen[globalrow]; entinrow++) {
globalent = mat->chunkStart[chunk] + entinrow*mat->traits.C + rowinchunk;
if (col_orig[globalent] >= ctx->col_map->offs && col_orig[globalent]<(ctx->col_map->offs+ctx->col_map->dim)) {
ctx->entsInCol[col_orig[globalent]-ctx->col_map->offs]++;
}
}
}
}
}
ghost_type type;
ghost_type_get(&type);
#ifdef GHOST_HAVE_CUDA
if (type == GHOST_TYPE_CUDA) {
GHOST_CALL_GOTO(ghost_malloc((void **)&ctx->cu_duelist,nprocs*sizeof(ghost_lidx *)),err,ret);
}
#endif
for (i=0; i<nprocs; i++){
ctx->wishes[i] = 0;
ctx->dues[i] = 0;
ctx->wishlist[i] = NULL;
ctx->duelist[i] = NULL;
#ifdef GHOST_HAVE_CUDA
if (type == GHOST_TYPE_CUDA) {
ctx->cu_duelist[i] = NULL;
}
#endif
}
size_nint = (size_t)( (size_t)(nprocs) * sizeof(ghost_lidx) );
size_nptr = (size_t)( nprocs * sizeof(ghost_lidx*) );
/* Upper bound on local entries over all ranks; used to size the
 * present_values scratch array below. */
#ifdef GHOST_HAVE_MPI
MPI_CALL_GOTO(MPI_Allreduce(&mat->nEnts,&max_loc_elements,1,ghost_mpi_dt_lidx,MPI_MAX,ctx->mpicomm),err,ret);
#else
max_loc_elements = mat->nEnts;
#endif
/*
max_loc_elements = 0;
for (i=0;i<nprocs;i++) {
if (max_loc_elements<ctx->lnEnts[i]) {
max_loc_elements = ctx->lnEnts[i];
}
}
*/
size_pval = (size_t)( max_loc_elements * sizeof(ghost_lidx) );
size_lcol = (size_t)( (size_t)(mat->nEnts) * sizeof( ghost_lidx ) );
size_gcol = (size_t)( (size_t)(mat->nEnts) * sizeof( ghost_gidx ) );
/* / 1 2 . 3 4 . \
 * | . 5 6 7 . . |
 * mat = | 8 9 . . . 10 |
 * | . 11 12 13 . . |
 * | . . . . 14 15 |
 * \16 . . . 17 18 /
 *
 * nprocs = 3
 * max_loc_elements = 4
 * item_from = <{0,0,0},{0,0,0},{0,0,0}>
 * wishlist_counts = <{0,0,0},{0,0,0},{0,0,0}>
 * comm_remotePE = <{0,0,0,0,0,0,0},{0,0,0,0,0,0},{0,0,0,0,0}> PE where element is on
 * comm_remoteEl = <{0,0,0,0,0,0,0},{0,0,0,0,0,0},{0,0,0,0,0}> local colidx of element
 * present_values = <{0,0,0,0,0,0,0},{0,0,0,0,0,0,0},{0,0,0,0,0,0,0}>
 */
GHOST_CALL_GOTO(ghost_malloc((void **)&item_from, size_nint),err,ret);
GHOST_CALL_GOTO(ghost_malloc((void **)&wishlist_counts, nprocs*sizeof(ghost_lidx)),err,ret);
GHOST_CALL_GOTO(ghost_malloc((void **)&comm_remotePE, size_lcol),err,ret);
GHOST_CALL_GOTO(ghost_malloc((void **)&comm_remoteEl, size_lcol),err,ret);
GHOST_CALL_GOTO(ghost_malloc((void **)&present_values, size_pval),err,ret);
for (i=0; i<nprocs; i++) wishlist_counts[i] = 0;
/* The per-rank wish counters are accumulated into cacheline-padded
 * per-thread partial arrays to avoid false sharing, then reduced. */
int nthreads;
unsigned clsize;
#pragma omp parallel
{
#pragma omp single
nthreads = ghost_omp_nthread();
}
ghost_machine_cacheline_size(&clsize);
int padding = 8*(int)clsize/sizeof(ghost_lidx);
ghost_lidx *partial_wishlist_counts;
GHOST_CALL_GOTO(ghost_malloc((void **)&partial_wishlist_counts, nthreads*(nprocs+padding)*sizeof(ghost_lidx)),err,ret);
memset(partial_wishlist_counts,0,nthreads*(nprocs+padding)*sizeof(ghost_lidx));
GHOST_INSTR_START("comm_remote*");
/* For every entry, find the owning rank j of its column by scanning
 * the row offsets from the highest rank downwards. */
#pragma omp parallel shared (partial_wishlist_counts)
{
int thread = ghost_omp_threadnum();
#pragma omp for private(j)
for (i=0;i<mat->nEnts;i++){
for (j=nprocs-1;j>=0; j--){
if (ctx->row_map->goffs[j]<col_orig[i]+1) {//is col_orig unpermuted(probably)
comm_remotePE[i] = j;//comm_remotePE[colPerm[i]]=j
comm_remoteEl[i] = col_orig[i] -ctx->row_map->goffs[j]; //comm_remoteEl[colPerm[i]] = col_orig[i] -ctx->row_map->goffs[j];
partial_wishlist_counts[(padding+nprocs)*thread+j]++;
break;
}
}
}
}
for (j=0; j<nprocs; j++) {
for (i=0; i<nthreads; i++) {
wishlist_counts[j] += partial_wishlist_counts[(padding+nprocs)*i+j];
}
}
free(partial_wishlist_counts);
GHOST_INSTR_STOP("comm_remote*");
/*
 * wishlist_counts = <{3,3,1},{3,2,1},{1,0,4}>
 * comm_remotePE = <{0,0,1,2,0,1,1},{0,0,2,0,1,1},{2,2,0,2,2}>
 * comm_remoteEl = <{0,1,1,0,1,0,1},{0,1,1,1,0,1},{0,1,0,0,1}>
 */
acc_wishes = 0;
for (i=0; i<nprocs; i++) {
acc_wishes += wishlist_counts[i];
}
/*
 * acc_wishes = <7,6,5> equal to lnEnts
 */
GHOST_CALL_GOTO(ghost_malloc((void **)&wishlist,size_nptr),err,ret);
GHOST_CALL_GOTO(ghost_malloc((void **)&cwishlist,size_nptr),err,ret);
/*
 * wishlist = <{NULL,NULL,NULL},{NULL,NULL,NULL},{NULL,NULL,NULL}>
 * cwishlist = <{NULL,NULL,NULL},{NULL,NULL,NULL},{NULL,NULL,NULL}>
 */
for (i=0; i<nprocs; i++){
GHOST_CALL_GOTO(ghost_malloc((void **)&cwishlist[i],wishlist_counts[i]*sizeof(ghost_lidx)),err,ret);
GHOST_CALL_GOTO(ghost_malloc((void **)&wishlist[i],wishlist_counts[i]*sizeof(ghost_lidx)),err,ret);
}
/*
 * wishlist = <{{0,0,0},{0,0,0},{0}},{{0,0,0},{0,0},{0}},{{0},NULL,{0,0,0,0}}>
 * cwishlist = <{{0,0,0},{0,0,0},{0}},{{0,0,0},{0,0},{0}},{{0},NULL,{0,0,0,0}}>
 */
for (i=0;i<nprocs;i++) item_from[i] = 0;
/* Scatter each entry's remote-local column index into the wishlist of
 * its owning rank (still with duplicates at this point). */
GHOST_INSTR_START("wishlist");
for (i=0;i<mat->nEnts;i++){
wishlist[comm_remotePE[i]][item_from[comm_remotePE[i]]] = comm_remoteEl[i];
item_from[comm_remotePE[i]]++;
}
GHOST_INSTR_STOP("wishlist");
/*
 * wishlist = <{{0,1,1},{1,0,1},{0}},{{0,1,1},{0,1},{1}},{{0},NULL,{0,1,0,1}}> local column idx of wishes
 * item_from = <{3,3,1},{3,2,1},{1,0,4}> equal to wishlist_counts
 */
#ifdef GHOST_HAVE_MPI
MPI_Barrier(ctx->mpicomm);
GHOST_INSTR_START("wishes_and_dues");
/* One-sided communication: each rank deduplicates its wishes towards
 * rank i, then directly writes the count into rank i's dues array
 * (MPI_Put) and increments rank i's due-partner counter
 * (MPI_Accumulate). */
MPI_Win due_win,nduepartners_win;
MPI_CALL_GOTO(MPI_Win_create(ctx->dues,nprocs*sizeof(ghost_lidx),sizeof(ghost_lidx),MPI_INFO_NULL,ctx->mpicomm,&due_win),err,ret);
MPI_CALL_GOTO(MPI_Win_create(&ctx->nduepartners,sizeof(int),sizeof(int),MPI_INFO_NULL,ctx->mpicomm,&nduepartners_win),err,ret);
ghost_lidx thisentry = 0;
int one = 1;
for (i=0; i<nprocs; i++) {
if ( (i!=me) && (wishlist_counts[i]>0) ){
/* present_values[el] remembers the compressed position of a
 * remote element; -1 means "not seen yet". */
#pragma omp parallel for
for (j=0; j<max_loc_elements; j++) {
present_values[j] = -1;
}
thisentry = 0;
for (j=0; j<wishlist_counts[i]; j++){
if (present_values[wishlist[i][j]]<0){
present_values[wishlist[i][j]] = thisentry;
cwishlist[i][thisentry] = wishlist[i][j];
thisentry = thisentry + 1;
}
}
ctx->wishes[i] = thisentry;
ctx->nwishpartners++;
MPI_CALL_GOTO(MPI_Win_lock(MPI_LOCK_SHARED,i,0,due_win),err,ret);
MPI_CALL_GOTO(MPI_Put(&ctx->wishes[i],1,ghost_mpi_dt_lidx,i,me,1,ghost_mpi_dt_lidx,due_win),err,ret);
MPI_CALL_GOTO(MPI_Win_unlock(i,due_win),err,ret);
MPI_CALL_GOTO(MPI_Win_lock(MPI_LOCK_SHARED,i,0,nduepartners_win),err,ret);
MPI_CALL_GOTO(MPI_Accumulate(&one,1,MPI_INT,i,0,1,MPI_INT,MPI_SUM,nduepartners_win),err,ret);
MPI_CALL_GOTO(MPI_Win_unlock(i,nduepartners_win),err,ret);
} else {
ctx->wishes[i] = 0;
}
}
MPI_Win_free(&due_win);
MPI_Win_free(&nduepartners_win);
#endif
/*
 * cwishlist = <{{#,#,#},{1,0,#},{0}},{{0,1,#},{#,#},{1}},{{0},NULL,{#,#,#,#}}> compressed wish list
 * ctx->wishes = <{0,2,1},{2,0,1},{1,0,0}>
 * ctx->dues = <{0,2,1},{2,0,0},{1,1,0}>
 */
GHOST_INSTR_STOP("wishes_and_dues");
// now, we now have many due/wish partners we have and can allocate the according arrays
// it will be filled in a later loop over nprocs
GHOST_CALL_GOTO(ghost_malloc((void **)&ctx->duepartners,sizeof(int)*ctx->nduepartners),err,ret);
GHOST_CALL_GOTO(ghost_malloc((void **)&ctx->wishpartners,sizeof(int)*ctx->nwishpartners),err,ret);
acc_transfer_dues = 0;
acc_transfer_wishes = 0;
for (i=0; i<nprocs; i++){
acc_transfer_wishes += ctx->wishes[i];
acc_transfer_dues += ctx->dues[i];
}
/*
 * acc_transfer_wishes = <3,3,1>
 * acc_transfer_dues = <3,2,2>
 */
(*nhalo) = 0;
ghost_lidx tt = 0;
i = me;
int meHandled = 0;
/* first_putpos is the densemat position where the first halo element
 * will be stored; with NO_DISTINCTION halo elements live beyond the
 * padded local columns. */
ghost_lidx first_putpos = 0;
if(mat->context->col_map->flags & GHOST_PERM_NO_DISTINCTION) {
ghost_lidx halo_ctr = 0;
//we need to know number of halo elements now
for(int k=0;k<nprocs;++k) {
if (k != me){
for (j=0;j<ctx->wishes[k];j++){
++halo_ctr;
}
}
}
// ctx->col_map->dimpad = PAD(ctx->row_map->dim+halo_ctr,ghost_densemat_row_padding());
// ctx->nrowspadded = PAD(ctx->row_map->ldim[me]+halo_ctr,rowpadding);
// rowpaddingoffset = ctx->nrowspadded-ctx->row_map->ldim[me];
first_putpos = PAD(mat->context->col_map->dim,ghost_densemat_row_padding())+halo_ctr;
} else {
// ctx->nrowspadded = PAD(ctx->row_map->ldim[me],rowpadding);// this is set already
// ctx->col_map->dimpad = PAD(ctx->row_map->dim,ghost_densemat_row_padding());
first_putpos = PAD(mat->context->col_map->dim,ghost_densemat_row_padding());
}
// rowpaddingoffset = MAX(ctx->row_map->dimpad,ctx->col_map->dimpad)-ctx->row_map->dim;
//first_putpos -= ctx->row_map->dim;
GHOST_INSTR_START("compress_cols");
/*
 * col[i] = <{0,1,3,4,1,2,3},{0,1,5,1,2,3},{4,5,0,4,5}>
 */
GHOST_CALL_GOTO(ghost_malloc((void **)&pseudocol,size_lcol),err,ret);
GHOST_CALL_GOTO(ghost_malloc((void **)&globcol,size_gcol),err,ret);
/*
 * pseudocol = <{0,0,0,0,0,0,0},{0,0,0,0,0,0},{0,0,0,0,0}> PE where element is on
 * globcol = <{0,0,0,0,0,0,0},{0,0,0,0,0,0},{0,0,0,0,0}> local colidx of element
 */
this_pseudo_col = ctx->row_map->ldim[me];
/* NOTE(review): these two calls use GHOST_CALL_RETURN, which would
 * return without freeing the scratch buffers allocated above —
 * confirm whether they can actually fail here. */
GHOST_CALL_RETURN(ghost_nrank(&nprocs, ctx->mpicomm));
GHOST_CALL_RETURN(ghost_rank(&me, ctx->mpicomm));
i = me;
for (; i<nprocs; i++) { // iterate i=me,..,nprocs,0,..,me-1
ghost_lidx t;
if (meHandled && (i == me)) continue;
if (i != me){
/* Assign consecutive pseudo (halo) column indices to the
 * deduplicated wishes from rank i. */
for (j=0;j<ctx->wishes[i];j++){
pseudocol[(*nhalo)] = this_pseudo_col;
globcol[(*nhalo)] = ctx->row_map->goffs[i]+cwishlist[i][j];
(*nhalo)++;
this_pseudo_col++;
}
/*
 * pseudocol = <{0,1,2},{0,1,2},{0}> colidx of each halo element starting from 0
 * globcol = <{2,3,4},{2,3,4},{2}> colidx of each halo element starting from lnrows[me]
 */
// myrevcol maps the actual colidx to the new colidx
GHOST_DEBUG_LOG(2,"Allocating space for myrevcol");
GHOST_CALL_GOTO(ghost_malloc((void **)&myrevcol,ctx->row_map->ldim[i]*sizeof(ghost_lidx)),err,ret);
for (j=0;j<ctx->wishes[i];j++){
myrevcol[globcol[tt]-ctx->row_map->goffs[i]] = tt;
tt++;
}
/*
 * 1st iter: myrevcol = <{1,0},{0,1},{0,#}>
 * 2nd iter: myrevcol = <{2,#},{#,2},{#,#}>
 */
#pragma omp parallel for
for (t=0; t<mat->nEnts; t++) {
if (comm_remotePE[t] == i) { // local element for rank i
col[t] = first_putpos-ctx->row_map->dim + pseudocol[myrevcol[col_orig[t]-ctx->row_map->goffs[i]]];
//printf("col = %d-%d+%d = %d\n",first_putpos,ctx->row_map->dim, pseudocol[myrevcol[col_orig[t]-ctx->row_map->goffs[i]]],col[t]);
}
}
free(myrevcol); myrevcol = NULL;
} else { // first i iteration goes here
/* Entries owned by this rank keep their plain local index. */
#pragma omp parallel for
for (t=0; t<mat->nEnts; t++) {
if (comm_remotePE[t] == me) { // local element for myself
col[t] = comm_remoteEl[t];
}
}
/*
 * col[i] = <{0,1,3,4,1,2,3},{0,1,5,1,0,1},{0,1,0,0,1}> local idx changed after first iteration
 */
}
/* After handling rank me first, wrap around and process the
 * remaining ranks 0..me-1,me+1..nprocs-1 in order. */
if (!meHandled) {
i = -1;
meHandled = 1;
}
}
GHOST_INSTR_STOP("compress_cols")
/*
 * col[i] = <{0,1,2,4,1,3,2},{2,3,4,3,0,1},{0,1,2,0,1}>
 */
GHOST_INSTR_START("final")
size_wish = (size_t)( acc_transfer_wishes * sizeof(ghost_lidx) );
size_dues = (size_t)( acc_transfer_dues * sizeof(ghost_lidx) );
// we need a contiguous array in memory
GHOST_CALL_GOTO(ghost_malloc((void **)&wishl_mem,size_wish),err,ret);
GHOST_CALL_GOTO(ghost_malloc((void **)&duel_mem,size_dues),err,ret);
GHOST_CALL_GOTO(ghost_malloc((void **)&ctx->hput_pos,size_nptr),err,ret);
ghost_type_get(&type);
#ifdef GHOST_HAVE_CUDA
ghost_lidx *cu_duel_mem;
if (type == GHOST_TYPE_CUDA) {
GHOST_CALL_GOTO(ghost_cu_malloc((void **)&cu_duel_mem,size_dues),err,ret);
}
#endif
acc_dues = 0;
acc_wishes = 0;
/* Point the per-rank due/wish lists into the contiguous backing
 * arrays and record the halo put position for every rank. */
int duepartneridx = 0, wishpartneridx = 0;
for (i=0; i<nprocs; i++){
if (ctx->dues[i]) {
ctx->duepartners[duepartneridx] = i;
duepartneridx++;
}
if (ctx->wishes[i]) {
ctx->wishpartners[wishpartneridx] = i;
wishpartneridx++;
}
ctx->duelist[i] = &(duel_mem[acc_dues]);
#ifdef GHOST_HAVE_CUDA
if (type == GHOST_TYPE_CUDA) {
ctx->cu_duelist[i] = &(cu_duel_mem[acc_dues]);
}
#endif
ctx->wishlist[i] = &(wishl_mem[acc_wishes]);
ctx->hput_pos[i] = first_putpos + acc_wishes;
/* NOTE(review): the second clause of this condition
 * ((i == nprocs-2) && (me == nprocs-1)) looks suspicious —
 * verify against the upstream GHOST sources. */
if ( (me != i) && !( (i == nprocs-2) && (me == nprocs-1) ) ){
acc_dues += ctx->dues[i];
acc_wishes += ctx->wishes[i];
}
}
#ifdef GHOST_HAVE_MPI
/* Exchange the actual index lists: each rank receives its dues from
 * the other ranks' wishes. */
for (i=0;i<2*nprocs;i++)
req[i] = MPI_REQUEST_NULL;
for (i=0; i<nprocs; i++)
for (j=0;j<ctx->wishes[i];j++)
ctx->wishlist[i][j] = cwishlist[i][j];
int msgcount = 0;
// TODO only loop duepartners
for(i=0; i<nprocs; i++)
{ // receive _my_ dues from _other_ processes' wishes
MPI_CALL_GOTO(MPI_Irecv(ctx->duelist[i],ctx->dues[i],ghost_mpi_dt_lidx,i,i,ctx->mpicomm,&req[msgcount]),err,ret);
msgcount++;
}
for(i=0; i<nprocs; i++) {
MPI_CALL_GOTO(MPI_Isend(ctx->wishlist[i],ctx->wishes[i],ghost_mpi_dt_lidx,i,me,ctx->mpicomm,&req[msgcount]),err,ret);
msgcount++;
}
MPI_CALL_GOTO(MPI_Waitall(msgcount,req,stat),err,ret);
#endif
#ifdef GHOST_HAVE_CUDA
if (type == GHOST_TYPE_CUDA) {
GHOST_CALL_GOTO(ghost_cu_upload(cu_duel_mem,duel_mem,size_dues),err,ret);
}
#endif
GHOST_INSTR_STOP("final")
goto out;
err:
free(wishl_mem); wishl_mem = NULL;
free(duel_mem); duel_mem = NULL;
for (i=0; i<nprocs; i++) {
free(ctx->wishlist[i]); ctx->wishlist[i] = NULL;
free(ctx->duelist[i]); ctx->duelist[i] = NULL;
}
free(ctx->hput_pos); ctx->hput_pos = NULL;
free(ctx->wishes); ctx->wishes = NULL;
free(ctx->dues); ctx->dues = NULL;
free(ctx->duepartners); ctx->duepartners = NULL;
free(ctx->wishpartners); ctx->wishpartners = NULL;
free(ctx->entsInCol); ctx->entsInCol = NULL;
out:
for (i=0; i<nprocs; i++) {
free(wishlist[i]); wishlist[i] = NULL;
}
free(wishlist); wishlist = NULL;
for (i=0; i<nprocs; i++) {
free(cwishlist[i]); cwishlist[i] = NULL;
}
free(cwishlist); cwishlist = NULL;
free(wishlist_counts); wishlist_counts = NULL;
free(item_from); item_from = NULL;
free(comm_remotePE); comm_remotePE = NULL;
free(comm_remoteEl); comm_remoteEl = NULL;
free(present_values); present_values = NULL;
free(pseudocol); pseudocol = NULL;
free(globcol); globcol = NULL;
free(myrevcol); myrevcol = NULL;
GHOST_FUNC_EXIT(GHOST_FUNCTYPE_INITIALIZATION|GHOST_FUNCTYPE_SETUP);
return ret;
}
/* Return a static, human-readable name of the work distribution scheme
 * encoded in the given context flags. The returned string must not be
 * freed by the caller. */
char * ghost_context_workdist_string(ghost_context_flags_t flags)
{
    GHOST_FUNC_ENTER(GHOST_FUNCTYPE_UTIL);
    char *desc = "Invalid";

    /* DIST_NZ takes precedence over DIST_ROWS, matching the order in
     * which the flags are tested elsewhere. */
    if (flags & GHOST_CONTEXT_DIST_NZ) {
        desc = "Equal no. of nonzeros";
    } else if (flags & GHOST_CONTEXT_DIST_ROWS) {
        desc = "Equal no. of rows";
    }

    GHOST_FUNC_EXIT(GHOST_FUNCTYPE_UTIL);
    return desc;
}
/* Return the map of the requested type; any type other than
 * GHOST_MAP_COL (including GHOST_MAP_ROW) yields the row map. */
ghost_map *ghost_context_map(const ghost_context *ctx, const ghost_maptype mt)
{
    if (mt == GHOST_MAP_COL) {
        return ctx->col_map;
    }
    return ctx->row_map;
}
/* Return the "opposite" map: the column map for GHOST_MAP_ROW, the row
 * map for GHOST_MAP_COL, and NULL for any other map type. */
ghost_map *ghost_context_other_map(const ghost_context *ctx, const ghost_maptype mt)
{
    if (mt == GHOST_MAP_ROW) {
        return ctx->col_map;
    }
    if (mt == GHOST_MAP_COL) {
        return ctx->row_map;
    }
    return NULL;
}
/* Return whichever of the two maps has the larger padded dimension;
 * on a tie the column map is returned. */
ghost_map *ghost_context_max_map(const ghost_context *ctx)
{
    if (ctx->col_map->dimpad >= ctx->row_map->dimpad) {
        return ctx->col_map;
    }
    return ctx->row_map;
}
/**
 * @brief Install a row or column map into a context, maintaining the
 * maps' reference counts.
 *
 * @param ctx   The context to modify.
 * @param which GHOST_MAP_ROW or GHOST_MAP_COL; anything else is an error.
 * @param map   The map to install; its ref_count is incremented.
 *
 * @return GHOST_SUCCESS or GHOST_ERR_INVALID_ARG.
 */
ghost_error ghost_context_set_map(ghost_context *ctx, ghost_maptype which, ghost_map *map)
{
    /* Validate the map type BEFORE touching any reference counts.
     * Previously the old map's ref_count was decremented first; since
     * ghost_context_map() falls back to the row map for unknown types,
     * an invalid `which` corrupted the row map's reference count
     * before the error was reported. */
    if ((which != GHOST_MAP_ROW) && (which != GHOST_MAP_COL)) {
        GHOST_ERROR_LOG("The map is to be either a column or row map!");
        return GHOST_ERR_INVALID_ARG;
    }

    /* Drop the reference held on the map being replaced (if any). */
    ghost_map *oldmap = ghost_context_map(ctx,which);
    if (oldmap) {
        oldmap->ref_count--;
    }

    if (which == GHOST_MAP_ROW) {
        ctx->row_map = map;
    } else {
        ctx->col_map = map;
    }
    map->ref_count++;

    return GHOST_SUCCESS;
}
|
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convolution_transform_kernel_pack1to4_neon(const Mat& weight_data, Mat& weight_data_pack1to4, int num_input, int num_output, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
// src = kw-kh-inch-outch
// dst = 4b-kw-kh-inch-outch/4b
Mat weight_data_r2 = weight_data.reshape(maxk, num_input, num_output);
weight_data_pack1to4.create(maxk, num_input, num_output / 4, (size_t)4 * 4, 4);
for (int q = 0; q + 3 < num_output; q += 4)
{
const Mat k0 = weight_data_r2.channel(q);
const Mat k1 = weight_data_r2.channel(q + 1);
const Mat k2 = weight_data_r2.channel(q + 2);
const Mat k3 = weight_data_r2.channel(q + 3);
float* g00 = weight_data_pack1to4.channel(q / 4);
for (int p = 0; p < num_input; p++)
{
const float* k00 = k0.row(p);
const float* k10 = k1.row(p);
const float* k20 = k2.row(p);
const float* k30 = k3.row(p);
for (int k = 0; k < maxk; k++)
{
g00[0] = k00[k];
g00[1] = k10[k];
g00[2] = k20[k];
g00[3] = k30[k];
g00 += 4;
}
}
}
}
// Naive (im2col-free) convolution from a pack1 input blob to a pack4
// output blob using NEON intrinsics. Each output pixel accumulates
// four output channels at once; weights must be laid out by
// convolution_transform_kernel_pack1to4_neon. top_blob is expected to
// be pre-allocated with the correct output shape.
static void convolution_pack1to4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack1to4, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets
    // Precompute, for each of the maxk taps, its flat offset into the
    // input row so the inner loop is a simple indexed gather.
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        // jump from the end of one kernel row to the start of the next
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Accumulator for 4 output channels; seeded with bias
                // when present.
                float32x4_t _sum = vdupq_n_f32(0.f);

                if (bias_data_ptr)
                {
                    _sum = vld1q_f32(bias_data_ptr + p * 4);
                }

                const float* kptr = (const float*)weight_data_pack1to4 + maxk * channels * p * 4;

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const float* sptr = m.row(i * stride_h) + j * stride_w;

                    for (int k = 0; k < maxk; k++) // 29.23
                    {
                        // broadcast one input sample, multiply by the
                        // interleaved 4-channel weights, accumulate
                        float32x4_t _val = vdupq_n_f32(sptr[space_ofs[k]]);
                        float32x4_t _w = vld1q_f32(kptr);
                        _sum = vmlaq_f32(_sum, _val, _w);

                        kptr += 4;
                    }
                }

                _sum = activation_ps(_sum, activation_type, activation_params);

                vst1q_f32(outptr + j * 4, _sum);
            }

            outptr += outw * 4;
        }
    }
}
|
/* Copyright 2013-2016. The Regents of the University of California.
* Copyright 2016. Martin Uecker.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2012-2016 Martin Uecker <martin.uecker@med.uni-goettingen.de>
* 2013 Dara Bahri <dbahri123@gmail.com>
* 2015-2016 Siddharth Iyer <sid8795@gmail.com>
*
*
* Uecker M, Lai P, Murphy MJ, Virtue P, Elad M, Pauly JM, Vasanawala SS, Lustig M.
* ESPIRiT - An Eigenvalue Approach to Autocalibrating Parallel MRI: Where SENSE
* meets GRAPPA. Magn Reson Med, 71:990-1001 (2014)
*
* Iyer S, Ong F, Lustig M.
* Towards A Parameter Free ESPIRiT: Soft-Weighting For Robust Coil Sensitivity Estimation.
* Presented in the session: "New Frontiers In Image Reconstruction" at ISMRM 2016.
* http://www.ismrm.org/16/program_files/O86.htm
*
*/
#include <assert.h>
#include <complex.h>
#include <math.h>
#include <stdbool.h>
#include "linops/linop.h"
#include "linops/someops.h"
#include "num/multind.h"
#include "num/fft.h"
#include "num/flpmath.h"
#include "num/linalg.h"
#include "num/lapack.h"
#include "num/casorati.h"
#include "num/rand.h"
#include "misc/misc.h"
#include "misc/mri.h"
#include "misc/resize.h"
#include "misc/debug.h"
#include "misc/utils.h"
#include "calib/calmat.h"
#include "calib/cc.h"
#include "calib/softweight.h"
#include "calib.h"
#ifdef USE_CUDA
#include "calib/calibcu.h"
#endif
#if 0
#define CALMAT_SVD
#endif
#if 0
#define FLIP
#endif
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
/* Compute the M leading eigenpairs of a hermitian NxN matrix via
 * orthogonal iteration (30 iterations). Only the lower triangle of
 * `matrix` needs to be filled on entry; on return, `matrix` holds the
 * eigenvectors and `val` the eigenvalues. */
static void eigen_herm3(int M, int N, float val[M], complex float matrix[N][N]) // ordering might be different to herm2
{
	complex float mout[M][N];

	/* Mirror the lower triangle so the matrix is explicitly hermitian. */
	for (int li = 0; li < N; li++)
		for (int lj = 0; lj < li; lj++)
			matrix[lj][li] = conj(matrix[li][lj]);

	//mat_identity(M, N, mout);
	/* NOTE(review): mout is passed to orthiter without initialization
	 * (the mat_identity call is commented out) — presumably orthiter
	 * initializes its iterate internally; confirm. */
	orthiter(M, N, 30, val, mout, matrix);

	/* Copy the converged eigenvectors back into the caller's buffer. */
	for (int i = 0; i < M; i++)
		for (int j = 0; j < N; j++)
			matrix[i][j] = mout[i][j];
}
/*
 * Smooth sigmoid-like ramp used for soft sensitivity-map cropping:
 * clamps to 0 for x <= -1 and to 1 for x >= 1, and transitions
 * smoothly as 0.5 * (1 + 2x / (1 + x^2)) in between.
 */
static float scurve(float x)
{
	if (x <= -1.)
		return 0.;

	if (x >= 1.)
		return 1.;

	/* x * x instead of powf(x, 2.): same value, no libm call. */
	return 0.5 * (1. + 2. * x / (1. + x * x));
}
/* Soft crop weighting: map an eigenvalue `val` to a [0, 1] weight via
 * the s-curve, centered at the crop threshold `crth`. */
static float crop_weight_function(float crth, float val)
{
	const float dist = sqrtf(val) - crth;

	return scurve(dist / (1. - crth));
}
/* Hard crop thresholding: 0 for values at or below the threshold,
 * 1 otherwise. */
static float crop_thresh_function(float crth, float val)
{
	if (val <= crth)
		return 0.;

	return 1.;
}
/* Multi-dimensional (array-wise) variant of scurve(), built from md_*
 * primitives: dst = (src >= 1) + (src >= -1) * (0.5 + src / (1 + src^2)).
 *
 * NOTE(review): the (src <= 1) mask computed into tmp2 is immediately
 * overwritten by md_mul below, and inputs > 1 are not clamped — for
 * src > 1 this produces values larger than 1, diverging from the
 * scalar scurve(); confirm against the intended soft-weighting
 * behavior. */
static void md_scurve(int N, const long dims[N], float* dst, const float* src)
{
	float* tmp1 = md_alloc_sameplace(N, dims, FL_SIZE, src);
	float* tmp2 = md_alloc_sameplace(N, dims, FL_SIZE, src);

	/* tmp1 = (src >= -1), tmp2 = (src <= 1) (the latter is discarded). */
	md_sgreatequal(N, dims, tmp1, src, -1);
	md_slessequal(N, dims, tmp2, src, 1);

	/* tmp2 = 0.5 + src / (1 + src^2)  — the smooth mid-section. */
	md_mul(N, dims, tmp2, src, src);
	md_sadd(N, dims, tmp2, tmp2, 1.);
	md_div(N, dims, tmp2, src, tmp2);
	md_sadd(N, dims, tmp2, tmp2, 0.5);

	/* dst = (src >= 1) + tmp1 * tmp2 */
	md_sgreatequal(N, dims, dst, src, 1);
	md_fmac(N, dims, dst, tmp1, tmp2);

	md_free(tmp1);
	md_free(tmp2);
}
/* Array-wise soft crop weighting (cf. crop_weight_function):
 * dst = scurve((sqrt(|src|) - crth) / (1 - crth)), returned as a
 * complex array with zero imaginary part. */
static void md_crop_weight_fun(int N, const long dims[N], float crth, complex float* dst, const complex float* src)
{
	md_zabs(N, dims, dst, src);

	/* Work on the real magnitudes in a temporary float array. */
	float* tmp = md_alloc_sameplace(N, dims, FL_SIZE, src);
	md_real(N, dims, tmp, dst);

	md_sqrt(N, dims, tmp, tmp);
	md_sadd(N, dims, tmp, tmp, - crth);
	md_smul(N, dims, tmp, tmp, 1. / (1. -crth));
	md_scurve(N, dims, tmp, tmp);

	/* Re-embed the real weights as complex values. */
	md_zcmpl_real(N, dims, dst, tmp);

	md_free(tmp);
}
/* Array-wise hard crop thresholding (cf. crop_thresh_function):
 * dst = (|src| >= crth) ? 1 : 0.
 * NOTE(review): the scalar variant uses a strict "val > crth"
 * (via val <= crth -> 0) while md_zsgreatequal includes equality —
 * behavior differs only at exactly |src| == crth. */
static void md_crop_thresh_fun(int N, const long dims[N], float crth, complex float* dst, const complex float* src)
{
	md_zabs(N, dims, dst, src);
	md_zsgreatequal(N, dims, dst, dst, crth);
}
typedef void (*md_weight_function)(int N, const long dims[N], float crth, complex float* dst, const complex float* src);
/* Multiply `ptr` in place by a crop-weight map derived from `map` via
 * `fun` (soft or hard weighting). The weight map spans only the image
 * and maps dimensions (FFT_FLAGS | MAPS_FLAG) and is broadcast over
 * the remaining (e.g. coil) dimensions via strides. */
static void md_crop_weight(int N, const long dims[N], complex float* ptr, md_weight_function fun, float crth, const complex float* map)
{
	assert(4 < N);

	long wgh_dims[N];
	md_select_dims(N, FFT_FLAGS | MAPS_FLAG, wgh_dims, dims);

	/* Evaluate the weighting function on the (smaller) map array. */
	complex float* tmp = md_alloc_sameplace(N, wgh_dims, CFL_SIZE, map);
	fun(N, wgh_dims, crth, tmp, map);

	long strs[N];
	long wgh_strs[N];
	md_calc_strides(N, strs, dims, CFL_SIZE);
	md_calc_strides(N, wgh_strs, wgh_dims, CFL_SIZE);

	/* Strided multiply broadcasts the weights over the coil dim. */
	md_zmul2(N, dims, strs, ptr, strs, ptr, wgh_strs, tmp);

	md_free(tmp);
}
typedef float (*weight_function)(float crth, float val);
/* Reference (non-md_*) implementation of crop weighting: for every spatial
 * position and map, evaluate the weight from |map| and scale all coil
 * entries of ptr at that position. Kept for comparison with md_crop_weight. */
static void crop_weight(int N, const long dims[N], complex float* ptr, weight_function fun, float crth, const complex float* map)
{
	long nx = dims[0];
	long ny = dims[1];
	long nz = dims[2];
	long nc = dims[3];
	long nm = dims[4];

	assert(N >= 5);
	assert(1 == md_calc_size(N - 5, dims + 5));

	for (long m = 0; m < nm; m++) {
		#pragma omp parallel for
		for (long z = 0; z < nz; z++) {

			for (long y = 0; y < ny; y++) {

				for (long x = 0; x < nx; x++) {

					long vox = ((m * nz + z) * ny + y) * nx + x;
					float val = cabsf(map[vox]);

					for (long c = 0; c < nc; c++)
						ptr[(((m * nc + c) * nz + z) * ny + y) * nx + x] *= fun(crth, val);
				}
			}
		}
	}
}
/* Crop sensitivity maps in place: soft selects the smooth s-curve weight,
 * otherwise a hard 0/1 threshold at crth is applied. */
void crop_sens(const long dims[DIMS], complex float* ptr, bool soft, float crth, const complex float* map)
{
	md_weight_function weight = md_crop_thresh_fun;

	if (soft)
		weight = md_crop_weight_fun;

	md_crop_weight(DIMS, dims, ptr, weight, crth, map);
	//crop_weight(DIMS, dims, ptr, soft ? crop_weight_function : crop_thresh_function, crth, map);
}
/**
 * sure_crop - This determines the crop-threshold to use as described in the talk: "Towards A Parameter
 *             Free ESPIRiT: Soft-Weighting For Robust Coil Sensitivity Estimation". This was given at the
 *             session: "New Frontiers In Image Reconstruction" at ISMRM 2016.
 *
 * Parameters:
 *  var         - Estimated variance in data.
 *  evec_dims   - The eigenvector dimensions.
 *  evec_data   - The eigenvectors.
 *  eptr        - The eigenvalues.
 *  calreg_dims - Dimension of the calibration region.
 *  calreg      - Calibration data.
 *
 * Returns the crop threshold c that minimizes the SURE estimate of the MSE,
 * found by a coarse-to-fine sweep (step s is halved and reversed each pass).
 */
static float sure_crop(float var, const long evec_dims[DIMS], complex float* evec_data, complex float* eptr, const long calreg_dims[DIMS], const complex float* calreg)
{
	assert(1 == md_calc_size(DIMS - 5, evec_dims + 5));
	assert(1 == md_calc_size(DIMS - 5, calreg_dims + 5));

	long num_maps = evec_dims[4];

	// Construct low-resolution image
	long im_dims[5];
	md_select_dims(5, 15, im_dims, evec_dims);

	complex float* im = md_alloc_sameplace(5, im_dims, CFL_SIZE, calreg);
	md_clear(5, im_dims, im, CFL_SIZE);
	// zero-pad the calibration region up to image size, then inverse FFT
	md_resize_center(5, im_dims, im, calreg_dims, calreg, CFL_SIZE);

	auto lop_fft_im = linop_fftc_create(5, im_dims, FFT_FLAGS);
	auto lop_fft_evec = linop_fftc_create(5, evec_dims, FFT_FLAGS);

	linop_adjoint(lop_fft_im, 5, im_dims, im, 5, im_dims, im);

	// Temporary vector for crop dimensions
	long cropdims[5];
	md_select_dims(5, 15, cropdims, calreg_dims);
	cropdims[4] = num_maps;

	// Eigenvectors (M)
	complex float* M = md_alloc_sameplace(5, evec_dims, CFL_SIZE, calreg);
	md_copy(5, evec_dims, M, evec_data, CFL_SIZE);

	// Temporary eigenvector holder to hold low resolution maps
	complex float* LM = md_alloc_sameplace(5, evec_dims, CFL_SIZE, calreg);

	// Temporary holder for projection calreg
	complex float* TC = md_alloc_sameplace(5, calreg_dims, CFL_SIZE, calreg);

	// Temporary holder to hold low resolution calib maps
	complex float* CM = md_alloc_sameplace(5, cropdims, CFL_SIZE, calreg);

	// Eigenvalues (W)
	long W_dims[5];
	md_select_dims(5, 23, W_dims, evec_dims);

	complex float* W = md_alloc_sameplace(5, W_dims, CFL_SIZE, calreg);
	md_copy(5, W_dims, W, eptr, CFL_SIZE);

	// Place holder for the inner product result
	complex float* ip = md_alloc_sameplace(5, W_dims, CFL_SIZE, calreg);

	// Place holder for the projection result
	complex float* proj = md_alloc_sameplace(5, im_dims, CFL_SIZE, calreg);

	// Place holder for divergence term
	long div_dims[5] = MD_INIT_ARRAY(5, 1);
	complex float* div = md_alloc_sameplace(5, div_dims, CFL_SIZE, calreg);

	// Calculating strides.
	long str1_ip[5];
	long str2_ip[5];
	long stro_ip[5];

	md_calc_strides(5, str1_ip, im_dims, CFL_SIZE);
	md_calc_strides(5, str2_ip, evec_dims, CFL_SIZE);
	md_calc_strides(5, stro_ip, W_dims, CFL_SIZE);

	long str1_proj[5];
	long str2_proj[5];
	long stro_proj[5];

	md_calc_strides(5, str1_proj, W_dims, CFL_SIZE);
	md_calc_strides(5, str2_proj, evec_dims, CFL_SIZE);
	md_calc_strides(5, stro_proj, im_dims, CFL_SIZE);

	long str1_div[5];
	long str2_div[5];
	long stro_div[5];

	md_calc_strides(5, str1_div, evec_dims, CFL_SIZE);
	md_calc_strides(5, str2_div, evec_dims, CFL_SIZE);
	md_calc_strides(5, stro_div, div_dims, CFL_SIZE);

	// broadcast dims for the strided fmac operations below
	long tdims_ip[5];
	long tdims_proj[5];

	for (int i = 0; i < 5; i++) {

		assert((im_dims[i] == evec_dims[i]) || (1 == im_dims[i]) || (1 == evec_dims[i]));
		assert((W_dims[i] == evec_dims[i]) || (1 == W_dims[i]) || (1 == evec_dims[i]));

		tdims_ip[i] = (1 == im_dims[i]) ? evec_dims[i] : im_dims[i];
		tdims_proj[i] = (1 == W_dims[i]) ? evec_dims[i] : W_dims[i];
	}

	// Starting parameter sweep with SURE.
	// Outer loop: halve and flip the step s until it is small enough.
	// Inner loop: walk c by s while the SURE-estimated MSE keeps improving.
	float mse = -1.;
	float old_mse = 0.;
	float s = -0.1;
	float c = 0.99;

	long ctr1 = 0;
	long ctr2 = 0;

	debug_printf(DP_INFO, "---------------------------------------------\n");
	debug_printf(DP_INFO, "| CTR1 | CTR2 |  Crop  |       Est. MSE     |\n");
	debug_printf(DP_INFO, "---------------------------------------------\n");

	while (fabs(s) > 1.E-4) {

		ctr1++;

		while (    (c < 0.999)
			&& (c > 0.001)
			&& (   (ctr2 <= 1)
			    || (mse < old_mse))) {

			ctr2++;

			// reset work buffers for this candidate threshold c
			md_clear(5, W_dims, ip, CFL_SIZE);
			md_clear(5, im_dims, proj, CFL_SIZE);
			md_clear(5, div_dims, div, CFL_SIZE);
			md_clear(5, evec_dims, M, CFL_SIZE);
			md_clear(5, evec_dims, LM, CFL_SIZE);
			md_clear(5, calreg_dims, TC, CFL_SIZE);
			md_copy(5, evec_dims, M, evec_data, CFL_SIZE);

			old_mse = mse;
			mse = 0.;

			// hard-threshold the maps at c, project the image onto them
			md_crop_weight(5, evec_dims, M, md_crop_thresh_fun, c, W);
			md_zfmacc2(5, tdims_ip, stro_ip, ip, str1_ip, im, str2_ip, M); // Projection.
			md_zfmac2(5, tdims_proj, stro_proj, proj, str1_proj, ip, str2_proj, M);

			linop_forward(lop_fft_im, 5, im_dims, proj, 5, im_dims, proj); // Low res proj img.
			md_resize_center(5, calreg_dims, TC, im_dims, proj, CFL_SIZE);
			md_resize_center(5, im_dims, proj, calreg_dims, TC, CFL_SIZE);
			linop_adjoint(lop_fft_im, 5, im_dims, proj, 5, im_dims, proj);

#if 1
			// data-fidelity term: || im - proj ||^2
			complex float* diff = md_alloc_sameplace(5, im_dims, CFL_SIZE, im);
			md_zsub(5, im_dims, diff, im, proj);
			mse += powf(md_znorm(5, im_dims, diff), 2);
			md_free(diff);
#else
			for (long jdx = 0; jdx < md_calc_size(5, im_dims); jdx++)
				mse += powf(cabsf(im[jdx] - proj[jdx]), 2.);
#endif

			linop_forward(lop_fft_evec, 5, evec_dims, LM, 5, evec_dims, M); // low-res maps .
			md_resize_center(5, cropdims, CM, evec_dims, LM, CFL_SIZE);
			md_resize_center(5, evec_dims, LM, cropdims, CM, CFL_SIZE);
			linop_adjoint(lop_fft_evec, 5, evec_dims, LM, 5, evec_dims, LM);

			md_zfmacc2(5, evec_dims, stro_div, div, str1_div, LM, str2_div, LM); // Calc SURE div using low res maps.

			complex float div_cpu;
			md_copy(1, MD_DIMS(1), &div_cpu, div, CFL_SIZE);

			// SURE penalty: 2 * var * divergence
			mse += 2. * var * crealf(div_cpu);

			if (ctr2 == 1)
				debug_printf(DP_INFO, "| %4ld | %4ld | %0.4f | %0.12e |\n", ctr1, ctr2, c, mse);
			else
				debug_printf(DP_INFO, "|      | %4ld | %0.4f | %0.12e |\n", ctr2, c, mse);

			c = c + s;
		}

		// step back to the best c seen, refine with half step in the
		// opposite direction
		c -= s;
		ctr2 = 0;
		s = -s / 2;
		c += s;
	}

	linop_free(lop_fft_im);
	linop_free(lop_fft_evec);

	// undo the last (untested) step
	c = c + s;

	debug_printf(DP_INFO, "---------------------------------------------\n");

	md_free(im);
	md_free(TC);
	md_free(CM);
	md_free(M);
	md_free(LM);
	md_free(W);
	md_free(ip);
	md_free(proj);
	md_free(div);

	debug_printf(DP_DEBUG1, "Calculated c: %.4f\n", c);

	return c;
}
/*
 * First ESPIRiT calibration step: compute calibration-matrix kernels from
 * the calibration region and turn them into the point-wise image-domain
 * covariance (Gram) matrices.
 *
 * conf        calibration configuration
 * cov_dims    dimensions of the output covariance array
 * imgcov      output image-domain covariance
 * SN, svals   number of / storage for singular values (diagnostics)
 * calreg_dims dimensions of the calibration region
 * data        calibration data
 */
void calone(const struct ecalib_conf* conf, const long cov_dims[4], complex float* imgcov, int SN, float svals[SN], const long calreg_dims[DIMS], const complex float* data)
{
	assert(1 == md_calc_size(DIMS - 5, calreg_dims + 5));

#if 1
	// default path: null-space kernels via calibration-matrix SVD/EVD
	long nskerns_dims[5];
	complex float* nskerns;
	compute_kernels(conf, nskerns_dims, &nskerns, SN, svals, calreg_dims, data);
#else
	// alternative (disabled): SPIRiT kernel construction
	long channels = calreg_dims[3];

	long kx = conf->kdims[0];
	long ky = conf->kdims[1];
	long kz = conf->kdims[2];

	long nskerns_dims[5] = { kx, ky, kz, channels, 0 };
	long N = md_calc_size(4, nskerns_dims);

	assert(N > 0);
	nskerns_dims[4] = N;

	complex float* nskerns = md_alloc(5, nskerns_dims, CFL_SIZE);

	long nr_kernels = channels;
	nskerns_dims[4] = channels;
	spirit_kernel(nskerns_dims, nskerns, calreg_dims, data);
#endif

	compute_imgcov(cov_dims, imgcov, nskerns_dims, nskerns);

	md_free(nskerns);
}
/* calculate point-wise maps
*
*/
/*
 * Point-wise eigen-decomposition of the image-domain covariance matrices.
 *
 * For every image voxel (optionally restricted by the mask msk), unpack the
 * packed-triangular covariance matrix, compute its eigen-decomposition, and
 * store the top `maps` eigenvectors in optr (and eigenvalues in eptr, if
 * non-NULL), largest eigenvalue first.
 *
 * out_dims   output dims; [3] = channels, [4] = maps
 * optr       output eigenvector maps
 * eptr       output eigenvalue maps (may be NULL)
 * imgcov2    packed covariance, channels*(channels+1)/2 matrices per voxel
 * msk_dims   mask dims (may be NULL; must match spatial dims if given)
 * msk        voxel mask (may be NULL: process all voxels)
 * orthiter   use orthogonal power iteration (eigen_herm3) instead of LAPACK
 * ecal_usegpu run the CUDA implementation (only if compiled with USE_CUDA)
 */
void eigenmaps(const long out_dims[DIMS], complex float* optr, complex float* eptr, const complex float* imgcov2, const long msk_dims[3], const bool* msk, bool orthiter, bool ecal_usegpu)
{
#ifdef USE_CUDA
	if (ecal_usegpu) {

		//FIXME cuda version should be able to return sensitivities for a subset of image-space points
		assert(!msk);
		eigenmapscu(out_dims, optr, eptr, imgcov2);
		return;
	}
#else
	assert(!ecal_usegpu);
#endif

	long channels = out_dims[3];
	long maps = out_dims[4];

	assert(DIMS >= 5);
	assert(1 == md_calc_size(DIMS - 5, out_dims + 5));
	assert(maps <= channels);

	long xx = out_dims[0];
	long yy = out_dims[1];
	long zz = out_dims[2];

	float scale = 1.; // scaling currently disabled (divides covariance below) — TODO confirm intended normalization

	if (msk_dims) {

		assert(msk_dims[0] == xx);
		assert(msk_dims[1] == yy);
		assert(msk_dims[2] == zz);
	}

	md_clear(5, out_dims, optr, CFL_SIZE);

	#pragma omp parallel for collapse(3)
	for (long k = 0; k < zz; k++) {

		for (long j = 0; j < yy; j++) {

			for (long i = 0; i < xx; i++) {

				if (!msk || msk[i + xx * (j + yy * k)]) {

					// per-voxel work buffers (VLA, channels is small)
					float val[channels];
					complex float cov[channels][channels];
					complex float tmp[channels * (channels + 1) / 2];

					// gather this voxel's packed covariance
					for (long l = 0; l < channels * (channels + 1) / 2; l++)
						tmp[l] = imgcov2[((l * zz + k) * yy + j) * xx + i] / scale;

					unpack_tri_matrix(channels, cov, tmp);

					if (orthiter)
						eigen_herm3(maps, channels, val, cov);
					else
						lapack_eig(channels, val, cov);

					// val/cov come out ascending — store largest first
					for (long u = 0; u < maps; u++) {

						long ru = (orthiter ? maps : channels) - 1 - u;

						for (long v = 0; v < channels; v++)
							optr[((((u * channels + v) * zz + k) * yy + j) * xx + i)] = cov[ru][v];

						if (NULL != eptr)
							eptr[((u * zz + k) * yy + j) * xx + i] = val[ru];
					}
				}
			}
		}
	}
}
/*
 * Second ESPIRiT calibration step: interpolate the low-resolution covariance
 * matrices to full image size (sinc zero-padding) and run the point-wise
 * eigen-decomposition to obtain sensitivity maps.
 *
 * out_dims dims of the output maps; [3] = channels, [4] = maps
 * out_data output sensitivity maps
 * emaps    output eigenvalue maps (may be NULL, see eigenmaps)
 * in_dims  dims of the low-resolution covariance input
 * in_data  low-resolution covariance (from calone/compute_imgcov)
 * msk_dims / msk optional voxel mask, forwarded to eigenmaps
 */
void caltwo(const struct ecalib_conf* conf, const long out_dims[DIMS], complex float* out_data, complex float* emaps, const long in_dims[4], complex float* in_data, const long msk_dims[3], const bool* msk)
{
	long xx = out_dims[0];
	long yy = out_dims[1];
	long zz = out_dims[2];

	long xh = in_dims[0];
	long yh = in_dims[1];
	long zh = in_dims[2];

	long channels = out_dims[3];
	long cosize = channels * (channels + 1) / 2;

	assert(DIMS >= 5);
	assert(1 == md_calc_size(DIMS - 5, out_dims + 5));
	assert(in_dims[3] == cosize);

	long cov_dims[4] = { xh, yh, zh, cosize };
	long covbig_dims[4] = { xx, yy, zz, cosize };

	// output must be at least as large as the input in every spatial dim;
	// non-singleton input dims must be even for centered zero-padding
	assert(((xx == 1) && (xh == 1)) || (xx >= xh));
	assert(((yy == 1) && (yh == 1)) || (yy >= yh));
	assert(((zz == 1) && (zh == 1)) || (zz >= zh));

	assert((1 == xh) || (0 == xh % 2));
	assert((1 == yh) || (0 == yh % 2));
	assert((1 == zh) || (0 == zh % 2));

	complex float* imgcov2 = md_alloc(4, covbig_dims, CFL_SIZE);

	debug_printf(DP_DEBUG1, "Resize...\n");

	sinc_zeropad(4, covbig_dims, imgcov2, cov_dims, in_data);

	debug_printf(DP_DEBUG1, "Point-wise eigen-decomposition...\n");

	eigenmaps(out_dims, out_data, emaps, imgcov2, msk_dims, msk, conf->orthiter, conf->usegpu);

	md_free(imgcov2);
}
/* Compute the covariance-array dimensions for the first calibration step:
 * each spatial dim is twice the kernel size (or 1 for a singleton kernel
 * dim); the last dim holds the packed upper triangle of the channel
 * covariance matrix. */
void calone_dims(const struct ecalib_conf* conf, long cov_dims[4], long channels)
{
	for (int i = 0; i < 3; i++) {

		long k = conf->kdims[i];

		cov_dims[i] = (1 == k) ? 1 : (2 * k);
	}

	cov_dims[3] = channels * (channels + 1) / 2;
}
const struct ecalib_conf ecalib_defaults = { { 6, 6, 6 }, 0.001, -1, -1., false, false, 0.8, true, false, -1., false, true, -1., false };
/*
 * Full ESPIRiT calibration: kernels -> covariance -> eigen-decomposition,
 * followed by optional intensity normalization, (SURE-based) cropping and
 * phase fixing.
 *
 * conf        calibration configuration
 * out_dims    output dims; [3] = channels, [4] = maps
 * out_data    output sensitivity maps
 * eptr        output eigenvalue maps
 * SN, svals   number of / storage for singular values (diagnostics)
 * calreg_dims / data calibration region
 * msk_dims / msk optional voxel mask, forwarded to caltwo
 */
void calib2(const struct ecalib_conf* conf, const long out_dims[DIMS], complex float* out_data, complex float* eptr, int SN, float svals[SN], const long calreg_dims[DIMS], const complex float* data, const long msk_dims[3], const bool* msk)
{
	long channels = calreg_dims[3];
	long maps = out_dims[4];

	assert(calreg_dims[3] == out_dims[3]);
	assert(maps <= channels);

	assert(1 == md_calc_size(DIMS - 5, out_dims + 5));
	assert(1 == md_calc_size(DIMS - 5, calreg_dims + 5));

	complex float rot[channels][channels];

	if (conf->rotphase) {

		// rotate the phase with respect to the first principal component
		long scc_dims[DIMS] = MD_INIT_ARRAY(DIMS, 1);
		scc_dims[COIL_DIM] = channels;
		scc_dims[MAPS_DIM] = channels;
		scc(scc_dims, &rot[0][0], calreg_dims, data);

	} else {

		// identity: no phase rotation
		for (int i = 0; i < channels; i++)
			for (int j = 0; j < channels; j++)
				rot[i][j] = (i == j) ? 1. : 0.;
	}

	long cov_dims[4];

	calone_dims(conf, cov_dims, channels);
	complex float* imgcov = md_alloc(4, cov_dims, CFL_SIZE);

	calone(conf, cov_dims, imgcov, SN, svals, calreg_dims, data);
	caltwo(conf, out_dims, out_data, eptr, cov_dims, imgcov, msk_dims, msk);

	/* Intensity and phase normalization similar as proposed
	 * for adaptive combine (Walsh's method) in
	 * Griswold et al., ISMRM 10:2410 (2002)
	 */

	if (conf->intensity) {

		debug_printf(DP_DEBUG1, "Normalize...\n");

		/* I think the reason this works is because inhomogeneity usually
		 * comes from only a few coil elements which are close. The l1-norm
		 * is more resilient against such outliers. -- Martin
		 */

		normalizel1(DIMS, COIL_FLAG, out_dims, out_data);
		md_zsmul(DIMS, out_dims, out_data, out_data, sqrtf((float)channels));
	}

	const complex float* data_tmp = data;

#ifdef USE_CUDA
	// SURE crop runs where the maps live; move data to GPU if requested
	if (conf->usegpu)
		data_tmp = md_gpu_move(DIMS, calreg_dims, data, CFL_SIZE);
#endif

	// use the configured crop threshold, or estimate one via SURE
	float c = (conf->crop >= 0.) ? conf->crop : sure_crop(conf->var, out_dims, out_data, eptr, calreg_dims, data_tmp);

	if (data_tmp != data)
		md_free(data_tmp);

	debug_printf(DP_DEBUG1, "Crop maps... (c = %.2f)\n", c);

	crop_sens(out_dims, out_data, conf->softcrop, c, eptr);

	debug_printf(DP_DEBUG1, "Fix phase...\n");

	fixphase2(DIMS, out_dims, COIL_DIM, rot[0], out_data, out_data);

	md_free(imgcov);
}
/*
 * Convenience wrapper: run the full ESPIRiT calibration (calib2) without a
 * voxel mask (all image-space points are processed).
 */
void calib(const struct ecalib_conf* conf, const long out_dims[DIMS], complex float* out_data, complex float* eptr, int SN, float svals[SN], const long calreg_dims[DIMS], const complex float* data)
{
	calib2(conf, out_dims, out_data, eptr, SN, svals, calreg_dims, data, NULL, NULL);
}
/*
 * Add scaled Gaussian noise to each column of vecs and renormalize.
 *
 * dims[0] is the column length, dims[1] the number of columns. Each noise
 * column is scaled so its norm equals amt before being added; afterwards
 * every perturbed column is rescaled to unit norm.
 */
static void perturb(const long dims[2], complex float* vecs, float amt)
{
	complex float* noise = md_alloc(2, dims, CFL_SIZE);

	md_gaussian_rand(2, dims, noise);

	// scale each noise column to norm == amt
	for (long j = 0; j < dims[1]; j++) {

		float nrm = md_znorm(1, dims, noise + j * dims[0]);
		complex float val = amt / nrm;
		md_zsmul(1, dims, noise + j * dims[0], noise + j * dims[0], val);
	}

	md_zadd(2, dims, vecs, vecs, noise);

	// renormalize each perturbed column to unit norm
	for (long j = 0; j < dims[1]; j++) {

		float nrm = md_znorm(1, dims, vecs + j * dims[0]);
		complex float val = 1 / nrm;
		md_zsmul(1, dims, vecs + j * dims[0], vecs + j * dims[0], val);
	}

	md_free(noise);
}
/*
 * Determine how many null-space kernels to keep, based on exactly one of
 * three mutually-exclusive configuration options: a fixed count
 * (conf->numsv), a percentage of N (conf->percentsv), or a singular-value
 * threshold (conf->threshold). The unused options must be left at -1/-1.
 *
 * conf configuration (selection options checked by assert)
 * N    total number of kernels / singular values
 * val  singular values, descending (val[0] is the largest)
 *
 * Returns the number n of kernels to keep (0 <= n <= N).
 */
static int number_of_kernels(const struct ecalib_conf* conf, int N, const float val[N])
{
	int n = 0;

	// A non-positive largest singular value means the calibration data
	// carries no signal. Check this up front, before any of the
	// val[i] / val[0] divisions below can divide by zero.
	if (val[0] <= 0.)
		error("No signal.\n");

	if (-1 != conf->numsv) {

		n = conf->numsv;
		assert(-1. == conf->percentsv);
		assert(-1. == conf->threshold);

	} else if (conf->percentsv != -1.) {

		n = N * conf->percentsv / 100.;
		assert(-1 == conf->numsv);
		assert(-1. == conf->threshold);

	} else {

		assert(-1 == conf->numsv);
		assert(-1. == conf->percentsv);

		// count singular values above the relative threshold
		for (int i = 0; i < N; i++)
			if (val[i] / val[0] > sqrtf(conf->threshold))
				n++;
	}

	// N is an int: %d (the previous %ld was undefined behavior)
	debug_printf(DP_DEBUG1, "Using %d/%d kernels (%.2f%%, last SV: %f%s).\n", n, N, (float)n / (float)N * 100., (n > 0) ? (val[n - 1] / val[0]) : 1., conf->weighting ? ", weighted" : "");

	float tr = 0.;
	for (int i = 0; i < N; i++) {

		tr += powf(val[i], 2.);
		debug_printf(DP_DEBUG3, "SVALS %f (%f)\n", val[i], val[i] / val[0]);
	}

	debug_printf(DP_DEBUG3, "\nTRACE: %f (%f)\n", tr, tr / (float)N);

	assert(n <= N);
	return n;
}
/*
 * Build the calibration matrix from the calibration region and compute its
 * (null-space) kernels, either via SVD (CALMAT_SVD) or via an
 * eigen-decomposition of the covariance (default path).
 *
 * nskerns_dims receives the kernel array dims; [4] is set to the number of
 * kernels selected by number_of_kernels (or its complement under FLIP).
 * *nskerns_ptr receives a newly allocated kernel array (caller frees).
 * SN must equal the total kernel count N; val receives singular values.
 */
void compute_kernels(const struct ecalib_conf* conf, long nskerns_dims[5], complex float** nskerns_ptr, int SN, float val[SN], const long caldims[DIMS], const complex float* caldata)
{
	assert(1 == md_calc_size(DIMS - 5, caldims + 5));

	nskerns_dims[0] = conf->kdims[0];
	nskerns_dims[1] = conf->kdims[1];
	nskerns_dims[2] = conf->kdims[2];
	nskerns_dims[3] = caldims[3];

	long N = md_calc_size(4, nskerns_dims);

	assert(N > 0);
	nskerns_dims[4] = N;

	complex float* nskerns = md_alloc(5, nskerns_dims, CFL_SIZE);
	*nskerns_ptr = nskerns;

	PTR_ALLOC(complex float[N][N], vec);

	assert(NULL != val);
	assert(SN == N);

	debug_printf(DP_DEBUG1, "Build calibration matrix and SVD...\n");

#ifdef CALMAT_SVD
	calmat_svd(conf->kdims, N, *vec, val, caldims, caldata);

	if (conf->weighting)
		soft_weight_singular_vectors(N, conf->var, conf->kdims, caldims, val, val);

	// copy right-singular vectors into the kernel array (transposed),
	// optionally weighted; FLIP reverses the kernel order
	for (int i = 0; i < N; i++)
		for (int j = 0; j < N; j++)
#ifndef FLIP
			nskerns[i * N + j] = ((*vec)[j][i]) * (conf->weighting ? val[i] : 1.);
#else
			nskerns[i * N + j] = ((*vec)[j][N - 1 - i]) * (conf->weighting ? val[N - 1 - i] : 1.);
#endif
#else
	covariance_function(conf->kdims, N, *vec, caldims, caldata);

	debug_printf(DP_DEBUG1, "Eigen decomposition... (size: %ld)\n", N);

	// we could apply Nystroem method here to speed it up

	float tmp_val[N];
	lapack_eig(N, tmp_val, *vec);

	// reverse and square root, test for smaller null to avoid NaNs
	for (int i = 0; i < N; i++)
		val[i] = (tmp_val[N - 1 - i] < 0.) ? 0. : sqrtf(tmp_val[N - 1 - i]);

	if (conf->weighting)
		soft_weight_singular_vectors(N, conf-> var, conf->kdims, caldims, val, val);

	for (int i = 0; i < N; i++)
		for (int j = 0; j < N; j++)
#ifndef FLIP
			nskerns[i * N + j] = (*vec)[N - 1 - i][j] * (conf->weighting ? val[i] : 1.); // flip
#else
			nskerns[i * N + j] = (*vec)[i][j] * (conf->weighting ? val[N - 1 - i] : 1.); // flip
#endif
#endif

	// optional random perturbation of the kernels (unit-norm preserved)
	if (conf->perturb > 0.) {

		long dims[2] = { N, N };
		perturb(dims, nskerns, conf->perturb);
	}

#ifndef FLIP
	nskerns_dims[4] = number_of_kernels(conf, N, val);
#else
	nskerns_dims[4] = N - number_of_kernels(conf, N, val);
#endif

	PTR_FREE(vec);
}
/*
 * Transform the k-space kernels to image space and compute, per voxel, the
 * packed Gram (covariance) matrix over channels.
 *
 * cov_dims     output dims; [3] = channels*(channels+1)/2 (packed triangle)
 * imgcov       output covariance, one packed matrix per voxel
 * nskerns_dims kernel dims: kx, ky, kz, channels, nr_kernels
 * nskerns      null-space kernels from compute_kernels
 */
void compute_imgcov(const long cov_dims[4], complex float* imgcov, const long nskerns_dims[5], const complex float* nskerns)
{
	debug_printf(DP_DEBUG1, "Zeropad...\n");

	long xh = cov_dims[0];
	long yh = cov_dims[1];
	long zh = cov_dims[2];

	long kx = nskerns_dims[0];
	long ky = nskerns_dims[1];
	long kz = nskerns_dims[2];

	long channels = nskerns_dims[3];
	long nr_kernels = nskerns_dims[4];

	long imgkern_dims[5] = { xh, yh, zh, channels, nr_kernels };

	complex float* imgkern1 = md_alloc(5, imgkern_dims, CFL_SIZE);
	complex float* imgkern2 = md_alloc(5, imgkern_dims, CFL_SIZE);

	// zero-pad kernels to the covariance grid size
	md_resize_center(5, imgkern_dims, imgkern1, nskerns_dims, nskerns, CFL_SIZE);

	// resort array: the strided inverse FFT below simultaneously reorders
	// the data from (x,y,z,channel,kernel) to (kernel,channel,x,y,z) so
	// each voxel's channels-by-kernels matrix is contiguous

	debug_printf(DP_DEBUG1, "FFT (juggling)...\n");

	long istr[5];
	long mstr[5];

	long idim[5] = { xh, yh, zh, channels, nr_kernels };
	long mdim[5] = { nr_kernels, channels, xh, yh, zh };

	md_calc_strides(5, istr, idim, CFL_SIZE);
	md_calc_strides(5, mstr, mdim, CFL_SIZE);

	long m2str[5] = { mstr[2], mstr[3], mstr[4], mstr[1], mstr[0] };

	ifftmod(5, imgkern_dims, FFT_FLAGS, imgkern1, imgkern1);
	ifft2(5, imgkern_dims, FFT_FLAGS, m2str, imgkern2, istr, imgkern1);

	float scalesq = (kx * ky * kz) * (xh * yh * zh); // second part for FFT scaling

	md_free(imgkern1);

	debug_printf(DP_DEBUG1, "Calculate Gram matrix...\n");

	int cosize = channels * (channels + 1) / 2;

	assert(cov_dims[3] == cosize);

	#pragma omp parallel for collapse(3)
	for (int k = 0; k < zh; k++) {

		for (int j = 0; j < yh; j++) {

			for (int i = 0; i < xh; i++) {

				complex float gram[cosize];
				gram_matrix2(channels, gram, nr_kernels, (const complex float (*)[nr_kernels])(imgkern2 + ((k * yh + j) * xh + i) * (channels * nr_kernels)));

#ifdef FLIP
				// add (scaled) identity matrix
				// (inner i/j deliberately shadow the voxel indices;
				// the outer values are unchanged afterwards)
				for (int i = 0, l = 0; i < channels; i++)
					for (int j = 0; j <= i; j++, l++)
						gram[l] = ((i == j) ? (kx * ky * kz) : 0.) - gram[l];
#endif
				// store the packed matrix, normalized by the FFT scale
				for (int l = 0; l < cosize; l++)
					imgcov[(((l * zh) + k) * yh + j) * xh + i] = gram[l] / scalesq;
			}
		}
	}

	md_free(imgkern2);
}
|
GB_unaryop__ainv_int8_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int8_int32
// op(A') function: GB_tran__ainv_int8_int32
// C type: int8_t
// A type: int32_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT8 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Computes Cx [p] = -((int8_t) Ax [p]) for all p (cast first, then negate,
// per the GB_CAST_OP / GB_CASTING / GB_OP macros above), parallelized over
// nthreads OpenMP threads. Returns GrB_NO_VALUE when this operator/type
// combination is disabled by GB_DISABLE.

GrB_Info GB_unop__ainv_int8_int32
(
    int8_t *restrict Cx,            // output array, anz entries
    const int32_t *restrict Ax,     // input array, anz entries
    int64_t anz,                    // number of entries
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The body is the shared transpose template GB_unaryop_transpose.c,
// specialized by the GB_* macros defined above (phase 2 of 2). Returns
// GrB_NO_VALUE when this operator/type combination is disabled.

GrB_Info GB_tran__ainv_int8_int32
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix (transposed logically)
    int64_t *restrict *Rowcounts,       // per-slice row counts
    GBI_single_iterator Iter,           // iterator over A
    const int64_t *restrict A_slice,    // slice boundaries
    int naslice                         // number of slices
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
Example_loop.1.c | /*
* @@name: loop.2c
* @@type: C
* @@compilable: yes
* @@linkable: yes
* @@expect: success
* @@version: omp_5.0
*/
#include <stdio.h>
#define N 100
// SAXPY example for the OpenMP 5.0 loop construct: the loop iterations are
// independent, so the implementation may run them in any order across the
// threads of the enclosing parallel region.
int main()
{
   float x[N], y[N];
   float a = 2.0;

   for(int i=0;i<N;i++){ x[i]=i; y[i]=0; } // initialize

   #pragma omp parallel
   {
      // loop construct: iterations may execute concurrently
      #pragma omp loop
      for(int i = 0; i < N; ++i)  y[i] = a*x[i] + y[i];
   }

   // verify the last element: y[N-1] should be a*(N-1) = 2*(N-1)
   if(y[N-1] != (N-1)*2.0) printf("Error: 2*(N-1) != y[N-1]=%f",y[N-1]);
}
|
pr66820.c | /* PR middle-end/66820 */
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */
void bar (char *);
/* Regression test for PR middle-end/66820: a compile-only check that string
   builtins operating on a per-iteration local array inside an OpenMP
   parallel-for body do not ICE. Do not restructure — the exact shape of the
   loop body is what the test exercises. */
void
foo (char **x)
{
  #pragma omp parallel for
  for (int i = 0; i < 16; i++)
    {
      char y[50];                   /* private per-iteration buffer */
      __builtin_strcpy (y, x[i]);
      __builtin_strcat (y, "foo");
      bar (y);
    }
}
|
UnitLocality.h | #ifndef DASH__UTIL__UNIT_LOCALITY_H__INCLUDED
#define DASH__UTIL__UNIT_LOCALITY_H__INCLUDED
#include <dash/util/Locality.h>
#include <dash/util/LocalityDomain.h>
#include <dash/util/Config.h>
#include <dash/algorithm/internal/String.h>
#include <dash/dart/if/dart_types.h>
#include <dash/dart/if/dart_locality.h>
#include <dash/Exception.h>
#include <dash/Team.h>
#include <string>
#include <vector>
#include <unordered_map>
#include <utility>
#include <iterator>
#include <algorithm>
namespace dash {
namespace util {
/**
 * Wrapper of a single \c dart_unit_locality_t object.
 *
 * Provides typed access to a unit's hardware information (cores, threads,
 * CPU/NUMA ids, clock rates) and to its position in the DART locality
 * domain hierarchy. Copyable; the wrapped DART structures are owned by the
 * DART runtime, not by this class.
 */
class UnitLocality
{
private:
  typedef UnitLocality self_t;

public:
  /**
   * Resolve the locality information of \c unit in \c team and locate the
   * unit's domain and its enclosing node domain in the team's locality
   * hierarchy. Asserts on any DART error.
   */
  UnitLocality(
    const dash::Team & team,
    team_unit_t        unit)
  : _team(&team)
  {
    DASH_ASSERT_RETURNS(
      dart_unit_locality(
        _team->dart_id(), unit, &_unit_locality),
      DART_OK);
    dart_domain_locality_t * team_domain;
    DASH_ASSERT_RETURNS(
      dart_domain_team_locality(
        team.dart_id(), ".", &team_domain),
      DART_OK);
    DASH_ASSERT_RETURNS(
      dart_domain_find(
        team_domain, _unit_locality->domain_tag, &_unit_domain),
      DART_OK);
    // walk up the hierarchy until the node-level domain is reached
    dart_domain_locality_t * node_locality = _unit_domain;
    while (node_locality->scope > DART_LOCALITY_SCOPE_NODE) {
      node_locality = node_locality->parent;
    }
    _node_domain = dash::util::LocalityDomain(node_locality);
  }

  /// Locality of a global unit id, resolved in the global team.
  UnitLocality(
    global_unit_t unit)
  : UnitLocality(dash::Team::All(), team_unit_t(unit))
  { }

  /// Locality of the calling unit in the global team.
  UnitLocality()
  : UnitLocality(dash::Team::All(), dash::Team::All().myid())
  { }

  UnitLocality(const UnitLocality &)             = default;
  UnitLocality & operator=(const UnitLocality &) = default;

  /// Hardware information of the unit (const access).
  inline const dart_hwinfo_t & hwinfo() const
  {
    DASH_ASSERT(nullptr != _unit_locality);
    return _unit_locality->hwinfo;
  }

  /// Hardware information of the unit (mutable access).
  inline dart_hwinfo_t & hwinfo()
  {
    DASH_ASSERT(nullptr != _unit_locality);
    return _unit_locality->hwinfo;
  }

  /// The unit's locality domain (mutable access).
  inline dart_domain_locality_t & domain()
  {
    DASH_ASSERT(nullptr != _unit_locality);
    return *_unit_domain;
  }

  /// The unit's locality domain (const access).
  inline const dart_domain_locality_t & domain() const
  {
    DASH_ASSERT(nullptr != _unit_locality);
    return *_unit_domain;
  }

  /// The team this locality was resolved in; Team::Null() if unset.
  inline const dash::Team & team() const
  {
    if (nullptr == _team) {
      return dash::Team::Null();
    }
    return *_team;
  }

  /// The unit's id within its team, or UNDEFINED_TEAM_UNIT_ID.
  inline team_unit_t unit_id() const
  {
    return nullptr == _unit_locality
           ? UNDEFINED_TEAM_UNIT_ID
           : team_unit_t(_unit_locality->unit);
  }

  /// The node-level locality domain containing this unit.
  inline dash::util::LocalityDomain & node_domain()
  {
    return _node_domain;
  }

  /// The immediate parent domain of the unit's domain.
  inline dash::util::LocalityDomain parent()
  {
    return dash::util::LocalityDomain(*_unit_domain->parent);
  }

  /**
   * Closest ancestor domain of the unit at or above the given scope.
   * Throws InvalidArgument if no such domain exists.
   *
   * NOTE(review): the loop starts at rlevel == hwinfo.num_scopes, which
   * reads scopes[num_scopes] on the first iteration — confirm against the
   * DART definition whether the scopes array has num_scopes or
   * num_scopes + 1 valid entries.
   */
  inline dash::util::LocalityDomain parent_in_scope(
    dash::util::Locality::Scope scope)
  {
    if (scope == dash::util::Locality::Scope::Node) {
      return node_domain();
    }
    dart_domain_locality_t * parent_domain = _unit_domain;
    for (int rlevel = _unit_locality->hwinfo.num_scopes;
         rlevel >= 0;
         rlevel--) {
      if (parent_domain == nullptr) {
        DASH_THROW(
          dash::exception::InvalidArgument,
          "Unit domain is undefined");
      }
      if (static_cast<int>(_unit_locality->hwinfo.scopes[rlevel].scope) <=
          static_cast<int>(scope)) {
        return dash::util::LocalityDomain(*parent_domain);
      }
      parent_domain = parent_domain->parent;
    }
    DASH_THROW(
      dash::exception::InvalidArgument,
      "Could not find parent domain of unit in scope " << scope);
  }

  /// Hierarchical domain tag (e.g. ".0.1.2") of the unit's domain.
  inline std::string domain_tag() const
  {
    DASH_ASSERT(nullptr != _unit_locality);
    return _unit_domain->domain_tag;
  }

  /// Host name the unit runs on (asserts locality is resolved).
  inline std::string host() const
  {
    DASH_ASSERT(nullptr != _unit_locality);
    return _unit_locality->hwinfo.host;
  }

  // NOTE(review): unbounded strcpy into a DART-owned buffer; the buffer
  // capacity is defined by DART and not checked here — confirm tag length
  // limits at call sites.
  inline void set_domain_tag(
    const std::string & tag)
  {
    strcpy(_unit_domain->domain_tag, tag.c_str());
  }

  // NOTE(review): same unbounded strcpy caveat as set_domain_tag.
  inline void set_host(
    const std::string & hostname)
  {
    strcpy(_unit_locality->hwinfo.host, hostname.c_str());
  }

  /// Number of physical cores assigned to the unit.
  inline int num_cores() const
  {
    DASH_ASSERT(nullptr != _unit_locality);
    return (_unit_locality->hwinfo.num_cores);
  }

  /// Minimum threads per core (>= 1), or -1 if locality is unresolved.
  inline int min_threads()
  {
    return (_unit_locality == nullptr)
           ? -1 : std::max<int>(_unit_locality->hwinfo.min_threads, 1);
  }

  /// Maximum threads per core (>= 1), or -1 if locality is unresolved.
  inline int max_threads()
  {
    return (_unit_locality == nullptr)
           ? -1 : std::max<int>(_unit_locality->hwinfo.max_threads, 1);
  }

  /// Threads per core honoring the DASH_MAX_SMT configuration key.
  inline int num_threads() const
  {
    DASH_ASSERT(nullptr != _unit_locality);
    return (dash::util::Config::get<bool>("DASH_MAX_SMT")
            ? _unit_locality->hwinfo.max_threads
            : _unit_locality->hwinfo.min_threads);
  }

  /// Number of NUMA domains in the unit's enclosing super-NUMA domain.
  inline int num_numa() const
  {
    // ascend until the first domain above NUMA scope
    dart_domain_locality_t * dom = _unit_domain;
    while (dom->scope >= DART_LOCALITY_SCOPE_NUMA) {
      dom = dom->parent;
    }
    return dom->num_domains;
  }

  /// NUMA domain id of the unit, or -1 if locality is unresolved.
  inline int numa_id() const
  {
    return (nullptr == _unit_locality ? -1 : _unit_locality->hwinfo.numa_id);
  }

  /// CPU id of the unit, or -1 if locality is unresolved.
  inline int cpu_id() const
  {
    return (nullptr == _unit_locality ? -1 : _unit_locality->hwinfo.cpu_id);
  }

  /// Maximum CPU clock rate in MHz (asserts locality is resolved).
  inline int cpu_mhz() const
  {
    DASH_ASSERT(nullptr != _unit_locality);
    return (_unit_locality->hwinfo.max_cpu_mhz);
  }

  /// Maximum shared-memory bandwidth in MB/s.
  inline int max_shmem_mbps() const
  {
    DASH_ASSERT(nullptr != _unit_locality);
    return (_unit_locality->hwinfo.max_shmem_mbps);
  }

  /// Maximum CPU clock rate in MHz (>= 1), or -1 if unresolved.
  inline int max_cpu_mhz()
  {
    return (_unit_locality == nullptr)
           ? -1 : std::max<int>(_unit_locality->hwinfo.max_cpu_mhz, 1);
  }

  /// Minimum CPU clock rate in MHz (>= 1), or -1 if unresolved.
  inline int min_cpu_mhz()
  {
    return (_unit_locality == nullptr)
           ? -1 : std::max<int>(_unit_locality->hwinfo.min_cpu_mhz, 1);
  }

  /// Cache line size in bytes for the given level; defaults to 64.
  inline int cache_line_size(int cache_level)
  {
    return (_unit_locality == nullptr)
           ? 64 : std::max<int>(
                    _unit_locality->hwinfo.cache_line_sizes[cache_level],
                    64);
  }

  /// Host name of the unit, or "" if locality is unresolved.
  inline std::string hostname()
  {
    return (_unit_locality == nullptr) ? "" : _unit_locality->hwinfo.host;
  }

  /**
   * Number of threads currently available to the active unit.
   *
   * The returned value is calculated from unit locality data and hardware
   * specifications and can, for example, be used to set the \c num_threads
   * parameter of OpenMP sections:
   *
   * \code
   *   #ifdef DASH_ENABLE_OPENMP
   *     auto n_threads = dash::util::Locality::NumUnitDomainThreads();
   *     if (n_threads > 1) {
   *       #pragma omp parallel num_threads(n_threads) private(t_id)
   *       {
   *         // ...
   *       }
   *   #endif
   * \endcode
   *
   * The following configuration keys affect the number of available
   * threads:
   *
   * - <tt>DASH_DISABLE_THREADS</tt>:
   *   If set, disables multi-threading at unit scope and this method
   *   returns 1.
   * - <tt>DASH_MAX_SMT</tt>:
   *   If set, virtual SMT CPUs (hyperthreads) instead of physical cores
   *   are used to determine available threads.
   * - <tt>DASH_MAX_UNIT_THREADS</tt>:
   *   Specifies the maximum number of threads available to a single
   *   unit.
   *
   * Note that these settings may differ between hosts.
   *
   * Example for MPI:
   *
   * <tt>
   *   mpirun -host node.0 -env DASH_MAX_UNIT_THREADS 4 -n 16 myprogram
   *        : -host node.1 -env DASH_MAX_UNIT_THREADS 2 -n 32 myprogram
   * </tt>
   *
   * The DASH configuration can also be changed at run time with the
   * \c dash::util::Config interface.
   *
   * \see dash::util::Config
   * \see dash::util::TeamLocality
   *
   */
  inline int num_domain_threads()
  {
    auto n_threads = num_cores();
    if (dash::util::Config::get<bool>("DASH_DISABLE_THREADS")) {
      // Threads disabled in unit scope:
      n_threads = 1;
    } else if (dash::util::Config::get<bool>("DASH_MAX_SMT")) {
      // Configured to use SMT (hyperthreads):
      n_threads *= max_threads();
    } else {
      // Start one thread on every physical core assigned to this unit:
      n_threads *= min_threads();
    }
    if (dash::util::Config::is_set("DASH_MAX_UNIT_THREADS")) {
      n_threads = std::min(dash::util::Config::get<int>(
                             "DASH_MAX_UNIT_THREADS"),
                           n_threads);
    }
    return n_threads;
  }

private:
  // non-owning; DART runtime owns the locality structures
  const dash::Team         * _team          = nullptr;
  dart_unit_locality_t     * _unit_locality = nullptr;
  dart_domain_locality_t   * _unit_domain   = nullptr;
  dash::util::LocalityDomain _node_domain;

}; // class UnitLocality
} // namespace util
} // namespace dash
#endif // DASH__UTIL__UNIT_LOCALITY_H__INCLUDED
|
imag_self_energy_with_g.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <stdio.h>
#include <stdlib.h>
#include <phonoc_array.h>
#include <phonoc_utils.h>
#include <phonoc_const.h>
#include <phonon3_h/imag_self_energy_with_g.h>
#include <triplet_h/triplet.h>
/* File-local helper prototypes. */

/* Computes one triplet's band-resolved ("detailed") imaginary self-energy
   contribution; see definition below. */
static void
detailed_imag_self_energy_at_triplet(double *detailed_imag_self_energy,
                                     double *imag_self_energy,
                                     const int num_band0,
                                     const int num_band,
                                     const double *fc3_normal_squared,
                                     const double *frequencies,
                                     const int *triplets,
                                     const double *g1,
                                     const double *g2_3,
                                     const char *g_zero,
                                     const double *temperatures,
                                     const int num_temps,
                                     const double cutoff_frequency);

/* Accumulates the detailed self-energy over one (num_band x num_band)
   block for T > 0; returns the block sum. */
static double
collect_detailed_imag_self_energy(double *imag_self_energy,
                                  const int num_band,
                                  const double *fc3_normal_squared,
                                  const double *n1,
                                  const double *n2,
                                  const double *g1,
                                  const double *g2_3,
                                  const char *g_zero);

/* Same as above for T = 0; only the g (g1) channel contributes. */
static double
collect_detailed_imag_self_energy_0K(double *imag_self_energy,
                                     const int num_band,
                                     const double *fc3_normal_squared,
                                     const double *n1,
                                     const double *n2,
                                     const double *g,
                                     const char *g_zero);

/* Fills n1/n2 with Bose-Einstein occupations for the second and third
   phonons of a triplet; bands at or below cutoff_frequency are set to -1. */
static void set_occupations(double *n1,
                            double *n2,
                            const int num_band,
                            const double temperature,
                            const int *triplets,
                            const double *frequencies,
                            const double cutoff_frequency);
/* Sums the imaginary part of the phonon self-energy over all triplets.
 *
 * imag_self_energy : out, length num_band0; total per band.
 * fc3_normal_squared : shape (num_triplets, num_band0, num_band, num_band),
 *   flattened; dims[] provides the sizes used below.
 * g : two stacked blocks of integration weights, each
 *   num_triplets * num_band_prod long; the second block (offset by
 *   num_triplets * num_band_prod) is passed to the worker as g2_3.
 * g_zero : per-element flags; nonzero entries are skipped (collected into
 *   a compact index list by ise_set_g_pos()).
 * temperature / cutoff_frequency : forwarded to the per-triplet worker.
 */
void ise_get_imag_self_energy_at_bands_with_g(double *imag_self_energy,
                                              const Darray *fc3_normal_squared,
                                              const double *frequencies,
                                              const int *triplets,
                                              const int *weights,
                                              const double *g,
                                              const char *g_zero,
                                              const double temperature,
                                              const double cutoff_frequency)
{
  int i, j, num_triplets, num_band0, num_band, num_band_prod, num_g_pos;
  int (*g_pos)[4];
  double *ise;

  g_pos = NULL;
  ise = NULL;

  num_triplets = fc3_normal_squared->dims[0];
  num_band0 = fc3_normal_squared->dims[1];
  num_band = fc3_normal_squared->dims[2];
  num_band_prod = num_band0 * num_band * num_band;
  /* Per-triplet partial results; reduced serially after the parallel loop. */
  ise = (double*)malloc(sizeof(double) * num_triplets * num_band0);

#pragma omp parallel for private(num_g_pos, j, g_pos)
  for (i = 0; i < num_triplets; i++) {
    /* Each thread allocates its own scratch list of non-zero elements. */
    g_pos = (int(*)[4])malloc(sizeof(int[4]) * num_band_prod);
    num_g_pos = ise_set_g_pos(g_pos,
                              num_band0,
                              num_band,
                              g_zero + i * num_band_prod);

    ise_imag_self_energy_at_triplet(
      ise + i * num_band0,
      num_band0,
      num_band,
      fc3_normal_squared->data + i * num_band_prod,
      frequencies,
      triplets + i * 3,
      weights[i],
      g + i * num_band_prod,                    /* g1 block */
      g + (i + num_triplets) * num_band_prod,   /* g2_3 block */
      g_pos,
      num_g_pos,
      &temperature,  /* single temperature */
      1,
      cutoff_frequency,
      0);            /* openmp_at_bands = 0 */

    free(g_pos);
    g_pos = NULL;
  }

  /* Serial reduction over triplets into the per-band totals. */
  for (i = 0; i < num_band0; i++) {
    imag_self_energy[i] = 0;
  }
  for (i = 0; i < num_triplets; i++) {
    for (j = 0; j < num_band0; j++) {
      imag_self_energy[j] += ise[i * num_band0 + j];
    }
  }

  free(ise);
  ise = NULL;
}
/* Computes the band- and triplet-resolved ("detailed") imaginary self-energy
 * and, per band, the totals split into two groups decided by tpl_is_N() --
 * presumably Normal vs Umklapp processes (TODO confirm against triplet.h).
 *
 * detailed_imag_self_energy : out, same shape as fc3_normal_squared.
 * imag_self_energy_N / imag_self_energy_U : out, length num_band0 each;
 *   weighted sums over triplets where tpl_is_N() is true / false.
 * Note: the triplet weights are applied only in the final N/U reduction,
 * not in the per-triplet ise[] values.
 */
void ise_get_detailed_imag_self_energy_at_bands_with_g
(double *detailed_imag_self_energy,
 double *imag_self_energy_N,
 double *imag_self_energy_U,
 const Darray *fc3_normal_squared,
 const double *frequencies,
 const int *triplets,
 const int *weights,
 const int *grid_address,
 const double *g,
 const char *g_zero,
 const double temperature,
 const double cutoff_frequency)
{
  double *ise;
  int i, j, num_triplets, num_band0, num_band, num_band_prod;
  int *is_N;
  double ise_tmp, N, U;

  ise = NULL;
  is_N = NULL;

  num_triplets = fc3_normal_squared->dims[0];
  num_band0 = fc3_normal_squared->dims[1];
  num_band = fc3_normal_squared->dims[2];
  num_band_prod = num_band0 * num_band * num_band;
  /* Per-triplet band sums; reduced into N/U totals below. */
  ise = (double*)malloc(sizeof(double) * num_triplets * num_band0);

  /* detailed_imag_self_energy has the same shape as fc3_normal_squared. */
#pragma omp parallel for
  for (i = 0; i < num_triplets; i++) {
    detailed_imag_self_energy_at_triplet
      (detailed_imag_self_energy + i * num_band_prod,
       ise + i * num_band0,
       num_band0,
       num_band,
       fc3_normal_squared->data + i * num_band_prod,
       frequencies,
       triplets + i * 3,
       g + i * num_band_prod,                    /* g1 block */
       g + (i + num_triplets) * num_band_prod,   /* g2_3 block */
       g_zero + i * num_band_prod,
       &temperature,  /* single temperature */
       1,
       cutoff_frequency);
  }

  /* Classify each triplet once. */
  is_N = (int*)malloc(sizeof(int) * num_triplets);
  for (i = 0; i < num_triplets; i++) {
    is_N[i] = tpl_is_N(triplets + i * 3, grid_address);
  }

  /* Weighted reduction into the two groups. */
  for (i = 0; i < num_band0; i++) {
    N = 0;
    U = 0;
    /* #pragma omp parallel for private(ise_tmp) reduction(+:N,U) */
    for (j = 0; j < num_triplets; j++) {
      ise_tmp = ise[j * num_band0 + i] * weights[j];
      if (is_N[j]) {
        N += ise_tmp;
      } else {
        U += ise_tmp;
      }
    }
    imag_self_energy_N[i] = N;
    imag_self_energy_U[i] = U;
  }

  free(is_N);
  is_N = NULL;
  free(ise);
  ise = NULL;
}
/* Accumulates one triplet's contribution to Im(self-energy) for each band
 * at each temperature.
 *
 * imag_self_energy : out, shape (num_temps, num_band0); zeroed here, then
 *   accumulated over the compact non-zero list g_pos.
 * g_pos / num_g_pos : (band0, band1, band2, flat_index) tuples produced by
 *   ise_set_g_pos() for elements whose g_zero flag is unset.
 * For temperatures[j] > 0 the two-channel expression
 *   ((n1 + n2 + 1) * g1 + (n1 - n2) * g2_3) * |phi3|^2 * weight
 * is accumulated; otherwise only the g1 channel is used. Bands whose
 * occupation was flagged -1 (below cutoff frequency) are skipped.
 * NOTE(review): openmp_at_bands is unused in this body -- confirm whether
 * a parallel variant was intended.
 */
void ise_imag_self_energy_at_triplet(double *imag_self_energy,
                                     const int num_band0,
                                     const int num_band,
                                     const double *fc3_normal_squared,
                                     const double *frequencies,
                                     const int *triplets,
                                     const int triplet_weight,
                                     const double *g1,
                                     const double *g2_3,
                                     PHPYCONST int (*g_pos)[4],
                                     const int num_g_pos,
                                     const double *temperatures,
                                     const int num_temps,
                                     const double cutoff_frequency,
                                     const int openmp_at_bands)
{
  int i, j;
  double *n1, *n2;

  /* Occupations per temperature, laid out (num_temps, num_band). */
  n1 = (double*)malloc(sizeof(double) * num_temps * num_band);
  n2 = (double*)malloc(sizeof(double) * num_temps * num_band);
  for (i = 0; i < num_temps; i++) {
    set_occupations(n1 + i * num_band,
                    n2 + i * num_band,
                    num_band,
                    temperatures[i],
                    triplets,
                    frequencies,
                    cutoff_frequency);
  }

  for (i = 0; i < num_band0 * num_temps; i++) {
    imag_self_energy[i] = 0;
  }

  /* Do not use OpenMP here!! */
  /* g_pos[i][0] takes value 0 <= x < num_band0 only, */
  /* which causes race condition. */
  for (i = 0; i < num_g_pos; i++) {
    for (j = 0; j < num_temps; j++) {
      if (n1[j * num_band + g_pos[i][1]] < 0 ||
          n2[j * num_band + g_pos[i][2]] < 0) {
        ;  /* band below cutoff: no contribution */
      } else {
        if (temperatures[j] > 0) {
          imag_self_energy[j * num_band0 + g_pos[i][0]] +=
            ((n1[j * num_band + g_pos[i][1]] +
              n2[j * num_band + g_pos[i][2]] + 1) * g1[g_pos[i][3]] +
             (n1[j * num_band + g_pos[i][1]] -
              n2[j * num_band + g_pos[i][2]]) * g2_3[g_pos[i][3]]) *
            fc3_normal_squared[g_pos[i][3]] * triplet_weight;
        } else {
          /* T = 0: occupations vanish; only the g1 channel survives. */
          imag_self_energy[j * num_band0 + g_pos[i][0]] +=
            g1[g_pos[i][3]] * fc3_normal_squared[g_pos[i][3]] * triplet_weight;
        }
      }
    }
  }

  free(n1);
  n1 = NULL;
  free(n2);
  n2 = NULL;
}
/* Builds a compact list of array elements whose g_zero flag is unset.
 *
 * g_pos : out; each entry is (band0, band1, band2, flat_index), where
 *   flat_index runs over the (num_band0, num_band, num_band) block in
 *   row-major order.
 * Returns the number of entries written. */
int ise_set_g_pos(int (*g_pos)[4],
                  const int num_band0,
                  const int num_band,
                  const char *g_zero)
{
  int count, adrs, b0, b1, b2;

  count = 0;
  adrs = 0;
  for (b0 = 0; b0 < num_band0; b0++) {
    for (b1 = 0; b1 < num_band; b1++) {
      for (b2 = 0; b2 < num_band; b2++, adrs++) {
        if (g_zero[adrs]) {
          continue;  /* weight vanishes; skip this element */
        }
        g_pos[count][0] = b0;
        g_pos[count][1] = b1;
        g_pos[count][2] = b2;
        g_pos[count][3] = adrs;
        count++;
      }
    }
  }
  return count;
}
/* Computes one triplet's detailed imaginary self-energy for every
 * (temperature, band0) pair and stores the per-band0 block sums in
 * imag_self_energy (shape (num_temps, num_band0)).
 * detailed_imag_self_energy keeps the full (num_band0, num_band, num_band)
 * resolution. NOTE(review): for num_temps > 1 the detailed output is
 * overwritten each iteration -- only the last temperature's detail survives;
 * confirm callers always pass num_temps == 1.
 */
static void
detailed_imag_self_energy_at_triplet(double *detailed_imag_self_energy,
                                     double *imag_self_energy,
                                     const int num_band0,
                                     const int num_band,
                                     const double *fc3_normal_squared,
                                     const double *frequencies,
                                     const int *triplets,
                                     const double *g1,
                                     const double *g2_3,
                                     const char *g_zero,
                                     const double *temperatures,
                                     const int num_temps,
                                     const double cutoff_frequency)
{
  int i, j, adrs_shift;
  double *n1, *n2;

  n1 = NULL;
  n2 = NULL;

  n1 = (double*)malloc(sizeof(double) * num_band);
  n2 = (double*)malloc(sizeof(double) * num_band);

  for (i = 0; i < num_temps; i++) {
    /* Occupations for this temperature (reused across band0). */
    set_occupations(n1,
                    n2,
                    num_band,
                    temperatures[i],
                    triplets,
                    frequencies,
                    cutoff_frequency);

    for (j = 0; j < num_band0; j++) {
      /* Offset of the (num_band x num_band) block for band0 = j. */
      adrs_shift = j * num_band * num_band;
      if (temperatures[i] > 0) {
        imag_self_energy[i * num_band0 + j] =
          collect_detailed_imag_self_energy
          (detailed_imag_self_energy + adrs_shift,
           num_band,
           fc3_normal_squared + adrs_shift,
           n1,
           n2,
           g1 + adrs_shift,
           g2_3 + adrs_shift,
           g_zero + adrs_shift);
      } else {
        imag_self_energy[i * num_band0 + j] =
          collect_detailed_imag_self_energy_0K
          (detailed_imag_self_energy + adrs_shift,
           num_band,
           fc3_normal_squared + adrs_shift,
           n1,
           n2,
           g1 + adrs_shift,
           g_zero + adrs_shift);
      }
    }
  }

  free(n1);
  n1 = NULL;
  free(n2);
  n2 = NULL;
}
/* Fills one (num_band x num_band) block of the detailed self-energy for
 * T > 0 and returns the block sum. Elements flagged by g_zero, or whose
 * occupation is flagged negative (below cutoff), are written as 0 and
 * excluded from the sum. */
static double
collect_detailed_imag_self_energy(double *imag_self_energy,
                                  const int num_band,
                                  const double *fc3_normal_squared,
                                  const double *n1,
                                  const double *n2,
                                  const double *g1,
                                  const double *g2_3,
                                  const char *g_zero)
{
  int b1, b2, adrs;
  double block_sum, element;

  block_sum = 0;
  adrs = 0;
  for (b1 = 0; b1 < num_band; b1++) {
    for (b2 = 0; b2 < num_band; b2++, adrs++) {
      imag_self_energy[adrs] = 0;
      if (g_zero[adrs] || n1[b1] < 0 || n2[b2] < 0) {
        continue;
      }
      /* Two-channel expression: (n1+n2+1)*g1 + (n1-n2)*g2_3, times |phi3|^2. */
      element = ((n1[b1] + n2[b2] + 1) * g1[adrs] +
                 (n1[b1] - n2[b2]) * g2_3[adrs]) * fc3_normal_squared[adrs];
      imag_self_energy[adrs] = element;
      block_sum += element;
    }
  }
  return block_sum;
}
/* T = 0 variant of collect_detailed_imag_self_energy(): only the g1 channel
 * contributes (occupations vanish). Skipped elements are written as 0 and
 * excluded from the returned block sum. */
static double
collect_detailed_imag_self_energy_0K(double *imag_self_energy,
                                     const int num_band,
                                     const double *fc3_normal_squared,
                                     const double *n1,
                                     const double *n2,
                                     const double *g1,
                                     const char *g_zero)
{
  int b1, b2, adrs;
  double block_sum, element;

  block_sum = 0;
  adrs = 0;
  for (b1 = 0; b1 < num_band; b1++) {
    for (b2 = 0; b2 < num_band; b2++, adrs++) {
      imag_self_energy[adrs] = 0;
      if (g_zero[adrs] || n1[b1] < 0 || n2[b2] < 0) {
        continue;
      }
      element = g1[adrs] * fc3_normal_squared[adrs];
      imag_self_energy[adrs] = element;
      block_sum += element;
    }
  }
  return block_sum;
}
/* Computes Bose-Einstein occupations for the second (n1) and third (n2)
 * phonons of a triplet. Bands whose frequency is at or below
 * cutoff_frequency are flagged with -1 so callers can skip them. */
static void set_occupations(double *n1,
                            double *n2,
                            const int num_band,
                            const double temperature,
                            const int *triplets,
                            const double *frequencies,
                            const double cutoff_frequency)
{
  int band;
  const double *freqs1, *freqs2;

  freqs1 = frequencies + triplets[1] * num_band;
  freqs2 = frequencies + triplets[2] * num_band;
  for (band = 0; band < num_band; band++) {
    n1[band] = (freqs1[band] > cutoff_frequency) ?
      bose_einstein(freqs1[band], temperature) : -1;
    n2[band] = (freqs2[band] > cutoff_frequency) ?
      bose_einstein(freqs2[band], temperature) : -1;
  }
}
|
image.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% %
% MagickCore Image Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/animate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/delegate.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/timer.h"
#include "magick/timer-private.h"
#include "magick/token.h"
#include "magick/token-private.h"
#include "magick/utility.h"
#include "magick/version.h"
#include "magick/xwindow-private.h"
/*
  Constant declaration: default colors, geometries, and progress tags used
  throughout MagickCore.
*/
const char
  BackgroundColor[] = "#ffffff",  /* white */
  BorderColor[] = "#dfdfdf",  /* gray */
  DefaultTileFrame[] = "15x15+3+3",
  DefaultTileGeometry[] = "120x120+4+3>",
  DefaultTileLabel[] = "%f\n%G\n%b",
  ForegroundColor[] = "#000",  /* black */
  LoadImageTag[] = "Load/Image",
  LoadImagesTag[] = "Load/Images",
  MatteColor[] = "#bdbdbd",  /* gray */
  PSDensityGeometry[] = "72.0x72.0",
  PSPageGeometry[] = "612x792",
  SaveImageTag[] = "Save/Image",
  SaveImagesTag[] = "Save/Images",
  TransparentColor[] = "#00000000";  /* transparent black */

/* Default image resolution (72; presumably DPI, matching
   PSDensityGeometry above -- confirm). */
const double
  DefaultResolution = 72.0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImage() returns a pointer to an image structure initialized to
% default values.
%
% The format of the AcquireImage method is:
%
% Image *AcquireImage(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info)
{
  const char
    *option;

  Image
    *image;

  MagickStatusType
    flags;

  /*
    Allocate image structure.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  image=(Image *) AcquireCriticalMemory(sizeof(*image));
  (void) memset(image,0,sizeof(*image));
  /*
    Initialize Image structure.
  */
  (void) CopyMagickString(image->magick,"MIFF",MaxTextExtent);
  image->storage_class=DirectClass;
  image->depth=MAGICKCORE_QUANTUM_DEPTH;
  image->colorspace=sRGBColorspace;
  image->rendering_intent=PerceptualIntent;
  image->gamma=1.000f/2.200f;
  /*
    Rec.709/sRGB primaries with the D65 white point (x=0.3127, y=0.3290).
  */
  image->chromaticity.red_primary.x=0.6400f;
  image->chromaticity.red_primary.y=0.3300f;
  image->chromaticity.red_primary.z=0.0300f;
  image->chromaticity.green_primary.x=0.3000f;
  image->chromaticity.green_primary.y=0.6000f;
  image->chromaticity.green_primary.z=0.1000f;
  image->chromaticity.blue_primary.x=0.1500f;
  image->chromaticity.blue_primary.y=0.0600f;
  image->chromaticity.blue_primary.z=0.7900f;
  image->chromaticity.white_point.x=0.3127f;
  image->chromaticity.white_point.y=0.3290f;
  image->chromaticity.white_point.z=0.3583f;
  image->interlace=NoInterlace;
  image->ticks_per_second=UndefinedTicksPerSecond;
  image->compose=OverCompositeOp;
  image->blur=1.0;
  InitializeExceptionInfo(&image->exception);
  (void) QueryColorDatabase(BackgroundColor,&image->background_color,
    &image->exception);
  (void) QueryColorDatabase(BorderColor,&image->border_color,&image->exception);
  (void) QueryColorDatabase(MatteColor,&image->matte_color,&image->exception);
  (void) QueryColorDatabase(TransparentColor,&image->transparent_color,
    &image->exception);
  GetTimerInfo(&image->timer);
  image->ping=MagickFalse;
  image->cache=AcquirePixelCache(0);
  image->blob=CloneBlobInfo((BlobInfo *) NULL);
  image->timestamp=GetMagickTime();
  image->debug=IsEventLogging();
  image->reference_count=1;
  image->semaphore=AllocateSemaphoreInfo();
  image->signature=MagickCoreSignature;
  if (image_info == (ImageInfo *) NULL)
    return(image);
  /*
    Transfer image info.
  */
  SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
    MagickFalse);
  (void) CopyMagickString(image->filename,image_info->filename,MaxTextExtent);
  (void) CopyMagickString(image->magick_filename,image_info->filename,
    MaxTextExtent);
  (void) CopyMagickString(image->magick,image_info->magick,MaxTextExtent);
  if (image_info->size != (char *) NULL)
    {
      /*
        -size: set dimensions and scanline offset; x/y are consumed as the
        offset, not kept as an extract origin.
      */
      (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
      image->columns=image->extract_info.width;
      image->rows=image->extract_info.height;
      image->offset=image->extract_info.x;
      image->extract_info.x=0;
      image->extract_info.y=0;
    }
  if (image_info->extract != (char *) NULL)
    {
      RectangleInfo
        geometry;

      /*
        -extract: only honored when an x or y offset was given; swaps the
        full and extract dimensions.
      */
      (void) memset(&geometry,0,sizeof(geometry));
      flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
      if (((flags & XValue) != 0) || ((flags & YValue) != 0))
        {
          image->extract_info=geometry;
          Swap(image->columns,image->extract_info.width);
          Swap(image->rows,image->extract_info.height);
        }
    }
  image->compression=image_info->compression;
  image->quality=image_info->quality;
  image->endian=image_info->endian;
  image->interlace=image_info->interlace;
  image->units=image_info->units;
  if (image_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      /* rho = x resolution; sigma (if given) = y resolution, else y = x. */
      flags=ParseGeometry(image_info->density,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->x_resolution=geometry_info.rho;
      image->y_resolution=image->x_resolution;
      if ((flags & SigmaValue) != 0)
        image->y_resolution=geometry_info.sigma;
    }
  if (image_info->page != (char *) NULL)
    {
      char
        *geometry;

      image->page=image->extract_info;
      geometry=GetPageGeometry(image_info->page);
      (void) ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  if (image_info->depth != 0)
    image->depth=image_info->depth;
  image->dither=image_info->dither;
  image->background_color=image_info->background_color;
  image->border_color=image_info->border_color;
  image->matte_color=image_info->matte_color;
  image->transparent_color=image_info->transparent_color;
  image->ping=image_info->ping;
  image->progress_monitor=image_info->progress_monitor;
  image->client_data=image_info->client_data;
  if (image_info->cache != (void *) NULL)
    ClonePixelCacheMethods(image->cache,image_info->cache);
  (void) SyncImageSettings(image_info,image);
  option=GetImageOption(image_info,"delay");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      /*
        "delay" option: '>' caps the delay at rho, '<' raises it; otherwise
        the delay is set to rho outright.
      */
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & GreaterValue) != 0)
        {
          if (image->delay > (size_t) floor(geometry_info.rho+0.5))
            image->delay=(size_t) floor(geometry_info.rho+0.5);
        }
      else
        if ((flags & LessValue) != 0)
          {
            /*
              NOTE(review): this branch tests delay but assigns
              ticks_per_second from sigma (duplicating the SigmaValue
              handling below); it looks like it was meant to raise
              image->delay to rho -- confirm against upstream ImageMagick.
            */
            if (image->delay < (size_t) floor(geometry_info.rho+0.5))
              image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
          }
        else
          image->delay=(size_t) floor(geometry_info.rho+0.5);
      if ((flags & SigmaValue) != 0)
        image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
    }
  option=GetImageOption(image_info,"dispose");
  if (option != (const char *) NULL)
    image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
      MagickFalse,option);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageInfo() allocates the ImageInfo structure.
%
% The format of the AcquireImageInfo method is:
%
% ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
  ImageInfo
    *info;

  /*
    Allocate the ImageInfo structure and initialize it to default values;
    allocation failure is fatal.
  */
  info=(ImageInfo *) AcquireMagickMemory(sizeof(*info));
  if (info == (ImageInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetImageInfo(info);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% The format of the AcquireNextImage method is:
%
% void AcquireNextImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image)
{
  /*
    Allocate image structure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->next=AcquireImage(image_info);
  if (GetNextImageInList(image) == (Image *) NULL)
    return;  /* allocation failed; image->next stays NULL */
  (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
    MaxTextExtent);
  if (image_info != (ImageInfo *) NULL)
    (void) CopyMagickString(GetNextImageInList(image)->filename,
      image_info->filename,MaxTextExtent);
  /*
    Share the parent's blob: drop the freshly allocated blob first, then
    take a reference to the current image's blob.
  */
  DestroyBlob(GetNextImageInList(image));
  image->next->blob=ReferenceBlob(image->blob);
  image->next->endian=image->endian;
  image->next->scene=image->scene+1;
  image->next->previous=image;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A p p e n d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AppendImages() takes all images from the current image pointer to the end
% of the image list and appends them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting now affects how the image is justified in the
% final image.
%
% The format of the AppendImages method is:
%
% Image *AppendImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
  const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag "Append/Image"

  CacheView
    *append_view;

  Image
    *append_image;

  MagickBooleanType
    homogeneous_colorspace,
    matte,
    status;

  MagickOffsetType
    n;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    depth,
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y,
    y_offset;

  /*
    Compute maximum area of appended area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  matte=images->matte;
  number_images=1;
  width=images->columns;
  height=images->rows;
  depth=images->depth;
  homogeneous_colorspace=MagickTrue;
  /*
    First pass: find the canvas size (stacked: max width, summed height;
    side-by-side: summed width, max height), the max depth, and whether any
    image carries an alpha channel or a differing colorspace.
  */
  next=GetNextImageInList(images);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->depth > depth)
      depth=next->depth;
    if (next->colorspace != images->colorspace)
      homogeneous_colorspace=MagickFalse;
    if (next->matte != MagickFalse)
      matte=MagickTrue;
    number_images++;
    if (stack != MagickFalse)
      {
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        continue;
      }
    width+=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Append images.
  */
  append_image=CloneImage(images,width,height,MagickTrue,exception);
  if (append_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(append_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&append_image->exception);
      append_image=DestroyImage(append_image);
      return((Image *) NULL);
    }
  if (homogeneous_colorspace == MagickFalse)
    (void) SetImageColorspace(append_image,sRGBColorspace);
  append_image->depth=depth;
  append_image->matte=matte;
  append_image->page=images->page;
  (void) SetImageBackgroundColor(append_image);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  next=images;
  append_view=AcquireAuthenticCacheView(append_image,exception);
  /*
    Second pass: copy each image's pixels into the canvas at the running
    offset, gravity-adjusted along the non-appended axis.
  */
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    CacheView
      *image_view;

    MagickBooleanType
      proceed;

    SetGeometry(append_image,&geometry);
    GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
    if (stack != MagickFalse)
      x_offset-=geometry.x;
    else
      y_offset-=geometry.y;
    image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(next,next,next->rows,1)
#endif
    for (y=0; y < (ssize_t) next->rows; y++)
    {
      MagickBooleanType
        sync;

      register const IndexPacket
        *magick_restrict indexes;

      register const PixelPacket
        *magick_restrict p;

      register IndexPacket
        *magick_restrict append_indexes;

      register PixelPacket
        *magick_restrict q;

      register ssize_t
        x;

      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
        next->columns,1,exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      append_indexes=GetCacheViewAuthenticIndexQueue(append_view);
      for (x=0; x < (ssize_t) next->columns; x++)
      {
        SetPixelRed(q,GetPixelRed(p));
        SetPixelGreen(q,GetPixelGreen(p));
        SetPixelBlue(q,GetPixelBlue(p));
        /* Opaque by default; copy source opacity only when it has alpha. */
        SetPixelOpacity(q,OpaqueOpacity);
        if (next->matte != MagickFalse)
          SetPixelOpacity(q,GetPixelOpacity(p));
        if ((next->colorspace == CMYKColorspace) &&
            (append_image->colorspace == CMYKColorspace))
          SetPixelIndex(append_indexes+x,GetPixelIndex(indexes+x));
        p++;
        q++;
      }
      sync=SyncCacheViewAuthenticPixels(append_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    image_view=DestroyCacheView(image_view);
    /* Advance the running offset along the append axis. */
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) next->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) next->rows;
      }
    proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    next=GetNextImageInList(next);
  }
  append_view=DestroyCacheView(append_view);
  if (status == MagickFalse)
    append_image=DestroyImage(append_image);
  return(append_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a t c h I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CatchImageException() returns if no exceptions are found in the image
% sequence, otherwise it determines the most severe exception and reports
% it as a warning or error depending on the severity.
%
% The format of the CatchImageException method is:
%
% ExceptionType CatchImageException(Image *image)
%
% A description of each parameter follows:
%
% o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
  ExceptionInfo
    *exception_info;

  ExceptionType
    severity_found;

  /*
    Gather the most severe exception recorded in the image sequence, report
    it, and return its severity to the caller.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  exception_info=AcquireExceptionInfo();
  GetImageException(image,exception_info);
  CatchException(exception_info);
  severity_found=exception_info->severity;
  exception_info=DestroyExceptionInfo(exception_info);
  return(severity_found);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p I m a g e P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipImagePath() sets the image clip mask based on any clipping path
% information, if it exists.
%
% The format of the ClipImagePath method is:
%
% MagickBooleanType ClipImagePath(Image *image,const char *pathname,
% const MagickBooleanType inside)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
*/
MagickExport MagickBooleanType ClipImage(Image *image)
{
  /* Convenience wrapper: clip to clipping path "#1", inside. */
  return(ClipImagePath(image,"#1",MagickTrue));
}
/*
  ClipImagePath() sets the image clip mask from the named 8BIM clipping-path
  property ("8BIM:1999,2998:<pathname>"). The stored path blob is rendered to
  an image, optionally negated so later operations apply outside the path,
  and installed as the clip mask. Returns MagickTrue on success, MagickFalse
  when the path is missing or cannot be rendered.

  Fix: destroy clip_mask before the early return when promoting it to
  DirectClass fails; the original leaked the image on that path.
*/
MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname,
  const MagickBooleanType inside)
{
#define ClipImagePathTag "ClipPath/Image"

  char
    *property;

  const char
    *value;

  Image
    *clip_mask;

  ImageInfo
    *image_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pathname != NULL);
  /*
    Look up the clipping path stored as an 8BIM image property.
  */
  property=AcquireString(pathname);
  (void) FormatLocaleString(property,MaxTextExtent,"8BIM:1999,2998:%s",
    pathname);
  value=GetImageProperty(image,property);
  property=DestroyString(property);
  if (value == (const char *) NULL)
    {
      ThrowFileException(&image->exception,OptionError,"NoClipPathDefined",
        image->filename);
      return(MagickFalse);
    }
  /*
    Render the path blob into a mask image.
  */
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,image->filename,MaxTextExtent);
  (void) ConcatenateMagickString(image_info->filename,pathname,MaxTextExtent);
  clip_mask=BlobToImage(image_info,value,strlen(value),&image->exception);
  image_info=DestroyImageInfo(image_info);
  if (clip_mask == (Image *) NULL)
    return(MagickFalse);
  if (clip_mask->storage_class == PseudoClass)
    {
      (void) SyncImage(clip_mask);
      if (SetImageStorageClass(clip_mask,DirectClass) == MagickFalse)
        {
          clip_mask=DestroyImage(clip_mask);  /* fix: do not leak on failure */
          return(MagickFalse);
        }
    }
  if (inside == MagickFalse)
    (void) NegateImage(clip_mask,MagickFalse);
  (void) FormatLocaleString(clip_mask->magick_filename,MaxTextExtent,
    "8BIM:1999,2998:%s\nPS",pathname);
  (void) SetImageClipMask(image,clip_mask);
  clip_mask=DestroyImage(clip_mask);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImage() copies an image and returns the copy as a new image object.
%
% If the specified columns and rows is 0, an exact copy of the image is
% returned, otherwise the pixel data is undefined and must be initialized
% with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On
% failure, a NULL image is returned and exception describes the reason for the
% failure.
%
% The format of the CloneImage method is:
%
% Image *CloneImage(const Image *image,const size_t columns,
% const size_t rows,const MagickBooleanType detach,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the cloned image.
%
% o rows: the number of rows in the cloned image.
%
% o detach: With a value other than 0, the cloned image is detached from
% its parent I/O stream.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
  const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
  double
    scale;
  Image
    *clone_image;
  size_t
    length;
  /*
    Clone the image.  A zero columns/rows request produces an exact copy
    that shares the source pixel cache by reference; otherwise the clone is
    sized to columns x rows with undefined pixel content.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((image->columns == 0) || (image->rows == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
        "NegativeOrZeroImageSize","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    Copy scalar attributes, then deep-copy the profiles, properties and
    artifacts onto the zeroed clone.
  */
  clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image));
  (void) memset(clone_image,0,sizeof(*clone_image));
  clone_image->signature=MagickCoreSignature;
  clone_image->storage_class=image->storage_class;
  clone_image->channels=image->channels;
  clone_image->colorspace=image->colorspace;
  clone_image->matte=image->matte;
  clone_image->columns=image->columns;
  clone_image->rows=image->rows;
  clone_image->dither=image->dither;
  (void) CloneImageProfiles(clone_image,image);
  (void) CloneImageProperties(clone_image,image);
  (void) CloneImageArtifacts(clone_image,image);
  GetTimerInfo(&clone_image->timer);
  InitializeExceptionInfo(&clone_image->exception);
  InheritException(&clone_image->exception,&image->exception);
  if (image->ascii85 != (void *) NULL)
    Ascii85Initialize(clone_image);
  clone_image->extent=image->extent;
  clone_image->magick_columns=image->magick_columns;
  clone_image->magick_rows=image->magick_rows;
  clone_image->type=image->type;
  (void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
    MaxTextExtent);
  (void) CopyMagickString(clone_image->magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(clone_image->filename,image->filename,MaxTextExtent);
  clone_image->progress_monitor=image->progress_monitor;
  clone_image->client_data=image->client_data;
  clone_image->reference_count=1;
  clone_image->next=image->next;
  clone_image->previous=image->previous;
  clone_image->list=NewImageList();
  clone_image->clip_mask=NewImageList();
  clone_image->mask=NewImageList();
  /*
    detach unhooks the clone from the source image list and gives it a
    fresh blob; otherwise the source I/O blob is shared by reference.
  */
  if (detach == MagickFalse)
    clone_image->blob=ReferenceBlob(image->blob);
  else
    {
      clone_image->next=NewImageList();
      clone_image->previous=NewImageList();
      clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
    }
  clone_image->ping=image->ping;
  clone_image->debug=IsEventLogging();
  clone_image->semaphore=AllocateSemaphoreInfo();
  if (image->colormap != (PixelPacket *) NULL)
    {
      /*
        Allocate and copy the image colormap.
      */
      clone_image->colors=image->colors;
      length=(size_t) image->colors;
      clone_image->colormap=(PixelPacket *) AcquireQuantumMemory(length+1,
        sizeof(*clone_image->colormap));
      if (clone_image->colormap == (PixelPacket *) NULL)
        {
          clone_image=DestroyImage(clone_image);
          ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
        }
      (void) memcpy(clone_image->colormap,image->colormap,length*
        sizeof(*clone_image->colormap));
    }
  if ((columns == 0) || (rows == 0))
    {
      /*
        Exact copy: clone the masks and share the pixel cache by reference.
      */
      if (image->montage != (char *) NULL)
        (void) CloneString(&clone_image->montage,image->montage);
      if (image->directory != (char *) NULL)
        (void) CloneString(&clone_image->directory,image->directory);
      if (image->clip_mask != (Image *) NULL)
        clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
          exception);
      if (image->mask != (Image *) NULL)
        clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
      clone_image->cache=ReferencePixelCache(image->cache);
      return(clone_image);
    }
  if ((columns == image->columns) && (rows == image->rows))
    {
      /*
        Same geometry requested: the masks still apply, so clone them.
      */
      if (image->clip_mask != (Image *) NULL)
        clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
          exception);
      if (image->mask != (Image *) NULL)
        clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
    }
  /*
    Scale the page and tile-offset geometry proportionally to the new size.
  */
  scale=1.0;
  if (image->columns != 0)
    scale=(double) columns/(double) image->columns;
  clone_image->page.width=(size_t) floor(scale*image->page.width+0.5);
  clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5);
  clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5);
  scale=1.0;
  if (image->rows != 0)
    scale=(double) rows/(double) image->rows;
  clone_image->page.height=(size_t) floor(scale*image->page.height+0.5);
  clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5);
  clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5);
  clone_image->cache=ClonePixelCache(image->cache);
  if (SetImageExtent(clone_image,columns,rows) == MagickFalse)
    {
      /*
        On failure the clone is destroyed and NULL is returned; the reason
        is propagated into the caller's exception.
      */
      InheritException(exception,&clone_image->exception);
      clone_image=DestroyImage(clone_image);
    }
  return(clone_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageInfo() makes a copy of the given image info structure. If
% NULL is specified, a new image info structure is created initialized to
% default values.
%
% The format of the CloneImageInfo method is:
%
% ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
  ImageInfo
    *clone_info;
  /*
    Start from a default-initialized ImageInfo; a NULL argument simply
    returns the defaults.
  */
  clone_info=AcquireImageInfo();
  if (image_info == (ImageInfo *) NULL)
    return(clone_info);
  /*
    Scalar members copy by value; string members below are deep-copied
    only when set in the source.
  */
  clone_info->compression=image_info->compression;
  clone_info->temporary=image_info->temporary;
  clone_info->adjoin=image_info->adjoin;
  clone_info->antialias=image_info->antialias;
  clone_info->scene=image_info->scene;
  clone_info->number_scenes=image_info->number_scenes;
  clone_info->depth=image_info->depth;
  if (image_info->size != (char *) NULL)
    (void) CloneString(&clone_info->size,image_info->size);
  if (image_info->extract != (char *) NULL)
    (void) CloneString(&clone_info->extract,image_info->extract);
  if (image_info->scenes != (char *) NULL)
    (void) CloneString(&clone_info->scenes,image_info->scenes);
  if (image_info->page != (char *) NULL)
    (void) CloneString(&clone_info->page,image_info->page);
  clone_info->interlace=image_info->interlace;
  clone_info->endian=image_info->endian;
  clone_info->units=image_info->units;
  clone_info->quality=image_info->quality;
  if (image_info->sampling_factor != (char *) NULL)
    (void) CloneString(&clone_info->sampling_factor,
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,image_info->server_name);
  if (image_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,image_info->font);
  if (image_info->texture != (char *) NULL)
    (void) CloneString(&clone_info->texture,image_info->texture);
  if (image_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,image_info->density);
  clone_info->pointsize=image_info->pointsize;
  clone_info->fuzz=image_info->fuzz;
  clone_info->pen=image_info->pen;
  clone_info->background_color=image_info->background_color;
  clone_info->border_color=image_info->border_color;
  clone_info->matte_color=image_info->matte_color;
  clone_info->transparent_color=image_info->transparent_color;
  clone_info->dither=image_info->dither;
  clone_info->monochrome=image_info->monochrome;
  clone_info->colors=image_info->colors;
  clone_info->colorspace=image_info->colorspace;
  clone_info->type=image_info->type;
  clone_info->orientation=image_info->orientation;
  clone_info->preview_type=image_info->preview_type;
  clone_info->group=image_info->group;
  clone_info->ping=image_info->ping;
  clone_info->verbose=image_info->verbose;
  if (image_info->view != (char *) NULL)
    (void) CloneString(&clone_info->view,image_info->view);
  if (image_info->authenticate != (char *) NULL)
    (void) CloneString(&clone_info->authenticate,image_info->authenticate);
  (void) CloneImageOptions(clone_info,image_info);
  clone_info->progress_monitor=image_info->progress_monitor;
  clone_info->client_data=image_info->client_data;
  /*
    The pixel cache is shared by reference, not deep-copied; the profile
    blob, in contrast, is cloned.
  */
  clone_info->cache=image_info->cache;
  if (image_info->cache != (void *) NULL)
    clone_info->cache=ReferencePixelCache(image_info->cache);
  if (image_info->profile != (void *) NULL)
    clone_info->profile=(void *) CloneStringInfo((StringInfo *)
      image_info->profile);
  SetImageInfoFile(clone_info,image_info->file);
  SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
  clone_info->stream=image_info->stream;
  clone_info->virtual_pixel_method=image_info->virtual_pixel_method;
  (void) CopyMagickString(clone_info->magick,image_info->magick,MaxTextExtent);
  (void) CopyMagickString(clone_info->unique,image_info->unique,MaxTextExtent);
  (void) CopyMagickString(clone_info->zero,image_info->zero,MaxTextExtent);
  (void) CopyMagickString(clone_info->filename,image_info->filename,
    MaxTextExtent);
  /* Legacy aliases kept in sync with scene/number_scenes. */
  clone_info->subimage=image_info->scene; /* deprecated */
  clone_info->subrange=image_info->number_scenes; /* deprecated */
  clone_info->channel=image_info->channel;
  clone_info->debug=IsEventLogging();
  clone_info->signature=image_info->signature;
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o p y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CopyImagePixels() copies pixels from the source image as defined by the
% geometry into the destination image at the specified offset.
%
% The format of the CopyImagePixels method is:
%
% MagickBooleanType CopyImagePixels(Image *image,const Image *source_image,
% const RectangleInfo *geometry,const OffsetInfo *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the destination image.
%
% o source_image: the source image.
%
% o geometry: define the dimensions of the source pixel rectangle.
%
% o offset: define the offset in the destination image.
%
% o exception: return the highest severity exception.
%
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
  const Image *source_image,const RectangleInfo *geometry,
  const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag "Copy/Image"
  CacheView
    *image_view,
    *source_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  /*
    Copy the source pixel rectangle into the destination image at the given
    offset.  The rectangle must lie entirely within the destination bounds.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(source_image != (Image *) NULL);
  assert(geometry != (RectangleInfo *) NULL);
  assert(offset != (OffsetInfo *) NULL);
  if ((offset->x < 0) || (offset->y < 0) ||
      ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
      ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
    ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
      image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Copy image pixels, one source row per iteration.
  */
  status=MagickTrue;
  progress=0;
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,image,geometry->height,1)
#endif
  for (y=0; y < (ssize_t) geometry->height; y++)
  {
    register const IndexPacket
      *magick_restrict source_indexes;
    register const PixelPacket
      *magick_restrict p;
    register IndexPacket
      *magick_restrict indexes;
    register PixelPacket
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
      geometry->width,1,exception);
    q=GetCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
      geometry->width,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    source_indexes=GetCacheViewVirtualIndexQueue(source_view);
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) geometry->width; x++)
    {
      *q=(*p);
      /*
        Copy the black (index) channel only when both index queues exist:
        a source without an index channel returns NULL from
        GetCacheViewVirtualIndexQueue(), and the previous unconditional
        CMYK copy would dereference that NULL pointer.
      */
      if ((image->colorspace == CMYKColorspace) &&
          (indexes != (IndexPacket *) NULL) &&
          (source_indexes != (const IndexPacket *) NULL))
        indexes[x]=source_indexes[x];
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CopyImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImage() dereferences an image, deallocating memory associated with
% the image if the reference count becomes zero.
%
% The format of the DestroyImage method is:
%
% Image *DestroyImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *DestroyImage(Image *image)
{
  MagickBooleanType
    destroy;
  /*
    Dereference image.  Decrement under the image semaphore; only the
    caller that drops the count to zero performs the actual teardown.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  destroy=MagickFalse;
  LockSemaphoreInfo(image->semaphore);
  image->reference_count--;
  if (image->reference_count == 0)
    destroy=MagickTrue;
  UnlockSemaphoreInfo(image->semaphore);
  if (destroy == MagickFalse)
    return((Image *) NULL);
  /*
    Destroy image: release pixels first, then the owned sub-images,
    strings, colormap, metadata and blob, and finally the semaphore and
    the structure itself.
  */
  DestroyImagePixels(image);
  if (image->clip_mask != (Image *) NULL)
    image->clip_mask=DestroyImage(image->clip_mask);
  if (image->mask != (Image *) NULL)
    image->mask=DestroyImage(image->mask);
  if (image->montage != (char *) NULL)
    image->montage=DestroyString(image->montage);
  if (image->directory != (char *) NULL)
    image->directory=DestroyString(image->directory);
  if (image->colormap != (PixelPacket *) NULL)
    image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
  if (image->geometry != (char *) NULL)
    image->geometry=DestroyString(image->geometry);
  DestroyImageProfiles(image);
  DestroyImageProperties(image);
  DestroyImageArtifacts(image);
  if (image->ascii85 != (Ascii85Info*) NULL)
    image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
  DestroyBlob(image);
  (void) ClearExceptionInfo(&image->exception,MagickTrue);
  if (image->semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&image->semaphore);
  /*
    Invalidate the signature so a stale pointer trips the asserts above.
  */
  image->signature=(~MagickCoreSignature);
  image=(Image *) RelinquishMagickMemory(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageInfo() deallocates memory associated with an ImageInfo
% structure.
%
% The format of the DestroyImageInfo method is:
%
% ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
  /*
    Free every optional string member, then the options, the referenced
    pixel cache and the profile, and finally the structure itself.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  if (image_info->size != (char *) NULL)
    image_info->size=DestroyString(image_info->size);
  if (image_info->extract != (char *) NULL)
    image_info->extract=DestroyString(image_info->extract);
  if (image_info->scenes != (char *) NULL)
    image_info->scenes=DestroyString(image_info->scenes);
  if (image_info->page != (char *) NULL)
    image_info->page=DestroyString(image_info->page);
  if (image_info->sampling_factor != (char *) NULL)
    image_info->sampling_factor=DestroyString(
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    image_info->server_name=DestroyString(
      image_info->server_name);
  if (image_info->font != (char *) NULL)
    image_info->font=DestroyString(image_info->font);
  if (image_info->texture != (char *) NULL)
    image_info->texture=DestroyString(image_info->texture);
  if (image_info->density != (char *) NULL)
    image_info->density=DestroyString(image_info->density);
  if (image_info->view != (char *) NULL)
    image_info->view=DestroyString(image_info->view);
  if (image_info->authenticate != (char *) NULL)
    image_info->authenticate=DestroyString(
      image_info->authenticate);
  DestroyImageOptions(image_info);
  if (image_info->cache != (void *) NULL)
    image_info->cache=DestroyPixelCache(image_info->cache);
  if (image_info->profile != (StringInfo *) NULL)
    image_info->profile=(void *) DestroyStringInfo((StringInfo *)
      image_info->profile);
  /*
    Invalidate the signature so a stale pointer trips the assert above;
    the return value is whatever RelinquishMagickMemory() yields so the
    caller can reassign its pointer.
  */
  image_info->signature=(~MagickCoreSignature);
  image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
  return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s a s s o c i a t e I m a g e S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DisassociateImageStream() disassociates the image stream. It checks if the
% blob of the specified image is referenced by other images. If the reference
% count is higher than 1, a new blob is assigned to the specified image.
%
% The format of the DisassociateImageStream method is:
%
% void DisassociateImageStream(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  /*
    Thin wrapper: validate the image, then delegate to DisassociateBlob()
    to detach this image from an I/O blob shared with other images.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  DisassociateBlob(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C l i p M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageClipMask() returns the clip path associated with the image.
%
% The format of the GetImageClipMask method is:
%
% Image *GetImageClipMask(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *GetImageClipMask(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Hand the caller a private (detached) clone of the clip path, or NULL
    when the image carries none.
  */
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->clip_mask != (Image *) NULL)
    return(CloneImage(image->clip_mask,0,0,MagickTrue,exception));
  return((Image *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageException() traverses an image sequence and returns any
% error more severe than noted by the exception parameter.
%
% The format of the GetImageException method is:
%
% void GetImageException(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to a list of one or more images.
%
% o exception: return the highest severity exception.
%
*/
MagickExport void GetImageException(Image *image,ExceptionInfo *exception)
{
  register Image
    *next;
  /*
    Walk the image sequence, promote the most severe per-frame exception
    into the caller's exception, and clear each frame's exception.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  next=image;
  while (next != (Image *) NULL)
  {
    if (next->exception.severity != UndefinedException)
      {
        if (exception->severity < next->exception.severity)
          InheritException(exception,&next->exception);
        next->exception.severity=UndefinedException;
      }
    next=GetNextImageInList(next);
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfo() initializes image_info to default values.
%
% The format of the GetImageInfo method is:
%
% void GetImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *synchronize;
  ExceptionInfo
    *exception;
  /*
    File and image dimension members: zero the whole structure, then set
    the members whose defaults are non-zero.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info != (ImageInfo *) NULL);
  (void) memset(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  /*
    The MAGICK_SYNCHRONIZE environment variable seeds the synchronize
    flag; the returned string is owned by us and must be freed.
  */
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      image_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /*
    Resolve the default color names into concrete pixel values; any
    lookup warnings are discarded with the temporary exception.
  */
  exception=AcquireExceptionInfo();
  (void) QueryColorDatabase(BackgroundColor,&image_info->background_color,
    exception);
  (void) QueryColorDatabase(BorderColor,&image_info->border_color,exception);
  (void) QueryColorDatabase(MatteColor,&image_info->matte_color,exception);
  (void) QueryColorDatabase(TransparentColor,&image_info->transparent_color,
    exception);
  exception=DestroyExceptionInfo(exception);
  image_info->debug=IsEventLogging();
  image_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfoFile() returns the image info file member.
%
% The format of the GetImageInfoFile method is:
%
% FILE *GetImageInfoFile(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
  /*
    Accessor: return the FILE stream stored in the image info, if any.
  */
  return(image_info->file);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMask() returns the mask associated with the image.
%
% The format of the GetImageMask method is:
%
% Image *GetImageMask(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *GetImageMask(const Image *image,ExceptionInfo *exception)
{
  /*
    Hand the caller a private (detached) clone of the image mask, or NULL
    when the image carries none.
  */
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->mask != (Image *) NULL)
    return(CloneImage(image->mask,0,0,MagickTrue,exception));
  return((Image *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannels() returns the number of pixel channels associated with the
% specified image.
%
% The format of the GetImageChannels method is:
%
% size_t GetImageChannels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport size_t GetImageChannels(Image *image)
{
  /*
    Accessor: return the channel count recorded on the image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(image->channels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e R e f e r e n c e C o u n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageReferenceCount() returns the image reference count.
%
% The format of the GetImageReferenceCount method is:
%
% ssize_t GetImageReferenceCount(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    reference_count;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Read under the image semaphore so the snapshot is coherent with
    concurrent DestroyImage() decrements (which use the same lock).
  */
  LockSemaphoreInfo(image->semaphore);
  reference_count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(reference_count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
% image. A virtual pixel is any pixel access that is outside the boundaries
% of the image cache.
%
% The format of the GetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  /*
    Delegate to the pixel cache: the virtual pixel method is a cache
    property governing accesses outside the image bounds.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(GetPixelCacheVirtualMethod(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageFilename() interprets embedded characters in an image filename.
% The filename length is returned.
%
% The format of the InterpretImageFilename method is:
%
% size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
% const char *format,int value,char *filename)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: the image.
%
% o format: A filename describing the format to use to write the numeric
% argument. Only the first numeric format identifier is replaced.
%
% o value: Numeric value to substitute into format filename.
%
% o filename: return the formatted filename in this character buffer.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename)
{
  char
    *q;
  int
    c;
  MagickBooleanType
    canonical;
  register const char
    *p;
  ssize_t
    field_width,
    offset;
  /*
    Substitute the numeric argument for the first %d/%o/%x specifier and
    expand %[filename:...] properties; the result is returned in filename
    and its length is the return value.  `offset` tracks how far the
    output has drifted from the format as substitutions grow or shrink it.
  */
  canonical=MagickFalse;
  offset=0;
  (void) CopyMagickString(filename,format,MaxTextExtent);
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    if (*q == '%')
      {
        /* "%%" is a literal percent; collapsed at the end. */
        p=q+1;
        continue;
      }
    field_width=0;
    if (*q == '0')
      field_width=(ssize_t) strtol(q,&q,10);
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        /*
          Format the numeric value in place of the specifier, then
          re-append the untouched remainder of the output.
        */
        q++;
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format-offset),(size_t)
          (MaxTextExtent-(p-format-offset)),p,value);
        offset+=(4-field_width);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MaxTextExtent];
        const char
          *option;
        register char
          *r;
        register ssize_t
          i;
        ssize_t
          depth;
        /*
          Image option.  NOTE: this local was previously named `value`,
          shadowing the int parameter of the same name; renamed to
          `option` (no behavior change).
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        depth=1;
        r=q+1;
        for (i=0; (i < (MaxTextExtent-1L)) && (*r != '\0'); i++)
        {
          /* Track nested brackets so the matching ']' ends the pattern. */
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        /*
          Look the option up on the image first (property, then artifact),
          falling back to the image info options.
        */
        option=(const char *) NULL;
        if (image != (Image *) NULL)
          option=GetImageProperty(image,pattern);
        if ((option == (const char *) NULL) && (image != (Image *) NULL))
          option=GetImageArtifact(image,pattern);
        if ((option == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          option=GetImageOption(image_info,pattern);
        if (option == (const char *) NULL)
          break;
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format-offset),option,(size_t)
          (MaxTextExtent-(p-format-offset)));
        offset+=strlen(pattern)-strlen(option)+3;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MaxTextExtent);
  else
    /* Collapse literal "%%" runs now that substitution is done. */
    for (q=filename; *q != '\0'; q++)
      if ((*q == '%') && (*(q+1) == '%'))
        (void) CopyMagickString(q,q+1,(size_t) (MaxTextExtent-(q-filename)));
  return(strlen(filename));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s H i g h D y n a m i c R a n g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
% non-integer or exceeds the bounds of the quantum depth (e.g. for Q16
% 0..65535).
%
% The format of the IsHighDynamicRangeImage method is:
%
% MagickBooleanType IsHighDynamicRangeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /*
    Without HDRI support pixel components are integer quanta, so an image
    can never hold out-of-range or fractional values.
  */
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickPixelPacket
    zero;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    status stays MagickTrue while every inspected component is an integral
    value inside [0,QuantumRange]; the final return is then inverted.
    Note a pixel-fetch failure also clears status and thus reads as "HDR".
  */
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;
    register const IndexPacket
      *indexes;
    register const PixelPacket
      *p;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      /*
        The (QuantumAny) cast truncates: inequality means the component
        has a fractional part, i.e. a high-dynamic-range value.
      */
      if ((pixel.red < 0.0) || (pixel.red > QuantumRange) ||
          (pixel.red != (QuantumAny) pixel.red))
        break;
      if ((pixel.green < 0.0) || (pixel.green > QuantumRange) ||
          (pixel.green != (QuantumAny) pixel.green))
        break;
      if ((pixel.blue < 0.0) || (pixel.blue > QuantumRange) ||
          (pixel.blue != (QuantumAny) pixel.blue))
        break;
      if (pixel.matte != MagickFalse)
        {
          if ((pixel.opacity < 0.0) || (pixel.opacity > QuantumRange) ||
              (pixel.opacity != (QuantumAny) pixel.opacity))
            break;
        }
      if (pixel.colorspace == CMYKColorspace)
        {
          if ((pixel.index < 0.0) || (pixel.index > QuantumRange) ||
              (pixel.index != (QuantumAny) pixel.index))
            break;
        }
      p++;
    }
    /* An early break means row y holds an out-of-band component. */
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O b j e c t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageObject() returns MagickTrue if the image sequence contains a valid
% set of image objects.
%
% The format of the IsImageObject method is:
%
% MagickBooleanType IsImageObject(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  register const Image
    *next;

  /*
    The sequence is a valid image object only if every frame in the list
    carries the MagickCore signature.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  next=image;
  while (next != (const Image *) NULL)
  {
    if (next->signature != MagickCoreSignature)
      return(MagickFalse);
    next=GetNextImageInList(next);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s T a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsTaintImage() returns MagickTrue if any pixel in the image has been altered
% since it was first constituted.
%
% The format of the IsTaintImage method is:
%
% MagickBooleanType IsTaintImage(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    filename[MaxTextExtent],
    magick[MaxTextExtent];

  register const Image
    *q;

  /*
    The sequence is tainted when any frame is flagged as altered, or no
    longer matches the magick/filename recorded for the first frame.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  (void) CopyMagickString(magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(filename,image->filename,MaxTextExtent);
  for (q=image; q != (const Image *) NULL; q=GetNextImageInList(q))
    if ((q->taint != MagickFalse) ||
        (LocaleCompare(q->magick,magick) != 0) ||
        (LocaleCompare(q->filename,filename) != 0))
      return(MagickTrue);
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModifyImage() ensures that there is only a single reference to the image
% to be modified, updating the provided image pointer to point to a clone of
% the original image if necessary.
%
% The format of the ModifyImage method is:
%
% MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  Image
    *clone_image;

  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  /* Sole owner already: the caller may mutate the image in place. */
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return(MagickFalse);  /* clone failed: keep *image and its refcount intact */
  /*
    Drop our reference on the shared original (under its semaphore) and
    adopt the private clone.
  */
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w M a g i c k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewMagickImage() creates a blank image canvas of the specified size and
% background color.
%
% The format of the NewMagickImage method is:
%
% Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
% const size_t height,const MagickPixelPacket *background)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the image width.
%
% o height: the image height.
%
% o background: the image color.
%
*/
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
  const size_t width,const size_t height,const MagickPixelPacket *background)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  Image
    *image;

  ssize_t
    y;

  MagickBooleanType
    status;

  assert(image_info != (const ImageInfo *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info->signature == MagickCoreSignature);
  assert(background != (const MagickPixelPacket *) NULL);
  /* The fresh canvas inherits colorspace/matte/fuzz/depth from the
     requested background color so the fill is interpreted consistently. */
  image=AcquireImage(image_info);
  image->columns=width;
  image->rows=height;
  image->colorspace=background->colorspace;
  image->matte=background->matte;
  image->fuzz=background->fuzz;
  image->depth=background->depth;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a previous row already failed */
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelPacket(image,background,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /* On failure the partially initialized canvas is released and NULL is
     returned (DestroyImage yields NULL here). */
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e f e r e n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferenceImage() increments the reference count associated with an image
% returning a pointer to the image.
%
% The format of the ReferenceImage method is:
%
% Image *ReferenceImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* Bump the reference count under the image semaphore so concurrent
     Reference/Destroy calls remain consistent. */
  LockSemaphoreInfo(image->semaphore);
  image->reference_count++;
  UnlockSemaphoreInfo(image->semaphore);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePage() resets the image page canvas and position.
%
% The format of the ResetImagePage method is:
%
% MagickBooleanType ResetImagePage(Image *image,const char *page)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o page: the relative page specification.
%
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  /*
    Apply a page specification to the image canvas: absolute width/height,
    then offsets (relative when the aspect flag is present, absolute
    otherwise).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  flags=ParseAbsoluteGeometry(page,&geometry);
  if ((flags & WidthValue) != 0)
    {
      /* A lone width implies a square canvas. */
      if ((flags & HeightValue) == 0)
        geometry.height=geometry.width;
      image->page.width=geometry.width;
      image->page.height=geometry.height;
    }
  if ((flags & XValue) != 0)
    {
      if ((flags & AspectValue) != 0)
        image->page.x+=geometry.x;
      else
        {
          image->page.x=geometry.x;
          /* Grow an unset canvas width to cover the offset image. */
          if ((image->page.width == 0) && (geometry.x > 0))
            image->page.width=image->columns+geometry.x;
        }
    }
  if ((flags & YValue) != 0)
    {
      if ((flags & AspectValue) != 0)
        image->page.y+=geometry.y;
      else
        {
          image->page.y=geometry.y;
          if ((image->page.height == 0) && (geometry.y > 0))
            image->page.height=image->rows+geometry.y;
        }
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePixels() resets the image pixels, that is, all the pixel components
% are zeroed.
%
% The format of the ResetImagePixels method is:
%
% MagickBooleanType ResetImagePixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ResetImagePixels(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const void
    *pixels;

  MagickBooleanType
    status;

  MagickSizeType
    length;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  pixels=AcquirePixelCachePixels(image,&length,exception);
  if (pixels != (void *) NULL)
    {
      /*
        Reset in-core image pixels: zero the whole cache with a single
        memset (the const qualifier is deliberately cast away).
        NOTE(review): this fast path does not explicitly clear the
        colormap-index channel the way the per-row path below does --
        confirm `length` covers it.
      */
      (void) memset((void *) pixels,0,(size_t) length);
      return(MagickTrue);
    }
  /*
    Reset image pixels row by row (e.g. a disk-backed pixel cache).
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a previous row already failed */
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      (void) memset(q,0,sizeof(PixelPacket));
      /* PseudoClass and CMYK images carry an auxiliary index channel. */
      if ((image->storage_class == PseudoClass) ||
          (image->colorspace == CMYKColorspace))
        indexes[x]=0;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e B a c k g r o u n d C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageBackgroundColor() initializes the image pixels to the image
% background color. The background color is defined by the background_color
% member of the image structure.
%
% The format of the SetImageBackgroundColor method is:
%
% MagickBooleanType SetImageBackgroundColor(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  IndexPacket
    index;

  MagickBooleanType
    status;

  MagickPixelPacket
    background;

  PixelPacket
    pixel;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* A non-gray background cannot be represented in a gray colorspace. */
  if ((IsPixelGray(&image->background_color) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) TransformImageColorspace(image,RGBColorspace);
  /* A translucent background requires an alpha channel on the image. */
  if ((image->background_color.opacity != OpaqueOpacity) &&
      (image->matte == MagickFalse))
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /* Precompute the packed pixel (and CMYK index) once, outside the loops. */
  GetMagickPixelPacket(image,&background);
  SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
    NULL,&background);
  if (image->colorspace == CMYKColorspace)
    ConvertRGBToCMYK(&background);
  index=0;
  /* Preset opacity; NOTE(review): presumably SetPixelPacket only overwrites
     this when image->matte is set -- confirm. */
  pixel.opacity=OpaqueOpacity;
  SetPixelPacket(image,&background,&pixel,&index);
  /*
    Set image background color.
  */
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a previous row already failed */
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
      *q++=pixel;
    if (image->colorspace == CMYKColorspace)
      {
        register IndexPacket
          *magick_restrict indexes;

        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
          SetPixelIndex(indexes+x,index);
      }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannels() sets the number of pixels channels associated with the
% image.
%
% The format of the SetImageChannels method is:
%
% MagickBooleanType SetImageChannels(Image *image,const size_t channels)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channels: The number of pixel channels.
%
*/
MagickExport MagickBooleanType SetImageChannels(Image *image,
  const size_t channels)
{
  /*
    Record the number of pixel channels associated with the image.
    Validate the image handle first, consistent with the other setters
    in this file.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  image->channels=channels;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColor() set the entire image canvas to the specified color.
%
% The format of the SetImageColor method is:
%
% MagickBooleanType SetImageColor(Image *image,
% const MagickPixelPacket *color)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o background: the image color.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
  const MagickPixelPacket *color)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  assert(color != (const MagickPixelPacket *) NULL);
  /* The canvas adopts the fill color's colorspace/matte/fuzz/depth so the
     pixel values written below are interpreted consistently. */
  image->colorspace=color->colorspace;
  image->matte=color->matte;
  image->fuzz=color->fuzz;
  image->depth=color->depth;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a previous row already failed */
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelPacket(image,color,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageStorageClass() sets the image class: DirectClass for true color
% images or PseudoClass for colormapped images.
%
% The format of the SetImageStorageClass method is:
%
% MagickBooleanType SetImageStorageClass(Image *image,
% const ClassType storage_class)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o storage_class: The image class.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
  const ClassType storage_class)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->storage_class=storage_class;
  /* Re-synchronize the pixel cache so its morphology matches the new
     storage class; its status is the function's return value. */
  return(SyncImagePixelCache(image,&image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C l i p M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageClipMask() associates a clip path with the image. The clip path
% must be the same dimensions as the image. Set any pixel component of
% the clip path to TransparentOpacity to prevent that corresponding image
% pixel component from being updated when SyncAuthenticPixels() is applied.
%
% The format of the SetImageClipMask method is:
%
% MagickBooleanType SetImageClipMask(Image *image,const Image *clip_mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clip_mask: the image clip path.
%
*/
MagickExport MagickBooleanType SetImageClipMask(Image *image,
  const Image *clip_mask)
{
  /*
    Install a private copy of clip_mask as the image's clip path; a NULL
    clip_mask simply removes any existing clip path.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if ((clip_mask != (const Image *) NULL) &&
      ((clip_mask->columns != image->columns) ||
       (clip_mask->rows != image->rows)))
    ThrowBinaryImageException(ImageError,"ImageSizeDiffers",image->filename);
  /* Release the previous clip path before installing the new one. */
  if (image->clip_mask != (Image *) NULL)
    image->clip_mask=DestroyImage(image->clip_mask);
  image->clip_mask=NewImageList();
  if (clip_mask == (const Image *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  image->clip_mask=CloneImage(clip_mask,0,0,MagickTrue,&image->exception);
  return(image->clip_mask != (Image *) NULL ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageExtent() sets the image size (i.e. columns & rows).
%
% The format of the SetImageExtent method is:
%
% MagickBooleanType SetImageExtent(Image *image,const size_t columns,
% const size_t rows)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: The image width in pixels.
%
% o rows: The image height in pixels.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
  const size_t rows)
{
  /*
    Resize the canvas geometry; the pixel cache is re-synchronized so its
    extent matches the new columns/rows.
  */
  if ((columns == 0) || (rows == 0))
    ThrowBinaryImageException(ImageError,"NegativeOrZeroImageSize",
      image->filename);
  image->columns=columns;
  image->rows=rows;
  if ((image->depth == 0) || (image->depth > (8*sizeof(MagickSizeType))))
    {
      /* Clamp an unsupported depth into range and record a warning. */
      image->depth=(image->depth == 0) ? 8 : 8*sizeof(MagickSizeType);
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ImageError,"ImageDepthNotSupported","`%s'",image->filename);
    }
  return(SyncImagePixelCache(image,&image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfo() initializes the `magick' field of the ImageInfo structure.
% It is set to a type of image format based on the prefix or suffix of the
% filename. For example, `ps:image' returns PS indicating a Postscript image.
% JPEG is returned for this filename: `image.jpg'. The filename prefix has
% precedence over the suffix. Use an optional index enclosed in brackets
% after a file name to specify a desired scene of a multi-resolution image
% format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value
% indicates success.
%
% The format of the SetImageInfo method is:
%
% MagickBooleanType SetImageInfo(ImageInfo *image_info,
% const unsigned int frames,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o frames: the number of images you intend to write.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
  const unsigned int frames,ExceptionInfo *exception)
{
  char
    extension[MaxTextExtent],
    filename[MaxTextExtent],
    magic[MaxTextExtent],
    *q,
    subimage[MaxTextExtent];

  const MagicInfo
    *magic_info;

  const MagickInfo
    *magick_info;

  ExceptionInfo
    *sans_exception;

  Image
    *image;

  MagickBooleanType
    status;

  register const char
    *p;

  ssize_t
    count;

  unsigned char
    magick[2*MaxTextExtent];

  /*
    Look for 'image.format' in filename.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  *subimage='\0';
  GetPathComponent(image_info->filename,SubimagePath,subimage);
  if (*subimage != '\0')
    {
      /*
        Look for scene specification (e.g. img0001.pcd[4]).
      */
      if (IsSceneGeometry(subimage,MagickFalse) == MagickFalse)
        {
          /* Not a scene list: treat a bracketed geometry as an extract
             region instead. */
          if (IsGeometry(subimage) != MagickFalse)
            (void) CloneString(&image_info->extract,subimage);
        }
      else
        {
          size_t
            first,
            last;

          (void) CloneString(&image_info->scenes,subimage);
          image_info->scene=StringToUnsignedLong(image_info->scenes);
          image_info->number_scenes=image_info->scene;
          p=image_info->scenes;
          /*
            Parse a comma-separated list of scenes/ranges (e.g. "2,4-7"),
            tracking the lowest first scene and highest last scene; q is
            advanced by strtol() and terminates the loop at end-of-string.
          */
          for (q=(char *) image_info->scenes; *q != '\0'; p++)
          {
            while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
              p++;
            first=(size_t) strtol(p,&q,10);
            last=first;
            while (isspace((int) ((unsigned char) *q)) != 0)
              q++;
            if (*q == '-')
              last=(size_t) strtol(q+1,&q,10);
            if (first > last)
              Swap(first,last);
            if (first < image_info->scene)
              image_info->scene=first;
            if (last > image_info->number_scenes)
              image_info->number_scenes=last;
            p=q;
          }
          /* Convert the highest scene into a count relative to the start. */
          image_info->number_scenes-=image_info->scene-1;
          image_info->subimage=image_info->scene;
          image_info->subrange=image_info->number_scenes;
        }
    }
  *extension='\0';
  if (*image_info->magick == '\0')
    GetPathComponent(image_info->filename,ExtensionPath,extension);
#if defined(MAGICKCORE_ZLIB_DELEGATE)
  /* Strip a zlib-style compression suffix and re-derive the true image
     extension (e.g. "image.svg.gz" -> "svg"). */
  if (*extension != '\0')
    if ((LocaleCompare(extension,"gz") == 0) ||
        (LocaleCompare(extension,"Z") == 0) ||
        (LocaleCompare(extension,"svgz") == 0) ||
        (LocaleCompare(extension,"wmz") == 0))
      {
        char
          path[MaxTextExtent];

        (void) CopyMagickString(path,image_info->filename,MaxTextExtent);
        path[strlen(path)-strlen(extension)-1]='\0';
        GetPathComponent(path,ExtensionPath,extension);
      }
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
  /* Likewise for bzip2-compressed files. */
  if (*extension != '\0')
    if (LocaleCompare(extension,"bz2") == 0)
      {
        char
          path[MaxTextExtent];

        (void) CopyMagickString(path,image_info->filename,MaxTextExtent);
        path[strlen(path)-strlen(extension)-1]='\0';
        GetPathComponent(path,ExtensionPath,extension);
      }
#endif
  image_info->affirm=MagickFalse;
  sans_exception=AcquireExceptionInfo();
  if ((*extension != '\0') && (IsGlob(extension) == MagickFalse))
    {
      MagickFormatType
        format_type;

      register ssize_t
        i;

      static const char
        *format_type_formats[] =
        {
          "AUTOTRACE",
          "BROWSE",
          "DCRAW",
          "EDIT",
          "LAUNCH",
          "MPEG:DECODE",
          "MPEG:ENCODE",
          "PRINT",
          "PS:ALPHA",
          "PS:CMYK",
          "PS:COLOR",
          "PS:GRAY",
          "PS:MONO",
          "SCAN",
          "SHOW",
          "WIN",
          (char *) NULL
        };

      /*
        User specified image format.
      */
      (void) CopyMagickString(magic,extension,MaxTextExtent);
      LocaleUpper(magic);
      /*
        Look for explicit image formats.
      */
      format_type=UndefinedFormatType;
      i=0;
      while ((format_type == UndefinedFormatType) &&
             (format_type_formats[i] != (char *) NULL))
      {
        /* First-character compare is a cheap pre-filter for LocaleCompare. */
        if ((*magic == *format_type_formats[i]) &&
            (LocaleCompare(magic,format_type_formats[i]) == 0))
          format_type=ExplicitFormatType;
        i++;
      }
      magick_info=GetMagickInfo(magic,sans_exception);
      if ((magick_info != (const MagickInfo *) NULL) &&
          (magick_info->format_type != UndefinedFormatType))
        format_type=magick_info->format_type;
      if (format_type == UndefinedFormatType)
        (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
      else
        if (format_type == ExplicitFormatType)
          {
            /* affirm: skip the magic-byte sniffing stage entirely. */
            image_info->affirm=MagickTrue;
            (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
          }
      if (LocaleCompare(magic,"RGB") == 0)
        image_info->affirm=MagickFalse;  /* maybe SGI disguised as RGB */
    }
  /*
    Look for explicit 'format:image' in filename.
  */
  *magic='\0';
  GetPathComponent(image_info->filename,MagickPath,magic);
  if (*magic == '\0')
    {
      (void) CopyMagickString(magic,image_info->magick,MaxTextExtent);
      magick_info=GetMagickInfo(magic,sans_exception);
      if (frames == 0)
        GetPathComponent(image_info->filename,CanonicalPath,filename);
      else
        GetPathComponent(image_info->filename,SubcanonicalPath,filename);
      (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
    }
  else
    {
      const DelegateInfo
        *delegate_info;

      /*
        User specified image format.
      */
      LocaleUpper(magic);
      magick_info=GetMagickInfo(magic,sans_exception);
      delegate_info=GetDelegateInfo(magic,"*",sans_exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        delegate_info=GetDelegateInfo("*",magic,sans_exception);
      /* Honor the prefix only when a coder or delegate actually exists and
         the name does not conflict (e.g. with a device/path prefix). */
      if (((magick_info != (const MagickInfo *) NULL) ||
           (delegate_info != (const DelegateInfo *) NULL)) &&
          (IsMagickConflict(magic) == MagickFalse))
        {
          image_info->affirm=MagickTrue;
          (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
          GetPathComponent(image_info->filename,CanonicalPath,filename);
          (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
        }
    }
  sans_exception=DestroyExceptionInfo(sans_exception);
  if ((magick_info == (const MagickInfo *) NULL) ||
      (GetMagickEndianSupport(magick_info) == MagickFalse))
    image_info->endian=UndefinedEndian;
  if ((image_info->adjoin != MagickFalse) && (frames > 1))
    {
      /*
        Test for multiple image support (e.g. image%02d.png).
      */
      (void) InterpretImageFilename(image_info,(Image *) NULL,
        image_info->filename,(int) image_info->scene,filename);
      if ((LocaleCompare(filename,image_info->filename) != 0) &&
          (strchr(filename,'%') == (char *) NULL))
        image_info->adjoin=MagickFalse;
    }
  if ((image_info->adjoin != MagickFalse) && (frames > 0))
    {
      /*
        Some image formats do not support multiple frames per file.
      */
      magick_info=GetMagickInfo(magic,exception);
      if (magick_info != (const MagickInfo *) NULL)
        if (GetMagickAdjoin(magick_info) == MagickFalse)
          image_info->adjoin=MagickFalse;
    }
  if (image_info->affirm != MagickFalse)
    return(MagickTrue);
  if (frames == 0)
    {
      /*
        Determine the image format from the first few bytes of the file.
      */
      image=AcquireImage(image_info);
      (void) CopyMagickString(image->filename,image_info->filename,
        MaxTextExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          image=DestroyImage(image);
          return(MagickFalse);
        }
      if ((IsBlobSeekable(image) == MagickFalse) ||
          (IsBlobExempt(image) != MagickFalse))
        {
          /*
            Copy image to a seekable temporary file.
          */
          *filename='\0';
          status=ImageToFile(image,filename,exception);
          (void) CloseBlob(image);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          SetImageInfoFile(image_info,(FILE *) NULL);
          (void) CopyMagickString(image->filename,filename,MaxTextExtent);
          status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
          /* Mark so the temporary file is cleaned up by the caller/core. */
          image_info->temporary=MagickTrue;
        }
      /* Sniff the leading bytes, then rewind so a subsequent reader sees
         the stream from the start. */
      (void) memset(magick,0,sizeof(magick));
      count=ReadBlob(image,2*MaxTextExtent,magick);
      (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
      (void) CloseBlob(image);
      image=DestroyImage(image);
      /*
        Check magic.xml configuration file.
      */
      sans_exception=AcquireExceptionInfo();
      magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
      if ((magic_info != (const MagicInfo *) NULL) &&
          (GetMagicName(magic_info) != (char *) NULL))
        {
          (void) CopyMagickString(image_info->magick,GetMagicName(magic_info),
            MaxTextExtent);
          magick_info=GetMagickInfo(image_info->magick,sans_exception);
          if ((magick_info == (const MagickInfo *) NULL) ||
              (GetMagickEndianSupport(magick_info) == MagickFalse))
            image_info->endian=UndefinedEndian;
          sans_exception=DestroyExceptionInfo(sans_exception);
          return(MagickTrue);
        }
      magick_info=GetMagickInfo(image_info->magick,sans_exception);
      if ((magick_info == (const MagickInfo *) NULL) ||
          (GetMagickEndianSupport(magick_info) == MagickFalse))
        image_info->endian=UndefinedEndian;
      sans_exception=DestroyExceptionInfo(sans_exception);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoBlob() sets the image info blob member.
%
% The format of the SetImageInfoBlob method is:
%
% void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
% const size_t length)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o blob: the blob.
%
% o length: the blob length.
%
*/
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
  const size_t length)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /* Only the pointer is stored (const is cast away); the caller retains
     ownership of the blob memory. */
  image_info->blob=(void *) blob;
  image_info->length=length;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoFile() sets the image info file member.
%
% The format of the SetImageInfoFile method is:
%
% void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o file: the file.
%
*/
MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /* The stream is borrowed, not owned; the caller remains responsible for
     closing it. */
  image_info->file=file;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMask() associates a mask with the image. The mask must be the same
% dimensions as the image.
%
% The format of the SetImageMask method is:
%
% MagickBooleanType SetImageMask(Image *image,const Image *mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mask: the image mask.
%
*/
MagickExport MagickBooleanType SetImageMask(Image *image,const Image *mask)
{
  /*
    Install a private copy of mask on the image; passing NULL simply
    removes any existing mask.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if ((mask != (const Image *) NULL) &&
      ((mask->columns != image->columns) || (mask->rows != image->rows)))
    ThrowBinaryImageException(ImageError,"ImageSizeDiffers",image->filename);
  /* Release the previous mask before installing the new one. */
  if (image->mask != (Image *) NULL)
    image->mask=DestroyImage(image->mask);
  image->mask=NewImageList();
  if (mask == (const Image *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  image->mask=CloneImage(mask,0,0,MagickTrue,&image->exception);
  return(image->mask != (Image *) NULL ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e O p a c i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageOpacity() sets the opacity levels of the image.
%
% The format of the SetImageOpacity method is:
%
% MagickBooleanType SetImageOpacity(Image *image,const Quantum opacity)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: the level of transparency: 0 is fully opaque and QuantumRange is
% fully transparent.
%
*/
MagickExport MagickBooleanType SetImageOpacity(Image *image,
  const Quantum opacity)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* The image gains an alpha channel as a side effect. */
  image->matte=MagickTrue;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a previous row already failed */
    /* Get (not Queue) so the existing color channels are read in and
       preserved -- only the opacity channel is rewritten. */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelOpacity(q,opacity);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
% image and returns the previous setting. A virtual pixel is any pixel access
% that is outside the boundaries of the image cache.
%
% The format of the SetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
% const VirtualPixelMethod virtual_pixel_method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
const VirtualPixelMethod virtual_pixel_method)
{
/* Thin wrapper over the pixel cache: install the method used to resolve
out-of-bounds ("virtual") pixel accesses and return the previous one. */
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
return(SetPixelCacheVirtualMethod(image,virtual_pixel_method));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S m u s h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SmushImages() takes all images from the current image pointer to the end
% of the image list and smushes them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting now affects how the image is justified in the
%  final image.
%
% The format of the SmushImages method is:
%
%      Image *SmushImages(const Image *images,const MagickBooleanType stack,
%        const ssize_t offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o offset: minimum distance in pixels between images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
const ssize_t offset,ExceptionInfo *exception)
{
/* For left-to-right smushing: measure, per row, how many transparent pixels
trail the previous (left) image plus lead the current (right) image, and
return the smallest such overlap across all rows minus `offset` — i.e. how
far the right image may slide left. Returns 0 for the first image. */
CacheView
*left_view,
*right_view;
const Image
*left_image,
*right_image;
RectangleInfo
left_geometry,
right_geometry;
register const PixelPacket
*p;
register ssize_t
i,
y;
size_t
gap;
ssize_t
x;
if (images->previous == (Image *) NULL)
return(0);
right_image=images;
SetGeometry(smush_image,&right_geometry);
GravityAdjustGeometry(right_image->columns,right_image->rows,
right_image->gravity,&right_geometry);
left_image=images->previous;
SetGeometry(smush_image,&left_geometry);
GravityAdjustGeometry(left_image->columns,left_image->rows,
left_image->gravity,&left_geometry);
/* Start with the maximum conceivable overlap and shrink it per row. */
gap=right_image->columns;
left_view=AcquireVirtualCacheView(left_image,exception);
right_view=AcquireVirtualCacheView(right_image,exception);
for (y=0; y < (ssize_t) smush_image->rows; y++)
{
/* Count trailing transparent pixels on the left image's row. */
for (x=(ssize_t) left_image->columns-1; x > 0; x--)
{
p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(GetPixelOpacity(p) != TransparentOpacity) ||
((left_image->columns-x-1) >= gap))
break;
}
i=(ssize_t) left_image->columns-x-1;
/* Count leading transparent pixels on the right image's row. */
for (x=0; x < (ssize_t) right_image->columns; x++)
{
p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
exception);
if ((p == (const PixelPacket *) NULL) ||
(GetPixelOpacity(p) != TransparentOpacity) ||
((x+i) >= (ssize_t) gap))
break;
}
if ((x+i) < (ssize_t) gap)
gap=(size_t) (x+i);
}
right_view=DestroyCacheView(right_view);
left_view=DestroyCacheView(left_view);
/* Defensive: the row loop above has no break, so y == rows here and this
fallback path is not expected to trigger. */
if (y < (ssize_t) smush_image->rows)
return(offset);
return((ssize_t) gap-offset);
}
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
const ssize_t offset,ExceptionInfo *exception)
{
/* Vertical counterpart of SmushXGap: for top-to-bottom smushing, measure,
per column, the transparent run at the bottom of the previous (top) image
plus the transparent run at the top of the current (bottom) image, and
return the smallest such overlap minus `offset`. Returns 0 for the first
image. */
CacheView
*bottom_view,
*top_view;
const Image
*bottom_image,
*top_image;
RectangleInfo
bottom_geometry,
top_geometry;
register const PixelPacket
*p;
register ssize_t
i,
x;
size_t
gap;
ssize_t
y;
if (images->previous == (Image *) NULL)
return(0);
bottom_image=images;
SetGeometry(smush_image,&bottom_geometry);
GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
bottom_image->gravity,&bottom_geometry);
top_image=images->previous;
SetGeometry(smush_image,&top_geometry);
GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
&top_geometry);
/* Start with the maximum conceivable overlap and shrink it per column. */
gap=bottom_image->rows;
top_view=AcquireVirtualCacheView(top_image,exception);
bottom_view=AcquireVirtualCacheView(bottom_image,exception);
for (x=0; x < (ssize_t) smush_image->columns; x++)
{
/* Count trailing transparent pixels in the top image's column. */
for (y=(ssize_t) top_image->rows-1; y > 0; y--)
{
p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(GetPixelOpacity(p) != TransparentOpacity) ||
((top_image->rows-y-1) >= gap))
break;
}
i=(ssize_t) top_image->rows-y-1;
/* Count leading transparent pixels in the bottom image's column. */
for (y=0; y < (ssize_t) bottom_image->rows; y++)
{
p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
exception);
if ((p == (const PixelPacket *) NULL) ||
(GetPixelOpacity(p) != TransparentOpacity) ||
((y+i) >= (ssize_t) gap))
break;
}
if ((y+i) < (ssize_t) gap)
gap=(size_t) (y+i);
}
bottom_view=DestroyCacheView(bottom_view);
top_view=DestroyCacheView(top_view);
/* Defensive: the column loop above has no break, so this fallback path is
not expected to trigger. */
if (x < (ssize_t) smush_image->columns)
return(offset);
return((ssize_t) gap-offset);
}
MagickExport Image *SmushImages(const Image *images,
  const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag  "Smush/Image"

  CacheView
    *smush_view;

  const Image
    *image;

  Image
    *smush_image;

  MagickBooleanType
    matte,
    proceed,
    status;

  MagickOffsetType
    n;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y_offset;

  /*
    Compute the bounding size of the smushed image sequence: heights (plus
    inter-image offsets) accumulate when stacking, widths accumulate when
    laying out left-to-right.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  matte=image->matte;
  number_images=1;
  width=image->columns;
  height=image->rows;
  next=GetNextImageInList(image);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->matte != MagickFalse)
      matte=MagickTrue;
    number_images++;
    if (stack != MagickFalse)
      {
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        if (next->previous != (Image *) NULL)
          height+=offset;
        continue;
      }
    width+=next->columns;
    if (next->previous != (Image *) NULL)
      width+=offset;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Smush images: composite each image onto the canvas, sliding it toward its
    predecessor by the transparent gap computed by SmushXGap/SmushYGap.
  */
  smush_image=CloneImage(image,width,height,MagickTrue,exception);
  if (smush_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(smush_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&smush_image->exception);
      smush_image=DestroyImage(smush_image);
      return((Image *) NULL);
    }
  smush_image->matte=matte;
  (void) SetImageBackgroundColor(smush_image);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  smush_view=AcquireVirtualCacheView(smush_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    SetGeometry(smush_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    if (stack != MagickFalse)
      {
        x_offset-=geometry.x;
        y_offset-=SmushYGap(smush_image,image,offset,exception);
      }
    else
      {
        x_offset-=SmushXGap(smush_image,image,offset,exception);
        y_offset-=geometry.y;
      }
    /*
      Accumulate failures instead of overwriting status on each iteration so
      a composite error on any image is still reported after the loop.
    */
    if (CompositeImage(smush_image,OverCompositeOp,image,x_offset,
        y_offset) == MagickFalse)
      status=MagickFalse;
    proceed=SetImageProgress(image,SmushImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  /* Trim the canvas to the extent actually covered by the composites. */
  if (stack == MagickFalse)
    smush_image->columns=(size_t) x_offset;
  else
    smush_image->rows=(size_t) y_offset;
  smush_view=DestroyCacheView(smush_view);
  if (status == MagickFalse)
    smush_image=DestroyImage(smush_image);
  return(smush_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StripImage() strips an image of all profiles and comments.
%
% The format of the StripImage method is:
%
% MagickBooleanType StripImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType StripImage(Image *image)
{
  /*
    Strip the image of all profiles and of the comment/date properties, and
    record an artifact telling the PNG encoder to omit ancillary chunks.
    Returns the status of the artifact assignment.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  DestroyImageProfiles(image);
  (void) DeleteImageProperty(image,"comment");
  (void) DeleteImageProperty(image,"date:create");
  (void) DeleteImageProperty(image,"date:modify");
  return(SetImageArtifact(image,"png:exclude-chunk",
    "bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date"));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImage() initializes the red, green, and blue intensities of each pixel
% as defined by the colormap index.
%
% The format of the SyncImage method is:
%
% MagickBooleanType SyncImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static inline IndexPacket PushColormapIndex(Image *image,
  const size_t index,MagickBooleanType *range_exception)
{
  /*
    Validate a colormap index: an out-of-range index raises the caller's
    range-exception flag and maps to colormap entry 0.
  */
  if (index >= image->colors)
    {
      *range_exception=MagickTrue;
      return((IndexPacket) 0);
    }
  return((IndexPacket) index);
}
MagickExport MagickBooleanType SyncImage(Image *image)
{
/* Refresh each pixel's RGB (and, when matte, opacity) from the colormap
entry selected by its index. Only meaningful for PseudoClass images. */
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
range_exception,
status,
taint;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
/* Ping images carry no pixel data to synchronize. */
if (image->ping != MagickFalse)
return(MagickTrue);
if (image->storage_class != PseudoClass)
return(MagickFalse);
assert(image->colormap != (PixelPacket *) NULL);
range_exception=MagickFalse;
status=MagickTrue;
/* Preserve the taint flag: syncing is not a user modification. */
taint=image->taint;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(range_exception,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
IndexPacket
index;
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
index=PushColormapIndex(image,(size_t) GetPixelIndex(indexes+x),
&range_exception);
/* No semicolon after SetPixelRgb: presumably a braced-block macro, so
the if/else parses as written — do not "fix" by adding one. */
if (image->matte == MagickFalse)
SetPixelRgb(q,image->colormap+(ssize_t) index)
else
SetPixelRGBO(q,image->colormap+(ssize_t) index);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->taint=taint;
/* Report (as a warning, once) any index that exceeded the colormap. */
if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
(void) ThrowMagickException(&image->exception,GetMagickModule(),
CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e S e t t i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageSettings() syncs image_info options into per-image attributes.
%
% The format of the SyncImageSettings method is:
%
% MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
% Image *image)
% MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
% Image *image)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images)
{
  Image
    *image;

  /*
    Apply the image_info options to every image in the list, then delete the
    one-shot "page" option so it is not applied a second time.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  for (image=images; image != (Image *) NULL; image=GetNextImageInList(image))
    (void) SyncImageSettings(image_info,image);
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}
MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
Image *image)
{
/* Copy recognized image_info options ("background", "density", "units", …)
into the corresponding per-image attributes, then mirror every remaining
option into the image's artifact list. Always returns MagickTrue. */
char
property[MaxTextExtent];
const char
*option,
*value;
GeometryInfo
geometry_info;
MagickStatusType
flags;
ResolutionType
units;
/*
Sync image options.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
option=GetImageOption(image_info,"background");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->background_color,
&image->exception);
option=GetImageOption(image_info,"bias");
if (option != (const char *) NULL)
image->bias=StringToDoubleInterval(option,(double) QuantumRange+1.0);
option=GetImageOption(image_info,"black-point-compensation");
if (option != (const char *) NULL)
image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
MagickBooleanOptions,MagickFalse,option);
option=GetImageOption(image_info,"blue-primary");
if (option != (const char *) NULL)
{
/* Chromaticity pairs default the y coordinate to x when only one value
is supplied (same pattern for green-, red-primary and white-point). */
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.blue_primary.x=geometry_info.rho;
image->chromaticity.blue_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
}
option=GetImageOption(image_info,"bordercolor");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->border_color,&image->exception);
option=GetImageOption(image_info,"colors");
if (option != (const char *) NULL)
image->colors=StringToUnsignedLong(option);
option=GetImageOption(image_info,"compose");
if (option != (const char *) NULL)
image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
MagickFalse,option);
option=GetImageOption(image_info,"compress");
if (option != (const char *) NULL)
image->compression=(CompressionType) ParseCommandOption(
MagickCompressOptions,MagickFalse,option);
option=GetImageOption(image_info,"debug");
if (option != (const char *) NULL)
image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"density");
if (option != (const char *) NULL)
{
GeometryInfo
geometry_info;
/*
Set image density.
*/
flags=ParseGeometry(option,&geometry_info);
image->x_resolution=geometry_info.rho;
image->y_resolution=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->y_resolution=image->x_resolution;
}
option=GetImageOption(image_info,"depth");
if (option != (const char *) NULL)
image->depth=StringToUnsignedLong(option);
option=GetImageOption(image_info,"endian");
if (option != (const char *) NULL)
image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
MagickFalse,option);
option=GetImageOption(image_info,"filter");
if (option != (const char *) NULL)
image->filter=(FilterTypes) ParseCommandOption(MagickFilterOptions,
MagickFalse,option);
option=GetImageOption(image_info,"fuzz");
if (option != (const char *) NULL)
image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
option=GetImageOption(image_info,"gravity");
if (option != (const char *) NULL)
image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(image_info,"green-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.green_primary.x=geometry_info.rho;
image->chromaticity.green_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
}
option=GetImageOption(image_info,"intensity");
if (option != (const char *) NULL)
image->intensity=(PixelIntensityMethod) ParseCommandOption(
MagickPixelIntensityOptions,MagickFalse,option);
option=GetImageOption(image_info,"intent");
if (option != (const char *) NULL)
image->rendering_intent=(RenderingIntent) ParseCommandOption(
MagickIntentOptions,MagickFalse,option);
option=GetImageOption(image_info,"interlace");
if (option != (const char *) NULL)
image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
MagickFalse,option);
option=GetImageOption(image_info,"interpolate");
if (option != (const char *) NULL)
image->interpolate=(InterpolatePixelMethod) ParseCommandOption(
MagickInterpolateOptions,MagickFalse,option);
option=GetImageOption(image_info,"loop");
if (option != (const char *) NULL)
image->iterations=StringToUnsignedLong(option);
option=GetImageOption(image_info,"mattecolor");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->matte_color,&image->exception);
option=GetImageOption(image_info,"orient");
if (option != (const char *) NULL)
image->orientation=(OrientationType) ParseCommandOption(
MagickOrientationOptions,MagickFalse,option);
option=GetImageOption(image_info,"page");
if (option != (const char *) NULL)
{
char
*geometry;
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"quality");
if (option != (const char *) NULL)
image->quality=StringToUnsignedLong(option);
option=GetImageOption(image_info,"red-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.red_primary.x=geometry_info.rho;
image->chromaticity.red_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
}
/* An explicit image_info->quality overrides the "quality" option. */
if (image_info->quality != UndefinedCompressionQuality)
image->quality=image_info->quality;
option=GetImageOption(image_info,"scene");
if (option != (const char *) NULL)
image->scene=StringToUnsignedLong(option);
option=GetImageOption(image_info,"taint");
if (option != (const char *) NULL)
image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"tile-offset");
if (option != (const char *) NULL)
{
char
*geometry;
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"transparent-color");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->transparent_color,
&image->exception);
option=GetImageOption(image_info,"type");
if (option != (const char *) NULL)
image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
option);
/* The "units" option overrides image_info->units; when the unit actually
changes, convert the stored resolution between inches and centimeters,
then re-apply any explicit "density". */
option=GetImageOption(image_info,"units");
units=image_info->units;
if (option != (const char *) NULL)
units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
MagickFalse,option);
if (units != UndefinedResolution)
{
if (image->units != units)
switch (image->units)
{
case PixelsPerInchResolution:
{
if (units == PixelsPerCentimeterResolution)
{
image->x_resolution/=2.54;
image->y_resolution/=2.54;
}
break;
}
case PixelsPerCentimeterResolution:
{
if (units == PixelsPerInchResolution)
{
/* Round to two decimal places when converting back to inches. */
image->x_resolution=(double) ((size_t) (100.0*2.54*
image->x_resolution+0.5))/100.0;
image->y_resolution=(double) ((size_t) (100.0*2.54*
image->y_resolution+0.5))/100.0;
}
break;
}
default:
break;
}
image->units=units;
option=GetImageOption(image_info,"density");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->x_resolution=geometry_info.rho;
image->y_resolution=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->y_resolution=image->x_resolution;
}
}
option=GetImageOption(image_info,"white-point");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.white_point.x=geometry_info.rho;
image->chromaticity.white_point.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.white_point.y=image->chromaticity.white_point.x;
}
/* Mirror every remaining option into the image's artifact list so coders
can look them up later. */
ResetImageOptionIterator(image_info);
for (option=GetNextImageOption(image_info); option != (const char *) NULL; )
{
value=GetImageOption(image_info,option);
if (value != (const char *) NULL)
{
(void) FormatLocaleString(property,MaxTextExtent,"%s",option);
(void) SetImageArtifact(image,property,value);
}
option=GetNextImageOption(image_info);
}
return(MagickTrue);
}
|
omp_dbg.c | #include <stdio.h>
#include <omp.h>
#include "context_descriptor.h"
#include "register_context.h"
#include "dvmh_omp_event.h"
#include "dvmh_omp_thread_info.h"
#include "dvmh_omp_event_analyzer.h"
#include "omp_dbg.h"
// Helper conversion macros: thread-info and context-descriptor pointers are
// passed through the instrumented program as plain long values.
#define TO_THREAD_INFO(long_value) ((dvmh_omp_thread_info *) (long_value))
#define TO_LONG(ptr) ((long) (ptr))
#define TO_DESC(long_value) ((context_descriptor *) (long_value))
// A ThreadID slot holds -1 until a thread-info record has been assigned to it.
#define IS_INITIALIZED(thread_id_ptr) (((int) (*(thread_id_ptr))) != -1 ? 1 : 0 )
// Thread info of the initial (master) thread: created by DBG_Init, handed off
// and released by DBG_Finalize.
static dvmh_omp_thread_info *initial_thread_info = NULL;
/* Retrieve the parallel-region event previously stashed in a parallel context
 * descriptor; emits a diagnostic and returns NULL for other context types. */
static dvmh_omp_event *get_parent_event(long *StaticContextHandle)
{
    context_descriptor *descriptor = TO_DESC(*StaticContextHandle);

    if (descriptor->info.type == CONTEXT_PARALLEL) {
        return (dvmh_omp_event *) descriptor->parallel.parent_event;
    }
    fprintf(stderr, "Context Type is not parallel");
    return NULL;
}
/* Stash the parallel-region event in its context descriptor so that worker
 * threads can later retrieve it via get_parent_event(). */
static void set_parent_event(long *StaticContextHandle, dvmh_omp_event *pe)
{
    context_descriptor *descriptor = TO_DESC(*StaticContextHandle);

    if (descriptor->info.type != CONTEXT_PARALLEL) {
        fprintf(stderr, "Context Type is not parallel");
        return;
    }
    descriptor->parallel.parent_event = (void *) pe;
}
/* Return the numeric address of a variable. NOTE(review): casting a pointer
 * to long truncates on LLP64 platforms (64-bit Windows) — confirm the set of
 * supported targets. */
long DBG_Get_Addr(void *VarPtr)
{
return (long) VarPtr;
}
/* Intentionally empty hook, kept for interface compatibility with the DVMH
 * debugging API. */
void DBG_Type_Control()
{
//fprintf (stderr, "DBG_Type_Control\n");
}
/* Create the master thread's bookkeeping record and open the root
 * DVMH_OMP_EVENT_INIT event. Must run once before any other DBG_* hook;
 * *ThreadID receives the thread-info pointer encoded as a long. */
void DBG_Init(long *ThreadID)
{
    dvmh_omp_event *root;

    initial_thread_info = dvmh_omp_thread_info_create();
    *ThreadID = TO_LONG(initial_thread_info);
    root = dvmh_omp_event_create(DVMH_OMP_EVENT_INIT);
    dvmh_omp_event_set_thread_id(root, TO_LONG(ThreadID));
    dvmh_omp_event_set_begin_time(root, omp_get_wtime());
    dvmh_omp_event_set_context_descriptor(root, NULL);
    dvmh_omp_thread_info_event_occured(initial_thread_info, root);
    fprintf(stdout, "Starting performance analyze\n");
}
/* Close the root event, run the analyzer over the collected event tree and
 * release all bookkeeping. The critical section claims ownership of the
 * global record, making a concurrent or repeated call a no-op. */
void DBG_Finalize()
{
dvmh_omp_thread_info *thread_info;
#pragma omp critical
{
thread_info = initial_thread_info;
initial_thread_info = NULL;
}
if (thread_info == NULL){
return;
}
dvmh_omp_event *event = dvmh_omp_thread_info_get_active_event(thread_info);
dvmh_omp_event_set_end_time(event, omp_get_wtime());
dvmh_omp_event_analyzer(event);
/* NOTE(review): the event is destroyed before
 * dvmh_omp_thread_info_event_finished() runs — confirm that the latter does
 * not dereference the active event. */
dvmh_omp_event_destroy(event);
dvmh_omp_thread_info_event_finished(thread_info);
dvmh_omp_thread_info_destroy(thread_info);
unregister_contexts();
}
/* Register a static context string and return an opaque handle (a
 * context_descriptor pointer encoded as a long) via StaticContextHandle.
 * StringLength is unused here. */
void DBG_Get_Handle(long *StaticContextHandle, char* ContextString, long StringLength)
{
*StaticContextHandle = (long) register_context(ContextString);
}
void DBG_BeforeParallel (long *StaticContextHandle, long *ThreadID, int *NumThreadsResults, int *IfExprResult)
{
dvmh_omp_event *parent_event = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
dvmh_omp_event *event = dvmh_omp_event_create(DVMH_OMP_EVENT_PARALLEL_REGION);
dvmh_omp_event_add_subevent(parent_event, event);
dvmh_omp_event_set_thread_id(event, TO_LONG(ThreadID));
dvmh_omp_event_set_begin_time(event, omp_get_wtime());
dvmh_omp_event_set_context_descriptor(event, TO_DESC(*StaticContextHandle));
dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), event);
set_parent_event(StaticContextHandle, event);
}
/* Per-thread hook inside a parallel region: lazily creates this thread's
 * info record, then opens a DVMH_OMP_EVENT_PARALLEL_THREAD subevent under
 * the region event stored in the context descriptor. */
void DBG_ParallelEvent (long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *region;
    dvmh_omp_event *ev;

    if (!IS_INITIALIZED(ThreadID)) {
        *ThreadID = TO_LONG(dvmh_omp_thread_info_create());
    }
    region = get_parent_event(StaticContextHandle);
    ev = dvmh_omp_event_create(DVMH_OMP_EVENT_PARALLEL_THREAD);
    dvmh_omp_event_add_subevent(region, ev);
    dvmh_omp_event_set_thread_id(ev, TO_LONG(ThreadID));
    dvmh_omp_event_set_begin_time(ev, omp_get_wtime());
    dvmh_omp_event_set_context_descriptor(ev, TO_DESC(*StaticContextHandle));
    dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), ev);
}
void DBG_ParallelEventEnd (long *StaticContextHandle, long *ThreadID)
{
dvmh_omp_event *event = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
dvmh_omp_event_set_end_time(event, omp_get_wtime());
dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
if (!dvmh_omp_thread_info_is_alive(TO_THREAD_INFO(*ThreadID))) {
dvmh_omp_thread_info_destroy(TO_THREAD_INFO(*ThreadID));
*ThreadID = -1; // если параллельная область крутится во внешнем цикле
}
}
void DBG_AfterParallel (long *StaticContextHandle, long *ThreadID)
{
dvmh_omp_event *event = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
dvmh_omp_event_set_end_time(event, omp_get_wtime());
dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
}
void DBG_BeforeInterval (long *StaticContextHandle, long *ThreadID, long *IntervalIndex)
{
dvmh_omp_event *parent_event = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
dvmh_omp_event *event = dvmh_omp_event_create(DVMH_OMP_EVENT_INTERVAL);
dvmh_omp_event_add_subevent(parent_event, event);
dvmh_omp_event_set_thread_id(event, TO_LONG(ThreadID));
dvmh_omp_event_set_begin_time(event, omp_get_wtime());
dvmh_omp_event_set_context_descriptor(event, TO_DESC(*StaticContextHandle));
dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), event);
}
void DBG_AfterInterval (long *StaticContextHandle, long *ThreadID, long *IntervalIndex)
{
dvmh_omp_event *event = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
dvmh_omp_event_set_end_time(event, omp_get_wtime());
dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
}
void DBG_BeforeOMPLoop(long *StaticContextHandle, long *ThreadID, long *Init, long *Last, long *Step, int *ChunkSize)
{
dvmh_omp_event *parent_event = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
dvmh_omp_event *event = dvmh_omp_event_create(DVMH_OMP_EVENT_OMP_LOOP);
dvmh_omp_event_add_subevent(parent_event, event);
dvmh_omp_event_set_thread_id(event, TO_LONG(ThreadID));
dvmh_omp_event_set_begin_time(event, omp_get_wtime());
dvmh_omp_event_set_context_descriptor(event, TO_DESC(*StaticContextHandle));
dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), event);
}
void DBG_OMPIter(long *StaticContextHandle, long *ThreadID, long *Index)
{
dvmh_omp_event *parent_event = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
if (dvmh_omp_event_get_type(parent_event) == DVMH_OMP_EVENT_OMP_ITER){
// закрываем предыдущую итерацию цикла
dvmh_omp_event_set_end_time(parent_event, omp_get_wtime());
dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
parent_event = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
}
dvmh_omp_event *event = dvmh_omp_event_create(DVMH_OMP_EVENT_OMP_ITER);
dvmh_omp_event_add_subevent(parent_event, event);
dvmh_omp_event_set_thread_id(event, TO_LONG(ThreadID));
dvmh_omp_event_set_begin_time(event, omp_get_wtime());
dvmh_omp_event_set_context_descriptor(event, TO_DESC(*StaticContextHandle));
dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), event);
}
void DBG_AfterOMPLoop (long *StaticContextHandle, long *ThreadID)
{
dvmh_omp_event *event = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
if (dvmh_omp_event_get_type(event) == DVMH_OMP_EVENT_OMP_ITER
|| dvmh_omp_event_get_type(event) == DVMH_OMP_EVENT_OMP_IF_ITER){
dvmh_omp_event_set_end_time(event, omp_get_wtime());
dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
event = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
}
dvmh_omp_event_set_end_time(event, omp_get_wtime());
dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
}
void DBG_BeforeSections (long *StaticContextHandle, long *ThreadID)
{
dvmh_omp_event *parent_event = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
dvmh_omp_event *event = dvmh_omp_event_create(DVMH_OMP_EVENT_SECTIONS);
dvmh_omp_event_add_subevent(parent_event, event);
dvmh_omp_event_set_thread_id(event, TO_LONG(ThreadID));
dvmh_omp_event_set_begin_time(event, omp_get_wtime());
dvmh_omp_event_set_context_descriptor(event, TO_DESC(*StaticContextHandle));
dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), event);
}
void DBG_SectionEvent(long *StaticContextHandle, long *ThreadID)
{
dvmh_omp_event *parent_event = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
dvmh_omp_event *event = dvmh_omp_event_create(DVMH_OMP_EVENT_SECTION);
dvmh_omp_event_add_subevent(parent_event, event);
dvmh_omp_event_set_thread_id(event, TO_LONG(ThreadID));
dvmh_omp_event_set_begin_time(event, omp_get_wtime());
dvmh_omp_event_set_context_descriptor(event, TO_DESC(*StaticContextHandle));
dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), event);
}
void DBG_SectionEventEnd(long *StaticContextHandle, long *ThreadID)
{
dvmh_omp_event *event = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
dvmh_omp_event_set_end_time(event, omp_get_wtime());
dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
}
void DBG_AfterSections(long *StaticContextHandle, long *ThreadID)
{
dvmh_omp_event *event = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
dvmh_omp_event_set_end_time(event, omp_get_wtime());
dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
}
void DBG_BeforeSingle (long *StaticContextHandle, long *ThreadID)
{
dvmh_omp_event *parent_event = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
dvmh_omp_event *event = dvmh_omp_event_create(DVMH_OMP_EVENT_SINGLE_OUTER);
dvmh_omp_event_add_subevent(parent_event, event);
dvmh_omp_event_set_thread_id(event, TO_LONG(ThreadID));
dvmh_omp_event_set_begin_time(event, omp_get_wtime());
dvmh_omp_event_set_context_descriptor(event, TO_DESC(*StaticContextHandle));
dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), event);
}
/* Trace hook: the `single` body starts on the chosen thread (inner part). */
void DBG_SingleEvent(long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *parent = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event *ev = dvmh_omp_event_create(DVMH_OMP_EVENT_SINGLE_INNER);
    dvmh_omp_event_add_subevent(parent, ev);
    dvmh_omp_event_set_context_descriptor(ev, TO_DESC(*StaticContextHandle));
    dvmh_omp_event_set_thread_id(ev, TO_LONG(ThreadID));
    dvmh_omp_event_set_begin_time(ev, omp_get_wtime());
    dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), ev);
}
/* Trace hook: the `single` body ends; close the inner event. */
void DBG_SingleEventEnd(long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *active = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event_set_end_time(active, omp_get_wtime());
    dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
}
/* Trace hook: leaving the `single` construct; close the outer event. */
void DBG_AfterSingle (long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *active = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event_set_end_time(active, omp_get_wtime());
    dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
}
/* Trace hook: entering a `workshare` construct. */
void DBG_BeforeWorkshare (long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *parent = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event *ev = dvmh_omp_event_create(DVMH_OMP_EVENT_WORKSHARE);
    dvmh_omp_event_add_subevent(parent, ev);
    dvmh_omp_event_set_context_descriptor(ev, TO_DESC(*StaticContextHandle));
    dvmh_omp_event_set_thread_id(ev, TO_LONG(ThreadID));
    dvmh_omp_event_set_begin_time(ev, omp_get_wtime());
    dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), ev);
}
/* Trace hook: leaving a `workshare` construct; close its event. */
void DBG_AfterWorkshare(long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *active = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event_set_end_time(active, omp_get_wtime());
    dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
}
/* Trace hook: entering a `master` construct. */
void DBG_MasterBegin(long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *parent = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event *ev = dvmh_omp_event_create(DVMH_OMP_EVENT_MASTER);
    dvmh_omp_event_add_subevent(parent, ev);
    dvmh_omp_event_set_context_descriptor(ev, TO_DESC(*StaticContextHandle));
    dvmh_omp_event_set_thread_id(ev, TO_LONG(ThreadID));
    dvmh_omp_event_set_begin_time(ev, omp_get_wtime());
    dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), ev);
}
/* Trace hook: leaving a `master` construct; close its event. */
void DBG_MasterEnd(long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *active = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event_set_end_time(active, omp_get_wtime());
    dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
}
/* Trace hook: reached a `critical` construct (outer part, includes waiting). */
void DBG_BeforeCritical (long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *parent = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event *ev = dvmh_omp_event_create(DVMH_OMP_EVENT_CRITICAL_OUTER);
    dvmh_omp_event_add_subevent(parent, ev);
    dvmh_omp_event_set_context_descriptor(ev, TO_DESC(*StaticContextHandle));
    dvmh_omp_event_set_thread_id(ev, TO_LONG(ThreadID));
    dvmh_omp_event_set_begin_time(ev, omp_get_wtime());
    dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), ev);
}
/* Trace hook: the `critical` body starts executing (inner part). */
void DBG_CriticalEvent(long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *parent = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event *ev = dvmh_omp_event_create(DVMH_OMP_EVENT_CRITICAL_INNER);
    dvmh_omp_event_add_subevent(parent, ev);
    dvmh_omp_event_set_context_descriptor(ev, TO_DESC(*StaticContextHandle));
    dvmh_omp_event_set_thread_id(ev, TO_LONG(ThreadID));
    dvmh_omp_event_set_begin_time(ev, omp_get_wtime());
    dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), ev);
}
/* Trace hook: the `critical` body finished; close the inner event. */
void DBG_CriticalEventEnd(long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *active = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event_set_end_time(active, omp_get_wtime());
    dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
}
/* Trace hook: past the `critical` construct; close the outer event. */
void DBG_AfterCritical(long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *active = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event_set_end_time(active, omp_get_wtime());
    dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
}
/* Trace hook: a barrier starts (the event spans the wait time). */
void DBG_BeforeBarrier(long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *parent = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event *ev = dvmh_omp_event_create(DVMH_OMP_EVENT_BARRIER);
    dvmh_omp_event_add_subevent(parent, ev);
    dvmh_omp_event_set_context_descriptor(ev, TO_DESC(*StaticContextHandle));
    dvmh_omp_event_set_thread_id(ev, TO_LONG(ThreadID));
    dvmh_omp_event_set_begin_time(ev, omp_get_wtime());
    dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), ev);
}
/* Trace hook: the barrier is passed; close its event. */
void DBG_AfterBarrier(long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *active = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event_set_end_time(active, omp_get_wtime());
    dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
}
/* Trace hook: a `flush` starts. */
void DBG_FlushEvent(long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *parent = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event *ev = dvmh_omp_event_create(DVMH_OMP_EVENT_FLUSH);
    dvmh_omp_event_add_subevent(parent, ev);
    dvmh_omp_event_set_context_descriptor(ev, TO_DESC(*StaticContextHandle));
    dvmh_omp_event_set_thread_id(ev, TO_LONG(ThreadID));
    dvmh_omp_event_set_begin_time(ev, omp_get_wtime());
    dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), ev);
}
/* Trace hook: the `flush` completed; close its event. */
void DBG_FlushEventEnd(long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *active = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event_set_end_time(active, omp_get_wtime());
    dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
}
/* Trace hook: reached an `ordered` construct (outer part, includes waiting). */
void DBG_BeforeOrdered (long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *parent = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event *ev = dvmh_omp_event_create(DVMH_OMP_EVENT_ORDERED_OUTER);
    dvmh_omp_event_add_subevent(parent, ev);
    dvmh_omp_event_set_context_descriptor(ev, TO_DESC(*StaticContextHandle));
    dvmh_omp_event_set_thread_id(ev, TO_LONG(ThreadID));
    dvmh_omp_event_set_begin_time(ev, omp_get_wtime());
    dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), ev);
}
/* Trace hook: the `ordered` body starts executing (inner part). */
void DBG_OrderedEvent(long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *parent = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event *ev = dvmh_omp_event_create(DVMH_OMP_EVENT_ORDERED_INNER);
    dvmh_omp_event_add_subevent(parent, ev);
    dvmh_omp_event_set_context_descriptor(ev, TO_DESC(*StaticContextHandle));
    dvmh_omp_event_set_thread_id(ev, TO_LONG(ThreadID));
    dvmh_omp_event_set_begin_time(ev, omp_get_wtime());
    dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), ev);
}
/* Trace hook: past an `ordered` construct. If the inner ORDERED event is
 * still active (body was entered), close it first, then close the outer one. */
void DBG_AfterOrdered(long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *active = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    if (dvmh_omp_event_get_type(active) == DVMH_OMP_EVENT_ORDERED_INNER) {
        dvmh_omp_event_set_end_time(active, omp_get_wtime());
        dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
        active = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    }
    dvmh_omp_event_set_end_time(active, omp_get_wtime());
    dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
}
/* No-op stub: threadprivate events are not traced by this profiler. */
void DBG_ThreadPrivateEvent(long *StaticContextHandle, long *ThreadID)
{
}
/* No-op stub: variable registration is not traced by this profiler. */
void DBG_RegVar(long *StaticContextHandle, long *ThreadID, void*pAddr)
{
}
/* No-op stub: array registration is not traced by this profiler. */
void DBG_RegArr(long *StaticContextHandle, long *ThreadID, long *ArrSize, void* pAddr)
{
}
/* No-op stub: common-block registration is not traced by this profiler. */
void DBG_RegCommon(long *StaticContextHandle, long *ThreadID)
{
}
/* No-op stub: scalar reads are not traced by this profiler. */
void DBG_ReadVar(long* StaticContextHandle, long *ThreadID, void*pAddr, long *var_name)
{
}
/* No-op stub: array reads are not traced by this profiler. */
void DBG_ReadArr(long* StaticContextHandle, long *ThreadID, void*pAddr, long *var_name, void*pBase)
{
}
/* No-op stub: scalar writes (begin) are not traced by this profiler. */
void DBG_WriteVarBegin(long *StaticContextHandle, long *ThreadID, void*pAddr, long* var_name)
{
}
/* No-op stub: scalar writes (end) are not traced by this profiler. */
void DBG_WriteVarEnd(long *StaticContextHandle, long *ThreadID, void*pAddr, long* var_name)
{
}
/* No-op stub: array writes (begin) are not traced by this profiler. */
void DBG_WriteArrBegin(long *StaticContextHandle, long *ThreadID, void*pAddr, long* var_name, void*pBase)
{
}
/* No-op stub: array writes (end) are not traced by this profiler.
 * (The old commented-out debug print said "WriteArrBegin" — stale copy-paste.) */
void DBG_WriteArrEnd(long *StaticContextHandle, long *ThreadID, void*pAddr, long* var_name, void*pBase)
{
}
/* Trace hook: a serial (non-parallel) loop begins.
 * Init/Last/Step describe the loop bounds; they are not recorded here. */
void DBG_BegSL(long *StaticContextHandle, long *ThreadID, long *Init, long *Last, long *Step)
{
    dvmh_omp_event *parent = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event *ev = dvmh_omp_event_create(DVMH_OMP_EVENT_SERIAL_LOOP);
    dvmh_omp_event_add_subevent(parent, ev);
    dvmh_omp_event_set_context_descriptor(ev, TO_DESC(*StaticContextHandle));
    dvmh_omp_event_set_thread_id(ev, TO_LONG(ThreadID));
    dvmh_omp_event_set_begin_time(ev, omp_get_wtime());
    dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), ev);
}
/* Trace hook: a serial-loop iteration starts. If the previous iteration's
 * SERIAL_ITER event is still active, close it first, then open a new one. */
void DBG_SIter(long *StaticContextHandle, long *ThreadID, long *Index)
{
    dvmh_omp_event *parent = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event *ev;
    if (dvmh_omp_event_get_type(parent) == DVMH_OMP_EVENT_SERIAL_ITER) {
        /* close the previous loop iteration */
        dvmh_omp_event_set_end_time(parent, omp_get_wtime());
        dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
        parent = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    }
    ev = dvmh_omp_event_create(DVMH_OMP_EVENT_SERIAL_ITER);
    dvmh_omp_event_add_subevent(parent, ev);
    dvmh_omp_event_set_context_descriptor(ev, TO_DESC(*StaticContextHandle));
    dvmh_omp_event_set_thread_id(ev, TO_LONG(ThreadID));
    dvmh_omp_event_set_begin_time(ev, omp_get_wtime());
    dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), ev);
}
/* Trace hook: a serial loop ends. If the last iteration's event
 * (SERIAL_ITER or SERIAL_IF_ITER) is still open, close it first,
 * then close the SERIAL_LOOP event itself. */
void DBG_EndSL(long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *active = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    if (dvmh_omp_event_get_type(active) == DVMH_OMP_EVENT_SERIAL_ITER
        || dvmh_omp_event_get_type(active) == DVMH_OMP_EVENT_SERIAL_IF_ITER) {
        dvmh_omp_event_set_end_time(active, omp_get_wtime());
        dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
        active = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    }
    dvmh_omp_event_set_end_time(active, omp_get_wtime());
    dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
}
/* Trace hook: about to call a user function (FUNCTION_CAL event). */
void DBG_BeforeFuncCall(long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *parent = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event *ev = dvmh_omp_event_create(DVMH_OMP_EVENT_FUNCTION_CAL);
    dvmh_omp_event_add_subevent(parent, ev);
    dvmh_omp_event_set_context_descriptor(ev, TO_DESC(*StaticContextHandle));
    dvmh_omp_event_set_thread_id(ev, TO_LONG(ThreadID));
    dvmh_omp_event_set_begin_time(ev, omp_get_wtime());
    dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), ev);
}
/* No-op stub: scalar call arguments are not traced by this profiler. */
void DBG_FuncParVar(long *StaticContextHandle, long *ThreadID, int *Position, void*pAddr, long *var_name, int *IsRead)
{
}
/* No-op stub: array call arguments are not traced by this profiler. */
void DBG_FuncParArr(long *StaticContextHandle, long *ThreadID, int *Position, void*pAddr, long *var_name, void*pBase, int *IsRead)
{
}
/* Trace hook: the user-function call returned; close the FUNCTION_CAL event. */
void DBG_AfterFuncCall(long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *active = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event_set_end_time(active, omp_get_wtime());
    dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
}
/* Trace hook: a user function body starts (FUNCTION event). */
void DBG_FuncBegin(long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *parent = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event *ev = dvmh_omp_event_create(DVMH_OMP_EVENT_FUNCTION);
    dvmh_omp_event_add_subevent(parent, ev);
    dvmh_omp_event_set_context_descriptor(ev, TO_DESC(*StaticContextHandle));
    dvmh_omp_event_set_thread_id(ev, TO_LONG(ThreadID));
    dvmh_omp_event_set_begin_time(ev, omp_get_wtime());
    dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), ev);
}
/* Trace hook: the user function body ends; close the FUNCTION event. */
void DBG_FuncEnd(long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *active = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event_set_end_time(active, omp_get_wtime());
    dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
}
/* No-op stub: scalar parameter registration is not traced by this profiler. */
void DBG_RegParVar(long *StaticContextHandle, long *ThreadID, void*pAddr, int *Position)
{
}
/* No-op stub: array parameter registration is not traced by this profiler. */
void DBG_RegParArr(long *StaticContextHandle, long *ThreadID, long *ArrSize, void*pAddr, int *Position)
{
}
/* Trace hook: start of a serial-loop iteration (variant with an IF clause).
 * Closes the previous SERIAL_IF_ITER event if one is still active. */
void DBG_SIfIter(long *StaticContextHandle, long *ThreadID, long *Index, long *IfVar)
{
    dvmh_omp_event *parent = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event *ev;
    if (dvmh_omp_event_get_type(parent) == DVMH_OMP_EVENT_SERIAL_IF_ITER) {
        /* close the previous loop iteration */
        dvmh_omp_event_set_end_time(parent, omp_get_wtime());
        dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
        parent = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    }
    ev = dvmh_omp_event_create(DVMH_OMP_EVENT_SERIAL_IF_ITER);
    dvmh_omp_event_add_subevent(parent, ev);
    dvmh_omp_event_set_context_descriptor(ev, TO_DESC(*StaticContextHandle));
    dvmh_omp_event_set_thread_id(ev, TO_LONG(ThreadID));
    dvmh_omp_event_set_begin_time(ev, omp_get_wtime());
    dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), ev);
}
/* Trace hook: start of a parallel-loop iteration (variant with an IF clause).
 * Closes the previous OMP_IF_ITER event if one is still active. */
void DBG_OMPIfIter(long *StaticContextHandle, long *ThreadID, long *Index, long *IfVar)
{
    dvmh_omp_event *parent = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event *ev;
    if (dvmh_omp_event_get_type(parent) == DVMH_OMP_EVENT_OMP_IF_ITER) {
        /* close the previous loop iteration */
        dvmh_omp_event_set_end_time(parent, omp_get_wtime());
        dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
        parent = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    }
    ev = dvmh_omp_event_create(DVMH_OMP_EVENT_OMP_IF_ITER);
    dvmh_omp_event_add_subevent(parent, ev);
    dvmh_omp_event_set_context_descriptor(ev, TO_DESC(*StaticContextHandle));
    dvmh_omp_event_set_thread_id(ev, TO_LONG(ThreadID));
    dvmh_omp_event_set_begin_time(ev, omp_get_wtime());
    dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), ev);
}
/* Trace hook: an I/O operation starts (IO event). */
void DBG_BeforeIO(long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *parent = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event *ev = dvmh_omp_event_create(DVMH_OMP_EVENT_IO);
    dvmh_omp_event_add_subevent(parent, ev);
    dvmh_omp_event_set_context_descriptor(ev, TO_DESC(*StaticContextHandle));
    dvmh_omp_event_set_thread_id(ev, TO_LONG(ThreadID));
    dvmh_omp_event_set_begin_time(ev, omp_get_wtime());
    dvmh_omp_thread_info_event_occured(TO_THREAD_INFO(*ThreadID), ev);
}
/* Trace hook: the I/O operation finished; close the IO event. */
void DBG_AfterIO(long *StaticContextHandle, long *ThreadID)
{
    dvmh_omp_event *active = dvmh_omp_thread_info_get_active_event(TO_THREAD_INFO(*ThreadID));
    dvmh_omp_event_set_end_time(active, omp_get_wtime());
    dvmh_omp_thread_info_event_finished(TO_THREAD_INFO(*ThreadID));
}
|
1740.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "3mm.h"
/* Array initialization. */
/* Array initialization: fills the four input matrices of the 3mm kernel
 * with deterministic, size-dependent values (E, F, G are outputs and are
 * written by the kernel itself). */
static
void init_array(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl))
{
    int row, col;
    for (row = 0; row < ni; row++)         /* A: ni x nk */
        for (col = 0; col < nk; col++)
            A[row][col] = ((DATA_TYPE) row*col) / ni;
    for (row = 0; row < nk; row++)         /* B: nk x nj */
        for (col = 0; col < nj; col++)
            B[row][col] = ((DATA_TYPE) row*(col+1)) / nj;
    for (row = 0; row < nj; row++)         /* C: nj x nm */
        for (col = 0; col < nm; col++)
            C[row][col] = ((DATA_TYPE) row*(col+3)) / nl;
    for (row = 0; row < nm; row++)         /* D: nm x nl */
        for (col = 0; col < nl; col++)
            D[row][col] = ((DATA_TYPE) row*(col+2)) / nk;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out matrix G so the compiler cannot
 * eliminate the computation; also usable as a correctness reference dump. */
static
void print_array(int ni, int nl,
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
    int r, c;
    for (r = 0; r < ni; r++)
        for (c = 0; c < nl; c++) {
            fprintf (stderr, DATA_PRINTF_MODIFIER, G[r][c]);
            /* newline every 20 printed values */
            if ((r * ni + c) % 20 == 0) fprintf (stderr, "\n");
        }
    fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: G := (A*B) * (C*D), computed as E := A*B,
   F := C*D, G := E*F. The whole function is timed, including call/return.
   NOTE(review): "num_threads(#P11)" below is an autotuning template
   placeholder, not valid C — confirm the generator substitutes it.
   NOTE(review): the bare "#pragma omp" lines look truncated; they are most
   likely meant to be "#pragma omp for" — confirm against the original
   PolyBench/GPU-OpenMP source. */
static
void kernel_3mm(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j, k;
#pragma scop
#pragma omp parallel private (i, j, k) num_threads(#P11)
{
/* E := A*B */
for (i = 0; i < _PB_NI; i++)
{
#pragma omp
for (j = 0; j < _PB_NJ; j++)
{
E[i][j] = 0;
for (k = 0; k < _PB_NK; ++k)
E[i][j] += A[i][k] * B[k][j];
}
}
/* F := C*D */
for (i = 0; i < _PB_NJ; i++)
{
#pragma omp
for (j = 0; j < _PB_NL; j++)
{
F[i][j] = 0;
for (k = 0; k < _PB_NM; ++k)
F[i][j] += C[i][k] * D[k][j];
}
}
/* G := E*F */
for (i = 0; i < _PB_NI; i++)
{
#pragma omp
for (j = 0; j < _PB_NL; j++)
{
G[i][j] = 0;
for (k = 0; k < _PB_NJ; ++k)
G[i][j] += E[i][k] * F[k][j];
}
}
}
#pragma endscop
}
/* Benchmark driver: allocate, initialize, time kernel_3mm, and dump G
   through polybench_prevent_dce so the computation cannot be optimized away. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
int nk = NK;
int nl = NL;
int nm = NM;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);
/* Initialize array(s). E, F, G are pure outputs and stay uninitialized. */
init_array (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_3mm (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(E),
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(F),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D),
POLYBENCH_ARRAY(G));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(E);
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
POLYBENCH_FREE_ARRAY(F);
POLYBENCH_FREE_ARRAY(C);
POLYBENCH_FREE_ARRAY(D);
POLYBENCH_FREE_ARRAY(G);
return 0;
}
|
mol2inchi.c | /*
* International Chemical Identifier (InChI)
* Version 1
* Software version 1.05
* January 27, 2017
*
* The InChI library and programs are free software developed under the
* auspices of the International Union of Pure and Applied Chemistry (IUPAC).
* Originally developed at NIST.
* Modifications and additions by IUPAC and the InChI Trust.
* Some portions of code were developed/changed by external contributors
* (either contractor or volunteer) which are listed in the file
* 'External-contributors' included in this distribution.
*
* IUPAC/InChI-Trust Licence No.1.0 for the
* International Chemical Identifier (InChI)
* Copyright (C) IUPAC and InChI Trust Limited
*
* This library is free software; you can redistribute it and/or modify it
* under the terms of the IUPAC/InChI Trust InChI Licence No.1.0,
* or any later version.
*
* Please note that this library is distributed WITHOUT ANY WARRANTIES
* whatsoever, whether expressed or implied.
* See the IUPAC/InChI-Trust InChI Licence No.1.0 for more details.
*
* You should have received a copy of the IUPAC/InChI Trust InChI
* Licence No. 1.0 with this library; if not, please write to:
*
* The InChI Trust
* 8 Cavendish Avenue
* Cambridge CB1 7US
* UK
*
* or e-mail to alan@inchi-trust.org
*
*/
#pragma warning( disable : 4996 )
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <stdarg.h>
#include <errno.h>
#include <limits.h>
#include <float.h>
#include <time.h>
#include <omp.h>
#include "../../../../INCHI_BASE/src/inchi_api.h"
#include "mol2inchi.h"
#include "moreitil.h"
#ifdef BUILD_WITH_ENG_OPTIONS
#include "shuffler.h"
/* local functions */
int extract_MOL_counts_and_version(char *str, M2I_NUM *num_at, M2I_NUM *num_bo);
int extract_counts_from_MOL_V3000(char *str, M2I_NUM *num_at, M2I_NUM *num_bo);
int extract_name_from_MOL(char *str, char *name, size_t max_symbols);
double getCPUTime(void);
#endif
/****************************************************************************/
/*
    mol2inchi driver: parses the command line (first argument = input SD/MOL
    file; remaining switches are either mol2inchi options or pass-through
    InChI API options), then streams records through work_on_molfile_text().
    Exit codes: 0 = OK, 1 = usage shown, 2 = cannot open input, 3 = no memory.

    Fixes vs. previous revision:
      - `ctime(&begin)` read an UNINITIALIZED time_t and discarded the
        result (undefined behavior); replaced with `time(&begin)`.
      - the input FILE* was never fclose()d; it is now closed at `finish`.
      - `sprintf` into banner[255] replaced with bounded `snprintf`.
      - removed unused locals (`result`, `out`, `log`).
*/
int main(int argc, char *argv[])
{
    int retcode=0, nerrs=0, k;
    char *fname=NULL;
    FILE *f=NULL;
    char *text=NULL;
    char *option1;
    struct WorkerDetails wd;
    time_t begin;
#if defined(_WIN32)
    const char *platform="Windows";
#else
    const char *platform="Linux";
#endif
    char banner[255];
    /* snprintf: never overflow banner even if APP_DESCRIPTION grows */
    snprintf( banner, sizeof(banner), "%s\n%-s Build of %-s %-s%s\n",
            APP_DESCRIPTION,
            platform, __DATE__, __TIME__,
            RELEASE_IS_FINAL?"":" *** pre-release, for evaluation only ***");
    /* Parse command line
       assume that the first item is input filename,
       then mol2ichi own switches, then all the others
       which should be passed to InChI calculation algorithm
    */
    memset( &wd, 0, sizeof(wd) );
    fprintf( stderr, "%-s\n", banner);
    if ( argc < 2 )
    {
        print_help();
        retcode = 1;
        goto finish;
    }
    fprintf( stderr, "Started at ");
    print_time();
    time(&begin);   /* record start time (was ctime() on uninitialized data) */
    /* Set worker details */
    wd.get_inchikey = 0;
    wd.do_not_print_inchi = 0;
    wd.output_error_inchi = 0;
    wd.do_benchmark = 0;
    wd.nperms = 0;
    wd.tmax_shuffle = 0;
    wd.dtmax_shuffle = 0.0;
    wd.verbose = 1;
    wd.out = NULL;
    wd.log = NULL;
    wd.rstart = 1;
    wd.rend = 999999999;
    wd.nmol = 0;
    wd.n_inchi_err = 0;
    /* limit calculations by 60 sec */
#ifdef _WIN32
    strcpy(wd.options, "/W60");
#else
    strcpy(wd.options, "-W60");
#endif
    fname = argv[1];
    for(k=2; k<argc; k++)
    {
        option1 = argv[k]+1;    /* skip the leading '-' or '/' */
        /* Treat mol2inchi own switches */
        if ( !own_stricmp(option1, "NOINCHI") )
        {
            wd.do_not_print_inchi = 1;
        }
#ifdef BUILD_WITH_ENG_OPTIONS
        else if ( !own_stricmp(option1, "BENCHMARK") )
        {
            wd.do_benchmark = 1;
        }
        else if ( !own_memicmp( option1, "NSHUFFLE:", 9 ) )
        {
            wd.nperms = strtol(option1+9, NULL, 10);
            if ( wd.nperms < 0 )
                wd.nperms = 0;
        }
        else if ( !own_memicmp( option1, "TSHUFFLE:", 9 ) )
        {
            wd.tmax_shuffle = strtol(option1+9, NULL, 10);
            if ( wd.tmax_shuffle < 0 )
                wd.tmax_shuffle = 0; /* no time limit for shuffling */
            wd.dtmax_shuffle = (double) wd.tmax_shuffle;
        }
        else if ( !own_memicmp( option1, "VERBOSE:", 8 ) )
        {
            wd.verbose = strtol(option1+8, NULL, 10);
            if ( wd.verbose < 0 )
                wd.verbose = 0;
        }
#endif
        else if ( !own_memicmp( option1, "START:", 6 ) )
        {
            wd.rstart = strtol(option1+6, NULL, 10);
            if ( wd.rstart < 1 )
                wd.rstart = 1;
        }
        else if ( !own_memicmp( option1, "END:", 4 ) )
        {
            wd.rend = strtol(option1+4, NULL, 10);
            if ( wd.rend < 1 )
                wd.rend = 1;
            if ( wd.rend < wd.rstart )
                wd.rend = wd.rstart;
        }
        else if ( !own_memicmp( option1, "RECORD:", 7 ) )
        {
            int num = strtol(option1+7, NULL, 10);
            wd.rstart = wd.rend = num;
        }
        else
        {
            if ( !own_stricmp(option1, "KEY") )
            {
                wd.get_inchikey = 1;
            }
            /* other switches to be passed to inchi-calc worker
               (KEY is intentionally both handled here and passed on) */
            strcat( wd.options, " " );
            strcat( wd.options, argv[k] );
        }
    }
    fprintf( stderr, "by the following command line:\n\"");
    for(k=0; k<argc-1; k++)
        fprintf( stderr, "%-s ",argv[k]);
    fprintf( stderr, "%-s\"\n", argv[argc-1]);
    f = fopen(fname,"rb");
    if ( !f )
    {
        retcode = 2;
        goto finish;
    }
    /* Warning: in this demo, we assume that molfile's size does not */
    /* exceed some large MOLBUFSIZE; be (more) careful for production. */
    text = (char *) calloc( MOLBUFSIZE, sizeof(char) );
    if ( !text )
    {
        retcode = 3;
        goto finish;
    }
    fprintf( stderr, "\n" );
    /* Main loop: one SDF/MOL record at a time */
    while ( get_next_molfile_as_text( f, text, MOLBUFSIZE ) > 0 )
    {
        wd.nmol++;
        if ( wd.nmol < wd.rstart )
            continue;
        if ( wd.nmol > wd.rend )
            break;
        nerrs = work_on_molfile_text ( text, &wd );
    }
finish:
    if ( retcode==0 )
        fprintf( stderr, "\nFinished OK at ");
    else
        fprintf( stderr, "\nFinished with exit code %-d at ", retcode);
    print_time();
    fprintf( stderr, "Processed %lu structure(s) with %-ld error(s).\n",
            wd.nmol, wd.n_inchi_err );
    if ( f )
        fclose( f );        /* was leaked before */
    if ( text )
        free( text );
    return retcode;
}
/*
Process one MOLFile text
Optionally organize benchmarking and
repeat calculations with shuffled atoms
*/
/* Process one Molfile text: run it through the InChI worker once and print
   the result. With BUILD_WITH_ENG_OPTIONS, optionally benchmark the run and
   repeat it wd->nperms times with shuffled atom order (OpenMP-parallel),
   comparing InChIKeys against the unshuffled run.
   Returns the single-run error count (0 on success). */
int work_on_molfile_text( const char* text, struct WorkerDetails * wd)
{
int res=0, nerrs=0, shuffle_atoms=0;
int lenname=0, max_mname_symbols=79;
long irepeat=0;
double startTime=0.0;
double run_duration=0.0;
int nrepeats=0;
int have_problems=0;
#ifdef BUILD_WITH_ENG_OPTIONS
M2I_NUM numbers[PERMAXATOMS];
M2I_NUM nat = -1, nbonds = -1;
char mname[80];
MFLines *mfdata=NULL;
long i;
int version;
#endif
/* Warning: in this demo, we assume that printout (InChI, etc.) */
/* does not exceed some large INCHIPRINTOUTSIZE */
char output0[INCHIPRINTOUTSIZE], errstr0[4096];
char ikey0[28];
ikey0[0] = output0[0] = errstr0[0] = '\0';
#ifdef BUILD_WITH_ENG_OPTIONS
/* Get info for printout (molecule name = first line of the MOL block) */
lenname = extract_name_from_MOL( (char *) text, mname, max_mname_symbols);
if ( !lenname )
strcpy(mname, "");
if ( wd->do_benchmark )
{
startTime = getCPUTime( );
}
#endif
/* Perform a single run */
nerrs = run_once_on_molfile_text( text, wd, ikey0, output0, errstr0);
if ( errstr0[0] )
fprintf( stderr, "%-s\n", errstr0);
#ifndef BUILD_WITH_ENG_OPTIONS
fprintf( stdout, "%-s", output0 );
fprintf( stdout, "\n");
fflush(NULL);
#else
/* have_problems = extract_counts_from_MOL_V3000( (char *) text, &nat, &nbonds); */
version = extract_MOL_counts_and_version( (char *) text, &nat, &nbonds);
have_problems = (version!=2000) && (version!=3000);
if ( wd->do_benchmark )
{
run_duration = getCPUTime( ) - startTime;
/* Decide whether shuffled repeats are possible/worthwhile */
if ( wd->nperms > 0 )
{
/* Permutations requested */
nrepeats = wd->nperms;
if ( nrepeats > 0 )
{
if ( version!=3000 || nat<1 || nat>PERMAXATOMS )
{
nrepeats = 0;
if ( wd->verbose >= 1 )
fprintf( stderr,
"No shuffle for struct #%-ld: not V3000 data file or too many atoms (code %-d).\n",
wd->nmol, have_problems );
}
}
if ( nrepeats > 0 && wd->tmax_shuffle > 0 )
{
/* Check single-run timing... */
if ( run_duration >= wd->dtmax_shuffle )
{
/* Single-run duration exceeds threshold, do not repeat */
nrepeats = 0;
if ( wd->verbose >= 1 )
fprintf( stderr, "No shuffle for struct #%-ld: too long run.\n", wd->nmol );
}
}
}
} /* if ( wd->do_benchmark ) */
fprintf( stdout, "%-s\t%-s\t", output0, mname );
if ( wd->do_benchmark )
{
/* irepeat is still 0 here: this row is the reference (unshuffled) run */
fprintf( stdout, "#%-ld\t%-d\t%-d\t%-lf",
irepeat, nat, nbonds,run_duration );
if ( ikey0[0] )
fprintf( stdout, "%-s", (nrepeats < 1)?"\t---":"\t***" );
}
fprintf( stdout, "\n");
fflush(NULL);
if ( nerrs > 0 )
{
/* Failed, nothing to do here anymore */
return nerrs;
}
if ( nrepeats < 1 )
return 0;
/* Repeat nrepeat times */
/* Prepare data structs for repeated calculations: identity permutation */
for (i=0; i<nat; i++)
numbers[i] = i;
mfdata = new_mflines( (char *) text, nat, nbonds );
/*#pragma omp parallel num_threads(4)*/
#ifdef _OPENMP
{
#ifdef WIN32
int maxcores=3; /* laptop */
#else
int maxcores=5; /* server */
#endif
int nthreads = ( nrepeats <= maxcores ) ? nrepeats : maxcores;
omp_set_num_threads( nthreads );
fprintf( stderr, "Using OpenMP with %-d threads\n", ( nrepeats <= maxcores ) ? nrepeats : maxcores );
}
#endif
/* NOTE(review): run_duration is shared across threads and written inside
   the parallel loop below without being private — looks like a data race
   affecting the timing printout; confirm intent. */
#pragma omp parallel for firstprivate( numbers ) private(startTime)
for ( irepeat=1; irepeat<nrepeats+1; irepeat++)
{
int res1;
char *mftext=NULL;
char ikey[28];
/* Warning: in this demo, we just assume that printout (InChI, etc.) */
/* does not exceed some large INCHIPRINTOUTSIZE; be (more) careful. */
char output[INCHIPRINTOUTSIZE], errstr[4096];
output[0] = errstr[0] = ikey[0] = '\0';
#ifdef _OPENMP
/* Make sure that different threads go with different rand */
srand( (unsigned int) (time(NULL)) ^ omp_get_thread_num());
#else
srand( (unsigned int) (time(NULL)) );
#endif
/* Re-order the atoms and make new 'Molfile as text' */
shuffle( (void *) numbers, (M2I_NUM) nat, sizeof(M2I_NUM) );
mftext = shuffled_mflines( mfdata, numbers );
if ( NULL==mftext )
continue;
/* debug printout */
if ( wd->verbose >= 1 )
{
if ( wd->verbose >= 2 )
#pragma omp critical
{
fprintf( stderr, "%-s perm#%-.4ld { %-d", mname, irepeat, numbers[0]);
for (i=1; i<nat-1; i++) fprintf( stderr, ", %-d", numbers[i]);
fprintf( stderr, ", %-d }\n", numbers[nat-1]);
if ( wd->verbose >= 10 )
{
fprintf( stderr, "\n\n%-s\n\n", mftext);
}
}
}
if ( wd->do_benchmark )
startTime = getCPUTime( );
res1 = run_once_on_molfile_text( (const char *) mftext, wd, ikey, output, errstr);
if ( wd->do_benchmark )
{
run_duration = getCPUTime( ) - startTime;
}
if ( errstr[0] )
#pragma omp critical
{
if ( wd->verbose >= 1 )
#ifdef _OPENMP
fprintf( stderr, "(%-s) [thread %-d] \n", errstr, omp_get_thread_num());
#else
fprintf( stderr, "(%-s)\n", errstr );
#endif
}
if ( wd->do_benchmark )
#pragma omp critical
{
{
/* "..." = InChIKey matches the reference run, "!!!" = mismatch */
fprintf( stdout, "%-s\t%-s\t#%-ld\t%-d\t%-d\t%-lf",
output, mname, irepeat, nat, nbonds,run_duration );
if ( ikey0[0] && ikey[0] )
fprintf( stdout, "%-s",
strcmp(ikey,ikey0)?"\t!!!":"\t..." );
fprintf( stdout, "\n");
fflush(NULL);
}
}
if ( mftext )
free( mftext );
}
if ( mfdata )
del_mflines( mfdata );
#endif
return 0;
}
/* Actual InChI calculation worker.
 * Runs one Molfile text through the InChI API; on success fills `output`
 * (and `ikey` when requested) for the caller to print, and `errstr` with
 * any error/warning text. Returns the error count (0 or 1). */
int run_once_on_molfile_text( const char* mftext,
struct WorkerDetails* wd,
char *ikey,
char *output,
char *errstr)
{
    int rc = 0, n_errs = 0;
    const char *nl = "\n";
    inchi_Output iout, *result = &iout;

    memset( result, 0, sizeof(*result) );

    /* Call API function to calculate InChI */
    rc = MakeINCHIFromMolfileText( mftext, wd->options, result );

    if ( rc==mol2inchi_Ret_WARNING )
    {
        sprintf( errstr, "Warning " );
    }
    else if ( rc==mol2inchi_Ret_ERROR      ||
              rc==mol2inchi_Ret_ERROR_get  ||
              rc==mol2inchi_Ret_ERROR_comp )
    {
        n_errs++;
        wd->n_inchi_err++;
        sprintf( errstr, "Error " );
    }
    /* NOTE: a non-empty API log replaces (not appends to) the prefix above */
    if ( result->szLog && result->szLog[0] )
        sprintf( errstr,
                "%-s structure #%-lu",
                result->szLog, wd->nmol );

    /* Print InChI and other stuff if InChI is not empty */
    if ( result->szInChI && result->szInChI[0] )
    {
        const char *aux =
            ( result->szAuxInfo && result->szAuxInfo[0] ) ? result->szAuxInfo : "";
        if ( wd->get_inchikey )
        {
            /* Calculate and print InChIKey */
            int xhash1 = 0, xhash2 = 0;
            char szXtra1[256], szXtra2[256];
            if ( GetINCHIKeyFromINCHI( result->szInChI,
                                       xhash1, xhash2,
                                       ikey, szXtra1, szXtra2 ) != INCHIKEY_OK )
                ikey[0] = '\0';
        }
        sprintf(output, "Structure: %-lu%s%-s%s%-s%s%-s",
                wd->nmol,
                wd->do_not_print_inchi ? "" : nl,
                wd->do_not_print_inchi ? "" : result->szInChI,
                ikey[0] ? nl : "",
                ikey[0] ? ikey : "",
                aux[0] ? nl : "",
                aux );
    }
    /* Reset output data structure (frees API-allocated strings) */
    FreeINCHI ( result );
    return n_errs;
}
/*
Print program usage instructions
*/
/*
 * Print program usage instructions to stderr.
 * Output text is identical to the previous per-line version; lines are
 * grouped into single fputs calls per conditional section.
 */
void print_help(void)
{
    fputs( "Usage: \n"
           "mol2inchi inputfilename [options]\n"
           "Options:\n"
           "\tSTART:n - start from SDF record n\n"
           "\tEND:n - end when reached SDF record n\n"
           "\tRECORD:n - process only SDF record n\n"
           "\tNOINCHI - do not print InChI itself\n"
           "\tKEY - calc and print InChIKey\n"
           "\tPOLYMERS - treat polymers (experimental)\n", stderr );
#ifdef BUILD_WITH_ENG_OPTIONS
    /* engineering/diagnostic options are compiled in conditionally */
    fputs( "\tBENCHMARK - collect timing\n"
           "\tNSHUFFLE:n - reorder atoms n times and repeat\n"
           "\tTSHUFFLE:t - do not repeat runs of t sec or longer\n"
           "\tVERBOSE:n - > 0 means more output\n", stderr );
#endif
    fputs( "\t[common InChI API options]\n", stderr );
}
#ifdef BUILD_WITH_ENG_OPTIONS
/******************************************************************/
/*
 * Copy the first line of MOL text 'str' (the molecule name / title line)
 * into 'name', truncated to at most max_symbols characters.
 *
 * Returns -1 on NULL/empty input; otherwise the number of bytes written
 * to 'name' INCLUDING the terminating NUL (0 if the first line is empty).
 *
 * NOTE(review): when truncation occurs this writes name[max_symbols]='\0',
 * so 'name' must hold at least max_symbols+1 bytes — confirm at call sites.
 */
int extract_name_from_MOL(char *str, char *name, size_t max_symbols)
{
    char *p;
    size_t ncopied;
#define CNAMEL 4096
    char buf[CNAMEL];

    if (str==NULL)
        return -1;
    if (strlen(str)<1)
        return -1;

    /* grab everything up to the first '\n' (at most CNAMEL-2 chars) */
    p = get_substr_in_between(str, "", "\n", buf, CNAMEL-2, &ncopied);
    if ( ncopied )
    {
        if ( ncopied > max_symbols )
            ncopied = max_symbols;      /* truncate to caller's limit */
        memcpy( name, buf, ncopied );
        name[ncopied++]='\0';           /* returned count includes the NUL */
    }
    return (int) ncopied;
}
/***************************************************************************/
/*
 * Extract atom and bond counts from the COUNTS line of a V3000 Molfile.
 *
 * str    - NUL-terminated Molfile text
 * num_at - out: number of atoms (-1 if not found)
 * num_bo - out: number of bonds (-1 if not found; the field is optional)
 *
 * Returns 0 on success, negative on failure:
 *   -1 NULL input, -2 empty input, -3 counts line (or enough lines) not
 *   found, -4 empty atom-count field, -5 invalid atom count.
 *
 * Fix: the line-skipping scan previously had no end-of-string check and
 * read past the buffer on truncated input; it now bails out with -3.
 */
int extract_counts_from_MOL_V3000(char *str, M2I_NUM *num_at, M2I_NUM *num_bo)
{
    size_t n, ncopied;
    char *p, *pp;
#define CBUFL 16
    char buf[CBUFL];

    *num_at = *num_bo = -1;

    if (str==NULL)
        return -1;
    if (strlen(str)<1)
        return -2;

    /* Skip the first five lines to reach the COUNTS line; stop instead
       of scanning past the end of a truncated text. (As before, str[0]
       itself is never examined.) */
    p = str;
    n = 5;
    while (n--)
    {
        do
        {
            ++p;
            if (p[0]=='\0')
                return -3;
        } while (p[0]!='\n');
    }
    ++p;

    /* Atom count: first token after the COUNTS keyword */
    get_substr_in_between(p, "M V30 COUNTS ", " ", buf, CBUFL-2, &ncopied);
    if ( !ncopied )
        return -3;
    if ( !strlen(buf) )
        return -4;
    *num_at = (M2I_NUM ) strtol(buf, NULL, 10);
    if ( *num_at < 1)
        return -5;

    /* Bond count: next token; optional, *num_bo stays -1 when absent */
    pp = p + strlen("M V30 COUNTS ") + strlen(buf) +1;
    buf[0] = '\0';
    get_substr_in_between(pp, "", " ", buf, CBUFL-2, &ncopied);
    if ( ncopied )
        if ( strlen(buf) )
            *num_bo = (M2I_NUM ) strtol(buf, NULL, 10);

    return 0;
}
/***************************************************************************/
/*
 * Detect Molfile version (V2000/V3000) and extract atom/bond counts.
 *
 * str    - NUL-terminated Molfile text
 * num_at - out: number of atoms (-1 if not found)
 * num_bo - out: number of bonds (-1 if not found)
 *
 * Returns 2000 or 3000 on success, negative on failure:
 *   -1 NULL input (or V2000 counts line too short), -2 empty input,
 *   -3 counts line (or enough lines) not found, -4 empty atom-count
 *   field, -5 invalid atom count.
 *
 * Fix: both line-skipping scans previously had no end-of-string check
 * and read past the buffer on truncated input; they now bail out with -3.
 */
int extract_MOL_counts_and_version(char *str, M2I_NUM *num_at, M2I_NUM *num_bo)
{
    size_t n, ncopied;
    char *p, *pp;
#define CBUFL 16
    char buf[CBUFL];
    int version;
    char *p2000=NULL;

    version = *num_at = *num_bo = -1;

    if (str==NULL)
        return -1;
    if (strlen(str)<1)
        return -2;

    /* Skip the three header lines; bail out on truncated text.
       (As before, str[0] itself is never examined.) */
    p = str;
    n = 3;
    while (n--)
    {
        do
        {
            ++p;
            if (p[0]=='\0')
                return -3;
        } while (p[0]!='\n');
    }
    ++p;

    p2000 = strstr(p, "V2000");
    if ( p2000)
    {
        /* V2000: atoms and bonds are two fixed-width 3-char fields at
           the start of the counts line ("aaabbb...") */
        int k;
        char sna[4], snb[4];
        if ( strlen(p) < 6 )
            return version;
        for (k=0; k<3; k++)
        {
            sna[k] = p[k];
            snb[k] = p[k+3];
        }
        sna[3] = snb[3] = '\0';
        *num_at = (M2I_NUM ) strtol(sna, NULL, 10);
        if ( *num_at < 1)
            return -5;
        *num_bo = (M2I_NUM ) strtol(snb, NULL, 10);
        version = 2000;
        return version;
    }

    /* Not V2000: assume V3000 and look for the COUNTS line after the
       fifth newline; bail out on truncated text */
    version = 3000;
    p = str;
    n = 5;
    while (n--)
    {
        do
        {
            ++p;
            if (p[0]=='\0')
                return -3;
        } while (p[0]!='\n');
    }
    ++p;

    get_substr_in_between(p, "M V30 COUNTS ", " ", buf, CBUFL-2, &ncopied);
    if ( !ncopied )
        return -3;
    if ( !strlen(buf) )
        return -4;
    *num_at = (M2I_NUM ) strtol(buf, NULL, 10);
    if ( *num_at < 1)
        return -5;

    /* Bond count: next token; optional, *num_bo stays -1 when absent */
    pp = p + strlen("M V30 COUNTS ") + strlen(buf) +1;
    buf[0] = '\0';
    get_substr_in_between(pp, "", " ", buf, CBUFL-2, &ncopied);
    if ( ncopied )
        if ( strlen(buf) )
            *num_bo = (M2I_NUM ) strtol(buf, NULL, 10);

    return version;
}
#endif
|
GB_unop__identity_int8_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int8_fp64)
// op(A') function: GB (_unop_tran__identity_int8_fp64)
// C type: int8_t
// A type: double
// cast: int8_t cij = GB_cast_to_int8_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = GB_cast_to_int8_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = GB_cast_to_int8_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the IDENTITY unary op entry-wise: Cx [p] = (int8_t) Ax [p],
// casting each double to int8_t via GB_cast_to_int8_t.
// Ab selects the bitmap case (only entries with Ab [p] != 0 are written);
// Ab == NULL means all anz entries are present.
// NOTE: this file is auto-generated (see header); code left unchanged.
GrB_Info GB (_unop_apply__identity_int8_fp64)
(
    int8_t *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // full/sparse case: every position holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity with no typecast: a straight parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            int8_t z = GB_cast_to_int8_t ((double) (aij)) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip positions with no entry
            double aij = Ax [p] ;
            int8_t z = GB_cast_to_int8_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (cast (A')): transpose A, casting double entries to int8_t.
// The actual loop lives in the shared template GB_unop_transpose.c, which
// is specialized here via the GB_* macros defined above.
// NOTE: this file is auto-generated (see header); code left unchanged.
GrB_Info GB (_unop_tran__identity_int8_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-thread workspaces
    const int64_t *restrict A_slice,    // how A is partitioned over threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ParFriends.h | /****************************************************************/
/* Parallel Combinatorial BLAS Library (for Graph Computations) */
/* version 1.6 -------------------------------------------------*/
/* date: 6/15/2017 ---------------------------------------------*/
/* authors: Ariful Azad, Aydin Buluc --------------------------*/
/****************************************************************/
/*
Copyright (c) 2010-2017, The Regents of the University of California
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#ifndef _PAR_FRIENDS_H_
#define _PAR_FRIENDS_H_
#include "mpi.h"
#include <iostream>
#include <cstdarg>
#include "SpParMat.h"
#include "SpParHelper.h"
#include "MPIType.h"
#include "Friends.h"
#include "OptBuf.h"
#include "mtSpGEMM.h"
#include "MultiwayMerge.h"
namespace combblas {
template <class IT, class NT, class DER>
class SpParMat;
/*************************************************************************************************/
/**************************** FRIEND FUNCTIONS FOR PARALLEL CLASSES ******************************/
/*************************************************************************************************/
/**
** Concatenate all the FullyDistVec<IT,NT> objects into a single one
**/
/**
 * Concatenate all FullyDistVec<IT,NT> objects in 'vecs' into a single
 * distributed vector, in order. All inputs must share the same CommGrid
 * (aborts with GRIDMISMATCH otherwise).
 *
 * Fixes:
 *  - single-element input returned vecs[1] (out-of-bounds); now vecs[0]
 *  - recvdatabuf was never freed (memory leak)
 */
template <typename IT, typename NT>
FullyDistVec<IT,NT> Concatenate ( std::vector< FullyDistVec<IT,NT> > & vecs)
{
	if(vecs.size() < 1)
	{
		SpParHelper::Print("Warning: Nothing to concatenate, returning empty ");
		return FullyDistVec<IT,NT>();
	}
	else if (vecs.size() < 2)
	{
		// single input: nothing to merge
		return vecs[0];
	}
	else
	{
		typename std::vector< FullyDistVec<IT,NT> >::iterator it = vecs.begin();
		std::shared_ptr<CommGrid> commGridPtr = it->getcommgrid();
		MPI_Comm World = commGridPtr->GetWorld();

		IT nglen = it->TotalLength();       // new global length
		IT cumloclen = it->MyLocLength();   // existing cumulative local lengths
		++it;
		// all vectors must live on the same communication grid
		for(; it != vecs.end(); ++it)
		{
			if(*(commGridPtr) != *(it->getcommgrid()))
			{
				SpParHelper::Print("Grids are not comparable for FullyDistVec<IT,NT>::EWiseApply\n");
				MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
			}
			nglen += it->TotalLength();
			cumloclen += it->MyLocLength();
		}
		FullyDistVec<IT,NT> ConCat (commGridPtr, nglen, NT());
		int nprocs = commGridPtr->GetSize();

		// bucket each local entry by the owner of its position in the
		// concatenated vector
		std::vector< std::vector< NT > > data(nprocs);
		std::vector< std::vector< IT > > inds(nprocs);
		IT gloffset = 0;
		for(it = vecs.begin(); it != vecs.end(); ++it)
		{
			IT loclen = it->LocArrSize();
			for(IT i=0; i < loclen; ++i)
			{
				IT locind;
				IT loffset = it->LengthUntil();
				int owner = ConCat.Owner(gloffset+loffset+i, locind);
				data[owner].push_back(it->arr[i]);
				inds[owner].push_back(locind);
			}
			gloffset += it->TotalLength();
		}

		// exchange counts, then values and destination indices
		int * sendcnt = new int[nprocs];
		int * sdispls = new int[nprocs];
		for(int i=0; i<nprocs; ++i)
			sendcnt[i] = (int) data[i].size();

		int * rdispls = new int[nprocs];
		int * recvcnt = new int[nprocs];
		MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World);  // share the request counts
		sdispls[0] = 0;
		rdispls[0] = 0;
		for(int i=0; i<nprocs-1; ++i)
		{
			sdispls[i+1] = sdispls[i] + sendcnt[i];
			rdispls[i+1] = rdispls[i] + recvcnt[i];
		}
		IT totrecv = std::accumulate(recvcnt,recvcnt+nprocs,static_cast<IT>(0));
		NT * senddatabuf = new NT[cumloclen];
		for(int i=0; i<nprocs; ++i)
		{
			std::copy(data[i].begin(), data[i].end(), senddatabuf+sdispls[i]);
			std::vector<NT>().swap(data[i]);	// delete data vectors
		}
		NT * recvdatabuf = new NT[totrecv];
		MPI_Alltoallv(senddatabuf, sendcnt, sdispls, MPIType<NT>(), recvdatabuf, recvcnt, rdispls, MPIType<NT>(), World);  // send data
		delete [] senddatabuf;

		IT * sendindsbuf = new IT[cumloclen];
		for(int i=0; i<nprocs; ++i)
		{
			std::copy(inds[i].begin(), inds[i].end(), sendindsbuf+sdispls[i]);
			std::vector<IT>().swap(inds[i]);	// delete inds vectors
		}
		IT * recvindsbuf = new IT[totrecv];
		MPI_Alltoallv(sendindsbuf, sendcnt, sdispls, MPIType<IT>(), recvindsbuf, recvcnt, rdispls, MPIType<IT>(), World);  // send new inds
		DeleteAll(sendindsbuf, sendcnt, sdispls);

		// scatter received values into their final local positions
		for(int i=0; i<nprocs; ++i)
		{
			for(int j = rdispls[i]; j < rdispls[i] + recvcnt[i]; ++j)
			{
				ConCat.arr[recvindsbuf[j]] = recvdatabuf[j];
			}
		}
		DeleteAll(recvindsbuf, recvcnt, rdispls);
		delete [] recvdatabuf;	// previously leaked
		return ConCat;
	}
}
/**
 * Pre-flight checks for SpGEMM: inner dimensions must agree and the two
 * operands must be distinct objects. On violation, prints a diagnostic
 * and aborts the MPI job; returns true only when both checks pass.
 */
template <typename MATRIXA, typename MATRIXB>
bool CheckSpGEMMCompliance(const MATRIXA & A, const MATRIXB & B)
{
	const bool dims_ok  = (A.getncol() == B.getnrow());
	const bool no_alias = (static_cast<const void*>(&A) != static_cast<const void*>(&B));

	if(!dims_ok)
	{
		std::ostringstream outs;
		outs << "Can not multiply, dimensions does not match"<< std::endl;
		outs << A.getncol() << " != " << B.getnrow() << std::endl;
		SpParHelper::Print(outs.str());
		MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
		return false;
	}
	if(!no_alias)
	{
		std::ostringstream outs;
		outs << "Can not multiply, inputs alias (make a temporary copy of one of them first)"<< std::endl;
		SpParHelper::Print(outs.str());
		MPI_Abort(MPI_COMM_WORLD, MATRIXALIAS);
		return false;
	}
	return true;
}
// Combined logic for prune, recovery, and select
/**
 * Combined prune / recovery / selection step (MCL-style), in-place on A.
 *  - prune entries <= hardThreshold
 *  - columns left with fewer than recoverNum entries AND sum < recoverPct
 *    get a recovered per-column threshold via Kselect
 *  - columns with more than selectNum entries are cut down via Kselect,
 *    after which recovery may be re-attempted
 * pruneCols accumulates the per-column thresholds applied by the final
 * PruneColumn call.
 *
 * Fixes:
 *  - std::bind2nd (deprecated in C++11, removed in C++17) replaced with
 *    equivalent lambdas (the file already uses lambdas elsewhere)
 *  - COMBBLAS_DEBUG message after selection printed 'nselect' instead of
 *    'n_recovery_after_select'
 */
template <typename IT, typename NT, typename DER>
void MCLPruneRecoverySelect(SpParMat<IT,NT,DER> & A, NT hardThreshold, IT selectNum, IT recoverNum, NT recoverPct, int kselectVersion)
{
#ifdef TIMING
	double t0, t1;
#endif
	// Prune and create a new pruned matrix
	SpParMat<IT,NT,DER> PrunedA = A.Prune([hardThreshold](NT val){ return val <= hardThreshold; }, false);

	// column-wise statistics of the pruned matrix
	FullyDistVec<IT,NT> colSums = PrunedA.Reduce(Column, std::plus<NT>(), 0.0);
	FullyDistVec<IT,NT> nnzPerColumn = PrunedA.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;});
	FullyDistVec<IT,NT> pruneCols(A.getcommgrid(), A.getncol(), hardThreshold);
	PrunedA.FreeMemory();

	// Check if we need recovery
	// columns with nnz < recoverNum (r)
	FullyDistSpVec<IT,NT> recoverCols(nnzPerColumn,
		[recoverNum](NT nnz){ return nnz < static_cast<NT>(recoverNum); });
	recoverCols = recoverPct;
	// columns with nnz < r AND sum < recoverPct (pct)
	recoverCols = EWiseApply<NT>(recoverCols, colSums,
	                             [](NT spval, NT dval){return spval;},
	                             [](NT spval, NT dval){return dval < spval;},
	                             false, NT());
	IT nrecover = recoverCols.getnnz();
	if(nrecover > 0)
	{
#ifdef TIMING
		t0=MPI_Wtime();
#endif
		A.Kselect(recoverCols, recoverNum, kselectVersion);
#ifdef TIMING
		t1=MPI_Wtime();
		mcl_kselecttime += (t1-t0);
#endif
		pruneCols.Set(recoverCols);
#ifdef COMBBLAS_DEBUG
		std::ostringstream outs;
		outs << "Number of columns needing recovery: " << nrecover << std::endl;
		SpParHelper::Print(outs.str());
#endif
	}

	if(selectNum>0)
	{
		// remaining columns will be up for selection
		FullyDistSpVec<IT,NT> selectCols = EWiseApply<NT>(recoverCols, colSums,
		                                                  [](NT spval, NT dval){return spval;},
		                                                  [](NT spval, NT dval){return spval==-1;},
		                                                  true, static_cast<NT>(-1));
		selectCols = selectNum;
		selectCols = EWiseApply<NT>(selectCols, nnzPerColumn,
		                            [](NT spval, NT dval){return spval;},
		                            [](NT spval, NT dval){return dval > spval;},
		                            false, NT());
		IT nselect = selectCols.getnnz();
		if(nselect > 0 )
		{
#ifdef TIMING
			t0=MPI_Wtime();
#endif
			A.Kselect(selectCols, selectNum, kselectVersion); // PrunedA would also work
#ifdef TIMING
			t1=MPI_Wtime();
			mcl_kselecttime += (t1-t0);
#endif
			pruneCols.Set(selectCols);
#ifdef COMBBLAS_DEBUG
			std::ostringstream outs;
			outs << "Number of columns needing selection: " << nselect << std::endl;
			SpParHelper::Print(outs.str());
#endif
#ifdef TIMING
			t0=MPI_Wtime();
#endif
			SpParMat<IT,NT,DER> selectedA = A.PruneColumn(pruneCols, std::less<NT>(), false);
#ifdef TIMING
			t1=MPI_Wtime();
			mcl_prunecolumntime += (t1-t0);
#endif
			if(recoverNum>0 ) // recovery can be attempted after selection
			{
				FullyDistVec<IT,NT> nnzPerColumn1 = selectedA.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;});
				FullyDistVec<IT,NT> colSums1 = selectedA.Reduce(Column, std::plus<NT>(), 0.0);
				selectedA.FreeMemory();

				// selected columns with nnz < recoverNum (r)
				selectCols = recoverNum;
				selectCols = EWiseApply<NT>(selectCols, nnzPerColumn1,
				                            [](NT spval, NT dval){return spval;},
				                            [](NT spval, NT dval){return dval < spval;},
				                            false, NT());
				// selected columns with sum < recoverPct (pct)
				selectCols = recoverPct;
				selectCols = EWiseApply<NT>(selectCols, colSums1,
				                            [](NT spval, NT dval){return spval;},
				                            [](NT spval, NT dval){return dval < spval;},
				                            false, NT());
				IT n_recovery_after_select = selectCols.getnnz();
				if(n_recovery_after_select>0)
				{
					// mclExpandVector2 does it on the original vector
					// mclExpandVector1 does it on one pruned vector
#ifdef TIMING
					t0=MPI_Wtime();
#endif
					A.Kselect(selectCols, recoverNum, kselectVersion); // Kselect on PrunedA might give different result
#ifdef TIMING
					t1=MPI_Wtime();
					mcl_kselecttime += (t1-t0);
#endif
					pruneCols.Set(selectCols);
#ifdef COMBBLAS_DEBUG
					std::ostringstream outs1;
					outs1 << "Number of columns needing recovery after selection: " << n_recovery_after_select << std::endl;
					SpParHelper::Print(outs1.str());
#endif
				}
			}
		}
	}

	// final prune
#ifdef TIMING
	t0=MPI_Wtime();
#endif
	A.PruneColumn(pruneCols, std::less<NT>(), true);
#ifdef TIMING
	t1=MPI_Wtime();
	mcl_prunecolumntime += (t1-t0);
#endif

	// Add loops for empty columns
	if(recoverNum<=0 ) // if recoverNum>0, recovery would have added nonzeros in empty columns
	{
		FullyDistVec<IT,NT> nnzPerColumnA = A.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;});
		FullyDistSpVec<IT,NT> emptyColumns(nnzPerColumnA,
			[](NT nnz){ return nnz == static_cast<NT>(0.0); });
		emptyColumns = 1.00;
		//Ariful: We need a selective AddLoops function with a sparse vector
		//A.AddLoops(emptyColumns);
	}
}
/**
* Broadcasts A multiple times (#phases) in order to save storage in the output
* Only uses 1/phases of C memory if the threshold/max limits are proper
*/
/**
 * Phased SUMMA SpGEMM with MCL prune/recovery/select applied per phase.
 * B is column-split into 'phases' pieces; each phase multiplies A with one
 * piece, merges the stage results, prunes/selects, and the column pieces
 * of C are concatenated at the end. If perProcessMemory > 0 (in GB), the
 * number of phases is re-estimated from memory requirements.
 *
 * Fix: BRecvSizes was allocated with UDERB::esscount rows but deallocated
 * with UDERA::esscount — wrong when the two DER types differ (leak or
 * over-free). Now matches the DoubleBuff variant.
 */
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU,NUO,UDERO> MemEfficientSpGEMM (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B,
                                           int phases, NUO hardThreshold, IU selectNum, IU recoverNum, NUO recoverPct, int kselectVersion, int64_t perProcessMemory)
{
	int myrank;
	MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
	if(A.getncol() != B.getnrow())
	{
		std::ostringstream outs;
		outs << "Can not multiply, dimensions does not match"<< std::endl;
		outs << A.getncol() << " != " << B.getnrow() << std::endl;
		SpParHelper::Print(outs.str());
		MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
		return SpParMat< IU,NUO,UDERO >();
	}
	if(phases <1 || phases >= A.getncol())
	{
		SpParHelper::Print("MemEfficientSpGEMM: The value of phases is too small or large. Resetting to 1.\n");
		phases = 1;
	}

	int stages, dummy; 	// last two parameters of ProductGrid are ignored for Synch multiplication
	std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);

	if(perProcessMemory>0) // estimate the number of phases permitted by memory
	{
		int p;
		MPI_Comm World = GridC->GetWorld();
		MPI_Comm_size(World,&p);

		// max nnz(A) in a process
		int64_t lannz = A.getlocalnnz();
		int64_t gannz;
		MPI_Allreduce(&lannz, &gannz, 1, MPIType<int64_t>(), MPI_MAX, World);
		int64_t inputMem = gannz * 20 * 4; // for four copies (two for SUMMA)

		// max nnz(A^2) stored by summa in a process
		int64_t asquareNNZ = EstPerProcessNnzSUMMA(A,B);
		int64_t asquareMem = asquareNNZ * 24 * 2; // an extra copy in multiway merge and in selection/recovery step

		// estimate kselect memory
		int64_t d = ceil( (asquareNNZ * sqrt(p))/ B.getlocalcols() ); // average nnz per column in A^2 (it is an overestimate because asquareNNZ is estimated based on unmerged matrices)
		// this is equivalent to (asquareNNZ * p) / B.getcol()
		int64_t k = std::min(std::max(selectNum, recoverNum), d );
		int64_t kselectmem = B.getlocalcols() * k * 8 * 3;

		// estimate output memory
		int64_t outputNNZ = (B.getlocalcols() * k)/sqrt(p);
		int64_t outputMem = outputNNZ * 20 * 2;

		//inputMem + outputMem + asquareMem/phases + kselectmem/phases < memory
		int64_t remainingMem = perProcessMemory*1000000000 - inputMem - outputMem;
		if(remainingMem > 0)
		{
			phases = 1 + (asquareMem+kselectmem) / remainingMem;
		}

		if(myrank==0)
		{
			if(remainingMem < 0)
			{
				std::cout << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n Warning: input and output memory requirement is greater than per-process avaiable memory. Keeping phase to the value supplied at the command line. The program may go out of memory and crash! \n !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" << std::endl;
			}
#ifdef SHOW_MEMORY_USAGE
			int64_t maxMemory = kselectmem/phases + inputMem + outputMem + asquareMem / phases;
			if(maxMemory>1000000000)
				std::cout << "phases: " << phases << ": per process memory: " << perProcessMemory << " GB asquareMem: " << asquareMem/1000000000.00 << " GB" << " inputMem: " << inputMem/1000000000.00 << " GB" << " outputMem: " << outputMem/1000000000.00 << " GB" << " kselectmem: " << kselectmem/1000000000.00 << " GB" << std::endl;
			else
				std::cout << "phases: " << phases << ": per process memory: " << perProcessMemory << " GB asquareMem: " << asquareMem/1000000.00 << " MB" << " inputMem: " << inputMem/1000000.00 << " MB" << " outputMem: " << outputMem/1000000.00 << " MB" << " kselectmem: " << kselectmem/1000000.00 << " MB" << std::endl;
#endif
		}
	}

	IU C_m = A.spSeq->getnrow();
	IU C_n = B.spSeq->getncol();

	// split B column-wise into 'phases' pieces
	std::vector< UDERB > PiecesOfB;
	UDERB CopyB = *(B.spSeq); // we allow alias matrices as input because of this local copy
	CopyB.ColSplit(phases, PiecesOfB); // CopyB's memory is destroyed at this point
	MPI_Barrier(GridC->GetWorld());

	IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages);
	IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages);
	SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld());

	// Remotely fetched matrices are stored as pointers
	UDERA * ARecv;
	UDERB * BRecv;

	std::vector< UDERO > toconcatenate;

	int Aself = (A.commGrid)->GetRankInProcRow();
	int Bself = (B.commGrid)->GetRankInProcCol();

	for(int p = 0; p< phases; ++p)
	{
		SpParHelper::GetSetSizes( PiecesOfB[p], BRecvSizes, (B.commGrid)->GetColWorld());
		std::vector< SpTuples<IU,NUO> *> tomerge;
		for(int i = 0; i < stages; ++i)
		{
			std::vector<IU> ess;
			if(i == Aself)  ARecv = A.spSeq;	// shallow-copy
			else
			{
				ess.resize(UDERA::esscount);
				for(int j=0; j< UDERA::esscount; ++j)
					ess[j] = ARecvSizes[j][i];	// essentials of the ith matrix in this row
				ARecv = new UDERA();			// first, create the object
			}
#ifdef TIMING
			double t0=MPI_Wtime();
#endif
			SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i);	// then, receive its elements
#ifdef TIMING
			double t1=MPI_Wtime();
			mcl_Abcasttime += (t1-t0);
#endif
			ess.clear();
			if(i == Bself)  BRecv = &(PiecesOfB[p]);	// shallow-copy
			else
			{
				ess.resize(UDERB::esscount);
				for(int j=0; j< UDERB::esscount; ++j)
					ess[j] = BRecvSizes[j][i];
				BRecv = new UDERB();
			}
#ifdef TIMING
			double t2=MPI_Wtime();
#endif
			SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i);	// then, receive its elements
#ifdef TIMING
			double t3=MPI_Wtime();
			mcl_Bbcasttime += (t3-t2);
#endif
#ifdef TIMING
			double t4=MPI_Wtime();
#endif
			// last two args ask LocalSpGEMM to delete remotely-fetched operands
			SpTuples<IU,NUO> * C_cont = LocalSpGEMM<SR, NUO>(*ARecv, *BRecv,i != Aself, i != Bself);
#ifdef TIMING
			double t5=MPI_Wtime();
			mcl_localspgemmtime += (t5-t4);
#endif
			if(!C_cont->isZero())
				tomerge.push_back(C_cont);
			else
				delete C_cont;
		}   // all stages executed

#ifdef SHOW_MEMORY_USAGE
		int64_t gcnnz_unmerged, lcnnz_unmerged = 0;
		for(size_t i = 0; i < tomerge.size(); ++i)
		{
			lcnnz_unmerged += tomerge[i]->getnnz();
		}
		MPI_Allreduce(&lcnnz_unmerged, &gcnnz_unmerged, 1, MPIType<int64_t>(), MPI_MAX, MPI_COMM_WORLD);
		int64_t summa_memory = gcnnz_unmerged*20;//(gannz*2 + phase_nnz + gcnnz_unmerged + gannz + gannz/phases) * 20;	// last two for broadcasts

		if(myrank==0)
		{
			if(summa_memory>1000000000)
				std::cout << p+1 << ". unmerged: " << summa_memory/1000000000.00 << "GB " ;
			else
				std::cout << p+1 << ". unmerged: " << summa_memory/1000000.00 << " MB " ;
		}
#endif

#ifdef TIMING
		double t6=MPI_Wtime();
#endif
		//UDERO OnePieceOfC(MergeAll<SR>(tomerge, C_m, PiecesOfB[p].getncol(),true), false);
		// TODO: MultiwayMerge can directly return UDERO inorder to avoid the extra copy
		SpTuples<IU,NUO> * OnePieceOfC_tuples = MultiwayMerge<SR>(tomerge, C_m, PiecesOfB[p].getncol(),true);

#ifdef SHOW_MEMORY_USAGE
		int64_t gcnnz_merged, lcnnz_merged ;
		lcnnz_merged = OnePieceOfC_tuples->getnnz();
		MPI_Allreduce(&lcnnz_merged, &gcnnz_merged, 1, MPIType<int64_t>(), MPI_MAX, MPI_COMM_WORLD);

		// TODO: we can remove gcnnz_merged memory here because we don't need to concatenate anymore
		int64_t merge_memory = gcnnz_merged*2*20;//(gannz*2 + phase_nnz + gcnnz_unmerged + gcnnz_merged*2) * 20;

		if(myrank==0)
		{
			if(merge_memory>1000000000)
				std::cout << " merged: " << merge_memory/1000000000.00 << "GB " ;
			else
				std::cout << " merged: " << merge_memory/1000000.00 << " MB " ;
		}
#endif

#ifdef TIMING
		double t7=MPI_Wtime();
		mcl_multiwaymergetime += (t7-t6);
#endif
		UDERO * OnePieceOfC = new UDERO(* OnePieceOfC_tuples, false);
		delete OnePieceOfC_tuples;

		SpParMat<IU,NUO,UDERO> OnePieceOfC_mat(OnePieceOfC, GridC);
		MCLPruneRecoverySelect(OnePieceOfC_mat, hardThreshold, selectNum, recoverNum, recoverPct, kselectVersion);

#ifdef SHOW_MEMORY_USAGE
		int64_t gcnnz_pruned, lcnnz_pruned ;
		lcnnz_pruned = OnePieceOfC_mat.getlocalnnz();
		MPI_Allreduce(&lcnnz_pruned, &gcnnz_pruned, 1, MPIType<int64_t>(), MPI_MAX, MPI_COMM_WORLD);

		// TODO: we can remove gcnnz_merged memory here because we don't need to concatenate anymore
		int64_t prune_memory = gcnnz_pruned*2*20;//(gannz*2 + phase_nnz + gcnnz_pruned*2) * 20 + kselectmem; // 3 extra copies of OnePieceOfC_mat, we can make it one extra copy!
		//phase_nnz += gcnnz_pruned;

		if(myrank==0)
		{
			if(prune_memory>1000000000)
				std::cout << "Prune: " << prune_memory/1000000000.00 << "GB " << std::endl ;
			else
				std::cout << "Prune: " << prune_memory/1000000.00 << " MB " << std::endl ;
		}
#endif

		// ABAB: Change this to accept pointers to objects
		toconcatenate.push_back(OnePieceOfC_mat.seq());
	}

	UDERO * C = new UDERO(0,C_m, C_n,0);
	C->ColConcatenate(toconcatenate);	// ABAB: Change this to accept a vector of pointers to pointers to DER objects

	SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
	SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);	// fixed: was UDERA::esscount
	return SpParMat<IU,NUO,UDERO> (C, GridC);
}
/**
* Parallel C = A*B routine that uses a double buffered broadcasting scheme
* @pre { Input matrices, A and B, should not alias }
* Most memory efficient version available. Total stages: 2*sqrt(p)
* Memory requirement during first sqrt(p) stages: <= (3/2)*(nnz(A)+nnz(B))+(1/2)*nnz(C)
* Memory requirement during second sqrt(p) stages: <= nnz(A)+nnz(B)+nnz(C)
* Final memory requirement: nnz(C) if clearA and clearB are true
**/
// Double-buffered SUMMA: A and B are each split in two, and two rounds of
// sqrt(p) broadcast/multiply stages are run, halving peak broadcast memory.
// B is transposed up front and transposed back at the end (unless clearB).
// Code intentionally left unchanged; comments only.
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU,NUO,UDERO> Mult_AnXBn_DoubleBuff
		(SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false )
{
	if(!CheckSpGEMMCompliance(A,B) )
	{
		return SpParMat< IU,NUO,UDERO >();
	}
	int stages, dummy; 	// last two parameters of ProductGrid are ignored for Synch multiplication
	std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
	IU C_m = A.spSeq->getnrow();
	IU C_n = B.spSeq->getncol();

	// Split both operands in two halves; each round broadcasts one half
	UDERA * A1seq = new UDERA();
	UDERA * A2seq = new UDERA();
	UDERB * B1seq = new UDERB();
	UDERB * B2seq = new UDERB();
	(A.spSeq)->Split( *A1seq, *A2seq);
	const_cast< UDERB* >(B.spSeq)->Transpose();
	(B.spSeq)->Split( *B1seq, *B2seq);
	MPI_Barrier(GridC->GetWorld());

	IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages);
	IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages);

	SpParHelper::GetSetSizes( *A1seq, ARecvSizes, (A.commGrid)->GetRowWorld());
	SpParHelper::GetSetSizes( *B1seq, BRecvSizes, (B.commGrid)->GetColWorld());

	// Remotely fetched matrices are stored as pointers
	UDERA * ARecv;
	UDERB * BRecv;
	std::vector< SpTuples<IU,NUO> *> tomerge;	// stage products from BOTH rounds

	int Aself = (A.commGrid)->GetRankInProcRow();
	int Bself = (B.commGrid)->GetRankInProcCol();

	// First round: multiply the A1/B1 halves
	for(int i = 0; i < stages; ++i)
	{
		std::vector<IU> ess;
		if(i == Aself)
		{
			ARecv = A1seq;	// shallow-copy
		}
		else
		{
			ess.resize(UDERA::esscount);
			for(int j=0; j< UDERA::esscount; ++j)
			{
				ess[j] = ARecvSizes[j][i];		// essentials of the ith matrix in this row
			}
			ARecv = new UDERA();				// first, create the object
		}
		SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i);	// then, receive its elements
		ess.clear();

		if(i == Bself)
		{
			BRecv = B1seq;	// shallow-copy
		}
		else
		{
			ess.resize(UDERB::esscount);
			for(int j=0; j< UDERB::esscount; ++j)
			{
				ess[j] = BRecvSizes[j][i];
			}
			BRecv = new UDERB();
		}
		SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i);	// then, receive its elements
		// remotely-fetched operands are deleted inside MultiplyReturnTuples
		SpTuples<IU,NUO> * C_cont = MultiplyReturnTuples<SR, NUO>
						(*ARecv, *BRecv,	// parameters themselves
						false, true,		// transpose information (B is transposed)
						i != Aself, 		// 'delete A' condition
						i != Bself);		// 'delete B' condition
		if(!C_cont->isZero())
			tomerge.push_back(C_cont);
		else
			delete C_cont;
	}
	if(clearA) delete A1seq;
	if(clearB) delete B1seq;

	// Set the new dimensions
	SpParHelper::GetSetSizes( *A2seq, ARecvSizes, (A.commGrid)->GetRowWorld());
	SpParHelper::GetSetSizes( *B2seq, BRecvSizes, (B.commGrid)->GetColWorld());

	// Start the second round: multiply the A2/B2 halves
	for(int i = 0; i < stages; ++i)
	{
		std::vector<IU> ess;
		if(i == Aself)
		{
			ARecv = A2seq;	// shallow-copy
		}
		else
		{
			ess.resize(UDERA::esscount);
			for(int j=0; j< UDERA::esscount; ++j)
			{
				ess[j] = ARecvSizes[j][i];		// essentials of the ith matrix in this row
			}
			ARecv = new UDERA();				// first, create the object
		}
		SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i);	// then, receive its elements
		ess.clear();

		if(i == Bself)
		{
			BRecv = B2seq;	// shallow-copy
		}
		else
		{
			ess.resize(UDERB::esscount);
			for(int j=0; j< UDERB::esscount; ++j)
			{
				ess[j] = BRecvSizes[j][i];
			}
			BRecv = new UDERB();
		}
		SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i);	// then, receive its elements
		SpTuples<IU,NUO> * C_cont = MultiplyReturnTuples<SR, NUO>
						(*ARecv, *BRecv,	// parameters themselves
						false, true,		// transpose information (B is transposed)
						i != Aself, 		// 'delete A' condition
						i != Bself);		// 'delete B' condition
		if(!C_cont->isZero())
			tomerge.push_back(C_cont);
		else
			delete C_cont;
	}
	SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
	SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);

	// Either free the operands or stitch the halves back together
	if(clearA)
	{
		delete A2seq;
		delete A.spSeq;
		A.spSeq = NULL;
	}
	else
	{
		(A.spSeq)->Merge(*A1seq, *A2seq);
		delete A1seq;
		delete A2seq;
	}
	if(clearB)
	{
		delete B2seq;
		delete B.spSeq;
		B.spSeq = NULL;
	}
	else
	{
		(B.spSeq)->Merge(*B1seq, *B2seq);
		delete B1seq;
		delete B2seq;
		const_cast< UDERB* >(B.spSeq)->Transpose();	// transpose back to original
	}
	UDERO * C = new UDERO(MergeAll<SR>(tomerge, C_m, C_n,true), false);
	return SpParMat<IU,NUO,UDERO> (C, GridC);		// return the result object
}
/**
* Parallel A = B*C routine that uses only MPI-1 features
* Relies on simple blocking broadcast
* @pre { Input matrices, A and B, should not alias }
**/
/**
 * Synchronous (blocking-broadcast, MPI-1 only) SUMMA SpGEMM: C = A*B.
 * Each of the sqrt(p) stages broadcasts one block-row of A and one
 * block-column of B, multiplies locally (column-by-column, no transpose),
 * and the per-stage tuple results are multiway-merged at the end.
 * clearA/clearB free the inputs' local storage after use.
 *
 * Fix: the intermediate C_tuples returned by MultiwayMerge was never
 * deleted after being copied into the UDERO result (memory leak; the
 * MemEfficientSpGEMM variant deletes its analogous tuples object).
 */
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU, NUO, UDERO> Mult_AnXBn_Synch
		(SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false )
{
	if(!CheckSpGEMMCompliance(A,B) )
	{
		return SpParMat< IU,NUO,UDERO >();
	}
	int stages, dummy; 	// last two parameters of ProductGrid are ignored for Synch multiplication
	std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
	IU C_m = A.spSeq->getnrow();
	IU C_n = B.spSeq->getncol();

	//const_cast< UDERB* >(B.spSeq)->Transpose(); // do not transpose for colum-by-column multiplication
	MPI_Barrier(GridC->GetWorld());

	IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages);
	IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages);

	SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld());
	SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld());

	// Remotely fetched matrices are stored as pointers
	UDERA * ARecv;
	UDERB * BRecv;
	std::vector< SpTuples<IU,NUO> *> tomerge;

	int Aself = (A.commGrid)->GetRankInProcRow();
	int Bself = (B.commGrid)->GetRankInProcCol();

	for(int i = 0; i < stages; ++i)
	{
		std::vector<IU> ess;
		if(i == Aself)
		{
			ARecv = A.spSeq;	// shallow-copy
		}
		else
		{
			ess.resize(UDERA::esscount);
			for(int j=0; j< UDERA::esscount; ++j)
			{
				ess[j] = ARecvSizes[j][i];		// essentials of the ith matrix in this row
			}
			ARecv = new UDERA();				// first, create the object
		}
		SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i);	// then, receive its elements
		ess.clear();

		if(i == Bself)
		{
			BRecv = B.spSeq;	// shallow-copy
		}
		else
		{
			ess.resize(UDERB::esscount);
			for(int j=0; j< UDERB::esscount; ++j)
			{
				ess[j] = BRecvSizes[j][i];
			}
			BRecv = new UDERB();
		}
		SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i);	// then, receive its elements

		/*
		// before activating this transpose B first
		SpTuples<IU,NUO> * C_cont = MultiplyReturnTuples<SR, NUO>
						(*ARecv, *BRecv,	// parameters themselves
						false, true,		// transpose information (B is transposed)
						i != Aself, 		// 'delete A' condition
						i != Bself);		// 'delete B' condition
		*/
		// remotely-fetched operands are deleted inside LocalSpGEMM
		SpTuples<IU,NUO> * C_cont = LocalSpGEMM<SR, NUO>
						(*ARecv, *BRecv,	// parameters themselves
						i != Aself, 		// 'delete A' condition
						i != Bself);		// 'delete B' condition

		if(!C_cont->isZero())
			tomerge.push_back(C_cont);

#ifdef COMBBLAS_DEBUG
		std::ostringstream outs;
		outs << i << "th SUMMA iteration"<< std::endl;
		SpParHelper::Print(outs.str());
#endif
	}
	if(clearA && A.spSeq != NULL)
	{
		delete A.spSeq;
		A.spSeq = NULL;
	}
	if(clearB && B.spSeq != NULL)
	{
		delete B.spSeq;
		B.spSeq = NULL;
	}

	SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
	SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);

	//UDERO * C = new UDERO(MergeAll<SR>(tomerge, C_m, C_n,true), false);
	// First get the result in SpTuples, then convert to UDER
	// the last parameter to MergeAll deletes tomerge arrays
	SpTuples<IU,NUO> * C_tuples = MultiwayMerge<SR>(tomerge, C_m, C_n,true);
	UDERO * C = new UDERO(*C_tuples, false);
	delete C_tuples;	// previously leaked

	//if(!clearB)
	//	const_cast< UDERB* >(B.spSeq)->Transpose();	// transpose back to original

	return SpParMat<IU,NUO,UDERO> (C, GridC);		// return the result object
}
/**
* Estimate the maximum nnz needed to store in a process from all stages of SUMMA before reduction
* @pre { Input matrices, A and B, should not alias }
**/
template <typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
IU EstPerProcessNnzSUMMA(SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B)
{
	// Dry-run of the SUMMA stages that only estimates nnz(C) per process
	// (before reduction); returns the maximum estimate over all processes.
	IU nnzC_SUMMA = 0;
	if(A.getncol() != B.getnrow())
	{
		std::ostringstream outs;
		outs << "Can not multiply, dimensions does not match"<< std::endl;
		outs << A.getncol() << " != " << B.getnrow() << std::endl;
		SpParHelper::Print(outs.str());
		MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
		return nnzC_SUMMA;
	}
	int stages, dummy; 	// last two parameters of ProductGrid are ignored for Synch multiplication
	std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
	MPI_Barrier(GridC->GetWorld());
	// Same essentials exchange as Mult_AnXBn_Synch.
	IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages);
	IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages);
	SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld());
	SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld());
	// Remotely fetched matrices are stored as pointers
	UDERA * ARecv;
	UDERB * BRecv;
	int Aself = (A.commGrid)->GetRankInProcRow();
	int Bself = (B.commGrid)->GetRankInProcCol();
	for(int i = 0; i < stages; ++i)
	{
		std::vector<IU> ess;
		if(i == Aself)
		{
			ARecv = A.spSeq;	// shallow-copy
		}
		else
		{
			ess.resize(UDERA::esscount);
			for(int j=0; j< UDERA::esscount; ++j)
			{
				ess[j] = ARecvSizes[j][i];	// essentials of the ith matrix in this row
			}
			ARecv = new UDERA();	// first, create the object
		}
		SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i);	// then, receive its elements
		ess.clear();
		if(i == Bself)
		{
			BRecv = B.spSeq;	// shallow-copy
		}
		else
		{
			ess.resize(UDERB::esscount);
			for(int j=0; j< UDERB::esscount; ++j)
			{
				ess[j] = BRecvSizes[j][i];
			}
			BRecv = new UDERB();
		}
		SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i);	// then, receive its elements
		// Per-column nnz estimate for this stage's partial product.
		IU* colnnzC = estimateNNZ(*ARecv, *BRecv);
		IU nzc = BRecv->GetDCSC()->nzc;
		IU nnzC_stage = 0;
#ifdef THREADED
#pragma omp parallel for reduction (+:nnzC_stage)
#endif
		for (IU k=0; k<nzc; k++)
		{
			nnzC_stage = nnzC_stage + colnnzC[k];
		}
		nnzC_SUMMA += nnzC_stage;
		// FIX: release per-stage scratch that previously leaked on every stage.
		delete [] colnnzC;	// NOTE(review): assumes estimateNNZ allocates with new[] — verify
		// FIX: unlike Mult_AnXBn_Synch (where LocalSpGEMM's delete flags free
		// the broadcast copies), nothing freed ARecv/BRecv here.
		if(i != Aself)
			delete ARecv;
		if(i != Bself)
			delete BRecv;
	}
	SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
	SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);
	IU nnzC_SUMMA_max = 0;
	MPI_Allreduce(&nnzC_SUMMA, &nnzC_SUMMA_max, 1, MPIType<IU>(), MPI_MAX, GridC->GetWorld());
	return nnzC_SUMMA_max;
}
template <typename MATRIX, typename VECTOR>
void CheckSpMVCompliance(const MATRIX & A, const VECTOR & x)
{
	// Sanity checks before an SpMV: the vector length must equal the matrix
	// column count, and both operands must live on the same process grid.
	// Violations are fatal (MPI_Abort).
	if(A.getncol() != x.TotalLength())
	{
		std::ostringstream errmsg;
		errmsg << "Can not multiply, dimensions does not match"<< std::endl;
		errmsg << A.getncol() << " != " << x.TotalLength() << std::endl;
		SpParHelper::Print(errmsg.str());
		MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
	}
	if(! ( *(A.getcommgrid()) == *(x.getcommgrid())) )
	{
		std::cout << "Grids are not comparable for SpMV" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
	}
}
// Forward declaration: full sparse SpMV taking a caller-provided communication
// buffer (optbuf); the definition appears further below in this file.
template <typename SR, typename IU, typename NUM, typename UDER>
FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV
(const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue, OptBuf<int32_t, typename promote_trait<NUM,IU>::T_promote > & optbuf);
// Convenience overload: same as above but without the graph500 communication
// buffer optimization (a default-constructed OptBuf disables it).
template <typename SR, typename IU, typename NUM, typename UDER>
FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV
(const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue)
{
typedef typename promote_trait<NUM,IU>::T_promote T_promote;
OptBuf<int32_t, T_promote > optbuf = OptBuf<int32_t, T_promote >();
return SpMV<SR>(A, x, indexisvalue, optbuf);
}
/**
* Step 1 of the sparse SpMV algorithm
* @param[in,out] trxlocnz, lenuntil,trxinds,trxnums { set or allocated }
* @param[in] indexisvalue
**/
template<typename IU, typename NV>
void TransposeVector(MPI_Comm & World, const FullyDistSpVec<IU,NV> & x, int32_t & trxlocnz, IU & lenuntil, int32_t * & trxinds, NV * & trxnums, bool indexisvalue)
{
	// Step 1 of the sparse SpMV algorithm: swap the local piece of x with the
	// transpose (diagonal) partner process. On exit, trxlocnz/lenuntil are set
	// and trxinds (and trxnums, only when !indexisvalue) are newly allocated;
	// the caller takes ownership.
	int32_t xlocnz = (int32_t) x.getlocnnz();
	int32_t roffst = (int32_t) x.RowLenUntil();	// since trxinds is int32_t
	int32_t roffset;
	IU luntil = x.LengthUntil();
	int diagneigh = x.commGrid->GetComplementRank();
	MPI_Status status;
	// Exchange metadata first: row offset, nnz count, global prefix length.
	MPI_Sendrecv(&roffst, 1, MPIType<int32_t>(), diagneigh, TROST, &roffset, 1, MPIType<int32_t>(), diagneigh, TROST, World, &status);
	MPI_Sendrecv(&xlocnz, 1, MPIType<int32_t>(), diagneigh, TRNNZ, &trxlocnz, 1, MPIType<int32_t>(), diagneigh, TRNNZ, World, &status);
	MPI_Sendrecv(&luntil, 1, MPIType<IU>(), diagneigh, TRLUT, &lenuntil, 1, MPIType<IU>(), diagneigh, TRLUT, World, &status);
	// ABAB: Important observation is that local indices (given by x.ind) is 32-bit addressible
	// Copy them to 32 bit integers and transfer that to save 50% of off-node bandwidth
	trxinds = new int32_t[trxlocnz];
	int32_t * temp_xind = new int32_t[xlocnz];
#ifdef THREADED
#pragma omp parallel for
#endif
	for(int i=0; i< xlocnz; ++i)
		temp_xind[i] = (int32_t) x.ind[i];
	MPI_Sendrecv(temp_xind, xlocnz, MPIType<int32_t>(), diagneigh, TRI, trxinds, trxlocnz, MPIType<int32_t>(), diagneigh, TRI, World, &status);
	delete [] temp_xind;
	if(!indexisvalue)
	{
		// Numerical values only travel when indices alone don't carry them.
		trxnums = new NV[trxlocnz];
		MPI_Sendrecv(const_cast<NV*>(SpHelper::p2a(x.num)), xlocnz, MPIType<NV>(), diagneigh, TRX, trxnums, trxlocnz, MPIType<NV>(), diagneigh, TRX, World, &status);
	}
	// FIX: std::bind2nd was deprecated in C++11 and removed in C++17; a lambda
	// expresses the same index shift and compiles under all modern standards.
	// fullydist indexing (p pieces) -> matrix indexing (sqrt(p) pieces)
	std::transform(trxinds, trxinds+trxlocnz, trxinds,
			[roffset](int32_t ind) { return ind + roffset; });
}
/**
* Step 2 of the sparse SpMV algorithm
* @param[in,out] trxinds, trxnums { deallocated }
* @param[in,out] indacc, numacc { allocated }
* @param[in,out] accnz { set }
* @param[in] trxlocnz, lenuntil, indexisvalue
**/
template<typename IU, typename NV>
void AllGatherVector(MPI_Comm & ColWorld, int trxlocnz, IU lenuntil, int32_t * & trxinds, NV * & trxnums,
int32_t * & indacc, NV * & numacc, int & accnz, bool indexisvalue)
{
// Step 2 of the sparse SpMV algorithm: gather every process-column piece of the
// transposed vector onto all processes of the column. On exit, indacc/numacc
// are freshly allocated (caller owns) and accnz holds their length; the inputs
// trxinds (and trxnums when used) are deallocated here.
int colneighs, colrank;
MPI_Comm_size(ColWorld, &colneighs);
MPI_Comm_rank(ColWorld, &colrank);
// Gather each neighbor's nnz count to compute allgatherv displacements.
int * colnz = new int[colneighs];
colnz[colrank] = trxlocnz;
MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colnz, 1, MPI_INT, ColWorld);
int * dpls = new int[colneighs]();	// displacements (zero initialized pid)
std::partial_sum(colnz, colnz+colneighs-1, dpls+1);
accnz = std::accumulate(colnz, colnz+colneighs, 0);
indacc = new int32_t[accnz];
numacc = new NV[accnz];
// ABAB: Future issues here, colnz is of type int (MPI limitation)
// What if the aggregate vector size along the processor row/column is not 32-bit addressible?
// This will happen when n/sqrt(p) > 2^31
// Currently we can solve a small problem (scale 32) with 4096 processor
// For a medium problem (scale 35), we'll need 32K processors which gives sqrt(p) ~ 180
// 2^35 / 180 ~ 2^29 / 3 which is not an issue !
#ifdef TIMING
double t0=MPI_Wtime();
#endif
MPI_Allgatherv(trxinds, trxlocnz, MPIType<int32_t>(), indacc, colnz, dpls, MPIType<int32_t>(), ColWorld);
delete [] trxinds;
if(indexisvalue)
{
// Values are reconstructed from indices: add the column's global offset
// (known only to rank 0 of the column, hence the broadcast).
IU lenuntilcol;
if(colrank == 0)	lenuntilcol = lenuntil;
MPI_Bcast(&lenuntilcol, 1, MPIType<IU>(), 0, ColWorld);
for(int i=0; i< accnz; ++i)	// fill numerical values from indices
{
numacc[i] = indacc[i] + lenuntilcol;
}
}
else
{
// Values were transferred explicitly; gather them the same way as indices.
MPI_Allgatherv(trxnums, trxlocnz, MPIType<NV>(), numacc, colnz, dpls, MPIType<NV>(), ColWorld);
delete [] trxnums;
}
#ifdef TIMING
double t1=MPI_Wtime();
cblas_allgathertime += (t1-t0);
#endif
DeleteAll(colnz,dpls);
}
/**
* Step 3 of the sparse SpMV algorithm, with the semiring
* @param[in,out] optbuf {scratch space for all-to-all (fold) communication}
* @param[in,out] indacc, numacc {index and values of the input vector, deleted upon exit}
* @param[in,out] sendindbuf, sendnumbuf {index and values of the output vector, created}
**/
template<typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void LocalSpMV(const SpParMat<IU,NUM,UDER> & A, int rowneighs, OptBuf<int32_t, OVT > & optbuf, int32_t * & indacc, IVT * & numacc,
int32_t * & sendindbuf, OVT * & sendnumbuf, int * & sdispls, int * sendcnt, int accnz, bool indexisvalue, PreAllocatedSPA<OVT> & SPA)
{
// Step 3 of the sparse SpMV algorithm: the local matrix-vector product.
// Dispatches to one of three kernels depending on (a) whether the graph500
// optimization buffer is enabled (optbuf.totmax > 0) and (b) whether the local
// matrix is split for threading (getnsplit() > 0). In all paths indacc/numacc
// are deallocated; output buffers end up either in optbuf or in the newly
// allocated sendindbuf/sendnumbuf/sdispls (caller owns).
if(optbuf.totmax > 0)	// graph500 optimization enabled
{
if(A.spSeq->getnsplit() > 0)
{
// optbuf.{inds/nums/dspls} and sendcnt are all pre-allocated and only filled by dcsc_gespmv_threaded
generic_gespmv_threaded_setbuffers<SR> (*(A.spSeq), indacc, numacc, accnz, optbuf.inds, optbuf.nums, sendcnt, optbuf.dspls, rowneighs);
}
else
{
generic_gespmv<SR> (*(A.spSeq), indacc, numacc, accnz, optbuf.inds, optbuf.nums, sendcnt, optbuf.dspls, rowneighs, indexisvalue);
}
DeleteAll(indacc,numacc);
}
else
{
if(A.spSeq->getnsplit() > 0)
{
// sendindbuf/sendnumbuf/sdispls are all allocated and filled by dcsc_gespmv_threaded
int totalsent = generic_gespmv_threaded<SR> (*(A.spSeq), indacc, numacc, accnz, sendindbuf, sendnumbuf, sdispls, rowneighs, SPA);
DeleteAll(indacc, numacc);
// Derive per-destination counts from consecutive displacements; the last
// count comes from the total since there is no sdispls[rowneighs].
for(int i=0; i<rowneighs-1; ++i)
sendcnt[i] = sdispls[i+1] - sdispls[i];
sendcnt[rowneighs-1] = totalsent - sdispls[rowneighs-1];
}
else
{
// default SpMSpV
std::vector< int32_t > indy;
std::vector< OVT >  numy;
generic_gespmv<SR>(*(A.spSeq), indacc, numacc, accnz, indy, numy, SPA);
DeleteAll(indacc, numacc);
int32_t bufsize = indy.size();	// as compact as possible
sendindbuf = new int32_t[bufsize];
sendnumbuf = new OVT[bufsize];
int32_t perproc = A.getlocalrows() / rowneighs;
// Result indices (indy) come out sorted, so a single sweep partitions
// them by destination process and rebases each index to be local.
int k = 0;	// index to buffer
for(int i=0; i<rowneighs; ++i)
{
// the last processor in the row also covers the remainder rows
int32_t end_this = (i==rowneighs-1) ? A.getlocalrows(): (i+1)*perproc;
while(k < bufsize && indy[k] < end_this)
{
sendindbuf[k] = indy[k] - i*perproc;
sendnumbuf[k] = numy[k];
++sendcnt[i];
++k;
}
}
sdispls = new int[rowneighs]();
std::partial_sum(sendcnt, sendcnt+rowneighs-1, sdispls+1);
//#endif
}
}
}
// non threaded
// non threaded
// K-way merge of the per-neighbor (index, value) lists into one sorted result,
// combining duplicate indices with the semiring addition SR::add. Each input
// list is assumed to be sorted by index. listSizes[i] is the length of
// indsvec[i]/numsvec[i]; output is appended into mergedind/mergednum.
template <typename SR, typename IU, typename OVT>
void MergeContributions(int* listSizes, std::vector<int32_t *> & indsvec, std::vector<OVT *> & numsvec, std::vector<IU>& mergedind, std::vector<OVT>& mergednum)
{
int nlists =  indsvec.size();
// this condition is checked in the caller SpMV function.
// I am still putting it here for completeness
if(nlists == 1)
{
// simply copy data
int veclen = listSizes[0];
mergedind.resize(veclen);
mergednum.resize(veclen);
for(int i=0; i<veclen; i++)
{
mergedind[i] = indsvec[0][i];
mergednum[i] = numsvec[0][i];
}
return;
}
// Min-heap keyed on the current index of each list; locv tracks which list.
int32_t hsize = 0;
int32_t inf = std::numeric_limits<int32_t>::min();
int32_t sup = std::numeric_limits<int32_t>::max();
KNHeap< int32_t, int32_t > sHeap(sup, inf);
int * processed = new int[nlists]();	// how many entries consumed per list
for(int i=0; i<nlists; ++i)
{
if(listSizes[i] > 0)
{
// key, list_id
sHeap.insert(indsvec[i][0], i);
++hsize;
}
}
int32_t key, locv;
// Pop the first element outside the loop so the duplicate check below can
// always compare against mergedind.back().
if(hsize > 0)
{
sHeap.deleteMin(&key, &locv);
mergedind.push_back( static_cast<IU>(key));
mergednum.push_back(numsvec[locv][0]);	// nothing is processed yet
if( (++(processed[locv])) < listSizes[locv] )
sHeap.insert(indsvec[locv][processed[locv]], locv);
else
--hsize;
}
while(hsize > 0)
{
sHeap.deleteMin(&key, &locv);
if(mergedind.back() == static_cast<IU>(key))
{
// Same output index seen again: combine via the semiring addition.
mergednum.back() = SR::add(mergednum.back(), numsvec[locv][processed[locv]]);
// ABAB: Benchmark actually allows us to be non-deterministic in terms of parent selection
// We can just skip this addition operator (if it's a max/min select)
}
else
{
mergedind.push_back(static_cast<IU>(key));
mergednum.push_back(numsvec[locv][processed[locv]]);
}
if( (++(processed[locv])) < listSizes[locv] )
sHeap.insert(indsvec[locv][processed[locv]], locv);
else
--hsize;
}
DeleteAll(processed);
}
// Threaded wrapper around MergeContributions: partitions the index space
// [0, maxindex) into nsplits consistent ranges (via lower_bound on each sorted
// input list), merges each range independently in parallel, then concatenates
// the per-range results. Duplicate indices always fall into the same split, so
// semiring combining stays correct.
template <typename SR, typename IU, typename OVT>
void MergeContributions_threaded(int * & listSizes, std::vector<int32_t *> & indsvec, std::vector<OVT *> & numsvec, std::vector<IU> & mergedind, std::vector<OVT> & mergednum, IU maxindex)
{
int nlists =  indsvec.size();
// this condition is checked in the caller SpMV function.
// I am still putting it here for completeness
if(nlists == 1)
{
// simply copy data
int veclen = listSizes[0];
mergedind.resize(veclen);
mergednum.resize(veclen);
#ifdef THREADED
#pragma omp parallel for
#endif
for(int i=0; i<veclen; i++)
{
mergedind[i] = indsvec[0][i];
mergednum[i] = numsvec[0][i];
}
return;
}
int nthreads=1;
#ifdef THREADED
#pragma omp parallel
{
nthreads = omp_get_num_threads();
}
#endif
int nsplits = 4*nthreads;	// oversplit for load balance
nsplits = std::min(nsplits, (int)maxindex);
// splitters[k][i] = position in list k where split i begins; computed with a
// binary search for the same cut index in every list.
std::vector< std::vector<int32_t> > splitters(nlists);
for(int k=0; k< nlists; k++)
{
splitters[k].resize(nsplits+1);
splitters[k][0] = static_cast<int32_t>(0);
#pragma omp parallel for
for(int i=1; i< nsplits; i++)
{
IU cur_idx = i * (maxindex/nsplits);
auto it = std::lower_bound (indsvec[k], indsvec[k] + listSizes[k], cur_idx);
splitters[k][i] = (int32_t) (it - indsvec[k]);
}
splitters[k][nsplits] = listSizes[k];
}
// ------ perform merge in parallel ------
std::vector<std::vector<IU>> indsBuf(nsplits);
std::vector<std::vector<OVT>> numsBuf(nsplits);
//TODO: allocate these vectors here before calling MergeContributions
#pragma omp parallel for schedule(dynamic)
for(int i=0; i< nsplits; i++)
{
// Build per-split views (pointer + length) into each input list, then
// run the serial merge on just that slice.
std::vector<int32_t *> tIndsVec(nlists);
std::vector<OVT *> tNumsVec(nlists);
std::vector<int> tLengths(nlists);
for(int j=0; j< nlists; ++j)
{
tIndsVec[j] = indsvec[j] + splitters[j][i];
tNumsVec[j] = numsvec[j] + splitters[j][i];
tLengths[j]= splitters[j][i+1] - splitters[j][i];
}
MergeContributions<SR>(tLengths.data(), tIndsVec, tNumsVec, indsBuf[i], numsBuf[i]);
}
// ------ concatenate merged tuples processed by threads ------
std::vector<IU> tdisp(nsplits+1);
tdisp[0] = 0;
for(int i=0; i<nsplits; ++i)
{
tdisp[i+1] = tdisp[i] + indsBuf[i].size();
}
mergedind.resize(tdisp[nsplits]);
mergednum.resize(tdisp[nsplits]);
#pragma omp parallel for schedule(dynamic)
for(int i=0; i< nsplits; i++)
{
std::copy(indsBuf[i].data() , indsBuf[i].data() + indsBuf[i].size(), mergedind.data() + tdisp[i]);
std::copy(numsBuf[i].data() , numsBuf[i].data() + numsBuf[i].size(), mergednum.data() + tdisp[i]);
}
}
/**
* This version is the most flexible sparse matrix X sparse vector [Used in KDT]
* It accepts different types for the matrix (NUM), the input vector (IVT) and the output vector (OVT)
* without relying on automatic type promotion
* Input (x) and output (y) vectors can be ALIASED because y is not written until the algorithm is done with x.
*/
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y,
bool indexisvalue, OptBuf<int32_t, OVT > & optbuf, PreAllocatedSPA<OVT> & SPA)
{
// Full distributed sparse matrix x sparse vector product, y = A*x over SR.
// Pipeline: (1) TransposeVector, (2) AllGatherVector along the process column,
// (3) LocalSpMV, (4) all-to-all along the process row, (5) merge contributions.
// x and y may alias: y is only written after the computation is done with x.
CheckSpMVCompliance(A,x);
optbuf.MarkEmpty();
y.glen = A.getnrow();	// in case it is not set already
MPI_Comm World = x.commGrid->GetWorld();
MPI_Comm ColWorld = x.commGrid->GetColWorld();
MPI_Comm RowWorld = x.commGrid->GetRowWorld();
int accnz;
int32_t trxlocnz;
IU lenuntil;
int32_t *trxinds, *indacc;
IVT *trxnums, *numacc;
#ifdef TIMING
double t0=MPI_Wtime();
#endif
TransposeVector(World, x, trxlocnz, lenuntil, trxinds, trxnums, indexisvalue);
#ifdef TIMING
double t1=MPI_Wtime();
cblas_transvectime += (t1-t0);
#endif
if(x.commGrid->GetGridRows() > 1)
{
AllGatherVector(ColWorld, trxlocnz, lenuntil, trxinds, trxnums, indacc, numacc, accnz, indexisvalue);	// trxinds/trxnums deallocated, indacc/numacc allocated, accnz set
}
else
{
// Single process row: the transposed piece already is the gathered vector.
accnz = trxlocnz;
indacc = trxinds;	// aliasing ptr
numacc = trxnums;	// aliasing ptr
}
int rowneighs;
MPI_Comm_size(RowWorld, &rowneighs);
int * sendcnt = new int[rowneighs]();
int32_t * sendindbuf;
OVT * sendnumbuf;
int * sdispls;
#ifdef TIMING
double t2=MPI_Wtime();
#endif
LocalSpMV<SR>(A, rowneighs, optbuf, indacc, numacc, sendindbuf, sendnumbuf, sdispls, sendcnt, accnz, indexisvalue, SPA);	// indacc/numacc deallocated, sendindbuf/sendnumbuf/sdispls allocated
#ifdef TIMING
double t3=MPI_Wtime();
cblas_localspmvtime += (t3-t2);
#endif
if(x.commGrid->GetGridCols() == 1)
{
// Single process column: no fold communication or merge needed; the local
// result (from whichever buffer was used) is the final y.
y.ind.resize(sendcnt[0]);
y.num.resize(sendcnt[0]);
if(optbuf.totmax > 0 )	// graph500 optimization enabled
{
#ifdef THREADED
#pragma omp parallel for
#endif
for(int i=0; i<sendcnt[0]; i++)
{
y.ind[i] = optbuf.inds[i];
y.num[i] = optbuf.nums[i];
}
}
else
{
#ifdef THREADED
#pragma omp parallel for
#endif
for(int i=0; i<sendcnt[0]; i++)
{
y.ind[i] = sendindbuf[i];
y.num[i] = sendnumbuf[i];
}
DeleteAll(sendindbuf, sendnumbuf,sdispls);
}
delete [] sendcnt;
return;
}
// Fold phase: exchange partial results along the process row.
int * rdispls = new int[rowneighs];
int * recvcnt = new int[rowneighs];
MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, RowWorld);	// share the request counts
// receive displacements are exact whereas send displacements have slack
rdispls[0] = 0;
for(int i=0; i<rowneighs-1; ++i)
{
rdispls[i+1] = rdispls[i] + recvcnt[i];
}
int totrecv = std::accumulate(recvcnt,recvcnt+rowneighs,0);
int32_t * recvindbuf = new int32_t[totrecv];
OVT * recvnumbuf = new OVT[totrecv];
#ifdef TIMING
double t4=MPI_Wtime();
#endif
if(optbuf.totmax > 0 )	// graph500 optimization enabled
{
MPI_Alltoallv(optbuf.inds, sendcnt, optbuf.dspls, MPIType<int32_t>(), recvindbuf, recvcnt, rdispls, MPIType<int32_t>(), RowWorld);
MPI_Alltoallv(optbuf.nums, sendcnt, optbuf.dspls, MPIType<OVT>(), recvnumbuf, recvcnt, rdispls, MPIType<OVT>(), RowWorld);
delete [] sendcnt;
}
else
{
MPI_Alltoallv(sendindbuf, sendcnt, sdispls, MPIType<int32_t>(), recvindbuf, recvcnt, rdispls, MPIType<int32_t>(), RowWorld);
MPI_Alltoallv(sendnumbuf, sendcnt, sdispls, MPIType<OVT>(), recvnumbuf, recvcnt, rdispls, MPIType<OVT>(), RowWorld);
DeleteAll(sendindbuf, sendnumbuf, sendcnt, sdispls);
}
#ifdef TIMING
double t5=MPI_Wtime();
cblas_alltoalltime += (t5-t4);
#endif
#ifdef TIMING
double t6=MPI_Wtime();
#endif
//MergeContributions<SR>(y,recvcnt, rdispls, recvindbuf, recvnumbuf, rowneighs);
// free memory of y, in case it was aliased
std::vector<IU>().swap(y.ind);
std::vector<OVT>().swap(y.num);
// Build per-neighbor list views into the receive buffers for the merge.
std::vector<int32_t *> indsvec(rowneighs);
std::vector<OVT *> numsvec(rowneighs);
#ifdef THREADED
#pragma omp parallel for
#endif
for(int i=0; i<rowneighs; i++)
{
indsvec[i] = recvindbuf+rdispls[i];
numsvec[i] = recvnumbuf+rdispls[i];
}
#ifdef THREADED
MergeContributions_threaded<SR>(recvcnt, indsvec, numsvec, y.ind, y.num, y.MyLocLength());
#else
MergeContributions<SR>(recvcnt, indsvec, numsvec, y.ind, y.num);
#endif
DeleteAll(recvcnt, rdispls,recvindbuf, recvnumbuf);
#ifdef TIMING
double t7=MPI_Wtime();
cblas_mergeconttime += (t7-t6);
#endif
}
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue, PreAllocatedSPA<OVT> & SPA)
{
OptBuf< int32_t, OVT > optbuf = OptBuf< int32_t,OVT >();
SpMV<SR>(A, x, y, indexisvalue, optbuf, SPA);
}
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue)
{
OptBuf< int32_t, OVT > optbuf = OptBuf< int32_t,OVT >();
PreAllocatedSPA<OVT> SPA;
SpMV<SR>(A, x, y, indexisvalue, optbuf, SPA);
}
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue, OptBuf<int32_t, OVT > & optbuf)
{
PreAllocatedSPA<OVT> SPA;
SpMV<SR>(A, x, y, indexisvalue, optbuf, SPA);
}
/**
* Automatic type promotion is ONLY done here, all the callee functions (in Friends.h and below) are initialized with the promoted type
* If indexisvalues = true, then we do not need to transfer values for x (happens for BFS iterations with boolean matrices and integer rhs vectors)
**/
template <typename SR, typename IU, typename NUM, typename UDER>
FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV
	(const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue, OptBuf<int32_t, typename promote_trait<NUM,IU>::T_promote > & optbuf)
{
	// Value-returning wrapper: the output type is the automatic promotion of
	// the matrix and vector value types (the only place promotion is applied).
	typedef typename promote_trait<NUM,IU>::T_promote T_promote;
	// identity doesn't matter for sparse vectors
	FullyDistSpVec<IU, T_promote> result( x.getcommgrid(), A.getnrow());
	SpMV<SR>(A, x, result, indexisvalue, optbuf);
	return result;
}
/**
* Parallel dense SpMV
**/
template <typename SR, typename IU, typename NUM, typename NUV, typename UDER>
FullyDistVec<IU,typename promote_trait<NUM,NUV>::T_promote>  SpMV
(const SpParMat<IU,NUM,UDER> & A, const FullyDistVec<IU,NUV> & x )
{
// Dense distributed SpMV: y = A*x where x and y are fully distributed dense
// vectors. Gathers x along process columns, runs the sequential kernel, and
// reduces partial results along process rows with the semiring's MPI op.
typedef typename promote_trait<NUM,NUV>::T_promote T_promote;
CheckSpMVCompliance(A, x);
MPI_Comm World = x.commGrid->GetWorld();
MPI_Comm ColWorld = x.commGrid->GetColWorld();
MPI_Comm RowWorld = x.commGrid->GetRowWorld();
// Swap local pieces with the transpose (diagonal) partner process.
int xsize = (int) x.LocArrSize();
int trxsize = 0;
int diagneigh = x.commGrid->GetComplementRank();
MPI_Status status;
MPI_Sendrecv(&xsize, 1, MPI_INT, diagneigh, TRX, &trxsize, 1, MPI_INT, diagneigh, TRX, World, &status);
NUV * trxnums = new NUV[trxsize];
MPI_Sendrecv(const_cast<NUV*>(SpHelper::p2a(x.arr)), xsize, MPIType<NUV>(), diagneigh, TRX, trxnums, trxsize, MPIType<NUV>(), diagneigh, TRX, World, &status);
// Allgather the transposed pieces along the process column.
int colneighs, colrank;
MPI_Comm_size(ColWorld, &colneighs);
MPI_Comm_rank(ColWorld, &colrank);
int * colsize = new int[colneighs];
colsize[colrank] = trxsize;
MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colsize, 1, MPI_INT, ColWorld);
int * dpls = new int[colneighs]();	// displacements (zero initialized pid)
std::partial_sum(colsize, colsize+colneighs-1, dpls+1);
int accsize = std::accumulate(colsize, colsize+colneighs, 0);
NUV * numacc = new NUV[accsize];
MPI_Allgatherv(trxnums, trxsize, MPIType<NUV>(), numacc, colsize, dpls, MPIType<NUV>(), ColWorld);
delete [] trxnums;
// serial SpMV with dense vector
T_promote id = SR::id();
IU ysize = A.getlocalrows();
T_promote * localy = new T_promote[ysize];
std::fill_n(localy, ysize, id);	// start from the semiring identity
#ifdef THREADED
dcsc_gespmv_threaded<SR>(*(A.spSeq), numacc, localy);
#else
dcsc_gespmv<SR>(*(A.spSeq), numacc, localy);
#endif
DeleteAll(numacc,colsize, dpls);
// FullyDistVec<IT,NT>(shared_ptr<CommGrid> grid, IT globallen, NT initval, NT id)
FullyDistVec<IU, T_promote> y ( x.commGrid, A.getnrow(), id);
int rowneighs;
MPI_Comm_size(RowWorld, &rowneighs);
IU begptr, endptr;
// Reduce each destination's slice of localy onto its owner along the row;
// only rank i of the row receives into y.arr in iteration i.
for(int i=0; i< rowneighs; ++i)
{
begptr = y.RowLenUntil(i);
if(i == rowneighs-1)
{
endptr = ysize;
}
else
{
endptr = y.RowLenUntil(i+1);
}
MPI_Reduce(localy+begptr, SpHelper::p2a(y.arr), endptr-begptr, MPIType<T_promote>(), SR::mpi_op(), i, RowWorld);
}
delete [] localy;
return y;
}
/**
* \TODO: Old version that is no longer considered optimal
* Kept for legacy purposes
* To be removed when other functionals are fully tested.
**/
template <typename SR, typename IU, typename NUM, typename NUV, typename UDER>
FullyDistSpVec<IU,typename promote_trait<NUM,NUV>::T_promote>  SpMV
	(const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,NUV> & x)
{
	// Legacy sparse SpMV (kept for backward compatibility; superseded by the
	// OptBuf/SPA variants above). y = A*x with automatic type promotion.
	typedef typename promote_trait<NUM,NUV>::T_promote T_promote;
	CheckSpMVCompliance(A, x);
	MPI_Comm World = x.commGrid->GetWorld();
	MPI_Comm ColWorld = x.commGrid->GetColWorld();
	MPI_Comm RowWorld = x.commGrid->GetRowWorld();
	// --- Step 1: exchange local piece with the transpose (diagonal) partner ---
	int xlocnz = (int) x.getlocnnz();
	int trxlocnz = 0;
	int roffst = x.RowLenUntil();
	int offset;
	int diagneigh = x.commGrid->GetComplementRank();
	MPI_Status status;
	MPI_Sendrecv(&xlocnz, 1, MPI_INT, diagneigh, TRX, &trxlocnz, 1, MPI_INT, diagneigh, TRX, World, &status);
	MPI_Sendrecv(&roffst, 1, MPI_INT, diagneigh, TROST, &offset, 1, MPI_INT, diagneigh, TROST, World, &status);
	IU * trxinds = new IU[trxlocnz];
	NUV * trxnums = new NUV[trxlocnz];
	MPI_Sendrecv(const_cast<IU*>(SpHelper::p2a(x.ind)), xlocnz, MPIType<IU>(), diagneigh, TRX, trxinds, trxlocnz, MPIType<IU>(), diagneigh, TRX, World, &status);
	MPI_Sendrecv(const_cast<NUV*>(SpHelper::p2a(x.num)), xlocnz, MPIType<NUV>(), diagneigh, TRX, trxnums, trxlocnz, MPIType<NUV>(), diagneigh, TRX, World, &status);
	// FIX: std::bind2nd was deprecated in C++11 and removed in C++17; a lambda
	// performs the same shift.
	// fullydist indexing (n pieces) -> matrix indexing (sqrt(p) pieces)
	std::transform(trxinds, trxinds+trxlocnz, trxinds,
			[offset](IU ind) { return ind + offset; });
	// --- Step 2: allgather the transposed pieces along the process column ---
	int colneighs, colrank;
	MPI_Comm_size(ColWorld, &colneighs);
	MPI_Comm_rank(ColWorld, &colrank);
	int * colnz = new int[colneighs];
	colnz[colrank] = trxlocnz;
	MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colnz, 1, MPI_INT, ColWorld);
	int * dpls = new int[colneighs]();	// displacements (zero initialized pid)
	std::partial_sum(colnz, colnz+colneighs-1, dpls+1);
	int accnz = std::accumulate(colnz, colnz+colneighs, 0);
	IU * indacc = new IU[accnz];
	NUV * numacc = new NUV[accnz];
	// ABAB: Future issues here, colnz is of type int (MPI limitation)
	// What if the aggregate vector size along the processor row/column is not 32-bit addressible?
	MPI_Allgatherv(trxinds, trxlocnz, MPIType<IU>(), indacc, colnz, dpls, MPIType<IU>(), ColWorld);
	MPI_Allgatherv(trxnums, trxlocnz, MPIType<NUV>(), numacc, colnz, dpls, MPIType<NUV>(), ColWorld);
	DeleteAll(trxinds, trxnums);
	// --- Step 3: serial SpMV with sparse vector (indices narrowed to 32 bits) ---
	std::vector< int32_t > indy;
	std::vector< T_promote >  numy;
	int32_t * tmpindacc = new int32_t[accnz];
	for(int i=0; i< accnz; ++i)
		tmpindacc[i] = indacc[i];
	delete [] indacc;
	dcsc_gespmv<SR>(*(A.spSeq), tmpindacc, numacc, accnz, indy, numy);	// actual multiplication
	DeleteAll(tmpindacc, numacc);
	DeleteAll(colnz, dpls);
	// --- Step 4: route result entries to their owners along the process row ---
	FullyDistSpVec<IU, T_promote> y ( x.commGrid, A.getnrow());	// identity doesn't matter for sparse vectors
	IU yintlen = y.MyRowLength();
	int rowneighs;
	MPI_Comm_size(RowWorld,&rowneighs);
	std::vector< std::vector<IU> > sendind(rowneighs);
	std::vector< std::vector<T_promote> > sendnum(rowneighs);
	typename std::vector<int32_t>::size_type outnz = indy.size();
	for(typename std::vector<IU>::size_type i=0; i< outnz; ++i)
	{
		IU locind;
		int rown = y.OwnerWithinRow(yintlen, static_cast<IU>(indy[i]), locind);
		sendind[rown].push_back(locind);
		sendnum[rown].push_back(numy[i]);
	}
	IU * sendindbuf = new IU[outnz];
	T_promote * sendnumbuf = new T_promote[outnz];
	int * sendcnt = new int[rowneighs];
	int * sdispls = new int[rowneighs];
	for(int i=0; i<rowneighs; ++i)
		sendcnt[i] = sendind[i].size();
	int * rdispls = new int[rowneighs];
	int * recvcnt = new int[rowneighs];
	MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, RowWorld);	// share the request counts
	sdispls[0] = 0;
	rdispls[0] = 0;
	for(int i=0; i<rowneighs-1; ++i)
	{
		sdispls[i+1] = sdispls[i] + sendcnt[i];
		rdispls[i+1] = rdispls[i] + recvcnt[i];
	}
	int totrecv = std::accumulate(recvcnt,recvcnt+rowneighs,0);
	IU * recvindbuf = new IU[totrecv];
	T_promote * recvnumbuf = new T_promote[totrecv];
	// Flatten the per-destination vectors into contiguous send buffers,
	// releasing each vector's memory as soon as it is copied.
	for(int i=0; i<rowneighs; ++i)
	{
		std::copy(sendind[i].begin(), sendind[i].end(), sendindbuf+sdispls[i]);
		std::vector<IU>().swap(sendind[i]);
	}
	for(int i=0; i<rowneighs; ++i)
	{
		std::copy(sendnum[i].begin(), sendnum[i].end(), sendnumbuf+sdispls[i]);
		std::vector<T_promote>().swap(sendnum[i]);
	}
	MPI_Alltoallv(sendindbuf, sendcnt, sdispls, MPIType<IU>(), recvindbuf, recvcnt, rdispls, MPIType<IU>(), RowWorld);
	MPI_Alltoallv(sendnumbuf, sendcnt, sdispls, MPIType<T_promote>(), recvnumbuf, recvcnt, rdispls, MPIType<T_promote>(), RowWorld);
	DeleteAll(sendindbuf, sendnumbuf);
	DeleteAll(sendcnt, recvcnt, sdispls, rdispls);
	// --- Step 5: combine received entries with a SPA-like accumulator ---
	// define a SPA-like data structure
	IU ysize = y.MyLocLength();
	T_promote * localy = new T_promote[ysize];
	bool * isthere = new bool[ysize];
	std::vector<IU> nzinds;	// nonzero indices
	std::fill_n(isthere, ysize, false);
	for(int i=0; i< totrecv; ++i)
	{
		if(!isthere[recvindbuf[i]])
		{
			localy[recvindbuf[i]] = recvnumbuf[i];	// initial assignment
			nzinds.push_back(recvindbuf[i]);
			isthere[recvindbuf[i]] = true;
		}
		else
		{
			// Duplicate index: combine with the semiring addition.
			localy[recvindbuf[i]] = SR::add(localy[recvindbuf[i]], recvnumbuf[i]);
		}
	}
	DeleteAll(isthere, recvindbuf, recvnumbuf);
	sort(nzinds.begin(), nzinds.end());
	int nnzy = nzinds.size();
	y.ind.resize(nnzy);
	y.num.resize(nnzy);
	for(int i=0; i< nnzy; ++i)
	{
		y.ind[i] = nzinds[i];
		y.num[i] = localy[nzinds[i]];
	}
	delete [] localy;
	return y;
}
template <typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU,typename promote_trait<NU1,NU2>::T_promote,typename promote_trait<UDERA,UDERB>::T_promote> EWiseMult
	(const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B , bool exclude)
{
	// Elementwise multiply (or exclusion, when exclude==true) of two sparse
	// matrices; both operands must share the same process grid.
	typedef typename promote_trait<NU1,NU2>::T_promote N_promote;
	typedef typename promote_trait<UDERA,UDERB>::T_promote DER_promote;
	if(!(*(A.commGrid) == *(B.commGrid)))
	{
		std::cout << "Grids are not comparable elementwise multiplication" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
		return SpParMat< IU,N_promote,DER_promote >();
	}
	// Grids match: apply the sequential kernel locally; the distribution is
	// unchanged, so the result reuses A's communication grid.
	DER_promote * localResult = new DER_promote( EWiseMult(*(A.spSeq),*(B.spSeq),exclude) );
	return SpParMat<IU, N_promote, DER_promote> (localResult, A.commGrid);
}
template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation>
SpParMat<IU,RETT,RETDER> EWiseApply
	(const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, bool notB, const NU2& defaultBVal)
{
	// Elementwise apply of __binary_op over A and B (notB/defaultBVal control
	// how entries absent from B are treated). Operands must share a grid.
	if(!(*(A.commGrid) == *(B.commGrid)))
	{
		std::cout << "Grids are not comparable elementwise apply" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
		return SpParMat< IU,RETT,RETDER >();
	}
	// Grids match: delegate to the sequential kernel and wrap the result on
	// A's communication grid.
	RETDER * localResult = new RETDER( EWiseApply<RETT>(*(A.spSeq),*(B.spSeq), __binary_op, notB, defaultBVal) );
	return SpParMat<IU, RETT, RETDER> (localResult, A.commGrid);
}
template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation, typename _BinaryPredicate>
SpParMat<IU,RETT,RETDER> EWiseApply
	(const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, _BinaryPredicate do_op, bool allowANulls, bool allowBNulls, const NU1& ANullVal, const NU2& BNullVal, const bool allowIntersect, const bool useExtendedBinOp)
{
	// Extended elementwise apply: do_op decides whether an output entry is
	// produced; allow*Nulls/*NullVal govern entries missing from one operand.
	// Operands must share a grid.
	if(!(*(A.commGrid) == *(B.commGrid)))
	{
		std::cout << "Grids are not comparable elementwise apply" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
		return SpParMat< IU,RETT,RETDER >();
	}
	// Grids match: run the sequential extended kernel and wrap the result on
	// A's communication grid.
	RETDER * localResult = new RETDER( EWiseApply<RETT>(*(A.spSeq),*(B.spSeq), __binary_op, do_op, allowANulls, allowBNulls, ANullVal, BNullVal, allowIntersect) );
	return SpParMat<IU, RETT, RETDER> (localResult, A.commGrid);
}
// Plain adapter: lets callers pass ordinary binary functors (operating on the
// two values only) to the extended EWiseApply above, which expects functors
// that also receive null-ness information. The adapters wrap the plain
// functors; the trailing 'true' selects the extended-binop code path.
template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation, typename _BinaryPredicate>
SpParMat<IU,RETT,RETDER>
EWiseApply (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, _BinaryPredicate do_op, bool allowANulls, bool allowBNulls, const NU1& ANullVal, const NU2& BNullVal, const bool allowIntersect = true)
{
return EWiseApply<RETT, RETDER>(A, B,
EWiseExtToPlainAdapter<RETT, NU1, NU2, _BinaryOperation>(__binary_op),
EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate>(do_op),
allowANulls, allowBNulls, ANullVal, BNullVal, allowIntersect, true);
}
// end adapter
/**
 * Elementwise multiply of a sparse vector V by a dense vector W.
 * if exclude is true, then we prune all entries W[i] != zero from V
 * (i.e. the result keeps only V's entries whose W counterpart equals 'zero')
 * if exclude is false, then we perform a proper elementwise multiplication
 * (entries whose W counterpart equals 'zero' are dropped from the result)
 **/
template <typename IU, typename NU1, typename NU2>
FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote> EWiseMult
	(const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , bool exclude, NU2 zero)
{
	// Result value type is the promotion of the two operand value types.
	typedef typename promote_trait<NU1,NU2>::T_promote T_promote;
	// Both vectors must be distributed over the same processor grid.
	if(*(V.commGrid) == *(W.commGrid))
	{
		FullyDistSpVec< IU, T_promote> Product(V.commGrid);
		if(V.glen != W.glen)
		{
			std::cerr << "Vector dimensions don't match for EWiseMult\n";
			MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
		}
		else
		{
			Product.glen = V.glen;
			// Iterate only over the local nonzeros of the sparse operand.
			IU size= V.getlocnnz();
			if(exclude)
			{
#if defined(_OPENMP) && defined(CBLAS_EXPERIMENTAL)	// not faster than serial
				// Experimental threaded path: each split filters its chunk
				// into thread-local buffers, then a prefix sum over the
				// per-split sizes gives each split's output offset.
				int actual_splits = cblas_splits * 1;	// 1 is the parallel slackness
				std::vector <IU> tlosizes (actual_splits, 0);
				std::vector < std::vector<IU> > tlinds(actual_splits);
				std::vector < std::vector<T_promote> > tlnums(actual_splits);
				IU tlsize = size / actual_splits;
				#pragma omp parallel for //schedule(dynamic, 1)
				for(IU t = 0; t < actual_splits; ++t)
				{
					IU tlbegin = t*tlsize;
					// The last split also absorbs the division remainder.
					IU tlend = (t==actual_splits-1)? size : (t+1)*tlsize;
					for(IU i=tlbegin; i<tlend; ++i)
					{
						if(W.arr[V.ind[i]] == zero)	// keep only those
						{
							tlinds[t].push_back(V.ind[i]);
							tlnums[t].push_back(V.num[i]);
							tlosizes[t]++;
						}
					}
				}
				// prefix_sum[t] = where split t's results start in the output.
				std::vector<IU> prefix_sum(actual_splits+1,0);
				std::partial_sum(tlosizes.begin(), tlosizes.end(), prefix_sum.begin()+1);
				Product.ind.resize(prefix_sum[actual_splits]);
				Product.num.resize(prefix_sum[actual_splits]);
				// Stitch the thread-local buffers together in parallel.
				#pragma omp parallel for //schedule(dynamic, 1)
				for(IU t=0; t< actual_splits; ++t)
				{
					std::copy(tlinds[t].begin(), tlinds[t].end(), Product.ind.begin()+prefix_sum[t]);
					std::copy(tlnums[t].begin(), tlnums[t].end(), Product.num.begin()+prefix_sum[t]);
				}
#else
				// Serial path: keep V entries whose dense counterpart is 'zero'.
				for(IU i=0; i<size; ++i)
				{
					if(W.arr[V.ind[i]] == zero)	// keep only those
					{
						Product.ind.push_back(V.ind[i]);
						Product.num.push_back(V.num[i]);
					}
				}
#endif
			}
			else
			{
				// Proper multiply: entries hit by 'zero' vanish from the result.
				for(IU i=0; i<size; ++i)
				{
					if(W.arr[V.ind[i]] != zero)	// keep only those
					{
						Product.ind.push_back(V.ind[i]);
						Product.num.push_back(V.num[i] * W.arr[V.ind[i]]);
					}
				}
			}
		}
		return Product;
	}
	else
	{
		std::cout << "Grids are not comparable elementwise multiplication" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
		return FullyDistSpVec< IU,T_promote>();
	}
}
/**
 Threaded EWiseApply. Only called internally from EWiseApply.
 Partitions the index space statically among OpenMP threads; each thread
 writes into its own temporary buffers, and the buffers are concatenated
 into the result using per-thread displacements.
 **/
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply_threaded
	(const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp)
{
	typedef RET T_promote; //typedef typename promote_trait<NU1,NU2>::T_promote T_promote;
	// Operands must share a processor grid.
	if(*(V.commGrid) == *(W.commGrid))
	{
		FullyDistSpVec< IU, T_promote> Product(V.commGrid);
		if(V.TotalLength() != W.TotalLength())
		{
			std::ostringstream outs;
			outs << "Vector dimensions don't match (" << V.TotalLength() << " vs " << W.TotalLength() << ") for EWiseApply (short version)\n";
			SpParHelper::Print(outs.str());
			MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
		}
		else
		{
			// Discover the thread count up front so the temporary buffers
			// can be sized before entering the parallel region.
			int nthreads=1;
#ifdef _OPENMP
#pragma omp parallel
			{
				nthreads = omp_get_num_threads();
			}
#endif
			Product.glen = V.glen;
			IU size= W.LocArrSize();    // local length of the dense operand
			IU spsize = V.getlocnnz();  // local nonzeros of the sparse operand
			// temporary result vectors per thread
			std::vector<std::vector<IU>> tProductInd(nthreads);
			std::vector<std::vector<T_promote>> tProductVal(nthreads);
			IU perthread; //chunk of tProductInd or tProductVal allocated to each thread
			// When V-nulls are allowed the whole dense range is visited,
			// otherwise only the sparse entries are.
			if (allowVNulls)
				perthread = size/nthreads;
			else
				perthread = spsize/nthreads;
#ifdef _OPENMP
#pragma omp parallel
#endif
			{
				int curthread = 0;
#ifdef _OPENMP
				curthread = omp_get_thread_num();
#endif
				// Static partition [tStartIdx, tNextIdx) for this thread.
				IU tStartIdx = perthread * curthread;
				IU tNextIdx = perthread * (curthread+1);
				if (allowVNulls)
				{
					// Last thread absorbs the division remainder.
					if(curthread == nthreads-1) tNextIdx = size;
					// get sparse part for the current thread
					auto it = std::lower_bound (V.ind.begin(), V.ind.end(), tStartIdx);
					IU tSpIdx = (IU) std::distance(V.ind.begin(), it);
					// iterate over the dense vector
					for(IU tIdx=tStartIdx; tIdx < tNextIdx; ++tIdx)
					{
						// V has an entry at this dense position
						if(tSpIdx < spsize && V.ind[tSpIdx] < tNextIdx && V.ind[tSpIdx] == tIdx)
						{
							if (_doOp(V.num[tSpIdx], W.arr[tIdx], false, false))
							{
								tProductInd[curthread].push_back(tIdx);
								tProductVal[curthread].push_back (_binary_op(V.num[tSpIdx], W.arr[tIdx], false, false));
							}
							tSpIdx++;
						}
						else
						{
							// V is null here: substitute Vzero (vIsNull = true).
							if (_doOp(Vzero, W.arr[tIdx], true, false))
							{
								tProductInd[curthread].push_back(tIdx);
								tProductVal[curthread].push_back (_binary_op(Vzero, W.arr[tIdx], true, false));
							}
						}
					}
				}
				else // iterate over the sparse vector
				{
					if(curthread == nthreads-1) tNextIdx = spsize;
					for(IU tSpIdx=tStartIdx; tSpIdx < tNextIdx; ++tSpIdx)
					{
						if (_doOp(V.num[tSpIdx], W.arr[V.ind[tSpIdx]], false, false))
						{
							tProductInd[curthread].push_back( V.ind[tSpIdx]);
							tProductVal[curthread].push_back (_binary_op(V.num[tSpIdx], W.arr[V.ind[tSpIdx]], false, false));
						}
					}
				}
			}
			// Exclusive prefix sum of per-thread output counts: tdisp[i] is
			// where thread i's results start in the final vectors.
			std::vector<IU> tdisp(nthreads+1);
			tdisp[0] = 0;
			for(int i=0; i<nthreads; ++i)
			{
				tdisp[i+1] = tdisp[i] + tProductInd[i].size();
			}
			// copy results from temporary vectors
			Product.ind.resize(tdisp[nthreads]);
			Product.num.resize(tdisp[nthreads]);
#ifdef _OPENMP
#pragma omp parallel
#endif
			{
				int curthread = 0;
#ifdef _OPENMP
				curthread = omp_get_thread_num();
#endif
				// Disjoint destination ranges, so no synchronization needed.
				std::copy(tProductInd[curthread].begin(), tProductInd[curthread].end(), Product.ind.data() + tdisp[curthread]);
				std::copy(tProductVal[curthread].begin() , tProductVal[curthread].end(), Product.num.data() + tdisp[curthread]);
			}
		}
		return Product;
	}
	else
	{
		std::cout << "Grids are not comparable for EWiseApply" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
		return FullyDistSpVec< IU,T_promote>();
	}
}
/**
 * Performs an arbitrary binary operation _binary_op on the corresponding elements of two vectors with the result stored in a return vector ret.
 * The binary operatiation is only performed if the binary predicate _doOp returns true for those elements. Otherwise the binary operation is not
 * performed and ret does not contain an element at that position.
 * More formally the operation is defined as:
 * if (_doOp(V[i], W[i]))
 *    ret[i] = _binary_op(V[i], W[i])
 * else
 *    // ret[i] is not set
 * Hence _doOp can be used to implement a filter on either of the vectors.
 *
 * The above is only defined if both V[i] and W[i] exist (i.e. an intersection). To allow a union operation (ex. when V[i] doesn't exist but W[i] does)
 * the allowVNulls flag is set to true and the Vzero argument is used as the missing V[i] value.
 *
 * The type of each element of ret must not necessarily be related to the types of V or W, so the return type must be explicitly specified as a template parameter:
 * FullyDistSpVec<int, double> r = EWiseApply<double>(V, W, plus, retTrue, false, 0)
 **/
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply
	(const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp)
{
#ifdef _OPENMP
	// When built with OpenMP, delegate to the threaded implementation above
	// (same semantics, statically partitioned among threads).
	return EWiseApply_threaded<RET>(V, W, _binary_op, _doOp, allowVNulls, Vzero, useExtendedBinOp);
#else
	typedef RET T_promote; //typedef typename promote_trait<NU1,NU2>::T_promote T_promote;
	// Operands must share a processor grid.
	if(*(V.commGrid) == *(W.commGrid))
	{
		FullyDistSpVec< IU, T_promote> Product(V.commGrid);
		//FullyDistVec< IU, NU1> DV (V); // Ariful: I am not sure why it was there??
		if(V.TotalLength() != W.TotalLength())
		{
			std::ostringstream outs;
			outs << "Vector dimensions don't match (" << V.TotalLength() << " vs " << W.TotalLength() << ") for EWiseApply (short version)\n";
			SpParHelper::Print(outs.str());
			MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
		}
		else
		{
			Product.glen = V.glen;
			IU size= W.LocArrSize();    // local length of the dense operand
			IU spsize = V.getlocnnz();  // local nonzeros of the sparse operand
			IU sp_iter = 0;             // cursor into V's (sorted) index array
			if (allowVNulls)
			{
				// iterate over the dense vector
				for(IU i=0; i<size; ++i)
				{
					// V has an entry at this dense position
					if(sp_iter < spsize && V.ind[sp_iter] == i)
					{
						if (_doOp(V.num[sp_iter], W.arr[i], false, false))
						{
							Product.ind.push_back(i);
							Product.num.push_back(_binary_op(V.num[sp_iter], W.arr[i], false, false));
						}
						sp_iter++;
					}
					else
					{
						// V is null here: substitute Vzero (vIsNull = true).
						if (_doOp(Vzero, W.arr[i], true, false))
						{
							Product.ind.push_back(i);
							Product.num.push_back(_binary_op(Vzero, W.arr[i], true, false));
						}
					}
				}
			}
			else
			{
				// iterate over the sparse vector
				for(sp_iter = 0; sp_iter < spsize; ++sp_iter)
				{
					if (_doOp(V.num[sp_iter], W.arr[V.ind[sp_iter]], false, false))
					{
						Product.ind.push_back(V.ind[sp_iter]);
						Product.num.push_back(_binary_op(V.num[sp_iter], W.arr[V.ind[sp_iter]], false, false));
					}
				}
			}
		}
		return Product;
	}
	else
	{
		std::cout << "Grids are not comparable for EWiseApply" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
		return FullyDistSpVec< IU,T_promote>();
	}
#endif
}
/**
 * Performs an arbitrary binary operation _binary_op on the corresponding elements of two vectors with the result stored in a return vector ret.
 * The binary operatiation is only performed if the binary predicate _doOp returns true for those elements. Otherwise the binary operation is not
 * performed and ret does not contain an element at that position.
 * More formally the operation is defined as:
 * if (_doOp(V[i], W[i]))
 *    ret[i] = _binary_op(V[i], W[i])
 * else
 *    // ret[i] is not set
 * Hence _doOp can be used to implement a filter on either of the vectors.
 *
 * The above is only defined if both V[i] and W[i] exist (i.e. an intersection). To allow a union operation (ex. when V[i] doesn't exist but W[i] does)
 * the allowVNulls flag is set to true and the Vzero argument is used as the missing V[i] value.
 * !allowVNulls && !allowWNulls => intersection
 * !allowVNulls &&  allowWNulls => operate on all elements of V
 *  allowVNulls && !allowWNulls => operate on all elements of W
 *  allowVNulls &&  allowWNulls => union
 *
 * The type of each element of ret must not necessarily be related to the types of V or W, so the return type must be explicitly specified as a template parameter:
 * FullyDistSpVec<int, double> r = EWiseApply<double>(V, W, plus, ...)
 * For intersection, Vzero and Wzero are irrelevant
 * ABAB: \todo: Should allowIntersect be "false" for all SetDifference uses?
 **/
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply
	(const FullyDistSpVec<IU,NU1> & V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2 Wzero, const bool allowIntersect, const bool useExtendedBinOp)
{
	typedef RET T_promote; // typename promote_trait<NU1,NU2>::T_promote T_promote;
	// Operands must share a processor grid.
	if(*(V.commGrid) == *(W.commGrid))
	{
		FullyDistSpVec< IU, T_promote> Product(V.commGrid);
		if(V.glen != W.glen)
		{
			std::ostringstream outs;
			outs << "Vector dimensions don't match (" << V.glen << " vs " << W.glen << ") for EWiseApply (full version)\n";
			SpParHelper::Print(outs.str());
			MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
		}
		else
		{
			Product.glen = V.glen;
			// Classic sorted two-pointer merge over the two index arrays.
			typename std::vector< IU  >::const_iterator indV = V.ind.begin();
			typename std::vector< NU1 >::const_iterator numV = V.num.begin();
			typename std::vector< IU  >::const_iterator indW = W.ind.begin();
			typename std::vector< NU2 >::const_iterator numW = W.num.begin();
			while (indV < V.ind.end() && indW < W.ind.end())
			{
				if (*indV == *indW)
				{
					// overlap
					if (allowIntersect)
					{
						if (_doOp(*numV, *numW, false, false))
						{
							Product.ind.push_back(*indV);
							Product.num.push_back(_binary_op(*numV, *numW, false, false));
						}
					}
					indV++; numV++;
					indW++; numW++;
				}
				else if (*indV < *indW)
				{
					// V has value but W does not
					if (allowWNulls)
					{
						if (_doOp(*numV, Wzero, false, true))
						{
							Product.ind.push_back(*indV);
							Product.num.push_back(_binary_op(*numV, Wzero, false, true));
						}
					}
					indV++; numV++;
				}
				else //(*indV > *indW)
				{
					// W has value but V does not
					if (allowVNulls)
					{
						if (_doOp(Vzero, *numW, true, false))
						{
							Product.ind.push_back(*indW);
							Product.num.push_back(_binary_op(Vzero, *numW, true, false));
						}
					}
					indW++; numW++;
				}
			}
			// clean up: drain whichever operand still has entries left
			// (only meaningful when the other side's nulls are allowed).
			while (allowWNulls && indV < V.ind.end())
			{
				if (_doOp(*numV, Wzero, false, true))
				{
					Product.ind.push_back(*indV);
					Product.num.push_back(_binary_op(*numV, Wzero, false, true));
				}
				indV++; numV++;
			}
			while (allowVNulls && indW < W.ind.end())
			{
				if (_doOp(Vzero, *numW, true, false))
				{
					Product.ind.push_back(*indW);
					Product.num.push_back(_binary_op(Vzero, *numW, true, false));
				}
				indW++; numW++;
			}
		}
		return Product;
	}
	else
	{
		std::cout << "Grids are not comparable for EWiseApply" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
		return FullyDistSpVec< IU,T_promote>();
	}
}
// plain callback versions
// Sparse-by-dense adapter: lifts plain two-argument callbacks to the
// extended (value, value, vIsNull, wIsNull) convention and delegates.
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply
	(const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero)
{
	EWiseExtToPlainAdapter<RET, NU1, NU2, _BinaryOperation> opWrap(_binary_op);
	EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate> predWrap(_doOp);
	return EWiseApply<RET>(V, W, opWrap, predWrap, allowVNulls, Vzero, true);
}
// Sparse-by-sparse adapter: same lifting as above, forwarding the full
// null-handling flag set to the extended implementation.
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply
	(const FullyDistSpVec<IU,NU1> & V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2 Wzero, const bool allowIntersect = true)
{
	EWiseExtToPlainAdapter<RET, NU1, NU2, _BinaryOperation> opWrap(_binary_op);
	EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate> predWrap(_doOp);
	return EWiseApply<RET>(V, W, opWrap, predWrap, allowVNulls, allowWNulls, Vzero, Wzero, allowIntersect, true);
}
}
#endif
|
track_ellipse.c | #include "track_ellipse.h"
void ellipsetrack(avi_t *video, double *xc0, double *yc0, int Nc, int R, int Np, int Nf) {
	/*
	% ELLIPSETRACK tracks cells in the movie specified by 'video', at
	%  locations 'xc0'/'yc0' with radii R using an ellipse with Np discrete
	%  points, starting at frame number one and stopping at frame number 'Nf'.
	%
	% INPUTS:
	%   video.......pointer to avi video object
	%   xc0,yc0.....initial center location (Nc entries)
	%   Nc..........number of cells
	%   R...........initial radius
	%   Np..........nbr of snaxels points per snake
	%   Nf..........nbr of frames in which to track
	%
	% Matlab code written by: DREW GILLIAM (based on code by GANG DONG /
	%                         NILANJAN RAY)
	% Ported to C by: MICHAEL BOYER
	*/
	int i, j;
	// Compute angle parameter: Np evenly spaced angles over [0, 2*pi)
	double *t = (double *) malloc(sizeof(double) * Np);
	double increment = (2.0 * PI) / (double) Np;
	for (i = 0; i < Np; i++) {
		t[i] = increment * (double) i ;
	}
	// Allocate space for a snake for each cell in each frame
	double **xc = alloc_2d_double(Nc, Nf + 1);
	double **yc = alloc_2d_double(Nc, Nf + 1);
	double ***r = alloc_3d_double(Nc, Np, Nf + 1);
	double ***x = alloc_3d_double(Nc, Np, Nf + 1);
	double ***y = alloc_3d_double(Nc, Np, Nf + 1);
	// Save the first snake for each cell (all radii start at R)
	for (i = 0; i < Nc; i++) {
		xc[i][0] = xc0[i];
		yc[i][0] = yc0[i];
		for (j = 0; j < Np; j++) {
			r[i][j][0] = (double) R;
		}
	}
	// Generate ellipse points for each cell
	for (i = 0; i < Nc; i++) {
		for (j = 0; j < Np; j++) {
			x[i][j][0] = xc[i][0] + (r[i][j][0] * cos(t[j]));
			y[i][j][0] = yc[i][0] + (r[i][j][0] * sin(t[j]));
		}
	}
	// Keep track of the total time spent on computing
	//  the MGVF matrix and evolving the snakes
	long long MGVF_time = 0;
	long long snake_time = 0;
	// Process each frame
	int frame_num, cell_num;
	for (frame_num = 1; frame_num <= Nf; frame_num++) {
		printf("\rProcessing frame %d / %d", frame_num, Nf);
		fflush(stdout);
		// Get the current video frame and its dimensions
		// NOTE(review): 'I' is never freed here; ownership is unclear from
		// this file -- confirm whether get_frame() transfers ownership.
		MAT *I = get_frame(video, frame_num, 0, 1);
		int Ih = I->m;
		int Iw = I->n;
		// Set the current positions equal to the previous positions
		for (i = 0; i < Nc; i++) {
			xc[i][frame_num] = xc[i][frame_num - 1];
			yc[i][frame_num] = yc[i][frame_num - 1];
			for (j = 0; j < Np; j++) {
				r[i][j][frame_num] = r[i][j][frame_num - 1];
			}
		}
		// Split the work among multiple threads, if OPEN is defined.
		// The reduction clause makes the timing accumulation race-free
		// (the original code updated the shared counters unsynchronized).
		#ifdef OPEN
		#pragma omp parallel for num_threads(omp_num_threads) private(i, j) reduction(+:MGVF_time,snake_time)
		#endif
		// Track each cell
		for (cell_num = 0; cell_num < Nc; cell_num++) {
			// Make copies of the current cell's location
			double xci = xc[cell_num][frame_num];
			double yci = yc[cell_num][frame_num];
			double *ri = (double *) malloc(sizeof(double) * Np);
			for (j = 0; j < Np; j++) {
				ri[j] = r[cell_num][j][frame_num];
			}
			// Add up the last ten y-values for this cell
			//  (or fewer if there are not yet ten previous frames)
			double ycavg = 0.0;
			for (i = (frame_num > 10 ? frame_num - 10 : 0); i < frame_num; i++) {
				ycavg += yc[cell_num][i];
			}
			// Compute the average of the last ten y-values
			//  (this represents the expected y-location of the cell)
			ycavg = ycavg / (double) (frame_num > 10 ? 10 : frame_num);
			// Determine the range of the subimage surrounding the current position,
			// clamped to the image bounds
			int u1 = max(xci - 4.0 * R + 0.5, 0 );
			int u2 = min(xci + 4.0 * R + 0.5, Iw - 1);
			int v1 = max(yci - 2.0 * R + 1.5, 0 );
			int v2 = min(yci + 2.0 * R + 1.5, Ih - 1);
			// Extract the subimage
			MAT *Isub = m_get(v2 - v1 + 1, u2 - u1 + 1);
			for (i = v1; i <= v2; i++) {
				for (j = u1; j <= u2; j++) {
					m_set_val(Isub, i - v1, j - u1, m_get_val(I, i, j));
				}
			}
			// Compute the subimage gradient magnitude
			MAT *Ix = gradient_x(Isub);
			MAT *Iy = gradient_y(Isub);
			MAT *IE = m_get(Isub->m, Isub->n);
			for (i = 0; i < Isub->m; i++) {
				for (j = 0; j < Isub->n; j++) {
					double temp_x = m_get_val(Ix, i, j);
					double temp_y = m_get_val(Iy, i, j);
					m_set_val(IE, i, j, sqrt((temp_x * temp_x) + (temp_y * temp_y)));
				}
			}
			// Compute the motion gradient vector flow (MGVF) edgemaps
			long long MGVF_start_time = get_time();
			MAT *IMGVF = MGVF(IE, 1, 1);
			MGVF_time += get_time() - MGVF_start_time;
			// Determine the position of the cell in the subimage
			xci = xci - (double) u1;
			yci = yci - (double) (v1 - 1);
			ycavg = ycavg - (double) (v1 - 1);
			// Evolve the snake
			long long snake_start_time = get_time();
			ellipseevolve(IMGVF, &xci, &yci, ri, t, Np, (double) R, ycavg);
			snake_time += get_time() - snake_start_time;
			// Compute the cell's new position in the full image
			xci = xci + u1;
			yci = yci + (v1 - 1);
			// Store the new location of the cell and the snake
			xc[cell_num][frame_num] = xci;
			yc[cell_num][frame_num] = yci;
			for (j = 0; j < Np; j++) {
				r[cell_num][j][frame_num] = ri[j];
				x[cell_num][j][frame_num] = xc[cell_num][frame_num] + (ri[j] * cos(t[j]));
				y[cell_num][j][frame_num] = yc[cell_num][frame_num] + (ri[j] * sin(t[j]));
			}
			// Output the updated center of each cell
			//printf("%d,%f,%f\n", cell_num, xci[cell_num], yci[cell_num]);
			// Free temporary memory (the original code leaked Isub, Ix, Iy
			// and IE on every cell of every frame; gradient_x/gradient_y and
			// m_get allocate fresh matrices owned by the caller, exactly as
			// ellipseevolve frees its own gradient matrices)
			m_free(IMGVF);
			m_free(IE);
			m_free(Iy);
			m_free(Ix);
			m_free(Isub);
			free(ri);
		}
		#ifdef OUTPUT
		if (frame_num == Nf)
		{
			FILE * pFile;
			pFile = fopen ("result.txt","w+");
			for (cell_num = 0; cell_num < Nc; cell_num++)
				fprintf(pFile,"\n%d,%f,%f", cell_num, xc[cell_num][Nf], yc[cell_num][Nf]);
			fclose (pFile);
		}
		#endif
		// Output a new line to visually distinguish the output from different frames
		//printf("\n");
	}
	// Free temporary memory
	free(t);
	free_2d_double(xc);
	free_2d_double(yc);
	free_3d_double(r);
	free_3d_double(x);
	free_3d_double(y);
	// Report average processing time per frame
	printf("\n\nTracking runtime (average per frame):\n");
	printf("------------------------------------\n");
	printf("MGVF computation: %.5f seconds\n", ((float) (MGVF_time)) / (float) (1000*1000*Nf));
	printf(" Snake evolution: %.5f seconds\n", ((float) (snake_time)) / (float) (1000*1000*Nf));
}
MAT *MGVF(MAT *I, double vx, double vy) {
	/*
	% MGVF calculate the motion gradient vector flow (MGVF)
	%  for the image 'I'
	%
	% Based on the algorithm in:
	%  Motion gradient vector flow: an external force for tracking rolling
	%   leukocytes with shape and size constrained active contours
	%  Ray, N. and Acton, S.T.
	%  IEEE Transactions on Medical Imaging
	%  Volume: 23, Issue: 12, December 2004
	%  Pages: 1466 - 1478
	%
	% INPUTS
	%   I...........image
	%   vx,vy.......velocity vector
	%
	% OUTPUT
	%   IMGVF.......MGVF vector field as image
	%               (caller owns the returned matrix and must m_free it)
	%
	% NOTE: 'I' is normalized IN PLACE to [0,1] as a side effect.
	%
	% Matlab code written by: DREW GILLIAM (based on work by GANG DONG /
	%                         NILANJAN RAY)
	% Ported to C by: MICHAEL BOYER
	*/
	// Constants of the fixed-point iteration
	double converge = 0.00001;      // mean-difference convergence threshold
	double mu = 0.5;                // regularization weight
	double epsilon = 0.0000000001;  // Heaviside smoothing parameter
	double lambda = 8.0 * mu + 1.0; // normalization (8 neighbors + center)
	// Smallest positive value expressable in double-precision
	double eps = pow(2.0, -52.0);
	// Maximum number of iterations to compute the MGVF matrix
	int iterations = 500;
	// Find the maximum and minimum values in I
	int m = I->m, n = I->n, i, j;
	double Imax = m_get_val(I, 0, 0);
	double Imin = m_get_val(I, 0, 0);
	for (i = 0; i < m; i++) {
		for (j = 0; j < n; j++) {
			double temp = m_get_val(I, i, j);
			if (temp > Imax) Imax = temp;
			else if (temp < Imin) Imin = temp;
		}
	}
	// Normalize the image I to [0,1]; eps guards against division by zero
	// for a constant image
	double scale = 1.0 / (Imax - Imin + eps);
	for (i = 0; i < m; i++) {
		for (j = 0; j < n; j++) {
			double old_val = m_get_val(I, i, j);
			m_set_val(I, i, j, (old_val - Imin) * scale);
		}
	}
	// Initialize the output matrix IMGVF with values from I
	MAT *IMGVF = m_get(m, n);
	for (i = 0; i < m; i++) {
		for (j = 0; j < n; j++) {
			m_set_val(IMGVF, i, j, m_get_val(I, i, j));
		}
	}
	// Precompute row and column indices for the
	//  neighbor difference computation below.
	// Border rows/columns are clamped to themselves (replicated boundary).
	int *rowU = (int *) malloc(sizeof(int) * m);
	int *rowD = (int *) malloc(sizeof(int) * m);
	int *colL = (int *) malloc(sizeof(int) * n);
	int *colR = (int *) malloc(sizeof(int) * n);
	rowU[0] = 0;
	rowD[m - 1] = m - 1;
	for (i = 1; i < m; i++) {
		rowU[i] = i - 1;
		rowD[i - 1] = i;
	}
	colL[0] = 0;
	colR[n - 1] = n - 1;
	for (j = 1; j < n; j++) {
		colL[j] = j - 1;
		colR[j - 1] = j;
	}
	// Allocate matrices used in the while loop below: the eight neighbor
	// differences and their regularized-Heaviside weights
	MAT *U  = m_get(m, n), *D  = m_get(m, n), *L  = m_get(m, n), *R  = m_get(m, n);
	MAT *UR = m_get(m, n), *DR = m_get(m, n), *UL = m_get(m, n), *DL = m_get(m, n);
	MAT *UHe  = m_get(m, n), *DHe  = m_get(m, n), *LHe  = m_get(m, n), *RHe  = m_get(m, n);
	MAT *URHe = m_get(m, n), *DRHe = m_get(m, n), *ULHe = m_get(m, n), *DLHe = m_get(m, n);
	// Precompute constants to avoid division in the for loops below
	double mu_over_lambda = mu / lambda;
	double one_over_lambda = 1.0 / lambda;
	// Compute the MGVF: iterate until converged or iteration budget spent
	int iter = 0;
	double mean_diff = 1.0;
	while ((iter < iterations) && (mean_diff > converge)) {
		// Compute the difference between each pixel and its eight neighbors
		for (i = 0; i < m; i++) {
			for (j = 0; j < n; j++) {
				double subtrahend = m_get_val(IMGVF, i, j);
				m_set_val(U, i, j, m_get_val(IMGVF, rowU[i], j) - subtrahend);
				m_set_val(D, i, j, m_get_val(IMGVF, rowD[i], j) - subtrahend);
				m_set_val(L, i, j, m_get_val(IMGVF, i, colL[j]) - subtrahend);
				m_set_val(R, i, j, m_get_val(IMGVF, i, colR[j]) - subtrahend);
				m_set_val(UR, i, j, m_get_val(IMGVF, rowU[i], colR[j]) - subtrahend);
				m_set_val(DR, i, j, m_get_val(IMGVF, rowD[i], colR[j]) - subtrahend);
				m_set_val(UL, i, j, m_get_val(IMGVF, rowU[i], colL[j]) - subtrahend);
				m_set_val(DL, i, j, m_get_val(IMGVF, rowD[i], colL[j]) - subtrahend);
			}
		}
		// Compute the regularized heaviside version of the matrices above,
		// weighting each direction by its alignment with the velocity (vx, vy)
		heaviside( UHe,  U, -vy,      epsilon);
		heaviside( DHe,  D,  vy,      epsilon);
		heaviside( LHe,  L, -vx,      epsilon);
		heaviside( RHe,  R,  vx,      epsilon);
		heaviside(URHe, UR,  vx - vy, epsilon);
		heaviside(DRHe, DR,  vx + vy, epsilon);
		heaviside(ULHe, UL, -vx - vy, epsilon);
		heaviside(DLHe, DL,  vy - vx, epsilon);
		// Update the IMGVF matrix
		double total_diff = 0.0;
		for (i = 0; i < m; i++) {
			for (j = 0; j < n; j++) {
				// Store the old value so we can compute the difference later
				double old_val = m_get_val(IMGVF, i, j);
				// Compute IMGVF += (mu / lambda)(UHe .*U  + DHe .*D  + LHe .*L  + RHe .*R +
				//                                URHe.*UR + DRHe.*DR + ULHe.*UL + DLHe.*DL);
				double vU  = m_get_val(UHe,  i, j) * m_get_val(U,  i, j);
				double vD  = m_get_val(DHe,  i, j) * m_get_val(D,  i, j);
				double vL  = m_get_val(LHe,  i, j) * m_get_val(L,  i, j);
				double vR  = m_get_val(RHe,  i, j) * m_get_val(R,  i, j);
				double vUR = m_get_val(URHe, i, j) * m_get_val(UR, i, j);
				double vDR = m_get_val(DRHe, i, j) * m_get_val(DR, i, j);
				double vUL = m_get_val(ULHe, i, j) * m_get_val(UL, i, j);
				double vDL = m_get_val(DLHe, i, j) * m_get_val(DL, i, j);
				double vHe = old_val + mu_over_lambda * (vU + vD + vL + vR + vUR + vDR + vUL + vDL);
				// Compute IMGVF -= (1 / lambda)(I .* (IMGVF - I))
				double vI = m_get_val(I, i, j);
				double new_val = vHe - (one_over_lambda * vI * (vHe - vI));
				m_set_val(IMGVF, i, j, new_val);
				// Keep track of the absolute value of the differences
				//  between this iteration and the previous one
				total_diff += fabs(new_val - old_val);
			}
		}
		// Compute the mean absolute difference between this iteration
		//  and the previous one to check for convergence
		mean_diff = total_diff / (double) (m * n);
		iter++;
	}
	// Free memory
	free(rowU); free(rowD); free(colL); free(colR);
	m_free(U);  m_free(D);  m_free(L);  m_free(R);
	m_free(UR); m_free(DR); m_free(UL); m_free(DL);
	m_free(UHe);  m_free(DHe);  m_free(LHe);  m_free(RHe);
	m_free(URHe); m_free(DRHe); m_free(ULHe); m_free(DLHe);
	return IMGVF;
}
// Regularized Heaviside step function, parameterized by a small positive
// smoothing constant 'e':  H = (1 / pi) * atan((z * v) / e) + 0.5.
// Writes the result into H (same dimensions as z).
void heaviside(MAT *H, MAT *z, double v, double e) {
	int rows = z->m, cols = z->n;
	int row, col;
	// Hoist the two divisions out of the doubly-nested loop.
	double inv_pi = 1.0 / PI;
	double inv_e = 1.0 / e;
	for (row = 0; row < rows; row++) {
		for (col = 0; col < cols; col++) {
			double scaled = m_get_val(z, row, col) * v * inv_e;
			m_set_val(H, row, col, inv_pi * atan(scaled) + 0.5);
		}
	}
	// (A coarser three-level step approximation -- 0 / 0.5 / 1 with a
	// +/-0.0001 dead zone -- was left commented out in the original as a
	// faster alternative.)
}
void ellipseevolve(MAT *f, double *xc0, double *yc0, double *r0, double *t, int Np, double Er, double Ey) {
	/*
	% ELLIPSEEVOLVE evolves a parametric snake according
	%  to some energy constraints.
	%
	% INPUTS:
	%   f............potential surface
	%   xc0,yc0......initial center position
	%   r0,t.........initial radii & angle vectors (with Np elements each)
	%   Np...........number of snaxel points per snake
	%   Er...........expected radius
	%   Ey...........expected y position
	%
	% OUTPUTS
	%   xc0,yc0.......final center position
	%   r0...........final radii
	%
	% Matlab code written by: DREW GILLIAM (based on work by GANG DONG /
	%                         NILANJAN RAY)
	% Ported to C by: MICHAEL BOYER
	*/
	// Constants: per-iteration step sizes, convergence threshold, and the
	// relative weights of the edge / size / path energy terms
	double deltax = 0.2;
	double deltay = 0.2;
	double deltar = 0.2;
	double converge = 0.1;
	double lambdaedge = 1;
	double lambdasize = 0.2;
	double lambdapath = 0.05;
	int iterations = 1000;      // maximum number of iterations
	int i, j;
	// Initialize variables: work on local copies so the outputs are only
	// written back once, at the end
	double xc = *xc0;
	double yc = *yc0;
	double *r = (double *) malloc(sizeof(double) * Np);
	for (i = 0; i < Np; i++) r[i] = r0[i];
	// Compute the x- and y-gradients of the MGVF matrix
	MAT *fx = gradient_x(f);
	MAT *fy = gradient_y(f);
	// Normalize the gradients to unit magnitude
	// NOTE(review): fmag can be 0 where both gradients vanish, producing
	// NaN/Inf in fx/fy -- confirm whether upstream guarantees nonzero
	// gradients here.
	int fh = f->m, fw = f->n;
	for (i = 0; i < fh; i++) {
		for (j = 0; j < fw; j++) {
			double temp_x = m_get_val(fx, i, j);
			double temp_y = m_get_val(fy, i, j);
			double fmag = sqrt((temp_x * temp_x) + (temp_y * temp_y));
			m_set_val(fx, i, j, temp_x / fmag);
			m_set_val(fy, i, j, temp_y / fmag);
		}
	}
	double *r_old = (double *) malloc(sizeof(double) * Np);
	VEC *x = v_get(Np);
	VEC *y = v_get(Np);
	// Evolve the snake until converged or the iteration budget is spent
	int iter = 0;
	double snakediff = 1.0;
	while (iter < iterations && snakediff > converge) {
		// Save the values from the previous iteration
		double xc_old = xc, yc_old = yc;
		for (i = 0; i < Np; i++) {
			r_old[i] = r[i];
		}
		// Compute the locations of the snaxels (polar to Cartesian)
		for (i = 0; i < Np; i++) {
			v_set_val(x, i, xc + r[i] * cos(t[i]));
			v_set_val(y, i, yc + r[i] * sin(t[i]));
		}
		// See if any of the points in the snake are off the edge of the image
		double min_x = v_get_val(x, 0), max_x = v_get_val(x, 0);
		double min_y = v_get_val(y, 0), max_y = v_get_val(y, 0);
		for (i = 1; i < Np; i++) {
			double x_i = v_get_val(x, i);
			if (x_i < min_x) min_x = x_i;
			else if (x_i > max_x) max_x = x_i;
			double y_i = v_get_val(y, i);
			if (y_i < min_y) min_y = y_i;
			else if (y_i > max_y) max_y = y_i;
		}
		// Stop evolving if the snake leaves the image bounds
		if (min_x < 0.0 || max_x > (double) fw - 1.0 || min_y < 0 || max_y > (double) fh - 1.0) break;
		// Compute the length of the snake (closed polygon perimeter)
		double L = 0.0;
		for (i = 0; i < Np - 1; i++) {
			double diff_x = v_get_val(x, i + 1) - v_get_val(x, i);
			double diff_y = v_get_val(y, i + 1) - v_get_val(y, i);
			L += sqrt((diff_x * diff_x) + (diff_y * diff_y));
		}
		// Closing segment from the last snaxel back to the first
		double diff_x = v_get_val(x, 0) - v_get_val(x, Np - 1);
		double diff_y = v_get_val(y, 0) - v_get_val(y, Np - 1);
		L += sqrt((diff_x * diff_x) + (diff_y * diff_y));
		// Compute the potential surface at each snaxel (bilinear sampling)
		MAT *vf  = linear_interp2(f,  x, y);
		MAT *vfx = linear_interp2(fx, x, y);
		MAT *vfy = linear_interp2(fy, x, y);
		// Compute the average potential surface around the snake
		double vfmean  = sum_m(vf ) / L;
		double vfxmean = sum_m(vfx) / L;
		double vfymean = sum_m(vfy) / L;
		// Compute the radial potential surface
		int m = vf->m, n = vf->n;
		MAT *vfr = m_get(m, n);
		for (i = 0; i < n; i++) {
			double vf_val  = m_get_val(vf,  0, i);
			double vfx_val = m_get_val(vfx, 0, i);
			double vfy_val = m_get_val(vfy, 0, i);
			double x_val = v_get_val(x, i);
			double y_val = v_get_val(y, i);
			double new_val = (vf_val + vfx_val * (x_val - xc) + vfy_val * (y_val - yc) - vfmean) / L;
			m_set_val(vfr, 0, i, new_val);
		}
		// Update the snake center and snaxels: gradient step on the edge
		// term; y and r are additionally pulled toward Ey and Er
		xc = xc + (deltax * lambdaedge * vfxmean);
		yc = (yc + (deltay * lambdaedge * vfymean) + (deltay * lambdapath * Ey)) / (1.0 + deltay * lambdapath);
		double r_diff = 0.0;
		for (i = 0; i < Np; i++) {
			r[i] = (r[i] + (deltar * lambdaedge * m_get_val(vfr, 0, i)) + (deltar * lambdasize * Er)) /
				   (1.0 + deltar * lambdasize);
			r_diff += fabs(r[i] - r_old[i]);
		}
		// Test for convergence: total movement of center plus radii
		snakediff = fabs(xc - xc_old) + fabs(yc - yc_old) + r_diff;
		// Free temporary matrices
		m_free(vf);
		m_free(vfx);
		m_free(vfy);
		m_free(vfr);
		iter++;
	}
	// Set the return values
	*xc0 = xc;
	*yc0 = yc;
	for (i = 0; i < Np; i++)
		r0[i] = r[i];
	// Free memory
	free(r); free(r_old);
	v_free( x); v_free( y);
	m_free(fx); m_free(fy);
}
// Returns the sum of every element of 'matrix'; a NULL matrix sums to 0.
double sum_m(MAT *matrix) {
	double total = 0.0;
	int row, col;
	if (matrix == NULL) return total;
	for (row = 0; row < matrix->m; row++) {
		for (col = 0; col < matrix->n; col++) {
			total += m_get_val(matrix, row, col);
		}
	}
	return total;
}
// Returns the sum of every element of 'vector'; a NULL vector sums to 0.
double sum_v(VEC *vector) {
	double total = 0.0;
	int idx;
	if (vector == NULL) return total;
	for (idx = 0; idx < vector->dim; idx++) {
		total += v_get_val(vector, idx);
	}
	return total;
}
// Creates a zeroed x-by-y matrix of doubles, laid out as one contiguous
// data block (reachable via p[0]) plus an array of row pointers.
// Returns NULL if either dimension is < 1 or an allocation fails
// (the original code did not check the allocations, leaking the data
// block on a partial failure and handing back NULL row pointers).
double **alloc_2d_double(int x, int y) {
	if (x < 1 || y < 1) return NULL;
	// Allocate the data and the pointers to the data.
	// Multiply in size_t to avoid int overflow for large matrices.
	double *data = (double *) calloc((size_t) x * (size_t) y, sizeof(double));
	double **pointers = (double **) malloc(sizeof(double *) * x);
	if (data == NULL || pointers == NULL) {
		// Release whichever allocation succeeded (free(NULL) is a no-op)
		free(data);
		free(pointers);
		return NULL;
	}
	// Make the pointers point to the data
	int i;
	for (i = 0; i < x; i++) {
		pointers[i] = data + (i * y);
	}
	return pointers;
}
// Creates a zeroed x-by-y-by-z matrix of doubles, laid out as one
// contiguous data block (reachable via p[0][0]) plus two levels of
// pointer arrays. Returns NULL if any dimension is < 1 or an allocation
// fails (the original code did not check the allocations, leaking the
// earlier blocks on a partial failure).
double ***alloc_3d_double(int x, int y, int z) {
	if (x < 1 || y < 1 || z < 1) return NULL;
	// Allocate the data and the two levels of pointers.
	// Multiply in size_t to avoid int overflow for large matrices.
	double *data = (double *) calloc((size_t) x * (size_t) y * (size_t) z, sizeof(double));
	double **pointers_to_data = (double **) malloc(sizeof(double *) * x * y);
	double ***pointers_to_pointers = (double ***) malloc(sizeof(double **) * x);
	if (data == NULL || pointers_to_data == NULL || pointers_to_pointers == NULL) {
		// Release whichever allocations succeeded (free(NULL) is a no-op)
		free(data);
		free(pointers_to_data);
		free(pointers_to_pointers);
		return NULL;
	}
	// Make the pointers point to the data
	int i;
	for (i = 0; i < x * y; i++) pointers_to_data[i] = data + (i * z);
	for (i = 0; i < x; i++) pointers_to_pointers[i] = pointers_to_data + (i * y);
	return pointers_to_pointers;
}
// Frees a 2d matrix generated by the alloc_2d_double function
/* Releases the contiguous data block (held by p[0]) and then the
 * row-pointer array itself. Safe to call with NULL. */
void free_2d_double(double **p) {
	if (p == NULL) return;
	free(p[0]);  /* free(NULL) is a no-op, so an empty table is fine */
	free(p);
}
// Frees a 3d matrix generated by the alloc_3d_double function
/* Releases the contiguous data block (p[0][0]), the middle pointer level
 * (p[0]), then the top level. Safe to call with NULL. */
void free_3d_double(double ***p) {
	if (p == NULL) return;
	if (p[0] != NULL) {
		free(p[0][0]);  /* free(NULL) is a no-op */
		free(p[0]);
	}
	free(p);
}
|
hci.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Alexander Sokolov <alexander.y.sokolov@gmail.com>
*
* Slater-Condon rule implementation for Heat-Bath CI
*/
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include "hci.h"
//#include <omp.h>
#include <limits.h>
// Computes C' = H * C in the selected CI basis
/*
 * Applies the Hamiltonian to the CI vector using the Slater-Condon rules.
 *   h1    : one-electron integrals, norb*norb
 *   eri   : two-electron integrals, flat norb^4 array
 *   strs  : ndet determinant strings; each determinant occupies 2*nset
 *           64-bit words (alpha string, then beta string), nset = ceil(norb/64)
 *   civec : input CI coefficients (length ndet)
 *   hdiag : precomputed diagonal elements <I|H|I> (length ndet)
 *   ci1   : output vector, incremented in place with (H*C)[ip]
 * Pairs of determinants differing by more than a double excitation have a
 * zero matrix element and are screened out early.
 */
void contract_h_c(double *h1, double *eri, int norb, int neleca, int nelecb, uint64_t *strs, double *civec, double *hdiag, uint64_t ndet, double *ci1) {
// ts[ip]: total (alpha+beta) excitation level of determinant ip relative to
// determinant 0; |ts[ip]-ts[jp]| >= 3 implies <I|H|J> = 0, so such pairs are
// skipped below.
int *ts = malloc(sizeof(int) * ndet);
#pragma omp parallel default(none) shared(h1, eri, norb, neleca, nelecb, strs, civec, hdiag, ndet, ci1, ts)
{
size_t ip, jp, p;
int nset = (norb + 63) / 64;
// Calculate excitation level for prescreening
// (every thread redundantly writes ts[0] = 0; harmless, same value)
ts[0] = 0;
uint64_t *str1a = strs;
uint64_t *str1b = strs + nset;
#pragma omp for schedule(static)
for (ip = 1; ip < ndet; ++ip) {
uint64_t *stria = strs + ip * 2 * nset;
uint64_t *strib = strs + ip * 2 * nset + nset;
ts[ip] = (n_excitations(stria, str1a, nset) + n_excitations(strib, str1b, nset));
}
// Loop over pairs of determinants; each thread owns a range of ip, so the
// ci1[ip] accumulation below is race-free.
#pragma omp for schedule(static)
for (ip = 0; ip < ndet; ++ip) {
for (jp = 0; jp < ndet; ++jp) {
if (abs(ts[ip] - ts[jp]) < 3) {
uint64_t *stria = strs + ip * 2 * nset;
uint64_t *strib = strs + ip * 2 * nset + nset;
uint64_t *strja = strs + jp * 2 * nset;
uint64_t *strjb = strs + jp * 2 * nset + nset;
int n_excit_a = n_excitations(stria, strja, nset);
int n_excit_b = n_excitations(strib, strjb, nset);
// Diagonal term
if (ip == jp) {
ci1[ip] += hdiag[ip] * civec[ip];
}
// Single excitation
else if ((n_excit_a + n_excit_b) == 1) {
int *ia;
// alpha->alpha
if (n_excit_b == 0) {
ia = get_single_excitation(stria, strja, nset);
int i = ia[0];
int a = ia[1];
double sign = compute_cre_des_sign(a, i, stria, nset);
int *occsa = compute_occ_list(stria, nset, norb, neleca);
int *occsb = compute_occ_list(strib, nset, norb, nelecb);
// fai = h_ai plus Coulomb/exchange contributions from all occupied orbitals
double fai = h1[a * norb + i];
for (p = 0; p < neleca; ++p) {
int k = occsa[p];
int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i;
int kiak = k * norb * norb * norb + i * norb * norb + a * norb + k;
fai += eri[kkai] - eri[kiak];
}
for (p = 0; p < nelecb; ++p) {
int k = occsb[p];
int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i;
fai += eri[kkai];
}
if (fabs(fai) > 1.0E-14) ci1[ip] += sign * fai * civec[jp];
free(occsa);
free(occsb);
}
// beta->beta
else if (n_excit_a == 0) {
ia = get_single_excitation(strib, strjb, nset);
int i = ia[0];
int a = ia[1];
double sign = compute_cre_des_sign(a, i, strib, nset);
int *occsa = compute_occ_list(stria, nset, norb, neleca);
int *occsb = compute_occ_list(strib, nset, norb, nelecb);
double fai = h1[a * norb + i];
for (p = 0; p < nelecb; ++p) {
int k = occsb[p];
int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i;
int kiak = k * norb * norb * norb + i * norb * norb + a * norb + k;
fai += eri[kkai] - eri[kiak];
}
for (p = 0; p < neleca; ++p) {
int k = occsa[p];
int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i;
fai += eri[kkai];
}
if (fabs(fai) > 1.0E-14) ci1[ip] += sign * fai * civec[jp];
free(occsa);
free(occsb);
}
// ia was assigned in exactly one of the two branches above (a single
// excitation is either alpha or beta)
free(ia);
}
// Double excitation
else if ((n_excit_a + n_excit_b) == 2) {
int i, j, a, b;
// alpha,alpha->alpha,alpha
if (n_excit_b == 0) {
int *ijab = get_double_excitation(stria, strja, nset);
i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3];
double v, sign;
int ajbi = a * norb * norb * norb + j * norb * norb + b * norb + i;
int aibj = a * norb * norb * norb + i * norb * norb + b * norb + j;
// Pairing of holes with particles depends on their relative order
if (a > j || i > b) {
v = eri[ajbi] - eri[aibj];
sign = compute_cre_des_sign(b, i, stria, nset);
sign *= compute_cre_des_sign(a, j, stria, nset);
}
else {
v = eri[aibj] - eri[ajbi];
sign = compute_cre_des_sign(b, j, stria, nset);
sign *= compute_cre_des_sign(a, i, stria, nset);
}
if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp];
free(ijab);
}
// beta,beta->beta,beta
else if (n_excit_a == 0) {
int *ijab = get_double_excitation(strib, strjb, nset);
i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3];
double v, sign;
int ajbi = a * norb * norb * norb + j * norb * norb + b * norb + i;
int aibj = a * norb * norb * norb + i * norb * norb + b * norb + j;
if (a > j || i > b) {
v = eri[ajbi] - eri[aibj];
sign = compute_cre_des_sign(b, i, strib, nset);
sign *= compute_cre_des_sign(a, j, strib, nset);
}
else {
v = eri[aibj] - eri[ajbi];
sign = compute_cre_des_sign(b, j, strib, nset);
sign *= compute_cre_des_sign(a, i, strib, nset);
}
if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp];
free(ijab);
}
// alpha,beta->alpha,beta
else {
int *ia = get_single_excitation(stria, strja, nset);
int *jb = get_single_excitation(strib, strjb, nset);
i = ia[0]; a = ia[1]; j = jb[0]; b = jb[1];
double v = eri[a * norb * norb * norb + i * norb * norb + b * norb + j];
double sign = compute_cre_des_sign(a, i, stria, nset);
sign *= compute_cre_des_sign(b, j, strib, nset);
if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp];
free(ia);
free(jb);
}
}
} // end if over ts
} // end loop over jp
} // end loop over ip
} // end omp
free(ts);
}
// Compare two strings and compute excitation level
/* Excitation level between two occupation strings of nset 64-bit words:
 * half the Hamming distance (each excitation toggles one hole bit and one
 * particle bit). */
int n_excitations(uint64_t *str1, uint64_t *str2, int nset) {
	int diff = 0;
	int k;
	for (k = 0; k < nset; ++k)
		diff += popcount(str1[k] ^ str2[k]);
	return diff / 2;
}
// Compute number of set bits in a string
/* Brian Kernighan's method: each iteration clears the lowest set bit, so the
 * loop runs once per set bit (0..64 times). Equivalent to the classic
 * parallel-sum popcount. */
int popcount(uint64_t x) {
	int count = 0;
	while (x) {
		x &= x - 1;  /* clear the lowest set bit */
		count++;
	}
	return count;
}
// Compute orbital indices for a single excitation
/* For two strings related by one excitation, returns a malloc'ed pair
 * {hole, particle}: ia[0] is the orbital occupied in str1 only, ia[1] the
 * orbital occupied in str2 only. Words are stored most-significant-first,
 * so word pp = nset-p-1 covers orbitals [64*p, 64*p+63]. Caller frees. */
int *get_single_excitation(uint64_t *str1, uint64_t *str2, int nset) {
	int *ia = (int *) malloc(sizeof(int) * 2);
	size_t p;
	for (p = 0; p < nset; ++p) {
		size_t pp = nset - p - 1;
		uint64_t diff = str1[pp] ^ str2[pp];
		uint64_t particle = diff & str2[pp];
		uint64_t hole = diff & str1[pp];
		if (popcount(particle) == 1)
			ia[1] = trailz(particle) + 64 * p;
		if (popcount(hole) == 1)
			ia[0] = trailz(hole) + 64 * p;
	}
	return ia;
}
// Compute orbital indices for a double excitation
/*
 * For two strings related by a double excitation, returns a malloc'ed array
 * ijab = {i, j, a, b}: holes i < j (occupied in str1 only) and particles
 * a < b (occupied in str2 only). Words are stored most-significant-first,
 * so scanning pp = nset-p-1 visits orbitals from the lowest upward; when
 * both holes (or both particles) land in the same 64-bit word, the two-bit
 * branch assigns both indices at once. Caller frees the result.
 */
int *get_double_excitation(uint64_t *str1, uint64_t *str2, int nset) {
size_t p;
int *ijab = malloc(sizeof(int) * 4);
// Next free slot when the two particles (holes) fall in different words
int particle_ind = 2;
int hole_ind = 0;
for (p = 0; p < nset; ++p) {
size_t pp = nset - p - 1;
uint64_t str_tmp = str1[pp] ^ str2[pp];
uint64_t str_particle = str_tmp & str2[pp];
uint64_t str_hole = str_tmp & str1[pp];
int n_particle = popcount(str_particle);
int n_hole = popcount(str_hole);
if (n_particle == 1) {
ijab[particle_ind] = trailz(str_particle) + 64 * p;
particle_ind++;
}
else if (n_particle == 2) {
// Both particles in this word: lowest bit is a, next is b
int a = trailz(str_particle);
ijab[2] = a + 64 * p;
str_particle &= ~(1ULL << a);
int b = trailz(str_particle);
ijab[3] = b + 64 * p;
}
if (n_hole == 1) {
ijab[hole_ind] = trailz(str_hole) + 64 * p;
hole_ind++;
}
else if (n_hole == 2) {
// Both holes in this word: lowest bit is i, next is j
int i = trailz(str_hole);
ijab[0] = i + 64 * p;
str_hole &= ~(1ULL << i);
int j = trailz(str_hole);
ijab[1] = j + 64 * p;
}
}
return ijab;
}
// Compute number of trailing zeros in a bit string
/* Returns the index of the lowest set bit of v, or 64 if v == 0.
 * Fix: the lowest set bit is now isolated with unsigned arithmetic
 * (v & (~v + 1)); the previous -(int64_t)v negated INT64_MIN when bit 63
 * was the only set bit, which is signed-overflow undefined behavior. */
int trailz(uint64_t v) {
	int c = 64;
	// Isolate the lowest set bit (two's complement, well-defined on unsigned)
	v &= ~v + 1;
	if (v) c--;
	if (v & 0x00000000ffffffff) c -= 32;
	if (v & 0x0000ffff0000ffff) c -= 16;
	if (v & 0x00ff00ff00ff00ff) c -= 8;
	if (v & 0x0f0f0f0f0f0f0f0f) c -= 4;
	if (v & 0x3333333333333333) c -= 2;
	if (v & 0x5555555555555555) c -= 1;
	return c;
}
// Function to print int as a char for debug purposes
/* Renders the 64 bits of i, most significant bit first, as a heap-allocated
 * NUL-terminated string of '0'/'1' characters. Returns NULL if allocation
 * fails; otherwise the caller must free() the result.
 * Fix: i is already unsigned, so it can be shifted directly; the old
 * pointer-based type punning (*(uint64_t *)&i) was dead code. */
char *int2bin(uint64_t i) {
	size_t bits = sizeof(uint64_t) * CHAR_BIT;
	char *str = (char *) malloc(bits + 1);
	if (!str) return NULL;
	str[bits] = 0;
	// Fill from the least significant end backwards
	for (; bits--; i >>= 1)
		str[bits] = (i & 1) ? '1' : '0';
	return str;
}
// Compute sign for a pair of creation and desctruction operators
/* Fermionic sign (-1)^nperm of a_p^dag a_q acting on the occupation string
 * str (nset words, most-significant word first): nperm counts the occupied
 * orbitals strictly between p and q.
 * Fix: in both cross-word branches the raw shifted word
 * (str[...] >> (qb+1)) was added to nperm instead of its bit count, which
 * corrupts the permutation parity and overflows int; the shifted words are
 * now passed through popcount() like every other contribution, matching the
 * upstream PySCF implementation. */
double compute_cre_des_sign(int p, int q, uint64_t *str, int nset) {
	int nperm;
	size_t i;
	int pg = p / 64;   /* word group and bit offset of each orbital */
	int qg = q / 64;
	int pb = p % 64;
	int qb = q % 64;
	if (pg > qg) {
		nperm = 0;
		// Whole words strictly between the two orbitals' words
		for (i = nset-pg; i < nset-qg-1; ++i) {
			nperm += popcount(str[i]);
		}
		// Partial words: bits below p in p's word, bits above q in q's word
		nperm += popcount(str[nset -1 - pg] & ((1ULL << pb) - 1));
		nperm += popcount(str[nset -1 - qg] >> (qb + 1));
	}
	else if (pg < qg) {
		nperm = 0;
		for (i = nset-qg; i < nset-pg-1; ++i) {
			nperm += popcount(str[i]);
		}
		nperm += popcount(str[nset -1 - qg] & ((1ULL << qb) - 1));
		nperm += popcount(str[nset -1 - pg] >> (pb + 1));
	}
	else {
		// Same word: count occupied bits strictly between min(p,q) and max(p,q)
		uint64_t mask;
		if (p > q) mask = (1ULL << pb) - (1ULL << (qb + 1));
		else mask = (1ULL << qb) - (1ULL << (pb + 1));
		nperm = popcount(str[nset -1 - pg] & mask);
	}
	return (nperm % 2) ? -1.0 : 1.0;
}
// Compute a list of occupied orbitals for a given string
/* Returns a malloc'ed array of the nelec occupied orbital indices in
 * ascending order. Words are stored most-significant-first, so the last
 * word (string[nset-1]) holds orbitals 0..63, the one before it 64..127,
 * and so on. Caller frees the result. */
int *compute_occ_list(uint64_t *string, int nset, int norb, int nelec) {
	int *occ = (int *) malloc(sizeof(int) * nelec);
	int occ_ind = 0;
	int off = 0;
	size_t k;
	for (k = nset; k > 0; --k) {
		uint64_t word = string[k - 1];
		int nbits = (norb - off) < 64 ? (norb - off) : 64;
		int b;
		for (b = 0; b < nbits; ++b) {
			if ((word >> b) & 1) {
				occ[occ_ind++] = b + off;
			}
		}
		off += 64;
	}
	return occ;
}
// Compute a list of virtual (unoccupied) orbitals for a given string
/* Returns a malloc'ed array of the norb-nelec unoccupied orbital indices in
 * ascending order — the complement of compute_occ_list. Words are stored
 * most-significant-first. Caller frees the result. */
int *compute_vir_list(uint64_t *string, int nset, int norb, int nelec) {
	int *vir = (int *) malloc(sizeof(int) * (norb - nelec));
	int vir_ind = 0;
	int off = 0;
	size_t k;
	for (k = nset; k > 0; --k) {
		uint64_t word = string[k - 1];
		int nbits = (norb - off) < 64 ? (norb - off) : 64;
		int b;
		for (b = 0; b < nbits; ++b) {
			if (!((word >> b) & 1)) {
				vir[vir_ind++] = b + off;
			}
		}
		off += 64;
	}
	return vir;
}
// Select determinants to include in the CI space
/*
 * Heat-bath selection step. For each determinant idet in
 * [ndet_start, ndet_finish), generate all singly and doubly excited
 * determinants whose estimated coupling exceeds tol = select_cutoff /
 * |civec[idet]| and append their strings to strs_add.
 *   h1, eri    : one- and two-electron integrals (flat norb^2 / norb^4)
 *   jk         : antisymmetrized same-spin integral combination (flat norb^4)
 *   eri_sorted,
 *   jk_sorted  : indices of eri / jk sorted by decreasing magnitude, so the
 *                double-excitation loop can break as soon as both integral
 *                streams drop below tol
 *   strs       : input determinant strings (2*nset words per determinant)
 *   strs_add   : output buffer for new determinant strings
 *   strs_add_size : on entry, capacity of strs_add in determinants; on exit,
 *                the number of determinants actually appended
 */
void select_strs(double *h1, double *eri, double *jk, uint64_t *eri_sorted, uint64_t *jk_sorted, int norb, int neleca, int nelecb, uint64_t *strs, double *civec, uint64_t ndet_start, uint64_t ndet_finish, double select_cutoff, uint64_t *strs_add, uint64_t* strs_add_size) {
size_t p, q, r, i, k, a, ip, jp, kp, lp, ij, iset, idet;
uint64_t max_strs_add = strs_add_size[0];
int nset = (norb + 63) / 64;
// Compute Fock intermediates for the aufbau reference (orbitals 0..nelec-1
// occupied); per-determinant corrections are applied via holes/particles below
double *focka = malloc(sizeof(double) * norb * norb);
double *fockb = malloc(sizeof(double) * norb * norb);
for (p = 0; p < norb; ++p) {
for (q = 0; q < norb; ++q) {
double vja = 0.0;
double vka = 0.0;
for (i = 0; i < neleca; ++i) {
size_t iipq = i * norb * norb * norb + i * norb * norb + p * norb + q;
size_t piiq = p * norb * norb * norb + i * norb * norb + i * norb + q;
vja += eri[iipq];
vka += eri[piiq];
}
double vjb = 0.0;
double vkb = 0.0;
for (i = 0; i < nelecb; ++i) {
size_t iipq = i * norb * norb * norb + i * norb * norb + p * norb + q;
size_t piiq = p * norb * norb * norb + i * norb * norb + i * norb + q;
vjb += eri[iipq];
vkb += eri[piiq];
}
focka[p * norb + q] = h1[p * norb + q] + vja + vjb - vka;
fockb[p * norb + q] = h1[p * norb + q] + vja + vjb - vkb;
}
}
int *holes_a = malloc(sizeof(int) * norb);
int *holes_b = malloc(sizeof(int) * norb);
int *particles_a = malloc(sizeof(int) * norb);
int *particles_b = malloc(sizeof(int) * norb);
uint64_t strs_added = 0;
// Loop over determinants
for (idet = ndet_start; idet < ndet_finish; ++idet) {
uint64_t *stra = strs + idet * 2 * nset;
uint64_t *strb = strs + idet * 2 * nset + nset;
int *occsa = compute_occ_list(stra, nset, norb, neleca);
int *occsb = compute_occ_list(strb, nset, norb, nelecb);
int *virsa = compute_vir_list(stra, nset, norb, neleca);
int *virsb = compute_vir_list(strb, nset, norb, nelecb);
// Heat-bath threshold: larger CI weight -> lower integral threshold
double tol = select_cutoff / fabs(civec[idet]);
// Single excitations
// holes_* / particles_* record how this determinant differs from the
// aufbau reference: holes are unoccupied orbitals with index < nelec,
// particles are occupied orbitals with index >= nelec. They correct the
// reference Fock matrix elements below.
int n_holes_a = 0;
int n_holes_b = 0;
int n_particles_a = 0;
int n_particles_b = 0;
for (p = 0; p < (norb - neleca); ++p) {
i = virsa[p];
if (i < neleca) {
holes_a[n_holes_a] = i;
n_holes_a++;
}
}
for (p = 0; p < neleca; ++p) {
i = occsa[p];
if (i >= neleca) {
particles_a[n_particles_a] = i;
n_particles_a++;
}
}
for (p = 0; p < (norb - nelecb); ++p) {
i = virsb[p];
if (i < nelecb) {
holes_b[n_holes_b] = i;
n_holes_b++;
}
}
for (p = 0; p < nelecb; ++p) {
i = occsb[p];
if (i >= nelecb) {
particles_b[n_particles_b] = i;
n_particles_b++;
}
}
// TODO: recompute Fock for each |Phi_I> and make sure it matches Fock in the code below
// alpha->alpha
for (p = 0; p < neleca; ++p) {
i = occsa[p];
for (q = 0; q < (norb - neleca); ++q) {
a = virsa[q];
double fai = focka[a * norb + i];
for (r = 0; r < n_particles_a; ++r) {
k = particles_a[r];
fai += jk[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
for (r = 0; r < n_holes_a; ++r) {
k = holes_a[r];
fai -= jk[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
for (r = 0; r < n_particles_b; ++r) {
k = particles_b[r];
fai += eri[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
for (r = 0; r < n_holes_b; ++r) {
k = holes_b[r];
fai -= eri[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
if (fabs(fai) > tol) {
uint64_t *tmp = toggle_bit(stra, nset, a);
uint64_t *new_str = toggle_bit(tmp, nset, i);
for (iset = 0; iset < nset; ++iset) {
// new alpha string
strs_add[strs_added * 2 * nset + iset] = new_str[iset];
// old beta string
strs_add[strs_added * 2 * nset + nset + iset] = strb[iset];
}
free(tmp);
free(new_str);
strs_added++;
}
}
}
// beta->beta
for (p = 0; p < nelecb; ++p) {
i = occsb[p];
for (q = 0; q < (norb - nelecb); ++q) {
a = virsb[q];
double fai = fockb[a * norb + i];
for (r = 0; r < n_particles_b; ++r) {
k = particles_b[r];
fai += jk[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
for (r = 0; r < n_holes_b; ++r) {
k = holes_b[r];
fai -= jk[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
for (r = 0; r < n_particles_a; ++r) {
k = particles_a[r];
fai += eri[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
for (r = 0; r < n_holes_a; ++r) {
k = holes_a[r];
fai -= eri[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
if (fabs(fai) > tol) {
uint64_t *tmp = toggle_bit(strb, nset, a);
uint64_t *new_str = toggle_bit(tmp, nset, i);
for (iset = 0; iset < nset; ++iset) {
// old alpha string
strs_add[strs_added * 2 * nset + iset] = stra[iset];
// new beta string
strs_add[strs_added * 2 * nset + nset + iset] = new_str[iset];
}
free(tmp);
free(new_str);
strs_added++;
}
}
}
size_t ip_occ, jp_occ, kp_occ, lp_occ, ih;
// Double excitations: walk both integral streams in order of decreasing
// magnitude so the loop can stop once both fall below tol
for (p = 0; p < norb * norb * norb * norb; ++p) {
ih = jk_sorted[p];
int aaaa_bbbb_done = (fabs(jk[ih]) < tol);
if (!aaaa_bbbb_done) {
// Decode the flat index ih into orbital labels (ip jp | kp lp)
lp = ih % norb;
ij = ih / norb;
kp = ij % norb;
ij = ij / norb;
jp = ij % norb;
ip = ij / norb;
// alpha,alpha->alpha,alpha
ip_occ = 0;
jp_occ = 0;
kp_occ = 0;
lp_occ = 0;
for (r = 0; r < neleca; ++r) {
int occ_index = occsa[r];
if (ip == occ_index) ip_occ = 1;
if (jp == occ_index) jp_occ = 1;
if (kp == occ_index) kp_occ = 1;
if (lp == occ_index) lp_occ = 1;
}
// Excitation jp,lp -> ip,kp is valid only if holes are occupied and
// particles are empty
if (jp_occ && lp_occ && !ip_occ && !kp_occ) {
uint64_t *tmp = toggle_bit(stra, nset, jp);
uint64_t *new_str = toggle_bit(tmp, nset, ip);
tmp = toggle_bit(new_str, nset, lp);
new_str = toggle_bit(tmp, nset, kp);
for (iset = 0; iset < nset; ++iset) {
strs_add[strs_added * 2 * nset + iset] = new_str[iset];
strs_add[strs_added * 2 * nset + nset + iset] = strb[iset];
}
free(tmp);
free(new_str);
strs_added++;
}
// beta,beta->beta,beta
ip_occ = 0;
jp_occ = 0;
kp_occ = 0;
lp_occ = 0;
for (r = 0; r < nelecb; ++r) {
int occ_index = occsb[r];
if (ip == occ_index) ip_occ = 1;
if (jp == occ_index) jp_occ = 1;
if (kp == occ_index) kp_occ = 1;
if (lp == occ_index) lp_occ = 1;
}
if (jp_occ && lp_occ && !ip_occ && !kp_occ) {
uint64_t *tmp = toggle_bit(strb, nset, jp);
uint64_t *new_str = toggle_bit(tmp, nset, ip);
tmp = toggle_bit(new_str, nset, lp);
new_str = toggle_bit(tmp, nset, kp);
for (iset = 0; iset < nset; ++iset) {
strs_add[strs_added * 2 * nset + iset] = stra[iset];
strs_add[strs_added * 2 * nset + nset + iset] = new_str[iset];
}
free(tmp);
free(new_str);
strs_added++;
}
}
// alpha,beta->alpha,beta
ih = eri_sorted[p];
int aabb_done = (fabs(eri[ih]) < tol);
if (!aabb_done) {
lp = ih % norb;
ij = ih / norb;
kp = ij % norb;
ij = ij / norb;
jp = ij % norb;
ip = ij / norb;
ip_occ = 0;
jp_occ = 0;
kp_occ = 0;
lp_occ = 0;
for (r = 0; r < neleca; ++r) {
int occ_index = occsa[r];
if (ip == occ_index) ip_occ = 1;
if (jp == occ_index) jp_occ = 1;
}
for (r = 0; r < nelecb; ++r) {
int occ_index = occsb[r];
if (kp == occ_index) kp_occ = 1;
if (lp == occ_index) lp_occ = 1;
}
if (jp_occ && lp_occ && !ip_occ && !kp_occ) {
uint64_t *tmp = toggle_bit(stra, nset, jp);
uint64_t *new_str_a = toggle_bit(tmp, nset, ip);
tmp = toggle_bit(strb, nset, lp);
uint64_t *new_str_b = toggle_bit(tmp, nset, kp);
for (iset = 0; iset < nset; ++iset) {
strs_add[strs_added * 2 * nset + iset] = new_str_a[iset];
strs_add[strs_added * 2 * nset + nset + iset] = new_str_b[iset];
}
free(tmp);
free(new_str_a);
free(new_str_b);
strs_added++;
}
}
// Break statement
// Both sorted streams are below threshold: no later integral can pass
if (aaaa_bbbb_done && aabb_done) {
break;
}
}
free(occsa);
free(occsb);
free(virsa);
free(virsb);
// NOTE(review): this capacity check runs only after all strings for this
// determinant were written, so an overflow is detected after the buffer
// has already been written past its end — consider checking before each
// append. TODO confirm callers always oversize strs_add.
if (strs_added > max_strs_add) {
printf("\nError: Number of selected strings is greater than the size of the buffer array (%ld vs %ld).\n", strs_added, max_strs_add);
exit(EXIT_FAILURE);
}
} // end loop over determinants
free(focka);
free(fockb);
free(holes_a);
free(holes_b);
free(particles_a);
free(particles_b);
// Report how many determinants were appended
strs_add_size[0] = strs_added;
}
// Toggle bit at a specified position
/* Returns a malloc'ed copy of str (nset 64-bit words) with the bit for
 * orbital p flipped. Words are stored most-significant-first, hence the
 * reversed word index. The input string is left untouched; caller frees. */
uint64_t *toggle_bit(uint64_t *str, int nset, int p) {
	uint64_t *out = (uint64_t *) malloc(sizeof(uint64_t) * nset);
	memcpy(out, str, sizeof(uint64_t) * nset);
	out[nset - 1 - p / 64] ^= 1ULL << (p % 64);
	return out;
}
// Compares two string indices and determines the order
/* Lexicographic comparison of two nset-word strings (most-significant word
 * first): returns 1 if strs_i > strs_j, -1 if strs_i < strs_j, 0 if equal. */
int order(uint64_t *strs_i, uint64_t *strs_j, int nset) {
	int k;
	for (k = 0; k < nset; ++k) {
		if (strs_i[k] != strs_j[k])
			return (strs_i[k] > strs_j[k]) ? 1 : -1;
	}
	return 0;
}
// Recursive quick sort of string array indices
/*
 * Quicksorts the indices idx[0..nstrs_[0]) by the order() of the nset-word
 * strings they reference, writing the sorted indices to new_idx. Indices
 * whose string compares EQUAL to the pivot (other than the pivot itself)
 * are placed in neither partition, so the output is sorted AND
 * deduplicated; nstrs_[0] is updated in place to the (possibly smaller)
 * unique count. Used by argunique().
 */
void qsort_idx(uint64_t *strs, uint64_t *idx, uint64_t *nstrs_, int nset, uint64_t *new_idx) {
size_t p;
uint64_t nstrs = nstrs_[0];
if (nstrs <= 1) {
// Base case: zero or one index is already sorted and unique
for (p = 0; p < nstrs; ++p) new_idx[p] = idx[p];
}
else {
// Last index is the pivot
uint64_t ref = idx[nstrs - 1];
uint64_t *group_lt = malloc(sizeof(uint64_t) * nstrs);
uint64_t *group_gt = malloc(sizeof(uint64_t) * nstrs);
uint64_t group_lt_nstrs = 0;
uint64_t group_gt_nstrs = 0;
for (p = 0; p < (nstrs - 1); ++p) {
uint64_t i = idx[p];
uint64_t *stri = strs + i * nset;
uint64_t *strj = strs + ref * nset;
int c = order(stri, strj, nset);
if (c == -1) {
group_lt[group_lt_nstrs] = i;
group_lt_nstrs++;
}
else if (c == 1) {
group_gt[group_gt_nstrs] = i;
group_gt_nstrs++;
}
// c == 0: duplicate of the pivot, intentionally dropped
}
uint64_t *new_idx_lt = malloc(sizeof(uint64_t) * group_lt_nstrs);
uint64_t *new_idx_gt = malloc(sizeof(uint64_t) * group_gt_nstrs);
// Recurse; the counts may shrink further as duplicates are removed
qsort_idx(strs, group_lt, &group_lt_nstrs, nset, new_idx_lt);
qsort_idx(strs, group_gt, &group_gt_nstrs, nset, new_idx_gt);
nstrs = group_lt_nstrs + group_gt_nstrs + 1;
nstrs_[0] = nstrs;
// Concatenate: sorted-less, pivot, sorted-greater
for (p = 0; p < nstrs; ++p) {
if (p < group_lt_nstrs) new_idx[p] = new_idx_lt[p];
else if (p == group_lt_nstrs) new_idx[p] = ref;
else new_idx[p] = new_idx_gt[p - group_lt_nstrs - 1];
}
free(new_idx_lt);
free(new_idx_gt);
free(group_lt);
free(group_gt);
}
}
// Helper function to perform recursive sort (nset is a total number of strings)
/* Sorts and deduplicates the first nstrs_[0] strings by index: the unique,
 * sorted indices are written to sort_idx and nstrs_[0] is updated in place.
 * (Despite the legacy comment above, nset is forwarded to qsort_idx/order as
 * the number of 64-bit words per string.) */
void argunique(uint64_t *strs, uint64_t *sort_idx, uint64_t *nstrs_, int nset) {
	uint64_t n = nstrs_[0];
	uint64_t *seed = (uint64_t *) malloc(sizeof(uint64_t) * n);
	uint64_t p;
	for (p = 0; p < n; ++p)
		seed[p] = p;   /* identity permutation as the starting order */
	qsort_idx(strs, seed, nstrs_, nset, sort_idx);
	free(seed);
}
// Computes C' = S2 * C in the selected CI basis
/*
 * Applies the total-spin operator S^2 to civec and accumulates into ci1.
 * Only two kinds of determinant pairs contribute: diagonal terms, and
 * alpha,beta->alpha,beta double excitations that swap a spin pair
 * (i == b and j == a). Same determinant layout as contract_h_c
 * (2*nset words per determinant: alpha string, then beta string).
 */
void contract_ss_c(int norb, int neleca, int nelecb, uint64_t *strs, double *civec, uint64_t ndet, double *ci1) {
// ts[ip]: excitation level relative to determinant 0, used to prescreen pairs
int *ts = malloc(sizeof(int) * ndet);
#pragma omp parallel default(none) shared(norb, neleca, nelecb, strs, civec, ndet, ci1, ts)
{
size_t ip, jp, p, q;
int nset = (norb + 63) / 64;
// Calculate excitation level for prescreening
ts[0] = 0;
uint64_t *str1a = strs;
uint64_t *str1b = strs + nset;
#pragma omp for schedule(static)
for (ip = 1; ip < ndet; ++ip) {
uint64_t *stria = strs + ip * 2 * nset;
uint64_t *strib = strs + ip * 2 * nset + nset;
ts[ip] = (n_excitations(stria, str1a, nset) + n_excitations(strib, str1b, nset));
}
// Loop over pairs of determinants (each thread owns a range of ip)
#pragma omp for schedule(static)
for (ip = 0; ip < ndet; ++ip) {
for (jp = 0; jp < ndet; ++jp) {
if (abs(ts[ip] - ts[jp]) < 3) {
uint64_t *stria = strs + ip * 2 * nset;
uint64_t *strib = strs + ip * 2 * nset + nset;
uint64_t *strja = strs + jp * 2 * nset;
uint64_t *strjb = strs + jp * 2 * nset + nset;
int n_excit_a = n_excitations(stria, strja, nset);
int n_excit_b = n_excitations(strib, strjb, nset);
// Diagonal term
if (ip == jp) {
// <I|S^2|I> = N/2 + Ms^2 - (number of doubly occupied orbitals)
double apb = (double) (neleca + nelecb);
double amb = (double) (neleca - nelecb);
double prefactor = apb / 2.0 + amb * amb / 4.0;
int *occsa = compute_occ_list(stria, nset, norb, neleca);
int *occsb = compute_occ_list(strib, nset, norb, nelecb);
for (p = 0; p < neleca; ++p) {
int pa = occsa[p];
for (q = 0; q < nelecb; ++q) {
int qb = occsb[q];
if (pa == qb) prefactor -= 1.0;
}
}
ci1[ip] += prefactor * civec[ip];
free(occsa);
free(occsb);
}
// Double excitation
else if ((n_excit_a + n_excit_b) == 2) {
int i, j, a, b;
// alpha,beta->alpha,beta
if (n_excit_a == n_excit_b) {
int *ia = get_single_excitation(stria, strja, nset);
int *jb = get_single_excitation(strib, strjb, nset);
i = ia[0]; a = ia[1]; j = jb[0]; b = jb[1];
// Only spin-flip pairs (alpha hole = beta particle and vice versa)
// contribute to S^2
if (i == b && j == a) {
double sign = compute_cre_des_sign(a, i, stria, nset);
sign *= compute_cre_des_sign(b, j, strib, nset);
ci1[ip] -= sign * civec[jp];
}
free(ia);
free(jb);
}
}
} // end if over ts
} // end loop over jp
} // end loop over ip
} // end omp
free(ts);
}
// Computes C' = H * C and C'' = S2 * C simultaneously in the selected CI basis
/*
 * Fused version of contract_h_c and contract_ss_c: one pass over determinant
 * pairs accumulates H*C into ci1 and S^2*C into ci2, sharing the excitation
 * analysis between the two operators. Arguments and determinant layout are
 * identical to contract_h_c (ci2 is the extra S^2 output vector).
 */
void contract_h_c_ss_c(double *h1, double *eri, int norb, int neleca, int nelecb, uint64_t *strs, double *civec, double *hdiag, uint64_t ndet, double *ci1, double *ci2) {
// ts[ip]: excitation level relative to determinant 0, used to prescreen pairs
int *ts = malloc(sizeof(int) * ndet);
#pragma omp parallel default(none) shared(h1, eri, norb, neleca, nelecb, strs, civec, hdiag, ndet, ci1, ci2, ts)
{
size_t ip, jp, p, q;
int nset = (norb + 63) / 64;
// Calculate excitation level for prescreening
ts[0] = 0;
uint64_t *str1a = strs;
uint64_t *str1b = strs + nset;
#pragma omp for schedule(static)
for (ip = 1; ip < ndet; ++ip) {
uint64_t *stria = strs + ip * 2 * nset;
uint64_t *strib = strs + ip * 2 * nset + nset;
ts[ip] = (n_excitations(stria, str1a, nset) + n_excitations(strib, str1b, nset));
}
// Loop over pairs of determinants (each thread owns a range of ip, so
// the ci1/ci2 accumulations are race-free)
#pragma omp for schedule(static)
for (ip = 0; ip < ndet; ++ip) {
for (jp = 0; jp < ndet; ++jp) {
if (abs(ts[ip] - ts[jp]) < 3) {
uint64_t *stria = strs + ip * 2 * nset;
uint64_t *strib = strs + ip * 2 * nset + nset;
uint64_t *strja = strs + jp * 2 * nset;
uint64_t *strjb = strs + jp * 2 * nset + nset;
int n_excit_a = n_excitations(stria, strja, nset);
int n_excit_b = n_excitations(strib, strjb, nset);
// Diagonal term
if (ip == jp) {
ci1[ip] += hdiag[ip] * civec[ip];
// S^2: N/2 + Ms^2 - (number of doubly occupied orbitals)
double apb = (double) (neleca + nelecb);
double amb = (double) (neleca - nelecb);
double prefactor = apb / 2.0 + amb * amb / 4.0;
int *occsa = compute_occ_list(stria, nset, norb, neleca);
int *occsb = compute_occ_list(strib, nset, norb, nelecb);
for (p = 0; p < neleca; ++p) {
int pa = occsa[p];
for (q = 0; q < nelecb; ++q) {
int qb = occsb[q];
if (pa == qb) prefactor -= 1.0;
}
}
ci2[ip] += prefactor * civec[ip];
free(occsa);
free(occsb);
}
// Single excitation (contributes to H only, not S^2)
else if ((n_excit_a + n_excit_b) == 1) {
int *ia;
// alpha->alpha
if (n_excit_b == 0) {
ia = get_single_excitation(stria, strja, nset);
int i = ia[0];
int a = ia[1];
double sign = compute_cre_des_sign(a, i, stria, nset);
int *occsa = compute_occ_list(stria, nset, norb, neleca);
int *occsb = compute_occ_list(strib, nset, norb, nelecb);
// fai = h_ai plus Coulomb/exchange contributions from occupied orbitals
double fai = h1[a * norb + i];
for (p = 0; p < neleca; ++p) {
int k = occsa[p];
int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i;
int kiak = k * norb * norb * norb + i * norb * norb + a * norb + k;
fai += eri[kkai] - eri[kiak];
}
for (p = 0; p < nelecb; ++p) {
int k = occsb[p];
int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i;
fai += eri[kkai];
}
if (fabs(fai) > 1.0E-14) ci1[ip] += sign * fai * civec[jp];
free(occsa);
free(occsb);
}
// beta->beta
else if (n_excit_a == 0) {
ia = get_single_excitation(strib, strjb, nset);
int i = ia[0];
int a = ia[1];
double sign = compute_cre_des_sign(a, i, strib, nset);
int *occsa = compute_occ_list(stria, nset, norb, neleca);
int *occsb = compute_occ_list(strib, nset, norb, nelecb);
double fai = h1[a * norb + i];
for (p = 0; p < nelecb; ++p) {
int k = occsb[p];
int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i;
int kiak = k * norb * norb * norb + i * norb * norb + a * norb + k;
fai += eri[kkai] - eri[kiak];
}
for (p = 0; p < neleca; ++p) {
int k = occsa[p];
int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i;
fai += eri[kkai];
}
if (fabs(fai) > 1.0E-14) ci1[ip] += sign * fai * civec[jp];
free(occsa);
free(occsb);
}
// ia was assigned in exactly one of the two branches above
free(ia);
}
// Double excitation
else if ((n_excit_a + n_excit_b) == 2) {
int i, j, a, b;
// alpha,alpha->alpha,alpha
if (n_excit_b == 0) {
int *ijab = get_double_excitation(stria, strja, nset);
i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3];
double v, sign;
int ajbi = a * norb * norb * norb + j * norb * norb + b * norb + i;
int aibj = a * norb * norb * norb + i * norb * norb + b * norb + j;
// Pairing of holes with particles depends on their relative order
if (a > j || i > b) {
v = eri[ajbi] - eri[aibj];
sign = compute_cre_des_sign(b, i, stria, nset);
sign *= compute_cre_des_sign(a, j, stria, nset);
}
else {
v = eri[aibj] - eri[ajbi];
sign = compute_cre_des_sign(b, j, stria, nset);
sign *= compute_cre_des_sign(a, i, stria, nset);
}
if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp];
free(ijab);
}
// beta,beta->beta,beta
else if (n_excit_a == 0) {
int *ijab = get_double_excitation(strib, strjb, nset);
i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3];
double v, sign;
int ajbi = a * norb * norb * norb + j * norb * norb + b * norb + i;
int aibj = a * norb * norb * norb + i * norb * norb + b * norb + j;
if (a > j || i > b) {
v = eri[ajbi] - eri[aibj];
sign = compute_cre_des_sign(b, i, strib, nset);
sign *= compute_cre_des_sign(a, j, strib, nset);
}
else {
v = eri[aibj] - eri[ajbi];
sign = compute_cre_des_sign(b, j, strib, nset);
sign *= compute_cre_des_sign(a, i, strib, nset);
}
if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp];
free(ijab);
}
// alpha,beta->alpha,beta (the only double that also contributes to S^2)
else {
int *ia = get_single_excitation(stria, strja, nset);
int *jb = get_single_excitation(strib, strjb, nset);
i = ia[0]; a = ia[1]; j = jb[0]; b = jb[1];
double v = eri[a * norb * norb * norb + i * norb * norb + b * norb + j];
double sign = compute_cre_des_sign(a, i, stria, nset);
sign *= compute_cre_des_sign(b, j, strib, nset);
if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp];
// S^2: spin-flip pairs only
if (i == b && j == a) {
ci2[ip] -= sign * civec[jp];
}
free(ia);
free(jb);
}
}
} // end if over ts
} // end loop over jp
} // end loop over ip
} // end omp
free(ts);
}
// 2-RDM is sorted in physicists notation: gamma_pqsr=<\Phi|a_p^dag a_q^dag a_r a_s|\Phi>
void compute_rdm12s(int norb, int neleca, int nelecb, uint64_t *strs, double *civec, uint64_t ndet, double *rdm1a, double *rdm1b, double *rdm2aa, double *rdm2ab, double *rdm2bb) {
#pragma omp parallel default(none) shared(norb, neleca, nelecb, strs, civec, ndet, rdm1a, rdm1b, rdm2aa, rdm2ab, rdm2bb)
{
size_t ip, jp, p, q, r, s;
int nset = (norb + 63) / 64;
double ci_sq = 0.0;
double *rdm1a_private = malloc(sizeof(double) * norb * norb);
double *rdm1b_private = malloc(sizeof(double) * norb * norb);
double *rdm2aa_private = malloc(sizeof(double) * norb * norb * norb * norb);
double *rdm2ab_private = malloc(sizeof(double) * norb * norb * norb * norb);
double *rdm2bb_private = malloc(sizeof(double) * norb * norb * norb * norb);
for (p = 0; p < norb * norb; ++p) {
rdm1a_private[p] = 0.0;
rdm1b_private[p] = 0.0;
}
for (p = 0; p < norb * norb * norb * norb; ++p) {
rdm2aa_private[p] = 0.0;
rdm2ab_private[p] = 0.0;
rdm2bb_private[p] = 0.0;
}
// Loop over pairs of determinants
#pragma omp for schedule(static)
for (ip = 0; ip < ndet; ++ip) {
for (jp = 0; jp < ndet; ++jp) {
uint64_t *stria = strs + ip * 2 * nset;
uint64_t *strib = strs + ip * 2 * nset + nset;
uint64_t *strja = strs + jp * 2 * nset;
uint64_t *strjb = strs + jp * 2 * nset + nset;
int n_excit_a = n_excitations(stria, strja, nset);
int n_excit_b = n_excitations(strib, strjb, nset);
// Diagonal term
if (ip == jp) {
int *occsa = compute_occ_list(stria, nset, norb, neleca);
int *occsb = compute_occ_list(strib, nset, norb, nelecb);
ci_sq = civec[ip] * civec[ip];
// Diagonal rdm1_aa
for (p = 0; p < neleca; ++p) {
int k = occsa[p];
int kk = k * norb + k;
rdm1a_private[kk] += ci_sq;
}
// Diagonal rdm1_bb
for (p = 0; p < nelecb; ++p) {
int k = occsb[p];
int kk = k * norb + k;
rdm1b_private[kk] += ci_sq;
}
// Diagonal rdm2_aaaa
for (p = 0; p < neleca; ++p) {
int k = occsa[p];
for (q = 0; q < neleca; ++q) {
int j = occsa[q];
int kjkj = k * norb * norb * norb + j * norb * norb + k * norb + j;
int kjjk = k * norb * norb * norb + j * norb * norb + j * norb + k;
rdm2aa_private[kjkj] += ci_sq;
rdm2aa_private[kjjk] -= ci_sq;
}
// Diagonal rdm2_abab
for (q = 0; q < nelecb; ++q) {
int j = occsb[q];
int kjkj = k * norb * norb * norb + j * norb * norb + k * norb + j;
rdm2ab_private[kjkj] += ci_sq;
}
}
// Diagonal rdm2_bbbb
for (p = 0; p < nelecb; ++p) {
int k = occsb[p];
for (q = 0; q < nelecb; ++q) {
int j = occsb[q];
int kjkj = k * norb * norb * norb + j * norb * norb + k * norb + j;
int kjjk = k * norb * norb * norb + j * norb * norb + j * norb + k;
rdm2bb_private[kjkj] += ci_sq;
rdm2bb_private[kjjk] -= ci_sq;
}
}
free(occsa);
free(occsb);
}
// Single excitation
else if ((n_excit_a + n_excit_b) == 1) {
int *ia;
// alpha->alpha
if (n_excit_b == 0) {
ia = get_single_excitation(stria, strja, nset);
int i = ia[0];
int a = ia[1];
double sign = compute_cre_des_sign(a, i, stria, nset);
int *occsa = compute_occ_list(stria, nset, norb, neleca);
int *occsb = compute_occ_list(strib, nset, norb, nelecb);
ci_sq = sign * civec[ip] * civec[jp];
// rdm1_aa
rdm1a_private[a * norb + i] += ci_sq;
// rdm2_aaaa
for (p = 0; p < neleca; ++p) {
int k = occsa[p];
int akik = a * norb * norb * norb + k * norb * norb + i * norb + k;
int akki = a * norb * norb * norb + k * norb * norb + k * norb + i;
int kaki = k * norb * norb * norb + a * norb * norb + k * norb + i;
int kaik = k * norb * norb * norb + a * norb * norb + i * norb + k;
rdm2aa_private[akik] += ci_sq;
rdm2aa_private[akki] -= ci_sq;
rdm2aa_private[kaik] -= ci_sq;
rdm2aa_private[kaki] += ci_sq;
}
// rdm2_abab
for (p = 0; p < nelecb; ++p) {
int k = occsb[p];
int akik = a * norb * norb * norb + k * norb * norb + i * norb + k;
rdm2ab_private[akik] += ci_sq;
}
free(occsa);
free(occsb);
}
// beta->beta
else if (n_excit_a == 0) {
ia = get_single_excitation(strib, strjb, nset);
int i = ia[0];
int a = ia[1];
double sign = compute_cre_des_sign(a, i, strib, nset);
int *occsa = compute_occ_list(stria, nset, norb, neleca);
int *occsb = compute_occ_list(strib, nset, norb, nelecb);
ci_sq = sign * civec[ip] * civec[jp];
// rdm1_bb
rdm1b_private[a * norb + i] += ci_sq;
// rdm2_bbbb
for (p = 0; p < nelecb; ++p) {
int k = occsb[p];
int akik = a * norb * norb * norb + k * norb * norb + i * norb + k;
int akki = a * norb * norb * norb + k * norb * norb + k * norb + i;
int kaki = k * norb * norb * norb + a * norb * norb + k * norb + i;
int kaik = k * norb * norb * norb + a * norb * norb + i * norb + k;
rdm2bb_private[akik] += ci_sq;
rdm2bb_private[akki] -= ci_sq;
rdm2bb_private[kaik] -= ci_sq;
rdm2bb_private[kaki] += ci_sq;
}
// rdm2_abab
for (p = 0; p < neleca; ++p) {
int k = occsa[p];
int kaki = k * norb * norb * norb + a * norb * norb + k * norb + i;
rdm2ab_private[kaki] += ci_sq;
}
free(occsa);
free(occsb);
}
free(ia);
}
// Double excitation
else if ((n_excit_a + n_excit_b) == 2) {
int i, j, a, b;
// rdm2_aaaa
if (n_excit_b == 0) {
int *ijab = get_double_excitation(stria, strja, nset);
i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3];
double sign;
int baij = b * norb * norb * norb + a * norb * norb + i * norb + j;
int baji = b * norb * norb * norb + a * norb * norb + j * norb + i;
int abij = a * norb * norb * norb + b * norb * norb + i * norb + j;
int abji = a * norb * norb * norb + b * norb * norb + j * norb + i;
if (a > j || i > b) {
sign = compute_cre_des_sign(b, i, stria, nset);
sign *= compute_cre_des_sign(a, j, stria, nset);
ci_sq = sign * civec[ip] * civec[jp];
rdm2aa_private[baij] += ci_sq;
rdm2aa_private[baji] -= ci_sq;
rdm2aa_private[abij] -= ci_sq;
rdm2aa_private[abji] += ci_sq;
}
else {
sign = compute_cre_des_sign(b, j, stria, nset);
sign *= compute_cre_des_sign(a, i, stria, nset);
ci_sq = sign * civec[ip] * civec[jp];
rdm2aa_private[baij] -= ci_sq;
rdm2aa_private[baji] += ci_sq;
rdm2aa_private[abij] += ci_sq;
rdm2aa_private[abji] -= ci_sq;
}
free(ijab);
}
// rdm2_bbbb
else if (n_excit_a == 0) {
int *ijab = get_double_excitation(strib, strjb, nset);
i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3];
double v, sign;
int baij = b * norb * norb * norb + a * norb * norb + i * norb + j;
int baji = b * norb * norb * norb + a * norb * norb + j * norb + i;
int abij = a * norb * norb * norb + b * norb * norb + i * norb + j;
int abji = a * norb * norb * norb + b * norb * norb + j * norb + i;
if (a > j || i > b) {
sign = compute_cre_des_sign(b, i, strib, nset);
sign *= compute_cre_des_sign(a, j, strib, nset);
ci_sq = sign * civec[ip] * civec[jp];
rdm2bb_private[baij] += ci_sq;
rdm2bb_private[baji] -= ci_sq;
rdm2bb_private[abij] -= ci_sq;
rdm2bb_private[abji] += ci_sq;
}
else {
sign = compute_cre_des_sign(b, j, strib, nset);
sign *= compute_cre_des_sign(a, i, strib, nset);
ci_sq = sign * civec[ip] * civec[jp];
rdm2bb_private[baij] -= ci_sq;
rdm2bb_private[baji] += ci_sq;
rdm2bb_private[abij] += ci_sq;
rdm2bb_private[abji] -= ci_sq;
}
free(ijab);
}
// rdm2_abab
else {
int *ia = get_single_excitation(stria, strja, nset);
int *jb = get_single_excitation(strib, strjb, nset);
i = ia[0]; a = ia[1]; j = jb[0]; b = jb[1];
double sign = compute_cre_des_sign(a, i, stria, nset);
sign *= compute_cre_des_sign(b, j, strib, nset);
ci_sq = sign * civec[ip] * civec[jp];
int abij = a * norb * norb * norb + b * norb * norb + i * norb + j;
rdm2ab_private[abij] += ci_sq;
free(ia);
free(jb);
}
}
} // end loop over jp
} // end loop over ip
#pragma omp critical
{
for (p = 0; p < norb * norb; ++p) {
rdm1a[p] += rdm1a_private[p];
rdm1b[p] += rdm1b_private[p];
}
for (p = 0; p < norb * norb * norb * norb; ++p) {
rdm2aa[p] += rdm2aa_private[p];
rdm2ab[p] += rdm2ab_private[p];
rdm2bb[p] += rdm2bb_private[p];
}
}
free(rdm1a_private);
free(rdm1b_private);
free(rdm2aa_private);
free(rdm2ab_private);
free(rdm2bb_private);
} // end omp
}
|
test.c |
#include <stdio.h>
#include <omp.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define HOST_MAX_TEAMS 128
#define TRIALS (1)
#define N (992)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
/*
 * Exercise the clauses of "#pragma omp target teams": if, device, map,
 * num_teams, thread_limit, private and firstprivate.  Each sub-test zeroes
 * its output array, runs TRIALS offload regions, verifies the results on
 * the host, and prints "Succeeded" or "Failed".
 *
 * check_offloading()/INIT()/ZERO() come from the project's test utilities
 * (../utilities/check.h, ../utilities/utilities.h).
 *
 * Fixes vs. the previous version:
 *  - pA was malloc'd but never freed (leak), and the malloc result was
 *    used without a NULL check;
 *  - 'fail' was not reset before the device/map/num_teams tests, so one
 *    failure made every later test report "Failed" (tests 5-8 already
 *    reset it); now every test resets it;
 *  - a long commented-out "simulated distribute" test was removed.
 */
int main(void) {
  check_offloading();

  double A[N], B[N], C[N], D[N], E[N];
  double *pA = (double *) malloc(N*sizeof(double));
  int fail = 0;

  /* Fix: do not dereference a failed allocation. */
  if (!pA) {
    printf("Failed\n");
    return 1;
  }

  INIT();

  //
  // Test: if clause
  //
  ZERO(A);
  int num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512;
  // the number of teams started is implementation dependent
  int actual_teams = -1;
  for (int t = 0 ; t < TRIALS ; t++) {
    // if(0) forces the region to run on the host
    #pragma omp target teams if(0) map(tofrom:actual_teams)
    {
      if (omp_get_team_num() == 0)
        actual_teams = omp_get_num_teams();
      A[omp_get_team_num()] += omp_get_team_num();
    }
  }
  for (int i = 0 ; i < actual_teams ; i++)
    if (A[i] != i*TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]);
      fail = 1;
    }
  if (fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: device clause
  //
  ZERO(A);
  fail = 0;  // fix: reset per-test status (tests 5-8 already did this)
  num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512;
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target teams device(0) map(tofrom:actual_teams)
    {
      if (omp_get_team_num() == 0)
        actual_teams = omp_get_num_teams();
      A[omp_get_team_num()] += omp_get_team_num();
    }
  }
  for (int i = 0 ; i < actual_teams ; i++)
    if (A[i] != i*TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]);
      fail = 1;
    }
  if (fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: map clause
  //
  ZERO(pA);
  fail = 0;  // fix: reset per-test status
  num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512;
  for (int t = 0 ; t < TRIALS ; t++) {
    // heap array must be mapped explicitly
    #pragma omp target teams map(pA[:N]) map(tofrom:actual_teams)
    {
      if (omp_get_team_num() == 0)
        actual_teams = omp_get_num_teams();
      pA[omp_get_team_num()] += omp_get_team_num();
    }
  }
  for (int i = 0 ; i < actual_teams ; i++)
    if (pA[i] != i*TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, pA[i]);
      fail = 1;
    }
  if (fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: num_teams and omp_get_team_num()
  //
  ZERO(A);
  fail = 0;  // fix: reset per-test status
  num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512;
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target teams num_teams(num_teams)
    {
      A[omp_get_team_num()] += omp_get_team_num();
    }
  }
  for (int i = 0 ; i < num_teams ; i++)
    if (A[i] != i*TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]);
      fail = 1;
    }
  if (fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: thread_limit and omp_get_thread_num()
  //
  ZERO(A);
  fail = 0;
  int num_threads = omp_is_initial_device() ? HOST_MAX_TEAMS : 256;
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target teams num_teams(1) thread_limit(num_threads)
    #pragma omp parallel
    {
      int tid = omp_get_thread_num();
      A[tid] += (double) tid;
    }
  }
  for (int i = 0 ; i < num_threads ; i++)
    if (A[i] != i*TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]);
      fail = 1;
    }
  if (fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: if statement in teams region
  //
  ZERO(A);
  fail = 0;
  num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512;
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target teams num_teams(num_teams)
    {
      // even-numbered teams add 1, odd-numbered teams add 2
      if (omp_get_team_num() % 2 == 0) {
        int teid = omp_get_team_num();
        A[teid] += (double) 1;
      }
      else {
        int teid = omp_get_team_num();
        A[teid] += (double) 2;
      }
    }
  }
  for (int i = 0 ; i < num_teams ; i++) {
    if (i % 2 == 0) {
      if (A[i] != TRIALS) {
        printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
        fail = 1;
      }
    } else
      if (A[i] != 2*TRIALS) {
        printf("Error at %d, h = %lf, d = %lf\n", i, (double) 2*TRIALS, A[i]);
        fail = 1;
      }
  }
  if (fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: private
  //
  ZERO(A);
  fail = 0;
  int a = 10;
  num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 256;
  for (int t = 0 ; t < TRIALS ; t++) {
    // each team gets its own (uninitialized) copy of 'a'
    #pragma omp target teams num_teams(num_teams) private(a)
    {
      a = omp_get_team_num();
      A[omp_get_team_num()] += a;
    }
  }
  for (int i = 0 ; i < num_teams ; i++)
    if (A[i] != i*TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]);
      fail = 1;
    }
  if (fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: firstprivate
  //
  ZERO(A);
  fail = 0;
  a = 10;
  num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 256;
  for (int t = 0 ; t < TRIALS ; t++) {
    // each team's private copy of 'a' starts at the host value 10
    #pragma omp target teams num_teams(num_teams) firstprivate(a)
    {
      a += omp_get_team_num();
      A[omp_get_team_num()] += a;
    }
  }
  for (int i = 0 ; i < num_teams ; i++)
    if (A[i] != 10+i*TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) (10+i*TRIALS), A[i]);
      fail = 1;
    }
  if (fail) printf("Failed\n");
  else printf("Succeeded\n");

  free(pA);  // fix: pA was leaked
  return 0;
}
|
GB_unop__sinh_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__sinh_fp64_fp64)
// op(A') function: GB (_unop_tran__sinh_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = sinh (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = sinh (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = sinh (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SINH || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator entrywise: Cx [p] = sinh (Ax [p]) for every
// entry present in A.  Cx and Ax may be aliased.  Ab is A->b when A is
// bitmap, and NULL otherwise (all entries present).
GrB_Info GB (_unop_apply__sinh_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    if (Ab != NULL)
    {
        // bitmap case: apply the op only where an entry is present.
        // A->b has already been memcpy'd into C->b by the caller.
        int64_t p ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = sinh (Ax [p]) ;
            }
        }
    }
    else
    {
        // dense case: every position holds an entry
        int64_t p ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = sinh (Ax [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = sinh (A'): transpose A, typecast, and apply the sinh operator.
// All the work is done by the shared template GB_unop_transpose.c,
// specialized through the GB_* macros defined above in this file.
GrB_Info GB (_unop_tran__sinh_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // workspace array used by the template
    const int64_t *restrict A_slice,    // how A is sliced across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__identity_int16_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int16_uint8
// op(A') function: GB_tran__identity_int16_uint8
// C type: int16_t
// A type: uint8_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = (int16_t) Ax: apply the IDENTITY operator with a uint8 -> int16
// typecast, entry by entry.  The GB_CAST_OP macro is written out inline
// here so the per-entry steps are explicit.
GrB_Info GB_unop__identity_int16_uint8
(
    int16_t *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint8_t aij = Ax [p] ;          // aij = Ax [p]
        int16_t z = (int16_t) aij ;     // cast to the type of C
        Cx [p] = z ;                    // identity op: Cx [p] = z
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (int16_t) A': transpose A, typecast uint8 -> int16, and apply the
// IDENTITY operator.  The work is done by the shared template
// GB_unaryop_transpose.c, specialized through the GB_* macros above.
GrB_Info GB_tran__identity_int16_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,                // row counts computed in phase 1
    GBI_single_iterator Iter,           // iterator over the vectors of A
    const int64_t *restrict A_slice,    // how A is sliced across naslice tasks
    int naslice
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__second_fc64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_fc64)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__second_fc64)
// A.*B function (eWiseMult): GB (_AemultB_03__second_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_fc64)
// A*D function (colscale): GB (_AxD__second_fc64)
// D*A function (rowscale): GB (_DxB__second_fc64)
// C+=B function (dense accum): GB (_Cdense_accumB__second_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__second_fc64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_fc64)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB (_bind2nd__second_fc64)
// C=A'+scalar GB (_bind2nd_tran__second_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = bij
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
GxB_FC64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_FC64 || GxB_NO_SECOND_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense, with no mask and no accumulator.
// With the SECOND operator, cij = bij for every entry (see GB_BINOP above).
GrB_Info GB (_Cdense_ewise3_noaccum__second_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// SECOND operator (cij = bij wherever B has an entry).  B has been sliced
// into B_ntasks tasks (B_ek_slicing) run on B_nthreads threads.
GrB_Info GB (_Cdense_accumB__second_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C, using the SECOND
// operator.  p_bwork points at the scalar, passed as untyped GB_void.
//
// Fix: the generated code had two "return (GrB_SUCCESS)" statements, the
// second one unreachable (the first returned from inside the inner scope).
// The function now has a single exit point after the inner scope.
GrB_Info GB (_Cdense_accumb__second_fc64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC64_t
        GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.  With the
// SECOND operator (GB_BINOP: z = y) each result takes D's diagonal value.
GrB_Info GB (_AxD__second_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,  // pattern flags: values unused if true
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.  With the SECOND
// operator (GB_BINOP: z = y) each result takes B's value.
GrB_Info GB (_DxB__second_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,  // pattern flags: values unused if true
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B.  The pattern of C is the set union of
// A and B; where both entries are present the SECOND operator gives
// cij = bij.  The actual work is in the shared GB_add_template.c.
GrB_Info GB (_AaddB__second_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,                 // optional mask (may be NULL)
    const bool Mask_struct,             // if true, use the mask structurally
    const bool Mask_comp,               // if true, complement the mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,                // NOTE(review): presumably C->h shares M->h -- see template
    const int64_t *restrict C_to_M,     // mapping of C's vectors to M's
    const int64_t *restrict C_to_A,     // mapping of C's vectors to A's
    const int64_t *restrict C_to_B,     // mapping of C's vectors to B's
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix slicing workspaces, allocated/used inside the template
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B.  The pattern of C is the set
// intersection of A and B; the SECOND operator gives cij = bij.
GrB_Info GB (_AemultB_01__second_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,                 // optional mask (may be NULL)
    const bool Mask_struct,             // if true, use the mask structurally
    const bool Mask_comp,               // if true, complement the mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// For SECOND, GB_BINOP_FLIP is 0 (see above), so only the non-flipped
// branch of the preprocessor conditional below is actually compiled.
GrB_Info GB (_AemultB_02__second_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,                 // optional mask (may be NULL)
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,                  // if true, compute z = fmult (y, x)
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full.  The mask is sliced (M_ek_slicing) across M_ntasks tasks.
GrB_Info GB (_AemultB_03__second_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,             // if true, use the mask structurally
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__second_fc64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,                 // optional mask (may be NULL)
    const bool Mask_struct,             // if true, use the mask structurally
    const bool Mask_comp,               // if true, complement the mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC64_t bij = Bx [p] ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y) with the scalar bound as the second argument.  Since the
// operator is SECOND, every result is simply the scalar y; the "; ;" below
// is the expanded (empty) GB_GETA of this generated kernel -- Ax is never
// read.  Ab is A->b when A is bitmap (GBB skips absent entries), else NULL.
GrB_Info GB (_bind2nd__second_fc64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,     // points at the bound GxB_FC64_t scalar
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
    GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        ; ;
        Cx [p] = y ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = Ax [pA] ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
// C = op (A', y): transpose A and apply cij = second (aij, y), which is
// just y.  Uses GB_unop_transpose.c with the GB_CAST_OP redefined directly
// above this function (it ignores aij and stores y).
GrB_Info GB (_bind2nd_tran__second_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,             // points at the bound GxB_FC64_t scalar
    int64_t *restrict *Workspaces,      // workspace array used by the template
    const int64_t *restrict A_slice,    // how A is sliced across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
bt_hash_type_64.c | /*
* This software is Copyright (c) 2015 Sayantan Datta <std2048 at gmail dot com>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
*/
#ifdef HAVE_OPENCL
#include <stdlib.h>
#include <stdio.h>
#include "bt_hash_types.h"
uint64_t *loaded_hashes_64 = NULL;
unsigned int *hash_table_64 = NULL;
/* Assuming N < 0x7fffffff */
inline unsigned int modulo64_31b(uint64_t a, unsigned int N)
{
return (unsigned int)(a % N);
}
inline uint64_t add64(uint64_t a, unsigned int b)
{
return (a + b);
}
void allocate_ht_64(unsigned int num_loaded_hashes, unsigned int verbosity)
{
unsigned int i;
if (bt_memalign_alloc((void **)&hash_table_64, 16, 2 * hash_table_size * sizeof(unsigned int)))
bt_error("Couldn't allocate hash_table_64.");
for (i = 0; i < hash_table_size; i++)
hash_table_64[i] = hash_table_64[i + hash_table_size] = 0;
total_memory_in_bytes += 2 * hash_table_size * sizeof(unsigned int);
if (verbosity > 2) {
fprintf(stdout, "Hash Table Size %Lf %% of Number of Loaded Hashes.\n", ((long double)hash_table_size / (long double)num_loaded_hashes) * 100.00);
fprintf(stdout, "Hash Table Size(in GBs):%Lf\n", ((long double)2.0 * hash_table_size * sizeof(unsigned int)) / ((long double)1024 * 1024 * 1024));
}
}
/* Hash-table index for the hash at hash_location, probed at the given
 * offset: (loaded_hashes_64[hash_location] + offset) mod hash_table_size. */
inline unsigned int calc_ht_idx_64(unsigned int hash_location, unsigned int offset)
{
	uint64_t probed = add64(loaded_hashes_64[hash_location], offset);
	return modulo64_31b(probed, hash_table_size);
}
/* Nonzero iff the slot at hash_table_idx is occupied (either 32-bit half
 * of the stored hash is nonzero). */
inline unsigned int zero_check_ht_64(unsigned int hash_table_idx)
{
	if (hash_table_64[hash_table_idx])
		return 1;
	return (hash_table_64[hash_table_idx + hash_table_size] != 0);
}
/* Store the 64-bit hash at hash_location into the split table: low word
 * in the first half of the array, high word in the second half. */
inline void assign_ht_64(unsigned int hash_table_idx, unsigned int hash_location)
{
	uint64_t hash = loaded_hashes_64[hash_location];
	unsigned int lo = (unsigned int)(hash & 0xffffffff);
	unsigned int hi = (unsigned int)(hash >> 32);
	hash_table_64[hash_table_idx] = lo;
	hash_table_64[hash_table_idx + hash_table_size] = hi;
}
/* Clear both 32-bit halves of the slot at hash_table_idx (mark it empty). */
inline void assign0_ht_64(unsigned int hash_table_idx)
{
	hash_table_64[hash_table_idx] = 0;
	hash_table_64[hash_table_idx + hash_table_size] = 0;
}
/* Offset that maps the hash at hash_location onto slot hash_table_idx:
 * the inverse of calc_ht_idx_64 (unsigned wraparound is intentional). */
unsigned int get_offset_64(unsigned int hash_table_idx, unsigned int hash_location)
{
	unsigned int base = modulo64_31b(loaded_hashes_64[hash_location], hash_table_size);
	return hash_table_idx + (hash_table_size - base);
}
/*
 * Verify the built offset/hash tables: every loaded hash must land on a
 * slot holding its own value with no collisions, and the number of
 * occupied slots must equal num_loaded_hashes.  Returns 1 on success,
 * 0 on failure.  'error' is 1 while everything is OK (inverted flag used
 * to suppress repeated diagnostics).
 *
 * Fix: hash_table_collisions was leaked on the early-return path taken
 * when the occupied-slot count mismatched; the buffer is now freed before
 * that check.
 *
 * NOTE(review): concurrent writes of 0 to 'error' from multiple threads
 * are benign in practice but formally a data race -- confirm acceptable.
 */
int test_tables_64(unsigned int num_loaded_hashes, OFFSET_TABLE_WORD *offset_table, unsigned int offset_table_size, unsigned int shift64_ot_sz, unsigned int shift128_ot_sz, unsigned int verbosity)
{
	unsigned char *hash_table_collisions;
	unsigned int i, hash_table_idx, error = 1, count = 0;
	uint64_t hash;

	if (bt_calloc((void **)&hash_table_collisions, hash_table_size, sizeof(unsigned char)))
		bt_error("Failed to allocate memory: hash_table_collisions.");

	if (verbosity > 1)
		fprintf(stdout, "\nTesting Tables...");

#if _OPENMP
#pragma omp parallel private(i, hash_table_idx, hash)
#endif
	{
#if _OPENMP
#pragma omp for
#endif
		for (i = 0; i < num_loaded_hashes; i++) {
			hash = loaded_hashes_64[i];
			/* Slot this hash should occupy, per the offset table. */
			hash_table_idx =
				calc_ht_idx_64(i,
					(unsigned int)offset_table[
					modulo64_31b(hash,
					offset_table_size)]);
#if _OPENMP
#pragma omp atomic
#endif
			hash_table_collisions[hash_table_idx]++;

			/* Slot must hold exactly this hash, with no collision. */
			if (error && (hash_table_64[hash_table_idx] != (unsigned int)(hash & 0xffffffff) ||
			    hash_table_64[hash_table_idx + hash_table_size] != (unsigned int)(hash >> 32) ||
			    hash_table_collisions[hash_table_idx] > 1)) {
				fprintf(stderr, "Error building tables: Loaded hash Idx:%u, No. of Collosions:%u\n", i, hash_table_collisions[hash_table_idx]);
				error = 0;
			}
		}
#if _OPENMP
#pragma omp single
#endif
		/* Count occupied slots (done by one thread only). */
		for (hash_table_idx = 0; hash_table_idx < hash_table_size; hash_table_idx++)
			if (zero_check_ht_64(hash_table_idx))
				count++;
#if _OPENMP
#pragma omp barrier
#endif
	}

/* Suppress unused variable warning. */
#define UNUSED(x) (void)(x)
	UNUSED(shift128_ot_sz);
	UNUSED(shift64_ot_sz);

	/* Fix: free before the early return below (was leaked on error). */
	bt_free((void **)&hash_table_collisions);

	if (count != num_loaded_hashes) {
		error = 0;
		fprintf(stderr, "Error!! Tables contains extra or less entries.\n");
		return 0;
	}

	if (error && verbosity > 1)
		fprintf(stdout, "OK\n");

	return 1;
}
/* Convenience accessors over the global loaded_hashes_64[] array.  The
   duplicate-removal code below marks an entry deleted by overwriting its
   hash with 0 (set_zero) and later compacts the array, so a zero hash
   doubles as the "empty" marker (check_zero / check_non_zero). */
#define check_equal(p, q) \
(loaded_hashes_64[p] == loaded_hashes_64[q])
#define check_non_zero(p) \
(loaded_hashes_64[p])
#define check_zero(p) \
(loaded_hashes_64[p] == 0)
#define set_zero(p) \
loaded_hashes_64[p] = 0
/*
 * Second-pass duplicate removal for the entries listed in rehash_list
 * (indices into the global loaded_hashes_64[]).  Entries are re-bucketed
 * into a scratch table of hash_table_size buckets; within a bucket, any
 * entry whose hash equals an earlier entry's hash is marked deleted by
 * zeroing it (set_zero).  Buckets with up to 3 entries keep the earlier
 * entries in store_loc1/store_loc2; larger buckets keep them in a
 * per-bucket dynamically allocated list.
 * Note: unlike remove_duplicates_64, hash_table_size here need not be a
 * power of 2 (plain '%' is used).
 */
static void remove_duplicates_final(unsigned int num_loaded_hashes, unsigned int hash_table_size, unsigned int *rehash_list)
{
	unsigned int i, **hash_location_list, counter;
#define COLLISION_DTYPE unsigned int
	COLLISION_DTYPE *collisions;
	typedef struct {
		unsigned int store_loc1;
		unsigned int store_loc2;
		unsigned int idx_hash_loc_list;
		COLLISION_DTYPE collisions;
		COLLISION_DTYPE iter;
	} hash_table_data;
	hash_table_data *hash_table = NULL;

	if (bt_malloc((void **)&hash_table, hash_table_size * sizeof(hash_table_data)))
		bt_error("Failed to allocate memory: hash_table.");
	if (bt_calloc((void **)&collisions, hash_table_size, sizeof(COLLISION_DTYPE)))
		bt_error("Failed to allocate memory: collisions.");

	/* Pass 1: count how many rehash_list entries land in each bucket. */
	for (i = 0; i < num_loaded_hashes; i++) {
		unsigned int idx = loaded_hashes_64[rehash_list[i]] % hash_table_size;
		collisions[idx]++;
	}

	/* Give every bucket holding more than 3 entries a slot in
	   hash_location_list. */
	counter = 0;
	for (i = 0; i < hash_table_size; i++) {
		hash_table[i].collisions = collisions[i];
		hash_table[i].iter = 0;
		hash_table[i].store_loc1 = hash_table[i].store_loc2 =
			hash_table[i].idx_hash_loc_list = 0xffffffff;
		if (hash_table[i].collisions > 3)
			hash_table[i].idx_hash_loc_list = counter++;
	}
	if (bt_malloc((void **)&hash_location_list, (counter + 1) * sizeof(unsigned int *)))
		bt_error("Failed to allocate memory: hash_location_list.");

	/* A bucket with c > 3 entries needs room for at most c - 1 earlier
	   locations. */
	counter = 0;
	for (i = 0; i < hash_table_size; i++)
		if (collisions[i] > 3) {
			if (bt_malloc((void **)&hash_location_list[counter], (collisions[i] - 1) * sizeof(unsigned int)))
				bt_error("Failed to allocate memory: hash_location_list[counter].");
			counter++;
		}

	/* Pass 2: walk entries in order, zeroing any entry whose hash matches
	   an earlier entry in the same bucket. */
	for (i = 0; i < num_loaded_hashes; i++) {
		unsigned int k = rehash_list[i];
		unsigned int idx = loaded_hashes_64[k] % hash_table_size;

		if (collisions[idx] == 2) {
			if (!hash_table[idx].iter) {
				hash_table[idx].iter++;
				hash_table[idx].store_loc1 = k;
			}
			else if (check_equal(hash_table[idx].store_loc1, k))
				set_zero(k);
		}
		if (collisions[idx] == 3) {
			if (!hash_table[idx].iter) {
				hash_table[idx].iter++;
				hash_table[idx].store_loc1 = k;
			}
			else if (hash_table[idx].iter == 1) {
				if (check_equal(hash_table[idx].store_loc1, k))
					set_zero(k);
				else {
					/* Fix: advance iter when store_loc2 is
					   filled, so the third entry is compared
					   against BOTH stored locations (matches
					   the equivalent branch in
					   remove_duplicates_64).  Previously a
					   duplicate of store_loc2 was missed. */
					hash_table[idx].iter++;
					hash_table[idx].store_loc2 = k;
				}
			}
			else if (check_equal(hash_table[idx].store_loc1, k) ||
				 check_equal(hash_table[idx].store_loc2, k))
				set_zero(k);
		}
		else if (collisions[idx] > 3) {
			unsigned int iter = hash_table[idx].iter;
			if (!iter)
				hash_location_list[hash_table[idx].idx_hash_loc_list][iter++] = k;
			else {
				unsigned int j;
				for (j = 0; j < iter; j++)
					if (check_equal(hash_location_list[hash_table[idx].idx_hash_loc_list][j], k)) {
						set_zero(k);
						break;
					}
				/* First occurrence: remember it (list holds at
				   most collisions - 1 locations). */
				if (j == iter && iter < (unsigned int)hash_table[idx].collisions - 1)
					hash_location_list[hash_table[idx].idx_hash_loc_list][iter++] = k;
			}
			hash_table[idx].iter = iter;
		}
	}
#undef COLLISION_DTYPE

	for (i = 0; i < counter; i++)
		bt_free((void **)&hash_location_list[i]);
	bt_free((void **)&hash_location_list);
	bt_free((void **)&hash_table);
	bt_free((void **)&collisions);
}
/*
 * Remove duplicates from the first num_loaded_hashes entries of the global
 * loaded_hashes_64[] array and compact it in place.  hash_table_size is
 * the size of the scratch bucket table and must be a power of 2 (buckets
 * are picked with '& (hash_table_size - 1)').  Duplicates are first
 * overwritten with 0 (set_zero) and the array is then compacted by moving
 * tail entries into the holes.  Returns the number of unique hashes kept,
 * or 0 on error (hash_table_size not a power of 2).
 */
unsigned int remove_duplicates_64(unsigned int num_loaded_hashes, unsigned int hash_table_size, unsigned int verbosity)
{
unsigned int i, num_unique_hashes, *rehash_list, counter;
#define COLLISION_DTYPE unsigned int
COLLISION_DTYPE *collisions;
typedef struct {
unsigned int store_loc1;
unsigned int store_loc2;
unsigned int store_loc3;
COLLISION_DTYPE iter;
} hash_table_data;
hash_table_data *hash_table = NULL;
if (verbosity > 1)
fprintf(stdout, "Removing duplicate hashes...");
if (hash_table_size & (hash_table_size - 1)) {
fprintf(stderr, "Duplicate removal hash table size must power of 2.\n");
return 0;
}
if (bt_malloc((void **)&hash_table, hash_table_size * sizeof(hash_table_data)))
bt_error("Failed to allocate memory: hash_table.");
if (bt_calloc((void **)&collisions, hash_table_size, sizeof(COLLISION_DTYPE)))
bt_error("Failed to allocate memory: collisions.");
#if _OPENMP
#pragma omp parallel private(i)
#endif
{
/* Count bucket occupancy in parallel. */
#if _OPENMP
#pragma omp for
#endif
for (i = 0; i < num_loaded_hashes; i++) {
unsigned int idx = loaded_hashes_64[i] & (hash_table_size - 1);
#if _OPENMP
#pragma omp atomic
#endif
collisions[idx]++;
}
/* NOTE(review): every thread writes counter = 0 here (same value), and
   the atomic additions below only start after the barrier — verify this
   benign-race pattern is acceptable on all targeted OpenMP runtimes. */
counter = 0;
#if _OPENMP
#pragma omp barrier
#endif
/* counter accumulates the worst-case number of entries in crowded
   (> 4 collisions) buckets that may need the rehash second pass. */
#if _OPENMP
#pragma omp for
#endif
for (i = 0; i < hash_table_size; i++) {
hash_table[i].iter = 0;
if (collisions[i] > 4)
#if _OPENMP
#pragma omp atomic
#endif
counter += (collisions[i] - 3);
}
/* Two independent sections: one handles 2-entry buckets, the other
   handles 3-or-more-entry buckets and collects overflow entries into
   rehash_list for the second pass. */
#if _OPENMP
#pragma omp barrier
#pragma omp sections
#endif
{
#if _OPENMP
#pragma omp section
#endif
{
for (i = 0; i < num_loaded_hashes; i++) {
unsigned int idx = loaded_hashes_64[i] & (hash_table_size - 1);
if (collisions[idx] == 2) {
if (!hash_table[idx].iter) {
hash_table[idx].iter++;
hash_table[idx].store_loc1 = i;
}
else if (check_equal(hash_table[idx].store_loc1, i))
set_zero(i);
}
}
}
#if _OPENMP
#pragma omp section
#endif
{
if (bt_malloc((void **)&rehash_list, counter * sizeof(unsigned int)))
bt_error("Failed to allocate memory: rehash_list.");
counter = 0;
for (i = 0; i < num_loaded_hashes; i++) {
unsigned int idx = loaded_hashes_64[i] & (hash_table_size - 1);
if (collisions[idx] == 3) {
if (!hash_table[idx].iter) {
hash_table[idx].iter++;
hash_table[idx].store_loc1 = i;
}
else if (hash_table[idx].iter == 1) {
if (check_equal(hash_table[idx].store_loc1, i))
set_zero(i);
else {
hash_table[idx].iter++;
hash_table[idx].store_loc2 = i;
}
}
else if (check_equal(hash_table[idx].store_loc1, i) ||
check_equal(hash_table[idx].store_loc2, i))
set_zero(i);
}
else if (collisions[idx] >= 4) {
if (!hash_table[idx].iter) {
hash_table[idx].iter++;
hash_table[idx].store_loc1 = i;
}
else if (hash_table[idx].iter == 1) {
if (check_equal(hash_table[idx].store_loc1, i))
set_zero(i);
else {
hash_table[idx].iter++;
hash_table[idx].store_loc2 = i;
}
}
else if (hash_table[idx].iter == 2) {
if (check_equal(hash_table[idx].store_loc1, i) ||
check_equal(hash_table[idx].store_loc2, i))
set_zero(i);
else {
hash_table[idx].iter++;
hash_table[idx].store_loc3 = i;
}
}
else if (hash_table[idx].iter >= 3) {
if (check_equal(hash_table[idx].store_loc1, i) ||
check_equal(hash_table[idx].store_loc2, i) ||
check_equal(hash_table[idx].store_loc3, i))
set_zero(i);
else {
/* Beyond the three inline slots: defer to the
   rehash pass (only needed when > 4 collide). */
if (collisions[idx] > 4)
rehash_list[counter++] = i;
}
}
}
}
/* Second pass over the overflow entries, in a fresh table sized
   1.5x the overflow count. */
if (counter)
remove_duplicates_final(counter, counter + (counter >> 1), rehash_list);
bt_free((void **)&rehash_list);
}
}
}
/* Optional bucket-occupancy statistics (disabled). */
#if 0
{ unsigned int col1 = 0, col2 = 0, col3 = 0, col4 = 0, col5a = 0;
for (i = 0; i < hash_table_size; i++) {
if (collisions[i] == 1)
col1++;
else if (collisions[i] == 2)
col2++;
else if (collisions[i] == 3)
col3++;
else if (collisions[i] == 4)
col4++;
else if (collisions[i] > 4)
col5a += collisions[i];
}
col2 *= 2;
col3 *= 3;
col4 *= 4;
fprintf(stderr, "Statistics:%Lf %Lf %Lf %Lf %Lf\n", (long double)col1 / (long double)num_loaded_hashes,
(long double)col2 / (long double)num_loaded_hashes, (long double)col3 / (long double)num_loaded_hashes,
(long double)col4 / (long double)num_loaded_hashes, (long double)col5a / (long double)num_loaded_hashes);
}
#endif
/* Compaction: find the last surviving (non-zero) entry, then fill every
   hole before it with a surviving tail entry. */
num_unique_hashes = 0;
for (i = num_loaded_hashes - 1; (int)i >= 0; i--)
if (check_non_zero(i)) {
num_unique_hashes = i;
break;
}
for (i = 0; i <= num_unique_hashes; i++)
if (check_zero(i)) {
unsigned int j;
loaded_hashes_64[i] = loaded_hashes_64[num_unique_hashes];
set_zero(num_unique_hashes);
num_unique_hashes--;
for (j = num_unique_hashes; (int)j >= 0; j--)
if (check_non_zero(j)) {
num_unique_hashes = j;
break;
}
}
#undef COLLISION_DTYPE
bt_free((void **)&collisions);
bt_free((void **)&hash_table);
if (verbosity > 1)
fprintf(stdout, "Done\n");
/* num_unique_hashes is the index of the last kept entry. */
return (num_unique_hashes + 1);
}
#endif
|
ParallelOpenMP.h | #pragma once
#include <cstddef>
#include <exception>
#ifdef _OPENMP
#define INTRA_OP_PARALLEL
#include <omp.h>
#endif
namespace at {
/*
 * Split [begin, end) across OpenMP threads and invoke f(chunk_begin,
 * chunk_end) once per participating thread.
 * - grain_size must be >= 0 (enforced by TORCH_CHECK); when positive it
 *   caps the thread count at divup(end - begin, grain_size), and the
 *   region runs on a single thread when (end - begin) <= grain_size.
 * - Empty ranges return immediately; a one-element range runs f inline.
 * - The first exception thrown by f (selected via err_flag) is rethrown
 *   on the calling thread after the parallel region.
 * - Without OpenMP, f is called once over the whole range.
 */
template <class F>
inline void parallel_for(
const int64_t begin,
const int64_t end,
const int64_t grain_size,
const F& f) {
TORCH_CHECK(grain_size >= 0);
at::internal::lazy_init_num_threads();
if (begin >= end) {
return;
}
if (end - begin == 1) {
f(begin, end);
return;
}
#ifdef _OPENMP
std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
std::exception_ptr eptr;
// Work around memory leak when using 1 thread in nested "omp parallel"
// caused by some buggy OpenMP versions and the fact that omp_in_parallel()
// returns false when omp_get_max_threads() == 1 inside nested "omp parallel"
// See issue gh-32284
#pragma omp parallel if (omp_get_max_threads() > 1 && !omp_in_parallel() && ((end - begin) > grain_size))
{
// choose number of tasks based on grain size and number of threads
// can't use num_threads clause due to bugs in GOMP's thread pool (See #32008)
int64_t num_threads = omp_get_num_threads();
if (grain_size > 0) {
num_threads = std::min(num_threads, divup((end - begin), grain_size));
}
// Each thread processes one contiguous chunk; threads whose chunk
// starts past the end (num_threads < team size) do nothing.
int64_t tid = omp_get_thread_num();
int64_t chunk_size = divup((end - begin), num_threads);
int64_t begin_tid = begin + tid * chunk_size;
if (begin_tid < end) {
try {
f(begin_tid, std::min(end, chunk_size + begin_tid));
} catch (...) {
// Keep only the first exception; the rest are dropped.
if (!err_flag.test_and_set()) {
eptr = std::current_exception();
}
}
}
}
if (eptr) {
std::rethrow_exception(eptr);
}
#else
f(begin, end);
#endif
}
/*
 * Parallel reduction over [begin, end): f(chunk_begin, chunk_end, ident)
 * computes a partial result for each of divup(end - begin, grain_size)
 * grain-sized chunks, and sf folds the partials (starting from ident)
 * sequentially into the final value.
 * Returns ident for an empty range; degrades to a single serial call of
 * f when already inside a parallel region or only one thread is
 * available.  The first exception thrown by f is rethrown after the loop.
 * NOTE(review): on the parallel path, grain_size == 0 would reach
 * divup(..., 0) — verify callers always pass a positive grain size.
 */
template <class scalar_t, class F, class SF>
inline scalar_t parallel_reduce(
const int64_t begin,
const int64_t end,
const int64_t grain_size,
const scalar_t ident,
const F& f,
const SF& sf) {
TORCH_CHECK(grain_size >= 0);
at::internal::lazy_init_num_threads();
if (begin >= end) {
return ident;
} else if (in_parallel_region() || get_num_threads() == 1) {
return f(begin, end, ident);
} else {
// One slot per chunk; every chunk writes exactly one partial result.
const int64_t num_results = divup((end - begin), grain_size);
std::vector<scalar_t> results(num_results);
scalar_t* results_data = results.data();
std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
std::exception_ptr eptr;
#pragma omp parallel for if ((end - begin) >= grain_size)
for (int64_t id = 0; id < num_results; id++) {
int64_t i = begin + id * grain_size;
try {
// The last chunk may be shorter than grain_size.
results_data[id] = f(i, i + std::min(end - i, grain_size), ident);
} catch (...) {
// Keep only the first exception; the rest are dropped.
if (!err_flag.test_and_set()) {
eptr = std::current_exception();
}
}
}
if (eptr) {
std::rethrow_exception(eptr);
}
// Sequential combine of the per-chunk partials.
scalar_t result = ident;
for (auto partial_result : results) {
result = sf(result, partial_result);
}
return result;
}
}
} // namespace at
|
task-two.c | /*
* task-two.c -- Archer testcase
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// RUN: %libarcher-compile-and-run-race | FileCheck %s
#include <omp.h>
#include <stdio.h>
#include <unistd.h>
#define NUM_THREADS 2
int main(int argc, char *argv[]) { /* Archer testcase: TSan must report the race below (comments kept inline so FileCheck line numbers are unchanged) */
int var = 0; /* shared counter, incremented by both threads without synchronization */
int i;
#pragma omp parallel for num_threads(NUM_THREADS) shared(var) schedule(static, \
1)
for (i = 0; i < NUM_THREADS; i++) {
#pragma omp task shared(var) if (0) // if(0): the task is undeferred — inlined and executed locally by the encountering thread
{ var++; } // intentional data race on var; this is the access the CHECK lines expect
}
int error = (var != 2);
fprintf(stderr, "DONE\n");
return error;
}
// CHECK: WARNING: ThreadSanitizer: data race
// CHECK-NEXT: {{(Write|Read)}} of size 4
// CHECK-NEXT: #0 {{.*}}task-two.c:30
// CHECK: Previous write of size 4
// CHECK-NEXT: #0 {{.*}}task-two.c:30
// CHECK: DONE
// CHECK: ThreadSanitizer: reported 1 warnings
|
Example_task_dep.4.c | /*
* @@name: task_dep.4c
* @@type: C
* @@compilable: yes
* @@linkable: yes
* @@expect: success
* @@version: omp_4.0
*/
#include <stdio.h>
/* OpenMP 4.0 task-dependence example: the depend(out: x) task must
   complete before either depend(in: x) task starts, so both reads see
   x == 2.  The two "in" tasks have no mutual dependence and may run in
   either order, printing "x + 1 = 3. " and "x + 2 = 4\n" in some order. */
int main() {
int x = 1;
#pragma omp parallel
#pragma omp single
{
/* Writer task: both reader tasks below depend on its completion. */
#pragma omp task shared(x) depend(out: x)
x = 2;
/* Reader tasks: ordered after the writer, unordered between themselves. */
#pragma omp task shared(x) depend(in: x)
printf("x + 1 = %d. ", x+1);
#pragma omp task shared(x) depend(in: x)
printf("x + 2 = %d\n", x+2);
}
return 0;
}
|
tree-pretty-print.c | /* Modula-3: modified */
/* Pretty formatting of GENERIC trees in C syntax.
Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
Adapted from c-pretty-print.c by Diego Novillo <dnovillo@redhat.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "output.h"
#include "tree-pretty-print.h"
#include "hashtab.h"
#include "tree-flow.h"
#include "langhooks.h"
#include "tree-iterator.h"
#include "tree-chrec.h"
#include "tree-pass.h"
#include "value-prof.h"
#include "predict.h"
EXTERN_C_START
/* Local functions, macros and variables. */
static const char *op_symbol (const_tree);
static void pretty_print_string (pretty_printer *, const char*);
static void newline_and_indent (pretty_printer *, int);
static void maybe_init_pretty_print (FILE *);
static void print_struct_decl (pretty_printer *, const_tree, int, int);
static void do_niy (pretty_printer *, const_tree);
#define INDENT(SPACE) do { \
int i; for (i = 0; i<SPACE; i++) pp_space (buffer); } while (0)
#define NIY do_niy(buffer,node)
static pretty_printer buffer;
static int initialized = 0;
/* Fallback printer for tree codes this file cannot pretty-print yet:
   emits "<<< Unknown tree: NAME ... >>>", dumping each operand of an
   expression node on its own line indented by two columns.  */
static void
do_niy (pretty_printer *buffer, const_tree node)
{
  pp_string (buffer, "<<< Unknown tree: ");
  pp_string (buffer, tree_code_name[(int) TREE_CODE (node)]);

  if (EXPR_P (node))
    {
      int op;
      int nops = TREE_OPERAND_LENGTH (node);

      for (op = 0; op < nops; ++op)
	{
	  newline_and_indent (buffer, 2);
	  dump_generic_node (buffer, TREE_OPERAND (node, op), 2, 0, false);
	}
    }

  pp_string (buffer, " >>>");
}
/* Print expression T (with virtual operands and memory symbols) to
   stderr, followed by a newline.  Meant to be called from a debugger.  */
DEBUG_FUNCTION void
debug_generic_expr (tree t)
{
  print_generic_expr (stderr, t, TDF_MEMSYMS | TDF_VOPS);
  fprintf (stderr, "\n");
}
/* Print statement T (with virtual operands and memory symbols) to
   stderr, followed by a newline.  Meant to be called from a debugger.  */
DEBUG_FUNCTION void
debug_generic_stmt (tree t)
{
  print_generic_stmt (stderr, t, TDF_MEMSYMS | TDF_VOPS);
  fprintf (stderr, "\n");
}
/* Print the TREE_CHAIN-linked list starting at T to stderr, one node
   after another.  A pointer set records every successor so a circular
   chain is detected and reported instead of looping forever.  */
DEBUG_FUNCTION void
debug_tree_chain (tree t)
{
  struct pointer_set_t *visited = pointer_set_create ();

  while (t)
    {
      tree next;

      print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS|TDF_UID);
      fprintf (stderr, " ");
      /* Record the successor (not the current node); seeing it twice
	 means the chain cycles back.  */
      next = TREE_CHAIN (t);
      if (pointer_set_insert (visited, next))
	{
	  fprintf (stderr, "... [cycled back to ");
	  print_generic_expr (stderr, next, TDF_VOPS|TDF_MEMSYMS|TDF_UID);
	  fprintf (stderr, "]");
	  break;
	}
      t = next;
    }
  fprintf (stderr, "\n");
  pointer_set_destroy (visited);
}
/* Print declaration NODE to STREAM, with the level of detail selected
   by FLAGS (TDF_* in tree-pass.h).  */
void
print_generic_decl (FILE *stream, tree node, int flags)
{
  maybe_init_pretty_print (stream);
  print_declaration (&buffer, node, 2, flags);
  pp_write_text_to_stream (&buffer);
}
/* Print statement NODE, and its successors, to STREAM.  FLAGS selects
   the detail to show (see TDF_* in tree-pass.h).  */
void
print_generic_stmt (FILE *stream, tree node, int flags)
{
  maybe_init_pretty_print (stream);
  dump_generic_node (&buffer, node, 0, flags, /*is_stmt=*/true);
  pp_flush (&buffer);
}
/* Like print_generic_stmt, but indent the output by INDENT spaces.
   FLAGS selects the detail to show (see TDF_* in tree-pass.h).  */
void
print_generic_stmt_indented (FILE *stream, tree node, int flags, int indent)
{
  int n;

  maybe_init_pretty_print (stream);
  for (n = indent; n > 0; n--)
    pp_space (&buffer);
  dump_generic_node (&buffer, node, indent, flags, /*is_stmt=*/true);
  pp_flush (&buffer);
}
/* Print the single expression NODE to STREAM, without a trailing
   terminator.  FLAGS selects the detail (see TDF_* in tree-pass.h).  */
void
print_generic_expr (FILE *stream, tree node, int flags)
{
  maybe_init_pretty_print (stream);
  dump_generic_node (&buffer, node, 0, flags, /*is_stmt=*/false);
}
/* Print the name of declaration NODE to BUFFER.  TDF_ASMNAME selects
   the assembler name when one is set; TDF_UID appends the DECL_UID (and
   anonymous decls always get a UID-based name); TDF_NOUID masks UIDs
   with "xxxx"; TDF_ALIAS additionally prints the points-to UID when it
   differs from the DECL_UID.  */
static void
dump_decl_name (pretty_printer *buffer, tree node, int flags)
{
  tree name = DECL_NAME (node);

  if (name)
    {
      if ((flags & TDF_ASMNAME) && DECL_ASSEMBLER_NAME_SET_P (node))
	pp_tree_identifier (buffer, DECL_ASSEMBLER_NAME (node));
      else
	pp_tree_identifier (buffer, name);
    }

  if (name == NULL_TREE || (flags & TDF_UID))
    {
      if (TREE_CODE (node) == LABEL_DECL && LABEL_DECL_UID (node) != -1)
	pp_printf (buffer, "L.%d", (int) LABEL_DECL_UID (node));
      else if (TREE_CODE (node) == DEBUG_EXPR_DECL)
	{
	  if (flags & TDF_NOUID)
	    pp_string (buffer, "D#xxxx");
	  else
	    pp_printf (buffer, "D#%i", DEBUG_TEMP_UID (node));
	}
      else
	{
	  /* 'C' prefix for constants, 'D' for everything else.  */
	  char prefix = TREE_CODE (node) == CONST_DECL ? 'C' : 'D';

	  if (flags & TDF_NOUID)
	    pp_printf (buffer, "%c.xxxx", prefix);
	  else
	    pp_printf (buffer, "%c.%u", prefix, DECL_UID (node));
	}
    }

  if ((flags & TDF_ALIAS) && DECL_PT_UID (node) != DECL_UID (node))
    {
      if (flags & TDF_NOUID)
	pp_printf (buffer, "ptD.xxxx");
      else
	pp_printf (buffer, "ptD.%u", DECL_PT_UID (node));
    }
}
/* Like dump_decl_name, but for the callee of a function call: strips a
   NOP_EXPR wrapper and prefers the language-specific printable name
   unless TDF_ASMNAME asks for the assembler name.  */
static void
dump_function_name (pretty_printer *buffer, tree node, int flags)
{
  if (TREE_CODE (node) == NOP_EXPR)
    node = TREE_OPERAND (node, 0);

  if ((flags & TDF_ASMNAME) == 0 && DECL_NAME (node))
    pp_string (buffer, lang_hooks.decl_printable_name (node, 1));
  else
    dump_decl_name (buffer, node, flags);
}
/* Print the parenthesized parameter list of FUNCTION_TYPE node NODE,
   e.g. " (int, char *)" or " (void)".  BUFFER, SPC and FLAGS are as in
   dump_generic_node.  */
static void
dump_function_declaration (pretty_printer *buffer, tree node,
			   int spc, int flags)
{
  bool printed_any = false;
  tree t = TYPE_ARG_TYPES (node);

  pp_space (buffer);
  pp_character (buffer, '(');

  /* The argument-type list normally ends with a VOID_TYPE sentinel;
     stopping while a successor exists skips that sentinel.  */
  while (t && TREE_CHAIN (t) && t != error_mark_node)
    {
      printed_any = true;
      dump_generic_node (buffer, TREE_VALUE (t), spc, flags, false);
      t = TREE_CHAIN (t);
      if (TREE_CHAIN (t) && TREE_CODE (TREE_CHAIN (t)) == TREE_LIST)
	{
	  pp_character (buffer, ',');
	  pp_space (buffer);
	}
    }

  if (!printed_any)
    pp_string (buffer, "void");

  pp_character (buffer, ')');
}
/* Print the index domain of an array type in brackets: "[N]" for a
   zero-based domain with constant bound N-1, "[min:max]" otherwise, or
   "[<unknown>]" when no domain is recorded.  */
static void
dump_array_domain (pretty_printer *buffer, tree domain, int spc, int flags)
{
  pp_character (buffer, '[');
  if (!domain)
    pp_string (buffer, "<unknown>");
  else
    {
      tree min = TYPE_MIN_VALUE (domain);
      tree max = TYPE_MAX_VALUE (domain);

      if (min && max && integer_zerop (min) && host_integerp (max, 0))
	/* Common case: print the element count instead of 0:N-1.  */
	pp_wide_integer (buffer, TREE_INT_CST_LOW (max) + 1);
      else
	{
	  if (min)
	    dump_generic_node (buffer, min, spc, flags, false);
	  pp_character (buffer, ':');
	  if (max)
	    dump_generic_node (buffer, max, spc, flags, false);
	}
    }
  pp_character (buffer, ']');
}
/* Dump OpenMP clause CLAUSE. BUFFER, CLAUSE, SPC and FLAGS are as in
dump_generic_node.  Simple "name(decl)" clauses set NAME and jump to
the shared print_remap tail; the remaining clauses are printed case by
case.  Unknown clause codes fall through to dump_generic_node. */
static void
dump_omp_clause (pretty_printer *buffer, tree clause, int spc, int flags)
{
const char *name;
switch (OMP_CLAUSE_CODE (clause))
{
case OMP_CLAUSE_PRIVATE:
name = "private";
goto print_remap;
case OMP_CLAUSE_SHARED:
name = "shared";
goto print_remap;
case OMP_CLAUSE_FIRSTPRIVATE:
name = "firstprivate";
goto print_remap;
case OMP_CLAUSE_LASTPRIVATE:
name = "lastprivate";
goto print_remap;
case OMP_CLAUSE_COPYIN:
name = "copyin";
goto print_remap;
case OMP_CLAUSE_COPYPRIVATE:
name = "copyprivate";
goto print_remap;
/* Shared tail for the data-sharing clauses above: "name(decl)". */
print_remap:
pp_string (buffer, name);
pp_character (buffer, '(');
dump_generic_node (buffer, OMP_CLAUSE_DECL (clause),
spc, flags, false);
pp_character (buffer, ')');
break;
case OMP_CLAUSE_REDUCTION:
/* Prints as "reduction(op:decl)". */
pp_string (buffer, "reduction(");
pp_string (buffer, op_symbol_code (OMP_CLAUSE_REDUCTION_CODE (clause)));
pp_character (buffer, ':');
dump_generic_node (buffer, OMP_CLAUSE_DECL (clause),
spc, flags, false);
pp_character (buffer, ')');
break;
case OMP_CLAUSE_IF:
pp_string (buffer, "if(");
dump_generic_node (buffer, OMP_CLAUSE_IF_EXPR (clause),
spc, flags, false);
pp_character (buffer, ')');
break;
case OMP_CLAUSE_NUM_THREADS:
pp_string (buffer, "num_threads(");
dump_generic_node (buffer, OMP_CLAUSE_NUM_THREADS_EXPR (clause),
spc, flags, false);
pp_character (buffer, ')');
break;
case OMP_CLAUSE_NOWAIT:
pp_string (buffer, "nowait");
break;
case OMP_CLAUSE_ORDERED:
pp_string (buffer, "ordered");
break;
case OMP_CLAUSE_DEFAULT:
/* "default()" with an empty argument for the unspecified kind. */
pp_string (buffer, "default(");
switch (OMP_CLAUSE_DEFAULT_KIND (clause))
{
case OMP_CLAUSE_DEFAULT_UNSPECIFIED:
break;
case OMP_CLAUSE_DEFAULT_SHARED:
pp_string (buffer, "shared");
break;
case OMP_CLAUSE_DEFAULT_NONE:
pp_string (buffer, "none");
break;
case OMP_CLAUSE_DEFAULT_PRIVATE:
pp_string (buffer, "private");
break;
case OMP_CLAUSE_DEFAULT_FIRSTPRIVATE:
pp_string (buffer, "firstprivate");
break;
default:
gcc_unreachable ();
}
pp_character (buffer, ')');
break;
case OMP_CLAUSE_SCHEDULE:
/* "schedule(kind)" or "schedule(kind,chunk)". */
pp_string (buffer, "schedule(");
switch (OMP_CLAUSE_SCHEDULE_KIND (clause))
{
case OMP_CLAUSE_SCHEDULE_STATIC:
pp_string (buffer, "static");
break;
case OMP_CLAUSE_SCHEDULE_DYNAMIC:
pp_string (buffer, "dynamic");
break;
case OMP_CLAUSE_SCHEDULE_GUIDED:
pp_string (buffer, "guided");
break;
case OMP_CLAUSE_SCHEDULE_RUNTIME:
pp_string (buffer, "runtime");
break;
case OMP_CLAUSE_SCHEDULE_AUTO:
pp_string (buffer, "auto");
break;
default:
gcc_unreachable ();
}
if (OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clause))
{
pp_character (buffer, ',');
dump_generic_node (buffer,
OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clause),
spc, flags, false);
}
pp_character (buffer, ')');
break;
case OMP_CLAUSE_UNTIED:
pp_string (buffer, "untied");
break;
case OMP_CLAUSE_COLLAPSE:
pp_string (buffer, "collapse(");
dump_generic_node (buffer,
OMP_CLAUSE_COLLAPSE_EXPR (clause),
spc, flags, false);
pp_character (buffer, ')');
break;
default:
/* Should never happen. */
dump_generic_node (buffer, clause, spc, flags, false);
break;
}
}
/* Print the chain of OpenMP clauses starting at CLAUSE, each preceded
   by a space.  Nothing is printed for an empty chain.  BUFFER, SPC and
   FLAGS are as in dump_generic_node.  */
void
dump_omp_clauses (pretty_printer *buffer, tree clause, int spc, int flags)
{
  for (; clause != NULL; clause = OMP_CLAUSE_CHAIN (clause))
    {
      pp_space (buffer);
      dump_omp_clause (buffer, clause, spc, flags);
    }
}
/* Print source location LOC to BUFFER as "[file : line] " (the file
   part is omitted when the expanded location has none).  */
static void
dump_location (pretty_printer *buffer, location_t loc)
{
  expanded_location exploc = expand_location (loc);

  pp_character (buffer, '[');
  if (exploc.file != NULL)
    {
      pp_string (buffer, exploc.file);
      pp_string (buffer, " : ");
    }
  pp_decimal_int (buffer, exploc.line);
  pp_string (buffer, "] ");
}
/* Dump lexical block BLOCK. BUFFER, SPC and FLAGS are as in
dump_generic_node.  Prints the block number and status markers, then
(unless TDF_SLIM is set) each populated field — supercontext,
subblocks, siblings, variables, nonlocalized variables, abstract and
fragment origins, and the fragment chain — as its own labeled,
indented line.  Referenced blocks are printed with TDF_SLIM to avoid
recursing through the whole tree. */
static void
dump_block_node (pretty_printer *buffer, tree block, int spc, int flags)
{
tree t;
pp_printf (buffer, "BLOCK #%d ", BLOCK_NUMBER (block));
/* Optional markers controlled by flags / block state. */
if (flags & TDF_ADDRESS)
pp_printf (buffer, "[%p] ", (void *) block);
if (BLOCK_ABSTRACT (block))
pp_string (buffer, "[abstract] ");
if (TREE_ASM_WRITTEN (block))
pp_string (buffer, "[written] ");
/* TDF_SLIM: header only, no field dump. */
if (flags & TDF_SLIM)
return;
if (BLOCK_SOURCE_LOCATION (block))
dump_location (buffer, BLOCK_SOURCE_LOCATION (block));
newline_and_indent (buffer, spc + 2);
if (BLOCK_SUPERCONTEXT (block))
{
pp_string (buffer, "SUPERCONTEXT: ");
dump_generic_node (buffer, BLOCK_SUPERCONTEXT (block), 0,
flags | TDF_SLIM, false);
newline_and_indent (buffer, spc + 2);
}
if (BLOCK_SUBBLOCKS (block))
{
pp_string (buffer, "SUBBLOCKS: ");
for (t = BLOCK_SUBBLOCKS (block); t; t = BLOCK_CHAIN (t))
{
dump_generic_node (buffer, t, 0, flags | TDF_SLIM, false);
pp_string (buffer, " ");
}
newline_and_indent (buffer, spc + 2);
}
if (BLOCK_CHAIN (block))
{
pp_string (buffer, "SIBLINGS: ");
for (t = BLOCK_CHAIN (block); t; t = BLOCK_CHAIN (t))
{
dump_generic_node (buffer, t, 0, flags | TDF_SLIM, false);
pp_string (buffer, " ");
}
newline_and_indent (buffer, spc + 2);
}
if (BLOCK_VARS (block))
{
pp_string (buffer, "VARS: ");
for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
{
dump_generic_node (buffer, t, 0, flags, false);
pp_string (buffer, " ");
}
newline_and_indent (buffer, spc + 2);
}
if (VEC_length (tree, BLOCK_NONLOCALIZED_VARS (block)) > 0)
{
unsigned i;
VEC(tree,gc) *nlv = BLOCK_NONLOCALIZED_VARS (block);
pp_string (buffer, "NONLOCALIZED_VARS: ");
FOR_EACH_VEC_ELT (tree, nlv, i, t)
{
dump_generic_node (buffer, t, 0, flags, false);
pp_string (buffer, " ");
}
newline_and_indent (buffer, spc + 2);
}
if (BLOCK_ABSTRACT_ORIGIN (block))
{
pp_string (buffer, "ABSTRACT_ORIGIN: ");
dump_generic_node (buffer, BLOCK_ABSTRACT_ORIGIN (block), 0,
flags | TDF_SLIM, false);
newline_and_indent (buffer, spc + 2);
}
if (BLOCK_FRAGMENT_ORIGIN (block))
{
pp_string (buffer, "FRAGMENT_ORIGIN: ");
dump_generic_node (buffer, BLOCK_FRAGMENT_ORIGIN (block), 0,
flags | TDF_SLIM, false);
newline_and_indent (buffer, spc + 2);
}
if (BLOCK_FRAGMENT_CHAIN (block))
{
pp_string (buffer, "FRAGMENT_CHAIN: ");
for (t = BLOCK_FRAGMENT_CHAIN (block); t; t = BLOCK_FRAGMENT_CHAIN (t))
{
dump_generic_node (buffer, t, 0, flags | TDF_SLIM, false);
pp_string (buffer, " ");
}
newline_and_indent (buffer, spc + 2);
}
}
/* Dump the node NODE on the pretty_printer BUFFER, SPC spaces of
indent. FLAGS specifies details to show in the dump (see TDF_* in
tree-pass.h). If IS_STMT is true, the object printed is considered
to be a statement and it is terminated by ';' if appropriate. */
int
dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
bool is_stmt)
{
tree type;
tree op0, op1;
const char *str;
bool is_expr;
if (node == NULL_TREE)
return spc;
is_expr = EXPR_P (node);
if (is_stmt && (flags & TDF_STMTADDR))
pp_printf (buffer, "<&%p> ", (void *)node);
if ((flags & TDF_LINENO) && EXPR_HAS_LOCATION (node))
dump_location (buffer, EXPR_LOCATION (node));
switch (TREE_CODE (node))
{
case ERROR_MARK:
pp_string (buffer, "<<< error >>>");
break;
case IDENTIFIER_NODE:
pp_tree_identifier (buffer, node);
break;
case TREE_LIST:
while (node && node != error_mark_node)
{
if (TREE_PURPOSE (node))
{
dump_generic_node (buffer, TREE_PURPOSE (node), spc, flags, false);
pp_space (buffer);
}
dump_generic_node (buffer, TREE_VALUE (node), spc, flags, false);
node = TREE_CHAIN (node);
if (node && TREE_CODE (node) == TREE_LIST)
{
pp_character (buffer, ',');
pp_space (buffer);
}
}
break;
case TREE_BINFO:
dump_generic_node (buffer, BINFO_TYPE (node), spc, flags, false);
break;
case TREE_VEC:
{
size_t i;
if (TREE_VEC_LENGTH (node) > 0)
{
size_t len = TREE_VEC_LENGTH (node);
for (i = 0; i < len - 1; i++)
{
dump_generic_node (buffer, TREE_VEC_ELT (node, i), spc, flags,
false);
pp_character (buffer, ',');
pp_space (buffer);
}
dump_generic_node (buffer, TREE_VEC_ELT (node, len - 1), spc,
flags, false);
}
}
break;
case VOID_TYPE:
case INTEGER_TYPE:
case REAL_TYPE:
case FIXED_POINT_TYPE:
case COMPLEX_TYPE:
case VECTOR_TYPE:
case ENUMERAL_TYPE:
case BOOLEAN_TYPE:
{
unsigned int quals = TYPE_QUALS (node);
enum tree_code_class tclass;
if (quals & TYPE_QUAL_CONST)
pp_string (buffer, "const ");
else if (quals & TYPE_QUAL_VOLATILE)
pp_string (buffer, "volatile ");
else if (quals & TYPE_QUAL_RESTRICT)
pp_string (buffer, "restrict ");
if (!ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (node)))
{
pp_string (buffer, "<address-space-");
pp_decimal_int (buffer, TYPE_ADDR_SPACE (node));
pp_string (buffer, "> ");
}
tclass = TREE_CODE_CLASS (TREE_CODE (node));
if (tclass == tcc_declaration)
{
if (DECL_NAME (node))
dump_decl_name (buffer, node, flags);
else
pp_string (buffer, "<unnamed type decl>");
}
else if (tclass == tcc_type)
{
if (TYPE_NAME (node))
{
if (TREE_CODE (TYPE_NAME (node)) == IDENTIFIER_NODE)
pp_tree_identifier (buffer, TYPE_NAME (node));
else if (TREE_CODE (TYPE_NAME (node)) == TYPE_DECL
&& DECL_NAME (TYPE_NAME (node)))
dump_decl_name (buffer, TYPE_NAME (node), flags);
else
pp_string (buffer, "<unnamed type>");
}
else if (TREE_CODE (node) == VECTOR_TYPE)
{
pp_string (buffer, "vector");
pp_character (buffer, '(');
pp_wide_integer (buffer, TYPE_VECTOR_SUBPARTS (node));
pp_string (buffer, ") ");
dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false);
}
else if (TREE_CODE (node) == INTEGER_TYPE)
{
pp_string (buffer, (TYPE_UNSIGNED (node)
? "<unnamed-unsigned:"
: "<unnamed-signed:"));
pp_decimal_int (buffer, TYPE_PRECISION (node));
pp_string (buffer, ">");
}
else if (TREE_CODE (node) == COMPLEX_TYPE)
{
pp_string (buffer, "__complex__ ");
dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false);
}
else if (TREE_CODE (node) == REAL_TYPE)
{
pp_string (buffer, "<float:");
pp_decimal_int (buffer, TYPE_PRECISION (node));
pp_string (buffer, ">");
}
else if (TREE_CODE (node) == FIXED_POINT_TYPE)
{
pp_string (buffer, "<fixed-point-");
pp_string (buffer, TYPE_SATURATING (node) ? "sat:" : "nonsat:");
pp_decimal_int (buffer, TYPE_PRECISION (node));
pp_string (buffer, ">");
}
else if (TREE_CODE (node) == VOID_TYPE)
pp_string (buffer, "void");
else
pp_string (buffer, "<unnamed type>");
}
break;
}
case POINTER_TYPE:
case REFERENCE_TYPE:
str = (TREE_CODE (node) == POINTER_TYPE ? "*" : "&");
if (TREE_TYPE (node) == NULL)
{
pp_string (buffer, str);
pp_string (buffer, "<null type>");
}
else if (TREE_CODE (TREE_TYPE (node)) == FUNCTION_TYPE)
{
tree fnode = TREE_TYPE (node);
dump_generic_node (buffer, TREE_TYPE (fnode), spc, flags, false);
pp_space (buffer);
pp_character (buffer, '(');
pp_string (buffer, str);
if (TYPE_NAME (node) && DECL_NAME (TYPE_NAME (node)))
dump_decl_name (buffer, TYPE_NAME (node), flags);
else if (flags & TDF_NOUID)
pp_printf (buffer, "<Txxxx>");
else
pp_printf (buffer, "<T%x>", TYPE_UID (node));
pp_character (buffer, ')');
dump_function_declaration (buffer, fnode, spc, flags);
}
else
{
unsigned int quals = TYPE_QUALS (node);
dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false);
pp_space (buffer);
pp_string (buffer, str);
if (quals & TYPE_QUAL_CONST)
pp_string (buffer, " const");
if (quals & TYPE_QUAL_VOLATILE)
pp_string (buffer, " volatile");
if (quals & TYPE_QUAL_RESTRICT)
pp_string (buffer, " restrict");
if (!ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (node)))
{
pp_string (buffer, " <address-space-");
pp_decimal_int (buffer, TYPE_ADDR_SPACE (node));
pp_string (buffer, ">");
}
if (TYPE_REF_CAN_ALIAS_ALL (node))
pp_string (buffer, " {ref-all}");
}
break;
case OFFSET_TYPE:
NIY;
break;
case MEM_REF:
{
if (integer_zerop (TREE_OPERAND (node, 1))
/* Dump the types of INTEGER_CSTs explicitly, for we can't
infer them and MEM_ATTR caching will share MEM_REFs
with differently-typed op0s. */
&& TREE_CODE (TREE_OPERAND (node, 0)) != INTEGER_CST
/* Same pointer types, but ignoring POINTER_TYPE vs.
REFERENCE_TYPE. */
&& (TREE_TYPE (TREE_TYPE (TREE_OPERAND (node, 0)))
== TREE_TYPE (TREE_TYPE (TREE_OPERAND (node, 1))))
&& (TYPE_MODE (TREE_TYPE (TREE_OPERAND (node, 0)))
== TYPE_MODE (TREE_TYPE (TREE_OPERAND (node, 1))))
&& (TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (TREE_OPERAND (node, 0)))
== TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (TREE_OPERAND (node, 1))))
/* Same value types ignoring qualifiers. */
&& (TYPE_MAIN_VARIANT (TREE_TYPE (node))
== TYPE_MAIN_VARIANT
(TREE_TYPE (TREE_TYPE (TREE_OPERAND (node, 1))))))
{
if (TREE_CODE (TREE_OPERAND (node, 0)) != ADDR_EXPR)
{
pp_string (buffer, "*");
dump_generic_node (buffer, TREE_OPERAND (node, 0),
spc, flags, false);
}
else
dump_generic_node (buffer,
TREE_OPERAND (TREE_OPERAND (node, 0), 0),
spc, flags, false);
}
else
{
tree ptype;
pp_string (buffer, "MEM[");
pp_string (buffer, "(");
ptype = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (node, 1)));
dump_generic_node (buffer, ptype,
spc, flags | TDF_SLIM, false);
pp_string (buffer, ")");
dump_generic_node (buffer, TREE_OPERAND (node, 0),
spc, flags, false);
if (!integer_zerop (TREE_OPERAND (node, 1)))
{
pp_string (buffer, " + ");
dump_generic_node (buffer, TREE_OPERAND (node, 1),
spc, flags, false);
}
pp_string (buffer, "]");
}
break;
}
case TARGET_MEM_REF:
{
const char *sep = "";
tree tmp;
pp_string (buffer, "MEM[");
if (TREE_CODE (TMR_BASE (node)) == ADDR_EXPR)
{
pp_string (buffer, sep);
sep = ", ";
pp_string (buffer, "symbol: ");
dump_generic_node (buffer, TREE_OPERAND (TMR_BASE (node), 0),
spc, flags, false);
}
else
{
pp_string (buffer, sep);
sep = ", ";
pp_string (buffer, "base: ");
dump_generic_node (buffer, TMR_BASE (node), spc, flags, false);
}
tmp = TMR_INDEX2 (node);
if (tmp)
{
pp_string (buffer, sep);
sep = ", ";
pp_string (buffer, "base: ");
dump_generic_node (buffer, tmp, spc, flags, false);
}
tmp = TMR_INDEX (node);
if (tmp)
{
pp_string (buffer, sep);
sep = ", ";
pp_string (buffer, "index: ");
dump_generic_node (buffer, tmp, spc, flags, false);
}
tmp = TMR_STEP (node);
if (tmp)
{
pp_string (buffer, sep);
sep = ", ";
pp_string (buffer, "step: ");
dump_generic_node (buffer, tmp, spc, flags, false);
}
tmp = TMR_OFFSET (node);
if (tmp)
{
pp_string (buffer, sep);
sep = ", ";
pp_string (buffer, "offset: ");
dump_generic_node (buffer, tmp, spc, flags, false);
}
pp_string (buffer, "]");
}
break;
case ARRAY_TYPE:
{
tree tmp;
/* Print the innermost component type. */
for (tmp = TREE_TYPE (node); TREE_CODE (tmp) == ARRAY_TYPE;
tmp = TREE_TYPE (tmp))
;
dump_generic_node (buffer, tmp, spc, flags, false);
/* Print the dimensions. */
for (tmp = node; TREE_CODE (tmp) == ARRAY_TYPE; tmp = TREE_TYPE (tmp))
dump_array_domain (buffer, TYPE_DOMAIN (tmp), spc, flags);
break;
}
case RECORD_TYPE:
case UNION_TYPE:
case QUAL_UNION_TYPE:
{
unsigned int quals = TYPE_QUALS (node);
if (quals & TYPE_QUAL_CONST)
pp_string (buffer, "const ");
if (quals & TYPE_QUAL_VOLATILE)
pp_string (buffer, "volatile ");
/* Print the name of the structure. */
if (TREE_CODE (node) == RECORD_TYPE)
pp_string (buffer, "struct ");
else if (TREE_CODE (node) == UNION_TYPE)
pp_string (buffer, "union ");
if (TYPE_NAME (node))
dump_generic_node (buffer, TYPE_NAME (node), spc, flags, false);
else if (!(flags & TDF_SLIM))
/* FIXME: If we eliminate the 'else' above and attempt
to show the fields for named types, we may get stuck
following a cycle of pointers to structs. The alleged
self-reference check in print_struct_decl will not detect
cycles involving more than one pointer or struct type. */
print_struct_decl (buffer, node, spc, flags);
break;
}
case LANG_TYPE:
NIY;
break;
case INTEGER_CST:
if (TREE_CODE (TREE_TYPE (node)) == POINTER_TYPE)
{
/* In the case of a pointer, one may want to divide by the
size of the pointed-to type. Unfortunately, this not
straightforward. The C front-end maps expressions
(int *) 5
int *p; (p + 5)
in such a way that the two INTEGER_CST nodes for "5" have
different values but identical types. In the latter
case, the 5 is multiplied by sizeof (int) in c-common.c
(pointer_int_sum) to convert it to a byte address, and
yet the type of the node is left unchanged. Argh. What
is consistent though is that the number value corresponds
to bytes (UNITS) offset.
NB: Neither of the following divisors can be trivially
used to recover the original literal:
TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (node)))
TYPE_PRECISION (TREE_TYPE (TREE_TYPE (node))) */
pp_wide_integer (buffer, TREE_INT_CST_LOW (node));
pp_string (buffer, "B"); /* pseudo-unit */
}
else if (! host_integerp (node, 0))
{
tree val = node;
unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (val);
HOST_WIDE_INT high = TREE_INT_CST_HIGH (val);
if (tree_int_cst_sgn (val) < 0)
{
pp_character (buffer, '-');
high = ~high + !low;
low = -low;
}
/* Would "%x%0*x" or "%x%*0x" get zero-padding on all
systems? */
sprintf (pp_buffer (buffer)->digit_buffer,
HOST_WIDE_INT_PRINT_DOUBLE_HEX,
(unsigned HOST_WIDE_INT) high, low);
pp_string (buffer, pp_buffer (buffer)->digit_buffer);
}
else
pp_wide_integer (buffer, TREE_INT_CST_LOW (node));
break;
case REAL_CST:
/* Code copied from print_node. */
{
REAL_VALUE_TYPE d;
if (TREE_OVERFLOW (node))
pp_string (buffer, " overflow");
#if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
d = TREE_REAL_CST (node);
if (REAL_VALUE_ISINF (d))
pp_string (buffer, REAL_VALUE_NEGATIVE (d) ? " -Inf" : " Inf");
else if (REAL_VALUE_ISNAN (d))
pp_string (buffer, " Nan");
else
{
char string[100];
real_to_decimal (string, &d, sizeof (string), 0, 1);
pp_string (buffer, string);
}
#else
{
HOST_WIDE_INT i;
unsigned char *p = (unsigned char *) &TREE_REAL_CST (node);
pp_string (buffer, "0x");
for (i = 0; i < sizeof TREE_REAL_CST (node); i++)
output_formatted_integer (buffer, "%02x", *p++);
}
#endif
break;
}
case FIXED_CST:
{
char string[100];
fixed_to_decimal (string, TREE_FIXED_CST_PTR (node), sizeof (string));
pp_string (buffer, string);
break;
}
case COMPLEX_CST:
pp_string (buffer, "__complex__ (");
dump_generic_node (buffer, TREE_REALPART (node), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_IMAGPART (node), spc, flags, false);
pp_string (buffer, ")");
break;
case STRING_CST:
pp_string (buffer, "\"");
pretty_print_string (buffer, TREE_STRING_POINTER (node));
pp_string (buffer, "\"");
break;
case VECTOR_CST:
{
tree elt;
pp_string (buffer, "{ ");
for (elt = TREE_VECTOR_CST_ELTS (node); elt; elt = TREE_CHAIN (elt))
{
dump_generic_node (buffer, TREE_VALUE (elt), spc, flags, false);
if (TREE_CHAIN (elt))
pp_string (buffer, ", ");
}
pp_string (buffer, " }");
}
break;
case FUNCTION_TYPE:
case METHOD_TYPE:
dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false);
pp_space (buffer);
if (TREE_CODE (node) == METHOD_TYPE)
{
if (TYPE_METHOD_BASETYPE (node))
dump_decl_name (buffer, TYPE_NAME (TYPE_METHOD_BASETYPE (node)),
flags);
else
pp_string (buffer, "<null method basetype>");
pp_string (buffer, "::");
}
if (TYPE_NAME (node) && DECL_NAME (TYPE_NAME (node)))
dump_decl_name (buffer, TYPE_NAME (node), flags);
else if (flags & TDF_NOUID)
pp_printf (buffer, "<Txxxx>");
else
pp_printf (buffer, "<T%x>", TYPE_UID (node));
dump_function_declaration (buffer, node, spc, flags);
break;
case FUNCTION_DECL:
case CONST_DECL:
dump_decl_name (buffer, node, flags);
break;
case LABEL_DECL:
if (DECL_NAME (node))
dump_decl_name (buffer, node, flags);
else if (LABEL_DECL_UID (node) != -1)
pp_printf (buffer, "<L%d>", (int) LABEL_DECL_UID (node));
else
{
if (flags & TDF_NOUID)
pp_string (buffer, "<D.xxxx>");
else
pp_printf (buffer, "<D.%u>", DECL_UID (node));
}
break;
case TYPE_DECL:
if (DECL_IS_BUILTIN (node))
{
/* Don't print the declaration of built-in types. */
break;
}
if (DECL_NAME (node))
dump_decl_name (buffer, node, flags);
else if (TYPE_NAME (TREE_TYPE (node)) != node)
{
if ((TREE_CODE (TREE_TYPE (node)) == RECORD_TYPE
|| TREE_CODE (TREE_TYPE (node)) == UNION_TYPE)
&& TYPE_METHODS (TREE_TYPE (node)))
{
/* The type is a c++ class: all structures have at least
4 methods. */
pp_string (buffer, "class ");
dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false);
}
else
{
pp_string (buffer,
(TREE_CODE (TREE_TYPE (node)) == UNION_TYPE
? "union" : "struct "));
dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false);
}
}
else
pp_string (buffer, "<anon>");
break;
case VAR_DECL:
case PARM_DECL:
case FIELD_DECL:
case DEBUG_EXPR_DECL:
case NAMESPACE_DECL:
dump_decl_name (buffer, node, flags);
break;
case RESULT_DECL:
pp_string (buffer, "<retval>");
break;
case COMPONENT_REF:
op0 = TREE_OPERAND (node, 0);
str = ".";
if (op0
&& (TREE_CODE (op0) == INDIRECT_REF
|| (TREE_CODE (op0) == MEM_REF
&& TREE_CODE (TREE_OPERAND (op0, 0)) != ADDR_EXPR
&& integer_zerop (TREE_OPERAND (op0, 1))
/* Dump the types of INTEGER_CSTs explicitly, for we
can't infer them and MEM_ATTR caching will share
MEM_REFs with differently-typed op0s. */
&& TREE_CODE (TREE_OPERAND (op0, 0)) != INTEGER_CST
/* Same pointer types, but ignoring POINTER_TYPE vs.
REFERENCE_TYPE. */
&& (TREE_TYPE (TREE_TYPE (TREE_OPERAND (op0, 0)))
== TREE_TYPE (TREE_TYPE (TREE_OPERAND (op0, 1))))
&& (TYPE_MODE (TREE_TYPE (TREE_OPERAND (op0, 0)))
== TYPE_MODE (TREE_TYPE (TREE_OPERAND (op0, 1))))
&& (TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (TREE_OPERAND (op0, 0)))
== TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (TREE_OPERAND (op0, 1))))
/* Same value types ignoring qualifiers. */
&& (TYPE_MAIN_VARIANT (TREE_TYPE (op0))
== TYPE_MAIN_VARIANT
(TREE_TYPE (TREE_TYPE (TREE_OPERAND (op0, 1))))))))
{
op0 = TREE_OPERAND (op0, 0);
str = "->";
}
if (op_prio (op0) < op_prio (node))
pp_character (buffer, '(');
dump_generic_node (buffer, op0, spc, flags, false);
if (op_prio (op0) < op_prio (node))
pp_character (buffer, ')');
pp_string (buffer, str);
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
op0 = component_ref_field_offset (node);
if (op0 && TREE_CODE (op0) != INTEGER_CST)
{
pp_string (buffer, "{off: ");
dump_generic_node (buffer, op0, spc, flags, false);
pp_character (buffer, '}');
}
break;
case BIT_FIELD_REF:
pp_string (buffer, "BIT_FIELD_REF <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (buffer, ">");
break;
case ARRAY_REF:
case ARRAY_RANGE_REF:
op0 = TREE_OPERAND (node, 0);
if (op_prio (op0) < op_prio (node))
pp_character (buffer, '(');
dump_generic_node (buffer, op0, spc, flags, false);
if (op_prio (op0) < op_prio (node))
pp_character (buffer, ')');
pp_character (buffer, '[');
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
if (TREE_CODE (node) == ARRAY_RANGE_REF)
pp_string (buffer, " ...");
pp_character (buffer, ']');
op0 = array_ref_low_bound (node);
op1 = array_ref_element_size (node);
if (!integer_zerop (op0)
|| TREE_OPERAND (node, 2)
|| TREE_OPERAND (node, 3))
{
pp_string (buffer, "{lb: ");
dump_generic_node (buffer, op0, spc, flags, false);
pp_string (buffer, " sz: ");
dump_generic_node (buffer, op1, spc, flags, false);
pp_character (buffer, '}');
}
break;
case CONSTRUCTOR:
{
unsigned HOST_WIDE_INT ix;
tree field, val;
bool is_struct_init = FALSE;
pp_character (buffer, '{');
if (TREE_CODE (TREE_TYPE (node)) == RECORD_TYPE
|| TREE_CODE (TREE_TYPE (node)) == UNION_TYPE)
is_struct_init = TRUE;
FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (node), ix, field, val)
{
if (field && is_struct_init)
{
pp_character (buffer, '.');
dump_generic_node (buffer, field, spc, flags, false);
pp_string (buffer, "=");
}
if (val && TREE_CODE (val) == ADDR_EXPR)
if (TREE_CODE (TREE_OPERAND (val, 0)) == FUNCTION_DECL)
val = TREE_OPERAND (val, 0);
if (val && TREE_CODE (val) == FUNCTION_DECL)
dump_decl_name (buffer, val, flags);
else
dump_generic_node (buffer, val, spc, flags, false);
if (ix != VEC_length (constructor_elt, CONSTRUCTOR_ELTS (node)) - 1)
{
pp_character (buffer, ',');
pp_space (buffer);
}
}
pp_character (buffer, '}');
}
break;
case COMPOUND_EXPR:
{
tree *tp;
if (flags & TDF_SLIM)
{
pp_string (buffer, "<COMPOUND_EXPR>");
break;
}
dump_generic_node (buffer, TREE_OPERAND (node, 0),
spc, flags, !(flags & TDF_SLIM));
if (flags & TDF_SLIM)
newline_and_indent (buffer, spc);
else
{
pp_character (buffer, ',');
pp_space (buffer);
}
for (tp = &TREE_OPERAND (node, 1);
TREE_CODE (*tp) == COMPOUND_EXPR;
tp = &TREE_OPERAND (*tp, 1))
{
dump_generic_node (buffer, TREE_OPERAND (*tp, 0),
spc, flags, !(flags & TDF_SLIM));
if (flags & TDF_SLIM)
newline_and_indent (buffer, spc);
else
{
pp_character (buffer, ',');
pp_space (buffer);
}
}
dump_generic_node (buffer, *tp, spc, flags, !(flags & TDF_SLIM));
}
break;
case STATEMENT_LIST:
{
tree_stmt_iterator si;
bool first = true;
if (flags & TDF_SLIM)
{
pp_string (buffer, "<STATEMENT_LIST>");
break;
}
for (si = tsi_start (node); !tsi_end_p (si); tsi_next (&si))
{
if (!first)
newline_and_indent (buffer, spc);
else
first = false;
dump_generic_node (buffer, tsi_stmt (si), spc, flags, true);
}
}
break;
case MODIFY_EXPR:
case INIT_EXPR:
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags,
false);
pp_space (buffer);
pp_character (buffer, '=');
if (TREE_CODE (node) == MODIFY_EXPR
&& MOVE_NONTEMPORAL (node))
pp_string (buffer, "{nt}");
pp_space (buffer);
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags,
false);
break;
case TARGET_EXPR:
pp_string (buffer, "TARGET_EXPR <");
dump_generic_node (buffer, TARGET_EXPR_SLOT (node), spc, flags, false);
pp_character (buffer, ',');
pp_space (buffer);
dump_generic_node (buffer, TARGET_EXPR_INITIAL (node), spc, flags, false);
pp_character (buffer, '>');
break;
case DECL_EXPR:
print_declaration (buffer, DECL_EXPR_DECL (node), spc, flags);
is_stmt = false;
break;
case COND_EXPR:
if (TREE_TYPE (node) == NULL || TREE_TYPE (node) == void_type_node)
{
pp_string (buffer, "if (");
dump_generic_node (buffer, COND_EXPR_COND (node), spc, flags, false);
pp_character (buffer, ')');
/* The lowered cond_exprs should always be printed in full. */
if (COND_EXPR_THEN (node)
&& (IS_EMPTY_STMT (COND_EXPR_THEN (node))
|| TREE_CODE (COND_EXPR_THEN (node)) == GOTO_EXPR)
&& COND_EXPR_ELSE (node)
&& (IS_EMPTY_STMT (COND_EXPR_ELSE (node))
|| TREE_CODE (COND_EXPR_ELSE (node)) == GOTO_EXPR))
{
pp_space (buffer);
dump_generic_node (buffer, COND_EXPR_THEN (node),
0, flags, true);
if (!IS_EMPTY_STMT (COND_EXPR_ELSE (node)))
{
pp_string (buffer, " else ");
dump_generic_node (buffer, COND_EXPR_ELSE (node),
0, flags, true);
}
}
else if (!(flags & TDF_SLIM))
{
/* Output COND_EXPR_THEN. */
if (COND_EXPR_THEN (node))
{
newline_and_indent (buffer, spc+2);
pp_character (buffer, '{');
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, COND_EXPR_THEN (node), spc+4,
flags, true);
newline_and_indent (buffer, spc+2);
pp_character (buffer, '}');
}
/* Output COND_EXPR_ELSE. */
if (COND_EXPR_ELSE (node)
&& !IS_EMPTY_STMT (COND_EXPR_ELSE (node)))
{
newline_and_indent (buffer, spc);
pp_string (buffer, "else");
newline_and_indent (buffer, spc+2);
pp_character (buffer, '{');
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, COND_EXPR_ELSE (node), spc+4,
flags, true);
newline_and_indent (buffer, spc+2);
pp_character (buffer, '}');
}
}
is_expr = false;
}
else
{
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_space (buffer);
pp_character (buffer, '?');
pp_space (buffer);
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_space (buffer);
pp_character (buffer, ':');
pp_space (buffer);
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
}
break;
case BIND_EXPR:
pp_character (buffer, '{');
if (!(flags & TDF_SLIM))
{
if (BIND_EXPR_VARS (node))
{
pp_newline (buffer);
for (op0 = BIND_EXPR_VARS (node); op0; op0 = DECL_CHAIN (op0))
{
print_declaration (buffer, op0, spc+2, flags);
pp_newline (buffer);
}
}
newline_and_indent (buffer, spc+2);
dump_generic_node (buffer, BIND_EXPR_BODY (node), spc+2, flags, true);
newline_and_indent (buffer, spc);
pp_character (buffer, '}');
}
is_expr = false;
break;
case CALL_EXPR:
print_call_name (buffer, CALL_EXPR_FN (node), flags);
/* Print parameters. */
pp_space (buffer);
pp_character (buffer, '(');
{
tree arg;
call_expr_arg_iterator iter;
FOR_EACH_CALL_EXPR_ARG (arg, iter, node)
{
dump_generic_node (buffer, arg, spc, flags, false);
if (more_call_expr_args_p (&iter))
{
pp_character (buffer, ',');
pp_space (buffer);
}
}
}
if (CALL_EXPR_VA_ARG_PACK (node))
{
if (call_expr_nargs (node) > 0)
{
pp_character (buffer, ',');
pp_space (buffer);
}
pp_string (buffer, "__builtin_va_arg_pack ()");
}
pp_character (buffer, ')');
op1 = CALL_EXPR_STATIC_CHAIN (node);
if (op1)
{
pp_string (buffer, " [static-chain: ");
dump_generic_node (buffer, op1, spc, flags, false);
pp_character (buffer, ']');
}
if (CALL_EXPR_RETURN_SLOT_OPT (node))
pp_string (buffer, " [return slot optimization]");
if (CALL_EXPR_TAILCALL (node))
pp_string (buffer, " [tail call]");
break;
case STATIC_CHAIN_EXPR:
pp_string (buffer, "<<static chain of ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ">>");
break;
case WITH_CLEANUP_EXPR:
NIY;
break;
case CLEANUP_POINT_EXPR:
pp_string (buffer, "<<cleanup_point ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ">>");
break;
case PLACEHOLDER_EXPR:
pp_string (buffer, "<PLACEHOLDER_EXPR ");
dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false);
pp_character (buffer, '>');
break;
/* Binary arithmetic and logic expressions. */
case WIDEN_SUM_EXPR:
case WIDEN_MULT_EXPR:
case MULT_EXPR:
case PLUS_EXPR:
case POINTER_PLUS_EXPR:
case MINUS_EXPR:
case TRUNC_DIV_EXPR:
case CEIL_DIV_EXPR:
case FLOOR_DIV_EXPR:
case ROUND_DIV_EXPR:
case TRUNC_MOD_EXPR:
case CEIL_MOD_EXPR:
case FLOOR_MOD_EXPR:
case ROUND_MOD_EXPR:
case RDIV_EXPR:
case EXACT_DIV_EXPR:
case LSHIFT_EXPR:
case RSHIFT_EXPR:
case LROTATE_EXPR:
case RROTATE_EXPR:
case VEC_LSHIFT_EXPR:
case VEC_RSHIFT_EXPR:
case BIT_IOR_EXPR:
case BIT_XOR_EXPR:
case BIT_AND_EXPR:
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
case TRUTH_XOR_EXPR:
case LT_EXPR:
case LE_EXPR:
case GT_EXPR:
case GE_EXPR:
case EQ_EXPR:
case NE_EXPR:
case UNLT_EXPR:
case UNLE_EXPR:
case UNGT_EXPR:
case UNGE_EXPR:
case UNEQ_EXPR:
case LTGT_EXPR:
case ORDERED_EXPR:
case UNORDERED_EXPR:
{
const char *op = op_symbol (node);
op0 = TREE_OPERAND (node, 0);
op1 = TREE_OPERAND (node, 1);
/* When the operands are expressions with less priority,
keep semantics of the tree representation. */
if (op_prio (op0) <= op_prio (node))
{
pp_character (buffer, '(');
dump_generic_node (buffer, op0, spc, flags, false);
pp_character (buffer, ')');
}
else
dump_generic_node (buffer, op0, spc, flags, false);
pp_space (buffer);
pp_string (buffer, op);
pp_space (buffer);
/* When the operands are expressions with less priority,
keep semantics of the tree representation. */
if (op_prio (op1) <= op_prio (node))
{
pp_character (buffer, '(');
dump_generic_node (buffer, op1, spc, flags, false);
pp_character (buffer, ')');
}
else
dump_generic_node (buffer, op1, spc, flags, false);
}
break;
/* Unary arithmetic and logic expressions. */
case NEGATE_EXPR:
case BIT_NOT_EXPR:
case TRUTH_NOT_EXPR:
case ADDR_EXPR:
case PREDECREMENT_EXPR:
case PREINCREMENT_EXPR:
case INDIRECT_REF:
if (TREE_CODE (node) == ADDR_EXPR
&& (TREE_CODE (TREE_OPERAND (node, 0)) == STRING_CST
|| TREE_CODE (TREE_OPERAND (node, 0)) == FUNCTION_DECL))
; /* Do not output '&' for strings and function pointers. */
else
pp_string (buffer, op_symbol (node));
if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node))
{
pp_character (buffer, '(');
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_character (buffer, ')');
}
else
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
break;
case POSTDECREMENT_EXPR:
case POSTINCREMENT_EXPR:
if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node))
{
pp_character (buffer, '(');
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_character (buffer, ')');
}
else
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, op_symbol (node));
break;
case MIN_EXPR:
pp_string (buffer, "MIN_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_character (buffer, '>');
break;
case MAX_EXPR:
pp_string (buffer, "MAX_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_character (buffer, '>');
break;
case ABS_EXPR:
pp_string (buffer, "ABS_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_character (buffer, '>');
break;
case RANGE_EXPR:
NIY;
break;
case ADDR_SPACE_CONVERT_EXPR:
case FIXED_CONVERT_EXPR:
case FIX_TRUNC_EXPR:
case FLOAT_EXPR:
CASE_CONVERT:
type = TREE_TYPE (node);
op0 = TREE_OPERAND (node, 0);
if (type != TREE_TYPE (op0))
{
pp_character (buffer, '(');
dump_generic_node (buffer, type, spc, flags, false);
pp_string (buffer, ") ");
}
if (op_prio (op0) < op_prio (node))
pp_character (buffer, '(');
dump_generic_node (buffer, op0, spc, flags, false);
if (op_prio (op0) < op_prio (node))
pp_character (buffer, ')');
break;
case VIEW_CONVERT_EXPR:
pp_string (buffer, "VIEW_CONVERT_EXPR<");
dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false);
pp_string (buffer, ">(");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_character (buffer, ')');
break;
case PAREN_EXPR:
pp_string (buffer, "((");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, "))");
break;
case NON_LVALUE_EXPR:
pp_string (buffer, "NON_LVALUE_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_character (buffer, '>');
break;
case SAVE_EXPR:
pp_string (buffer, "SAVE_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_character (buffer, '>');
break;
case COMPLEX_EXPR:
pp_string (buffer, "COMPLEX_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, ">");
break;
case CONJ_EXPR:
pp_string (buffer, "CONJ_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ">");
break;
case REALPART_EXPR:
pp_string (buffer, "REALPART_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ">");
break;
case IMAGPART_EXPR:
pp_string (buffer, "IMAGPART_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ">");
break;
case VA_ARG_EXPR:
pp_string (buffer, "VA_ARG_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ">");
break;
case TRY_FINALLY_EXPR:
case TRY_CATCH_EXPR:
pp_string (buffer, "try");
newline_and_indent (buffer, spc+2);
pp_string (buffer, "{");
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc+4, flags, true);
newline_and_indent (buffer, spc+2);
pp_string (buffer, "}");
newline_and_indent (buffer, spc);
pp_string (buffer,
(TREE_CODE (node) == TRY_CATCH_EXPR) ? "catch" : "finally");
newline_and_indent (buffer, spc+2);
pp_string (buffer, "{");
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc+4, flags, true);
newline_and_indent (buffer, spc+2);
pp_string (buffer, "}");
is_expr = false;
break;
case CATCH_EXPR:
pp_string (buffer, "catch (");
dump_generic_node (buffer, CATCH_TYPES (node), spc+2, flags, false);
pp_string (buffer, ")");
newline_and_indent (buffer, spc+2);
pp_string (buffer, "{");
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, CATCH_BODY (node), spc+4, flags, true);
newline_and_indent (buffer, spc+2);
pp_string (buffer, "}");
is_expr = false;
break;
case EH_FILTER_EXPR:
pp_string (buffer, "<<<eh_filter (");
dump_generic_node (buffer, EH_FILTER_TYPES (node), spc+2, flags, false);
pp_string (buffer, ")>>>");
newline_and_indent (buffer, spc+2);
pp_string (buffer, "{");
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, EH_FILTER_FAILURE (node), spc+4, flags, true);
newline_and_indent (buffer, spc+2);
pp_string (buffer, "}");
is_expr = false;
break;
case LABEL_EXPR:
op0 = TREE_OPERAND (node, 0);
/* If this is for break or continue, don't bother printing it. */
if (DECL_NAME (op0))
{
const char *name = IDENTIFIER_POINTER (DECL_NAME (op0));
if (strcmp (name, "break") == 0
|| strcmp (name, "continue") == 0)
break;
}
dump_generic_node (buffer, op0, spc, flags, false);
pp_character (buffer, ':');
if (DECL_NONLOCAL (op0))
pp_string (buffer, " [non-local]");
break;
case LOOP_EXPR:
pp_string (buffer, "while (1)");
if (!(flags & TDF_SLIM))
{
newline_and_indent (buffer, spc+2);
pp_character (buffer, '{');
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, LOOP_EXPR_BODY (node), spc+4, flags, true);
newline_and_indent (buffer, spc+2);
pp_character (buffer, '}');
}
is_expr = false;
break;
case PREDICT_EXPR:
pp_string (buffer, "// predicted ");
if (PREDICT_EXPR_OUTCOME (node))
pp_string (buffer, "likely by ");
else
pp_string (buffer, "unlikely by ");
pp_string (buffer, predictor_name (PREDICT_EXPR_PREDICTOR (node)));
pp_string (buffer, " predictor.");
break;
case RETURN_EXPR:
pp_string (buffer, "return");
op0 = TREE_OPERAND (node, 0);
if (op0)
{
pp_space (buffer);
if (TREE_CODE (op0) == MODIFY_EXPR)
dump_generic_node (buffer, TREE_OPERAND (op0, 1),
spc, flags, false);
else
dump_generic_node (buffer, op0, spc, flags, false);
}
break;
case EXIT_EXPR:
pp_string (buffer, "if (");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ") break");
break;
case SWITCH_EXPR:
pp_string (buffer, "switch (");
dump_generic_node (buffer, SWITCH_COND (node), spc, flags, false);
pp_character (buffer, ')');
if (!(flags & TDF_SLIM))
{
newline_and_indent (buffer, spc+2);
pp_character (buffer, '{');
if (SWITCH_BODY (node))
{
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, SWITCH_BODY (node), spc+4, flags,
true);
}
else
{
tree vec = SWITCH_LABELS (node);
size_t i, n = TREE_VEC_LENGTH (vec);
for (i = 0; i < n; ++i)
{
tree elt = TREE_VEC_ELT (vec, i);
newline_and_indent (buffer, spc+4);
if (elt)
{
dump_generic_node (buffer, elt, spc+4, flags, false);
pp_string (buffer, " goto ");
dump_generic_node (buffer, CASE_LABEL (elt), spc+4,
flags, true);
pp_semicolon (buffer);
}
else
pp_string (buffer, "case ???: goto ???;");
}
}
newline_and_indent (buffer, spc+2);
pp_character (buffer, '}');
}
is_expr = false;
break;
case GOTO_EXPR:
op0 = GOTO_DESTINATION (node);
if (TREE_CODE (op0) != SSA_NAME && DECL_P (op0) && DECL_NAME (op0))
{
const char *name = IDENTIFIER_POINTER (DECL_NAME (op0));
if (strcmp (name, "break") == 0
|| strcmp (name, "continue") == 0)
{
pp_string (buffer, name);
break;
}
}
pp_string (buffer, "goto ");
dump_generic_node (buffer, op0, spc, flags, false);
break;
case ASM_EXPR:
pp_string (buffer, "__asm__");
if (ASM_VOLATILE_P (node))
pp_string (buffer, " __volatile__");
pp_character (buffer, '(');
dump_generic_node (buffer, ASM_STRING (node), spc, flags, false);
pp_character (buffer, ':');
dump_generic_node (buffer, ASM_OUTPUTS (node), spc, flags, false);
pp_character (buffer, ':');
dump_generic_node (buffer, ASM_INPUTS (node), spc, flags, false);
if (ASM_CLOBBERS (node))
{
pp_character (buffer, ':');
dump_generic_node (buffer, ASM_CLOBBERS (node), spc, flags, false);
}
pp_string (buffer, ")");
break;
case CASE_LABEL_EXPR:
if (CASE_LOW (node) && CASE_HIGH (node))
{
pp_string (buffer, "case ");
dump_generic_node (buffer, CASE_LOW (node), spc, flags, false);
pp_string (buffer, " ... ");
dump_generic_node (buffer, CASE_HIGH (node), spc, flags, false);
}
else if (CASE_LOW (node))
{
pp_string (buffer, "case ");
dump_generic_node (buffer, CASE_LOW (node), spc, flags, false);
}
else
pp_string (buffer, "default");
pp_character (buffer, ':');
break;
case OBJ_TYPE_REF:
pp_string (buffer, "OBJ_TYPE_REF(");
dump_generic_node (buffer, OBJ_TYPE_REF_EXPR (node), spc, flags, false);
pp_character (buffer, ';');
dump_generic_node (buffer, OBJ_TYPE_REF_OBJECT (node), spc, flags, false);
pp_character (buffer, '-');
pp_character (buffer, '>');
dump_generic_node (buffer, OBJ_TYPE_REF_TOKEN (node), spc, flags, false);
pp_character (buffer, ')');
break;
case SSA_NAME:
dump_generic_node (buffer, SSA_NAME_VAR (node), spc, flags, false);
pp_string (buffer, "_");
pp_decimal_int (buffer, SSA_NAME_VERSION (node));
if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (node))
pp_string (buffer, "(ab)");
else if (SSA_NAME_IS_DEFAULT_DEF (node))
pp_string (buffer, "(D)");
break;
case WITH_SIZE_EXPR:
pp_string (buffer, "WITH_SIZE_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, ">");
break;
case ASSERT_EXPR:
pp_string (buffer, "ASSERT_EXPR <");
dump_generic_node (buffer, ASSERT_EXPR_VAR (node), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, ASSERT_EXPR_COND (node), spc, flags, false);
pp_string (buffer, ">");
break;
case SCEV_KNOWN:
pp_string (buffer, "scev_known");
break;
case SCEV_NOT_KNOWN:
pp_string (buffer, "scev_not_known");
break;
case POLYNOMIAL_CHREC:
pp_string (buffer, "{");
dump_generic_node (buffer, CHREC_LEFT (node), spc, flags, false);
pp_string (buffer, ", +, ");
dump_generic_node (buffer, CHREC_RIGHT (node), spc, flags, false);
pp_string (buffer, "}_");
dump_generic_node (buffer, CHREC_VAR (node), spc, flags, false);
is_stmt = false;
break;
case REALIGN_LOAD_EXPR:
pp_string (buffer, "REALIGN_LOAD <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (buffer, ">");
break;
case VEC_COND_EXPR:
pp_string (buffer, " VEC_COND_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, " , ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, " , ");
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (buffer, " > ");
break;
case DOT_PROD_EXPR:
pp_string (buffer, " DOT_PROD_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (buffer, " > ");
break;
case WIDEN_MULT_PLUS_EXPR:
pp_string (buffer, " WIDEN_MULT_PLUS_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (buffer, " > ");
break;
case WIDEN_MULT_MINUS_EXPR:
pp_string (buffer, " WIDEN_MULT_MINUS_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (buffer, " > ");
break;
case FMA_EXPR:
pp_string (buffer, " FMA_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (buffer, " > ");
break;
case OMP_PARALLEL:
pp_string (buffer, "#pragma omp parallel");
dump_omp_clauses (buffer, OMP_PARALLEL_CLAUSES (node), spc, flags);
dump_omp_body:
if (!(flags & TDF_SLIM) && OMP_BODY (node))
{
newline_and_indent (buffer, spc + 2);
pp_character (buffer, '{');
newline_and_indent (buffer, spc + 4);
dump_generic_node (buffer, OMP_BODY (node), spc + 4, flags, false);
newline_and_indent (buffer, spc + 2);
pp_character (buffer, '}');
}
is_expr = false;
break;
case OMP_TASK:
pp_string (buffer, "#pragma omp task");
dump_omp_clauses (buffer, OMP_TASK_CLAUSES (node), spc, flags);
goto dump_omp_body;
case OMP_FOR:
pp_string (buffer, "#pragma omp for");
dump_omp_clauses (buffer, OMP_FOR_CLAUSES (node), spc, flags);
if (!(flags & TDF_SLIM))
{
int i;
if (OMP_FOR_PRE_BODY (node))
{
newline_and_indent (buffer, spc + 2);
pp_character (buffer, '{');
spc += 4;
newline_and_indent (buffer, spc);
dump_generic_node (buffer, OMP_FOR_PRE_BODY (node),
spc, flags, false);
}
spc -= 2;
for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (node)); i++)
{
spc += 2;
newline_and_indent (buffer, spc);
pp_string (buffer, "for (");
dump_generic_node (buffer, TREE_VEC_ELT (OMP_FOR_INIT (node), i),
spc, flags, false);
pp_string (buffer, "; ");
dump_generic_node (buffer, TREE_VEC_ELT (OMP_FOR_COND (node), i),
spc, flags, false);
pp_string (buffer, "; ");
dump_generic_node (buffer, TREE_VEC_ELT (OMP_FOR_INCR (node), i),
spc, flags, false);
pp_string (buffer, ")");
}
if (OMP_FOR_BODY (node))
{
newline_and_indent (buffer, spc + 2);
pp_character (buffer, '{');
newline_and_indent (buffer, spc + 4);
dump_generic_node (buffer, OMP_FOR_BODY (node), spc + 4, flags,
false);
newline_and_indent (buffer, spc + 2);
pp_character (buffer, '}');
}
spc -= 2 * TREE_VEC_LENGTH (OMP_FOR_INIT (node)) - 2;
if (OMP_FOR_PRE_BODY (node))
{
spc -= 4;
newline_and_indent (buffer, spc + 2);
pp_character (buffer, '}');
}
}
is_expr = false;
break;
case OMP_SECTIONS:
pp_string (buffer, "#pragma omp sections");
dump_omp_clauses (buffer, OMP_SECTIONS_CLAUSES (node), spc, flags);
goto dump_omp_body;
case OMP_SECTION:
pp_string (buffer, "#pragma omp section");
goto dump_omp_body;
case OMP_MASTER:
pp_string (buffer, "#pragma omp master");
goto dump_omp_body;
case OMP_ORDERED:
pp_string (buffer, "#pragma omp ordered");
goto dump_omp_body;
case OMP_CRITICAL:
pp_string (buffer, "#pragma omp critical");
if (OMP_CRITICAL_NAME (node))
{
pp_space (buffer);
pp_character (buffer, '(');
dump_generic_node (buffer, OMP_CRITICAL_NAME (node), spc,
flags, false);
pp_character (buffer, ')');
}
goto dump_omp_body;
case OMP_ATOMIC:
pp_string (buffer, "#pragma omp atomic");
newline_and_indent (buffer, spc + 2);
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_space (buffer);
pp_character (buffer, '=');
pp_space (buffer);
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
break;
case OMP_SINGLE:
pp_string (buffer, "#pragma omp single");
dump_omp_clauses (buffer, OMP_SINGLE_CLAUSES (node), spc, flags);
goto dump_omp_body;
case OMP_CLAUSE:
dump_omp_clause (buffer, node, spc, flags);
is_expr = false;
break;
case REDUC_MAX_EXPR:
pp_string (buffer, " REDUC_MAX_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, " > ");
break;
case REDUC_MIN_EXPR:
pp_string (buffer, " REDUC_MIN_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, " > ");
break;
case REDUC_PLUS_EXPR:
pp_string (buffer, " REDUC_PLUS_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_WIDEN_MULT_HI_EXPR:
pp_string (buffer, " VEC_WIDEN_MULT_HI_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_WIDEN_MULT_LO_EXPR:
pp_string (buffer, " VEC_WIDEN_MULT_LO_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_UNPACK_HI_EXPR:
pp_string (buffer, " VEC_UNPACK_HI_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_UNPACK_LO_EXPR:
pp_string (buffer, " VEC_UNPACK_LO_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_UNPACK_FLOAT_HI_EXPR:
pp_string (buffer, " VEC_UNPACK_FLOAT_HI_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_UNPACK_FLOAT_LO_EXPR:
pp_string (buffer, " VEC_UNPACK_FLOAT_LO_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_PACK_TRUNC_EXPR:
pp_string (buffer, " VEC_PACK_TRUNC_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_PACK_SAT_EXPR:
pp_string (buffer, " VEC_PACK_SAT_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_PACK_FIX_TRUNC_EXPR:
pp_string (buffer, " VEC_PACK_FIX_TRUNC_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, " > ");
break;
case BLOCK:
dump_block_node (buffer, node, spc, flags);
break;
case VEC_EXTRACT_EVEN_EXPR:
pp_string (buffer, " VEC_EXTRACT_EVEN_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_EXTRACT_ODD_EXPR:
pp_string (buffer, " VEC_EXTRACT_ODD_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_INTERLEAVE_HIGH_EXPR:
pp_string (buffer, " VEC_INTERLEAVE_HIGH_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_INTERLEAVE_LOW_EXPR:
pp_string (buffer, " VEC_INTERLEAVE_LOW_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, " > ");
break;
default:
NIY;
}
if (is_stmt && is_expr)
pp_semicolon (buffer);
/* If we're building a diagnostic, the formatted text will be written
into BUFFER's stream by the caller; otherwise, write it now. */
if (!(flags & TDF_DIAGNOSTIC))
pp_write_text_to_stream (buffer);
return spc;
}
/* Print the declaration of the variable, type, or function T to BUFFER,
   indented by SPC columns and honoring the dump FLAGS.  */

void
print_declaration (pretty_printer *buffer, tree t, int spc, int flags)
{
  INDENT (spc);

  /* Storage-class and typedef keywords come first.  */
  if (TREE_CODE (t) == TYPE_DECL)
    pp_string (buffer, "typedef ");

  if (CODE_CONTAINS_STRUCT (TREE_CODE (t), TS_DECL_WRTL) && DECL_REGISTER (t))
    pp_string (buffer, "register ");

  if (TREE_PUBLIC (t) && DECL_EXTERNAL (t))
    pp_string (buffer, "extern ");
  else if (TREE_STATIC (t))
    pp_string (buffer, "static ");

  if (TREE_TYPE (t) && TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
    {
      /* Arrays print as "<element type> <name><dimensions>"; walk down
	 to the innermost element type first.  */
      tree elt = TREE_TYPE (t);
      while (TREE_CODE (TREE_TYPE (elt)) == ARRAY_TYPE)
	elt = TREE_TYPE (elt);
      dump_generic_node (buffer, TREE_TYPE (elt), spc, flags, false);

      /* The variable's name.  */
      pp_space (buffer);
      dump_generic_node (buffer, t, spc, flags, false);

      /* Each dimension, outermost first.  */
      for (elt = TREE_TYPE (t); TREE_CODE (elt) == ARRAY_TYPE;
	   elt = TREE_TYPE (elt))
	dump_array_domain (buffer, TYPE_DOMAIN (elt), spc, flags);
    }
  else if (TREE_CODE (t) == FUNCTION_DECL)
    {
      /* Functions print as "<return type> <name>(<parameters>)".  */
      dump_generic_node (buffer, TREE_TYPE (TREE_TYPE (t)), spc, flags, false);
      pp_space (buffer);
      dump_decl_name (buffer, t, flags);
      dump_function_declaration (buffer, TREE_TYPE (t), spc, flags);
    }
  else
    {
      /* Everything else prints as "<type> <name>".  */
      dump_generic_node (buffer, TREE_TYPE (t), spc, flags, false);
      pp_space (buffer);
      dump_generic_node (buffer, t, spc, flags, false);
    }

  /* Explicit hard-register variables carry their assembler name.  */
  if (TREE_CODE (t) == VAR_DECL && DECL_HARD_REGISTER (t))
    {
      pp_string (buffer, " __asm__ ");
      pp_character (buffer, '(');
      dump_generic_node (buffer, DECL_ASSEMBLER_NAME (t), spc, flags, false);
      pp_character (buffer, ')');
    }

  /* The initial value of a function serves to determine whether the function
     is declared or defined, so initializers are only printed for
     non-function nodes.  */
  if (TREE_CODE (t) != FUNCTION_DECL && DECL_INITIAL (t))
    {
      pp_space (buffer);
      pp_character (buffer, '=');
      pp_space (buffer);
      dump_generic_node (buffer, DECL_INITIAL (t), spc, flags, false);
    }

  if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
    {
      pp_string (buffer, " [value-expr: ");
      dump_generic_node (buffer, DECL_VALUE_EXPR (t), spc, flags, false);
      pp_character (buffer, ']');
    }

  pp_character (buffer, ';');
}
/* Prints the aggregate type NODE (struct/union) to BUFFER: its tag,
   then its fields, one declaration per line, indented by SPC.
   FIXME: Still incomplete (methods are not printed).  */

static void
print_struct_decl (pretty_printer *buffer, const_tree node, int spc, int flags)
{
  tree field;

  /* Emit the tag of the aggregate, if it has a name.  */
  if (TYPE_NAME (node))
    {
      INDENT (spc);
      if (TREE_CODE (node) == RECORD_TYPE)
	pp_string (buffer, "struct ");
      else if ((TREE_CODE (node) == UNION_TYPE
		|| TREE_CODE (node) == QUAL_UNION_TYPE))
	pp_string (buffer, "union ");

      dump_generic_node (buffer, TYPE_NAME (node), spc, 0, false);
    }

  /* Open the body of the aggregate.  */
  pp_newline (buffer);
  INDENT (spc);
  pp_character (buffer, '{');
  pp_newline (buffer);

  /* Walk the field chain, printing every member declaration.  */
  for (field = TYPE_FIELDS (node); field; field = DECL_CHAIN (field))
    {
      /* Avoid printing the structure recursively.
	 FIXME: not implemented correctly; what about the case when we
	 have a cycle in the containment graph?  Maybe this could be
	 solved by looking at the scope in which the structure was
	 declared.  */
      if (TREE_TYPE (field) != node
	  && (TREE_CODE (TREE_TYPE (field)) != POINTER_TYPE
	      || TREE_TYPE (TREE_TYPE (field)) != node))
	{
	  print_declaration (buffer, field, spc + 2, flags);
	  pp_newline (buffer);
	}
    }

  /* Close the body.  */
  INDENT (spc);
  pp_character (buffer, '}');
}
/* Return the priority of the operator CODE.
   From lowest to highest precedence with either left-to-right (L-R)
   or right-to-left (R-L) associativity:
   1	[L-R]	,
   2	[R-L]	= += -= *= /= %= &= ^= |= <<= >>=
   3	[R-L]	?:
   4	[L-R]	||
   5	[L-R]	&&
   6	[L-R]	|
   7	[L-R]	^
   8	[L-R]	&
   9	[L-R]	== !=
   10	[L-R]	< <= > >=
   11	[L-R]	<< >>
   12	[L-R]	+ -
   13	[L-R]	* / %
   14	[R-L]	! ~ ++ -- + - * & (type) sizeof
   15	[L-R]	fn() [] -> .
   unary +, - and * have higher precedence than the corresponding binary
   operators.  */
int
op_code_prio (enum tree_code code)
{
  switch (code)
    {
    case TREE_LIST:
    case COMPOUND_EXPR:
    case BIND_EXPR:
      return 1;
    case MODIFY_EXPR:
    case INIT_EXPR:
      return 2;
    case COND_EXPR:
      return 3;
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
      return 4;
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
      return 5;
    case BIT_IOR_EXPR:
      return 6;
    case BIT_XOR_EXPR:
    case TRUTH_XOR_EXPR:
      return 7;
    case BIT_AND_EXPR:
      return 8;
    case EQ_EXPR:
    case NE_EXPR:
      return 9;
    /* Ordered and unordered comparisons share the precedence of the
       relational operators.  */
    case UNLT_EXPR:
    case UNLE_EXPR:
    case UNGT_EXPR:
    case UNGE_EXPR:
    case UNEQ_EXPR:
    case LTGT_EXPR:
    case ORDERED_EXPR:
    case UNORDERED_EXPR:
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      return 10;
    /* Rotates are grouped with the shifts.  */
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      return 11;
    case WIDEN_SUM_EXPR:
    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
    case MINUS_EXPR:
      return 12;
    /* All multiplicative codes, including the widening, fused, and
       vector variants, share the precedence of '*'.  */
    case VEC_WIDEN_MULT_HI_EXPR:
    case VEC_WIDEN_MULT_LO_EXPR:
    case WIDEN_MULT_EXPR:
    case DOT_PROD_EXPR:
    case WIDEN_MULT_PLUS_EXPR:
    case WIDEN_MULT_MINUS_EXPR:
    case MULT_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case RDIV_EXPR:
    case EXACT_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case FMA_EXPR:
      return 13;
    /* Unary operators and conversions.  */
    case TRUTH_NOT_EXPR:
    case BIT_NOT_EXPR:
    case POSTINCREMENT_EXPR:
    case POSTDECREMENT_EXPR:
    case PREINCREMENT_EXPR:
    case PREDECREMENT_EXPR:
    case NEGATE_EXPR:
    case INDIRECT_REF:
    case ADDR_EXPR:
    case FLOAT_EXPR:
    CASE_CONVERT:
    case FIX_TRUNC_EXPR:
    case TARGET_EXPR:
      return 14;
    /* Postfix expressions bind tightest.  */
    case CALL_EXPR:
    case ARRAY_REF:
    case ARRAY_RANGE_REF:
    case COMPONENT_REF:
      return 15;
    /* Special expressions.  */
    case MIN_EXPR:
    case MAX_EXPR:
    case ABS_EXPR:
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case REDUC_MAX_EXPR:
    case REDUC_MIN_EXPR:
    case REDUC_PLUS_EXPR:
    case VEC_LSHIFT_EXPR:
    case VEC_RSHIFT_EXPR:
    case VEC_UNPACK_HI_EXPR:
    case VEC_UNPACK_LO_EXPR:
    case VEC_UNPACK_FLOAT_HI_EXPR:
    case VEC_UNPACK_FLOAT_LO_EXPR:
    case VEC_PACK_TRUNC_EXPR:
    case VEC_PACK_SAT_EXPR:
      return 16;
    default:
      /* Return an arbitrarily high precedence to avoid surrounding single
	 VAR_DECLs in ()s.  */
      return 9999;
    }
}
/* Return the priority of the operator OP, as op_code_prio would report
   it for the expression OP ultimately wraps.  */

int
op_prio (const_tree op)
{
  /* SAVE_EXPR and NON_LVALUE_EXPR are transparent for precedence
     purposes: look through them to the wrapped operand.  */
  while (op != NULL
	 && (TREE_CODE (op) == SAVE_EXPR
	     || TREE_CODE (op) == NON_LVALUE_EXPR))
    op = TREE_OPERAND (op, 0);

  if (op == NULL)
    return 9999;

  return op_code_prio (TREE_CODE (op));
}
/* Return the symbol associated with operator CODE.
   Codes without a direct C spelling use short mnemonics: unordered
   comparisons are prefixed with "u", rotates and reductions with "r",
   vector shifts with "v", and widening operators with "w".  */
const char *
op_symbol_code (enum tree_code code)
{
  switch (code)
    {
    case MODIFY_EXPR:
      return "=";
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
      return "||";
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
      return "&&";
    case BIT_IOR_EXPR:
      return "|";
    case TRUTH_XOR_EXPR:
    case BIT_XOR_EXPR:
      return "^";
    case ADDR_EXPR:
    case BIT_AND_EXPR:
      return "&";
    case ORDERED_EXPR:
      return "ord";
    case UNORDERED_EXPR:
      return "unord";
    case EQ_EXPR:
      return "==";
    case UNEQ_EXPR:
      return "u==";
    case NE_EXPR:
      return "!=";
    case LT_EXPR:
      return "<";
    case UNLT_EXPR:
      return "u<";
    case LE_EXPR:
      return "<=";
    case UNLE_EXPR:
      return "u<=";
    case GT_EXPR:
      return ">";
    case UNGT_EXPR:
      return "u>";
    case GE_EXPR:
      return ">=";
    case UNGE_EXPR:
      return "u>=";
    case LTGT_EXPR:
      return "<>";
    case LSHIFT_EXPR:
      return "<<";
    case RSHIFT_EXPR:
      return ">>";
    case LROTATE_EXPR:
      return "r<<";
    case RROTATE_EXPR:
      return "r>>";
    case VEC_LSHIFT_EXPR:
      return "v<<";
    case VEC_RSHIFT_EXPR:
      return "v>>";
    case POINTER_PLUS_EXPR:
      return "+";
    case PLUS_EXPR:
      return "+";
    case REDUC_PLUS_EXPR:
      return "r+";
    case WIDEN_SUM_EXPR:
      return "w+";
    case WIDEN_MULT_EXPR:
      return "w*";
    case NEGATE_EXPR:
    case MINUS_EXPR:
      return "-";
    case BIT_NOT_EXPR:
      return "~";
    case TRUTH_NOT_EXPR:
      return "!";
    case MULT_EXPR:
    case INDIRECT_REF:
      return "*";
    case TRUNC_DIV_EXPR:
    case RDIV_EXPR:
      return "/";
    case CEIL_DIV_EXPR:
      return "/[cl]";
    case FLOOR_DIV_EXPR:
      return "/[fl]";
    case ROUND_DIV_EXPR:
      return "/[rd]";
    case EXACT_DIV_EXPR:
      return "/[ex]";
    case TRUNC_MOD_EXPR:
      return "%";
    case CEIL_MOD_EXPR:
      return "%[cl]";
    case FLOOR_MOD_EXPR:
      return "%[fl]";
    case ROUND_MOD_EXPR:
      return "%[rd]";
    /* The surrounding space distinguishes prefix from postfix forms.  */
    case PREDECREMENT_EXPR:
      return " --";
    case PREINCREMENT_EXPR:
      return " ++";
    case POSTDECREMENT_EXPR:
      return "-- ";
    case POSTINCREMENT_EXPR:
      return "++ ";
    case MAX_EXPR:
      return "max";
    case MIN_EXPR:
      return "min";
    default:
      return "<<< ??? >>>";
    }
}
/* Return the symbol associated with operator OP.
   Convenience wrapper around op_symbol_code that extracts OP's code.  */
static const char *
op_symbol (const_tree op)
{
  return op_symbol_code (TREE_CODE (op));
}
/* Prints the name of a call.  NODE is the CALL_EXPR_FN of a CALL_EXPR or
   the gimple_call_fn of a GIMPLE_CALL.  */

void
print_call_name (pretty_printer *buffer, tree node, int flags)
{
  tree fn = node;

  /* NON_LVALUE_EXPR is a transparent wrapper; skip it once up front.  */
  if (TREE_CODE (fn) == NON_LVALUE_EXPR)
    fn = TREE_OPERAND (fn, 0);

  /* Strip layers of indirection until something printable is reached.  */
  for (;;)
    switch (TREE_CODE (fn))
      {
      case VAR_DECL:
      case PARM_DECL:
      case FUNCTION_DECL:
	dump_function_name (buffer, fn, flags);
	return;

      case ADDR_EXPR:
      case INDIRECT_REF:
      case NOP_EXPR:
	fn = TREE_OPERAND (fn, 0);
	continue;

      case COND_EXPR:
	/* A conditional callee prints as "(c) ? f : g".  */
	pp_string (buffer, "(");
	dump_generic_node (buffer, TREE_OPERAND (fn, 0), 0, flags, false);
	pp_string (buffer, ") ? ");
	dump_generic_node (buffer, TREE_OPERAND (fn, 1), 0, flags, false);
	pp_string (buffer, " : ");
	dump_generic_node (buffer, TREE_OPERAND (fn, 2), 0, flags, false);
	return;

      case ARRAY_REF:
	if (TREE_CODE (TREE_OPERAND (fn, 0)) == VAR_DECL)
	  dump_function_name (buffer, TREE_OPERAND (fn, 0), flags);
	else
	  dump_generic_node (buffer, fn, 0, flags, false);
	return;

      case MEM_REF:
	/* A dereference at offset zero is just the pointer itself.  */
	if (integer_zerop (TREE_OPERAND (fn, 1)))
	  {
	    fn = TREE_OPERAND (fn, 0);
	    continue;
	  }
	/* Fallthru.  */
      case COMPONENT_REF:
      case SSA_NAME:
      case OBJ_TYPE_REF:
	dump_generic_node (buffer, fn, 0, flags, false);
	return;

      default:
	NIY;
	return;
      }
}
/* Parses the string STR and replaces new-lines by '\n', tabs by '\t', ...
   Each control or quote character is emitted to BUFFER as a C-style
   escape sequence; every other character is copied through unchanged.
   The loop stops at the terminating NUL, so '\0' needs no case.  */
static void
pretty_print_string (pretty_printer *buffer, const char *str)
{
  if (str == NULL)
    return;
  while (*str)
    {
      switch (str[0])
	{
	case '\b':
	  pp_string (buffer, "\\b");
	  break;
	case '\f':
	  pp_string (buffer, "\\f");
	  break;
	case '\n':
	  pp_string (buffer, "\\n");
	  break;
	case '\r':
	  pp_string (buffer, "\\r");
	  break;
	case '\t':
	  pp_string (buffer, "\\t");
	  break;
	case '\v':
	  pp_string (buffer, "\\v");
	  break;
	case '\\':
	  pp_string (buffer, "\\\\");
	  break;
	case '\"':
	  pp_string (buffer, "\\\"");
	  break;
	case '\'':
	  pp_string (buffer, "\\'");
	  break;
	/* No need to handle \0; the loop terminates on \0.  */
	/* Raw control characters 1..7 print as octal-style escapes.  */
	case '\1':
	  pp_string (buffer, "\\1");
	  break;
	case '\2':
	  pp_string (buffer, "\\2");
	  break;
	case '\3':
	  pp_string (buffer, "\\3");
	  break;
	case '\4':
	  pp_string (buffer, "\\4");
	  break;
	case '\5':
	  pp_string (buffer, "\\5");
	  break;
	case '\6':
	  pp_string (buffer, "\\6");
	  break;
	case '\7':
	  pp_string (buffer, "\\7");
	  break;
	default:
	  pp_character (buffer, str[0]);
	  break;
	}
      str++;
    }
}
/* Lazily construct the file-static pretty-printer `buffer' on first use,
   then (re)target its output stream at FILE.  Relies on the file-scope
   `initialized' flag; subsequent calls only retarget the stream.  */
static void
maybe_init_pretty_print (FILE *file)
{
  if (!initialized)
    {
      pp_construct (&buffer, /* prefix */NULL, /* line-width */0);
      pp_needs_newline (&buffer) = true;
      /* Identifiers are dumped verbatim, without locale translation.  */
      pp_translate_identifiers (&buffer) = false;
      initialized = 1;
    }
  buffer.buffer->stream = file;
}
/* Emit a newline to BUFFER followed by SPC columns of indentation.  */
static void
newline_and_indent (pretty_printer *buffer, int spc)
{
  pp_newline (buffer);
  INDENT (spc);
}
/* Handle a %K format for TEXT.  Separate from default_tree_printer so
   it can also be used in front ends.
   %K: a statement, from which EXPR_LOCATION and TREE_BLOCK will be recorded.
*/
void
percent_K_format (text_info *text)
{
  tree t = va_arg (*text->args_ptr, tree), block;
  /* Record the statement's source location in the diagnostic.  */
  gcc_assert (text->locus != NULL);
  *text->locus = EXPR_LOCATION (t);
  gcc_assert (pp_ti_abstract_origin (text) != NULL);
  block = TREE_BLOCK (t);
  *pp_ti_abstract_origin (text) = NULL;
  /* Walk up the lexical block tree looking for the innermost block whose
     fully-resolved abstract origin is a FUNCTION_DECL; that block marks
     the inlined function instance this statement came from.  */
  while (block
	 && TREE_CODE (block) == BLOCK
	 && BLOCK_ABSTRACT_ORIGIN (block))
    {
      tree ao = BLOCK_ABSTRACT_ORIGIN (block);
      /* Follow chained abstract origins, guarding against an origin that
	 points at itself.  */
      while (TREE_CODE (ao) == BLOCK
	     && BLOCK_ABSTRACT_ORIGIN (ao)
	     && BLOCK_ABSTRACT_ORIGIN (ao) != ao)
	ao = BLOCK_ABSTRACT_ORIGIN (ao);
      if (TREE_CODE (ao) == FUNCTION_DECL)
	{
	  *pp_ti_abstract_origin (text) = block;
	  break;
	}
      block = BLOCK_SUPERCONTEXT (block);
    }
}
/* Print the identifier ID to PRETTY-PRINTER, translating it to the
   user's locale when the printer requests identifier translation.  */
void
pp_base_tree_identifier (pretty_printer *pp, tree id)
{
  const char *name = IDENTIFIER_POINTER (id);

  if (pp_translate_identifiers (pp))
    {
      const char *text = identifier_to_locale (name);
      pp_append_text (pp, text, text + strlen (text));
      return;
    }

  pp_append_text (pp, name, name + IDENTIFIER_LENGTH (id));
}
EXTERN_C_END
|
bkmeans_internal.h | #ifndef PQKMEANS_BKMEANS_INTERNAL_H
#define PQKMEANS_BKMEANS_INTERNAL_H
#include <iostream>
#include <sstream>
#include <random>
#include <bitset>
#include <chrono>
#include <climits>
#include <cassert>
#include <memory>
#include "i_bkmeans_internal.h"
namespace pqkmeans {
namespace BKmeansUtil {
enum class InitCenterType {
RandomPick, Random, Outer
};
enum class FindNNType {
Table, Linear, Auto
};
}
template<size_t N, size_t SUB>
class BKmeansInternal : public IBKmeansInternal {
public:
BKmeansUtil::FindNNType find_nn_type_;
BKmeansInternal(unsigned int k,
unsigned int iteration,
bool verbose = false,
BKmeansUtil::InitCenterType init_center_type = BKmeansUtil::InitCenterType::RandomPick
) :
find_nn_type_(BKmeansUtil::FindNNType::Linear), k_(k), iteration_(iteration),
verbose_(verbose), init_center_type_(init_center_type) {
// initialize hash tables
for (unsigned int i = 0; i < N; i += SUB) {
std::vector<std::vector<int>> table(1UL << SUB);
this->tables_.push_back(table);
}
this->num_subspace_ = this->tables_.size();
for (unsigned int i = 0; i < N; i++) {
std::bitset<N> bc;
bc[i] = 1;
this->bit_count_map_.push_back(bc);
}
this->bit_combinations_ = BitCombinations((unsigned int) SUB);
}
void fit(const std::vector<std::vector<unsigned int >> &data) {
fit(data, std::vector<unsigned int>());
}
std::bitset<N> vector2bitset(const std::vector<unsigned int> &datum) {
if (datum.size() != N) {
std::ostringstream msg;
msg
<< "datum.size ("
<< datum.size()
<< " ) should be same as input_dim";
throw msg.str();
}
std::bitset<N> bitset;
for (std::size_t j = 0; j < N; ++j) {
bitset[j] = (datum[j] > 0);
}
return bitset;
}
std::vector<unsigned int> bitset2vector(const std::bitset<N> bitset) {
std::vector<unsigned int> vector(bitset.size());
for (std::size_t i = 0; i < bitset.size(); ++i) {
vector[i] = (bitset[i] == true ? 1 : 0);
}
return vector;
}
const std::vector<int> GetAssignments() {
return assignments_;
};
const std::shared_ptr<std::vector<std::vector<unsigned int>>> GetClusterCenters() {
std::shared_ptr<std::vector<std::vector<unsigned int>>> cluster_centers_array(
new std::vector<std::vector<unsigned int>>());
for (auto datum: cluster_centers_) {
cluster_centers_array->push_back(bitset2vector(datum));
}
return cluster_centers_array;
};
void fit(const std::vector<std::vector<unsigned int >> &data,
std::vector<unsigned int> initialCentroidIndexs = std::vector<unsigned int>()
) {
std::vector<std::bitset<N>> bitset_data;
for (std::size_t i = 0; i < data.size(); ++i) {
bitset_data.push_back(vector2bitset(data[i]));
}
fit(bitset_data, initialCentroidIndexs);
}
void fit(const std::vector<std::bitset<N>> &data,
std::vector<unsigned int> initialCentroidIndexs = std::vector<unsigned int>()) {
InitialzeCentroids(data, k_, this->init_center_type_, initialCentroidIndexs);
for (unsigned int i = 0; i < data.size(); i++) this->assignments_.push_back(0);
// update hash tables
for (unsigned int i = 0; i < k_; i++) {
auto subvecs = SplitToSubSpace(this->cluster_centers_.at(i));
for (unsigned int j = 0; j < subvecs.size(); j++) {
this->tables_.at(j)[subvecs.at(j).to_ulong()].push_back(i);
}
}
// select faster FindNN
if (find_nn_type_ == BKmeansUtil::FindNNType::Auto) {
find_nn_type_ = SelectFasterFindNNType(data);
}
// update centers
long last_time = 0;
for (unsigned int i = 0; i < iteration_; i++) {
auto start = std::chrono::system_clock::now();
this->UpdateCenter(data);
auto end = std::chrono::system_clock::now();
// record time & assignment filename
if (verbose_)std::cout << "iteration" << i << "," << last_time << std::endl;
last_time = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
}
}
void UpdateCenter(const std::vector<std::bitset<N>> &data) {
this->error_ = 0;
// assign and count
std::vector<long> all_count;
std::vector<std::vector<long>> count;
for (unsigned int i = 0; i < this->k_; i++) {
all_count.push_back(0);
count.push_back(std::vector<long>(N));
}
// critical section
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(data.size()); i++) {
assignments_.at(i) = FindNearestCentroid(data.at(i));
}
for (unsigned int i = 0; i < data.size(); i++) {
all_count[assignments_[i]] += 1;
for (unsigned int d = 0; d < N; d++) {
if (data.at(i)[d] == 1) {
count.at(assignments_.at(i)).at(d) += 1;
} else {
count.at(assignments_.at(i)).at(d) += -1;
}
}
}
if (verbose_)std::cout << "error:" << this->error_ << std::endl;
// update
for (unsigned int i = 0; i < this->k_; i++) {
for (unsigned int d = 0; d < N; d++) {
if (count.at(i).at(d) > 0) {
cluster_centers_.at(i)[d] = 1;
} else if (count.at(i).at(d) < 0) {
cluster_centers_.at(i)[d] = 0;
} else {
// nothing to do if there are same numbers of positives and negatives
}
}
}
}
int FindNearestCentroid(const std::vector<unsigned int> &query) {
return FindNearestCentroid(vector2bitset(query));
}
int FindNearestCentroid(const std::bitset<N> &query) {
if (find_nn_type_ == BKmeansUtil::FindNNType::Table) {
return FindNNTable(query);
} else if (find_nn_type_ == BKmeansUtil::FindNNType::Linear) {
return FindNNLinear(query);
} else {
std::cerr << "ERROR: FINDNNTYPE" << std::endl;
throw;
}
}
private:
std::vector<std::vector<std::vector<int>>> tables_;
std::vector<std::bitset<N>> cluster_centers_;
std::vector<int> assignments_;
unsigned int k_;
unsigned int iteration_;
bool verbose_;
BKmeansUtil::InitCenterType init_center_type_;
unsigned long error_;
unsigned long num_subspace_; //ceil(N/SUB)
// [000] -> [100, 010, 001] -> [110, 101, ...]
std::vector<std::vector<unsigned long>> bit_combinations_;
std::vector<std::bitset<N>> bit_count_map_;
BKmeansUtil::FindNNType SelectFasterFindNNType(const std::vector<std::bitset<N>> &data) {
if (verbose_)std::cout << "Start SelectFasterFindNNType" << std::endl;
unsigned int SAMPLE = 100;
std::mt19937 mt(123);
std::vector<std::bitset<N> > sampled_codes;
for (unsigned int i = 0; i < SAMPLE; ++i) { // 100 samples
int random_id = (unsigned int) mt() % (int) data.size();
sampled_codes.push_back(data[random_id]);
}
// LINEAR
auto start_linear = std::chrono::system_clock::now();
for (const auto &code : sampled_codes) {
FindNNLinear(code);
}
auto end_linear = std::chrono::system_clock::now();
// TABLE
auto start_table = std::chrono::system_clock::now();
for (const auto &code : sampled_codes) {
FindNNTable(code);
}
auto end_table = std::chrono::system_clock::now();
// select faster method
auto time_linear = std::chrono::duration_cast<std::chrono::nanoseconds>(end_linear - start_linear).count();
auto time_table = std::chrono::duration_cast<std::chrono::nanoseconds>(end_table - start_table).count();
if (verbose_) {
std::cout << "<" << SAMPLE << "sample test> " <<
"Linear: " << time_linear << "[ms]" <<
"Table: " << time_table << "[ms]" << std::endl;
}
if (time_linear < time_table) {
if (verbose_)std::cout << "Use Linear" << std::endl;
return BKmeansUtil::FindNNType::Linear;
} else {
if (verbose_)std::cout << "Use Table" << std::endl;
return BKmeansUtil::FindNNType::Table;
}
}
std::vector<std::bitset<SUB>> SplitToSubSpace(const std::bitset<N> &vec) {
std::vector<std::bitset<SUB>> subvecs;
for (unsigned int i = 0; i < vec.size(); i += SUB) {
std::bitset<SUB> subvec = SliceBitSet(vec, i, i + SUB);
subvecs.push_back(subvec);
}
return subvecs;
}
std::bitset<SUB> SliceBitSet(const std::bitset<N> &vec, unsigned int start, unsigned int end) {
std::bitset<SUB> sub;
for (unsigned int i = start; i < end; i++) {
sub[i - start] = vec[i];
}
return sub;
}
std::vector<std::vector<unsigned long >> BitCombinations(size_t num_bits) {
std::vector<std::vector<unsigned long>> ret;
for (unsigned int target_bit = 0; target_bit < num_bits + 1; target_bit++) {
std::vector<unsigned long> combinations;
for (unsigned long num = 0; num < (unsigned long) (1 << num_bits); num++) {
if (PopulationCount(num, num_bits) == target_bit) combinations.push_back(num);
}
ret.push_back(combinations);
}
return ret;
}
unsigned int PopulationCount(unsigned long value, unsigned int num_bits) {
unsigned int count = 0;
for (unsigned long mask = 1; mask < (unsigned long) (1 << num_bits); mask <<= 1) {
if ((value & mask) != 0) count += 1;
}
return count;
}
unsigned int PopulationCount(std::bitset<N> value) {
return value.count();
}
void InitialzeCentroids(const std::vector<std::bitset<N>> &data, unsigned int k,
BKmeansUtil::InitCenterType initCenterType,
std::vector<unsigned int> initialCentroidIndexs) {
this->cluster_centers_.clear();
std::mt19937 mt(0);
if (initCenterType == BKmeansUtil::InitCenterType::Random) {
std::uniform_int_distribution<unsigned long> randbit_generator(0, 1);
// initialize cluster_centers_
for (unsigned int i = 0; i < k; i++) {
std::bitset<N> centroid;
for (unsigned long j = 0; j < centroid.size(); j++) {
centroid[j] = randbit_generator(mt);
}
cluster_centers_.push_back(centroid);
}
} else if (initCenterType == BKmeansUtil::InitCenterType::RandomPick) {
// initialize cluster_centers_ with data
std::uniform_int_distribution<unsigned long> randdataindex(0, data.size() - 1);
for (unsigned int i = 0; i < k; i++) {
unsigned long randomIndex;
randomIndex = randdataindex(mt);
std::bitset<N> copy(data.at(randomIndex));
cluster_centers_.push_back(copy);
}
} else if (initCenterType == BKmeansUtil::InitCenterType::Outer) {
for (auto &&index: initialCentroidIndexs) {
std::bitset<N> copy(data.at(index));
cluster_centers_.push_back(copy);
}
}
}
int FindNNTable(const std::bitset<N> &query) {
auto subvecs = SplitToSubSpace(query);
for (unsigned int subradius = 0; subradius < N; subradius++) {
const auto differences = this->bit_combinations_.at(subradius);
// is there any candidate really within radius from query?
int minindex = -1;
unsigned long mindistance = N;
unsigned long cnt = 0;
for (auto difference: differences) {
for (unsigned int subindex = 0; subindex < this->num_subspace_; subindex++) {
for (auto &&candidate: this->tables_[subindex][subvecs[subindex].to_ulong() ^ difference]) {
cnt += 1;
auto distance = CalcDistance(this->cluster_centers_.at(candidate), query);
if (distance < mindistance &&
distance <= (subradius + 1) * this->num_subspace_ - 1) { // true_radius
minindex = candidate;
mindistance = distance;
}
}
}
}
if (minindex != -1) {
this->error_ += mindistance;
return minindex;
}
}
return -1;
}
int FindNNLinear(const std::bitset<N> &query) {
int minindex = -1;
unsigned long mindistance = N;
for (unsigned int i = 0; i < this->k_; i++) {
auto distance = CalcDistance(cluster_centers_.at(i), query);
if (distance < mindistance) {
minindex = i;
mindistance = distance;
}
}
this->error_ += mindistance;
return minindex;
}
unsigned int CalcDistance(const std::bitset<N> &a, const std::bitset<N> &b) {
return PopulationCount(a ^ b);
}
};
}
#endif //PQKMEANS_BKMEANS_INTERNAL_H
|
validation_criterion.h | // -----------------------------------------------------------------------------
//
// Copyright (C) 2021 CERN & Newcastle University for the benefit of the
// BioDynaMo collaboration. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef VALIDATION_CRITERION_H_
#define VALIDATION_CRITERION_H_
#include <vector>
#include "biodynamo.h"
#include "my_cell.h"
namespace bdm {
// Returns true if the cell locations within a subvolume of the total system,
// comprising approximately target_n cells, are arranged as well-separated
// same-type clusters, and false otherwise.  (Doc fix: the previous comment
// had the polarity inverted and described a 0/1 return for a bool.)
static bool GetCriterion(double spatial_range, int target_n) {
  auto* sim = Simulation::GetActive();
  auto* rm = sim->GetResourceManager();
  auto* param = sim->GetParam();

  // get number of MyCells
  int n = rm->GetNumAgents();

  // number of cells that are close (i.e. within a distance of
  // spatial_range)
  int num_close = 0;
  // number of cells of the same type, and that are close (i.e.
  // within a distance of spatial_range)
  int same_type_close = 0;
  // number of cells of opposite types, and that are close (i.e.
  // within a distance of spatial_range)
  int diff_type_close = 0;

  std::vector<Double3> pos_sub_vol(n);
  std::vector<int> types_sub_vol(n);

  // Define the subvolume to be the first octant of a cube
  double sub_vol_max = param->max_bound / 2;

  // The number of cells within the subvolume
  int num_cells_sub_vol = 0;

  // the locations of all cells within the subvolume are copied
  // to pos_sub_vol
  rm->ForEachAgent([&](Agent* agent) {
    if (auto* cell = dynamic_cast<MyCell*>(agent)) {
      const auto& pos = cell->GetPosition();
      auto type = cell->GetCellType();
      if ((fabs(pos[0] - 0.5) < sub_vol_max) &&
          (fabs(pos[1] - 0.5) < sub_vol_max) &&
          (fabs(pos[2] - 0.5) < sub_vol_max)) {
        pos_sub_vol[num_cells_sub_vol][0] = pos[0];
        pos_sub_vol[num_cells_sub_vol][1] = pos[1];
        pos_sub_vol[num_cells_sub_vol][2] = pos[2];
        types_sub_vol[num_cells_sub_vol] = type;
        num_cells_sub_vol++;
      }
    }
  });

  std::cout << "number of cells in subvolume: " << num_cells_sub_vol
            << std::endl;

  // If there are not enough cells within the subvolume, the correctness
  // criterion is not fulfilled
  if (((static_cast<double>((num_cells_sub_vol))) /
       static_cast<double>(target_n)) < 0.25) {
    std::cout << "not enough cells in subvolume: " << num_cells_sub_vol
              << std::endl;
    return false;
  }

  // If there are too many cells within the subvolume, the correctness
  // criterion is not fulfilled
  if (((static_cast<double>((num_cells_sub_vol))) /
       static_cast<double>(target_n)) > 4) {
    std::cout << "too many cells in subvolume: " << num_cells_sub_vol
              << std::endl;
    return false;
  }

#pragma omp parallel for reduction(+ : same_type_close, diff_type_close, \
                                   num_close)
  for (int i1 = 0; i1 < num_cells_sub_vol; i1++) {
    for (int i2 = i1 + 1; i2 < num_cells_sub_vol; i2++) {
      // Fix: the distance must be local to the loop body.  A single
      // function-scope variable shared by all OpenMP threads was a data
      // race that could corrupt the pair counts.
      double curr_dist = Math::GetL2Distance(pos_sub_vol[i1], pos_sub_vol[i2]);
      if (curr_dist < spatial_range) {
        num_close++;
        // opposite types are encoded with opposite signs
        if (types_sub_vol[i1] * types_sub_vol[i2] < 0) {
          diff_type_close++;
        } else {
          same_type_close++;
        }
      }
    }
  }

  // +1.0 guards against division by zero when no pair is close.
  double correctness_coefficient =
      (static_cast<double>(diff_type_close)) / (num_close + 1.0);

  // check if there are many cells of opposite types located within a close
  // distance, indicative of bad clustering
  if (correctness_coefficient > 0.1) {
    std::cout << "cells in subvolume are not well-clustered: "
              << correctness_coefficient << std::endl;
    return false;
  }

  // check if clusters are large enough, i.e. whether cells have enough
  // cells of the same type located nearby.
  // Fix: cast before dividing; same_type_close / num_cells_sub_vol was an
  // integer division whose truncated result was then cast to double, so
  // fractional averages (e.g. 4.9) were rounded down before the check.
  double avg_neighbors =
      static_cast<double>(same_type_close) / num_cells_sub_vol;
  std::cout << "average neighbors in subvolume: " << avg_neighbors << std::endl;
  if (avg_neighbors < 5) {
    std::cout << "cells in subvolume do not have enough neighbors: "
              << avg_neighbors << std::endl;
    return false;
  }

  std::cout << "correctness coefficient: " << correctness_coefficient
            << std::endl;
  return true;
}
} // namespace bdm
#endif // VALIDATION_CRITERION_H_
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.